following instr

Carsten 2018-06-19 21:01:56 +02:00
parent 654887208c
commit 6f74204c5b


@@ -52,7 +52,9 @@
{
"cell_type": "code",
"execution_count": 2,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"import sys\n",
@@ -71,7 +73,9 @@
{
"cell_type": "code",
"execution_count": 3,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"SINGLE_LABEL = True"
@@ -104,7 +108,9 @@
{
"cell_type": "code",
"execution_count": 4,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"data_root_folder = \"./data_en/\" # i created a symlink here"
@@ -120,7 +126,9 @@
{
"cell_type": "code",
"execution_count": 5,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"json_files = sorted(glob.glob(data_root_folder + \"/*.json\"))"
@@ -1254,7 +1262,9 @@
{
"cell_type": "code",
"execution_count": 7,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"emojis = twitter_data['EMOJI']\n",
@@ -1273,7 +1283,9 @@
{
"cell_type": "code",
"execution_count": 8,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"# defining blacklist for modifier emojis:\n",
@@ -1291,7 +1303,9 @@
{
"cell_type": "code",
"execution_count": 9,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"# filtering them and the EMOJI keyword out:\n",
@@ -1308,7 +1322,9 @@
{
"cell_type": "code",
"execution_count": 10,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"def latest(lst):\n",
@@ -1328,7 +1344,9 @@
{
"cell_type": "code",
"execution_count": 11,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"labels = emoji2sent([latest(e) for e in emojis])\n"
@@ -1357,7 +1375,9 @@
{
"cell_type": "code",
"execution_count": 13,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"wrong_labels = np.isnan(np.linalg.norm(labels, axis=1))"
@@ -1373,7 +1393,9 @@
{
"cell_type": "code",
"execution_count": 14,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"labels = labels[np.invert(wrong_labels)]\n",
@@ -1408,7 +1430,9 @@
{
"cell_type": "code",
"execution_count": 16,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"from nltk.stem.snowball import SnowballStemmer\n",
@@ -1421,7 +1445,9 @@
{
"cell_type": "code",
"execution_count": 17,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"def get_wordnet_pos(treebank_tag):\n",
@@ -1441,7 +1467,9 @@
{
"cell_type": "code",
"execution_count": 18,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"stemmer = SnowballStemmer(\"english\")\n",
@@ -1552,7 +1580,9 @@
{
"cell_type": "code",
"execution_count": 20,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"# at first count over our table\n",
@@ -1593,7 +1623,9 @@
{
"cell_type": "code",
"execution_count": 21,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"import operator"
@@ -1707,7 +1739,9 @@
{
"cell_type": "code",
"execution_count": 24,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"X1, Xt1, y1, yt1 = train_test_split(plain_text, labels, test_size=0.1, random_state=4222)"
@@ -1716,7 +1750,9 @@
{
"cell_type": "code",
"execution_count": 25,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"#y1_weights = np.array([(sum([emoji_weights[e] for e in e_list]) / len(e_list)) if len(e_list) > 0 else 0 for e_list in sent2emoji(y1)])"
@@ -1725,7 +1761,9 @@
{
"cell_type": "code",
"execution_count": 26,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"vectorizer = TfidfVectorizer(stop_words='english')\n",
@@ -1765,7 +1803,9 @@
{
"cell_type": "code",
"execution_count": 28,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"def train(max_size = 10000, layers=[(1024, 'relu'),(y1[0].shape[0],'softmax')], random_state=4222, ovrc=False, n_iter=5):\n",
@@ -1827,7 +1867,9 @@
{
"cell_type": "code",
"execution_count": 30,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"pred = clf.predict(vectorizer.transform(Xt1))"
@@ -1855,7 +1897,9 @@
{
"cell_type": "code",
"execution_count": 32,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"# build a dataframe to visualize test results:\n",
@@ -2677,7 +2721,9 @@
{
"cell_type": "code",
"execution_count": 37,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"testlist.to_csv('test.csv')"
@@ -2693,7 +2739,9 @@
{
"cell_type": "code",
"execution_count": 38,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"import pickle\n",
@@ -2723,7 +2771,9 @@
{
"cell_type": "code",
"execution_count": 1,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"from IPython.display import clear_output, Markdown, Math\n",
@@ -2771,7 +2821,9 @@
{
"cell_type": "code",
"execution_count": 6,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": [
"lookup_emojis = [#'😂',\n",
@@ -2912,7 +2964,9 @@
{
"cell_type": "code",
"execution_count": null,
-"metadata": {},
+"metadata": {
+"collapsed": true
+},
"outputs": [],
"source": []
}
@@ -2933,7 +2987,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
-"version": "3.6.5"
+"version": "3.6.3"
}
},
"nbformat": 4,