From 322200b2e0e968dbf82b0f7d9840bb1a185ecee3 Mon Sep 17 00:00:00 2001 From: Carsten Date: Mon, 4 Jun 2018 16:57:09 +0200 Subject: [PATCH 1/2] update --- .../Task 1 - Carsten Draschner.ipynb | 1321 +++++++++++++++++ 1 file changed, 1321 insertions(+) create mode 100644 Carsten_Solutions/Task 1 - Carsten Draschner.ipynb diff --git a/Carsten_Solutions/Task 1 - Carsten Draschner.ipynb b/Carsten_Solutions/Task 1 - Carsten Draschner.ipynb new file mode 100644 index 0000000..1822a2e --- /dev/null +++ b/Carsten_Solutions/Task 1 - Carsten Draschner.ipynb @@ -0,0 +1,1321 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Exercise 1\n", + "Solution by:\n", + "Carsten Draschner \n", + "2719095\n", + "\n", + "Following Instructions: \n", + "https://github.com/SmartDataAnalytics/MA-INF-4222-NLP-Lab/blob/master/2018_SoSe/exercises/Task01_Instructions.ipynb" + ] + }, + { + "cell_type": "code", + "execution_count": 152, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "import nltk\n", + "from nltk import word_tokenize, pos_tag" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Classifiers\n", + "**Decision Tree** import from skikit learn" + ] + }, + { + "cell_type": "code", + "execution_count": 153, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "from sklearn.tree import DecisionTreeClassifier\n", + "from sklearn.feature_extraction import DictVectorizer\n", + "from sklearn.pipeline import Pipeline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1. model1 = your POS tagger model (english)\n", + "for a words defined by its in dex with the given sentences a feature vector fot this word will be determinded" + ] + }, + { + "cell_type": "code", + "execution_count": 154, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "defined own feature model\n" + ] + } + ], + "source": [ + "def features(sentence, index):\n", + " return {\n", + " 'word': sentence[index],\n", + " 'length': len(sentence[index]),\n", + " 'is_capitalized': sentence[index][0].upper() == sentence[index][0],\n", + " 'prefix-1': sentence[index][0],\n", + " 'suffix-1': sentence[index][-1],\n", + " 'prev_word': '' if index == 0 else sentence[index - 1],\n", + " 'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],\n", + " 'kindOfCamelCase': sentence[index][1:].lower() != sentence[index][1:],\n", + " 'includesSpace': True if ((' ') in sentence[index]) else False, #depemds on tokenizer\n", + " 'containsNumber': sum(str(i) in (sentence[index]) for i in range(10))>0,\n", + " 'prefix-2': sentence[index][1] if len(sentence[index])>1 else \"-1\",\n", + " 'suffix-2': sentence[index][-2] if len(sentence[index])>1 else \"-1\"\n", + " }\n", + "print(\"defined own feature model\")\n", + "#print(features(\"halli hallo i bims der Programmierer\".strip().split(\" \"), 3))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 2. model2 = pre-trained POS tagger model using NLTK (maxentropy english)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 155, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "#max entropie pre trained pos tag\n", + "#see Calculate performance 1.2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 3. model3.x = rule-based classifiers (x = 1 to 5)\n", + "1. 
DefaultTagger that simply tags everything with the same tag\n", + "2. RegexpTagger that applies tags according to a set of regular expressions\n", + "3. N-Gram (n-gram tagger is a generalization of a unigram tagger whose context is the current word together with the part-of-speech tags of the n-1 preceding token)\n", + " + UnigramTagger\n", + " + BigramTagger\n", + " + TrigramTagger" + ] + }, + { + "cell_type": "code", + "execution_count": 156, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "\"#used from description for RegexpTagger\\npatterns = [(r'.*ing$', 'VBG'), (r'.*ed$', 'VBD'), (r'.*es$', 'VBZ'), (r'.*ould$', 'MD'), (r'.*'s$', 'NN$'), \\n (r'.*s$', 'NNS'), (r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')]\\n\\n#train taggers\\ndef_model = nltk.DefaultTagger('NN')\\nregexp_model = nltk.RegexpTagger(patterns)\\nuni_model = nltk.UnigramTagger(training_sentences_X1)\\nbi_model = nltk.BigramTagger(training_sentences_X1)\\ntri_model = nltk.TrigramTagger(training_sentences_X1)\"" + ] + }, + "execution_count": 156, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "#see Task 1.3 and 1.6\n", + "\n", + "'''#used from description for RegexpTagger\n", + "patterns = [(r'.*ing$', 'VBG'), (r'.*ed$', 'VBD'), (r'.*es$', 'VBZ'), (r'.*ould$', 'MD'), (r'.*\\'s$', 'NN$'), \n", + " (r'.*s$', 'NNS'), (r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')]\n", + "\n", + "#train taggers\n", + "def_model = nltk.DefaultTagger('NN')\n", + "regexp_model = nltk.RegexpTagger(patterns)\n", + "uni_model = nltk.UnigramTagger(training_sentences_X1)\n", + "bi_model = nltk.BigramTagger(training_sentences_X1)\n", + "tri_model = nltk.TrigramTagger(training_sentences_X1)'''" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 4. model4 = your POS tagger model (not english)" + ] + }, + { + "cell_type": "code", + "execution_count": 157, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "#see Task 2.1" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 5. model5 = pre-trained POS tagger model using RDRPOSTagger 1 or TreeTagger 2 (not english)" + ] + }, + { + "cell_type": "code", + "execution_count": 158, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "#see Task 2.2" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Corpora\n", + "note: data split for training/test = 0.8/0.2 (sequencial)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 1. X1 = nltk.corpus.treebank (english)" + ] + }, + { + "cell_type": "code", + "execution_count": 159, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[nltk_data] Downloading package treebank to\n", + "[nltk_data] /Users/Carsten/nltk_data...\n", + "[nltk_data] Package treebank is already up-to-date!\n", + "downloaded treebank\n" + ] + } + ], + "source": [ + "nltk.download('treebank')\n", + "x1 = nltk.corpus.treebank\n", + "print(\"downloaded treebank\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### 2. 
X2 = nltk.corpus.brown (english)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 160,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "[nltk_data] Downloading package brown to /Users/Carsten/nltk_data...\n",
+      "[nltk_data] Package brown is already up-to-date!\n",
+      "downloaded brown\n"
+     ]
+    }
+   ],
+   "source": [
+    "nltk.download('brown')\n",
+    "x2 = nltk.corpus.brown\n",
+    "print(\"downloaded brown\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### 3. X3 = other language German"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 161,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "#? nltk.corpus.ConllCorpusReader"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 162,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "downloaded german tiger corpus\n"
+     ]
+    }
+   ],
+   "source": [
+    "#load the german tiger corpus with a ConllCorpusReader\n",
+    "X3 = nltk.corpus.ConllCorpusReader(root='/Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions/sets/german/', fileids=['tiger_release_aug07.corrected.16012013.conll09'], columntypes=['ignore', 'words', 'ignore', 'ignore', 'pos'], encoding='utf-8')\n",
+    "german_tagged_sents = X3.tagged_sents()\n",
+    "print(\"downloaded german tiger corpus\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### 4. X4 = other language Croatian"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 201,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "#x4 = other language\n",
+    "#from Croatia:\n",
+    "#by Željko Agić, Nikola Ljubešić http://www.lrec-conf.org/proceedings/lrec2014/pdf/690_Paper.pdf\n",
+    "#license: https://creativecommons.org/licenses/by-sa/4.0/\n",
+    "corp = nltk.corpus.ConllCorpusReader(root=\"/Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions/sets/croatia/\", fileids=[\"set.hr.conll\"], columntypes=('ignore','words','ignore','ignore','ignore','ignore','ignore','ignore','ignore','pos'))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 200,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      ">\n"
+     ]
+    }
+   ],
+   "source": [
+    "print(corp.tagged_sents)"
+   ]
+  },
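+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A minimal sanity check (an illustrative sketch, not executed above): every corpus reader used here yields tagged sentences as lists of (word, tag) tuples, so the same pipeline can consume all of them. The slice sizes are arbitrary."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#peek at the first few (word, tag) tuples of each corpus\n",
+    "print(nltk.corpus.treebank.tagged_sents()[0][:5])\n",
+    "print(nltk.corpus.brown.tagged_sents()[0][:5])\n",
+    "print(german_tagged_sents[0][:5])\n",
+    "print(corp.tagged_sents()[0][:5])"
+   ]
+  },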
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Task 1\n",
+    "* get results for english (plot a graph with all classifiers x results)\n",
+    "  * performance 1.1 = model1 in X1\n",
+    "  * performance 1.2 = model2 in X1\n",
+    "  * performance 1.3.x = model3.x in X1\n",
+    "  * performance 1.4 = model1 in X2\n",
+    "  * performance 1.5 = model2 in X2\n",
+    "  * performance 1.6.x = model3.x in X2"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "#### Generating Testdata"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "##### Generate Training and Testdata for X1\n",
+    "1. split annotated sentences into training and test data\n",
+    "2. split training data into input data and teacher data\n",
+    "   * input is the feature vector of each word\n",
+    "   * output is the list of POS tags for each word of the sentence"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 165,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "#to generate training data, strip the assigned tags from a tagged sentence\n",
+    "def untag(tagged_sentence):\n",
+    "    return [w for w, t in tagged_sentence]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 166,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "got 3131 training sentences and 783 test sentences\n"
+     ]
+    }
+   ],
+   "source": [
+    "#print(type(nltk.corpus.treebank.tagged_sents()))\n",
+    "\n",
+    "#object including the annotated sentences\n",
+    "annotated_sent = nltk.corpus.treebank.tagged_sents()\n",
+    "\n",
+    "#to split the data, calculate the borders for ratio\n",
+    "cutoff = int(.8 * len(annotated_sent))\n",
+    "training_sentences_X1 = annotated_sent[:cutoff]\n",
+    "test_sentences_X1 = annotated_sent[cutoff:]\n",
+    "\n",
+    "#show the amount of sentences\n",
+    "print(\"got \",len(training_sentences_X1),\" training sentences and \", len(test_sentences_X1), \" test sentences\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "**transform_to_dataset** generates the input X as a list of feature dictionaries and an output y as a list of POS tags. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 167,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "#for training, split the tagged sentences into y (the tag of every word) and X (the feature dict extracted for every word of every sentence)\n",
+    "def transform_to_dataset(tagged_sentences):\n",
+    "    X, y = [], []\n",
+    "    for tagged_sentence in tagged_sentences:\n",
+    "        for index in range(len(tagged_sentence)):\n",
+    "            X.append(features(untag(tagged_sentence), index))\n",
+    "            y.append(tagged_sentence[index][1]) \n",
+    "    return X, y"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 168,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "generated X1 (feature sets) and y1 set of teacher tags\n"
+     ]
+    }
+   ],
+   "source": [
+    "#training input set X and teacher set y\n",
+    "X1, y1 = transform_to_dataset(training_sentences_X1)\n",
+    "print(\"generated X1 (feature sets) and y1 set of teacher tags\")"
+   ]
+  },
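+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A minimal sketch (illustrative only, assuming the `features`, `untag` and `transform_to_dataset` definitions above) of what one training example looks like; the toy sentence and its tags are invented."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#toy tagged sentence, invented for illustration\n",
+    "sample = [(\"The\", \"DT\"), (\"cat\", \"NN\"), (\"sleeps\", \"VBZ\")]\n",
+    "X_sample, y_sample = transform_to_dataset([sample])\n",
+    "\n",
+    "#feature dict for \"cat\": word, length, affixes, neighbouring words, ...\n",
+    "print(X_sample[1])\n",
+    "\n",
+    "#teacher tags, one per word: ['DT', 'NN', 'VBZ']\n",
+    "print(y_sample)"
+   ]
+  },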
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "##### Generate Training and Testdata for X2\n",
+    "1. split annotated sentences into training and test data\n",
+    "2. split training data into input data and teacher data\n",
+    "   * input is the feature vector of each word\n",
+    "   * output is the list of POS tags for each word of the sentence"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 169,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "got 45872 training sentences and 11468 test sentences\n"
+     ]
+    }
+   ],
+   "source": [
+    "#object including the annotated sentences\n",
+    "annotated_sent = nltk.corpus.brown.tagged_sents()\n",
+    "\n",
+    "#to split the data, calculate the borders for ratio\n",
+    "cutoff = int(.8 * len(annotated_sent))\n",
+    "training_sentences_X2 = annotated_sent[:cutoff]\n",
+    "test_sentences_X2 = annotated_sent[cutoff:]\n",
+    "\n",
+    "#show the amount of sentences\n",
+    "print(\"got \",len(training_sentences_X2),\" training sentences and \", len(test_sentences_X2), \" test sentences\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 170,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "generated X2 (feature sets) and y2 set of teacher tags\n"
+     ]
+    }
+   ],
+   "source": [
+    "#training input set X and teacher set y\n",
+    "X2, y2 = transform_to_dataset(training_sentences_X2)\n",
+    "print(\"generated X2 (feature sets) and y2 set of teacher tags\")#(X3[:3], y3[:3])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "##### Generate Training and Testdata for X3\n",
+    "1. split annotated sentences into training and test data\n",
+    "2. split training data into input data and teacher data\n",
+    "   * input is the feature vector of each word\n",
+    "   * output is the list of POS tags for each word of the sentence"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 171,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "got 40377 training sentences and 10095 test sentences\n"
+     ]
+    }
+   ],
+   "source": [
+    "#object including the annotated sentences\n",
+    "annotated_sent = X3.tagged_sents()\n",
+    "\n",
+    "#print(type(annotated_sent))\n",
+    "\n",
+    "#to split the data, calculate the borders for ratio\n",
+    "cutoff = int(.8 * len(annotated_sent))\n",
+    "training_sentences_X3 = annotated_sent[:cutoff]\n",
+    "test_sentences_X3 = annotated_sent[cutoff:]\n",
+    "\n",
+    "#show the amount of sentences\n",
+    "print(\"got \",len(training_sentences_X3),\" training sentences and \", len(test_sentences_X3), \" test sentences\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 172,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "generated X3 (feature sets) and y3 set of teacher tags\n"
+     ]
+    }
+   ],
+   "source": [
+    "#training input set X and teacher set y\n",
+    "X3, y3 = transform_to_dataset(training_sentences_X3)\n",
+    "print(\"generated X3 (feature sets) and y3 set of teacher tags\")#(X3[:3], y3[:3])"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "collapsed": true
+   },
+   "source": [
+    "#### Implementing a classifier\n",
+    "relevant imports\n",
+    "* decision tree as the classifier\n",
+    "* DictVectorizer transforms the feature dictionary into a vector as the input for the tree"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 173,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "imported sktree, DictVectorizer, Pipeline\n"
+     ]
+    }
+   ],
+   "source": [
+    "from sklearn.tree import DecisionTreeClassifier\n",
+    "from sklearn.feature_extraction 
import DictVectorizer\n", + "from sklearn.pipeline import Pipeline\n", + "print(\"imported sktree, DictVectorizer, Pipeline\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Pipeline manages vectorizer and classifier" + ] + }, + { + "cell_type": "code", + "execution_count": 174, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Initialized classifier\n" + ] + } + ], + "source": [ + "clf = Pipeline([\n", + " ('vectorizer', DictVectorizer(sparse=False)),\n", + " ('classifier', DecisionTreeClassifier(criterion='entropy'))\n", + "])\n", + "print(\"Initialized classifier\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Calculating performances" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Calculate performance 1.1 - own POS tagger model with X1 = treebank\n", + "* fit the decision tree for a limited amount (size) of training \n", + "* test data and compare with score function on testdata" + ] + }, + { + "cell_type": "code", + "execution_count": 175, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "training OK\n", + "Accuracy: 0.885273716253\n" + ] + } + ], + "source": [ + "size=10000\n", + "clf.fit(X1[:size], y1[:size])\n", + " \n", + "print('training OK')\n", + " \n", + "X1_test, y1_test = transform_to_dataset(test_sentences_X1)\n", + "\n", + "performance1_1 = clf.score(X1_test, y1_test)\n", + "\n", + "print(\"Accuracy:\", performance1_1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Calculate performance 1.2 - pre-trained POS tagger model using NLTK (maxentropy english) with X1 = treebank" + ] + }, + { + "cell_type": "code", + "execution_count": 176, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accuracy: 0.8936074654423873\n" + ] + } + ], + "source": [ + "#extract only the words from feature trainings set\n", + "only_words_X1 = [x['word'] for x in X1_test]\n", + "\n", + "#train with the pos tagger by nltk\n", + "pos_tags_by_pre_trained_pos_tagger = [word_tag_tuple[1] for word_tag_tuple in pos_tag(only_words_X1, lang='eng')]\n", + "\n", + "#calculate performance by comparing each pos tag\n", + "performance1_2 = 0\n", + "for index in range(len(pos_tags_by_pre_trained_pos_tagger)):\n", + " if(pos_tags_by_pre_trained_pos_tagger[index]==y1_test[index]):\n", + " performance1_2 += 1\n", + "performance1_2 /= len(pos_tags_by_pre_trained_pos_tagger)\n", + "\n", + "print(\"Accuracy:\", performance1_2)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Calculate performance 1.3 - rule-based classifiers with X1 = treebank\n", + "1. DefaultTagger that simply tags everything with the same tag\n", + "2. RegexpTagger that applies tags according to a set of regular expressions\n", + "3. 
N-Gram (n-gram tagger is a generalization of a unigram tagger whose context is the current word together with the part-of-speech tags of the n-1 preceding token)\n", + " + UnigramTagger\n", + " + BigramTagger\n", + " + TrigramTagger" + ] + }, + { + "cell_type": "code", + "execution_count": 177, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "performance 1.3.1 is: 0.1447677029791906\n", + "performance 1.3.2 is: 0.24232746145017217\n", + "performance 1.3.3 is: 0.8608213982733669\n", + "performance 1.3.4 is: 0.1132791057437996\n", + "performance 1.3.5 is: 0.06736863116922003\n" + ] + } + ], + "source": [ + "#used from description for RegexpTagger\n", + "patterns = [(r'.*ing$', 'VBG'), (r'.*ed$', 'VBD'), (r'.*es$', 'VBZ'), (r'.*ould$', 'MD'), (r'.*\\'s$', 'NN$'), \n", + " (r'.*s$', 'NNS'), (r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')]\n", + "\n", + "#train taggers\n", + "def_model = nltk.DefaultTagger('NN')\n", + "regexp_model = nltk.RegexpTagger(patterns)\n", + "uni_model = nltk.UnigramTagger(training_sentences_X1)\n", + "bi_model = nltk.BigramTagger(training_sentences_X1)\n", + "tri_model = nltk.TrigramTagger(training_sentences_X1)\n", + "\n", + "#evaluate taggers\n", + "# performance of Default Tagger\n", + "performance1_3_1 = def_model.evaluate(test_sentences_X1)\n", + "print('performance 1.3.1 is: ',performance1_3_1)\n", + "\n", + "# performance of Regex Tagger\n", + "performance1_3_2 = regexp_model.evaluate(test_sentences_X1)\n", + "print('performance 1.3.2 is: ',performance1_3_2)\n", + "\n", + "# performance of Unigram Tagger\n", + "performance1_3_3 = uni_model.evaluate(test_sentences_X1)\n", + "print('performance 1.3.3 is: ',performance1_3_3)\n", + "\n", + "# performance of Bigram Tagger\n", + "performance1_3_4 = bi_model.evaluate(test_sentences_X1)\n", + "print('performance 1.3.4 is: ',performance1_3_4)\n", + "\n", + "# performance of Trigram Tagger\n", + "performance1_3_5 = tri_model.evaluate(test_sentences_X1)\n", + "print('performance 1.3.5 is: ',performance1_3_5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Calculate performance 1.4 - own POS tagger model with X2 = brown" + ] + }, + { + "cell_type": "code", + "execution_count": 178, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "calculated perfomance 1.4= 0.785464840867\n" + ] + } + ], + "source": [ + "size=10000\n", + "clf.fit(X2[:size], y2[:size])\n", + "X2_test, y2_test = transform_to_dataset(test_sentences_X2)\n", + "performance1_4 = clf.score(X2_test, y2_test)\n", + "print(\"calculated perfomance 1.4= \",performance1_4)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Calculate performance 1.5 - pre-trained POS tagger model using NLTK (maxentropy english) with X2 = brown" + ] + }, + { + "cell_type": "code", + "execution_count": 179, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accuracy: 0.6044583741861567\n" + ] + } + ], + "source": [ + "#extract only the words from feature trainings set\n", + "only_words_X2 = [x['word'] for x in X2_test]\n", + "\n", + "#train with the pos tagger by nltk\n", + "pos_tags_by_pre_trained_pos_tagger = [word_tag_tuple[1] for word_tag_tuple in pos_tag(only_words_X2, lang='eng')]\n", + "\n", + "#calculate performance by comparing each pos tag\n", + "performance1_5 = 0\n", + "for index in range(len(pos_tags_by_pre_trained_pos_tagger)):\n", + " 
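# a prediction counts as correct only on an exact tag-string match\n",
+    "    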
if(pos_tags_by_pre_trained_pos_tagger[index]==y2_test[index]):\n", + " performance1_5 += 1\n", + "performance1_5 /= len(pos_tags_by_pre_trained_pos_tagger)\n", + "\n", + "print(\"Accuracy:\", performance1_5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Calculate performance 1.6 - rule-based classifiers with X2 = brown" + ] + }, + { + "cell_type": "code", + "execution_count": 180, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "performance 1.6.1 is: 0.10997763652187324\n", + "performance 1.6.2 is: 0.17594438874995869\n", + "performance 1.6.3 is: 0.8773754310202373\n", + "performance 1.6.4 is: 0.3390490564374869\n", + "performance 1.6.5 is: 0.19178610379738467\n" + ] + } + ], + "source": [ + "uni_model = nltk.UnigramTagger(training_sentences_X2)\n", + "bi_model = nltk.BigramTagger(training_sentences_X2)\n", + "tri_model = nltk.TrigramTagger(training_sentences_X2)\n", + "\n", + "#evaluate taggers\n", + "# performance of Default Tagger\n", + "performance1_6_1 = def_model.evaluate(test_sentences_X2)\n", + "print('performance 1.6.1 is: ',performance1_6_1)\n", + "\n", + "# performance of Regex Tagger\n", + "performance1_6_2 = regexp_model.evaluate(test_sentences_X2)\n", + "print('performance 1.6.2 is: ',performance1_6_2)\n", + "\n", + "# performance of Unigram Tagger\n", + "performance1_6_3 = uni_model.evaluate(test_sentences_X2)\n", + "print('performance 1.6.3 is: ',performance1_6_3)\n", + "\n", + "# performance of Bigram Tagger\n", + "performance1_6_4 = bi_model.evaluate(test_sentences_X2)\n", + "print('performance 1.6.4 is: ',performance1_6_4)\n", + "\n", + "# performance of Trigram Tagger\n", + "performance1_6_5 = tri_model.evaluate(test_sentences_X2)\n", + "print('performance 1.6.5 is: ',performance1_6_5)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "#### Using the classifier\n", + "for results the link of pos_tags:\n", + "https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html" + ] + }, + { + "cell_type": "code", + "execution_count": 181, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "3.6.3\n" + ] + } + ], + "source": [ + "def pos_tag(sentence):\n", + " print('checking...')\n", + " tagged_sentence = []\n", + " tags = clf.predict([features(sentence, index) for index in range(len(sentence))])\n", + " return zip(sentence, tags)\n", + "\n", + "import platform\n", + "print(platform.python_version())\n", + "\n", + "#print(list(pos_tag(word_tokenize('Hello world, lets do something awesome today!'))))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Results for Task 1\n", + "* get results for english (plot a graph with all classifiers x results)\n", + " * performance 1.1 = model1 in X1\n", + " * performance 1.2 = model2 in X1\n", + " * performance 1.3.x = model3.x in X1\n", + " * performance 1.4 = model1 in X2\n", + " * performance 1.5 = model2 in X2\n", + " * performance 1.6.x = model3.x in X2" + ] + }, + { + "cell_type": "code", + "execution_count": 182, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "High five! You successfully sent some data to your account on plotly. 
View your plot in your browser at https://plot.ly/~carsten95/0 or inside your plot.ly account where it is named 'basic-bar'\n" + ] + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 182, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import plotly\n", + "plotly.tools.set_credentials_file(username='carsten95', api_key='vElf5IOxiFheQdjTxjXW')\n", + "plotly.__version__\n", + "import plotly.plotly as py\n", + "import plotly.graph_objs as go\n", + "\n", + "data = [go.Bar(\n", + " x=['performance 1.1', 'performance 1.2', 'performance 1.3.1', 'performance 1.3.2', 'performance 1.3.3', 'performance 1.3.4', 'performance 1.3.5', 'performance 1.4', 'performance 1.5' , 'performance 1.6.1', 'performance 1.6.2', 'performance 1.6.3', 'performance 1.6.4', 'performance 1.6.5'],\n", + " y=[performance1_1, performance1_2, performance1_3_1, performance1_3_2, performance1_3_3, performance1_3_4, performance1_3_5, performance1_4, performance1_5, performance1_6_1, performance1_6_2, performance1_6_3, performance1_6_4, performance1_6_5]\n", + " )]\n", + "\n", + "py.iplot(data, filename='basic-bar')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": true + }, + "source": [ + "### Results for Task 2\n", + "* train your model with standard features (plot a graph with all classifiers x results)\n", + " * performance 2.1 = model4 in X3\n", + " * model 4 your POS tagger model (not english)\n", + " * performance 2.2 = model5 in X3\n", + " * pre-trained POS tagger model using RDRPOSTagger 1 or TreeTagger 2 (not english)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Calculate Performance 2.1" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "again building a pipeline:\n", + "* first vectorizing the dictionary based on feature dict\n", + "* second, initializing and training the max entropy classifier decision tree" + ] + }, + { + "cell_type": "code", + "execution_count": 183, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "clf = Pipeline([\n", + " ('vectorizer', DictVectorizer(sparse=False)),\n", + " ('classifier', DecisionTreeClassifier(criterion='entropy'))\n", + "])" + ] + }, + { + "cell_type": "code", + "execution_count": 184, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "training done\n", + "Accuracy: 0.838410907381\n" + ] + } + ], + "source": [ + "size=10000\n", + "clf.fit(X3[:size], y3[:size])\n", + " \n", + "print('training done')\n", + " \n", + "X3_test, y3_test = transform_to_dataset(test_sentences_X3)\n", + "\n", + "performance2_1 = clf.score(X3_test, y3_test)\n", + "\n", + "print(\"Accuracy:\", performance2_1)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "##### Calculate Performance 2.2\n", + "* using RDRPOS Taggger in a python 3 port rom https://github.com/jacopofar/RDRPOSTagger-python-3" + ] + }, + { + "cell_type": "code", + "execution_count": 185, + "metadata": {}, + "outputs": [], + "source": [ + "#RDRPOSTagger port python 3 from https://github.com/jacopofar/RDRPOSTagger-python-3" + ] + }, + { + "cell_type": "code", + "execution_count": 186, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "wrote file de_text.tx in cwd with each word of the sentence seperated by a space\n" + ] + } + ], + "source": [ + "#generate a german txt text file:\n", + "f = open(\"de_text.txt\", 'w')\n", + "for sentence 
in test_sentences_X3:\n",
+    "    for word, tag in sentence:\n",
+    "        f.write(word + \" \")\n",
+    "    f.write(\"\\n\")\n",
+    "f.close()\n",
+    "\n",
+    "print(\"wrote file de_text.tx in cwd with each word of the sentence seperated by a space\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 187,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "stored: /Users/Carsten/GitRepos/NLP-LAB/Carsten_Solutions\n"
+     ]
+    }
+   ],
+   "source": [
+    "#to use RDRPOSTagger we have to store the path where we are currently working and where the downloaded RDRPOSTagger is stored\n",
+    "import sys, os\n",
+    "\n",
+    "#current working directory to restore it later\n",
+    "dir_path = os.getcwd()\n",
+    "print(\"stored: \", dir_path)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 188,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "switched to path: /Users/Carsten/Downloads/RDRPOSTagger-python-3-master/pSCRDRtagger\n"
+     ]
+    }
+   ],
+   "source": [
+    "#add the downloaded RDRPOSTagger to the module search path and switch into it\n",
+    "RDRPOS_TAGGER_PATH = \"/Users/Carsten/Downloads/RDRPOSTagger-python-3-master/pSCRDRtagger\"\n",
+    "sys.path.insert(0, RDRPOS_TAGGER_PATH)\n",
+    "os.chdir(RDRPOS_TAGGER_PATH)\n",
+    "print(\"switched to path:\", RDRPOS_TAGGER_PATH)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 189,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "['Node', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__spec__', 'tabStr']\n"
+     ]
+    }
+   ],
+   "source": [
+    "# import and rename for easier use\n",
+    "import RDRPOSTagger\n",
+    "r = RDRPOSTagger.RDRPOSTagger()\n",
+    "\n",
+    "#load the pre-trained German model files\n",
+    "r.constructSCRDRtreeFromRDRfile(\"../Models/POS/German.RDR\")\n",
+    "DICT = RDRPOSTagger.readDictionary(\"../Models/POS/German.DICT\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 190,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "('\\nOutput file:', 'de_text.txt.TAGGED')\n"
+     ]
+    }
+   ],
+   "source": [
+    "#switch back to the dir in which we worked at the start\n",
+    "os.chdir(dir_path)\n",
+    "\n",
+    "# generate a file with a tag after each word, delimited by /\n",
+    "r.tagRawCorpus(DICT, \"de_text.txt\")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 191,
+   "metadata": {
+    "collapsed": true
+   },
+   "outputs": [],
+   "source": [
+    "#from the generated text file above, separate the words and tags\n",
+    "tagged_words = []\n",
+    "f = open(\"de_text.txt.TAGGED\", 'r')\n",
+    "for line in f:\n",
+    "    for splits in line.split():\n",
+    "        cmp = splits.rsplit('/',1)\n",
+    "        if len(cmp) != 2:\n",
+    "            print(\"error parsing: \", cmp)\n",
+    "        else:\n",
+    "            w,t = cmp\n",
+    "            tagged_words.append((w,t))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 192,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "[('CUPERTINO', 'NE'), ('(', '$('), ('rtr', 'NE'), ('/', '$('), ('whp', 'XY'), (')', '$('), ('.', '$.'), ('Der', 'ART'), ('Chef', 'NN'), ('des', 'ART')]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    },
+    {
+     "data": {
+      "text/plain": [
+       "[[('CUPERTINO', 'NE')], [('(', '$('), ('rtr', 'NE'), ('/', '$('), ('whp', 'XY'), (')', '$('), ('.', '$.')], ...]"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "display(tagged_words[:10])\n",
+    "display(test_sentences_X3[:10])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   
"execution_count": 193, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accuracy 2.2 = 0.9754407616361072\n" + ] + } + ], + "source": [ + "performance2_2 = 0 # for test \n", + "\n", + "#counter for the words\n", + "i = 0\n", + "\n", + "#evaluate accuracy\n", + "for sent in test_sentences_X3:\n", + " for tagged_w in sent:\n", + " if tagged_w[1] == tagged_words[i][1]:\n", + " performance2_2 += 1\n", + " i += 1\n", + "performance2_2 = performance2_2 / len(tagged_words)\n", + "print(\"Accuracy 2.2 = \",performance2_2)" + ] + }, + { + "cell_type": "code", + "execution_count": 194, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "High five! You successfully sent some data to your account on plotly. View your plot in your browser at https://plot.ly/~carsten95/0 or inside your plot.ly account where it is named 'basic-bar'\n" + ] + }, + { + "data": { + "text/html": [ + "" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 194, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "#visualize results with plotly\n", + "data = [go.Bar(\n", + " x=['performance 2.1', 'performance 2.2'],\n", + " y=[performance2_1, performance2_2]\n", + " )]\n", + "\n", + "py.iplot(data, filename='basic-bar')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From 72d1b5ca86a654d85f7e82a65c6db8d9a5775fa8 Mon Sep 17 00:00:00 2001 From: Carsten Date: Mon, 4 Jun 2018 19:59:31 +0200 Subject: [PATCH 2/2] approach for emoticon emoji assignment --- .../Emoji-Emoticon-Assignment_eastern.csv | 45 + .../Emoji-Emoticon-Assignment_western.csv | 26 + .../Emoticon_Emoji_Assignment.ipynb | 999 ++++++++++++++++++ 3 files changed, 1070 insertions(+) create mode 100644 Project/Tools/Emoji-Emoticon-Assignment_eastern.csv create mode 100644 Project/Tools/Emoji-Emoticon-Assignment_western.csv create mode 100644 Project/naive_approach/Emoticon_Emoji_Assignment.ipynb diff --git a/Project/Tools/Emoji-Emoticon-Assignment_eastern.csv b/Project/Tools/Emoji-Emoticon-Assignment_eastern.csv new file mode 100644 index 0000000..ee0430e --- /dev/null +++ b/Project/Tools/Emoji-Emoticon-Assignment_eastern.csv @@ -0,0 +1,45 @@ +Icon,Emoji,Meaning +(>_<)(>_<)>,😣😖,Troubled[19][20] +(';'),👶,Baby[19] +(^^ゞ(^_^;)(-_-;)(~_~;) (・.・;)(・_・;)(・・;) ^^;^_^;(#^.^#)(^^;),😅😳😓😥,"Nervous, embarrassed,[19] troubled, shy,[20] sweat drop[7]" +(^.^)y-.o○(-.-)y-°°°,🚬,Smoking[19] +(-_-)zzz,😴💤,Sleeping[19] +(^_-)(^_-)-☆,😉😜,Wink[19] +((+_+))(+o+)(°°)(°-°) (°.°)(°_°)(°_°>)(°レ°),😕😶😵🙄,Confused[19] +(o|o),,Ultraman [19] +<(`^´)>,,N/A[19] +^_^(°o°)(^_^)/(^O^)/(^o^)/(^^)/ (≧∇≦)/(/◕ヮ◕)/(^o^)丿∩(·ω·)∩(·ω·)^ω^,😀😅😆😃😄🙌,Joyful[19][20][21] +(__)_(._.)__(_^_)_<(_ _)> m(__)mm(_ _)m,🙇,"Kowtow as a sign of respect, or dogeza for apology[19][20]" +\(°ロ\)(/ロ°)/,,Questioning[19] +('_')(/_;)(T_T) (;_;)(;_;(;_:)(;O;)(:_;)(ToT)(T▽T) ;_;;-;;n;;;Q.QT.TTnTQQQ_Q,😭😢,"Sad, crying[10][19][20]" +(ー_ー)!!(-.-)(-_-)(一一)(;一_一),😒😩😑😞😔,Shame[19] +(=_=),😫😩😪,Tired[19] 
+(=^·^=)(=^··^=)=_^=,😺😸😹😻😼😽🙀😿😾🐱,Cat[19][21] +(..)(._.),🙍😔,Looking down[19] +^m^,,Giggling with hand covering mouth[19] +(・・?(?_?),😕😵,Confusion[19] +(-‸ლ),🤦,Facepalm [22] +>^_^<<^!^>^/^(*^_^*)§^.^§(^<^) (^.^)(^ム^)(^·^)(^.^)(^_^.)(^_^)(^^) (^J^)(*^.^*)^_^(#^.^#)(^—^),😃😄☺️😁😀😍,Normal laugh[19] +(^^)/~~~(^_^)/~(;_;)/~~~(^.^)/~~~(-_-)/~~~ ($··)/~~~(@^^)/~~~(T_T)/~~~(ToT)/~~~,👋,Waving[19] +(V)o¥o(V),👽👾,Alien Baltan[19] +\(~o~)/\(^o^)/\(-o-)/ ヽ(^。^)ノヽ(^o^)丿(*^0^*),😍😀🙌💃,Excited[19] +(*_*)(*_*;(+_+) (@_@)(@_@。(@_@;)\(◎o◎)/!,😍,Amazed[19] +!(^^)!,,N/A[19] +(*^^)v(^^)v(^_^)v(’-’*) (^v^)(^▽^)(・∀・)(´∀`)(⌒▽⌒),😂✌️,"Laughing, cheerful[19][20]" +(~o~)(~_~),😔😒😏,N/A [19] +(^^ゞ,😙😚,N/A [19] +(p_-),,N/A [19] +((d[-_-]b)),🎧,"Headphones, listening to music[19]" +"(-""-)(ーー゛)(^_^メ)(-_-メ)(~_~メ)(--〆) (・へ・)(`´)<`~´><`ヘ´>(ーー;)",😟😓😬,Worried[19][20] +(^0_0^),😎🤓,Eyeglasses[19] +( ..)φφ(..),,Jotting note[19] +(●^o^●)(^v^)(^u^)(^◇^) ( ^)o(^ )(^O^)(^o^)(^○^))^o^( (*^▽^*)(✿◠‿◠),😀😁😆😅😃😄,Happy[19][20] +( ̄ー ̄),😁,Grinning[20] +( ̄□ ̄;)°o°°O°:O o_Oo_0o.O(o.o)oO,😲😮😯,Surprised[3][20] +(*´▽`*)(*°∀°)=3,,Infatuation[20] +( ゚ Д゚)(°◇°),😨😱😮😲,"Shocked, surprised[20]" +(* ̄m ̄),😬😠😡,Dissatisfied[20] +ヽ(´ー`)┌¯\_(ツ)_/¯,,"Mellow, shrug[20]" +(´・ω・`)(‘A`),,Snubbed or deflated[20] +(*^3^)/~☆,😘😚😙😗,Blowing a kiss[23] +.....φ(・∀・*),,Studying is good[23] diff --git a/Project/Tools/Emoji-Emoticon-Assignment_western.csv b/Project/Tools/Emoji-Emoticon-Assignment_western.csv new file mode 100644 index 0000000..e23db82 --- /dev/null +++ b/Project/Tools/Emoji-Emoticon-Assignment_western.csv @@ -0,0 +1,26 @@ +Icon,Icon,Icon,Icon,Icon,Icon,Icon,Icon,Icon,Icon,Icon,Emoji,Meaning +:‑) :),:-] :],:-3 :3,:-> :>,8-) 8),:-} :},:o),:c),:^),=],=),☺️🙂😊😀😁,Smiley or happy face.[4][5][6] +:‑D :D,8‑D 8D,x‑D xD,X‑D XD,=D,=3,B^D,,,,,😃😄😆😍,"Laughing,[4] big grin,[5][6] laugh with glasses,[7] or wide-eyed surprise[8]" +:-)),:-)),:-)),:-)),:-)),:-)),:-)),:-)),:-)),:-)),:-)),,Very happy or double chin[7] +:‑( :(,:‑c :c,:‑< :<,:‑[ :[,:-||,>:[,:{,:@,>:(,,,☹️🙁😠😡😞😟😣😖,"Frown,[4][5][6] sad,[9] angry,[7] pouting" +:'‑( :'(,:'‑( :'(,:'‑( :'(,:'‑( :'(,:'‑( :'(,:'‑( :'(,:'‑( :'(,:'‑( :'(,:'‑( :'(,:'‑( :'(,:'‑( :'(,😢😭,Crying[9] +:'‑) :'),:'‑) :'),:'‑) :'),:'‑) :'),:'‑) :'),:'‑) :'),:'‑) :'),:'‑) :'),:'‑) :'),:'‑) :'),:'‑) :'),😂,Tears of happiness[9] +D‑':,D:<,D:,D8,D;,D=,DX,,,,,😨😧😦😱😫😩,"Horror, disgust, sadness, great dismay[5][6] (right to left)" +:‑O :O,:‑o :o,:-0,8‑0,>:O,,,,,,,😮😯😲,"Surprise,[3] shock,[4][10] yawn[11]" +:-* :*,:×,,,,,,,,,,😗😙😚😘😍,Kiss +;‑) ;),*-) *),;‑] ;],;^),":‑,",;D,,,,,,😉😜😘,"Wink,[4][5][6] smirk[10][11]" +:‑P :P,X‑P XP,x‑p xp,:‑p :p,:‑Þ :Þ,:‑þ :þ,:‑b :b,d:,=p,>:P,,😛😝😜🤑,"Tongue sticking out, cheeky/playful,[4] blowing a raspberry" +:‑/ :/,:‑.,>:\,>:/,:\,=/,=\,:L,=L,:S,,🤔😕😟,"Skeptical, annoyed, undecided, uneasy, hesitant[4]" +:‑| :|,,,,,,,,,,,😐😑,"Straight face[5] no expression, indecision[9]" +:$,,,,,,,,,,,😳😞😖,"Embarrassed,[6] blushing[7]" +:‑X :X,:‑# :#,:‑& :&,,,,,,,,,🤐😶,"Sealed lips or wearing braces,[4] tongue-tied[9]" +O:‑) O:),0:‑3 0:3,0:‑) 0:),0;^),,,,,,,,😇👼,"Angel,[4][5][10] saint,[9] innocent" +>:‑) >:),}:‑) }:),3:‑) 3:),>;),,,,,,,,😈,"Evil,[5] devilish[9]" +|;‑),|‑O,,,,,,,,,,😎😪,"Cool,[9] bored/yawning[10]" +:‑J,:‑J,:‑J,:‑J,:‑J,:‑J,:‑J,:‑J,:‑J,:‑J,:‑J,😏😒,Tongue-in-cheek[12] +#‑),#‑),#‑),#‑),#‑),#‑),#‑),#‑),#‑),#‑),#‑),—,Partied all night[9] +%‑) %),%‑) %),%‑) %),%‑) %),%‑) %),%‑) %),%‑) %),%‑) %),%‑) %),%‑) %),%‑) %),😵😕🤕,"Drunk,[9] confused" +:‑###.. :###..,:‑###.. :###..,:‑###.. :###..,:‑###.. :###..,:‑###.. :###..,:‑###.. :###..,:‑###.. :###..,:‑###.. 
:###..,:‑###.. :###..,:‑###.. :###..,:‑###.. :###..,🤒😷🤢,Being sick[9] +<:‑|,<:‑|,<:‑|,<:‑|,<:‑|,<:‑|,<:‑|,<:‑|,<:‑|,<:‑|,<:‑|,—,"Dumb, dunce-like[10]" +"',:-|","',:-l",,,,,,,,,,🤨,"Scepticism, disbelief, or disapproval[13][14]" +<_<,>_>,,,,,,,,,,,Sideways look. Devious or guilty. diff --git a/Project/naive_approach/Emoticon_Emoji_Assignment.ipynb b/Project/naive_approach/Emoticon_Emoji_Assignment.ipynb new file mode 100644 index 0000000..f606814 --- /dev/null +++ b/Project/naive_approach/Emoticon_Emoji_Assignment.ipynb @@ -0,0 +1,999 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Emoticon Emoji Assignment\n", + "https://en.wikipedia.org/wiki/List_of_emoticons\n", + "download of eastern and western table as csv file in project git folder" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "import pandas as pd\n", + "import math" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " 
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
icon0icon1icon2icon3icon4icon5icon6icon7icon8icon9icon10emojimeaning
0:‑) :):-] :]:-3 :3:-> :>8-) 8):-} :}:o):c):^)=]=)☺️🙂😊😀😁Smiley or happy face.[4][5][6]
1:‑D :D8‑D 8Dx‑D xDX‑D XD=D=3B^DNaNNaNNaNNaN😃😄😆😍Laughing,[4] big grin,[5][6] laugh with glasse...
2:-)):-)):-)):-)):-)):-)):-)):-)):-)):-)):-))NaNVery happy or double chin[7]
3:‑( :(:‑c :c:‑< :<:‑[ :[:-||>:[:{:@>:(NaNNaN☹️🙁😠😡😞😟😣😖Frown,[4][5][6] sad,[9] angry,[7] pouting
4:'‑( :'(:'‑( :'(:'‑( :'(:'‑( :'(:'‑( :'(:'‑( :'(:'‑( :'(:'‑( :'(:'‑( :'(:'‑( :'(:'‑( :'(😢😭Crying[9]
5:'‑) :'):'‑) :'):'‑) :'):'‑) :'):'‑) :'):'‑) :'):'‑) :'):'‑) :'):'‑) :'):'‑) :'):'‑) :')😂Tears of happiness[9]
6D‑':D:<D:D8D;D=DXNaNNaNNaNNaN😨😧😦😱😫😩Horror, disgust, sadness, great dismay[5][6] (...
7:‑O :O:‑o :o:-08‑0>:ONaNNaNNaNNaNNaNNaN😮😯😲Surprise,[3] shock,[4][10] yawn[11]
8:-* :*NaNNaNNaNNaNNaNNaNNaNNaNNaN😗😙😚😘😍Kiss
9;‑) ;)*-) *);‑] ;];^):‑,;DNaNNaNNaNNaNNaN😉😜😘Wink,[4][5][6] smirk[10][11]
10:‑P :PX‑P XPx‑p xp:‑p :p:‑Þ :Þ:‑þ :þ:‑b :bd:=p>:PNaN😛😝😜🤑Tongue sticking out, cheeky/playful,[4] blowin...
11:‑/ :/:‑.>:\\>:/:\\=/=\\:L=L:SNaN🤔😕😟Skeptical, annoyed, undecided, uneasy, hesitan...
12:‑| :|NaNNaNNaNNaNNaNNaNNaNNaNNaNNaN😐😑Straight face[5] no expression, indecision[9]
13:$NaNNaNNaNNaNNaNNaNNaNNaNNaNNaN😳😞😖Embarrassed,[6] blushing[7]
14:‑X :X:‑# :#:‑& :&NaNNaNNaNNaNNaNNaNNaNNaN🤐😶Sealed lips or wearing braces,[4] tongue-tied[9]
15O:‑) O:)0:‑3 0:30:‑) 0:)0;^)NaNNaNNaNNaNNaNNaNNaN😇👼Angel,[4][5][10] saint,[9] innocent
16>:‑) >:)}:‑) }:)3:‑) 3:)>;)NaNNaNNaNNaNNaNNaNNaN😈Evil,[5] devilish[9]
17|;‑)|‑ONaNNaNNaNNaNNaNNaNNaNNaNNaN😎😪Cool,[9] bored/yawning[10]
18:‑J:‑J:‑J:‑J:‑J:‑J:‑J:‑J:‑J:‑J:‑J😏😒Tongue-in-cheek[12]
19#‑)#‑)#‑)#‑)#‑)#‑)#‑)#‑)#‑)#‑)#‑)Partied all night[9]
20%‑) %)%‑) %)%‑) %)%‑) %)%‑) %)%‑) %)%‑) %)%‑) %)%‑) %)%‑) %)%‑) %)😵😕🤕Drunk,[9] confused
21:‑###.. :###..:‑###.. :###..:‑###.. :###..:‑###.. :###..:‑###.. :###..:‑###.. :###..:‑###.. :###..:‑###.. :###..:‑###.. :###..:‑###.. :###..:‑###.. :###..🤒😷🤢Being sick[9]
22<:‑|<:‑|<:‑|<:‑|<:‑|<:‑|<:‑|<:‑|<:‑|<:‑|<:‑|Dumb, dunce-like[10]
23',:-|',:-lNaNNaNNaNNaNNaNNaNNaNNaNNaN🤨Scepticism, disbelief, or disapproval[13][14]
24<_<>_>NaNNaNNaNNaNNaNNaNNaNNaNNaNNaNSideways look. Devious or guilty.
\n", + "
" + ], + "text/plain": [ + " icon0 icon1 icon2 icon3 \\\n", + "0 :‑) :) :-] :] :-3 :3 :-> :> \n", + "1 :‑D :D 8‑D 8D x‑D xD X‑D XD \n", + "2 :-)) :-)) :-)) :-)) \n", + "3 :‑( :( :‑c :c :‑< :< :‑[ :[ \n", + "4 :'‑( :'( :'‑( :'( :'‑( :'( :'‑( :'( \n", + "5 :'‑) :') :'‑) :') :'‑) :') :'‑) :') \n", + "6 D‑': D:< D: D8 \n", + "7 :‑O :O :‑o :o :-0 8‑0 \n", + "8 :-* :* :× NaN NaN \n", + "9 ;‑) ;) *-) *) ;‑] ;] ;^) \n", + "10 :‑P :P X‑P XP x‑p xp :‑p :p \n", + "11 :‑/ :/ :‑. >:\\ >:/ \n", + "12 :‑| :| NaN NaN NaN \n", + "13 :$ NaN NaN NaN \n", + "14 :‑X :X :‑# :# :‑& :& NaN \n", + "15 O:‑) O:) 0:‑3 0:3 0:‑) 0:) 0;^) \n", + "16 >:‑) >:) }:‑) }:) 3:‑) 3:) >;) \n", + "17 |;‑) |‑O NaN NaN \n", + "18 :‑J :‑J :‑J :‑J \n", + "19 #‑) #‑) #‑) #‑) \n", + "20 %‑) %) %‑) %) %‑) %) %‑) %) \n", + "21 :‑###.. :###.. :‑###.. :###.. :‑###.. :###.. :‑###.. :###.. \n", + "22 <:‑| <:‑| <:‑| <:‑| \n", + "23 ',:-| ',:-l NaN NaN \n", + "24 <_< >_> NaN NaN \n", + "\n", + " icon4 icon5 icon6 icon7 \\\n", + "0 8-) 8) :-} :} :o) :c) \n", + "1 =D =3 B^D NaN \n", + "2 :-)) :-)) :-)) :-)) \n", + "3 :-|| >:[ :{ :@ \n", + "4 :'‑( :'( :'‑( :'( :'‑( :'( :'‑( :'( \n", + "5 :'‑) :') :'‑) :') :'‑) :') :'‑) :') \n", + "6 D; D= DX NaN \n", + "7 >:O NaN NaN NaN \n", + "8 NaN NaN NaN NaN \n", + "9 :‑, ;D NaN NaN \n", + "10 :‑Þ :Þ :‑þ :þ :‑b :b d: \n", + "11 :\\ =/ =\\ :L \n", + "12 NaN NaN NaN NaN \n", + "13 NaN NaN NaN NaN \n", + "14 NaN NaN NaN NaN \n", + "15 NaN NaN NaN NaN \n", + "16 NaN NaN NaN NaN \n", + "17 NaN NaN NaN NaN \n", + "18 :‑J :‑J :‑J :‑J \n", + "19 #‑) #‑) #‑) #‑) \n", + "20 %‑) %) %‑) %) %‑) %) %‑) %) \n", + "21 :‑###.. :###.. :‑###.. :###.. :‑###.. :###.. :‑###.. :###.. \n", + "22 <:‑| <:‑| <:‑| <:‑| \n", + "23 NaN NaN NaN NaN \n", + "24 NaN NaN NaN NaN \n", + "\n", + " icon8 icon9 icon10 emoji \\\n", + "0 :^) =] =) ☺️🙂😊😀😁 \n", + "1 NaN NaN NaN 😃😄😆😍 \n", + "2 :-)) :-)) :-)) NaN \n", + "3 >:( NaN NaN ☹️🙁😠😡😞😟😣😖 \n", + "4 :'‑( :'( :'‑( :'( :'‑( :'( 😢😭 \n", + "5 :'‑) :') :'‑) :') :'‑) :') 😂 \n", + "6 NaN NaN NaN 😨😧😦😱😫😩 \n", + "7 NaN NaN NaN 😮😯😲 \n", + "8 NaN NaN NaN 😗😙😚😘😍 \n", + "9 NaN NaN NaN 😉😜😘 \n", + "10 =p >:P NaN 😛😝😜🤑 \n", + "11 =L :S NaN 🤔😕😟 \n", + "12 NaN NaN NaN 😐😑 \n", + "13 NaN NaN NaN 😳😞😖 \n", + "14 NaN NaN NaN 🤐😶 \n", + "15 NaN NaN NaN 😇👼 \n", + "16 NaN NaN NaN 😈 \n", + "17 NaN NaN NaN 😎😪 \n", + "18 :‑J :‑J :‑J 😏😒 \n", + "19 #‑) #‑) #‑) — \n", + "20 %‑) %) %‑) %) %‑) %) 😵😕🤕 \n", + "21 :‑###.. :###.. :‑###.. :###.. :‑###.. :###.. 🤒😷🤢 \n", + "22 <:‑| <:‑| <:‑| — \n", + "23 NaN NaN NaN 🤨 \n", + "24 NaN NaN NaN NaN \n", + "\n", + " meaning \n", + "0 Smiley or happy face.[4][5][6] \n", + "1 Laughing,[4] big grin,[5][6] laugh with glasse... \n", + "2 Very happy or double chin[7] \n", + "3 Frown,[4][5][6] sad,[9] angry,[7] pouting \n", + "4 Crying[9] \n", + "5 Tears of happiness[9] \n", + "6 Horror, disgust, sadness, great dismay[5][6] (... \n", + "7 Surprise,[3] shock,[4][10] yawn[11] \n", + "8 Kiss \n", + "9 Wink,[4][5][6] smirk[10][11] \n", + "10 Tongue sticking out, cheeky/playful,[4] blowin... \n", + "11 Skeptical, annoyed, undecided, uneasy, hesitan... 
\n", + "12 Straight face[5] no expression, indecision[9] \n", + "13 Embarrassed,[6] blushing[7] \n", + "14 Sealed lips or wearing braces,[4] tongue-tied[9] \n", + "15 Angel,[4][5][10] saint,[9] innocent \n", + "16 Evil,[5] devilish[9] \n", + "17 Cool,[9] bored/yawning[10] \n", + "18 Tongue-in-cheek[12] \n", + "19 Partied all night[9] \n", + "20 Drunk,[9] confused \n", + "21 Being sick[9] \n", + "22 Dumb, dunce-like[10] \n", + "23 Scepticism, disbelief, or disapproval[13][14] \n", + "24 Sideways look. Devious or guilty. " + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "df = pd.read_csv(\"/Users/Carsten/GitRepos/NLP-LAB/Project/Tools/Emoji-Emoticon-Assignment_western.csv\", names=[\"icon0\",\"icon1\",\"icon2\",\"icon3\",\"icon4\",\"icon5\",\"icon6\",\"icon7\",\"icon8\",\"icon9\",\"icon10\",\"emoji\",\"meaning\"],header=0)\n", + "df#.head()" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "#list of emoticon , list of emojis, meaning as string\n", + "assignment = []\n", + "\n", + "tmp_single_assignments = []" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "#icon,icon1,icon2,icon3,icon4,icon5,icon6,icon7,icon8,icon9,icon10,emoji,meaning \n", + "for pos,d in df.iterrows():\n", + " #print(d)\n", + " emoticons = []\n", + " #ignore rows with no entry\n", + " if(isinstance(d[\"emoji\"],str) and d[\"emoji\"]!=\"—\"):\n", + " #concatinate all emoticons in one list\n", + " emoticons = []\n", + " column_name = \"icon\"\n", + " column_number = 0\n", + " for col in range(11):\n", + " column_name+=str(column_number)\n", + " if(isinstance(d[column_name],str) and d[column_name]!=\"\"):\n", + " #print(type(d[column_name]))\n", + " #print(d[column_name])\n", + " emoticons+=d[column_name].split()\n", + " column_name = \"icon\"\n", + " column_number += 1\n", + "\n", + " #remove doublicate entries in emoticons\n", + " emoticons=list(set(emoticons))\n", + " \n", + " #make list of emoticons\n", + " emojis = list(d[\"emoji\"])\n", + " \n", + " #for tmp ideas\n", + " #for e in emoticons:\n", + " # print([e,emojis])\n", + " # tmp_single_assignments.append([e,emojis])\n", + "\n", + "\n", + " \n", + " #append triple\n", + " assignment.append([emoticons,emojis,str(d[\"meaning\"])])\n", + "\n", + "#print(assignment)\n", + "\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [ + "own_assignment = []" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + ":) |0 = ☺|1 = ️|2 = 🙂|3 = 😊|4 = 😀|5 = 😁\n", + ":) ☺\n", + "8-) |0 = ☺|1 = ️|2 = 🙂|3 = 😊|4 = 😀|5 = 😁\n", + "8-) ☺\n", + ":] |0 = ☺|1 = ️|2 = 🙂|3 = 😊|4 = 😀|5 = 😁\n", + ":] ☺\n", + ":-> |0 = ☺|1 = ️|2 = 🙂|3 = 😊|4 = 😀|5 = 😁\n", + ":-> ☺\n", + ":> |0 = ☺|1 = ️|2 = 🙂|3 = 😊|4 = 😀|5 = 😁\n", + ":> ☺\n", + ":o) |0 = ☺|1 = ️|2 = 🙂|3 = 😊|4 = 😀|5 = 😁\n", + ":o) ☺\n", + "=) |0 = ☺|1 = ️|2 = 🙂|3 = 😊|4 = 😀|5 = 😁\n", + "=) ☺\n", + "8) |0 = ☺|1 = ️|2 = 🙂|3 = 😊|4 = 😀|5 = 😁\n", + "8) ☺\n", + ":3 |0 = ☺|1 = ️|2 = 🙂|3 = 😊|4 = 😀|5 = 😁\n", + ":3 ☺\n", + ":-3 |0 = ☺|1 = ️|2 = 🙂|3 = 😊|4 = 😀|5 = 😁\n", + ":-3 ☺\n", + ":-} |0 = ☺|1 = ️|2 = 🙂|3 = 😊|4 = 😀|5 = 😁\n", + ":-} ☺\n", + "=] |0 = ☺|1 = ️|2 = 🙂|3 = 😊|4 = 😀|5 = 😁\n", + "=] ☺\n", + ":c) |0 = ☺|1 = ️|2 = 🙂|3 = 😊|4 = 😀|5 = 😁\n", + ":c) ☺\n", + ":-] |0 = ☺|1 = ️|2 = 🙂|3 = 
😊|4 = 😀|5 = 😁\n", + ":-] ☺\n", + ":‑) |0 = ☺|1 = ️|2 = 🙂|3 = 😊|4 = 😀|5 = 😁\n", + ":‑) ☺\n", + ":} |0 = ☺|1 = ️|2 = 🙂|3 = 😊|4 = 😀|5 = 😁\n", + ":} ☺\n", + ":^) |0 = ☺|1 = ️|2 = 🙂|3 = 😊|4 = 😀|5 = 😁\n", + ":^) ☺\n", + "x‑D |0 = 😃|1 = 😄|2 = 😆|3 = 😍\n", + "x‑D 😃\n", + "XD |0 = 😃|1 = 😄|2 = 😆|3 = 😍\n", + "XD 😃\n", + "X‑D |0 = 😃|1 = 😄|2 = 😆|3 = 😍\n", + "X‑D 😃\n", + "=D |0 = 😃|1 = 😄|2 = 😆|3 = 😍\n", + "=D 😃\n", + ":‑D |0 = 😃|1 = 😄|2 = 😆|3 = 😍\n", + ":‑D 😃\n", + "8‑D |0 = 😃|1 = 😄|2 = 😆|3 = 😍\n", + "8‑D 😃\n", + "8D |0 = 😃|1 = 😄|2 = 😆|3 = 😍\n", + "8D 😃\n", + "B^D |0 = 😃|1 = 😄|2 = 😆|3 = 😍\n", + "B^D 😃\n", + "xD |0 = 😃|1 = 😄|2 = 😆|3 = 😍\n", + "xD 😃\n", + "=3 |0 = 😃|1 = 😄|2 = 😆|3 = 😍\n", + "=3 😃\n", + ":D |0 = 😃|1 = 😄|2 = 😆|3 = 😍\n", + ":D 😃\n", + ":@ |0 = ☹|1 = ️|2 = 🙁|3 = 😠|4 = 😡|5 = 😞|6 = 😟|7 = 😣|8 = 😖\n", + ":@ ☹\n", + ":c |0 = ☹|1 = ️|2 = 🙁|3 = 😠|4 = 😡|5 = 😞|6 = 😟|7 = 😣|8 = 😖\n", + ":c ☹\n", + ":‑< |0 = ☹|1 = ️|2 = 🙁|3 = 😠|4 = 😡|5 = 😞|6 = 😟|7 = 😣|8 = 😖\n", + ":‑< ☹\n", + ":‑[ |0 = ☹|1 = ️|2 = 🙁|3 = 😠|4 = 😡|5 = 😞|6 = 😟|7 = 😣|8 = 😖\n", + ":‑[ ☹\n", + ":‑c |0 = ☹|1 = ️|2 = 🙁|3 = 😠|4 = 😡|5 = 😞|6 = 😟|7 = 😣|8 = 😖\n", + ":‑c ☹\n", + ":[ |0 = ☹|1 = ️|2 = 🙁|3 = 😠|4 = 😡|5 = 😞|6 = 😟|7 = 😣|8 = 😖\n", + ":[ ☹\n", + ":{ |0 = ☹|1 = ️|2 = 🙁|3 = 😠|4 = 😡|5 = 😞|6 = 😟|7 = 😣|8 = 😖\n", + ":{ ☹\n", + ":( |0 = ☹|1 = ️|2 = 🙁|3 = 😠|4 = 😡|5 = 😞|6 = 😟|7 = 😣|8 = 😖\n", + ":( ☹\n", + ":-|| |0 = ☹|1 = ️|2 = 🙁|3 = 😠|4 = 😡|5 = 😞|6 = 😟|7 = 😣|8 = 😖\n", + ":-|| ☹\n", + ">:( |0 = ☹|1 = ️|2 = 🙁|3 = 😠|4 = 😡|5 = 😞|6 = 😟|7 = 😣|8 = 😖\n", + ">:( ☹\n", + ">:[ |0 = ☹|1 = ️|2 = 🙁|3 = 😠|4 = 😡|5 = 😞|6 = 😟|7 = 😣|8 = 😖\n", + ">:[ ☹\n", + ":< |0 = ☹|1 = ️|2 = 🙁|3 = 😠|4 = 😡|5 = 😞|6 = 😟|7 = 😣|8 = 😖\n", + ":< ☹\n", + ":‑( |0 = ☹|1 = ️|2 = 🙁|3 = 😠|4 = 😡|5 = 😞|6 = 😟|7 = 😣|8 = 😖\n", + ":‑( ☹\n", + ":'( |0 = 😢|1 = 😭\n", + ":'( 😢\n", + ":'‑( |0 = 😢|1 = 😭\n", + ":'‑( 😢\n", + ":') |0 = 😂\n", + ":') 😂\n", + ":'‑) |0 = 😂\n", + ":'‑) 😂\n", + "DX |0 = 😨|1 = 😧|2 = 😦|3 = 😱|4 = 😫|5 = 😩\n", + "DX 😨\n", + "D‑': |0 = 😨|1 = 😧|2 = 😦|3 = 😱|4 = 😫|5 = 😩\n", + "D‑': 😨\n", + "D: |0 = 😨|1 = 😧|2 = 😦|3 = 😱|4 = 😫|5 = 😩\n", + "D: 😨\n", + "D:< |0 = 😨|1 = 😧|2 = 😦|3 = 😱|4 = 😫|5 = 😩\n", + "D:< 😨\n", + "D; |0 = 😨|1 = 😧|2 = 😦|3 = 😱|4 = 😫|5 = 😩\n", + "D; 😨\n", + "D8 |0 = 😨|1 = 😧|2 = 😦|3 = 😱|4 = 😫|5 = 😩\n", + "D8 😨\n", + "D= |0 = 😨|1 = 😧|2 = 😦|3 = 😱|4 = 😫|5 = 😩\n", + "D= 😨\n", + ":-0 |0 = 😮|1 = 😯|2 = 😲\n", + ":-0 😮\n", + ":o |0 = 😮|1 = 😯|2 = 😲\n", + ":o 😮\n", + "8‑0 |0 = 😮|1 = 😯|2 = 😲\n", + "8‑0 😮\n", + ":‑O |0 = 😮|1 = 😯|2 = 😲\n", + ":‑O 😮\n", + ":‑o |0 = 😮|1 = 😯|2 = 😲\n", + ":‑o 😮\n", + ":O |0 = 😮|1 = 😯|2 = 😲\n", + ":O 😮\n", + ">:O |0 = 😮|1 = 😯|2 = 😲\n", + ">:O 😮\n", + ":× |0 = 😗|1 = 😙|2 = 😚|3 = 😘|4 = 😍\n", + ":× 😗\n", + ":* |0 = 😗|1 = 😙|2 = 😚|3 = 😘|4 = 😍\n", + ":* 😗\n", + ":-* |0 = 😗|1 = 😙|2 = 😚|3 = 😘|4 = 😍\n", + ":-* 😗\n", + ";) |0 = 😉|1 = 😜|2 = 😘\n", + ";) 😉\n", + ";D |0 = 😉|1 = 😜|2 = 😘\n", + ";D 😉\n", + ";‑) |0 = 😉|1 = 😜|2 = 😘\n", + ";‑) 😉\n", + "*) |0 = 😉|1 = 😜|2 = 😘\n", + "*) 😉\n", + ";^) |0 = 😉|1 = 😜|2 = 😘\n", + ";^) 😉\n", + "*-) |0 = 😉|1 = 😜|2 = 😘\n", + "*-) 😉\n", + ";‑] |0 = 😉|1 = 😜|2 = 😘\n", + ";‑] 😉\n", + ":‑, |0 = 😉|1 = 😜|2 = 😘\n", + ":‑, 😉\n", + ";] |0 = 😉|1 = 😜|2 = 😘\n", + ";] 😉\n", + ":‑þ |0 = 😛|1 = 😝|2 = 😜|3 = 🤑\n", + ":‑þ 😛\n", + "XP |0 = 😛|1 = 😝|2 = 😜|3 = 🤑\n", + "XP 😛\n", + "x‑p |0 = 😛|1 = 😝|2 = 😜|3 = 🤑\n", + "x‑p 😛\n", + "d: |0 = 😛|1 = 😝|2 = 😜|3 = 🤑\n", + "d: 😛\n", + ":þ |0 = 😛|1 = 😝|2 = 😜|3 = 🤑\n", + ":þ 😛\n", + ":‑b |0 = 😛|1 = 😝|2 = 😜|3 = 🤑\n", + ":‑b 😛\n", + ":p |0 = 😛|1 = 😝|2 = 😜|3 = 🤑\n", + ":p 😛\n", + ":b |0 = 😛|1 = 😝|2 = 😜|3 = 🤑\n", + ":b 😛\n", + ":Þ |0 = 😛|1 = 😝|2 
= 😜|3 = 🤑\n", + ":Þ 😛\n", + ":‑Þ |0 = 😛|1 = 😝|2 = 😜|3 = 🤑\n", + ":‑Þ 😛\n", + ">:P |0 = 😛|1 = 😝|2 = 😜|3 = 🤑\n", + ">:P 😛\n", + ":P |0 = 😛|1 = 😝|2 = 😜|3 = 🤑\n", + ":P 😛\n", + ":‑p |0 = 😛|1 = 😝|2 = 😜|3 = 🤑\n", + ":‑p 😛\n", + "=p |0 = 😛|1 = 😝|2 = 😜|3 = 🤑\n", + "=p 😛\n", + ":‑P |0 = 😛|1 = 😝|2 = 😜|3 = 🤑\n", + ":‑P 😛\n", + "X‑P |0 = 😛|1 = 😝|2 = 😜|3 = 🤑\n", + "X‑P 😛\n", + "xp |0 = 😛|1 = 😝|2 = 😜|3 = 🤑\n", + "xp 😛\n", + "=L |0 = 🤔|1 = 😕|2 = 😟\n", + "=L 🤔\n", + "=\\ |0 = 🤔|1 = 😕|2 = 😟\n", + "=\\ 🤔\n", + ":S |0 = 🤔|1 = 😕|2 = 😟\n", + ":S 🤔\n", + "=/ |0 = 🤔|1 = 😕|2 = 😟\n", + "=/ 🤔\n", + ":\\ |0 = 🤔|1 = 😕|2 = 😟\n", + ":\\ 🤔\n", + ">:\\ |0 = 🤔|1 = 😕|2 = 😟\n", + ">:\\ 🤔\n", + ":L |0 = 🤔|1 = 😕|2 = 😟\n", + ":L 🤔\n", + ":/ |0 = 🤔|1 = 😕|2 = 😟\n", + ":/ 🤔\n", + ":‑. |0 = 🤔|1 = 😕|2 = 😟\n", + ":‑. 🤔\n", + ":‑/ |0 = 🤔|1 = 😕|2 = 😟\n", + ":‑/ 🤔\n", + ">:/ |0 = 🤔|1 = 😕|2 = 😟\n", + ">:/ 🤔\n", + ":| |0 = 😐|1 = 😑\n", + ":| 😐\n", + ":‑| |0 = 😐|1 = 😑\n", + ":‑| 😐\n", + ":$ |0 = 😳|1 = 😞|2 = 😖\n", + ":$ 😳\n", + ":& |0 = 🤐|1 = 😶\n", + ":& 🤐\n", + ":# |0 = 🤐|1 = 😶\n", + ":# 🤐\n", + ":‑X |0 = 🤐|1 = 😶\n", + ":‑X 🤐\n", + ":‑& |0 = 🤐|1 = 😶\n", + ":‑& 🤐\n", + ":X |0 = 🤐|1 = 😶\n", + ":X 🤐\n", + ":‑# |0 = 🤐|1 = 😶\n", + ":‑# 🤐\n", + "0:) |0 = 😇|1 = 👼\n", + "0:) 😇\n", + "O:) |0 = 😇|1 = 👼\n", + "O:) 😇\n", + "0:‑) |0 = 😇|1 = 👼\n", + "0:‑) 😇\n", + "0:‑3 |0 = 😇|1 = 👼\n", + "0:‑3 😇\n", + "O:‑) |0 = 😇|1 = 👼\n", + "O:‑) 😇\n", + "0;^) |0 = 😇|1 = 👼\n", + "0;^) 😇\n", + "0:3 |0 = 😇|1 = 👼\n", + "0:3 😇\n", + "3:) |0 = 😈\n", + "3:) 😈\n", + ">;) |0 = 😈\n", + ">;) 😈\n", + "3:‑) |0 = 😈\n", + "3:‑) 😈\n", + "}:) |0 = 😈\n", + "}:) 😈\n", + "}:‑) |0 = 😈\n", + "}:‑) 😈\n", + ">:‑) |0 = 😈\n", + ">:‑) 😈\n", + ">:) |0 = 😈\n", + ">:) 😈\n", + "|‑O |0 = 😎|1 = 😪\n", + "|‑O 😎\n", + "|;‑) |0 = 😎|1 = 😪\n", + "|;‑) 😎\n", + ":‑J |0 = 😏|1 = 😒\n", + ":‑J 😏\n", + "%‑) |0 = 😵|1 = 😕|2 = 🤕\n", + "%‑) 😵\n", + "%) |0 = 😵|1 = 😕|2 = 🤕\n", + "%) 😵\n", + ":###.. |0 = 🤒|1 = 😷|2 = 🤢\n", + ":###.. 🤒\n", + ":‑###.. |0 = 🤒|1 = 😷|2 = 🤢\n", + ":‑###.. 
🤒\n", + "',:-l |0 = 🤨\n", + "',:-l 🤨\n", + "',:-| |0 = 🤨\n", + "',:-| 🤨\n" + ] + } + ], + "source": [ + "for emoticons, emojis, meaning in assignment:\n", + " for emoticon in emoticons:\n", + " tmp = \" \"\n", + " c= 0\n", + " for emoji in emojis:\n", + " tmp+=\"|\"+str(c)+\" = \"+emoji\n", + " c+=1\n", + " number = input(str(emoticon)+str(tmp))\n", + " #if(not number.isnumeric() and int(number)in [0:len(emojis)]):\n", + " # number = 0\n", + " #else:\n", + " # number = int(number)\n", + " if(number == \"\"):number = 0\n", + " print(emoticon,emojis[number])\n", + " own_assignment.append([emoticon,emojis[number]])" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[[':)', '☺'], ['8-)', '☺'], [':]', '☺'], [':->', '☺'], [':>', '☺'], [':o)', '☺'], ['=)', '☺'], ['8)', '☺'], [':3', '😊'], [':-3', '😊'], [':-}', '️'], ['=]', '☺'], [':c)', '☺'], [':-]', '☺'], [':‑)', '☺'], [':}', '☺'], [':^)', '😁'], ['x‑D', '😆'], ['XD', '😆'], ['X‑D', '😆'], ['=D', '😃'], [':‑D', '😃'], ['8‑D', '😃'], ['8D', '😃'], ['B^D', '😄'], ['xD', '😆'], ['=3', '😃'], [':D', '😃'], [':@', '😡'], [':c', '☹'], [':‑<', '☹'], [':‑[', '🙁'], [':‑c', '☹'], [':[', '🙁'], [':{', '☹'], [':(', '☹'], [':-||', '🙁'], ['>:(', '😡'], ['>:[', '😡'], [':<', '☹'], [':‑(', '🙁'], [':)', '☺'], ['8-)', '☺'], [':]', '☺'], [':->', '☺'], [':>', '☺'], [':o)', '☺'], ['=)', '☺'], ['8)', '☺'], [':3', '☺'], [':-3', '☺'], [':-}', '☺'], ['=]', '☺'], [':c)', '☺'], [':-]', '☺'], [':‑)', '☺'], [':}', '☺'], [':^)', '☺'], ['x‑D', '😃'], ['XD', '😃'], ['X‑D', '😃'], ['=D', '😃'], [':‑D', '😃'], ['8‑D', '😃'], ['8D', '😃'], ['B^D', '😃'], ['xD', '😃'], ['=3', '😃'], [':D', '😃'], [':@', '☹'], [':c', '☹'], [':‑<', '☹'], [':‑[', '☹'], [':‑c', '☹'], [':[', '☹'], [':{', '☹'], [':(', '☹'], [':-||', '☹'], ['>:(', '☹'], ['>:[', '☹'], [':<', '☹'], [':‑(', '☹'], [\":'(\", '😢'], [\":'‑(\", '😢'], [\":')\", '😂'], [\":'‑)\", '😂'], ['DX', '😨'], [\"D‑':\", '😨'], ['D:', '😨'], ['D:<', '😨'], ['D;', '😨'], ['D8', '😨'], ['D=', '😨'], [':-0', '😮'], [':o', '😮'], ['8‑0', '😮'], [':‑O', '😮'], [':‑o', '😮'], [':O', '😮'], ['>:O', '😮'], [':×', '😗'], [':*', '😗'], [':-*', '😗'], [';)', '😉'], [';D', '😉'], [';‑)', '😉'], ['*)', '😉'], [';^)', '😉'], ['*-)', '😉'], [';‑]', '😉'], [':‑,', '😉'], [';]', '😉'], [':‑þ', '😛'], ['XP', '😛'], ['x‑p', '😛'], ['d:', '😛'], [':þ', '😛'], [':‑b', '😛'], [':p', '😛'], [':b', '😛'], [':Þ', '😛'], [':‑Þ', '😛'], ['>:P', '😛'], [':P', '😛'], [':‑p', '😛'], ['=p', '😛'], [':‑P', '😛'], ['X‑P', '😛'], ['xp', '😛'], ['=L', '🤔'], ['=\\\\', '🤔'], [':S', '🤔'], ['=/', '🤔'], [':\\\\', '🤔'], ['>:\\\\', '🤔'], [':L', '🤔'], [':/', '🤔'], [':‑.', '🤔'], [':‑/', '🤔'], ['>:/', '🤔'], [':|', '😐'], [':‑|', '😐'], [':$', '😳'], [':&', '🤐'], [':#', '🤐'], [':‑X', '🤐'], [':‑&', '🤐'], [':X', '🤐'], [':‑#', '🤐'], ['0:)', '😇'], ['O:)', '😇'], ['0:‑)', '😇'], ['0:‑3', '😇'], ['O:‑)', '😇'], ['0;^)', '😇'], ['0:3', '😇'], ['3:)', '😈'], ['>;)', '😈'], ['3:‑)', '😈'], ['}:)', '😈'], ['}:‑)', '😈'], ['>:‑)', '😈'], ['>:)', '😈'], ['|‑O', '😎'], ['|;‑)', '😎'], [':‑J', '😏'], ['%‑)', '😵'], ['%)', '😵'], [':###..', '🤒'], [':‑###..', '🤒'], [\"',:-l\", '\\U0001f928'], [\"',:-|\", '\\U0001f928']]\n" + ] + } + ], + "source": [ + "print(own_assignment)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": true + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": 
"ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +}