Merge branch 'master' of ssh://the-cake-is-a-lie.net:20022/jonas/NLP-LAB

Carsten 2018-05-13 19:40:45 +02:00
commit 41dc093c17
12 changed files with 2425 additions and 1321539 deletions


@@ -1,721 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# NLP-LAB Exercise 01 by jonas weinz\n",
"----"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"%matplotlib ipympl\n",
"import nltk\n",
"import pprint\n",
"from sklearn.tree import DecisionTreeClassifier\n",
"from sklearn.feature_extraction import DictVectorizer\n",
"from sklearn.pipeline import Pipeline"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## implementing own classifiers"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* writing an own feature funtion"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"def features(sentence, index):\n",
" word = sentence[index]\n",
" is_punctuation_mark = word == \"!\" or word == \".\" or word == \",\" or word == \"?\"\n",
" sentence_length = len(sentence)\n",
" relative_third = (index * 3) // sentence_length \n",
" vowels = word.count('a') + word.count('e') + word.count('i') + word.count('o') + word.count('u')\n",
" return {\n",
" 'word': word,\n",
" 'is_capitalized': sentence[index][0].upper() == sentence[index][0],\n",
" 'prefix-1': sentence[index][0],\n",
" 'suffix-1': sentence[index][-1],\n",
" 'prefix-2': sentence[index][1] if len(word) > 1 else '',\n",
" 'suffix-2': sentence[index][-2] if len(word) > 1 else '',\n",
" 'prev_word': '' if index == 0 else sentence[index - 1],\n",
" 'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],\n",
" 'length': len(word),\n",
" 'index' : index,\n",
" 'rev_index': len(sentence) - index,\n",
" 'sentence_length_': len(sentence),\n",
" 'relative_third': relative_third,\n",
" 'numerical': word.isnumeric(),\n",
" 'is_punctuation_mark': is_punctuation_mark,\n",
" ',': word == \",\",\n",
" '.': word == \".\",\n",
" '!': word == \"!\",\n",
" '?': word == \"?\",\n",
" 'vowels' : vowels\n",
" }"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"#test_sentence = ['The','cake','is','a','lie','!']\n",
"#for i in range(len(test_sentence)):\n",
"# pprint.pprint(features(test_sentence, i))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* function for creating training sets"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [],
"source": [
"def untag(tagged_sentence):\n",
" return [w for w,t in tagged_sentence]\n",
"\n",
"def transform_to_dataset(tagged_sentences):\n",
" X,y = [], []\n",
" \n",
" for s in tagged_sentences:\n",
" for i in range(len(s)):\n",
" X.append(features(untag(s),i))\n",
" y.append(s[i][1])\n",
" return X,y\n",
"\n",
"def create_training_and_test_set(annotated_sentences, relative_cutoff):\n",
" cutoff = int(relative_cutoff * len(annotated_sentences))\n",
" training_sentences = annotated_sentences[:cutoff]\n",
" test_sentences = annotated_sentences[cutoff:]\n",
" \n",
" X,y = transform_to_dataset(training_sentences)\n",
" tX, ty = transform_to_dataset(test_sentences)\n",
" \n",
" return X,y,tX,ty"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* Decision Tree classifier"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"def train_classifier(X,y,classifier,max_size=10000):\n",
" clf = Pipeline([\n",
" ('vectorizer', DictVectorizer(sparse=False)),\n",
" ('classifier', classifier)\n",
" ])\n",
" \n",
" print(\"start training…\")\n",
" \n",
" clf.fit(\n",
" X if len(X) < max_size else X[:max_size],\n",
" y if len(y) < max_size else y[:max_size]\n",
" )\n",
" \n",
" print(\"training done\")\n",
" \n",
" return clf"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* classifier evaluater"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [],
"source": [
"def test_classifier(clf, tX, ty):\n",
" accuracy = clf.score(tX, ty)\n",
" print(\"Accuracy: \", accuracy)\n",
" # TODO: more analytics\n",
" return accuracy"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Task 01:"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Performance 1\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Model 01\n",
"* train and testing english custom POS tagger model:"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"def model_01(X,y,tX,ty, max_size=1000):\n",
" #classifier = DecisionTreeClassifier(criterion='entropy')\n",
" from sklearn.neural_network import MLPClassifier\n",
" model01_clf = train_classifier(X,y,MLPClassifier(),max_size=max_size)\n",
" return test_classifier(clf=model01_clf, tX=tX, ty=ty)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Model 02"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"def model_02(tX,ty):\n",
" m2_y = nltk.pos_tag([w['word'] for w in tX])\n",
" # compare results\n",
" n_correct = sum((1 if m2_y[i][1] == ty[i] else 0) for i in range(len(ty)))\n",
" return n_correct / len(ty)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Model 03"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"def model_03(corpus_tagged, corpus_sents, cut=0.8):\n",
" \n",
" patterns = [(r'.*ing$', 'VBG'), (r'.*ed$', 'VBD'), (r'.*es$', 'VBZ'), (r'.*ould$', 'MD'), (r'.*\\'s$', 'NN$'), \n",
" (r'.*s$', 'NNS'), (r'^-?[0-9]+(.[0-9]+)?$', 'CD'), (r'.*', 'NN')]\n",
" \n",
" s = int(len(corpus_tagged) * cut)\n",
" train_sents = corpus_tagged[:s]\n",
" test_sents = corpus_tagged[s:]\n",
" \n",
" models = {\n",
" 'def_model': nltk.DefaultTagger('NN'),\n",
" 'regexp_model': nltk.RegexpTagger(patterns),\n",
" 'uni_model': nltk.UnigramTagger(train_sents),\n",
" 'bi_model': nltk.BigramTagger(train_sents),\n",
" 'tri_model': nltk.TrigramTagger(train_sents)\n",
" }\n",
" \n",
" performance = {}\n",
" for name,model in models.items():\n",
" performance[name] = model.evaluate(test_sents)\n",
" \n",
" return performance\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Applying models on Datasets"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[('Pierre', 'NNP'),\n",
" ('Vinken', 'NNP'),\n",
" (',', ','),\n",
" ('61', 'CD'),\n",
" ('years', 'NNS'),\n",
" ('old', 'JJ'),\n",
" (',', ','),\n",
" ('will', 'MD'),\n",
" ('join', 'VB'),\n",
" ('the', 'DT'),\n",
" ('board', 'NN'),\n",
" ('as', 'IN'),\n",
" ('a', 'DT'),\n",
" ('nonexecutive', 'JJ'),\n",
" ('director', 'NN'),\n",
" ('Nov.', 'NNP'),\n",
" ('29', 'CD'),\n",
" ('.', '.')]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"P1.1\n",
"start training…\n",
"training done\n",
"Accuracy: 0.7755377014821099\n",
"P1.2\n",
"P1.3\n",
"P1.4\n",
"start training…\n",
"training done\n",
"Accuracy: 0.63253390325317\n",
"P1.5\n",
"P1.6\n",
"{'P1.1': 0.7755377014821099,\n",
" 'P1.2': 0.8936074654423873,\n",
" 'P1.3 -- bi_model': 0.1132791057437996,\n",
" 'P1.3 -- def_model': 0.1447677029791906,\n",
" 'P1.3 -- regexp_model': 0.24232746145017217,\n",
" 'P1.3 -- tri_model': 0.06736863116922003,\n",
" 'P1.3 -- uni_model': 0.8608213982733669,\n",
" 'P1.4': 0.63253390325317,\n",
" 'P1.5': 0.6044583741861567,\n",
" 'P1.6 -- bi_model': 0.1132791057437996,\n",
" 'P1.6 -- def_model': 0.1447677029791906,\n",
" 'P1.6 -- regexp_model': 0.24232746145017217,\n",
" 'P1.6 -- tri_model': 0.06736863116922003,\n",
" 'P1.6 -- uni_model': 0.8608213982733669}\n"
]
}
],
"source": [
"performances = {}\n",
"\n",
"treebank_tagged = nltk.corpus.treebank.tagged_sents()\n",
"treebank_sents = nltk.corpus.treebank.sents()\n",
"\n",
"brown_tagged = nltk.corpus.brown.tagged_sents()#(categories='news')\n",
"brown_sents = nltk.corpus.brown.sents()#(categories='news')\n",
"\n",
"display(treebank_tagged[0])\n",
"\n",
"X1,y1,tX1,ty1 = create_training_and_test_set(annotated_sentences=treebank_tagged, \n",
" relative_cutoff=0.8)\n",
"\n",
"X2,y2,tX2,ty2 = create_training_and_test_set(annotated_sentences=brown_tagged, \n",
" relative_cutoff=0.8)\n",
"\n",
"\n",
"print(\"P1.1\")\n",
"performances['P1.1'] = model_01(X1,y1,tX1,ty1)\n",
"\n",
"print(\"P1.2\")\n",
"performances['P1.2'] = model_02(tX1,ty1)\n",
"\n",
"print(\"P1.3\")\n",
"p3 = model_03(treebank_tagged, treebank_sents)\n",
"for k,v in p3.items():\n",
" performances[\"P1.3 -- \" + k] = v\n",
"\n",
"print(\"P1.4\")\n",
"performances['P1.4'] = model_01(X2,y2,tX2,ty2)\n",
"\n",
"print(\"P1.5\")\n",
"performances['P1.5'] = model_02(tX2,ty2)\n",
"\n",
"print(\"P1.6\")\n",
"p6 = model_03(brown_tagged, brown_sents)\n",
"for k,v in p3.items():\n",
" performances[\"P1.6 -- \" + k] = v\n",
"\n",
"pprint.pprint(performances)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"### Plotting Data"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "c6d8e1186c1f44dcb77b146346b1dedb",
"version_major": 2,
"version_minor": 0
},
"text/html": [
"<p>Failed to display Jupyter Widget of type <code>FigureCanvasNbAgg</code>.</p>\n",
"<p>\n",
" If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean\n",
" that the widgets JavaScript is still loading. If this message persists, it\n",
" likely means that the widgets JavaScript library is either not installed or\n",
" not enabled. See the <a href=\"https://ipywidgets.readthedocs.io/en/stable/user_install.html\">Jupyter\n",
" Widgets Documentation</a> for setup instructions.\n",
"</p>\n",
"<p>\n",
" If you're reading this message in another frontend (for example, a static\n",
" rendering on GitHub or <a href=\"https://nbviewer.jupyter.org/\">NBViewer</a>),\n",
" it may mean that your frontend doesn't currently support widgets.\n",
"</p>\n"
],
"text/plain": [
"FigureCanvasNbAgg()"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"#weights = clf.named_steps['classifier'].feature_importances_\n",
"#labels = clf.named_steps['vectorizer'].get_feature_names()\n",
"\n",
"#sort\n",
"#weights, labels = (list(t) for t in zip(*sorted(zip(weights, labels))))\n",
"\n",
"fig_1, ax_1 = plt.subplots()\n",
"plt.bar(np.arange(len(performances)), performances.values())\n",
"plt.xticks(np.arange(len(performances)), performances.keys(), rotation=30, ha='right')\n",
"plt.tight_layout()\n",
"plt.show()\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"----\n",
"## Task 2"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [],
"source": [
"WORDS = 'words' #: column type for words\n",
"POS = 'pos' #: column type for part-of-speech tags\n",
"TREE = 'tree' #: column type for parse trees\n",
"CHUNK = 'chunk' #: column type for chunk structures\n",
"NE = 'ne' #: column type for named entities\n",
"SRL = 'srl' #: column type for semantic role labels\n",
"IGNORE = 'ignore' #: column type for column that should be ignored\n",
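"# CoNLL-U columns used below: ID (ignored), FORM -> words, LEMMA (ignored), UPOS -> pos\n",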
"train_path = \"./ru_syntagrus-ud-train.conllu\"\n",
"ru_corp = nltk.corpus.ConllCorpusReader(root=\"./\", \n",
" fileids=[\"ru_syntagrus-ud-train-uncommented.conllu\"],\n",
" columntypes=[IGNORE, WORDS, IGNORE, POS],\n",
" encoding='utf-8')\n"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {},
"outputs": [],
"source": [
"ru_tagged = ru_corp.tagged_sents()\n",
"\n",
"\n",
"\n",
"X3,y3,tX3,ty3 = create_training_and_test_set(annotated_sentences=ru_tagged, \n",
" relative_cutoff=0.8)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Model 04, Performance 2.1"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"P2.1\n",
"start training…\n",
"training done\n",
"Accuracy: 0.7079014288483687\n",
"0.7079014288483687\n"
]
}
],
"source": [
"print(\"P2.1\")\n",
"performances2 = {}\n",
"performances2['P2.1'] = model_01(X3,y3,tX3,ty3, max_size=1000)\n",
"print(performances2['P2.1'])"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Model 05, Performance 2.2"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {},
"outputs": [],
"source": [
"# write russian text out to file:\n",
"f = open(\"ru_text.txt\", 'w')\n",
"for sentence in ru_tagged:\n",
" for word, tag in sentence:\n",
" f.write(word + \" \")\n",
" f.write(\"\\n\")\n",
"f.close()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* download the python 3 fork of the rdrpos-tagger: https://github.com/jacopofar/RDRPOSTagger-python-3\n",
"* adjust `RDRPOS_TAGGER_PATH` to match with the download location"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"/home/jonas/Dokumente/gitRepos/NLP-LAB/Jonas_Solutions\n"
]
}
],
"source": [
"import sys, os\n",
"\n",
"dir_path = os.getcwd()\n",
"print(dir_path)"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"['Node', '__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__spec__', 'tabStr']\n",
"('\\nOutput file:', 'ru_text.txt.TAGGED')\n"
]
}
],
"source": [
"RDRPOS_TAGGER_PATH = r\"/home/jonas/src/RDRPOSTagger-python-3/pSCRDRtagger/\"\n",
"\n",
"sys.path.insert(0, RDRPOS_TAGGER_PATH)\n",
"os.chdir(RDRPOS_TAGGER_PATH)\n",
"\n",
"import RDRPOSTagger as model05_tagger \n",
"\n",
"r = model05_tagger.RDRPOSTagger()\n",
"r.constructSCRDRtreeFromRDRfile(\"../Models/UniPOS/UD_Russian-SynTagRus/train.UniPOS.RDR\")\n",
"DICT = model05_tagger.readDictionary(\"../Models/UniPOS/UD_Russian-SynTagRus/train.UniPOS.DICT\")\n",
"\n",
"os.chdir(dir_path)\n",
"\n",
"r.tagRawCorpus(DICT, \"ru_text.txt\")"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
"tagged_words = []\n",
"f = open(\"ru_text.txt.TAGGED\", 'r')\n",
"for line in f:\n",
" for splits in line.split():\n",
" cmp = splits.rsplit('/',1)\n",
" if len(cmp) != 2:\n",
" print(\"error parsing: \", cmp)\n",
" else:\n",
" w,t = cmp\n",
" tagged_words.append((w,t))\n"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {},
"outputs": [],
"source": [
"score_2_2 = 0\n",
"i = 0\n",
"for sent in ru_tagged:\n",
" for tagged_w in sent:\n",
" if tagged_w[1] == tagged_words[i][1]:\n",
" score_2_2 += 1\n",
" i += 1\n",
"performances2['P2.2'] = score_2_2 / len(tagged_words)\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Results of performance 2.2"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'P2.1': 0.7079014288483687, 'P2.2': 0.8899716702179293}\n"
]
}
],
"source": [
"pprint.pprint(performances2)"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {},
"outputs": [
{
"data": {
"application/vnd.jupyter.widget-view+json": {
"model_id": "aeb29243e58d49b8942122ceec03fab5",
"version_major": 2,
"version_minor": 0
},
"text/html": [
"<p>Failed to display Jupyter Widget of type <code>FigureCanvasNbAgg</code>.</p>\n",
"<p>\n",
" If you're reading this message in the Jupyter Notebook or JupyterLab Notebook, it may mean\n",
" that the widgets JavaScript is still loading. If this message persists, it\n",
" likely means that the widgets JavaScript library is either not installed or\n",
" not enabled. See the <a href=\"https://ipywidgets.readthedocs.io/en/stable/user_install.html\">Jupyter\n",
" Widgets Documentation</a> for setup instructions.\n",
"</p>\n",
"<p>\n",
" If you're reading this message in another frontend (for example, a static\n",
" rendering on GitHub or <a href=\"https://nbviewer.jupyter.org/\">NBViewer</a>),\n",
" it may mean that your frontend doesn't currently support widgets.\n",
"</p>\n"
],
"text/plain": [
"FigureCanvasNbAgg()"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"fig_2, ax_2 = plt.subplots()\n",
"plt.bar(np.arange(len(performances2)), performances2.values())\n",
"plt.xticks(np.arange(len(performances2)), performances2.keys(), rotation=30, ha='right')\n",
"plt.tight_layout()\n",
"plt.show()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

File diff suppressed because one or more lines are too long


@@ -1,678 +0,0 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# NLP-LAB Exercise 01 (Jonas Weinz)\n",
"----"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## playing around with nltk"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* import nltk and download packages"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[nltk_data] Downloading package punkt to /home/jonas/nltk_data...\n",
"[nltk_data] Package punkt is already up-to-date!\n",
"[nltk_data] Downloading package averaged_perceptron_tagger to\n",
"[nltk_data] /home/jonas/nltk_data...\n",
"[nltk_data] Package averaged_perceptron_tagger is already up-to-\n",
"[nltk_data] date!\n"
]
},
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import nltk\n",
"\n",
"#nltk.download('all')\n",
"nltk.download('punkt')\n",
"nltk.download('averaged_perceptron_tagger')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"* analyse a text fragment"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"monologue = \"\"\"Ah! Now Ive done Philosophy,\n",
"Ive finished Law and Medicine,\n",
"And sadly even Theology:\n",
"Taken fierce pains, from end to end.\n",
"Now here I am, a fool for sure!\n",
"No wiser than I was before:\n",
"Master, Doctors what they call me,\n",
"And Ive been ten years, already,\n",
"Crosswise, arcing, to and fro,\n",
"Leading my students by the nose,\n",
"And see that we can know - nothing!\n",
"It almost sets my heart burning.\n",
"Im cleverer than all these teachers,\n",
"Doctors, Masters, scribes, preachers:\n",
"Im not plagued by doubt or scruple,\n",
"Scared by neither Hell nor Devil \n",
"Instead all Joy is snatched away,\n",
"Whats worth knowing, I cant say,\n",
"I cant say what I should teach\n",
"To make men better or convert each.\n",
"And then Ive neither goods nor gold,\n",
"No worldly honour, or splendour hold:\n",
"Not even a dog would play this part!\n",
"So Ive given myself to Magic art,\n",
"To see if, through Spirit powers and lips,\n",
"I might have all secrets at my fingertips.\n",
"And no longer, with rancid sweat, so,\n",
"Still have to speak what I cannot know:\n",
"That I may understand whatever\n",
"Binds the worlds innermost core together\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[('Ah', 'NN'),\n",
" ('!', '.'),\n",
" ('Now', 'RB'),\n",
" ('I', 'PRP'),\n",
" ('', 'VBP'),\n",
" ('ve', 'JJ'),\n",
" ('done', 'VBN'),\n",
" ('Philosophy', 'NNP'),\n",
" (',', ','),\n",
" ('I', 'PRP'),\n",
" ('', 'VBP'),\n",
" ('ve', 'RB'),\n",
" ('finished', 'VBN'),\n",
" ('Law', 'NNP'),\n",
" ('and', 'CC'),\n",
" ('Medicine', 'NNP'),\n",
" (',', ','),\n",
" ('And', 'CC'),\n",
" ('sadly', 'RB'),\n",
" ('even', 'RB'),\n",
" ('Theology', 'NNP'),\n",
" (':', ':'),\n",
" ('Taken', 'NNP'),\n",
" ('fierce', 'NN'),\n",
" ('pains', 'NNS'),\n",
" (',', ','),\n",
" ('from', 'IN'),\n",
" ('end', 'NN'),\n",
" ('to', 'TO'),\n",
" ('end', 'VB'),\n",
" ('.', '.'),\n",
" ('Now', 'RB'),\n",
" ('here', 'RB'),\n",
" ('I', 'PRP'),\n",
" ('am', 'VBP'),\n",
" (',', ','),\n",
" ('a', 'DT'),\n",
" ('fool', 'NN'),\n",
" ('for', 'IN'),\n",
" ('sure', 'JJ'),\n",
" ('!', '.'),\n",
" ('No', 'DT'),\n",
" ('wiser', 'JJR'),\n",
" ('than', 'IN'),\n",
" ('I', 'PRP'),\n",
" ('was', 'VBD'),\n",
" ('before', 'IN'),\n",
" (':', ':'),\n",
" ('Master', 'NN'),\n",
" (',', ','),\n",
" ('Doctor', 'NNP'),\n",
" ('', 'NNP'),\n",
" ('s', 'VBD'),\n",
" ('what', 'WP'),\n",
" ('they', 'PRP'),\n",
" ('call', 'VBP'),\n",
" ('me', 'PRP'),\n",
" (',', ','),\n",
" ('And', 'CC'),\n",
" ('I', 'PRP'),\n",
" ('', 'VBP'),\n",
" ('ve', 'RB'),\n",
" ('been', 'VBN'),\n",
" ('ten', 'CD'),\n",
" ('years', 'NNS'),\n",
" (',', ','),\n",
" ('already', 'RB'),\n",
" (',', ','),\n",
" ('Crosswise', 'NNP'),\n",
" (',', ','),\n",
" ('arcing', 'VBG'),\n",
" (',', ','),\n",
" ('to', 'TO'),\n",
" ('and', 'CC'),\n",
" ('fro', 'VB'),\n",
" (',', ','),\n",
" ('Leading', 'VBG'),\n",
" ('my', 'PRP$'),\n",
" ('students', 'NNS'),\n",
" ('by', 'IN'),\n",
" ('the', 'DT'),\n",
" ('nose', 'NN'),\n",
" (',', ','),\n",
" ('And', 'CC'),\n",
" ('see', 'VBP'),\n",
" ('that', 'IN'),\n",
" ('we', 'PRP'),\n",
" ('can', 'MD'),\n",
" ('know', 'VB'),\n",
" ('-', ':'),\n",
" ('nothing', 'NN'),\n",
" ('!', '.'),\n",
" ('It', 'PRP'),\n",
" ('almost', 'RB'),\n",
" ('sets', 'VBZ'),\n",
" ('my', 'PRP$'),\n",
" ('heart', 'NN'),\n",
" ('burning', 'NN'),\n",
" ('.', '.'),\n",
" ('I', 'PRP'),\n",
" ('', 'VBP'),\n",
" ('m', 'RB'),\n",
" ('cleverer', 'JJR'),\n",
" ('than', 'IN'),\n",
" ('all', 'PDT'),\n",
" ('these', 'DT'),\n",
" ('teachers', 'NNS'),\n",
" (',', ','),\n",
" ('Doctors', 'NNS'),\n",
" (',', ','),\n",
" ('Masters', 'NNS'),\n",
" (',', ','),\n",
" ('scribes', 'NNS'),\n",
" (',', ','),\n",
" ('preachers', 'NNS'),\n",
" (':', ':'),\n",
" ('I', 'PRP'),\n",
" ('', 'VBP'),\n",
" ('m', 'RB'),\n",
" ('not', 'RB'),\n",
" ('plagued', 'VBN'),\n",
" ('by', 'IN'),\n",
" ('doubt', 'NN'),\n",
" ('or', 'CC'),\n",
" ('scruple', 'NN'),\n",
" (',', ','),\n",
" ('Scared', 'VBN'),\n",
" ('by', 'IN'),\n",
" ('neither', 'DT'),\n",
" ('Hell', 'NNP'),\n",
" ('nor', 'CC'),\n",
" ('Devil', 'NNP'),\n",
" ('', 'NNP'),\n",
" ('Instead', 'RB'),\n",
" ('all', 'DT'),\n",
" ('Joy', 'NNP'),\n",
" ('is', 'VBZ'),\n",
" ('snatched', 'VBN'),\n",
" ('away', 'RB'),\n",
" (',', ','),\n",
" ('What', 'WP'),\n",
" ('', 'VBD'),\n",
" ('s', 'JJ'),\n",
" ('worth', 'JJ'),\n",
" ('knowing', 'NN'),\n",
" (',', ','),\n",
" ('I', 'PRP'),\n",
" ('can', 'MD'),\n",
" ('', 'VB'),\n",
" ('t', 'NNS'),\n",
" ('say', 'VBP'),\n",
" (',', ','),\n",
" ('I', 'PRP'),\n",
" ('can', 'MD'),\n",
" ('', 'VB'),\n",
" ('t', 'NNS'),\n",
" ('say', 'VBP'),\n",
" ('what', 'WP'),\n",
" ('I', 'PRP'),\n",
" ('should', 'MD'),\n",
" ('teach', 'VB'),\n",
" ('To', 'TO'),\n",
" ('make', 'VB'),\n",
" ('men', 'NNS'),\n",
" ('better', 'JJR'),\n",
" ('or', 'CC'),\n",
" ('convert', 'VB'),\n",
" ('each', 'DT'),\n",
" ('.', '.'),\n",
" ('And', 'CC'),\n",
" ('then', 'RB'),\n",
" ('I', 'PRP'),\n",
" ('', 'VBP'),\n",
" ('ve', 'JJ'),\n",
" ('neither', 'CC'),\n",
" ('goods', 'NNS'),\n",
" ('nor', 'CC'),\n",
" ('gold', 'NN'),\n",
" (',', ','),\n",
" ('No', 'DT'),\n",
" ('worldly', 'RB'),\n",
" ('honour', 'VBZ'),\n",
" (',', ','),\n",
" ('or', 'CC'),\n",
" ('splendour', 'JJ'),\n",
" ('hold', 'NN'),\n",
" (':', ':'),\n",
" ('Not', 'RB'),\n",
" ('even', 'RB'),\n",
" ('a', 'DT'),\n",
" ('dog', 'NN'),\n",
" ('would', 'MD'),\n",
" ('play', 'VB'),\n",
" ('this', 'DT'),\n",
" ('part', 'NN'),\n",
" ('!', '.'),\n",
" ('So', 'RB'),\n",
" ('I', 'PRP'),\n",
" ('', 'VBP'),\n",
" ('ve', 'JJ'),\n",
" ('given', 'VBN'),\n",
" ('myself', 'PRP'),\n",
" ('to', 'TO'),\n",
" ('Magic', 'NNP'),\n",
" ('art', 'NN'),\n",
" (',', ','),\n",
" ('To', 'TO'),\n",
" ('see', 'VB'),\n",
" ('if', 'IN'),\n",
" (',', ','),\n",
" ('through', 'IN'),\n",
" ('Spirit', 'NNP'),\n",
" ('powers', 'NNS'),\n",
" ('and', 'CC'),\n",
" ('lips', 'NNS'),\n",
" (',', ','),\n",
" ('I', 'PRP'),\n",
" ('might', 'MD'),\n",
" ('have', 'VB'),\n",
" ('all', 'DT'),\n",
" ('secrets', 'NNS'),\n",
" ('at', 'IN'),\n",
" ('my', 'PRP$'),\n",
" ('fingertips', 'NNS'),\n",
" ('.', '.'),\n",
" ('And', 'CC'),\n",
" ('no', 'DT'),\n",
" ('longer', 'RBR'),\n",
" (',', ','),\n",
" ('with', 'IN'),\n",
" ('rancid', 'JJ'),\n",
" ('sweat', 'NN'),\n",
" (',', ','),\n",
" ('so', 'RB'),\n",
" (',', ','),\n",
" ('Still', 'RB'),\n",
" ('have', 'VBP'),\n",
" ('to', 'TO'),\n",
" ('speak', 'VB'),\n",
" ('what', 'WP'),\n",
" ('I', 'PRP'),\n",
" ('can', 'MD'),\n",
" ('not', 'RB'),\n",
" ('know', 'VB'),\n",
" (':', ':'),\n",
" ('That', 'IN'),\n",
" ('I', 'PRP'),\n",
" ('may', 'MD'),\n",
" ('understand', 'VB'),\n",
" ('whatever', 'WDT'),\n",
" ('Binds', 'NNP'),\n",
" ('the', 'DT'),\n",
" ('world', 'NN'),\n",
" ('', 'NNP'),\n",
" ('s', 'VBZ'),\n",
" ('innermost', 'NN'),\n",
" ('core', 'NN'),\n",
" ('together', 'RB')]"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"tokens = nltk.word_tokenize(monologue)\n",
"display(nltk.pos_tag(tokens))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Exploring Penn TreeBank"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[nltk_data] Downloading package treebank to /home/jonas/nltk_data...\n",
"[nltk_data] Package treebank is already up-to-date!\n"
]
},
{
"data": {
"text/plain": [
"[('Pierre', 'NNP'),\n",
" ('Vinken', 'NNP'),\n",
" (',', ','),\n",
" ('61', 'CD'),\n",
" ('years', 'NNS'),\n",
" ('old', 'JJ'),\n",
" (',', ','),\n",
" ('will', 'MD'),\n",
" ('join', 'VB'),\n",
" ('the', 'DT'),\n",
" ('board', 'NN'),\n",
" ('as', 'IN'),\n",
" ('a', 'DT'),\n",
" ('nonexecutive', 'JJ'),\n",
" ('director', 'NN'),\n",
" ('Nov.', 'NNP'),\n",
" ('29', 'CD'),\n",
" ('.', '.')]"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"text/plain": [
"'# Tagged sentences: 3914'"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"nltk.download('treebank')\n",
"annotated_sent = nltk.corpus.treebank.tagged_sents()\n",
"\n",
"display(annotated_sent[0])\n",
"display(\"# Tagged sentences: \" + str(len(annotated_sent)))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## tagset information"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"[nltk_data] Downloading package tagsets to /home/jonas/nltk_data...\n",
"[nltk_data] Package tagsets is already up-to-date!\n",
"$: dollar\n",
" $ -$ --$ A$ C$ HK$ M$ NZ$ S$ U.S.$ US$\n",
"'': closing quotation mark\n",
" ' ''\n",
"(: opening parenthesis\n",
" ( [ {\n",
"): closing parenthesis\n",
" ) ] }\n",
",: comma\n",
" ,\n",
"--: dash\n",
" --\n",
".: sentence terminator\n",
" . ! ?\n",
":: colon or ellipsis\n",
" : ; ...\n",
"CC: conjunction, coordinating\n",
" & 'n and both but either et for less minus neither nor or plus so\n",
" therefore times v. versus vs. whether yet\n",
"CD: numeral, cardinal\n",
" mid-1890 nine-thirty forty-two one-tenth ten million 0.5 one forty-\n",
" seven 1987 twenty '79 zero two 78-degrees eighty-four IX '60s .025\n",
" fifteen 271,124 dozen quintillion DM2,000 ...\n",
"DT: determiner\n",
" all an another any both del each either every half la many much nary\n",
" neither no some such that the them these this those\n",
"EX: existential there\n",
" there\n",
"FW: foreign word\n",
" gemeinschaft hund ich jeux habeas Haementeria Herr K'ang-si vous\n",
" lutihaw alai je jour objets salutaris fille quibusdam pas trop Monte\n",
" terram fiche oui corporis ...\n",
"IN: preposition or conjunction, subordinating\n",
" astride among uppon whether out inside pro despite on by throughout\n",
" below within for towards near behind atop around if like until below\n",
" next into if beside ...\n",
"JJ: adjective or numeral, ordinal\n",
" third ill-mannered pre-war regrettable oiled calamitous first separable\n",
" ectoplasmic battery-powered participatory fourth still-to-be-named\n",
" multilingual multi-disciplinary ...\n",
"JJR: adjective, comparative\n",
" bleaker braver breezier briefer brighter brisker broader bumper busier\n",
" calmer cheaper choosier cleaner clearer closer colder commoner costlier\n",
" cozier creamier crunchier cuter ...\n",
"JJS: adjective, superlative\n",
" calmest cheapest choicest classiest cleanest clearest closest commonest\n",
" corniest costliest crassest creepiest crudest cutest darkest deadliest\n",
" dearest deepest densest dinkiest ...\n",
"LS: list item marker\n",
" A A. B B. C C. D E F First G H I J K One SP-44001 SP-44002 SP-44005\n",
" SP-44007 Second Third Three Two * a b c d first five four one six three\n",
" two\n",
"MD: modal auxiliary\n",
" can cannot could couldn't dare may might must need ought shall should\n",
" shouldn't will would\n",
"NN: noun, common, singular or mass\n",
" common-carrier cabbage knuckle-duster Casino afghan shed thermostat\n",
" investment slide humour falloff slick wind hyena override subhumanity\n",
" machinist ...\n",
"NNP: noun, proper, singular\n",
" Motown Venneboerger Czestochwa Ranzer Conchita Trumplane Christos\n",
" Oceanside Escobar Kreisler Sawyer Cougar Yvette Ervin ODI Darryl CTCA\n",
" Shannon A.K.C. Meltex Liverpool ...\n",
"NNPS: noun, proper, plural\n",
" Americans Americas Amharas Amityvilles Amusements Anarcho-Syndicalists\n",
" Andalusians Andes Andruses Angels Animals Anthony Antilles Antiques\n",
" Apache Apaches Apocrypha ...\n",
"NNS: noun, common, plural\n",
" undergraduates scotches bric-a-brac products bodyguards facets coasts\n",
" divestitures storehouses designs clubs fragrances averages\n",
" subjectivists apprehensions muses factory-jobs ...\n",
"PDT: pre-determiner\n",
" all both half many quite such sure this\n",
"POS: genitive marker\n",
" ' 's\n",
"PRP: pronoun, personal\n",
" hers herself him himself hisself it itself me myself one oneself ours\n",
" ourselves ownself self she thee theirs them themselves they thou thy us\n",
"PRP$: pronoun, possessive\n",
" her his mine my our ours their thy your\n",
"RB: adverb\n",
" occasionally unabatingly maddeningly adventurously professedly\n",
" stirringly prominently technologically magisterially predominately\n",
" swiftly fiscally pitilessly ...\n",
"RBR: adverb, comparative\n",
" further gloomier grander graver greater grimmer harder harsher\n",
" healthier heavier higher however larger later leaner lengthier less-\n",
" perfectly lesser lonelier longer louder lower more ...\n",
"RBS: adverb, superlative\n",
" best biggest bluntest earliest farthest first furthest hardest\n",
" heartiest highest largest least less most nearest second tightest worst\n",
"RP: particle\n",
" aboard about across along apart around aside at away back before behind\n",
" by crop down ever fast for forth from go high i.e. in into just later\n",
" low more off on open out over per pie raising start teeth that through\n",
" under unto up up-pp upon whole with you\n",
"SYM: symbol\n",
" % & ' '' ''. ) ). * + ,. < = > @ A[fj] U.S U.S.S.R * ** ***\n",
"TO: \"to\" as preposition or infinitive marker\n",
" to\n",
"UH: interjection\n",
" Goodbye Goody Gosh Wow Jeepers Jee-sus Hubba Hey Kee-reist Oops amen\n",
" huh howdy uh dammit whammo shucks heck anyways whodunnit honey golly\n",
" man baby diddle hush sonuvabitch ...\n",
"VB: verb, base form\n",
" ask assemble assess assign assume atone attention avoid bake balkanize\n",
" bank begin behold believe bend benefit bevel beware bless boil bomb\n",
" boost brace break bring broil brush build ...\n",
"VBD: verb, past tense\n",
" dipped pleaded swiped regummed soaked tidied convened halted registered\n",
" cushioned exacted snubbed strode aimed adopted belied figgered\n",
" speculated wore appreciated contemplated ...\n",
"VBG: verb, present participle or gerund\n",
" telegraphing stirring focusing angering judging stalling lactating\n",
" hankerin' alleging veering capping approaching traveling besieging\n",
" encrypting interrupting erasing wincing ...\n",
"VBN: verb, past participle\n",
" multihulled dilapidated aerosolized chaired languished panelized used\n",
" experimented flourished imitated reunifed factored condensed sheared\n",
" unsettled primed dubbed desired ...\n",
"VBP: verb, present tense, not 3rd person singular\n",
" predominate wrap resort sue twist spill cure lengthen brush terminate\n",
" appear tend stray glisten obtain comprise detest tease attract\n",
" emphasize mold postpone sever return wag ...\n",
"VBZ: verb, present tense, 3rd person singular\n",
" bases reconstructs marks mixes displeases seals carps weaves snatches\n",
" slumps stretches authorizes smolders pictures emerges stockpiles\n",
" seduces fizzes uses bolsters slaps speaks pleads ...\n",
"WDT: WH-determiner\n",
" that what whatever which whichever\n",
"WP: WH-pronoun\n",
" that what whatever whatsoever which who whom whosoever\n",
"WP$: WH-pronoun, possessive\n",
" whose\n",
"WRB: Wh-adverb\n",
" how however whence whenever where whereby whereever wherein whereof why\n",
"``: opening quotation mark\n",
" ` ``\n",
"None\n"
]
}
],
"source": [
"nltk.download('tagsets')\n",
"print(nltk.help.upenn_tagset())"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[('Pierre', 'NNP'),\n",
" ('Vinken', 'NNP'),\n",
" (',', ','),\n",
" ('61', 'CD'),\n",
" ('years', 'NNS'),\n",
" ('old', 'JJ'),\n",
" (',', ','),\n",
" ('will', 'MD'),\n",
" ('join', 'VB'),\n",
" ('the', 'DT'),\n",
" ('board', 'NN'),\n",
" ('as', 'IN'),\n",
" ('a', 'DT'),\n",
" ('nonexecutive', 'JJ'),\n",
" ('director', 'NN'),\n",
" ('Nov.', 'NNP'),\n",
" ('29', 'CD'),\n",
" ('.', '.')]"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"annotated_sent[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.3"
}
},
"nbformat": 4,
"nbformat_minor": 2
}

File diff suppressed because one or more lines are too long


@@ -0,0 +1,86 @@
#!/usr/bin/env bash
# helper functions:
function lineprint {
printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' =
}
function message {
lineprint
printf "$1\n"
lineprint
}
current_action="IDLE"
function confirm_action {
message "successfully finished action: $current_action"
}
function set_action {
current_action="$1"
message "$1"
}
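# run the given command; on failure, print a message naming the current action and return its exit status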
function perform {
"$@"
local status=$?
if [ $status -ne 0 ]
then
message "$current_action failed!"
fi
return $status
}
function perform_and_exit {
perform "$@" || exit
}
# Downloading and unzipping dataset
D1_URL=https://www.cs.ucsb.edu/~william/data/liar_dataset.zip
D1_ZIP=${D1_URL##*/}
D2_URL=https://raw.githubusercontent.com/GeorgeMcIntire/fake_real_news_dataset/master/fake_or_real_news.csv.zip
D2_ZIP=${D2_URL##*/}
P3_URL=https://raw.githubusercontent.com/SmartDataAnalytics/MA-INF-4222-NLP-Lab/master/2018_SoSe/exercises/script_dataset3.py
P3_SCRIPT=${P3_URL##*/}
set_action "checking whether unzip is installed"
# testing for unzip:
perform_and_exit unzip -v
confirm_action
set_action "downloading and unpacking $D1_URL if not already existing"
perform_and_exit mkdir -p ./data
perform_and_exit cd ./data/
if [ ! -e $D1_ZIP ];
then
perform_and_exit curl $D1_URL --output ./$D1_ZIP
perform_and_exit unzip $D1_ZIP
fi
confirm_action
set_action "downloading and unpacking $D2_URL if not already existing"
if [ ! -e $D2_ZIP ];
then
perform_and_exit curl $D2_URL --output ./$D2_ZIP
perform_and_exit unzip $D2_ZIP
fi
confirm_action
set_action "downloading Helper script: $P3_SCRIPT"
if [ ! -e $P3_SCRIPT ];
then
perform_and_exit curl $P3_URL --output ./$P3_SCRIPT
fi
confirm_action

File diff suppressed because one or more lines are too long

File diff suppressed because it is too large


@@ -1,382 +0,0 @@
True : object.conclusion = "NN"
object.tag == "ADV" : object.conclusion = "ADV"
object.suffixL3 == "ено" : object.conclusion = "VERB"
object.suffixL3 == "ано" : object.conclusion = "VERB"
object.nextWord1 == "-" : object.conclusion = "ADV"
object.nextTag1 == "PUNCT" and object.nextTag2 == "NOUN" : object.conclusion = "NOUN"
object.word == "так" and object.nextWord1 == "как" : object.conclusion = "SCONJ"
object.suffixL3 == "ино" : object.conclusion = "PROPN"
object.nextWord1 == "-" : object.conclusion = "ADV"
object.suffixL3 == "нко" : object.conclusion = "PROPN"
object.word == "всё" and object.nextTag1 == "NOUN" : object.conclusion = "DET"
object.nextTag1 == "SYM" : object.conclusion = "NUM"
object.suffixL3 == "бро" : object.conclusion = "NOUN"
object.suffixL3 == "ако" : object.conclusion = "PROPN"
object.suffixL3 == "йко" : object.conclusion = "PROPN"
object.word == "Сити" and object.nextWord1 == "&#39;&#39;" : object.conclusion = "PROPN"
object.suffixL4 == "бюро" : object.conclusion = "PROPN"
object.nextWord1 == "км" : object.conclusion = "NUM"
object.nextWord1 == "года" : object.conclusion = "ADJ"
object.tag == "NOUN" : object.conclusion = "NOUN"
object.suffixL4 == "ском" : object.conclusion = "ADJ"
object.word == "войском" : object.conclusion = "NOUN"
object.suffixL3 == "кие" : object.conclusion = "ADJ"
object.prevTag1 == "PUNCT" and object.word == "русские" and object.nextTag1 == "PUNCT" : object.conclusion = "NOUN"
object.suffixL3 == "ыми" : object.conclusion = "ADJ"
object.prevTag1 == "PUNCT" and object.nextTag1 == "ADP" : object.conclusion = "VERB"
object.prevTag1 == "PROPN" and object.nextTag1 == "PUNCT" : object.conclusion = "PROPN"
object.suffixL2 == "es" : object.conclusion = "NOUN"
object.suffixL3 == "" : object.conclusion = "ADV"
object.suffixL2 == "ми" : object.conclusion = "NOUN"
object.suffixL2 == "ия" : object.conclusion = "NOUN"
object.nextWord1 == "-" : object.conclusion = "ADV"
object.word == "Entertainment" : object.conclusion = "NOUN"
object.suffixL2 == "ns" : object.conclusion = "NOUN"
object.suffixL3 == "ing" : object.conclusion = "NOUN"
object.nextWord2 == ":" : object.conclusion = "NOUN"
object.nextWord1 == "-" : object.conclusion = "ADV"
object.nextTag1 == "PUNCT" and object.nextTag2 == "NUM" : object.conclusion = "NOUN"
object.nextTag1 == "PUNCT" and object.nextTag2 == "ADP" : object.conclusion = "PROPN"
object.prevTag2 == "VERB" and object.prevTag1 == "NOUN" : object.conclusion = "PROPN"
object.suffixL3 == "ена" : object.conclusion = "NOUN"
object.suffixL3 == "ими" : object.conclusion = "ADJ"
object.prevWord1 == "," : object.conclusion = "VERB"
object.word == "русскими" : object.conclusion = "NOUN"
object.suffixL4 == "ьном" : object.conclusion = "ADJ"
object.suffixL3 == "шие" : object.conclusion = "VERB"
object.prevWord1 == "" : object.conclusion = "ADJ"
object.nextTag1 == "NOUN" and object.nextTag2 == "NOUN" : object.conclusion = "AUX"
object.suffixL3 == "ово" : object.conclusion = "PROPN"
object.word == "слово" : object.conclusion = "NOUN"
object.suffixL3 == "ова" : object.conclusion = "PROPN"
object.word == "слова" : object.conclusion = "NOUN"
object.word == "острова" : object.conclusion = "NOUN"
object.prevTag1 == "ADJ" and object.nextTag1 == "PUNCT" : object.conclusion = "NOUN"
object.prevTag1 == "DET" and object.word == "вдова" : object.conclusion = "NOUN"
object.suffixL4 == "ющей" : object.conclusion = "VERB"
object.suffixL3 == "щие" : object.conclusion = "VERB"
object.nextWord1 == "," : object.conclusion = "NOUN"
object.prevTag1 == "PROPN" and object.nextTag1 == "VERB" : object.conclusion = "PROPN"
object.nextWord2 == "альбом" : object.conclusion = "NOUN"
object.prevWord2 == "-" : object.conclusion = "NOUN"
object.prevTag1 == "PUNCT" and object.word == "м" : object.conclusion = "ADJ"
object.suffixL4 == "нном" : object.conclusion = "ADJ"
object.prevTag1 == "PUNCT" : object.conclusion = "VERB"
object.suffixL4 == "рном" : object.conclusion = "ADJ"
object.suffixL4 == "тном" : object.conclusion = "ADJ"
object.prevWord1 == "городе" : object.conclusion = "PROPN"
object.suffixL3 == "ева" : object.conclusion = "PROPN"
object.word == "королева" and object.nextTag1 == "NOUN" and object.nextTag2 == "PUNCT" : object.conclusion = "NOUN"
object.suffixL3 == "ево" : object.conclusion = "PROPN"
object.suffixL4 == "ески" : object.conclusion = "ADV"
object.prevTag1 == "ADV" : object.conclusion = "PROPN"
object.prevWord1 == "город" : object.conclusion = "PROPN"
object.suffixL4 == "овом" : object.conclusion = "ADJ"
object.suffixL4 == "чном" : object.conclusion = "ADJ"
object.prevWord1 == "реки" : object.conclusion = "PROPN"
object.suffixL3 == "эля" : object.conclusion = "PROPN"
object.suffixL4 == "вшей" : object.conclusion = "VERB"
object.suffixL4 == "дние" : object.conclusion = "ADJ"
object.suffixL4 == "дном" : object.conclusion = "ADJ"
object.suffixL4 == "арда" : object.conclusion = "PROPN"
object.suffixL4 == "тоне" : object.conclusion = "PROPN"
object.suffixL3 == "рге" : object.conclusion = "PROPN"
object.prevWord1 == "города" : object.conclusion = "PROPN"
object.suffixL3 == "йти" : object.conclusion = "VERB"
object.prevWord1 == "*" : object.conclusion = "ADV"
object.suffixL3 == "кии" : object.conclusion = "PROPN"
object.suffixL4 == "поле" : object.conclusion = "PROPN"
object.suffixL4 == "овка" : object.conclusion = "PROPN"
object.nextTag1 == "NOUN" : object.conclusion = "NOUN"
object.prevWord1 == "провинцию" : object.conclusion = "PROPN"
object.suffixL4 == "ичем" : object.conclusion = "PROPN"
object.nextWord2 == "родился" : object.conclusion = "PROPN"
object.suffixL3 == "ёва" : object.conclusion = "PROPN"
object.suffixL4 == "поль" : object.conclusion = "PROPN"
object.prevWord1 == "деревня" : object.conclusion = "PROPN"
object.suffixL4 == "алия" : object.conclusion = "PROPN"
object.suffixL4 == "льда" : object.conclusion = "PROPN"
object.suffixL3 == "ите" : object.conclusion = "VERB"
object.prevTag1 == "ADP" : object.conclusion = "NOUN"
object.suffixL4 == "бном" : object.conclusion = "ADJ"
object.suffixL3 == "ида" : object.conclusion = "PROPN"
object.suffixL4 == "" : object.conclusion = "NOUN"
object.suffixL4 == "ндия" : object.conclusion = "PROPN"
object.suffixL4 == "тове" : object.conclusion = "PROPN"
object.suffixL4 == "асом" : object.conclusion = "PROPN"
object.suffixL3 == "нчи" : object.conclusion = "PROPN"
object.suffixL3 == "бии" : object.conclusion = "PROPN"
object.suffixL3 == "эль" : object.conclusion = "PROPN"
object.suffixL3 == "мми" : object.conclusion = "PROPN"
object.suffixL4 == "евка" : object.conclusion = "PROPN"
object.suffixL3 == "кию" : object.conclusion = "PROPN"
object.suffixL3 == "нне" : object.conclusion = "PROPN"
object.prevWord1 == "у" and object.nextWord1 == ";" : object.conclusion = "PROPN"
object.suffixL3 == "тти" : object.conclusion = "PROPN"
object.suffixL4 == "кера" : object.conclusion = "PROPN"
object.suffixL4 == "кове" : object.conclusion = "PROPN"
object.prevWord1 == "село" : object.conclusion = "PROPN"
object.suffixL4 == "рант" : object.conclusion = "PROPN"
object.suffixL4 == "ндию" : object.conclusion = "PROPN"
object.suffixL4 == "ейма" : object.conclusion = "PROPN"
object.prevWord1 == "в" and object.nextWord1 == "(" : object.conclusion = "PROPN"
object.prevTag2 == "ADJ" and object.prevTag1 == "ADP" : object.conclusion = "NOUN"
object.prevWord1 == "Южной" : object.conclusion = "PROPN"
object.prevWord1 == "святого" : object.conclusion = "PROPN"
object.nextWord1 == "родился" : object.conclusion = "PROPN"
object.suffixL4 == "берт" : object.conclusion = "PROPN"
object.suffixL4 == "дора" : object.conclusion = "PROPN"
object.suffixL4 == "нята" : object.conclusion = "VERB"
object.suffixL4 == "тней" : object.conclusion = "ADJ"
object.suffixL4 == "жном" : object.conclusion = "ADJ"
object.suffixL4 == "мном" : object.conclusion = "ADJ"
object.suffixL4 == "сном" : object.conclusion = "ADJ"
object.prevWord1 == "-" and object.word == "мм" : object.conclusion = "ADJ"
object.suffixL4 == "нней" : object.conclusion = "ADJ"
object.tag == "NUM" : object.conclusion = "NUM"
object.nextWord1 == "-" : object.conclusion = "ADV"
object.suffixL4 == "BASE" : object.conclusion = "PROPN"
object.nextTag1 == "NOUN" and object.nextTag2 == "ADJ" : object.conclusion = "ADJ"
object.suffixL2 == "их" : object.conclusion = "NUM"
object.prevTag2 == "ADP" and object.prevTag1 == "PRON" : object.conclusion = "NUM"
object.prevWord1 == "," : object.conclusion = "NUM"
object.suffixL4 == "ному" : object.conclusion = "NUM"
object.prevWord2 == "на" : object.conclusion = "NUM"
object.prevTag1 == "DET" : object.conclusion = "NUM"
object.prevWord1 == "площадью" : object.conclusion = "NUM"
object.nextWord1 == "году" : object.conclusion = "ADJ"
object.prevWord1 == "-" : object.conclusion = "ADV"
object.prevWord2 == "Ямал" : object.conclusion = "NUM"
object.prevWord1 == "-" and object.word == "ти" : object.conclusion = "NUM"
object.prevTag1 == "PUNCT" and object.word == "1" and object.nextTag1 == "PUNCT" : object.conclusion = "NUM"
object.nextWord1 == "года" : object.conclusion = "ADJ"
object.word == "два" : object.conclusion = "NUM"
object.word == "три" and object.nextWord1 == "года" : object.conclusion = "NUM"
object.prevWord2 == "," : object.conclusion = "NUM"
object.prevWord1 == "на" : object.conclusion = "NUM"
object.prevTag1 == "ADP" and object.word == "2" and object.nextTag1 == "NOUN" : object.conclusion = "NUM"
object.word == "3" : object.conclusion = "NUM"
object.nextWord1 == "годах" : object.conclusion = "ADJ"
object.nextWord1 == "г." : object.conclusion = "ADJ"
object.prevWord1 == "С" : object.conclusion = "ADJ"
object.nextWord1 == "января" : object.conclusion = "ADJ"
object.word == "много" and object.nextTag1 == "VERB" : object.conclusion = "ADV"
object.nextWord1 == "годов" : object.conclusion = "ADJ"
object.nextWord1 == "г" : object.conclusion = "ADJ"
object.nextWord1 == "июня" : object.conclusion = "ADJ"
object.nextWord1 == "сентября" : object.conclusion = "ADJ"
object.nextWord1 == "октября" : object.conclusion = "ADJ"
object.nextWord1 == "августа" : object.conclusion = "ADJ"
object.suffixL2 == "S2" : object.conclusion = "PROPN"
object.nextWord1 == "гг" : object.conclusion = "ADJ"
object.nextTag1 == "PRON" and object.nextTag2 == "VERB" : object.conclusion = "ADJ"
object.prevWord1 == "В" and object.nextWord1 == "--" : object.conclusion = "ADJ"
object.nextWord1 == "ноября" : object.conclusion = "ADJ"
object.prevWord1 == "в" and object.nextWord1 == "--" : object.conclusion = "ADJ"
object.tag == "ADP" : object.conclusion = "ADP"
object.prevWord1 == "так" and object.word == "как" : object.conclusion = "ADV"
object.prevTag1 == "ADJ" and object.word == "типа" : object.conclusion = "NOUN"
object.prevTag1 == "NOUN" and object.word == "как" and object.nextTag1 == "ADP" : object.conclusion = "ADV"
object.word == "Как" and object.nextTag1 == "VERB" : object.conclusion = "SCONJ"
object.word == "как" and object.nextTag1 == "VERB" and object.nextTag2 == "ADP" : object.conclusion = "ADV"
object.prevWord2 == "того" and object.prevWord1 == "," and object.word == "как" : object.conclusion = "SCONJ"
object.prevWord1 == "того" and object.word == "как" : object.conclusion = "SCONJ"
object.tag == "PRON" : object.conclusion = "PRON"
object.word == "том" and object.nextTag1 == "NOUN" : object.conclusion = "DET"
object.nextTag1 == "PART" and object.nextTag2 == "NOUN" : object.conclusion = "DET"
object.nextWord1 == "просто" : object.conclusion = "PRON"
object.prevWord1 == "--" and object.word == "это" : object.conclusion = "AUX"
object.word == "это" and object.nextTag1 == "NOUN" : object.conclusion = "DET"
object.prevTag1 == "" and object.nextTag1 == "NOUN" : object.conclusion = "DET"
object.word == "т." and object.nextTag1 == "ADV" : object.conclusion = "ADV"
object.prevWord1 == "в" and object.word == "этом" : object.conclusion = "DET"
object.word == "всего" and object.nextTag1 == "NOUN" : object.conclusion = "DET"
object.word == "того" and object.nextWord1 == "времени" : object.conclusion = "DET"
object.tag == "SCONJ" : object.conclusion = "SCONJ"
object.word == "что" and object.nextTag1 == "VERB" : object.conclusion = "PRON"
object.prevWord2 == "том" and object.word == "что" : object.conclusion = "SCONJ"
object.prevWord2 == "признал" and object.word == "что" : object.conclusion = "SCONJ"
object.nextWord2 == "в" : object.conclusion = "SCONJ"
object.word == "потому" and object.nextTag1 == "PUNCT" : object.conclusion = "ADV"
object.tag == "PROPN" : object.conclusion = "PROPN"
object.prevTag1 == "AUX" : object.conclusion = "VERB"
object.suffixL2 == "ий" : object.conclusion = "PROPN"
object.suffixL4 == "" : object.conclusion = "PROPN"
object.nextTag1 == "PROPN" and object.nextTag2 == "PUNCT" : object.conclusion = "PROPN"
object.prevWord1 == "-" and object.nextWord1 == "-" : object.conclusion = "ADV"
object.suffixL4 == "ован" : object.conclusion = "VERB"
object.suffixL4 == "мена" : object.conclusion = "NOUN"
object.suffixL4 == "гион" : object.conclusion = "NOUN"
object.nextWord1 == "of" : object.conclusion = "NOUN"
object.prevTag2 == "VERB" and object.prevTag1 == "ADJ" : object.conclusion = "NOUN"
object.suffixL3 == "" : object.conclusion = "PROPN"
object.prevTag1 == "NUM" and object.nextTag1 == "PUNCT" : object.conclusion = "NOUN"
object.suffixL4 == "овед" : object.conclusion = "NOUN"
object.prevTag2 == "ADJ" and object.prevTag1 == "ADJ" : object.conclusion = "NOUN"
object.suffixL3 == "" : object.conclusion = "PROPN"
object.suffixL3 == "ога" : object.conclusion = "NOUN"
object.suffixL4 == "чена" : object.conclusion = "VERB"
object.suffixL4 == "вана" : object.conclusion = "VERB"
object.word == "Ивана" : object.conclusion = "PROPN"
object.suffixL4 == "нена" : object.conclusion = "VERB"
object.suffixL3 == "чна" : object.conclusion = "ADJ"
object.nextTag1 == "PRON" and object.nextTag2 == "VERB" : object.conclusion = "NOUN"
object.suffixL3 == "ics" : object.conclusion = "NOUN"
object.prevTag1 == "VERB" and object.nextTag1 == "ADP" : object.conclusion = "NOUN"
object.prevWord2 == "" : object.conclusion = "PROPN"
object.suffixL2 == "ua" : object.conclusion = "NOUN"
object.suffixL3 == "ача" : object.conclusion = "NOUN"
object.suffixL2 == "nd" : object.conclusion = "NOUN"
object.suffixL3 == "sis" : object.conclusion = "NOUN"
object.prevWord1 == "of" : object.conclusion = "NOUN"
object.prevTag1 == "ADP" and object.nextTag1 == "PROPN" : object.conclusion = "PROPN"
object.suffixL2 == "ry" : object.conclusion = "NOUN"
object.nextWord1 == "the" : object.conclusion = "VERB"
object.suffixL2 == "ed" : object.conclusion = "VERB"
object.prevTag2 == "PUNCT" and object.prevTag1 == "NOUN" : object.conclusion = "PROPN"
object.nextWord1 == "Airlines" : object.conclusion = "ADJ"
object.tag == "DET" : object.conclusion = "DET"
object.nextWord2 == "что" : object.conclusion = "PRON"
object.word == "его" and object.nextTag1 == "VERB" : object.conclusion = "PRON"
object.prevTag1 == "PUNCT" and object.word == "то" : object.conclusion = "ADV"
object.word == "то" and object.nextWord1 == "есть" : object.conclusion = "PRON"
object.nextWord1 == "с" : object.conclusion = "CCONJ"
object.prevTag1 == "VERB" and object.nextTag1 == "ADP" : object.conclusion = "PRON"
object.nextWord1 == "из" : object.conclusion = "DET"
object.prevTag1 == "ADP" and object.word == "этого" : object.conclusion = "PRON"
object.nextTag2 == "PUNCT" : object.conclusion = "DET"
object.nextTag1 == "NOUN" and object.nextTag2 == "NOUN" : object.conclusion = "DET"
object.word == "этого" and object.nextTag1 == "NOUN" and object.nextTag2 == "ADP" : object.conclusion = "DET"
object.nextTag2 == "PRON" : object.conclusion = "DET"
object.prevWord1 == "с" : object.conclusion = "DET"
object.prevTag1 == "VERB" and object.nextTag1 == "ADV" : object.conclusion = "PRON"
object.prevTag1 == "ADJ" and object.nextTag1 == "ADP" : object.conclusion = "PRON"
object.prevTag1 == "NOUN" and object.nextTag1 == "ADP" : object.conclusion = "PRON"
object.prevTag1 == "VERB" and object.word == "его" and object.nextTag1 == "PUNCT" : object.conclusion = "PRON"
object.nextTag1 == "CCONJ" : object.conclusion = "PRON"
object.prevTag2 == "PRON" and object.prevTag1 == "VERB" and object.word == "её" : object.conclusion = "PRON"
object.nextTag1 == "PUNCT" and object.nextTag2 == "NOUN" : object.conclusion = "PRON"
object.word == "его" and object.nextTag1 == "PUNCT" : object.conclusion = "DET"
object.tag == "SYM" : object.conclusion = "SYM"
object.tag == "PART" : object.conclusion = "PART"
object.tag == "PUNCT" : object.conclusion = "PUNCT"
object.tag == "VERB" : object.conclusion = "VERB"
object.suffixL4 == "ость" : object.conclusion = "NOUN"
object.prevTag1 == "ADJ" and object.nextTag1 == "PUNCT" : object.conclusion = "NOUN"
object.nextTag2 == "SCONJ" : object.conclusion = "VERB"
object.suffixL4 == "тели" : object.conclusion = "NOUN"
object.prevTag1 == "NOUN" and object.word == "хотели" and object.nextTag1 == "VERB" : object.conclusion = "VERB"
object.suffixL3 == "лла" : object.conclusion = "PROPN"
object.suffixL3 == "ины" : object.conclusion = "NOUN"
object.suffixL4 == "щины" : object.conclusion = "PROPN"
object.nextTag2 == "" : object.conclusion = "PROPN"
object.nextWord1 == "-" : object.conclusion = "ADV"
object.suffixL3 == "лав" : object.conclusion = "PROPN"
object.prevWord1 == "-" : object.conclusion = "PROPN"
object.suffixL3 == "оны" : object.conclusion = "NOUN"
object.prevWord1 == "``" and object.nextWord1 == "&#39;&#39;" : object.conclusion = "NOUN"
object.suffixL2 == "ен" : object.conclusion = "PROPN"
object.prevTag2 == "CCONJ" and object.prevTag1 == "PUNCT" : object.conclusion = "VERB"
object.suffixL3 == "вен" : object.conclusion = "PROPN"
object.suffixL3 == "мен" : object.conclusion = "NOUN"
object.suffixL3 == "лли" : object.conclusion = "PROPN"
object.suffixL4 == "итет" : object.conclusion = "NOUN"
object.prevTag1 == "NOUN" and object.word == "правил" : object.conclusion = "NOUN"
object.suffixL4 == "мены" : object.conclusion = "NOUN"
object.suffixL4 == "иала" : object.conclusion = "NOUN"
object.suffixL3 == "кет" : object.conclusion = "NOUN"
object.nextWord1 == "города" : object.conclusion = "NOUN"
object.suffixL3 == "уны" : object.conclusion = "NOUN"
object.suffixL4 == "пись" : object.conclusion = "NOUN"
object.suffixL3 == "дли" : object.conclusion = "PROPN"
object.prevTag2 == "NOUN" and object.prevTag1 == "ADP" : object.conclusion = "PROPN"
object.nextTag1 == "PUNCT" and object.nextTag2 == "PROPN" : object.conclusion = "PROPN"
object.suffixL2 == "ся" : object.conclusion = "VERB"
object.suffixL4 == "льны" : object.conclusion = "ADJ"
object.suffixL3 == "тен" : object.conclusion = "ADJ"
object.tag == "X" : object.conclusion = "X"
object.tag == "CCONJ" : object.conclusion = "CCONJ"
object.prevTag2 == "CCONJ" and object.prevTag1 == "ADV" : object.conclusion = "PART"
object.word == "А" and object.nextTag1 == "PUNCT" : object.conclusion = "NOUN"
object.prevWord1 == "-" and object.word == "либо" : object.conclusion = "ADV"
object.prevTag1 == "SCONJ" : object.conclusion = "PART"
object.tag == "AUX" : object.conclusion = "AUX"
object.prevWord1 == "не" and object.word == "было" : object.conclusion = "VERB"
object.word == "стал" and object.nextTag1 == "VERB" : object.conclusion = "VERB"
object.nextTag1 == "ADP" : object.conclusion = "VERB"
object.prevWord1 == "," and object.nextWord1 == "в" : object.conclusion = "AUX"
object.prevTag1 == "NOUN" and object.word == "были" and object.nextTag1 == "ADP" : object.conclusion = "AUX"
object.prevTag1 == "CCONJ" : object.conclusion = "AUX"
object.word == "было" and object.nextTag1 == "NUM" : object.conclusion = "VERB"
object.prevWord2 == "," and object.word == "было" : object.conclusion = "AUX"
object.word == "стала" and object.nextTag1 == "VERB" : object.conclusion = "VERB"
object.prevTag1 == "PUNCT" and object.nextTag1 == "PUNCT" : object.conclusion = "VERB"
object.word == "стали" and object.nextTag1 == "VERB" : object.conclusion = "VERB"
object.word == "быть" and object.nextTag1 == "PART" : object.conclusion = "VERB"
object.prevTag1 == "PART" and object.nextTag1 == "PUNCT" : object.conclusion = "VERB"
object.tag == "ADJ" : object.conclusion = "ADJ"
object.prevTag1 == "PUNCT" and object.nextTag1 == "ADP" : object.conclusion = "VERB"
object.prevWord1 == "-" : object.conclusion = "ADJ"
object.prevWord1 == "--" : object.conclusion = "ADJ"
object.prevTag2 == "NOUN" and object.prevTag1 == "PUNCT" and object.word == "многие" : object.conclusion = "ADJ"
object.suffixL4 == "ений" : object.conclusion = "NOUN"
object.nextWord1 == "-" : object.conclusion = "ADV"
object.nextWord2 == "либо" : object.conclusion = "DET"
object.suffixL3 == "ций" : object.conclusion = "NOUN"
object.prevWord1 == "" : object.conclusion = "PROPN"
object.prevTag1 == "ADJ" and object.nextTag1 == "PUNCT" : object.conclusion = "NOUN"
object.suffixL3 == "ных" : object.conclusion = "ADJ"
object.suffixL2 == "ый" : object.conclusion = "ADJ"
object.suffixL3 == "ное" : object.conclusion = "ADJ"
object.suffixL2 == "го" : object.conclusion = "ADJ"
object.suffixL4 == "аний" : object.conclusion = "NOUN"
object.suffixL4 == "вший" : object.conclusion = "VERB"
object.word == "бывший" : object.conclusion = "ADJ"
object.suffixL4 == "ющая" : object.conclusion = "VERB"
object.nextWord1 == "км" : object.conclusion = "NUM"
object.word == "кв." : object.conclusion = "ADJ"
object.suffixL4 == "ющих" : object.conclusion = "VERB"
object.word == "следующих" : object.conclusion = "ADJ"
object.suffixL4 == "изму" : object.conclusion = "NOUN"
object.suffixL4 == "вших" : object.conclusion = "VERB"
object.word == "бывших" : object.conclusion = "ADJ"
object.suffixL4 == "вшим" : object.conclusion = "VERB"
object.suffixL4 == "ывая" : object.conclusion = "VERB"
object.suffixL4 == "емых" : object.conclusion = "VERB"
object.suffixL4 == "вшая" : object.conclusion = "VERB"
object.suffixL3 == "рий" : object.conclusion = "NOUN"
object.prevWord1 == "" : object.conclusion = "PROPN"
object.nextTag2 == "PUNCT" : object.conclusion = "PROPN"
object.prevTag2 == "PUNCT" and object.prevTag1 == "PART" : object.conclusion = "VERB"
object.suffixL3 == "зий" : object.conclusion = "NOUN"
object.suffixL3 == "лий" : object.conclusion = "NOUN"
object.suffixL4 == "дкой" : object.conclusion = "NOUN"
object.word == "я" and object.nextTag1 == "VERB" : object.conclusion = "PRON"
object.prevTag1 == "PUNCT" and object.nextTag1 == "DET" : object.conclusion = "VERB"
object.suffixL4 == "ивая" : object.conclusion = "VERB"
object.nextTag1 == "PROPN" : object.conclusion = "NOUN"
object.nextTag1 == "PRON" and object.nextTag2 == "NOUN" : object.conclusion = "VERB"
object.prevWord1 == "с" and object.nextWord1 == "." : object.conclusion = "NOUN"
object.prevWord1 == "семейства" : object.conclusion = "NOUN"
object.suffixL4 == "иной" : object.conclusion = "NOUN"
object.word == "21" and object.nextWord2 == "." : object.conclusion = "NUM"
object.word == "14" and object.nextTag1 == "NOUN" and object.nextTag2 == "PUNCT" : object.conclusion = "NUM"
object.suffixL4 == "ющее" : object.conclusion = "VERB"
object.suffixL4 == "емые" : object.conclusion = "VERB"
object.suffixL3 == "жая" : object.conclusion = "VERB"
object.suffixL4 == "олго" : object.conclusion = "ADV"
object.suffixL4 == "рмой" : object.conclusion = "NOUN"
object.suffixL4 == "вкой" : object.conclusion = "NOUN"
object.nextTag1 == "AUX" and object.nextTag2 == "ADV" : object.conclusion = "PROPN"
object.prevWord2 == "названия" : object.conclusion = "PROPN"
object.word == "12" and object.nextTag1 == "PUNCT" : object.conclusion = "NUM"
object.word == "2005" and object.nextTag1 == "PUNCT" : object.conclusion = "NUM"
object.word == "14" and object.nextWord1 == "лет" : object.conclusion = "NUM"
object.suffixL4 == "ящих" : object.conclusion = "VERB"
object.suffixL4 == "ющим" : object.conclusion = "VERB"
object.nextTag1 == "NOUN" and object.nextTag2 == "ADP" : object.conclusion = "ADJ"
object.suffixL4 == "ящий" : object.conclusion = "VERB"
object.suffixL4 == "щему" : object.conclusion = "VERB"
object.suffixL4 == "упая" : object.conclusion = "VERB"
object.suffixL4 == "емой" : object.conclusion = "VERB"
object.nextTag1 == "ADV" and object.nextTag2 == "ADP" : object.conclusion = "VERB"

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -5,6 +5,7 @@ Repository for the NLP-LAB
----
* website: http://sda.cs.uni-bonn.de/nlp/
* Main git repository: https://github.com/SmartDataAnalytics/MA-INF-4222-NLP-Lab
------
@@ -24,3 +25,18 @@ Repository for the NLP-LAB
----
## Possible Topics
* P1: Guess Me
  * a "game" for guessing a certain person
* P2: I am Groot?
  * sentiment analysis / classification with an adequate reaction (e.g. as a dialog system)
* P3: Question Linking
  * search for predefined questions in a knowledge graph