From 583d0c1356fac77216c96386d5d13563e4aef2b9 Mon Sep 17 00:00:00 2001 From: Jonas Weinz Date: Sat, 28 Apr 2018 21:15:48 +0200 Subject: [PATCH] exercise 1 first try --- Jonas_Solutions/Exercise01.ipynb | 300 ++++++++++ Jonas_Solutions/Exercise01_sandbox.ipynb | 678 +++++++++++++++++++++++ 2 files changed, 978 insertions(+) create mode 100644 Jonas_Solutions/Exercise01.ipynb create mode 100644 Jonas_Solutions/Exercise01_sandbox.ipynb diff --git a/Jonas_Solutions/Exercise01.ipynb b/Jonas_Solutions/Exercise01.ipynb new file mode 100644 index 0000000..c18db61 --- /dev/null +++ b/Jonas_Solutions/Exercise01.ipynb @@ -0,0 +1,300 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# NLP-LAB Exercise 01 by jonas weinz\n", + "----" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import nltk\n", + "import pprint\n", + "from sklearn.tree import DecisionTreeClassifier\n", + "from sklearn.feature_extraction import DictVectorizer\n", + "from sklearn.pipeline import Pipeline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## implementing own classifiers" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* writing an own feature funtion" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'\\n return {\\n \\'word\\': word,\\n \\'is_capitalized\\': word[0].upper() == word[0],\\n \\'prefix-1\\': word[0],\\n \\'suffix-1\\': word[-1],\\n \\'prev_word\\': \\'\\' if index == 0 else sentence[index - 1],\\n \\'next_word\\': \\'\\' if index == len(sentence) - 1 else sentence[index + 1],\\n \\'length\\': len(word),\\n \\'index\\' : index,\\n \\'rev_index\\': len(sentence) - index,\\n \\'sentence_length\\': len(sentence)#,\\n \\'relative_third\\': relative_third,\\n \\'is_punctuation_mark\\': is_punctuation_mark,\\n \\',\\': word == \",\",\\n \\'.\\': word == \".\",\\n \\'!\\': word == \"!\",\\n \\'?\\': word == \"?\"\\n }\\n'" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "def features(sentence, index):\n", + " word = sentence[index]\n", + " is_punctuation_mark = word == \"!\" or word == \".\" or word == \",\" or word == \"?\"\n", + " sentence_length = len(sentence)\n", + " relative_third = (index * 2) // sentence_length \n", + " return {\n", + " 'word': sentence[index],\n", + " 'is_capitalized': sentence[index][0].upper() == sentence[index][0],\n", + " 'prefix-1': sentence[index][0],\n", + " 'suffix-1': sentence[index][-1],\n", + " 'prev_word': '' if index == 0 else sentence[index - 1],\n", + " 'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],\n", + " 'numerical': word.isnumeric(),\n", + " 'is_punctuation_mark': is_punctuation_mark,\n", + " ',': word == \",\",\n", + " '.': word == \".\",\n", + " '!': word == \"!\",\n", + " '?': word == \"?\"\n", + " }\n", + "'''\n", + " return {\n", + " 'word': word,\n", + " 'is_capitalized': word[0].upper() == word[0],\n", + " 'prefix-1': word[0],\n", + " 'suffix-1': word[-1],\n", + " 'prev_word': '' if index == 0 else sentence[index - 1],\n", + " 'next_word': '' if index == len(sentence) - 1 else sentence[index + 1],\n", + " 'length': len(word),\n", + " 'index' : index,\n", + " 'rev_index': len(sentence) - index,\n", + " 'sentence_length': len(sentence)#,\n", + " 'relative_third': relative_third,\n", + " 'is_punctuation_mark': is_punctuation_mark,\n", + " ',': word == 
\",\",\n", + " '.': word == \".\",\n", + " '!': word == \"!\",\n", + " '?': word == \"?\"\n", + " }\n", + "'''" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "test_sentence = ['The','cake','is','a','lie','!']\n", + "#for i in range(len(test_sentence)):\n", + "# pprint.pprint(features(test_sentence, i))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* function for creating training sets" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "def untag(tagged_sentence):\n", + " return [w for w,t in tagged_sentence]\n", + "\n", + "def transform_to_dataset(tagged_sentences):\n", + " X,y = [], []\n", + " \n", + " for s in tagged_sentences:\n", + " for i in range(len(s)):\n", + " X.append(features(untag(s),i))\n", + " y.append(s[i][1])\n", + " return X,y\n", + "\n", + "def create_training_and_test_set(annotated_sentences, relative_cutoff):\n", + " cutoff = int(relative_cutoff * len(annotated_sentences))\n", + " training_sentences = annotated_sentences[:cutoff]\n", + " test_sentences = annotated_sentences[cutoff:]\n", + " \n", + " X,y = transform_to_dataset(training_sentences)\n", + " tX, ty = transform_to_dataset(test_sentences)\n", + " \n", + " return X,y,tX,ty" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* Decision Tree classifier" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "def train_classifier(X,y,classifier,max_size=10000):\n", + " clf = Pipeline([\n", + " ('vectorizer', DictVectorizer(sparse=False)),\n", + " ('classifier', classifier)\n", + " ])\n", + " \n", + " print(\"start training…\")\n", + " \n", + " clf.fit(\n", + " X if len(X) < max_size else X[:max_size],\n", + " y if len(y) < max_size else y[:max_size]\n", + " )\n", + " \n", + " print(\"training done\")\n", + " \n", + " return clf" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* classifier evaluater" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "def test_classifier(clf, tX, ty):\n", + " accuracy = clf.score(tX, ty)\n", + " print(\"Accuracy: \", accuracy)\n", + " # TODO: more analytics" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Exercises:" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Exercise 01\n", + "* train and testing english custom POS tagger model:" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "start training…\n", + "training done\n", + "Accuracy: 0.8842756624582065\n" + ] + } + ], + "source": [ + "annotated_sent = nltk.corpus.treebank.tagged_sents()\n", + "\n", + "X,y,tX,ty = create_training_and_test_set(annotated_sentences=annotated_sent, \n", + " relative_cutoff=0.8)\n", + "\n", + "classifier = DecisionTreeClassifier(criterion='entropy', splitter='random')\n", + "clf = train_classifier(X,y,classifier)\n", + "test_classifier(clf=clf, tX=tX, ty=ty)" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "8257\n" + ] + } + ], + "source": [ + "print(len(classifier.feature_importances_))" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + 
"text/plain": [ + "80637" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "len(y)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/Jonas_Solutions/Exercise01_sandbox.ipynb b/Jonas_Solutions/Exercise01_sandbox.ipynb new file mode 100644 index 0000000..54c88ac --- /dev/null +++ b/Jonas_Solutions/Exercise01_sandbox.ipynb @@ -0,0 +1,678 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# NLP-LAB Exercise 01 (Jonas Weinz)\n", + "----" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## playing around with nltk" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* import nltk and download packages" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[nltk_data] Downloading package punkt to /home/jonas/nltk_data...\n", + "[nltk_data] Package punkt is already up-to-date!\n", + "[nltk_data] Downloading package averaged_perceptron_tagger to\n", + "[nltk_data] /home/jonas/nltk_data...\n", + "[nltk_data] Package averaged_perceptron_tagger is already up-to-\n", + "[nltk_data] date!\n" + ] + }, + { + "data": { + "text/plain": [ + "True" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import nltk\n", + "\n", + "#nltk.download('all')\n", + "nltk.download('punkt')\n", + "nltk.download('averaged_perceptron_tagger')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "* analyse a text fragment" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "monologue = \"\"\"Ah! 
Now I’ve done Philosophy,\n", + "I’ve finished Law and Medicine,\n", + "And sadly even Theology:\n", + "Taken fierce pains, from end to end.\n", + "Now here I am, a fool for sure!\n", + "No wiser than I was before:\n", + "Master, Doctor’s what they call me,\n", + "And I’ve been ten years, already,\n", + "Crosswise, arcing, to and fro,\n", + "Leading my students by the nose,\n", + "And see that we can know - nothing!\n", + "It almost sets my heart burning.\n", + "I’m cleverer than all these teachers,\n", + "Doctors, Masters, scribes, preachers:\n", + "I’m not plagued by doubt or scruple,\n", + "Scared by neither Hell nor Devil –\n", + "Instead all Joy is snatched away,\n", + "What’s worth knowing, I can’t say,\n", + "I can’t say what I should teach\n", + "To make men better or convert each.\n", + "And then I’ve neither goods nor gold,\n", + "No worldly honour, or splendour hold:\n", + "Not even a dog would play this part!\n", + "So I’ve given myself to Magic art,\n", + "To see if, through Spirit powers and lips,\n", + "I might have all secrets at my fingertips.\n", + "And no longer, with rancid sweat, so,\n", + "Still have to speak what I cannot know:\n", + "That I may understand whatever\n", + "Binds the world’s innermost core together\"\"\"" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[('Ah', 'NN'),\n", + " ('!', '.'),\n", + " ('Now', 'RB'),\n", + " ('I', 'PRP'),\n", + " ('’', 'VBP'),\n", + " ('ve', 'JJ'),\n", + " ('done', 'VBN'),\n", + " ('Philosophy', 'NNP'),\n", + " (',', ','),\n", + " ('I', 'PRP'),\n", + " ('’', 'VBP'),\n", + " ('ve', 'RB'),\n", + " ('finished', 'VBN'),\n", + " ('Law', 'NNP'),\n", + " ('and', 'CC'),\n", + " ('Medicine', 'NNP'),\n", + " (',', ','),\n", + " ('And', 'CC'),\n", + " ('sadly', 'RB'),\n", + " ('even', 'RB'),\n", + " ('Theology', 'NNP'),\n", + " (':', ':'),\n", + " ('Taken', 'NNP'),\n", + " ('fierce', 'NN'),\n", + " ('pains', 'NNS'),\n", + " (',', ','),\n", + " ('from', 'IN'),\n", + " ('end', 'NN'),\n", + " ('to', 'TO'),\n", + " ('end', 'VB'),\n", + " ('.', '.'),\n", + " ('Now', 'RB'),\n", + " ('here', 'RB'),\n", + " ('I', 'PRP'),\n", + " ('am', 'VBP'),\n", + " (',', ','),\n", + " ('a', 'DT'),\n", + " ('fool', 'NN'),\n", + " ('for', 'IN'),\n", + " ('sure', 'JJ'),\n", + " ('!', '.'),\n", + " ('No', 'DT'),\n", + " ('wiser', 'JJR'),\n", + " ('than', 'IN'),\n", + " ('I', 'PRP'),\n", + " ('was', 'VBD'),\n", + " ('before', 'IN'),\n", + " (':', ':'),\n", + " ('Master', 'NN'),\n", + " (',', ','),\n", + " ('Doctor', 'NNP'),\n", + " ('’', 'NNP'),\n", + " ('s', 'VBD'),\n", + " ('what', 'WP'),\n", + " ('they', 'PRP'),\n", + " ('call', 'VBP'),\n", + " ('me', 'PRP'),\n", + " (',', ','),\n", + " ('And', 'CC'),\n", + " ('I', 'PRP'),\n", + " ('’', 'VBP'),\n", + " ('ve', 'RB'),\n", + " ('been', 'VBN'),\n", + " ('ten', 'CD'),\n", + " ('years', 'NNS'),\n", + " (',', ','),\n", + " ('already', 'RB'),\n", + " (',', ','),\n", + " ('Crosswise', 'NNP'),\n", + " (',', ','),\n", + " ('arcing', 'VBG'),\n", + " (',', ','),\n", + " ('to', 'TO'),\n", + " ('and', 'CC'),\n", + " ('fro', 'VB'),\n", + " (',', ','),\n", + " ('Leading', 'VBG'),\n", + " ('my', 'PRP$'),\n", + " ('students', 'NNS'),\n", + " ('by', 'IN'),\n", + " ('the', 'DT'),\n", + " ('nose', 'NN'),\n", + " (',', ','),\n", + " ('And', 'CC'),\n", + " ('see', 'VBP'),\n", + " ('that', 'IN'),\n", + " ('we', 'PRP'),\n", + " ('can', 'MD'),\n", + " ('know', 'VB'),\n", + " ('-', ':'),\n", + " ('nothing', 'NN'),\n", + " ('!', '.'),\n", + " ('It', 
'PRP'),\n", + " ('almost', 'RB'),\n", + " ('sets', 'VBZ'),\n", + " ('my', 'PRP$'),\n", + " ('heart', 'NN'),\n", + " ('burning', 'NN'),\n", + " ('.', '.'),\n", + " ('I', 'PRP'),\n", + " ('’', 'VBP'),\n", + " ('m', 'RB'),\n", + " ('cleverer', 'JJR'),\n", + " ('than', 'IN'),\n", + " ('all', 'PDT'),\n", + " ('these', 'DT'),\n", + " ('teachers', 'NNS'),\n", + " (',', ','),\n", + " ('Doctors', 'NNS'),\n", + " (',', ','),\n", + " ('Masters', 'NNS'),\n", + " (',', ','),\n", + " ('scribes', 'NNS'),\n", + " (',', ','),\n", + " ('preachers', 'NNS'),\n", + " (':', ':'),\n", + " ('I', 'PRP'),\n", + " ('’', 'VBP'),\n", + " ('m', 'RB'),\n", + " ('not', 'RB'),\n", + " ('plagued', 'VBN'),\n", + " ('by', 'IN'),\n", + " ('doubt', 'NN'),\n", + " ('or', 'CC'),\n", + " ('scruple', 'NN'),\n", + " (',', ','),\n", + " ('Scared', 'VBN'),\n", + " ('by', 'IN'),\n", + " ('neither', 'DT'),\n", + " ('Hell', 'NNP'),\n", + " ('nor', 'CC'),\n", + " ('Devil', 'NNP'),\n", + " ('–', 'NNP'),\n", + " ('Instead', 'RB'),\n", + " ('all', 'DT'),\n", + " ('Joy', 'NNP'),\n", + " ('is', 'VBZ'),\n", + " ('snatched', 'VBN'),\n", + " ('away', 'RB'),\n", + " (',', ','),\n", + " ('What', 'WP'),\n", + " ('’', 'VBD'),\n", + " ('s', 'JJ'),\n", + " ('worth', 'JJ'),\n", + " ('knowing', 'NN'),\n", + " (',', ','),\n", + " ('I', 'PRP'),\n", + " ('can', 'MD'),\n", + " ('’', 'VB'),\n", + " ('t', 'NNS'),\n", + " ('say', 'VBP'),\n", + " (',', ','),\n", + " ('I', 'PRP'),\n", + " ('can', 'MD'),\n", + " ('’', 'VB'),\n", + " ('t', 'NNS'),\n", + " ('say', 'VBP'),\n", + " ('what', 'WP'),\n", + " ('I', 'PRP'),\n", + " ('should', 'MD'),\n", + " ('teach', 'VB'),\n", + " ('To', 'TO'),\n", + " ('make', 'VB'),\n", + " ('men', 'NNS'),\n", + " ('better', 'JJR'),\n", + " ('or', 'CC'),\n", + " ('convert', 'VB'),\n", + " ('each', 'DT'),\n", + " ('.', '.'),\n", + " ('And', 'CC'),\n", + " ('then', 'RB'),\n", + " ('I', 'PRP'),\n", + " ('’', 'VBP'),\n", + " ('ve', 'JJ'),\n", + " ('neither', 'CC'),\n", + " ('goods', 'NNS'),\n", + " ('nor', 'CC'),\n", + " ('gold', 'NN'),\n", + " (',', ','),\n", + " ('No', 'DT'),\n", + " ('worldly', 'RB'),\n", + " ('honour', 'VBZ'),\n", + " (',', ','),\n", + " ('or', 'CC'),\n", + " ('splendour', 'JJ'),\n", + " ('hold', 'NN'),\n", + " (':', ':'),\n", + " ('Not', 'RB'),\n", + " ('even', 'RB'),\n", + " ('a', 'DT'),\n", + " ('dog', 'NN'),\n", + " ('would', 'MD'),\n", + " ('play', 'VB'),\n", + " ('this', 'DT'),\n", + " ('part', 'NN'),\n", + " ('!', '.'),\n", + " ('So', 'RB'),\n", + " ('I', 'PRP'),\n", + " ('’', 'VBP'),\n", + " ('ve', 'JJ'),\n", + " ('given', 'VBN'),\n", + " ('myself', 'PRP'),\n", + " ('to', 'TO'),\n", + " ('Magic', 'NNP'),\n", + " ('art', 'NN'),\n", + " (',', ','),\n", + " ('To', 'TO'),\n", + " ('see', 'VB'),\n", + " ('if', 'IN'),\n", + " (',', ','),\n", + " ('through', 'IN'),\n", + " ('Spirit', 'NNP'),\n", + " ('powers', 'NNS'),\n", + " ('and', 'CC'),\n", + " ('lips', 'NNS'),\n", + " (',', ','),\n", + " ('I', 'PRP'),\n", + " ('might', 'MD'),\n", + " ('have', 'VB'),\n", + " ('all', 'DT'),\n", + " ('secrets', 'NNS'),\n", + " ('at', 'IN'),\n", + " ('my', 'PRP$'),\n", + " ('fingertips', 'NNS'),\n", + " ('.', '.'),\n", + " ('And', 'CC'),\n", + " ('no', 'DT'),\n", + " ('longer', 'RBR'),\n", + " (',', ','),\n", + " ('with', 'IN'),\n", + " ('rancid', 'JJ'),\n", + " ('sweat', 'NN'),\n", + " (',', ','),\n", + " ('so', 'RB'),\n", + " (',', ','),\n", + " ('Still', 'RB'),\n", + " ('have', 'VBP'),\n", + " ('to', 'TO'),\n", + " ('speak', 'VB'),\n", + " ('what', 'WP'),\n", + " ('I', 'PRP'),\n", + " ('can', 'MD'),\n", + " ('not', 'RB'),\n", + " 
('know', 'VB'),\n", + " (':', ':'),\n", + " ('That', 'IN'),\n", + " ('I', 'PRP'),\n", + " ('may', 'MD'),\n", + " ('understand', 'VB'),\n", + " ('whatever', 'WDT'),\n", + " ('Binds', 'NNP'),\n", + " ('the', 'DT'),\n", + " ('world', 'NN'),\n", + " ('’', 'NNP'),\n", + " ('s', 'VBZ'),\n", + " ('innermost', 'NN'),\n", + " ('core', 'NN'),\n", + " ('together', 'RB')]" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "tokens = nltk.word_tokenize(monologue)\n", + "display(nltk.pos_tag(tokens))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Exploring Penn TreeBank" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[nltk_data] Downloading package treebank to /home/jonas/nltk_data...\n", + "[nltk_data] Package treebank is already up-to-date!\n" + ] + }, + { + "data": { + "text/plain": [ + "[('Pierre', 'NNP'),\n", + " ('Vinken', 'NNP'),\n", + " (',', ','),\n", + " ('61', 'CD'),\n", + " ('years', 'NNS'),\n", + " ('old', 'JJ'),\n", + " (',', ','),\n", + " ('will', 'MD'),\n", + " ('join', 'VB'),\n", + " ('the', 'DT'),\n", + " ('board', 'NN'),\n", + " ('as', 'IN'),\n", + " ('a', 'DT'),\n", + " ('nonexecutive', 'JJ'),\n", + " ('director', 'NN'),\n", + " ('Nov.', 'NNP'),\n", + " ('29', 'CD'),\n", + " ('.', '.')]" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "data": { + "text/plain": [ + "'# Tagged sentences: 3914'" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "nltk.download('treebank')\n", + "annotated_sent = nltk.corpus.treebank.tagged_sents()\n", + "\n", + "display(annotated_sent[0])\n", + "display(\"# Tagged sentences: \" + str(len(annotated_sent)))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## tagset information" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "[nltk_data] Downloading package tagsets to /home/jonas/nltk_data...\n", + "[nltk_data] Package tagsets is already up-to-date!\n", + "$: dollar\n", + " $ -$ --$ A$ C$ HK$ M$ NZ$ S$ U.S.$ US$\n", + "'': closing quotation mark\n", + " ' ''\n", + "(: opening parenthesis\n", + " ( [ {\n", + "): closing parenthesis\n", + " ) ] }\n", + ",: comma\n", + " ,\n", + "--: dash\n", + " --\n", + ".: sentence terminator\n", + " . ! ?\n", + ":: colon or ellipsis\n", + " : ; ...\n", + "CC: conjunction, coordinating\n", + " & 'n and both but either et for less minus neither nor or plus so\n", + " therefore times v. versus vs. 
whether yet\n", + "CD: numeral, cardinal\n", + " mid-1890 nine-thirty forty-two one-tenth ten million 0.5 one forty-\n", + " seven 1987 twenty '79 zero two 78-degrees eighty-four IX '60s .025\n", + " fifteen 271,124 dozen quintillion DM2,000 ...\n", + "DT: determiner\n", + " all an another any both del each either every half la many much nary\n", + " neither no some such that the them these this those\n", + "EX: existential there\n", + " there\n", + "FW: foreign word\n", + " gemeinschaft hund ich jeux habeas Haementeria Herr K'ang-si vous\n", + " lutihaw alai je jour objets salutaris fille quibusdam pas trop Monte\n", + " terram fiche oui corporis ...\n", + "IN: preposition or conjunction, subordinating\n", + " astride among uppon whether out inside pro despite on by throughout\n", + " below within for towards near behind atop around if like until below\n", + " next into if beside ...\n", + "JJ: adjective or numeral, ordinal\n", + " third ill-mannered pre-war regrettable oiled calamitous first separable\n", + " ectoplasmic battery-powered participatory fourth still-to-be-named\n", + " multilingual multi-disciplinary ...\n", + "JJR: adjective, comparative\n", + " bleaker braver breezier briefer brighter brisker broader bumper busier\n", + " calmer cheaper choosier cleaner clearer closer colder commoner costlier\n", + " cozier creamier crunchier cuter ...\n", + "JJS: adjective, superlative\n", + " calmest cheapest choicest classiest cleanest clearest closest commonest\n", + " corniest costliest crassest creepiest crudest cutest darkest deadliest\n", + " dearest deepest densest dinkiest ...\n", + "LS: list item marker\n", + " A A. B B. C C. D E F First G H I J K One SP-44001 SP-44002 SP-44005\n", + " SP-44007 Second Third Three Two * a b c d first five four one six three\n", + " two\n", + "MD: modal auxiliary\n", + " can cannot could couldn't dare may might must need ought shall should\n", + " shouldn't will would\n", + "NN: noun, common, singular or mass\n", + " common-carrier cabbage knuckle-duster Casino afghan shed thermostat\n", + " investment slide humour falloff slick wind hyena override subhumanity\n", + " machinist ...\n", + "NNP: noun, proper, singular\n", + " Motown Venneboerger Czestochwa Ranzer Conchita Trumplane Christos\n", + " Oceanside Escobar Kreisler Sawyer Cougar Yvette Ervin ODI Darryl CTCA\n", + " Shannon A.K.C. 
Meltex Liverpool ...\n", + "NNPS: noun, proper, plural\n", + " Americans Americas Amharas Amityvilles Amusements Anarcho-Syndicalists\n", + " Andalusians Andes Andruses Angels Animals Anthony Antilles Antiques\n", + " Apache Apaches Apocrypha ...\n", + "NNS: noun, common, plural\n", + " undergraduates scotches bric-a-brac products bodyguards facets coasts\n", + " divestitures storehouses designs clubs fragrances averages\n", + " subjectivists apprehensions muses factory-jobs ...\n", + "PDT: pre-determiner\n", + " all both half many quite such sure this\n", + "POS: genitive marker\n", + " ' 's\n", + "PRP: pronoun, personal\n", + " hers herself him himself hisself it itself me myself one oneself ours\n", + " ourselves ownself self she thee theirs them themselves they thou thy us\n", + "PRP$: pronoun, possessive\n", + " her his mine my our ours their thy your\n", + "RB: adverb\n", + " occasionally unabatingly maddeningly adventurously professedly\n", + " stirringly prominently technologically magisterially predominately\n", + " swiftly fiscally pitilessly ...\n", + "RBR: adverb, comparative\n", + " further gloomier grander graver greater grimmer harder harsher\n", + " healthier heavier higher however larger later leaner lengthier less-\n", + " perfectly lesser lonelier longer louder lower more ...\n", + "RBS: adverb, superlative\n", + " best biggest bluntest earliest farthest first furthest hardest\n", + " heartiest highest largest least less most nearest second tightest worst\n", + "RP: particle\n", + " aboard about across along apart around aside at away back before behind\n", + " by crop down ever fast for forth from go high i.e. in into just later\n", + " low more off on open out over per pie raising start teeth that through\n", + " under unto up up-pp upon whole with you\n", + "SYM: symbol\n", + " % & ' '' ''. ) ). * + ,. 
< = > @ A[fj] U.S U.S.S.R * ** ***\n", + "TO: \"to\" as preposition or infinitive marker\n", + " to\n", + "UH: interjection\n", + " Goodbye Goody Gosh Wow Jeepers Jee-sus Hubba Hey Kee-reist Oops amen\n", + " huh howdy uh dammit whammo shucks heck anyways whodunnit honey golly\n", + " man baby diddle hush sonuvabitch ...\n", + "VB: verb, base form\n", + " ask assemble assess assign assume atone attention avoid bake balkanize\n", + " bank begin behold believe bend benefit bevel beware bless boil bomb\n", + " boost brace break bring broil brush build ...\n", + "VBD: verb, past tense\n", + " dipped pleaded swiped regummed soaked tidied convened halted registered\n", + " cushioned exacted snubbed strode aimed adopted belied figgered\n", + " speculated wore appreciated contemplated ...\n", + "VBG: verb, present participle or gerund\n", + " telegraphing stirring focusing angering judging stalling lactating\n", + " hankerin' alleging veering capping approaching traveling besieging\n", + " encrypting interrupting erasing wincing ...\n", + "VBN: verb, past participle\n", + " multihulled dilapidated aerosolized chaired languished panelized used\n", + " experimented flourished imitated reunifed factored condensed sheared\n", + " unsettled primed dubbed desired ...\n", + "VBP: verb, present tense, not 3rd person singular\n", + " predominate wrap resort sue twist spill cure lengthen brush terminate\n", + " appear tend stray glisten obtain comprise detest tease attract\n", + " emphasize mold postpone sever return wag ...\n", + "VBZ: verb, present tense, 3rd person singular\n", + " bases reconstructs marks mixes displeases seals carps weaves snatches\n", + " slumps stretches authorizes smolders pictures emerges stockpiles\n", + " seduces fizzes uses bolsters slaps speaks pleads ...\n", + "WDT: WH-determiner\n", + " that what whatever which whichever\n", + "WP: WH-pronoun\n", + " that what whatever whatsoever which who whom whosoever\n", + "WP$: WH-pronoun, possessive\n", + " whose\n", + "WRB: Wh-adverb\n", + " how however whence whenever where whereby whereever wherein whereof why\n", + "``: opening quotation mark\n", + " ` ``\n", + "None\n" + ] + } + ], + "source": [ + "nltk.download('tagsets')\n", + "print(nltk.help.upenn_tagset())" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[('Pierre', 'NNP'),\n", + " ('Vinken', 'NNP'),\n", + " (',', ','),\n", + " ('61', 'CD'),\n", + " ('years', 'NNS'),\n", + " ('old', 'JJ'),\n", + " (',', ','),\n", + " ('will', 'MD'),\n", + " ('join', 'VB'),\n", + " ('the', 'DT'),\n", + " ('board', 'NN'),\n", + " ('as', 'IN'),\n", + " ('a', 'DT'),\n", + " ('nonexecutive', 'JJ'),\n", + " ('director', 'NN'),\n", + " ('Nov.', 'NNP'),\n", + " ('29', 'CD'),\n", + " ('.', '.')]" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "annotated_sent[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.3" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +}
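
The `test_classifier` helper in Exercise01.ipynb only prints accuracy and leaves a `# TODO: more analytics` marker. A minimal sketch of what such analytics could look like, assuming `clf`, `tX` and `ty` are the objects produced in the Exercise 01 cell (the DictVectorizer + DecisionTreeClassifier pipeline and the held-out 20% of the treebank):

# sketch only: per-tag evaluation for the "TODO: more analytics" step
from sklearn.metrics import classification_report, confusion_matrix

def test_classifier_verbose(clf, tX, ty):
    predicted = clf.predict(tX)              # predict tags for the test set
    print("Accuracy:", clf.score(tX, ty))    # same number as test_classifier
    # per-tag precision / recall / F1 instead of a single accuracy value
    print(classification_report(ty, predicted))
    # confusion matrix shows which tags get mixed up with which
    return confusion_matrix(ty, predicted)

# usage: cm = test_classifier_verbose(clf, tX, ty)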
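
The trained pipeline can also be applied to unseen sentences by building the same per-token feature dicts used during training. A small sketch, assuming `clf` and the notebook's `features()` helper are in scope; `tag_sentence` is a hypothetical name introduced here for illustration:

# sketch only: tag a raw (untagged) sentence with the trained pipeline
def tag_sentence(clf, sentence):
    # one feature dict per token, exactly as in transform_to_dataset()
    X = [features(sentence, i) for i in range(len(sentence))]
    return list(zip(sentence, clf.predict(X)))

# e.g. tag_sentence(clf, ['The', 'cake', 'is', 'a', 'lie', '!'])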