diff --git a/text-mining/exercice6-7-8-9.ipynb b/text-mining/exercice6-7-8-9.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..c89f844cffac2f73e087f4318c051481e8b5377c
--- /dev/null
+++ b/text-mining/exercice6-7-8-9.ipynb
@@ -0,0 +1,716 @@
+{
+ "cells": [
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [
+ "# Text Mining (concepts) - Exercises 6, 7, 8 and 9\n",
+ "In the following notebook, we aim to compute the similarities between the following movies:\n",
+ " - (\"Harry Potter 1\",\"671\"),\n",
+ " - (\"Harry Potter 2\",\"672\"),\n",
+ " - (\"The lord of the ring 1\", \"120\"),\n",
+ " - (\"The Hobbit 1\", \"49051\"),\n",
+ " \n",
+ "Using the outcome of Exercise 2, extract a summary of those 4 movies (including actors).\n",
+ "In the present notebook, we will build a vector representation of the 4 documents and compute the cosine similarity between each pair of movies. "
+ ] + },
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [
+ "### Packages"
+ ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [
+ "import math\n",
+ "from typing import Dict\n",
+ "from typing import List\n",
+ "from typing import Optional\n",
+ "from os import sep\n",
+ "from os import walk\n",
+ "\n",
+ "import numpy\n",
+ "\n",
+ "# @COMPLETE : add here missing packages for Text Mining\n",
+ "# One possible completion, matching the code used below (NLTK for\n",
+ "# tokenization/stopwords/stemming, gensim for Doc2Vec and scikit-learn\n",
+ "# for the cosine similarity):\n",
+ "import nltk\n",
+ "from nltk.corpus import stopwords\n",
+ "from nltk.stem.snowball import SnowballStemmer\n",
+ "from gensim.models.doc2vec import Doc2Vec, TaggedDocument\n",
+ "from sklearn.metrics.pairwise import cosine_similarity\n",
+ "# For NLTK, do not forget to download the required resources (punkt, stopwords)\n"
+ ] + },
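+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [
+ "# One-time download of the NLTK resources mentioned above: the 'punkt'\n",
+ "# tokenizer models and the English stopword list. Safe to re-run;\n",
+ "# nltk.download() skips resources that are already present.\n",
+ "nltk.download('punkt')\n",
+ "nltk.download('stopwords')"
+ ] + },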
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [
+ "### Classes"
+ ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [
+ "class Token:\n",
+ "    \"\"\"\n",
+ "    Class representing a given token. It stores the string representing the token, its identifier and the\n",
+ "    identifiers of the documents containing it.\n",
+ "\n",
+ "    |\n",
+ "\n",
+ "    The instance attributes are:\n",
+ "\n",
+ "    token_id:\n",
+ "        Identifier of the token.\n",
+ "    token:\n",
+ "        String representing the token.\n",
+ "    docs:\n",
+ "        Identifiers of documents containing the token.\n",
+ "    \"\"\"\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    token_id: int\n",
+ "    token: str\n",
+ "    docs: List[int]\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    def __init__(self, token_id: int, token: str):\n",
+ "        \"\"\"\n",
+ "        Constructor.\n",
+ "\n",
+ "        :param token_id: Identifier of the token.\n",
+ "        :param token: String representing the token.\n",
+ "        \"\"\"\n",
+ "        self.token_id = token_id\n",
+ "        self.token = token\n",
+ "        self.docs = []\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    def get_idf(self, nb_docs: int) -> float:\n",
+ "        \"\"\"\n",
+ "        Compute the IDF factor of a token.\n",
+ "\n",
+ "        :param nb_docs: Total number of documents in the corpus.\n",
+ "        :return: IDF factor.\n",
+ "        \"\"\"\n",
+ "\n",
+ "        if len(self.docs) == 0:\n",
+ "            return 0.0\n",
+ "        return math.log(float(nb_docs) / float(len(self.docs)))\n"
+ ] + },
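+ { + "cell_type": "markdown", + "metadata": {}, + "source": [
+ "A quick sanity check of `get_idf` (a small illustration, not one of the exercises): with `nb_docs = 4`, a token present in 2 documents gets an IDF of ln(4/2) ≈ 0.693, while a token present in all 4 documents gets ln(4/4) = 0. The cell below verifies the first case on a hand-built token."
+ ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [
+ "# Illustration only: a hand-built token appearing in 2 documents out of 4.\n",
+ "example_token = Token(0, \"hobbit\")\n",
+ "example_token.docs = [0, 1]\n",
+ "print(example_token.get_idf(4))  # ln(4/2), approximately 0.693"
+ ] + },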
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [
+ "class Doc:\n",
+ "    \"\"\"\n",
+ "    This class represents an instance of a document.\n",
+ "\n",
+ "    |\n",
+ "\n",
+ "    The instance attributes are:\n",
+ "\n",
+ "    url:\n",
+ "        URL of the document (if defined).\n",
+ "    doc_id:\n",
+ "        Identifier of the document.\n",
+ "    text:\n",
+ "        Text of the document to analyse.\n",
+ "    vector:\n",
+ "        Vector representing the document (None until built).\n",
+ "    tokens:\n",
+ "        List of tokens in order of appearance. The same token may appear several times.\n",
+ "    \"\"\"\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    url: Optional[str]\n",
+ "    doc_id: int\n",
+ "    text: str\n",
+ "    vector: Optional[numpy.ndarray]\n",
+ "    tokens: Optional[List[Token]]\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    def __init__(self, doc_id: int, text: str, url: Optional[str] = None):\n",
+ "        \"\"\"\n",
+ "        Constructor.\n",
+ "\n",
+ "        :param doc_id: Identifier of the document.\n",
+ "        :param text: Raw text of the document.\n",
+ "        :param url: URL of the document (if any).\n",
+ "        \"\"\"\n",
+ "        self.url = url\n",
+ "        self.doc_id = doc_id\n",
+ "        self.text = text\n",
+ "        self.vector = None\n",
+ "        self.tokens = None\n",
+ "\n"
+ ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [
+ "class DocCorpus:\n",
+ "    \"\"\"\n",
+ "    This class represents a corpus of documents and the corresponding dictionary of the tokens they contain.\n",
+ "\n",
+ "    |\n",
+ "\n",
+ "    The instance attributes are:\n",
+ "\n",
+ "    docs:\n",
+ "        List of documents.\n",
+ "    tokens:\n",
+ "        Dictionary of tokens (strings are the key).\n",
+ "    ids:\n",
+ "        Dictionary of tokens (identifiers are the key).\n",
+ "    method:\n",
+ "        String representing the method used for analysing (\"TF-IDF\" or \"Doc2Vec\").\n",
+ "    nb_dims:\n",
+ "        Number of dimensions of the semantic space.\n",
+ "    n_tokens:\n",
+ "        Number of distinct tokens in the corpus.\n",
+ "    stopwords:\n",
+ "        List of stopwords to eliminate from the analysis. By default, it's the classic English list.\n",
+ "    \"\"\"\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    docs: List[Doc]\n",
+ "    tokens: Dict[str, Token]\n",
+ "    ids: Dict[int, Token]\n",
+ "    method: str\n",
+ "    nb_dims: int\n",
+ "    n_tokens: int\n",
+ "    stopwords: List[str]\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    def __init__(self):\n",
+ "        \"\"\"\n",
+ "        Constructor.\n",
+ "        \"\"\"\n",
+ "        self.docs = []\n",
+ "        self.tokens = dict()\n",
+ "        self.ids = dict()\n",
+ "        self.method = \"Doc2Vec\"\n",
+ "        self.nb_dims = 0\n",
+ "        self.n_tokens = 0\n",
+ "        self.stopwords = stopwords.words('english')\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    def set_method(self, name: str) -> None:\n",
+ "        \"\"\"\n",
+ "        Change the vectorization method.\n",
+ "\n",
+ "        :param name: Name of the method (\"TF-IDF\" or \"Doc2Vec\").\n",
+ "        \"\"\"\n",
+ "        self.method = name\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    def add_doc(self, new_doc: str, url: Optional[str] = None) -> None:\n",
+ "        \"\"\"\n",
+ "        Add a string representing a document to the corpus and assign an\n",
+ "        identifier to the document.\n",
+ "\n",
+ "        :param new_doc: New document.\n",
+ "        :param url: URL of the document (if any).\n",
+ "        \"\"\"\n",
+ "        new_id = len(self.docs)\n",
+ "        self.docs.append(Doc(new_id, new_doc, url))\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    def add_docs(self, docs: List[str]) -> None:\n",
+ "        \"\"\"\n",
+ "        Add a list of strings representing documents to the corpus. Each document receives an\n",
+ "        identifier.\n",
+ "\n",
+ "        :param docs: List of documents.\n",
+ "        \"\"\"\n",
+ "        for cur_doc in docs:\n",
+ "            self.add_doc(cur_doc)\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    def build_vectors(self) -> None:\n",
+ "        \"\"\"\n",
+ "        Build the vectors for the documents of the corpus based on the current method.\n",
+ "        \"\"\"\n",
+ "\n",
+ "        if self.method == \"Doc2Vec\":\n",
+ "            self.build_doc2vec()\n",
+ "        elif self.method == \"TF-IDF\":\n",
+ "            self.build_tf_idf()\n",
+ "        else:\n",
+ "            raise ValueError(\"'\" + self.method + \"': Invalid building method\")\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    def get_term_document_matrix(self) -> numpy.ndarray:\n",
+ "        \"\"\"\n",
+ "        Build a document-token matrix with the weights as values.\n",
+ "\n",
+ "        :return: Document-token matrix.\n",
+ "        \"\"\"\n",
+ "\n",
+ "        matrix = numpy.zeros(shape=(len(self.docs), self.nb_dims))\n",
+ "        for cur_doc in self.docs:\n",
+ "            for i, weight in enumerate(cur_doc.vector):\n",
+ "                matrix[cur_doc.doc_id, i] = weight\n",
+ "        return matrix\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    def add_token(self, cur_doc: Doc, token_str: str) -> None:\n",
+ "        \"\"\"Add a token in string format to the corpus.\n",
+ "\n",
+ "        Find the identifier of the current token in the dictionary.\n",
+ "        If not present, create a new Token instance.\n",
+ "\n",
+ "        Attach the token to the current document.\n",
+ "\n",
+ "        Finally, link the document to the Token object.\n",
+ "\n",
+ "        :param cur_doc: The current document from which the token is extracted.\n",
+ "        :param token_str: The token after the cleaning steps (stopwords, stemming, ...).\n",
+ "        \"\"\"\n",
+ "        # Find the identifier of the current token in the dictionary\n",
+ "        if token_str not in self.tokens.keys():\n",
+ "            token_id = len(self.tokens)\n",
+ "            token = Token(token_id, token_str)\n",
+ "            self.tokens[token_str] = token\n",
+ "            self.ids[token_id] = token\n",
+ "            self.n_tokens = len(self.tokens)\n",
+ "        else:\n",
+ "            token = self.tokens[token_str]\n",
+ "\n",
+ "        # Add the token to the document\n",
+ "        cur_doc.tokens.append(token)\n",
+ "\n",
+ "        # Link the document to the token if not already done\n",
+ "        if cur_doc.doc_id not in token.docs:\n",
+ "            token.docs.append(cur_doc.doc_id)\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    def extract_tokens(self) -> None:\n",
+ "        \"\"\"\n",
+ "        Extract the tokens from the text of the documents. In practice, for each document, the method\n",
+ "        does the following steps:\n",
+ "\n",
+ "        1. The text is transformed to lowercase.\n",
+ "\n",
+ "        2. The text is tokenised.\n",
+ "\n",
+ "        3. Stopwords and punctuation are removed.\n",
+ "\n",
+ "        4. The remaining tokens are stemmed.\n",
+ "\n",
+ "        The method works incrementally. Once a document is treated, it will not be re-treated in successive\n",
+ "        calls.\n",
+ "        \"\"\"\n",
+ "\n",
+ "        # @COMPLETE : create a stemmer\n",
+ "        # One possible completion: NLTK's English Snowball stemmer.\n",
+ "        stemmer = SnowballStemmer(\"english\")\n",
+ "        for cur_doc in self.docs:\n",
+ "\n",
+ "            if cur_doc.tokens is not None:\n",
+ "                continue\n",
+ "            cur_doc.tokens = []\n",
+ "            text = cur_doc.text\n",
+ "\n",
+ "            # @COMPLETE : get text to lowercase\n",
+ "            text = text.lower()\n",
+ "            for extracted_token in nltk.word_tokenize(text):\n",
+ "\n",
+ "                # @COMPLETE : Retain only the stem of non-stopword, non-punctuation tokens.\n",
+ "                # One possible completion: isalnum() filters out punctuation-only tokens.\n",
+ "                if extracted_token in self.stopwords or not extracted_token.isalnum():\n",
+ "                    continue\n",
+ "                token_str = stemmer.stem(extracted_token)\n",
+ "                self.add_token(cur_doc, token_str)\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    def build_tf_idf(self) -> None:\n",
+ "        \"\"\"\n",
+ "        Build the vectors of the corpus using the TF-IDF approach.\n",
+ "        \"\"\"\n",
+ "\n",
+ "        vectors = []\n",
+ "        self.extract_tokens()\n",
+ "\n",
+ "        # Step 1: For each document, compute the relative frequencies of each token (TF).\n",
+ "        for cur_doc in self.docs:\n",
+ "            vector = dict()  # Dictionary representing a vector of pairs (token_id,nb_occurrences)\n",
+ "            nb_occurrences = 0\n",
+ "            # @COMPLETE : calculate a vector TF for each document and append to vectors\n",
+ "            # One possible completion: count occurrences, then normalise by the document length.\n",
+ "            for token in cur_doc.tokens:\n",
+ "                vector[token.token_id] = vector.get(token.token_id, 0.0) + 1.0\n",
+ "                nb_occurrences += 1\n",
+ "            if nb_occurrences > 0:\n",
+ "                for token_id in vector:\n",
+ "                    vector[token_id] /= float(nb_occurrences)\n",
+ "            vectors.append(vector)\n",
+ "\n",
+ "        # Step 2: Build the TF-IDF vectors by multiplying the relative frequencies by the IDF factor.\n",
+ "        self.nb_dims = self.n_tokens\n",
+ "        for cur_doc in self.docs:\n",
+ "            cur_doc.vector = numpy.zeros(shape=self.nb_dims)\n",
+ "            # @COMPLETE : calculate a vector TF-IDF and store it to cur_doc.vector\n",
+ "            # Hint : make use of the \"get_idf()\" method of the Token class.\n",
+ "            for token_id, tf in vectors[cur_doc.doc_id].items():\n",
+ "                cur_doc.vector[token_id] = tf * self.ids[token_id].get_idf(len(self.docs))\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    def build_doc2vec(self) -> None:\n",
+ "        \"\"\"\n",
+ "        Build the vectors using the doc2vec approach.\n",
+ "        \"\"\"\n",
+ "\n",
+ "        self.extract_tokens()\n",
+ "        corpus = []\n",
+ "        for doc in self.docs:\n",
+ "            tokens = []\n",
+ "            for token in doc.tokens:\n",
+ "                tokens.append(token.token)\n",
+ "            corpus.append(tokens)\n",
+ "\n",
+ "        corpus = [\n",
+ "            TaggedDocument(words, ['d{}'.format(idx)])\n",
+ "            for idx, words in enumerate(corpus)\n",
+ "        ]\n",
+ "\n",
+ "        # @COMPLETE : create a doc2vec model with 5 dimensions and min_count=1\n",
+ "        # and add the resulting vector (model.dv) in self.docs[i].vector.\n",
+ "        # One possible completion (gensim 4.x API):\n",
+ "        model = Doc2Vec(corpus, vector_size=5, min_count=1)\n",
+ "        self.nb_dims = model.vector_size\n",
+ "        for doc in self.docs:\n",
+ "            doc.vector = model.dv['d{}'.format(doc.doc_id)]\n"
+ ] + },
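+ { + "cell_type": "markdown", + "metadata": {}, + "source": [
+ "As a smoke test for the classes above (a small illustration, not one of the exercises), the next cell builds a tiny throw-away corpus of three invented one-line documents and vectorizes it with TF-IDF. It assumes the `@COMPLETE` sections above have been filled in; the completions sketched there are one possible solution."
+ ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [
+ "# Smoke test on a tiny hand-made corpus.\n",
+ "toy_corpus = DocCorpus()\n",
+ "toy_corpus.add_docs([\n",
+ "    \"The hobbit lives in a hole.\",\n",
+ "    \"The wizard visits the hobbit.\",\n",
+ "    \"The ring must be destroyed.\",\n",
+ "])\n",
+ "toy_corpus.set_method(\"TF-IDF\")\n",
+ "toy_corpus.build_vectors()\n",
+ "print(toy_corpus.get_term_document_matrix().shape)  # (3, nb of distinct stems)"
+ ] + },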
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [
+ "class TokenSorter:\n",
+ "    \"\"\"\n",
+ "    Class to sort a list of tokens by a certain value.\n",
+ "\n",
+ "    |\n",
+ "\n",
+ "    The instance attributes are:\n",
+ "\n",
+ "    tokens:\n",
+ "        List of tokens to sort.\n",
+ "    reverse:\n",
+ "        Must the tokens be ranked in descending (True) or ascending (False) order?\n",
+ "    \"\"\"\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    class TokenRef:\n",
+ "        \"\"\"\n",
+ "        Class to represent a reference to a token.\n",
+ "        \"\"\"\n",
+ "\n",
+ "        # ---------------------------------------------------------------------\n",
+ "        token: Token\n",
+ "        value: float\n",
+ "\n",
+ "        # ---------------------------------------------------------------------\n",
+ "        def __init__(self, token: Token, value: float):\n",
+ "            self.token = token\n",
+ "            self.value = value\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    tokens: List[TokenRef]\n",
+ "    reverse: bool\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    def __init__(self):\n",
+ "        \"\"\"\n",
+ "        Constructor.\n",
+ "        \"\"\"\n",
+ "\n",
+ "        self.tokens = []\n",
+ "        self.reverse = False\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    def build(self, tokens, value, reverse: bool) -> None:\n",
+ "        \"\"\"\n",
+ "        Build the list of tokens to sort.\n",
+ "\n",
+ "        :param tokens: Tokens to sort.\n",
+ "        :param value: Lambda function that will be used to build the value associated to each token to sort.\n",
+ "        :param reverse: Should the tokens be sorted in descending (True) or ascending (False) order.\n",
+ "        \"\"\"\n",
+ "\n",
+ "        for token in tokens.values():\n",
+ "            self.add_token(token, value(token))\n",
+ "        self.reverse = reverse\n",
+ "        self.sort()\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    def add_token(self, token: Token, value: float) -> None:\n",
+ "        \"\"\"\n",
+ "        Add a token to the list.\n",
+ "\n",
+ "        :param token: Token to add.\n",
+ "        :param value: Value that will be used to sort the tokens.\n",
+ "        \"\"\"\n",
+ "\n",
+ "        self.tokens.append(TokenSorter.TokenRef(token=token, value=float(value)))\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    def sort(self) -> None:\n",
+ "        \"\"\"\n",
+ "        Sort the tokens.\n",
+ "        \"\"\"\n",
+ "\n",
+ "        self.tokens.sort(reverse=self.reverse, key=lambda token: token.value)\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    def get_token(self, pos: int) -> str:\n",
+ "        \"\"\"\n",
+ "        Get a given token of the list.\n",
+ "\n",
+ "        :param pos: Position of the token in the list.\n",
+ "        :return: String representing the token.\n",
+ "        \"\"\"\n",
+ "\n",
+ "        return self.tokens[pos].token.token\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    def get_value(self, pos: int) -> float:\n",
+ "        \"\"\"\n",
+ "        Get the value of a given token in the list.\n",
+ "\n",
+ "        :param pos: Position of the token in the list.\n",
+ "        :return: Value of the token used for the sorting.\n",
+ "        \"\"\"\n",
+ "\n",
+ "        return self.tokens[pos].value\n",
+ "\n",
+ "    # -------------------------------------------------------------------------\n",
+ "    def print(self, title: str, nb: int) -> None:\n",
+ "        \"\"\"\n",
+ "        Print a given number of top-ranked tokens with a title and their values.\n",
+ "\n",
+ "        :param title: Title to print.\n",
+ "        :param nb: Number of tokens to print.\n",
+ "        \"\"\"\n",
+ "        print(title)\n",
+ "        if nb > len(self.tokens):\n",
+ "            nb = len(self.tokens)\n",
+ "        for i in range(0, nb):\n",
+ "            print(f\"    Token: {self.get_token(i)} ({self.get_value(i):.2f})\")\n"
+ ] + },
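+ { + "cell_type": "markdown", + "metadata": {}, + "source": [
+ "Continuing the smoke test (it assumes the `toy_corpus` cell above has been run): rank the toy tokens by the number of documents in which they appear."
+ ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [
+ "# Most widespread stems of the toy corpus (by document count).\n",
+ "toy_sorter = TokenSorter()\n",
+ "toy_sorter.build(tokens=toy_corpus.tokens, value=lambda token: len(token.docs), reverse=True)\n",
+ "toy_sorter.print(title=\"Toy corpus - most widespread tokens (Nb Documents):\", nb=3)"
+ ] + },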
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [
+ "### Functions"
+ ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [
+ "def print_matrix(name: str, matrix: numpy.ndarray) -> None:\n",
+ "    \"\"\"\n",
+ "    Simple method to print a little matrix nicely.\n",
+ "\n",
+ "    :param name: Name of the matrix.\n",
+ "    :param matrix: Matrix to print.\n",
+ "    \"\"\"\n",
+ "    nb_lines = matrix.shape[0]\n",
+ "    nb_cols = matrix.shape[1]\n",
+ "    spaces = \" \" * (len(name) + 1)\n",
+ "    title_line = nb_lines % 2\n",
+ "    for i in range(0, nb_lines):\n",
+ "        if i == title_line:\n",
+ "            print(name + \"=\", end=\"\")\n",
+ "        else:\n",
+ "            print(spaces, end=\"\")\n",
+ "        print(\"( \", end=\"\")\n",
+ "        for j in range(0, nb_cols):\n",
+ "            print(\"{:.3f}\".format(matrix[i, j]), end=\" \")\n",
+ "        print(\")\")\n",
+ "\n",
+ "\n",
+ "def create_corpus(path: str) -> DocCorpus:\n",
+ "    \"\"\"\n",
+ "    From a list of docs located at path, create a corpus.\n",
+ "\n",
+ "    A DocCorpus object is built and populated with all the \"doc\" documents\n",
+ "    located at the path.\n",
+ "\n",
+ "    :param path: String description of the path.\n",
+ "\n",
+ "    :return: DocCorpus representing the corpus of all the documents.\n",
+ "    \"\"\"\n",
+ "    # Instantiate a DocCorpus object\n",
+ "    the_corpus = DocCorpus()\n",
+ "\n",
+ "    # Look for all the files in the given directory\n",
+ "    files = []\n",
+ "    for (_, _, file_names) in walk(path):\n",
+ "        files.extend(file_names)\n",
+ "        break\n",
+ "\n",
+ "    # Add the content to the corpus\n",
+ "    for doc_to_analyse in files:\n",
+ "        # Treat only files beginning with \"doc\"\n",
+ "        if doc_to_analyse[:3] != \"doc\":\n",
+ "            continue\n",
+ "\n",
+ "        filename = path + sep + doc_to_analyse\n",
+ "        with open(file=filename, mode=\"r\", encoding=\"utf-8\") as file:\n",
+ "            the_corpus.add_doc(file.read(), filename)\n",
+ "\n",
+ "    return the_corpus\n"
+ ] + },
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [
+ "# Exercise 6: token extraction and dimensionality reduction\n",
+ "Complete the implementation of the \"extract_tokens\" method from the DocCorpus class.\n",
+ "The method should:\n",
+ "- extract word tokens\n",
+ "- remove the case (lowercase only)\n",
+ "- remove punctuation\n",
+ "- remove stopwords\n",
+ "- perform stemming"
+ ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [
+ "the_corpus = create_corpus(\"./docs\")  # Create a corpus instance\n",
+ "the_corpus.extract_tokens()  # Extract the tokens from the corpus\n",
+ "\n",
+ "\n",
+ "# -------------------------------------------------------------------------------------------------\n",
+ "# Sort the tokens by the number of documents in which they appear\n",
+ "sort_by_docs = TokenSorter()\n",
+ "sort_by_docs.build(tokens=the_corpus.tokens, value=lambda token: len(token.docs), reverse=True)\n",
+ "sort_by_docs.print(title=\"Most appearing tokens (Nb Documents):\", nb=5)\n",
+ "\n",
+ "# Sort the tokens by their IDF factor\n",
+ "sort_by_idf = TokenSorter()\n",
+ "sort_by_idf.build(tokens=the_corpus.tokens, value=lambda token: token.get_idf(len(the_corpus.docs)), reverse=True)\n",
+ "sort_by_idf.print(title=\"Most discriminant tokens (idf):\", nb=5)"
+ ] + },
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [
+ "# Exercise 7: vectorization with TF-IDF\n",
+ "Complete the implementation of the TF-IDF method from the DocCorpus class."
+ ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [
+ "the_corpus.build_tf_idf()\n",
+ "term_document_matrix = the_corpus.get_term_document_matrix()\n",
+ "print_matrix(\"Docs\", term_document_matrix)"
+ ] + },
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [
+ "# Exercise 8: vectorization with Doc2Vec\n",
+ "Complete the implementation of the Doc2Vec method from the DocCorpus class."
+ ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [
+ "the_corpus.build_doc2vec()\n",
+ "term_document_matrix = the_corpus.get_term_document_matrix()\n",
+ "print_matrix(\"Docs\", term_document_matrix)"
+ ] + },
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [
+ "# Exercise 9: corpus analysis using Cosine Similarity\n",
+ "Display the similarity between every pair of documents.\n",
+ "Which movies are close to each other? Which method works best? Why?"
+ ] + },
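+ { + "cell_type": "markdown", + "metadata": {}, + "source": [
+ "Recall that the cosine similarity of two vectors u and v is (u · v) / (||u|| ||v||): it is 1 when the vectors point in the same direction and 0 when they are orthogonal. The quick check below (an added illustration on hand-made vectors) shows both extremes."
+ ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [
+ "# Sanity check of cosine_similarity, which expects 2-dimensional inputs.\n",
+ "u = numpy.array([[1.0, 2.0, 0.0]])\n",
+ "v = numpy.array([[2.0, 4.0, 0.0]])\n",
+ "w = numpy.array([[0.0, 0.0, 5.0]])\n",
+ "print(cosine_similarity(u, v)[0, 0])  # 1.0 : same direction\n",
+ "print(cosine_similarity(u, w)[0, 0])  # 0.0 : orthogonal"
+ ] + },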
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [
+ "def corpus_analysis(corpus: DocCorpus, method: str) -> None:\n",
+ "    \"\"\"\n",
+ "    Calculate and display the cosine similarity between every pair of documents for a given vectorization method.\n",
+ "\n",
+ "    :param corpus: Corpus to analyse.\n",
+ "    :param method: Method to use for the analysis.\n",
+ "    \"\"\"\n",
+ "    print(\"\\n---- \" + method + \" ----\")\n",
+ "    corpus.set_method(method)\n",
+ "    corpus.build_vectors()\n",
+ "    matrix = corpus.get_term_document_matrix()\n",
+ "\n",
+ "    # @COMPLETE : compute cosine similarity between every vector of the matrix\n",
+ "    for i in range(0, len(corpus.docs) - 1):\n",
+ "        # Take a vector and build the two-dimensional matrix needed by cosine_similarity\n",
+ "        vec1 = matrix[i].reshape(1, -1)\n",
+ "\n",
+ "        for j in range(i + 1, len(corpus.docs)):\n",
+ "            # Take a vector and build the two-dimensional matrix needed by cosine_similarity\n",
+ "            vec2 = matrix[j].reshape(1, -1)\n",
+ "\n",
+ "            # Retrieve the names of the docs\n",
+ "            url_i = corpus.docs[i].url\n",
+ "            url_j = corpus.docs[j].url\n",
+ "\n",
+ "            # Compute and display the similarity\n",
+ "            print(\"\\tSim(doc\" + url_i + \",doc\" + url_j + \")=\" + \"{:.3f}\".format(cosine_similarity(vec1, vec2)[0, 0]))\n",
+ "\n",
+ "\n",
+ "# -----------------------------------------------------------------------------------------------------------\n",
+ "corpus_analysis(corpus=the_corpus, method=\"TF-IDF\")\n",
+ "corpus_analysis(corpus=the_corpus, method=\"Doc2Vec\")\n"
+ ] + },
+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }
+ ],
+ "metadata": {
+ "kernelspec": { + "display_name": "Web Mining", + "language": "python", + "name": "web-mining" + },
+ "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.9" + }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}