If you work on NLP tasks, you will come across, and often need, many of the Python libraries imported below. The imports are grouped by topic, and a short usage sketch follows the list.
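# Tokenization and stopword removal with NLTK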
import nltk
# tokenizer
nltk.download("punkt")
# stop words
nltk.download("stopwords")
from nltk.tokenize import TreebankWordTokenizer
from nltk.tokenize import WordPunctTokenizer
from nltk.tokenize import RegexpTokenizer
from nltk.tokenize import sent_tokenize
from nltk.corpus import stopwords
import os.path
import re
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
import nltk
from nltk.tokenize import TreebankWordTokenizer
from nltk.tokenize import WordPunctTokenizer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
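# N-grams, collocations, and frequency/probability models (Gutenberg corpus)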
import nltk
from nltk.util import ngrams
from collections import Counter
from nltk.collocations import *
from nltk.tokenize import word_tokenize
from nltk.probability import ConditionalFreqDist, FreqDist
from nltk.probability import ConditionalProbDist, LaplaceProbDist
from nltk.corpus import stopwords
from nltk.metrics import TrigramAssocMeasures
from nltk.tokenize import TreebankWordTokenizer
#from nltk.probability import *
import math
from nltk.metrics import TrigramAssocMeasures
from nltk.metrics import BigramAssocMeasures
import math
import random
from collections import Counter, defaultdict
import nltk
nltk.download("gutenberg")
from nltk.corpus import gutenberg
from nltk.util import ngrams
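# Word embeddings and simple neural models with Keras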
import csv
from numpy import array
from numpy import asarray
from numpy import zeros
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Embedding
import matplotlib.pyplot as plt
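# Part-of-speech tagging and named-entity recognition (Treebank, Stanford NER, MaxEnt)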
import nltk
nltk.download("averaged_perceptron_tagger")
import nltk
from nltk.corpus import treebank
import nltk
from nltk.tag import StanfordNERTagger
from nltk.metrics.scores import accuracy
import nltk
from nltk.corpus import treebank
from nltk.classify import maxent
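# Probability distributions and tagger interfaces (Python 2/3 compatibility imports from older NLTK source)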
from __future__ import print_function, unicode_literals, division
import re
import itertools
from six.moves import map, zip
from nltk.probability import (
FreqDist,
ConditionalFreqDist,
ConditionalProbDist,
DictionaryProbDist,
DictionaryConditionalProbDist,
LidstoneProbDist,
MutableProbDist,
MLEProbDist,
RandomProbDist,
)
from nltk.metrics import accuracy
from nltk.util import LazyMap, unique_list
from nltk.compat import python_2_unicode_compatible
from nltk.tag.api import TaggerI
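# Gaussian mixture models with scikit-learn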
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import StratifiedKFold
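# Feed-forward neural networks with TensorFlow and Keras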
import tensorflow as tf
import numpy as np
import random
import keras
from keras.layers import Dense
from keras import models
from keras import layers
from keras.layers import Activation, Dense
from keras import optimizers
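# Summarization with gensim and topic modeling (LDA, LSA/TruncatedSVD) with scikit-learn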
from gensim.summarization import summarize
from gensim.summarization import keywords
from sklearn.datasets import fetch_20newsgroups
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer
import string
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition import TruncatedSVD
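# Tagger models, tagset help, and tagged corpora (Treebank, Brown, Universal tagset)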
nltk.download("averaged_perceptron_tagger")
nltk.download("tagsets")
nltk.help.upenn_tagset("JJS")
nltk.download("treebank")
nltk.download("brown")
nltk.download("universal_tagset")
import nltk
from nltk.corpus import treebank
import nltk
from nltk.corpus import treebank
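# Web scraping and term counting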
from bs4 import BeautifulSoup # For HTML parsing
import urllib # Website connections
import re # Regular expressions
from time import sleep # To prevent overwhelming the server between connections
from collections import Counter # Keep track of our term counts
from nltk.corpus import stopwords # Filter out stopwords, such as ‘the’, ‘or’, ‘and’
import pandas as pd # For converting results to a dataframe and bar chart plots
import numpy as np
import copy
%matplotlib inline
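# Document clustering with TF-IDF features and Gaussian mixture models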
from sklearn.mixture import GaussianMixture
from sklearn.feature_extraction.text import TfidfVectorizer
import operator
from sklearn.datasets import load_files
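# Text classification with bag-of-words features and Multinomial Naive Bayes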
import nltk
import string
from sklearn.feature_extraction.text import CountVectorizer
from nltk.stem import PorterStemmer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import precision_recall_fscore_support
import pandas as pd
import nltk
from nltk.corpus import treebank
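# Keras models with one-hot encoded text and an Embedding layer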
import tensorflow as tf
import keras
from keras.layers import Dense
from keras import models
from keras import Sequential
from numpy import array
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.embeddings import Embedding
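# Text classification combining scikit-learn preprocessing with Keras embeddings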
from sklearn.datasets import load_files
import nltk
import string
from sklearn.feature_extraction.text import CountVectorizer
from nltk.stem import PorterStemmer
from keras import models
from numpy import array
from keras.preprocessing.text import one_hot
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers.embeddings import Embedding
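# Sequence labeling with bidirectional LSTMs and NER evaluation via seqeval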
from keras.preprocessing.text import one_hot
from numpy import array
from numpy import asarray
from numpy import zeros
import pandas as pd
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Embedding
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from keras.models import Model, Input
from keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional
from keras.layers.merge import add
from seqeval.metrics import precision_score, recall_score, f1_score, classification_report
from keras.models import Model, Input
from keras.layers import LSTM, Embedding, Dense, TimeDistributed, Dropout, Bidirectional
from keras.layers.merge import add
from keras import models, layers
from numpy import zeros
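To show how a few of these pieces fit together, here is a minimal sketch of a typical NLTK preprocessing flow: tokenize a sentence, drop stopwords, and count bigrams. The sample sentence and variable names are only illustrative.
# Minimal sketch: tokenization, stopword removal, and bigram counting with NLTK
import nltk
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.util import ngrams
from collections import Counter

nltk.download("punkt")
nltk.download("stopwords")

text = "Natural language processing turns raw text into useful data."  # illustrative input
tokens = [t.lower() for t in word_tokenize(text) if t.isalpha()]  # keep alphabetic tokens only
filtered = [t for t in tokens if t not in stopwords.words("english")]  # remove stopwords
bigram_counts = Counter(ngrams(filtered, 2))  # count adjacent word pairs
print(filtered)
print(bigram_counts.most_common(3))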
Note: Older short notes from this site are posted on Medium: https://medium.com/@SayedAhmedCanada
Sayed Ahmed
BSc. Eng. in Comp. Sc. & Eng. (BUET)
MSc. in Comp. Sc. (U of Manitoba, Canada)
MSc. in Data Science and Analytics (Ryerson University, Canada)
Linkedin: https://ca.linkedin.com/in/sayedjustetc
Blog: http://Bangla.SaLearningSchool.com, http://SitesTree.com
Online and offline training: http://Training.SitesTree.com (sometimes free or low cost)
Facebook Group/Form to discuss (Q & A): https://www.facebook.com/banglasalearningschool
Our free or paid training events: https://www.facebook.com/justetcsocial
Get access to courses on Big Data, Data Science, AI, Cloud, Linux, System Admin, Web Development, and related topics. You can also create your own course to sell to others: http://sitestree.com/training/
If you want to support occasional free or low-cost online/offline training, or charitable/non-profit work in the education, health, and social service sectors, you can contribute financially to safoundation at salearningschool.com using PayPal or credit card (via http://sitestree.com/training/enrol/index.php?id=114).