# ==============================================================================
# PROJECT: DEPRESSION-DETECTION-USING-TWEETS
# AUTHORS: AMEY THAKUR & MEGA SATISH
# GITHUB (AMEY): https://github.com/Amey-Thakur
# GITHUB (MEGA): https://github.com/msatmod
# REPOSITORY: https://github.com/Amey-Thakur/DEPRESSION-DETECTION-USING-TWEETS
# RELEASE DATE: June 5, 2022
# LICENSE: MIT License
# DESCRIPTION: Core NLP logic for cleaning and normalizing tweet text.
# ==============================================================================
import re
import warnings
import ftfy
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
# Suppress non-critical warnings to keep the processing log readable
warnings.filterwarnings("ignore")
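# The tokenizer, stop-word list, and lemmatizer below depend on NLTK data
# packages. A minimal bootstrap is sketched here on the assumption that the
# corpora are not already present; environments that pre-install the data
# can drop this loop.
for resource in ("punkt", "stopwords", "wordnet", "omw-1.4"):
    nltk.download(resource, quiet=True)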
# Dictionary of standard English contractions for lexical expansion
# This facilitates uniform tokenization by resolving ambiguous shorthand
CONTRACTIONS_LIST = {
"ain't": "am not",
"aren't": "are not",
"can't": "cannot",
"can't've": "cannot have",
"'cause": "because",
"could've": "could have",
"couldn't": "could not",
"couldn't've": "could not have",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"hadn't": "had not",
"hadn't've": "had not have",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he would",
"he'd've": "he would have",
"he'll": "he will",
"he'll've": "he will have",
"he's": "he is",
"how'd": "how did",
"how'd'y": "how do you",
"how'll": "how will",
"how's": "how is",
"I'd": "I would",
"I'd've": "I would have",
"I'll": "I will",
"I'll've": "I will have",
"I'm": "I am",
"I've": "I have",
"isn't": "is not",
"it'd": "it had",
"it'd've": "it would have",
"it'll": "it will",
"it'll've": "it will have",
"it's": "it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"mightn't've": "might not have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"o'clock": "of the clock",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"shan't": "shall not",
"sha'n't": "shall not",
"shan't've": "shall not have",
"she'd": "she would",
"she'd've": "she would have",
"she'll": "she will",
"she'll've": "she will have",
"she's": "she is",
"should've": "should have",
"shouldn't": "should not",
"shouldn't've": "should not have",
"so've": "so have",
"so's": "so is",
"that'd": "that would",
"that'd've": "that would have",
"that's": "that is",
"there'd": "there had",
"there'd've": "there would have",
"there's": "there is",
"they'd": "they would",
"they'd've": "they would have",
"they'll": "they will",
"they'll've": "they will have",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we had",
"we'd've": "we would have",
"we'll": "we will",
"we'll've": "we will have",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what will",
"what'll've": "what will have",
"what're": "what are",
"what's": "what is",
"what've": "what have",
"when's": "when is",
"when've": "when have",
"where'd": "where did",
"where's": "where is",
"where've": "where have",
"who'll": "who will",
"who'll've": "who will have",
"who's": "who is",
"who've": "who have",
"why's": "why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"y'alls": "you alls",
"y'all'd": "you all would",
"y'all'd've": "you all would have",
"y'all're": "you all are",
"y'all've": "you all have",
"you'd": "you had",
"you'd've": "you would have",
"you'll": "you you will",
"you'll've": "you you will have",
"you're": "you are",
"you've": "you have"
}
# Lookup table keyed by lowercase form: the cleaning pipeline lowercases
# tweets before expansion, so capitalized keys such as "I'd" would
# otherwise never match
CONTRACTIONS_LOOKUP = {k.lower(): v.lower() for k, v in CONTRACTIONS_LIST.items()}
# Pre-compiled regular expression for efficient contraction matching.
# Keys are escaped and sorted longest-first so that extended forms such as
# "can't've" match before their shorter prefixes such as "can't".
CONTRACTIONS_RE = re.compile(
    '(%s)' % '|'.join(
        re.escape(key)
        for key in sorted(CONTRACTIONS_LOOKUP, key=len, reverse=True)
    ),
    flags=re.IGNORECASE,
)
def expand_contractions(text: str, contractions_re=CONTRACTIONS_RE) -> str:
    """
    Identifies and replaces English contractions within the input text
    using a predefined mapping.

    Args:
        text (str): The raw text potentially containing contractions.
        contractions_re: Compiled regex pattern for matching contractions.

    Returns:
        str: Expanded lexical form of the input text.
    """
    def replace(match):
        return CONTRACTIONS_LOOKUP[match.group(0).lower()]
    return contractions_re.sub(replace, text)
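# Illustrative check (hypothetical sample text, not drawn from the dataset):
#   >>> expand_contractions("i can't do this, it's too hard")
#   'i cannot do this, it is too hard'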
def tweets_cleaner(tweet: str) -> str:
    """
    Executes a comprehensive analytical pipeline for the linguistic
    normalization of microblogging content (Tweets).

    Analytical Methodology:
        1. Case Normalization: Lowercasing to ensure uniformity.
        2. Relevance Filtering: Tweets consisting solely of a URL bypass
           the remaining stages and are returned lowercased.
        3. Noise Reduction: Removal of mentions, hashtags, emoji tags,
           and picture links.
        4. Encoding Correction: Fixing malformed Unicode sequences (via ftfy).
        5. Lexical Expansion: Resolution of linguistic contractions.
        6. Punctuation Removal: Elimination of non-alphanumeric noise.
        7. Morphological Analysis: Removal of high-frequency stop words and
           application of WordNet-based lemmatization to reduce words to
           their base semantic roots.

    Args:
        tweet (str): Raw input tweet captured from the platform.

    Returns:
        str: Sanitized and normalized string ready for vectorization.
    """
    # Phase 1: Case uniformity
    tweet = tweet.lower()
    # Phase 2: Structural relevance check (URL-only tweets skip cleaning)
    if re.match(r"(\w+://\S+)", tweet) is None:
        # Phase 3: Targeted entity removal (Twitter-specific artifacts)
        tweet = ' '.join(
            re.sub(
                r"(@[A-Za-z0-9]+)|(#[A-Za-z0-9]+)|(<Emoji:.*>)|(pic\.twitter\.com/.*)",
                " ",
                tweet
            ).split()
        )
        # Phase 4: Resolution of malformed character encodings
        tweet = ftfy.fix_text(tweet)
        # Phase 5: Contraction expansion for token consistency
        tweet = expand_contractions(tweet)
        # Phase 6: Punctuation and non-essential character pruning
        tweet = ' '.join(re.sub(r"[^0-9A-Za-z \t]", " ", tweet).split())
        # Phase 7: Stop-word filtration and lemmatization
        # Methodology: reducing inflectional forms to a common base (lemma)
        stop_words_set = set(stopwords.words('english'))
        tokens = nltk.word_tokenize(tweet)
        lemmatizer_engine = WordNetLemmatizer()
        filtered_lexicon = [
            lemmatizer_engine.lemmatize(word)
            for word in tokens
            if word not in stop_words_set
        ]
        # Phase 8: Re-assembly of the normalized semantic string
        tweet = ' '.join(filtered_lexicon)
    return tweet
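# A minimal smoke test sketching how the pipeline is meant to be invoked.
# The sample tweet is hypothetical, not drawn from the project's dataset,
# and the exact output depends on the installed NLTK corpora.
if __name__ == "__main__":
    sample = "@friend I can't stop crying... feeling hopeless #sad pic.twitter.com/abc123"
    print(tweets_cleaner(sample))
    # Expected output (approximately): "cannot stop crying feeling hopeless"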