Search is not available for this dataset
text stringlengths 75 104k |
|---|
def quadratic_weighted_kappa(rater_a, rater_b, min_rating=None, max_rating=None):
    """
    Compute the quadratic weighted kappa agreement between two raters.

    rater_a, rater_b - equal-length sequences of scores (coercible to int)
    min_rating, max_rating - optional rating-scale bounds; when omitted they
    are inferred from the combined data of both raters.
    Returns a float kappa value; 1.0 when only one rating level exists.
    """
    assert len(rater_a) == len(rater_b)
    scores_a = [int(x) for x in rater_a]
    scores_b = [int(x) for x in rater_b]
    if min_rating is None:
        min_rating = min(scores_a + scores_b)
    if max_rating is None:
        max_rating = max(scores_a + scores_b)
    conf_mat = confusion_matrix(scores_a, scores_b, min_rating, max_rating)
    num_ratings = len(conf_mat)
    num_scored_items = float(len(scores_a))
    hist_a = histogram(scores_a, min_rating, max_rating)
    hist_b = histogram(scores_b, min_rating, max_rating)
    # A one-level scale offers no room for disagreement: kappa is 1 by definition.
    if num_ratings <= 1:
        return 1.0
    numerator = 0.0
    denominator = 0.0
    for i in range(num_ratings):
        for j in range(num_ratings):
            # Expected count under independence of the two raters.
            expected = hist_a[i] * hist_b[j] / num_scored_items
            # Quadratic disagreement weight, normalized to [0, 1].
            weight = pow(i - j, 2.0) / pow(num_ratings - 1, 2.0)
            numerator += weight * conf_mat[i][j] / num_scored_items
            denominator += weight * expected / num_scored_items
    return 1.0 - numerator / denominator
def confusion_matrix(rater_a, rater_b, min_rating=None, max_rating=None):
    """
    Generate a confusion matrix between rater_a and rater_b.
    A confusion matrix shows how often 2 values agree and disagree.
    See quadratic_weighted_kappa for argument descriptions.
    Returns a num_ratings x num_ratings list of lists of counts.
    """
    assert len(rater_a) == len(rater_b)
    rater_a = [int(a) for a in rater_a]
    rater_b = [int(b) for b in rater_b]
    # Fix: infer missing bounds BEFORE coercing them to int -- the original
    # called int(None) when the defaults were used, raising TypeError.
    # (Bounds are inferred from rater_a only, matching the original intent.)
    if min_rating is None:
        min_rating = min(rater_a)
    if max_rating is None:
        max_rating = max(rater_a)
    min_rating = int(min_rating)
    max_rating = int(max_rating)
    num_ratings = int(max_rating - min_rating + 1)
    conf_mat = [[0 for _ in range(num_ratings)]
                for _ in range(num_ratings)]
    for a, b in zip(rater_a, rater_b):
        conf_mat[a - min_rating][b - min_rating] += 1
    return conf_mat
def histogram(ratings, min_rating=None, max_rating=None):
    """
    Count how often each rating on the scale occurs.
    ratings is a list of scores (coercible to int)
    min_rating / max_rating optionally fix the scale bounds; otherwise they
    are taken from the data.
    Returns a list of frequencies, one slot per rating level.
    """
    values = [int(r) for r in ratings]
    if min_rating is None:
        min_rating = min(values)
    if max_rating is None:
        max_rating = max(values)
    # One zeroed slot per rating level across the (inclusive) scale.
    counts = [0] * int(max_rating - min_rating + 1)
    for v in values:
        counts[v - min_rating] += 1
    return counts
def get_wordnet_syns(word):
    """
    Utilize wordnet (installed with nltk) to get synonyms for words
    word is the input word
    returns a list of unique synonyms, lower-cased, with underscores
    replaced by spaces
    """
    synonyms = []
    regex = r"_"
    pat = re.compile(regex)
    synset = nltk.wordnet.wordnet.synsets(word)
    for ss in synset:
        # NOTE(review): lemma_names is read as an attribute; in newer nltk
        # releases it is a method (lemma_names()) -- confirm the targeted version.
        for swords in ss.lemma_names:
            # WordNet joins multi-word lemmas with underscores; restore spaces.
            synonyms.append(pat.sub(" ", swords.lower()))
    # f7 presumably deduplicates while preserving order (docstring says
    # "unique synonyms") -- project helper, verify.
    synonyms = f7(synonyms)
    return synonyms
def get_separator_words(toks1):
    """
    Finds the words that separate a list of tokens from a background corpus
    Basically this generates a list of informative/interesting words in a set
    toks1 is a list of words
    Returns a list of separator words: non-stopwords longer than 5 characters
    that are significantly over-represented relative to the background corpus
    """
    tab_toks1 = nltk.FreqDist(word.lower() for word in toks1)
    # Background-corpus frequencies are expensive to compute, so they are
    # cached on disk after the first run.
    if(os.path.isfile(ESSAY_COR_TOKENS_PATH)):
        toks2 = pickle.load(open(ESSAY_COR_TOKENS_PATH, 'rb'))
    else:
        essay_corpus = open(ESSAY_CORPUS_PATH).read()
        essay_corpus = sub_chars(essay_corpus)
        toks2 = nltk.FreqDist(word.lower() for word in nltk.word_tokenize(essay_corpus))
        pickle.dump(toks2, open(ESSAY_COR_TOKENS_PATH, 'wb'))
    sep_words = []
    for word in tab_toks1.keys():
        tok1_present = tab_toks1[word]
        # Only consider words seen more than twice in the input.
        if(tok1_present > 2):
            # _N is FreqDist's private total-sample count -- version sensitive.
            tok1_total = tab_toks1._N
            tok2_present = toks2[word]
            tok2_total = toks2._N
            # presumably a Fisher's exact test p-value (external pvalue helper)
            # -- TODO confirm against the imported package.
            fish_val = pvalue(tok1_present, tok2_present, tok1_total, tok2_total).two_tail
            # Keep words that are statistically significant AND at least twice
            # as frequent (proportionally) as in the background corpus.
            if(fish_val < .001 and tok1_present / float(tok1_total) > (tok2_present / float(tok2_total)) * 2):
                sep_words.append(word)
    # Drop stopwords and short words from the final list.
    sep_words = [w for w in sep_words if not w in nltk.corpus.stopwords.words("english") and len(w) > 5]
    return sep_words
def encode_plus(s):
    """
    URL-encode every plus sign in the input string as "%2B".
    s is the input string.
    Returns the string with all '+' characters replaced.
    """
    plus_pattern = re.compile(r"\+")
    return plus_pattern.sub("%2B", s)
def getMedian(numericValues):
    """
    Gets the median of a list of values.
    numericValues - a non-empty sequence of numbers
    Returns the middle element for odd-length input, or the mean of the two
    middle elements (a float) for even-length input.
    """
    theValues = sorted(numericValues)
    # Fix: use floor division for index arithmetic -- plain '/' produces a
    # float on Python 3, which is not a valid sequence index.
    mid = len(theValues) // 2
    if len(theValues) % 2 == 1:
        return theValues[mid]
    else:
        lower = theValues[mid - 1]
        upper = theValues[mid]
        return (float(lower + upper)) / 2
def initialize_dictionaries(self, e_set, max_feats2 = 200):
    """
    Initializes dictionaries from an essay set object
    Dictionaries must be initialized prior to using this to extract features
    e_set is an input essay set (must have _type == "train")
    max_feats2 caps the number of vocabulary features extracted per dictionary
    returns a confirmation of initialization ("ok")
    Raises util_functions.InputError for non-train or non-essay-set input.
    """
    if(hasattr(e_set, '_type')):
        if(e_set._type == "train"):
            #normal text (unstemmed) useful words/bigrams
            nvocab = util_functions.get_vocab(e_set._text, e_set._score, max_feats2 = max_feats2)
            #stemmed and spell corrected vocab useful words/ngrams
            svocab = util_functions.get_vocab(e_set._clean_stem_text, e_set._score, max_feats2 = max_feats2)
            #dictionary trained on proper vocab
            self._normal_dict = CountVectorizer(ngram_range=(1,2), vocabulary=nvocab)
            #dictionary trained on proper vocab
            self._stem_dict = CountVectorizer(ngram_range=(1,2), vocabulary=svocab)
            self.dict_initialized = True
            #Average spelling errors in set. needed later for spelling detection
            self._mean_spelling_errors=sum(e_set._spelling_errors)/float(len(e_set._spelling_errors))
            self._spell_errors_per_character=sum(e_set._spelling_errors)/float(sum([len(t) for t in e_set._text]))
            #Gets the number and positions of grammar errors
            good_pos_tags,bad_pos_positions=self._get_grammar_errors(e_set._pos,e_set._text,e_set._tokens)
            self._grammar_errors_per_character=(sum(good_pos_tags)/float(sum([len(t) for t in e_set._text])))
            #Generate bag of words features
            bag_feats=self.gen_bag_feats(e_set)
            #Sum of a row of bag of words features (topical words in an essay)
            # NOTE(review): bag_feats[:,:] sums the ENTIRE matrix, not a single
            # row as the comment above suggests -- confirm intended.
            f_row_sum=numpy.sum(bag_feats[:,:])
            #Average index of how "topical" essays are
            self._mean_f_prop=f_row_sum/float(sum([len(t) for t in e_set._text]))
            ret = "ok"
        else:
            raise util_functions.InputError(e_set, "needs to be an essay set of the train type.")
    else:
        raise util_functions.InputError(e_set, "wrong input. need an essay set object")
    return ret
def get_good_pos_ngrams(self):
    """
    Gets a set of gramatically correct part of speech sequences from an input file called essaycorpus.txt
    Returns the set; results computed from the corpus are cached (pickled)
    at NGRAM_PATH. Falls back to a small hard-coded list when neither the
    cache nor the corpus file exists.
    """
    if(os.path.isfile(NGRAM_PATH)):
        # Cached result from a previous run.
        good_pos_ngrams = pickle.load(open(NGRAM_PATH, 'rb'))
    elif os.path.isfile(ESSAY_CORPUS_PATH):
        # Compute the n-grams from the corpus and cache them for next time.
        essay_corpus = open(ESSAY_CORPUS_PATH).read()
        essay_corpus = util_functions.sub_chars(essay_corpus)
        good_pos_ngrams = util_functions.regenerate_good_tokens(essay_corpus)
        pickle.dump(good_pos_ngrams, open(NGRAM_PATH, 'wb'))
    else:
        #Hard coded list in case the needed files cannot be found
        good_pos_ngrams=['NN PRP', 'NN PRP .', 'NN PRP . DT', 'PRP .', 'PRP . DT', 'PRP . DT NNP', '. DT',
                         '. DT NNP', '. DT NNP NNP', 'DT NNP', 'DT NNP NNP', 'DT NNP NNP NNP', 'NNP NNP',
                         'NNP NNP NNP', 'NNP NNP NNP NNP', 'NNP NNP NNP .', 'NNP NNP .', 'NNP NNP . TO',
                         'NNP .', 'NNP . TO', 'NNP . TO NNP', '. TO', '. TO NNP', '. TO NNP NNP',
                         'TO NNP', 'TO NNP NNP']
    return set(good_pos_ngrams)
def _get_grammar_errors(self,pos,text,tokens):
    """
    Internal function to get the number of grammar errors in given text
    pos - part of speech tagged text (list, one tagged sequence per essay)
    text - normal text (list)
    tokens - list of lists of tokenized text
    Returns (good_pos_tags, bad_pos_positions): a per-essay ratio derived
    from POS n-grams NOT found in the known-good set, and per-essay lists
    of [start, end] spans flagged as bad.
    """
    word_counts = [max(len(t),1) for t in tokens]
    good_pos_tags = []
    # POS n-grams of length 2..4 are checked against the good-ngram set.
    min_pos_seq=2
    max_pos_seq=4
    bad_pos_positions=[]
    for i in xrange(0, len(text)):
        pos_seq = [tag[1] for tag in pos[i]]
        pos_ngrams = util_functions.ngrams(pos_seq, min_pos_seq, max_pos_seq)
        # Only the full-length (4-tag) n-grams are used to locate bad spans.
        long_pos_ngrams=[z for z in pos_ngrams if z.count(' ')==(max_pos_seq-1)]
        # NOTE(review): z indexes into long_pos_ngrams, but the resulting
        # [z, z+4] spans are later applied to markup-token positions in
        # gen_feedback -- confirm the two index spaces line up.
        bad_pos_tuples=[[z,z+max_pos_seq] for z in xrange(0,len(long_pos_ngrams)) if long_pos_ngrams[z] not in self._good_pos_ngrams]
        bad_pos_tuples.sort(key=operator.itemgetter(1))
        # Merge overlapping bad spans into single contiguous spans.
        # NOTE(review): reversed(xrange(len-1)) never visits the last span as
        # an 'm' -- verify this is intentional.
        to_delete=[]
        for m in reversed(xrange(len(bad_pos_tuples)-1)):
            start, end = bad_pos_tuples[m]
            for j in xrange(m+1, len(bad_pos_tuples)):
                lstart, lend = bad_pos_tuples[j]
                if lstart >= start and lstart <= end:
                    bad_pos_tuples[m][1]=bad_pos_tuples[j][1]
                    to_delete.append(j)
        fixed_bad_pos_tuples=[bad_pos_tuples[z] for z in xrange(0,len(bad_pos_tuples)) if z not in to_delete]
        bad_pos_positions.append(fixed_bad_pos_tuples)
        overlap_ngrams = [z for z in pos_ngrams if z in self._good_pos_ngrams]
        if (len(pos_ngrams)-len(overlap_ngrams))>0:
            divisor=len(pos_ngrams)/len(pos_seq)
        else:
            divisor=1
        if divisor == 0:
            divisor=1
        # Despite the destination list's name, this ratio counts n-grams
        # ABSENT from the good set (scaled by n-grams per tag); 0 means no
        # unrecognized sequences.
        good_grammar_ratio = (len(pos_ngrams)-len(overlap_ngrams))/divisor
        good_pos_tags.append(good_grammar_ratio)
    return good_pos_tags,bad_pos_positions
def gen_length_feats(self, e_set):
    """
    Generate length-based features from an essay set.
    Generally an internal function called by gen_feats.
    e_set - EssaySet object
    Returns a numpy array with one row per essay and columns: length,
    word count, comma count, apostrophe count, sentence-punctuation count,
    chars per word, grammar-error score, grammar-error score per word.
    """
    text = e_set._text
    lengths = [len(essay) for essay in text]
    # Guard against zero-token essays so per-word ratios stay finite.
    word_counts = [max(len(tok_list), 1) for tok_list in e_set._tokens]
    comma_count = [essay.count(",") for essay in text]
    ap_count = [essay.count("'") for essay in text]
    punc_count = [essay.count(".") + essay.count("?") + essay.count("!") for essay in text]
    chars_per_word = [length / float(words) for length, words in zip(lengths, word_counts)]
    good_pos_tags, bad_pos_positions = self._get_grammar_errors(e_set._pos, e_set._text, e_set._tokens)
    good_pos_tag_prop = [tags / float(words) for tags, words in zip(good_pos_tags, word_counts)]
    length_arr = numpy.array((
        lengths, word_counts, comma_count, ap_count, punc_count, chars_per_word, good_pos_tags,
        good_pos_tag_prop)).transpose()
    return length_arr.copy()
def gen_bag_feats(self, e_set):
    """
    Generate bag-of-words features from an input essay set using the
    trained dictionaries on this FeatureExtractor.
    Generally called by gen_feats.
    e_set - EssaySet object
    Returns a numpy array of stemmed-vocab counts followed by normal-vocab counts.
    Raises util_functions.InputError when dictionaries were never initialized.
    """
    # Guard clause: initialize_dictionaries must have run first.
    if not hasattr(self, '_stem_dict'):
        raise util_functions.InputError(self, "Dictionaries must be initialized prior to generating bag features.")
    stem_counts = self._stem_dict.transform(e_set._clean_stem_text)
    normal_counts = self._normal_dict.transform(e_set._text)
    bag_feats = numpy.concatenate((stem_counts.toarray(), normal_counts.toarray()), axis=1)
    return bag_feats.copy()
def gen_feats(self, e_set):
    """
    Generate the full feature matrix (length, prompt, and bag-of-words
    features) for an essay set.
    e_set - EssaySet object
    Returns a numpy array with one row per essay.
    """
    # Bag features are computed first so an uninitialized-dictionary error
    # surfaces before any other work is done.
    bag = self.gen_bag_feats(e_set)
    length = self.gen_length_feats(e_set)
    prompt = self.gen_prompt_feats(e_set)
    # Column order: length features, then prompt features, then bag features.
    return numpy.concatenate((length, prompt, bag), axis=1).copy()
def gen_prompt_feats(self, e_set):
    """
    Generate prompt-based features from an essay set object and the internal
    prompt variable. Generally called internally by gen_feats.
    e_set - EssaySet object
    Returns a numpy array with columns: prompt-word overlap count, overlap
    proportion, synonym-expanded overlap count, expanded overlap proportion.
    """
    prompt_toks = nltk.word_tokenize(e_set._prompt)
    # Expand the prompt vocabulary with WordNet synonyms of each prompt word.
    expand_syns = list(chain.from_iterable(
        util_functions.get_wordnet_syns(word) for word in prompt_toks))

    def _overlap_with(vocab):
        # Count per-essay token overlap with vocab, plus the proportion of
        # tokens that overlap (empty essays count as length 1).
        counts, props = [], []
        for toks in e_set._tokens:
            denom = len(toks) or 1
            hits = len([t for t in toks if t in vocab])
            counts.append(hits)
            props.append(hits / float(denom))
        return counts, props

    prompt_overlap, prompt_overlap_prop = _overlap_with(prompt_toks)
    expand_overlap, expand_overlap_prop = _overlap_with(expand_syns)
    prompt_arr = numpy.array((prompt_overlap, prompt_overlap_prop,
                              expand_overlap, expand_overlap_prop)).transpose()
    return prompt_arr.copy()
def gen_feedback(self, e_set, features=None):
    """
    Generate feedback for a given set of essays
    e_set - EssaySet object
    features - optionally, pass in a matrix of features extracted from e_set using FeatureExtractor
    in order to get off topic feedback.
    Returns a list of dicts (one per essay) with keys 'grammar', 'spelling',
    'markup_text', 'grammar_per_char', 'spelling_per_char',
    'too_similar_to_prompt', and -- only when features is given and the
    thresholds trip -- 'topicality' and 'prompt_overlap'.
    """
    #Set ratio to modify thresholds for grammar/spelling errors
    modifier_ratio=1.05
    #Calc number of grammar and spelling errors per character
    set_grammar,bad_pos_positions=self._get_grammar_errors(e_set._pos,e_set._text,e_set._tokens)
    # The +.1 keeps the division finite for zero-length essays.
    set_grammar_per_character=[set_grammar[m]/float(len(e_set._text[m])+.1) for m in xrange(0,len(e_set._text))]
    set_spell_errors_per_character=[e_set._spelling_errors[m]/float(len(e_set._text[m])+.1) for m in xrange(0,len(e_set._text))]
    #Iterate through essays and create a feedback dict for each
    all_feedback=[]
    for m in xrange(0,len(e_set._text)):
        #Be very careful about changing these messages!
        individual_feedback={'grammar' : "Grammar: Ok.",
                             'spelling' : "Spelling: Ok.",
                             'markup_text' : "",
                             'grammar_per_char' : set_grammar_per_character[m],
                             'spelling_per_char' : set_spell_errors_per_character[m],
                             'too_similar_to_prompt' : False,
                             }
        markup_tokens=e_set._markup_text[m].split(" ")
        #This loop ensures that sequences of bad grammar get put together into one sequence instead of staying
        #disjointed
        bad_pos_starts=[z[0] for z in bad_pos_positions[m]]
        bad_pos_ends=[z[1]-1 for z in bad_pos_positions[m]]
        # Wrap flagged token spans in <bg>...</bg> markers.
        for z in xrange(0,len(markup_tokens)):
            if z in bad_pos_starts:
                markup_tokens[z]='<bg>' + markup_tokens[z]
            elif z in bad_pos_ends:
                markup_tokens[z]=markup_tokens[z] + "</bg>"
        # Close a <bg> span whose recorded end lies past the last token.
        if(len(bad_pos_ends)>0 and len(bad_pos_starts)>0 and len(markup_tokens)>1):
            if max(bad_pos_ends)>(len(markup_tokens)-1) and max(bad_pos_starts)<(len(markup_tokens)-1):
                markup_tokens[len(markup_tokens)-1]+="</bg>"
        #Display messages if grammar/spelling errors greater than average in training set
        if set_grammar_per_character[m]>(self._grammar_errors_per_character*modifier_ratio):
            individual_feedback['grammar']="Grammar: More grammar errors than average."
        if set_spell_errors_per_character[m]>(self._spell_errors_per_character*modifier_ratio):
            individual_feedback['spelling']="Spelling: More spelling errors than average."
        #Test topicality by calculating # of on topic words per character and comparing to the training set
        #mean. Requires features to be passed in
        if features is not None:
            # Columns 12+ are assumed to be bag-of-words features and column 9
            # the prompt-overlap proportion, per gen_feats' layout (8 length
            # columns + 4 prompt columns) -- TODO confirm against gen_feats.
            f_row_sum=numpy.sum(features[m,12:])
            f_row_prop=f_row_sum/len(e_set._text[m])
            if f_row_prop<(self._mean_f_prop/1.5) or len(e_set._text[m])<20:
                individual_feedback['topicality']="Topicality: Essay may be off topic."
            if(features[m,9]>.6):
                individual_feedback['prompt_overlap']="Prompt Overlap: Too much overlap with prompt."
                individual_feedback['too_similar_to_prompt']=True
                log.debug(features[m,9])
        #Create string representation of markup text
        markup_string=" ".join(markup_tokens)
        individual_feedback['markup_text']=markup_string
        all_feedback.append(individual_feedback)
    return all_feedback
def add_essay(self, essay_text, essay_score, essay_generated=0):
    """
    Add new (essay_text, essay_score) pair to the essay set.
    essay_text must be a string.
    essay_score must be an int.
    essay_generated should not be changed by the user (0 or 1).
    Returns a confirmation string that the essay was added.
    Raises util_functions.InputError when the arguments fail validation.
    """
    # Get maximum current essay id, or set to 0 if this is the first essay added
    if(len(self._id) > 0):
        max_id = max(self._id)
    else:
        max_id = 0
    # Verify that essay_score is an int, essay_text is a string, and essay_generated equals 0 or 1
    try:
        essay_text = essay_text.encode('ascii', 'ignore')
        if len(essay_text) < 5:
            essay_text = "Invalid essay."
    except:
        log.exception("Could not parse essay into ascii.")
    try:
        # Try conversion of types
        essay_score = int(essay_score)
        essay_text = str(essay_text)
    except:
        # Nothing needed here, will return error in any case.
        log.exception("Invalid type for essay score : {0} or essay text : {1}".format(type(essay_score), type(essay_text)))
    if isinstance(essay_score, int) and isinstance(essay_text, basestring)\
        and (essay_generated == 0 or essay_generated == 1):
        self._id.append(max_id + 1)
        self._score.append(essay_score)
        # Clean text by removing non digit/word/punctuation characters
        try:
            essay_text = str(essay_text.encode('ascii', 'ignore'))
        except:
            essay_text = (essay_text.decode('utf-8', 'replace')).encode('ascii', 'ignore')
        cleaned_essay = util_functions.sub_chars(essay_text).lower()
        if(len(cleaned_essay) > MAXIMUM_ESSAY_LENGTH):
            cleaned_essay = cleaned_essay[0:MAXIMUM_ESSAY_LENGTH]
        self._text.append(cleaned_essay)
        # Spell correct text using aspell
        cleaned_text, spell_errors, markup_text = util_functions.spell_correct(self._text[len(self._text) - 1])
        self._clean_text.append(cleaned_text)
        self._spelling_errors.append(spell_errors)
        self._markup_text.append(markup_text)
        # Tokenize text
        self._tokens.append(nltk.word_tokenize(self._clean_text[len(self._clean_text) - 1]))
        # Part of speech tag text
        self._pos.append(nltk.pos_tag(self._clean_text[len(self._clean_text) - 1].split(" ")))
        self._generated.append(essay_generated)
        # Stem spell corrected text
        porter = nltk.PorterStemmer()
        por_toks = " ".join([porter.stem(w) for w in self._tokens[len(self._tokens) - 1]])
        self._clean_stem_text.append(por_toks)
        ret = "text: " + self._text[len(self._text) - 1] + " score: " + str(essay_score)
        # Fix: 'ret' was built as the documented confirmation but the original
        # fell off the end of the function, returning None.
        return ret
    else:
        raise util_functions.InputError(essay_text, "arguments need to be in format "
                                                    "(text,score). text needs to be string,"
                                                    " score needs to be int.")
def update_prompt(self, prompt_text):
    """
    Replace the current prompt string (default "").
    prompt_text should be a string.
    Returns the stored (cleaned) prompt as confirmation.
    Raises util_functions.InputError for non-string input.
    """
    # Guard clause: reject anything that is not a string.
    if not isinstance(prompt_text, basestring):
        raise util_functions.InputError(prompt_text, "Invalid prompt. Need to enter a string value.")
    self._prompt = util_functions.sub_chars(prompt_text)
    return self._prompt
def generate_additional_essays(self, e_text, e_score, dictionary=None, max_syns=3):
    """
    Substitute synonyms to generate extra essays from existing ones.
    This is done to increase the amount of training data.
    Should only be used with lowest scoring essays.
    e_text is the text of the original essay.
    e_score is the score of the original essay.
    dictionary is a fixed dictionary (list) of words to replace.
    max_syns defines the maximum number of additional essays to generate. Do not set too high.
    """
    e_toks = nltk.word_tokenize(e_text)
    all_syns = []
    for word in e_toks:
        synonyms = util_functions.get_wordnet_syns(word)
        if(len(synonyms) > max_syns):
            synonyms = random.sample(synonyms, max_syns)
        all_syns.append(synonyms)
    new_essays = []
    for i in range(0, max_syns):
        # Fix: copy the token list. The original aliased e_toks, so each
        # generated essay mutated the source tokens and substitutions
        # accumulated across iterations instead of starting fresh.
        syn_toks = list(e_toks)
        for z in range(0, len(e_toks)):
            if len(all_syns[z]) > i and (dictionary is None or e_toks[z] in dictionary):
                syn_toks[z] = all_syns[z][i]
        new_essays.append(" ".join(syn_toks))
    for z in xrange(0, len(new_essays)):
        # Flag generated essays with essay_generated=1.
        self.add_essay(new_essays[z], e_score, 1)
def create(text,score,prompt_string, dump_data=False):
    """
    Creates a machine learning model from input text, associated scores, a prompt, and a path to the model
    TODO: Remove model path argument, it is needed for now to support legacy code
    text - A list of strings containing the text of the essays
    score - a list of integers containing score values
    prompt_string - the common prompt for the set of essays
    dump_data - when True, write the raw inputs out via dump_input_data first
    Returns a results dict with keys 'errors', 'success', 'cv_kappa',
    'cv_mean_absolute_error', 'feature_ext', 'classifier', 'algorithm',
    'score', 'text', 'prompt'.
    """
    if dump_data:
        dump_input_data(text, score)
    # Classification vs regression is chosen from the score list.
    algorithm = select_algorithm(score)
    #Initialize a results dictionary to return
    results = {'errors': [],'success' : False, 'cv_kappa' : 0, 'cv_mean_absolute_error': 0,
               'feature_ext' : "", 'classifier' : "", 'algorithm' : algorithm,
               'score' : score, 'text' : text, 'prompt' : prompt_string}
    if len(text)!=len(score):
        msg = "Target and text lists must be same length."
        results['errors'].append(msg)
        log.exception(msg)
        return results
    try:
        #Create an essay set object that encapsulates all the essays and alternate representations (tokens, etc)
        e_set = model_creator.create_essay_set(text, score, prompt_string)
    except:
        # NOTE(review): if this fails, e_set stays unbound; the next try block
        # then raises NameError, which its bare except swallows into the
        # generic "feature extraction" error.
        msg = "essay set creation failed."
        results['errors'].append(msg)
        log.exception(msg)
    try:
        #Gets features from the essay set and computes error
        feature_ext, classifier, cv_error_results = model_creator.extract_features_and_generate_model(e_set, algorithm = algorithm)
        results['cv_kappa']=cv_error_results['kappa']
        results['cv_mean_absolute_error']=cv_error_results['mae']
        results['feature_ext']=feature_ext
        results['classifier']=classifier
        results['algorithm'] = algorithm
        results['success']=True
    except:
        msg = "feature extraction and model creation failed."
        results['errors'].append(msg)
        log.exception(msg)
    return results
def create_generic(numeric_values, textual_values, target, algorithm = util_functions.AlgorithmTypes.regression):
    """
    Creates a model from a generic list numeric values and text values
    numeric_values - A list of lists that are the predictors
    textual_values - A list of lists that are the predictors
    (each item in textual_values corresponds to the similarly indexed counterpart in numeric_values)
    target - The variable that we are trying to predict. A list of integers.
    algorithm - the type of algorithm that will be used
    Returns a results dict with keys 'errors', 'success', 'cv_kappa',
    'cv_mean_absolute_error', 'feature_ext', 'classifier', 'algorithm'.
    """
    # NOTE(review): the algorithm argument is immediately overwritten here,
    # so the caller's choice is ignored -- confirm intended.
    algorithm = select_algorithm(target)
    #Initialize a result dictionary to return.
    results = {'errors': [],'success' : False, 'cv_kappa' : 0, 'cv_mean_absolute_error': 0,
               'feature_ext' : "", 'classifier' : "", 'algorithm' : algorithm}
    if len(numeric_values)!=len(textual_values) or len(numeric_values)!=len(target):
        msg = "Target, numeric features, and text features must all be the same length."
        results['errors'].append(msg)
        log.exception(msg)
        return results
    try:
        #Initialize a predictor set object that encapsulates all of the text and numeric predictors
        pset = predictor_set.PredictorSet(essaytype="train")
        for i in xrange(0, len(numeric_values)):
            pset.add_row(numeric_values[i], textual_values[i], target[i])
    except:
        # NOTE(review): on failure pset may be unbound; the next try block's
        # NameError is swallowed by its bare except.
        msg = "predictor set creation failed."
        results['errors'].append(msg)
        log.exception(msg)
    try:
        #Extract all features and then train a classifier with the features
        feature_ext, classifier, cv_error_results = model_creator.extract_features_and_generate_model_predictors(pset, algorithm)
        results['cv_kappa']=cv_error_results['kappa']
        results['cv_mean_absolute_error']=cv_error_results['mae']
        results['feature_ext']=feature_ext
        results['classifier']=classifier
        results['success']=True
    except:
        msg = "feature extraction and model creation failed."
        results['errors'].append(msg)
        log.exception(msg)
    return results
def create_essay_set(text, score, prompt_string, generate_additional=True):
    """
    Creates an essay set from given data.
    Text should be a list of strings corresponding to essay text.
    Score should be a list of scores where score[n] corresponds to text[n].
    Prompt string is just a string containing the essay prompt.
    Generate_additional indicates whether to generate additional essays at
    the minimum score point or not.
    Returns the populated EssaySet.
    """
    x = EssaySet()
    # Hoisted: the original recomputed min(score) on every loop iteration.
    min_score = min(score) if score else None
    for i in xrange(0, len(text)):
        x.add_essay(text[i], score[i])
        # Augment the minimum-score essays with synonym-substituted copies.
        if generate_additional and score[i] == min_score:
            x.generate_additional_essays(x._clean_text[len(x._clean_text) - 1], score[i])
    x.update_prompt(prompt_string)
    return x
def get_cv_error(clf,feats,scores):
    """
    Gets cross validated error for a given classifier, set of features, and scores
    clf - classifier to cross validate
    feats - features to feed into the classifier and cross validate over
    scores - scores associated with the features -- feature row 1 associates with score 1, etc.
    Returns a dict {'success', 'kappa', 'mae'}; on failure 'success' stays
    False and the metrics stay 0.
    """
    results={'success' : False, 'kappa' : 0, 'mae' : 0}
    try:
        cv_preds=util_functions.gen_cv_preds(clf,feats,scores)
        # Mean absolute error of the cross-validated predictions.
        err=numpy.mean(numpy.abs(numpy.array(cv_preds)-scores))
        kappa=util_functions.quadratic_weighted_kappa(list(cv_preds),scores)
        results['mae']=err
        results['kappa']=kappa
        results['success']=True
    except ValueError as ex:
        # If this is hit, everything is fine. It is hard to explain why the error occurs, but it isn't a big deal.
        msg = u"Not enough classes (0,1,etc) in each cross validation fold: {ex}".format(ex=ex)
        log.debug(msg)
    except:
        log.exception("Error getting cv error estimates.")
    return results
def get_algorithms(algorithm):
    """
    Get two identically-configured models for the given algorithm type:
    the first for prediction, the second for cross-validation error.
    algorithm - one of util_functions.AlgorithmTypes (anything other than
    classification selects regression)
    Returns a (clf, clf2) tuple.
    """
    # Both models share the same hyper-parameters.
    params = dict(n_estimators=100, learn_rate=.05, max_depth=4,
                  random_state=1, min_samples_leaf=3)
    if algorithm == util_functions.AlgorithmTypes.classification:
        model_class = sklearn.ensemble.GradientBoostingClassifier
    else:
        model_class = sklearn.ensemble.GradientBoostingRegressor
    return model_class(**params), model_class(**params)
def extract_features_and_generate_model_predictors(predictor_set, algorithm=util_functions.AlgorithmTypes.regression):
    """
    Extracts features and generates predictors based on a given predictor set
    predictor_set - a PredictorSet object that has been initialized with data
    algorithm - one of util_functions.AlgorithmTypes (invalid values fall
    back to regression)
    Returns (trained PredictorExtractor, fitted model, cv error results dict).
    """
    if(algorithm not in [util_functions.AlgorithmTypes.regression, util_functions.AlgorithmTypes.classification]):
        algorithm = util_functions.AlgorithmTypes.regression
    f = predictor_extractor.PredictorExtractor()
    f.initialize_dictionaries(predictor_set)
    train_feats = f.gen_feats(predictor_set)
    # clf is fit on all data; clf2 (identical config) is used for CV error.
    clf,clf2 = get_algorithms(algorithm)
    cv_error_results=get_cv_error(clf2,train_feats,predictor_set._target)
    try:
        set_score = numpy.asarray(predictor_set._target, dtype=numpy.int)
        clf.fit(train_feats, set_score)
    except ValueError:
        log.exception("Not enough classes (0,1,etc) in sample.")
        # Force at least two distinct classes so fit can succeed.
        # NOTE(review): set_score aliases predictor_set._target here, so the
        # caller's target list is mutated in place -- confirm acceptable.
        set_score = predictor_set._target
        set_score[0]=1
        set_score[1]=0
        clf.fit(train_feats, set_score)
    return f, clf, cv_error_results
def extract_features_and_generate_model(essays, algorithm=util_functions.AlgorithmTypes.regression):
    """
    Feed in an essay set to get feature vector and classifier
    essays must be an essay set object
    algorithm - nominally one of util_functions.AlgorithmTypes; see the note
    below -- the passed value is always overwritten.
    returns a trained FeatureExtractor object, a fitted classifier, and
    cross-validation error results
    """
    f = feature_extractor.FeatureExtractor()
    f.initialize_dictionaries(essays)
    train_feats = f.gen_feats(essays)
    set_score = numpy.asarray(essays._score, dtype=numpy.int)
    # NOTE(review): the algorithm argument is ignored -- it is re-derived
    # here from the number of distinct scores (more than 5 -> regression).
    if len(util_functions.f7(list(set_score)))>5:
        algorithm = util_functions.AlgorithmTypes.regression
    else:
        algorithm = util_functions.AlgorithmTypes.classification
    # clf is fit on all data; clf2 (identical config) is used for CV error.
    clf,clf2 = get_algorithms(algorithm)
    cv_error_results=get_cv_error(clf2,train_feats,essays._score)
    try:
        clf.fit(train_feats, set_score)
    except ValueError:
        log.exception("Not enough classes (0,1,etc) in sample.")
        # Force two distinct classes so fit can succeed (mutates the local
        # numpy copy only, not essays._score).
        set_score[0]=1
        set_score[1]=0
        clf.fit(train_feats, set_score)
    return f, clf, cv_error_results
def dump_model_to_file(prompt_string, feature_ext, classifier, text, score, model_path):
    """
    Writes out a model to a file.
    prompt_string is a string containing the prompt
    feature_ext is a trained FeatureExtractor object
    classifier is a trained classifier
    text / score are the raw training inputs to bundle alongside the model
    model_path is the path to write the model file to
    """
    model_file = {'prompt': prompt_string, 'extractor': feature_ext,
                  'model': classifier, 'text': text, 'score': score}
    # Fix: open in binary mode and close deterministically. The original
    # wrote pickle data through a text-mode handle (corrupts binary pickle
    # protocols, fails outright on Python 3) and never closed the file.
    with open(model_path, "wb") as handle:
        pickle.dump(model_file, handle)
def create_essay_set_and_dump_model(text,score,prompt,model_path,additional_array=None):
    """
    Function that creates an essay set, extracts features, trains a model,
    and writes it out to model_path.
    See create_essay_set / extract_features_and_generate_model /
    dump_model_to_file for argument descriptions.
    additional_array is kept for backward compatibility but is unused.
    """
    essay_set=create_essay_set(text,score,prompt)
    # Fix: extract_features_and_generate_model returns THREE values and its
    # second parameter is the algorithm, not an additional array -- the
    # original unpacked two values and passed additional_array as the
    # algorithm, failing at runtime.
    feature_ext,clf,cv_error_results=extract_features_and_generate_model(essay_set)
    # Fix: dump_model_to_file requires (prompt, extractor, classifier, text,
    # score, path); the original passed only four arguments.
    dump_model_to_file(prompt,feature_ext,clf,text,score,model_path)
def initialize_dictionaries(self, p_set):
    """
    Initialize dictionaries with the textual inputs in the PredictorSet object.
    p_set - PredictorSet object that has had data fed in
    Returns True on success; raises util_functions.InputError otherwise.
    """
    train_type_error = "needs to be an essay set of the train type."
    # Guard clauses: p_set must be a train-type predictor set.
    if not hasattr(p_set, '_type') or p_set._type != "train":
        log.exception(train_type_error)
        raise util_functions.InputError(p_set, train_type_error)
    #Ensures that even with a large amount of input textual features, training time stays reasonable
    set_count = len(p_set._essay_sets) or 1
    max_feats2 = int(math.floor(200 / set_count))
    # One FeatureExtractor per essay set, each capped at max_feats2 features.
    for essay_set in p_set._essay_sets:
        extractor = FeatureExtractor()
        self._extractors.append(extractor)
        extractor.initialize_dictionaries(essay_set, max_feats2=max_feats2)
    self._initialized = True
    return True
def gen_feats(self, p_set):
    """
    Generates features based on an input p_set.
    p_set - PredictorSet with textual and numeric predictors
    Returns a numpy array: per-essay-set textual features concatenated,
    followed by the numeric predictor columns.
    Raises util_functions.InputError if initialize_dictionaries never ran.
    """
    if self._initialized!=True:
        error_message = "Dictionaries have not been initialized."
        log.exception(error_message)
        raise util_functions.InputError(p_set, error_message)
    # One feature block per essay set, produced by its matching extractor.
    textual_features = [self._extractors[i].gen_feats(p_set._essay_sets[i])
                        for i in range(len(p_set._essay_sets))]
    textual_matrix = numpy.concatenate(textual_features, axis=1)
    predictor_matrix = numpy.array(p_set._numeric_features)
    # Fix: removed stray Python 2 debug `print` statements that dumped the
    # matrix shapes to stdout from library code (also a syntax error on
    # Python 3).
    overall_matrix = numpy.concatenate((textual_matrix, predictor_matrix), axis=1)
    return overall_matrix.copy()
def t_text(self, t):
    # PLY lexer rule. The raw-string "docstring" below IS the token regex
    # (PLY reads it via __doc__) -- it must not be edited as documentation.
    r':\s*<text>'
    # Remember where the body starts (at the '<text>' opener just matched)
    # and switch the lexer into the exclusive 'text' state.
    t.lexer.text_start = t.lexer.lexpos - len('<text>')
    t.lexer.begin('text')
def t_text_end(self, t):
    # PLY lexer rule for the 'text' state; the raw string below is the regex.
    r'</text>\s*'
    # Emit everything from the recorded start offset through the closing tag
    # as one TEXT token, keep the line counter accurate for the multi-line
    # body, then return the lexer to the default state.
    t.type = 'TEXT'
    t.value = t.lexer.lexdata[
        t.lexer.text_start:t.lexer.lexpos]
    t.lexer.lineno += t.value.count('\n')
    t.value = t.value.strip()
    t.lexer.begin('INITIAL')
    return t
def t_KEYWORD_AS_TAG(self, t):
    # PLY lexer rule; the raw string below is the token regex.
    r'[a-zA-Z]+'
    # Classify a bare word: known reserved words become their own token
    # type, anything else is flagged as UNKNOWN_TAG.
    t.type = self.reserved.get(t.value, 'UNKNOWN_TAG')
    t.value = t.value.strip()
    return t
def t_LINE_OR_KEYWORD_VALUE(self, t):
    r':.+'
    # Strip the leading ':' and surrounding whitespace, then classify:
    # a reserved keyword gets its own token type, anything else is a LINE.
    # (The raw string above is the PLY token regex, not documentation.)
    t.value = t.value[1:].strip()
    t.type = self.reserved.get(t.value, 'LINE')
    return t
def tv_to_rdf(infile_name, outfile_name):
    """
    Convert a SPDX file from tag/value format to RDF format.
    infile_name - path of the tag/value file to read
    outfile_name - path the RDF output is written to
    Return True on success, False otherwise.
    """
    parser = Parser(Builder(), StandardLogger())
    parser.build()
    with open(infile_name) as infile:
        data = infile.read()
    document, error = parser.parse(data)
    if not error:
        with open(outfile_name, mode='w') as outfile:
            write_document(document, outfile)
        return True
    else:
        # Fix: the input being parsed is the tag/value file -- the original
        # message incorrectly reported an RDF parsing failure.
        print('Errors encountered while parsing tag/value file.')
        # Collect and show validation messages for diagnosis.
        messages = []
        document.validate(messages)
        print('\n'.join(messages))
        return False
def order_error(self, first_tag, second_tag, line):
    """Report an OrderError: first_tag appeared before second_tag.

    line is the input line number included in the logged message.
    Sets the parser's error flag as a side effect.
    """
    self.error = True
    self.logger.log(
        ERROR_MESSAGES['A_BEFORE_B'].format(first_tag, second_tag, line))
def p_lic_xref_1(self, p):
    """lic_xref : LICS_CRS_REF LINE"""
    # PLY grammar rule (the docstring above is the production).
    # Normalize the token to unicode: the Python 2 lexer yields bytes.
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.add_lic_xref(self.document, value)
    except OrderError:
        self.order_error('LicenseCrossReference', 'LicenseName', p.lineno(1))
def p_lic_comment_1(self, p):
    """lic_comment : LICS_COMMENT TEXT"""
    # PLY grammar rule (the docstring above is the production).
    # Normalize the token to unicode: the Python 2 lexer yields bytes.
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_lic_comment(self.document, value)
    except OrderError:
        self.order_error('LicenseComment', 'LicenseID', p.lineno(1))
    except CardinalityError:
        self.more_than_one_error('LicenseComment', p.lineno(1))
def p_extr_lic_name_1(self, p):
    """extr_lic_name : LICS_NAME extr_lic_name_value"""
    # PLY grammar rule (the docstring above is the production).
    # p[2] is already unicode-normalized by the extr_lic_name_value rule.
    try:
        self.builder.set_lic_name(self.document, p[2])
    except OrderError:
        self.order_error('LicenseName', 'LicenseID', p.lineno(1))
    except CardinalityError:
        self.more_than_one_error('LicenseName', p.lineno(1))
def p_extr_lic_name_value_1(self, p):
    """extr_lic_name_value : LINE"""
    # PLY grammar rule (the docstring above is the production).
    # Normalize to unicode: the Python 2 lexer yields byte strings.
    p[0] = p[1].decode(encoding='utf-8') if six.PY2 else p[1]
def p_extr_lic_text_1(self, p):
    """extr_lic_text : LICS_TEXT TEXT"""
    # Decode the Python 2 byte string; Python 3 already yields text.
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_lic_text(self.document, value)
    except OrderError:
        self.order_error('ExtractedText', 'LicenseID', p.lineno(1))
    except CardinalityError:
        self.more_than_one_error('ExtractedText', p.lineno(1))
def p_extr_lic_id_1(self, p):
    """extr_lic_id : LICS_ID LINE"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_lic_id(self.document, value)
    except SPDXValueError:
        # Malformed LicenseRef identifier: record and log.
        self.error = True
        self.logger.log(ERROR_MESSAGES['LICS_ID_VALUE'].format(p.lineno(1)))
def p_prj_uri_art_3(self, p):
    """prj_uri_art : ART_PRJ_URI error"""
    # Malformed ArtifactOfProjectURI value: record and log.
    self.error = True
    self.logger.log(ERROR_MESSAGES['ART_PRJ_URI_VALUE'].format(p.lineno(1)))
def p_prj_home_art_1(self, p):
    """prj_home_art : ART_PRJ_HOME LINE"""
    try:
        # Decode on Python 2 for consistency with the other
        # ArtifactOf* rules (the original passed raw bytes through).
        if six.PY2:
            value = p[2].decode(encoding='utf-8')
        else:
            value = p[2]
        self.builder.set_file_atrificat_of_project(self.document, 'home', value)
    except OrderError:
        self.order_error('ArtificatOfProjectHomePage', 'FileName', p.lineno(1))
def p_prj_home_art_2(self, p):
    """prj_home_art : ART_PRJ_HOME UN_KNOWN"""
    # An UNKNOWN home page is stored as the UnKnown sentinel object.
    try:
        self.builder.set_file_atrificat_of_project(
            self.document, 'home', utils.UnKnown())
    except OrderError:
        self.order_error('ArtifactOfProjectName', 'FileName', p.lineno(1))
def p_prj_name_art_1(self, p):
    """prj_name_art : ART_PRJ_NAME LINE"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_file_atrificat_of_project(self.document, 'name', value)
    except OrderError:
        self.order_error('ArtifactOfProjectName', 'FileName', p.lineno(1))
def p_file_dep_1(self, p):
    """file_dep : FILE_DEP LINE"""
    # Normalize the token to text before handing it to the builder.
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.add_file_dep(self.document, value)
    except OrderError:
        self.order_error('FileDependency', 'FileName', p.lineno(1))
def p_file_contrib_1(self, p):
    """file_contrib : FILE_CONTRIB LINE"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.add_file_contribution(self.document, value)
    except OrderError:
        self.order_error('FileContributor', 'FileName', p.lineno(1))
def p_file_notice_1(self, p):
    """file_notice : FILE_NOTICE TEXT"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_file_notice(self.document, value)
    except OrderError:
        self.order_error('FileNotice', 'FileName', p.lineno(1))
    except CardinalityError:
        self.more_than_one_error('FileNotice', p.lineno(1))
def p_file_cr_text_1(self, p):
    """file_cr_text : FILE_CR_TEXT file_cr_value"""
    # The value rule has already decoded the copyright text.
    text = p[2]
    try:
        self.builder.set_file_copyright(self.document, text)
    except OrderError:
        self.order_error('FileCopyrightText', 'FileName', p.lineno(1))
    except CardinalityError:
        self.more_than_one_error('FileCopyrightText', p.lineno(1))
def p_file_cr_value_1(self, p):
    """file_cr_value : TEXT"""
    # Normalize the TEXT token to a text value.
    p[0] = p[1].decode(encoding='utf-8') if six.PY2 else p[1]
def p_file_lics_comment_1(self, p):
    """file_lics_comment : FILE_LICS_COMMENT TEXT"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_file_license_comment(self.document, value)
    except OrderError:
        self.order_error('LicenseComments', 'FileName', p.lineno(1))
    except CardinalityError:
        self.more_than_one_error('LicenseComments', p.lineno(1))
def p_file_lics_info_1(self, p):
    """file_lics_info : FILE_LICS_INFO file_lic_info_value"""
    # p[2] is a license object produced by the value rule.
    try:
        self.builder.set_file_license_in_file(self.document, p[2])
    except OrderError:
        self.order_error('LicenseInfoInFile', 'FileName', p.lineno(1))
    except SPDXValueError:
        self.error = True
        self.logger.log(
            ERROR_MESSAGES['FILE_LICS_INFO_VALUE'].format(p.lineno(1)))
def p_conc_license_3(self, p):
    """conc_license : LINE"""
    value = p[1].decode(encoding='utf-8') if six.PY2 else p[1]
    ref_re = re.compile('LicenseRef-.+', re.UNICODE)
    # Test the decoded text, not the raw token, so Python 2 byte
    # strings are matched consistently against the unicode pattern
    # and the LICENSE_MAP keys (the original tested p[1] directly).
    if (value in config.LICENSE_MAP.keys()) or (ref_re.match(value) is not None):
        p[0] = document.License.from_identifier(value)
    else:
        p[0] = self.license_list_parser.parse(value)
def p_file_name_1(self, p):
    """file_name : FILE_NAME LINE"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_file_name(self.document, value)
    except OrderError:
        self.order_error('FileName', 'PackageName', p.lineno(1))
def p_spdx_id(self, p):
    """spdx_id : SPDX_ID LINE"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    # The first SPDXID in the stream names the document itself;
    # subsequent ones name files.
    if self.builder.doc_spdx_id_set:
        self.builder.set_file_spdx_id(self.document, value)
    else:
        self.builder.set_doc_spdx_id(self.document, value)
def p_file_comment_1(self, p):
    """file_comment : FILE_COMMENT TEXT"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_file_comment(self.document, value)
    except OrderError:
        self.order_error('FileComment', 'FileName', p.lineno(1))
    except CardinalityError:
        self.more_than_one_error('FileComment', p.lineno(1))
def p_file_type_1(self, p):
    """file_type : FILE_TYPE file_type_value"""
    # p[2] is the already-normalized file type string.
    file_type = p[2]
    try:
        self.builder.set_file_type(self.document, file_type)
    except OrderError:
        self.order_error('FileType', 'FileName', p.lineno(1))
    except CardinalityError:
        self.more_than_one_error('FileType', p.lineno(1))
def p_file_chksum_1(self, p):
    """file_chksum : FILE_CHKSUM CHKSUM"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_file_chksum(self.document, value)
    except OrderError:
        self.order_error('FileChecksum', 'FileName', p.lineno(1))
    except CardinalityError:
        self.more_than_one_error('FileChecksum', p.lineno(1))
def p_file_conc_1(self, p):
    """file_conc : FILE_LICS_CONC conc_license"""
    # p[2] is a license object built by the conc_license rule.
    try:
        self.builder.set_concluded_license(self.document, p[2])
    except SPDXValueError:
        self.error = True
        self.logger.log(
            ERROR_MESSAGES['FILE_LICS_CONC_VALUE'].format(p.lineno(1)))
    except OrderError:
        self.order_error('LicenseConcluded', 'FileName', p.lineno(1))
    except CardinalityError:
        self.more_than_one_error('LicenseConcluded', p.lineno(1))
def p_file_type_value(self, p):
    """file_type_value : OTHER
    | SOURCE
    | ARCHIVE
    | BINARY
    """
    # Pass the keyword token through as text.
    p[0] = p[1].decode(encoding='utf-8') if six.PY2 else p[1]
def p_pkg_desc_1(self, p):
    """pkg_desc : PKG_DESC TEXT"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_pkg_desc(self.document, value)
    except CardinalityError:
        self.more_than_one_error('PackageDescription', p.lineno(1))
    except OrderError:
        self.order_error('PackageDescription', 'PackageFileName', p.lineno(1))
def p_pkg_summary_1(self, p):
    """pkg_summary : PKG_SUM TEXT"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_pkg_summary(self.document, value)
    except OrderError:
        self.order_error('PackageSummary', 'PackageFileName', p.lineno(1))
    except CardinalityError:
        self.more_than_one_error('PackageSummary', p.lineno(1))
def p_pkg_cr_text_1(self, p):
    """pkg_cr_text : PKG_CPY_TEXT pkg_cr_text_value"""
    # The value rule has already decoded the copyright text.
    text = p[2]
    try:
        self.builder.set_pkg_cr_text(self.document, text)
    except OrderError:
        self.order_error('PackageCopyrightText', 'PackageFileName', p.lineno(1))
    except CardinalityError:
        self.more_than_one_error('PackageCopyrightText', p.lineno(1))
def p_pkg_cr_text_value_1(self, p):
    """pkg_cr_text_value : TEXT"""
    # Normalize the TEXT token to a text value.
    p[0] = p[1].decode(encoding='utf-8') if six.PY2 else p[1]
def p_pkg_lic_comment_1(self, p):
    """pkg_lic_comment : PKG_LICS_COMMENT TEXT"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_pkg_license_comment(self.document, value)
    except OrderError:
        self.order_error('PackageLicenseComments', 'PackageFileName', p.lineno(1))
    except CardinalityError:
        self.more_than_one_error('PackageLicenseComments', p.lineno(1))
def p_pkg_lic_ff_1(self, p):
    """pkg_lic_ff : PKG_LICS_FFILE pkg_lic_ff_value"""
    # p[2] is a license object produced by the value rule.
    try:
        self.builder.set_pkg_license_from_file(self.document, p[2])
    except OrderError:
        self.order_error('PackageLicenseInfoFromFiles', 'PackageName', p.lineno(1))
    except SPDXValueError:
        self.error = True
        self.logger.log(
            ERROR_MESSAGES['PKG_LIC_FFILE_VALUE'].format(p.lineno(1)))
def p_pkg_lic_ff_value_3(self, p):
    """pkg_lic_ff_value : LINE"""
    # Build a License object from the decoded identifier.
    ident = p[1].decode(encoding='utf-8') if six.PY2 else p[1]
    p[0] = document.License.from_identifier(ident)
def p_pkg_src_info_1(self, p):
    """pkg_src_info : PKG_SRC_INFO TEXT"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_pkg_source_info(self.document, value)
    except CardinalityError:
        self.more_than_one_error('PackageSourceInfo', p.lineno(1))
    except OrderError:
        self.order_error('PackageSourceInfo', 'PackageFileName', p.lineno(1))
def p_pkg_chksum_1(self, p):
    """pkg_chksum : PKG_CHKSUM CHKSUM"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_pkg_chk_sum(self.document, value)
    except OrderError:
        self.order_error('PackageChecksum', 'PackageFileName', p.lineno(1))
    except CardinalityError:
        self.more_than_one_error('PackageChecksum', p.lineno(1))
def p_pkg_home_1(self, p):
    """pkg_home : PKG_HOME pkg_home_value"""
    try:
        # PackageHomePage must set the package homepage (the writer
        # reads package.homepage).  The original called
        # set_pkg_down_location here, which belongs to the separate
        # download-location rule and would collide with it.
        self.builder.set_pkg_home(self.document, p[2])
    except OrderError:
        self.order_error('PackageHomePage', 'PackageName', p.lineno(1))
    except CardinalityError:
        self.more_than_one_error('PackageHomePage', p.lineno(1))
def p_pkg_home_2(self, p):
    """pkg_home : PKG_HOME error"""
    self.error = True
    # Include the line number, like every sibling error rule does.
    # (str.format ignores the argument if the template has no {0}.)
    msg = ERROR_MESSAGES['PKG_HOME_VALUE'].format(p.lineno(1))
    self.logger.log(msg)
def p_pkg_home_value_1(self, p):
    """pkg_home_value : LINE"""
    # Normalize the token to text for downstream rules.
    p[0] = p[1].decode(encoding='utf-8') if six.PY2 else p[1]
def p_pkg_down_value_1(self, p):
    """pkg_down_value : LINE """
    # Normalize the token to text for downstream rules.
    p[0] = p[1].decode(encoding='utf-8') if six.PY2 else p[1]
def p_pkg_file_name(self, p):
    """pkg_file_name : PKG_FILE_NAME LINE"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_pkg_file_name(self.document, value)
    except OrderError:
        self.order_error('PackageFileName', 'PackageName', p.lineno(1))
    except CardinalityError:
        self.more_than_one_error('PackageFileName', p.lineno(1))
def p_package_version_1(self, p):
    """package_version : PKG_VERSION LINE"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_pkg_vers(self.document, value)
    except OrderError:
        self.order_error('PackageVersion', 'PackageName', p.lineno(1))
    except CardinalityError:
        self.more_than_one_error('PackageVersion', p.lineno(1))
def p_package_name(self, p):
    """package_name : PKG_NAME LINE"""
    # PackageName opens a new package section in the document.
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.create_package(self.document, value)
    except CardinalityError:
        self.more_than_one_error('PackageName', p.lineno(1))
def p_review_date_1(self, p):
    """review_date : REVIEW_DATE DATE"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.add_review_date(self.document, value)
    except CardinalityError:
        self.more_than_one_error('ReviewDate', p.lineno(1))
    except OrderError:
        self.order_error('ReviewDate', 'Reviewer', p.lineno(1))
def p_review_comment_1(self, p):
    """review_comment : REVIEW_COMMENT TEXT"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.add_review_comment(self.document, value)
    except CardinalityError:
        self.more_than_one_error('ReviewComment', p.lineno(1))
    except OrderError:
        self.order_error('ReviewComment', 'Reviewer', p.lineno(1))
def p_annotation_date_1(self, p):
    """annotation_date : ANNOTATION_DATE DATE"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.add_annotation_date(self.document, value)
    except CardinalityError:
        self.more_than_one_error('AnnotationDate', p.lineno(1))
    except OrderError:
        self.order_error('AnnotationDate', 'Annotator', p.lineno(1))
def p_annotation_comment_1(self, p):
    """annotation_comment : ANNOTATION_COMMENT TEXT"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.add_annotation_comment(self.document, value)
    except CardinalityError:
        self.more_than_one_error('AnnotationComment', p.lineno(1))
    except OrderError:
        self.order_error('AnnotationComment', 'Annotator', p.lineno(1))
def p_annotation_type_1(self, p):
    """annotation_type : ANNOTATION_TYPE LINE"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.add_annotation_type(self.document, value)
    except CardinalityError:
        self.more_than_one_error('AnnotationType', p.lineno(1))
    except SPDXValueError:
        # Not one of the recognized annotation types.
        self.error = True
        self.logger.log(
            ERROR_MESSAGES['ANNOTATION_TYPE_VALUE'].format(p.lineno(1)))
    except OrderError:
        self.order_error('AnnotationType', 'Annotator', p.lineno(1))
def p_annotation_spdx_id_1(self, p):
    """annotation_spdx_id : ANNOTATION_SPDX_ID LINE"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_annotation_spdx_id(self.document, value)
    except CardinalityError:
        self.more_than_one_error('SPDXREF', p.lineno(1))
    except OrderError:
        self.order_error('SPDXREF', 'Annotator', p.lineno(1))
def p_doc_comment_1(self, p):
    """doc_comment : DOC_COMMENT TEXT"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_doc_comment(self.document, value)
    except CardinalityError:
        self.more_than_one_error('DocumentComment', p.lineno(1))
def p_doc_name_1(self, p):
    """doc_name : DOC_NAME LINE"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_doc_name(self.document, value)
    except CardinalityError:
        self.more_than_one_error('DocumentName', p.lineno(1))
def p_ext_doc_refs_1(self, p):
    """ext_doc_ref : EXT_DOC_REF DOC_REF_ID DOC_URI EXT_DOC_REF_CHKSUM"""
    # Normalize all three tokens (id, uri, checksum) to text.
    if six.PY2:
        doc_ref_id = p[2].decode(encoding='utf-8')
        doc_uri = p[3].decode(encoding='utf-8')
        ext_doc_chksum = p[4].decode(encoding='utf-8')
    else:
        doc_ref_id, doc_uri, ext_doc_chksum = p[2], p[3], p[4]
    try:
        self.builder.add_ext_doc_refs(self.document, doc_ref_id, doc_uri,
                                      ext_doc_chksum)
    except SPDXValueError:
        self.error = True
        self.logger.log(
            ERROR_MESSAGES['EXT_DOC_REF_VALUE'].format(p.lineno(2)))
def p_spdx_version_1(self, p):
    """spdx_version : DOC_VERSION LINE"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_doc_version(self.document, value)
    except CardinalityError:
        self.more_than_one_error('SPDXVersion', p.lineno(1))
    except SPDXValueError:
        # Version string did not match the expected SPDX-M.m form.
        self.error = True
        self.logger.log(
            ERROR_MESSAGES['DOC_VERSION_VALUE'].format(p[2], p.lineno(1)))
def p_creator_comment_1(self, p):
    """creator_comment : CREATOR_COMMENT TEXT"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_creation_comment(self.document, value)
    except CardinalityError:
        self.more_than_one_error('CreatorComment', p.lineno(1))
def p_created_1(self, p):
    """created : CREATED DATE"""
    value = p[2].decode(encoding='utf-8') if six.PY2 else p[2]
    try:
        self.builder.set_created_date(self.document, value)
    except CardinalityError:
        self.more_than_one_error('Created', p.lineno(1))
def write_creation_info(creation_info, out):
    """
    Serialize the creation-info section (creators, date, comment) to out.
    """
    out.write('# Creation Info\n\n')
    # Creators are emitted in sorted order for deterministic output.
    for creator in sorted(creation_info.creators):
        write_value('Creator', creator, out)
    write_value('Created', creation_info.created_iso_format, out)
    # The comment is optional.
    if creation_info.has_comment:
        write_text_value('CreatorComment', creation_info.comment, out)
def write_review(review, out):
    """
    Serialize a single review section to out.
    """
    out.write('# Review\n\n')
    for tag, val in (('Reviewer', review.reviewer),
                     ('ReviewDate', review.review_date_iso_format)):
        write_value(tag, val, out)
    if review.has_comment:
        write_text_value('ReviewComment', review.comment, out)
def write_annotation(annotation, out):
    """
    Serialize a single annotation section to out.
    """
    out.write('# Annotation\n\n')
    for tag, val in (('Annotator', annotation.annotator),
                     ('AnnotationDate', annotation.annotation_date_iso_format)):
        write_value(tag, val, out)
    # Optional comment goes between the date and the type.
    if annotation.has_comment:
        write_text_value('AnnotationComment', annotation.comment, out)
    for tag, val in (('AnnotationType', annotation.annotation_type),
                     ('SPDXREF', annotation.spdx_id)):
        write_value(tag, val, out)
def write_file(spdx_file, out):
    """
    Write a file fields to out.
    Emits the mandatory fields (name, SPDXID, checksum, concluded
    license, licenses in file, copyright) and every optional field
    the file carries, in the tag/value order expected by the spec.
    """
    out.write('# File\n\n')
    write_value('FileName', spdx_file.name, out)
    write_value('SPDXID', spdx_file.spdx_id, out)
    if spdx_file.has_optional_field('type'):
        write_file_type(spdx_file.type, out)
    write_value('FileChecksum', spdx_file.chk_sum.to_tv(), out)
    # Composite license expressions are parenthesized.
    if isinstance(spdx_file.conc_lics, (document.LicenseConjunction, document.LicenseDisjunction)):
        write_value('LicenseConcluded', u'({0})'.format(spdx_file.conc_lics), out)
    else:
        write_value('LicenseConcluded', spdx_file.conc_lics, out)
    # write sorted list
    for lics in sorted(spdx_file.licenses_in_file):
        write_value('LicenseInfoInFile', lics, out)
    # copyright is free-form text or a NONE/NOASSERTION object.
    if isinstance(spdx_file.copyright, six.string_types):
        write_text_value('FileCopyrightText', spdx_file.copyright, out)
    else:
        write_value('FileCopyrightText', spdx_file.copyright, out)
    if spdx_file.has_optional_field('license_comment'):
        write_text_value('LicenseComments', spdx_file.license_comment, out)
    if spdx_file.has_optional_field('comment'):
        write_text_value('FileComment', spdx_file.comment, out)
    if spdx_file.has_optional_field('notice'):
        write_text_value('FileNotice', spdx_file.notice, out)
    for contributor in sorted(spdx_file.contributors):
        write_value('FileContributor', contributor, out)
    for dependency in sorted(spdx_file.dependencies):
        write_value('FileDependency', dependency, out)
    # ArtifactOf* triples; homepage/uri may be missing for a name,
    # hence zip_longest (pads with None).
    names = spdx_file.artifact_of_project_name
    homepages = spdx_file.artifact_of_project_home
    uris = spdx_file.artifact_of_project_uri
    for name, homepage, uri in sorted(zip_longest(names, homepages, uris)):
        write_value('ArtifactOfProjectName', name, out)
        if homepage is not None:
            write_value('ArtifactOfProjectHomePage', homepage, out)
        if uri is not None:
            write_value('ArtifactOfProjectURI', uri, out)
def write_package(package, out):
    """
    Write a package fields to out.
    Emits the package's mandatory and optional fields in tag/value
    order, then each of its files (sorted) separated by blank blocks.
    """
    out.write('# Package\n\n')
    write_value('PackageName', package.name, out)
    if package.has_optional_field('version'):
        write_value('PackageVersion', package.version, out)
    write_value('PackageDownloadLocation', package.download_location, out)
    if package.has_optional_field('summary'):
        write_text_value('PackageSummary', package.summary, out)
    if package.has_optional_field('source_info'):
        write_text_value('PackageSourceInfo', package.source_info, out)
    if package.has_optional_field('file_name'):
        write_value('PackageFileName', package.file_name, out)
    if package.has_optional_field('supplier'):
        write_value('PackageSupplier', package.supplier, out)
    if package.has_optional_field('originator'):
        write_value('PackageOriginator', package.originator, out)
    if package.has_optional_field('check_sum'):
        write_value('PackageChecksum', package.check_sum.to_tv(), out)
    write_value('PackageVerificationCode', format_verif_code(package), out)
    if package.has_optional_field('description'):
        write_text_value('PackageDescription', package.description, out)
    # Composite license expressions are parenthesized.
    if isinstance(package.license_declared, (document.LicenseConjunction,
                                             document.LicenseDisjunction)):
        write_value('PackageLicenseDeclared', u'({0})'.format(package.license_declared), out)
    else:
        write_value('PackageLicenseDeclared', package.license_declared, out)
    if isinstance(package.conc_lics, (document.LicenseConjunction,
                                      document.LicenseDisjunction)):
        write_value('PackageLicenseConcluded', u'({0})'.format(package.conc_lics), out)
    else:
        write_value('PackageLicenseConcluded', package.conc_lics, out)
    # Write sorted list of licenses.
    for lics in sorted(package.licenses_from_files):
        write_value('PackageLicenseInfoFromFiles', lics, out)
    if package.has_optional_field('license_comment'):
        write_text_value('PackageLicenseComments', package.license_comment, out)
    # cr_text is either free form text or NONE or NOASSERTION.
    if isinstance(package.cr_text, six.string_types):
        write_text_value('PackageCopyrightText', package.cr_text, out)
    else:
        write_value('PackageCopyrightText', package.cr_text, out)
    if package.has_optional_field('homepage'):
        write_value('PackageHomePage', package.homepage, out)
    # Write sorted files.
    for spdx_file in sorted(package.files):
        write_separators(out)
        write_file(spdx_file, out)
def write_extracted_licenses(lics, out):
    """
    Serialize one extracted (non-standard) license to out.
    """
    write_value('LicenseID', lics.identifier, out)
    # Name and comment are optional fields.
    if lics.full_name is not None:
        write_value('LicenseName', lics.full_name, out)
    if lics.comment is not None:
        write_text_value('LicenseComment', lics.comment, out)
    # Cross references are emitted in sorted order.
    for reference in sorted(lics.cross_ref):
        write_value('LicenseCrossReference', reference, out)
    write_text_value('ExtractedText', lics.text, out)
def write_document(document, out, validate=True):
    """
    Write an SPDX tag value document.
    - document - spdx.document instance.
    - out - file like object that will be written to.
    Optionally `validate` the document before writing and raise
    InvalidDocumentError (carrying the validation messages) if it
    does not validate.
    """
    # Only pay for validation when the caller asked for it; the
    # original validated unconditionally and ignored the result
    # when validate was False.
    if validate:
        messages = []
        messages = document.validate(messages)
        if messages:
            raise InvalidDocumentError(messages)
    # Write out document information
    out.write('# Document Information\n\n')
    write_value('SPDXVersion', str(document.version), out)
    write_value('DataLicense', document.data_license.identifier, out)
    write_value('DocumentName', document.name, out)
    write_value('SPDXID', 'SPDXRef-DOCUMENT', out)
    write_value('DocumentNamespace', document.namespace, out)
    if document.has_comment:
        write_text_value('DocumentComment', document.comment, out)
    for doc_ref in document.ext_document_references:
        doc_ref_str = ' '.join([doc_ref.external_document_id,
                                doc_ref.spdx_document_uri,
                                doc_ref.check_sum.identifier + ':' +
                                doc_ref.check_sum.value])
        write_value('ExternalDocumentRef', doc_ref_str, out)
    write_separators(out)
    # Write out creation info
    write_creation_info(document.creation_info, out)
    write_separators(out)
    # Write sorted reviews
    for review in sorted(document.reviews):
        write_review(review, out)
        write_separators(out)
    # Write sorted annotations
    for annotation in sorted(document.annotations):
        write_annotation(annotation, out)
        write_separators(out)
    # Write out package info
    write_package(document.package, out)
    write_separators(out)
    out.write('# Extracted Licenses\n\n')
    for lic in sorted(document.extracted_licenses):
        write_extracted_licenses(lic, out)
        write_separators(out)
def checksum_from_sha1(value):
    """
    Return an spdx.checksum.Algorithm instance representing the SHA1
    checksum, or None if value does not match CHECKSUM_RE.
    """
    # More constrained regex at lexer level.  Raw string avoids the
    # invalid-escape-sequence DeprecationWarning on Python 3.6+.
    CHECKSUM_RE = re.compile(r'SHA1:\s*(\S+)', re.UNICODE)
    match = CHECKSUM_RE.match(value)
    if match:
        return checksum.Algorithm(identifier='SHA1', value=match.group(1))
    else:
        return None
def str_from_text(text):
    """
    Return content of a free form text block as a string, or None if
    text is not wrapped in <text>...</text> tags.
    """
    # Raw string avoids the invalid-escape-sequence warning on
    # Python 3.6+ and keeps the pattern readable.
    REGEX = re.compile(r'<text>((.|\n)+)</text>', re.UNICODE)
    match = REGEX.match(text)
    if match:
        return match.group(1)
    else:
        return None
def set_doc_version(self, doc, value):
    """
    Set the document version.
    Raise SPDXValueError if malformed value, CardinalityError
    if already defined.
    """
    # Guard clauses: reject a duplicate, then reject a malformed value.
    if self.doc_version_set:
        raise CardinalityError('Document::Version')
    self.doc_version_set = True
    m = self.VERS_STR_REGEX.match(value)
    if m is None:
        raise SPDXValueError('Document::Version')
    doc.version = version.Version(major=int(m.group(1)),
                                  minor=int(m.group(2)))
    return True
def set_doc_data_lics(self, doc, lics):
    """Sets the document data license.
    Raises value error if malformed value, CardinalityError
    if already defined.
    """
    # Guard clauses: duplicate first, then a validity check.
    if self.doc_data_lics_set:
        raise CardinalityError('Document::DataLicense')
    self.doc_data_lics_set = True
    if not validations.validate_data_lics(lics):
        raise SPDXValueError('Document::DataLicense')
    doc.data_license = document.License.from_identifier(lics)
    return True
def set_doc_name(self, doc, name):
    """Sets the document name.
    Raises CardinalityError if already defined.
    """
    # Guard clause: a document may be named only once.
    if self.doc_name_set:
        raise CardinalityError('Document::Name')
    self.doc_name_set = True
    doc.name = name
    return True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.