code: string
signature: string
docstring: string
loss_without_docstring: float64
loss_with_docstring: float64
factor: float64
time_taken_ms = time_taken * 1000
if time_taken_ms <= self.timer_ok:
    color = 'green'
elif time_taken_ms <= self.timer_warning:
    color = 'yellow'
else:
    color = 'red'
return color
def _get_result_color(self, time_taken)
Get time taken result color.
2.3474
2.175613
1.07896
if self._threshold is None:
    self._threshold = {
        'error': self.timer_warning,
        'warning': self.timer_ok,
    }[self.timer_fail]
return self._threshold
def threshold(self)
Get maximum test time allowed when --timer-fail option is used.
5.897596
3.590379
1.642611
if self.timer_no_color:
    return "{0:0.4f}s".format(time_taken)
return _colorize("{0:0.4f}s".format(time_taken), color)
def _colored_time(self, time_taken, color=None)
Get formatted and colored string for a given time taken.
3.516042
3.28722
1.06961
return "[{0}] {3:04.2f}% {1}: {2}".format( status, test, self._colored_time(time_taken, color), percent )
def _format_report_line(self, test, time_taken, color, status, percent)
Format a single report line.
4.836061
4.676539
1.034111
time_taken = self._register_time(test, 'success')
if self.timer_fail is not None and time_taken * 1000.0 > self.threshold:
    test.fail('Test was too slow (took {0:0.4f}s, threshold was '
              '{1:0.4f}s)'.format(time_taken, self.threshold / 1000.0))
def addSuccess(self, test, capt=None)
Called when a test passes.
4.277558
4.127474
1.036362
super(TimerPlugin, self).options(parser, env) # timer top n parser.add_option( "--timer-top-n", action="store", default="-1", dest="timer_top_n", help=( "When the timer plugin is enabled, only show the N tests that " "consume more time. The default, -1, shows all tests." ), ) parser.add_option( "--timer-json-file", action="store", default=None, dest="json_file", help=( "Save the results of the timing and status of each tests in " "said Json file." ), ) _time_units_help = ("Default time unit is a second, but you can set " "it explicitly (e.g. 1s, 500ms)") # timer ok parser.add_option( "--timer-ok", action="store", default=1, dest="timer_ok", help=( "Normal execution time. Such tests will be highlighted in " "green. {units_help}.".format(units_help=_time_units_help) ), ) # time warning parser.add_option( "--timer-warning", action="store", default=3, dest="timer_warning", help=( "Warning about execution time to highlight slow tests in " "yellow. Tests which take more time will be highlighted in " "red. {units_help}.".format(units_help=_time_units_help) ), ) # Windows + nosetests does not support colors (even with colorama). if not IS_NT: parser.add_option( "--timer-no-color", action="store_true", default=False, dest="timer_no_color", help="Don't colorize output (useful for non-tty output).", ) # timer filter parser.add_option( "--timer-filter", action="store", default=None, dest="timer_filter", help="Show filtered results only (ok,warning,error).", ) # timer fail parser.add_option( "--timer-fail", action="store", default=None, dest="timer_fail", choices=('warning', 'error'), help="Fail tests that exceed a threshold (warning,error)", )
def options(self, parser, env=os.environ)
Register commandline options.
3.132705
3.099935
1.010571
text = self._parse_input(text)
sentences, unprocessed_sentences = self._tokenizer.tokenize_sentences(text)
length = self._parse_summary_length(length, len(sentences))
if length == len(sentences):
    return unprocessed_sentences

# Compute the word frequency matrix. If norm is set to 'l1' or 'l2' then words
# are normalized by the length of their associated sentences (such that each
# vector of sentence terms sums to 1).
word_matrix = self._compute_matrix(sentences, weighting=weighting, norm=norm)

# Build the similarity graph by calculating the number of overlapping words
# between all combinations of sentences.
similarity_matrix = (word_matrix * word_matrix.T)
similarity_graph = networkx.from_scipy_sparse_matrix(similarity_matrix)
scores = networkx.pagerank(similarity_graph)

ranked_sentences = sorted(
    ((score, ndx) for ndx, score in scores.items()), reverse=True
)

top_sentences = [ranked_sentences[i][1] for i in range(length)]
top_sentences.sort()

return [unprocessed_sentences[i] for i in top_sentences]
def summarize(self, text, length=5, weighting='frequency', norm=None)
Implements the TextRank summarization algorithm, which closely follows the PageRank algorithm for ranking web pages.
:param text: a string of text to be summarized, path to a text file, or URL starting with http
:param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage of the original document (e.g. 0.5)
:param weighting: 'frequency', 'binary' or 'tfidf' weighting of sentence terms ('frequency' by default)
:param norm: if 'l1' or 'l2', normalizes words by the length of their associated sentence to "down-weight" the voting power of long sentences (None by default)
:return: list of sentences for the summary
3.6871
3.423466
1.077008
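For reference, the ranking step above boils down to running PageRank over a sentence-similarity graph. A minimal sketch of that step, using made-up similarity weights and assuming only networkx is installed:

import networkx as nx

# Hypothetical pairwise sentence similarities (sentence index pairs -> overlap weight).
similarities = {(0, 1): 0.4, (1, 2): 0.7, (0, 2): 0.1}

graph = nx.Graph()
for (i, j), weight in similarities.items():
    graph.add_edge(i, j, weight=weight)

scores = nx.pagerank(graph)                              # PageRank over the similarity graph
top = sorted(scores, key=scores.get, reverse=True)[:2]   # the two best-scoring sentences
print(sorted(top))                                       # report them in document order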
if isinstance(tokens, (list, tuple)):
    return [word for word in tokens if word.lower() not in self._stopwords]
else:
    return ' '.join(
        [word for word in tokens.split(' ') if word.lower() not in self._stopwords]
    )
def remove_stopwords(self, tokens)
Remove all stopwords from a list of word tokens or a string of text.
2.191643
1.961329
1.117427
if self.stemmer:
    return unicode_to_ascii(self._stemmer.stem(word))
else:
    return word
def stem(self, word)
Perform stemming on an input word.
4.279491
4.226661
1.012499
chars_to_strip = ''.join(
    set(list(punctuation)).union(set(list(include))) - set(list(exclude))
)
return text.strip(chars_to_strip)
def strip_punctuation(text, exclude='', include='')
Strip leading and trailing punctuation from an input string.
3.543371
3.64472
0.972193
return [
    self.strip_punctuation(word) for word in text.split(' ')
    if self.strip_punctuation(word)
]
def tokenize_words(self, text)
Tokenize an input string into a list of words (with punctuation removed).
3.739933
3.518164
1.063035
# while True:
#     old_text = text
#     text = text.replace('  ', ' ')
#     if text == old_text:
#         return text
non_spaces = re.finditer(r'[^ ]', text)
if not non_spaces:
    return text

first_non_space = non_spaces.next()
first_non_space = first_non_space.start()

last_non_space = None
for item in non_spaces:
    last_non_space = item

if not last_non_space:
    return text[first_non_space:]
else:
    last_non_space = last_non_space.end()
    return text[first_non_space:last_non_space]
def _remove_whitespace(text)
Remove excess whitespace from the ends of a given input string.
2.11695
2.107566
1.004453
punkt_params = PunktParameters() # Not using set literal to allow compatibility with Python 2.6 punkt_params.abbrev_types = set([ 'dr', 'vs', 'mr', 'mrs', 'ms', 'prof', 'mt', 'inc', 'i.e', 'e.g' ]) sentence_splitter = PunktSentenceTokenizer(punkt_params) # 1. TOKENIZE "UNPROCESSED" SENTENCES FOR DISPLAY # Need to adjust quotations for correct sentence splitting text_unprocessed = text.replace('?"', '? "').replace('!"', '! "').replace('."', '. "') # Treat line breaks as end of sentence (needed in cases where titles don't have a full stop) text_unprocessed = text_unprocessed.replace('\n', ' . ') # Perform sentence splitting unprocessed_sentences = sentence_splitter.tokenize(text_unprocessed) # Now that sentences have been split we can return them back to their normal formatting for ndx, sentence in enumerate(unprocessed_sentences): sentence = unicode_to_ascii(sentence) # Sentence splitter returns unicode strings sentence = sentence.replace('? " ', '?" ').replace('! " ', '!" ').replace('. " ', '." ') sentence = self._remove_whitespace(sentence) # Remove excess whitespace sentence = sentence[:-2] if (sentence.endswith(' .') or sentence.endswith(' . ')) else sentence unprocessed_sentences[ndx] = sentence # 2. PROCESS THE SENTENCES TO PERFORM STEMMING, STOPWORDS REMOVAL ETC. FOR MATRIX COMPUTATION processed_sentences = [self.sanitize_text(sen) for sen in unprocessed_sentences] # Sentences should contain at least 'word_threshold' significant terms filter_sentences = [i for i in range(len(processed_sentences)) if len(processed_sentences[i].replace('.', '').split(' ')) > word_threshold] processed_sentences = [processed_sentences[i] for i in filter_sentences] unprocessed_sentences = [unprocessed_sentences[i] for i in filter_sentences] return processed_sentences, unprocessed_sentences
def tokenize_sentences(self, text, word_threshold=5)
Returns a list of sentences given an input string of text. :param text: input string :param word_threshold: number of significant words that a sentence must contain to be counted (to count all sentences set equal to 1; 5 by default) :return: list of sentences
4.334776
4.326797
1.001844
paragraphs = []
paragraphs_first_pass = text.split('\n')
for p in paragraphs_first_pass:
    paragraphs_second_pass = re.split(r'\s{4,}', p)
    paragraphs += paragraphs_second_pass

# Remove empty strings from list
paragraphs = [p for p in paragraphs if p]

return paragraphs
def tokenize_paragraphs(cls, text)
Convert an input string into a list of paragraphs.
3.253529
3.091455
1.052426
u, s, v = svds(matrix, k=num_concepts)
return u, s, v
def _svd(cls, matrix, num_concepts=5)
Perform singular value decomposition for dimensionality reduction of the input matrix.
3.446561
3.348062
1.02942
text = self._parse_input(text)
sentences, unprocessed_sentences = self._tokenizer.tokenize_sentences(text)
length = self._parse_summary_length(length, len(sentences))
if length == len(sentences):
    return unprocessed_sentences

topics = self._validate_num_topics(topics, sentences)

# Generate a matrix of terms that appear in each sentence
weighting = 'binary' if binary_matrix else 'frequency'
sentence_matrix = self._compute_matrix(sentences, weighting=weighting)
sentence_matrix = sentence_matrix.transpose()

# Filter out negatives in the sparse matrix (need to do this on Vt for LSA method):
sentence_matrix = sentence_matrix.multiply(sentence_matrix > 0)

u, s, v = self._svd(sentence_matrix, num_concepts=topics)

# Only consider topics/concepts whose singular values are at least
# topic_sigma_threshold times the largest singular value
if not 0 <= topic_sigma_threshold <= 1:
    raise ValueError('Parameter topic_sigma_threshold must take a value between 0 and 1')

sigma_threshold = max(s) * topic_sigma_threshold
s[s < sigma_threshold] = 0  # Set all other singular values to zero

# Build a "length vector" containing the length (i.e. saliency) of each sentence
saliency_vec = np.dot(np.square(s), np.square(v))

top_sentences = saliency_vec.argsort()[-length:][::-1]
# Return the sentences in the order in which they appear in the document
top_sentences.sort()

return [unprocessed_sentences[i] for i in top_sentences]
def summarize(self, text, topics=4, length=5, binary_matrix=True, topic_sigma_threshold=0.5)
Implements the method of latent semantic analysis described by Steinberger and Jezek in the paper:
J. Steinberger and K. Jezek (2004). Using latent semantic analysis in text summarization and summary evaluation. Proc. ISIM '04, pp. 93-100.
:param text: a string of text to be summarized, path to a text file, or URL starting with http
:param topics: the number of topics/concepts covered in the input text (defines the degree of dimensionality reduction in the SVD step)
:param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage of the original document (e.g. 0.5)
:param binary_matrix: boolean value indicating whether the matrix of word counts should be binary (True by default)
:param topic_sigma_threshold: filters out topics/concepts with a singular value less than this percentage of the largest singular value (must be between 0 and 1, 0.5 by default)
:return: list of sentences for the summary
4.541797
4.253029
1.067897
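For reference, the SVD and saliency computation above can be sketched in isolation. The matrix below is random stand-in data, not part of the original code; scipy and numpy are assumed:

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.linalg import svds

term_matrix = csr_matrix(np.random.rand(10, 4))   # terms x sentences (stand-in data)
u, s, v = svds(term_matrix, k=2)                  # v has shape (topics, sentences)
s[s < 0.5 * s.max()] = 0                          # drop topics with weak singular values
saliency = np.dot(np.square(s), np.square(v))     # one saliency score per sentence
print(saliency.argsort()[-2:][::-1])              # indices of the two most salient sentences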
validation = "" if not isinstance(payment['amount'], int): validation += "AMOUNT_NOT_INTEGER " if not isinstance(payment['mandate_date'], datetime.date): validation += "MANDATE_DATE_INVALID_OR_NOT_DATETIME_INSTANCE" payment['mandate_date'] = str(payment['mandate_date']) if not isinstance(payment['collection_date'], datetime.date): validation += "COLLECTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE" payment['collection_date'] = str(payment['collection_date']) if validation == "": return True else: raise Exception('Payment did not validate: ' + validation)
def check_payment(self, payment)
Check the payment for required fields and validity. @param payment: The payment dict @return: True if valid. @raise Exception: if invalid parameters are encountered.
2.804175
2.829313
0.991115
if self.clean: from text_unidecode import unidecode payment['name'] = unidecode(payment['name'])[:70] payment['description'] = unidecode(payment['description'])[:140] # Validate the payment self.check_payment(payment) # Get the CstmrDrctDbtInitnNode if not self._config['batch']: # Start building the non batch payment PmtInf_nodes = self._create_PmtInf_node() PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name']) PmtInf_nodes['PmtMtdNode'].text = "DD" PmtInf_nodes['BtchBookgNode'].text = "false" PmtInf_nodes['NbOfTxsNode'].text = "1" PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str( payment['amount']) PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA" PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument'] PmtInf_nodes['SeqTpNode'].text = payment['type'] PmtInf_nodes['ReqdColltnDtNode'].text = payment['collection_date'] PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name'] PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN'] if 'BIC' in self._config: PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC'] PmtInf_nodes['ChrgBrNode'].text = "SLEV" PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name'] PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id'] PmtInf_nodes['PrtryNode'].text = "SEPA" if 'BIC' in payment: bic = True else: bic = False TX_nodes = self._create_TX_node(bic) TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency']) TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount']) TX_nodes['MndtIdNode'].text = payment['mandate_id'] TX_nodes['DtOfSgntrNode'].text = payment['mandate_date'] if bic: TX_nodes['BIC_DbtrAgt_Node'].text = payment['BIC'] TX_nodes['Nm_Dbtr_Node'].text = payment['name'] TX_nodes['IBAN_DbtrAcct_Node'].text = payment['IBAN'] TX_nodes['UstrdNode'].text = payment['description'] if not payment.get('endtoend_id', ''): payment['endtoend_id'] = make_id(self._config['name']) TX_nodes['EndToEndIdNode'].text = payment['endtoend_id'] if self._config['batch']: self._add_batch(TX_nodes, payment) else: self._add_non_batch(TX_nodes, PmtInf_nodes)
def add_payment(self, payment)
Function to add payments @param payment: The payment dict @raise exception: when payment is invalid
2.456481
2.439779
1.006846
# Retrieve the node to which we will append the group header. CstmrDrctDbtInitn_node = self._xml.find('CstmrDrctDbtInitn') # Create the header nodes. GrpHdr_node = ET.Element("GrpHdr") MsgId_node = ET.Element("MsgId") CreDtTm_node = ET.Element("CreDtTm") NbOfTxs_node = ET.Element("NbOfTxs") CtrlSum_node = ET.Element("CtrlSum") InitgPty_node = ET.Element("InitgPty") Nm_node = ET.Element("Nm") SupId_node = ET.Element("Id") OrgId_node = ET.Element("OrgId") Othr_node = ET.Element("Othr") Id_node = ET.Element("Id") # Add data to some header nodes. MsgId_node.text = self.msg_id CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S') Nm_node.text = self._config['name'] Id_node.text = self._config['creditor_id'] # Append the nodes Othr_node.append(Id_node) OrgId_node.append(Othr_node) SupId_node.append(OrgId_node) InitgPty_node.append(Nm_node) InitgPty_node.append(SupId_node) GrpHdr_node.append(MsgId_node) GrpHdr_node.append(CreDtTm_node) GrpHdr_node.append(NbOfTxs_node) GrpHdr_node.append(CtrlSum_node) GrpHdr_node.append(InitgPty_node) # Append the header to its parent CstmrDrctDbtInitn_node.append(GrpHdr_node)
def _create_header(self)
Function to create the GroupHeader (GrpHdr) in the CstmrDrctDbtInit Node
2.092019
1.891357
1.106094
ED = dict() # ED is element dict ED['PmtInfNode'] = ET.Element("PmtInf") ED['PmtInfIdNode'] = ET.Element("PmtInfId") ED['PmtMtdNode'] = ET.Element("PmtMtd") ED['BtchBookgNode'] = ET.Element("BtchBookg") ED['NbOfTxsNode'] = ET.Element("NbOfTxs") ED['CtrlSumNode'] = ET.Element("CtrlSum") ED['PmtTpInfNode'] = ET.Element("PmtTpInf") ED['SvcLvlNode'] = ET.Element("SvcLvl") ED['Cd_SvcLvl_Node'] = ET.Element("Cd") ED['LclInstrmNode'] = ET.Element("LclInstrm") ED['Cd_LclInstrm_Node'] = ET.Element("Cd") ED['SeqTpNode'] = ET.Element("SeqTp") ED['ReqdColltnDtNode'] = ET.Element("ReqdColltnDt") ED['CdtrNode'] = ET.Element("Cdtr") ED['Nm_Cdtr_Node'] = ET.Element("Nm") ED['CdtrAcctNode'] = ET.Element("CdtrAcct") ED['Id_CdtrAcct_Node'] = ET.Element("Id") ED['IBAN_CdtrAcct_Node'] = ET.Element("IBAN") ED['CdtrAgtNode'] = ET.Element("CdtrAgt") ED['FinInstnId_CdtrAgt_Node'] = ET.Element("FinInstnId") if 'BIC' in self._config: ED['BIC_CdtrAgt_Node'] = ET.Element("BIC") ED['ChrgBrNode'] = ET.Element("ChrgBr") ED['CdtrSchmeIdNode'] = ET.Element("CdtrSchmeId") ED['Nm_CdtrSchmeId_Node'] = ET.Element("Nm") ED['Id_CdtrSchmeId_Node'] = ET.Element("Id") ED['PrvtIdNode'] = ET.Element("PrvtId") ED['OthrNode'] = ET.Element("Othr") ED['Id_Othr_Node'] = ET.Element("Id") ED['SchmeNmNode'] = ET.Element("SchmeNm") ED['PrtryNode'] = ET.Element("Prtry") return ED
def _create_PmtInf_node(self)
Method to create the blank payment information nodes as a dict.
1.571746
1.514998
1.037458
ED = dict() ED['DrctDbtTxInfNode'] = ET.Element("DrctDbtTxInf") ED['PmtIdNode'] = ET.Element("PmtId") ED['EndToEndIdNode'] = ET.Element("EndToEndId") ED['InstdAmtNode'] = ET.Element("InstdAmt") ED['DrctDbtTxNode'] = ET.Element("DrctDbtTx") ED['MndtRltdInfNode'] = ET.Element("MndtRltdInf") ED['MndtIdNode'] = ET.Element("MndtId") ED['DtOfSgntrNode'] = ET.Element("DtOfSgntr") ED['DbtrAgtNode'] = ET.Element("DbtrAgt") ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId") if bic: ED['BIC_DbtrAgt_Node'] = ET.Element("BIC") ED['DbtrNode'] = ET.Element("Dbtr") ED['Nm_Dbtr_Node'] = ET.Element("Nm") ED['DbtrAcctNode'] = ET.Element("DbtrAcct") ED['Id_DbtrAcct_Node'] = ET.Element("Id") ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN") ED['RmtInfNode'] = ET.Element("RmtInf") ED['UstrdNode'] = ET.Element("Ustrd") return ED
def _create_TX_node(self, bic=True)
Method to create the blank transaction nodes as a dict. If bic is True, the BIC node will also be created.
1.8282
1.751261
1.043933
validation = "" required = ["name", "currency", "IBAN", "BIC"] for config_item in required: if config_item not in config: validation += config_item.upper() + "_MISSING " if not validation: return True else: raise Exception("Config file did not validate. " + validation)
def check_config(self, config)
Check the config file for required fields and validity. @param config: The config dict. @return: True if valid. @raise Exception: if required parameters are missing.
5.148532
5.219379
0.986426
validation = "" required = ["name", "IBAN", "BIC", "amount", "description"] for config_item in required: if config_item not in payment: validation += config_item.upper() + "_MISSING " if not isinstance(payment['amount'], int): validation += "AMOUNT_NOT_INTEGER " if 'execution_date' in payment: if not isinstance(payment['execution_date'], datetime.date): validation += "EXECUTION_DATE_INVALID_OR_NOT_DATETIME_INSTANCE" payment['execution_date'] = payment['execution_date'].isoformat() if validation == "": return True else: raise Exception('Payment did not validate: ' + validation)
def check_payment(self, payment)
Check the payment for required fields and validity. @param payment: The payment dict @return: True if valid. @raise Exception: if invalid parameters are encountered.
3.349524
3.3485
1.000306
# Retrieve the node to which we will append the group header. CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn') # Create the header nodes. GrpHdr_node = ET.Element("GrpHdr") MsgId_node = ET.Element("MsgId") CreDtTm_node = ET.Element("CreDtTm") NbOfTxs_node = ET.Element("NbOfTxs") CtrlSum_node = ET.Element("CtrlSum") InitgPty_node = ET.Element("InitgPty") Nm_node = ET.Element("Nm") # Add data to some header nodes. MsgId_node.text = self.msg_id CreDtTm_node.text = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S') Nm_node.text = self._config['name'] # Append the nodes InitgPty_node.append(Nm_node) GrpHdr_node.append(MsgId_node) GrpHdr_node.append(CreDtTm_node) GrpHdr_node.append(NbOfTxs_node) GrpHdr_node.append(CtrlSum_node) GrpHdr_node.append(InitgPty_node) # Append the header to its parent CstmrCdtTrfInitn_node.append(GrpHdr_node)
def _create_header(self)
Function to create the GroupHeader (GrpHdr) in the CstmrCdtTrfInitn Node
2.135723
1.826982
1.168989
ED = dict() # ED is element dict ED['PmtInfNode'] = ET.Element("PmtInf") ED['PmtInfIdNode'] = ET.Element("PmtInfId") ED['PmtMtdNode'] = ET.Element("PmtMtd") ED['BtchBookgNode'] = ET.Element("BtchBookg") ED['NbOfTxsNode'] = ET.Element("NbOfTxs") ED['CtrlSumNode'] = ET.Element("CtrlSum") ED['PmtTpInfNode'] = ET.Element("PmtTpInf") ED['SvcLvlNode'] = ET.Element("SvcLvl") ED['Cd_SvcLvl_Node'] = ET.Element("Cd") ED['ReqdExctnDtNode'] = ET.Element("ReqdExctnDt") ED['DbtrNode'] = ET.Element("Dbtr") ED['Nm_Dbtr_Node'] = ET.Element("Nm") ED['DbtrAcctNode'] = ET.Element("DbtrAcct") ED['Id_DbtrAcct_Node'] = ET.Element("Id") ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN") ED['DbtrAgtNode'] = ET.Element("DbtrAgt") ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId") if 'BIC' in self._config: ED['BIC_DbtrAgt_Node'] = ET.Element("BIC") ED['ChrgBrNode'] = ET.Element("ChrgBr") return ED
def _create_PmtInf_node(self)
Method to create the blank payment information nodes as a dict.
1.722986
1.637786
1.052021
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node']) TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode']) TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode']) TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode']) if TX_nodes['BIC_CdtrAgt_Node'].text is not None: TX_nodes['FinInstnId_CdtrAgt_Node'].append( TX_nodes['BIC_CdtrAgt_Node']) TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node']) TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode']) TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node']) TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode']) TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node']) TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node']) TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode']) TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode']) TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode']) self._add_to_batch_list(TX_nodes, payment)
def _add_batch(self, TX_nodes, payment)
Method to add a payment as a batch. The transaction details are already present. Will fold the nodes accordingly and then call the _add_to_batch_list function to store the batch.
1.978208
1.903226
1.039397
batch_key = payment.get('execution_date', None)

if batch_key in self._batches:
    self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
else:
    self._batches[batch_key] = []
    self._batches[batch_key].append(TX['CdtTrfTxInfNode'])

if batch_key in self._batch_totals:
    self._batch_totals[batch_key] += payment['amount']
else:
    self._batch_totals[batch_key] = payment['amount']
def _add_to_batch_list(self, TX, payment)
Method to add a transaction to the batch list. The correct batch is determined from the payment dict, and the batch will be created if it does not exist yet. This will also add the payment amount to the respective batch total.
2.45017
2.325682
1.053528
for batch_meta, batch_nodes in self._batches.items(): PmtInf_nodes = self._create_PmtInf_node() PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name']) PmtInf_nodes['PmtMtdNode'].text = "TRF" PmtInf_nodes['BtchBookgNode'].text = "true" PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA" if batch_meta: PmtInf_nodes['ReqdExctnDtNode'].text = batch_meta else: del PmtInf_nodes['ReqdExctnDtNode'] PmtInf_nodes['Nm_Dbtr_Node'].text = self._config['name'] PmtInf_nodes['IBAN_DbtrAcct_Node'].text = self._config['IBAN'] if 'BIC' in self._config: PmtInf_nodes['BIC_DbtrAgt_Node'].text = self._config['BIC'] PmtInf_nodes['ChrgBrNode'].text = "SLEV" PmtInf_nodes['NbOfTxsNode'].text = str(len(batch_nodes)) PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(self._batch_totals[batch_meta]) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtInfIdNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtMtdNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['BtchBookgNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['NbOfTxsNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['CtrlSumNode']) PmtInf_nodes['SvcLvlNode'].append(PmtInf_nodes['Cd_SvcLvl_Node']) PmtInf_nodes['PmtTpInfNode'].append(PmtInf_nodes['SvcLvlNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['PmtTpInfNode']) if 'ReqdExctnDtNode' in PmtInf_nodes: PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ReqdExctnDtNode']) PmtInf_nodes['DbtrNode'].append(PmtInf_nodes['Nm_Dbtr_Node']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrNode']) PmtInf_nodes['Id_DbtrAcct_Node'].append(PmtInf_nodes['IBAN_DbtrAcct_Node']) PmtInf_nodes['DbtrAcctNode'].append(PmtInf_nodes['Id_DbtrAcct_Node']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAcctNode']) if 'BIC' in self._config: PmtInf_nodes['FinInstnId_DbtrAgt_Node'].append(PmtInf_nodes['BIC_DbtrAgt_Node']) PmtInf_nodes['DbtrAgtNode'].append(PmtInf_nodes['FinInstnId_DbtrAgt_Node']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['DbtrAgtNode']) PmtInf_nodes['PmtInfNode'].append(PmtInf_nodes['ChrgBrNode']) for txnode in batch_nodes: PmtInf_nodes['PmtInfNode'].append(txnode) CstmrCdtTrfInitn_node = self._xml.find('CstmrCdtTrfInitn') CstmrCdtTrfInitn_node.append(PmtInf_nodes['PmtInfNode'])
def _finalize_batch(self)
Method to finalize the batch; it will iterate over the _batches dict and create a PmtInf node for each batch. The correct information (from the batch_key and batch_totals) will be inserted and the batch transaction nodes will be folded. Finally, the batches will be added to the main XML.
1.726943
1.612544
1.070943
if norm not in ('l1', 'l2', None):
    raise ValueError('Parameter "norm" can only take values "l1", "l2" or None')

# Initialise vectorizer to convert text documents into matrix of token counts
if weighting.lower() == 'binary':
    vectorizer = CountVectorizer(min_df=1, ngram_range=(1, 1), binary=True, stop_words=None)
elif weighting.lower() == 'frequency':
    vectorizer = CountVectorizer(min_df=1, ngram_range=(1, 1), binary=False, stop_words=None)
elif weighting.lower() == 'tfidf':
    vectorizer = TfidfVectorizer(min_df=1, ngram_range=(1, 1), stop_words=None)
else:
    raise ValueError('Parameter "weighting" must take one of the values "binary", "frequency" or "tfidf".')

# Extract word features from sentences using sparse vectorizer
frequency_matrix = vectorizer.fit_transform(sentences).astype(float)

# Normalize the term vectors (i.e. each row adds to 1)
if norm in ('l1', 'l2'):
    frequency_matrix = normalize(frequency_matrix, norm=norm, axis=1)

return frequency_matrix
def _compute_matrix(cls, sentences, weighting='frequency', norm=None)
Compute the matrix of term frequencies given a list of sentences
2.131073
2.102673
1.013507
text = self._parse_input(text) sentences, unprocessed_sentences = self._tokenizer.tokenize_sentences(text) length = self._parse_summary_length(length, len(sentences)) if length == len(sentences): return unprocessed_sentences matrix = self._compute_matrix(sentences, weighting='frequency') # Sum occurrences of terms over all sentences to obtain document frequency doc_frequency = matrix.sum(axis=0) if binary_matrix: matrix = (matrix != 0).astype(int) summary_sentences = [] for _ in range(length): # Take the inner product of each sentence vector with the document vector sentence_scores = matrix.dot(doc_frequency.transpose()) sentence_scores = np.array(sentence_scores.T)[0] # Grab the top sentence and add it to the summary top_sentence = sentence_scores.argsort()[-1] summary_sentences.append(top_sentence) # Remove all terms that appear in the top sentence from the document terms_in_top_sentence = (matrix[top_sentence, :] != 0).toarray() doc_frequency[terms_in_top_sentence] = 0 # Remove the top sentence from consideration by setting all its elements to zero # This does the same as matrix[top_sentence, :] = 0, but is much faster for sparse matrices matrix.data[matrix.indptr[top_sentence]:matrix.indptr[top_sentence+1]] = 0 matrix.eliminate_zeros() # Return the sentences in the order in which they appear in the document summary_sentences.sort() return [unprocessed_sentences[i] for i in summary_sentences]
def summarize(self, text, length=5, binary_matrix=True)
Implements the method of summarization by relevance score, as described by Gong and Liu in the paper: Y. Gong and X. Liu (2001). Generic text summarization using relevance measure and latent semantic analysis. Proceedings of the 24th International Conference on Research in Information Retrieval (SIGIR ’01), pp. 19–25. This method computes and ranks the cosine similarity between each sentence vector and the overall document. :param text: a string of text to be summarized, path to a text file, or URL starting with http :param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage of the original document (e.g. 0.5) :param binary_matrix: boolean value indicating whether the matrix of word counts should be binary (True by default) :return: list of sentences for the summary
3.307565
3.177863
1.040814
self._xml = ET.Element("Document") self._xml.set("xmlns", "urn:iso:std:iso:20022:tech:xsd:" + self.schema) self._xml.set("xmlns:xsi", "http://www.w3.org/2001/XMLSchema-instance") ET.register_namespace("", "urn:iso:std:iso:20022:tech:xsd:" + self.schema) ET.register_namespace("xsi", "http://www.w3.org/2001/XMLSchema-instance") n = ET.Element(self.root_el) self._xml.append(n)
def _prepare_document(self)
Build the main document node and set xml namespaces.
2.013999
1.847079
1.09037
self._finalize_batch() ctrl_sum_total = 0 nb_of_txs_total = 0 for ctrl_sum in self._xml.iter('CtrlSum'): if ctrl_sum.text is None: continue ctrl_sum_total += decimal_str_to_int(ctrl_sum.text) for nb_of_txs in self._xml.iter('NbOfTxs'): if nb_of_txs.text is None: continue nb_of_txs_total += int(nb_of_txs.text) n = self._xml.find(self.root_el) GrpHdr_node = n.find('GrpHdr') CtrlSum_node = GrpHdr_node.find('CtrlSum') NbOfTxs_node = GrpHdr_node.find('NbOfTxs') CtrlSum_node.text = int_to_decimal_str(ctrl_sum_total) NbOfTxs_node.text = str(nb_of_txs_total) # Prepending the XML version is hacky, but cElementTree only offers this # automatically if you write to a file, which we don't necessarily want. out = b"<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + ET.tostring( self._xml, "utf-8") if validate and not is_valid_xml(out, self.schema): raise ValidationError( "The output SEPA file contains validation errors. This is likely due to an illegal value in one of " "your input fields." ) return out
def export(self, validate=True)
Method to output the XML as a string. It will finalize the batches, calculate the checksums (amount sum and transaction count), fill these into the group header, and output the XML.
3.465762
3.132356
1.106439
if not using_sysrandom:
    # This is ugly, and a hack, but it makes things better than
    # the alternative of predictability. This re-seeds the PRNG
    # using a value that is hard for an attacker to predict, every
    # time a random string is required. This may change the
    # properties of the chosen random sequence slightly, but this
    # is better than absolute predictability.
    random.seed(
        hashlib.sha256(
            ("%s%s" % (
                random.getstate(),
                time.time())).encode('utf-8')
        ).digest())
return ''.join([random.choice(allowed_chars) for i in range(length)])
def get_rand_string(length=12, allowed_chars='0123456789abcdef')
Returns a securely generated random string. Taken from the Django project. With the default length of 12 and the default hexadecimal alphabet (0-9, a-f) this gives log_2(16^12) = 48 bits; with Django's a-z, A-Z, 0-9 character set it would be log_2((26+26+10)^12) =~ 71 bits.
1.91203
1.765747
1.082845
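A quick check of the entropy figures mentioned in the docstring above, using only the standard library:

import math

print(12 * math.log2(16))   # 48.0 bits for the default 16-character hex alphabet
print(12 * math.log2(62))   # ~71.45 bits for an a-z, A-Z, 0-9 alphabet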
random_string = get_rand_string(12)
timestamp = time.strftime("%Y%m%d%I%M%S")
msg_id = timestamp + "-" + random_string
return msg_id
def make_msg_id()
Create a semi random message id, by using 12 char random hex string and a timestamp. @return: string consisting of timestamp, -, random value
3.374718
3.417162
0.987579
name = re.sub(r'[^a-zA-Z0-9]', '', name)
r = get_rand_string(12)
if len(name) > 22:
    name = name[:22]
return name + "-" + r
def make_id(name)
Create a random id combined with the creditor name. @return string consisting of name (truncated at 22 chars), -, 12 char rand hex string.
3.147546
2.624198
1.199432
int_string = str(integer)
if len(int_string) < 2:
    return "0." + int_string.zfill(2)
else:
    return int_string[:-2] + "." + int_string[-2:]
def int_to_decimal_str(integer)
Helper to convert integers (representing cents) into decimal currency string. WARNING: DO NOT TRY TO DO THIS BY DIVISION, FLOATING POINT ERRORS ARE NO FUN IN FINANCIAL SYSTEMS. @param integer The amount in cents @return string The amount in currency with full stop decimal separator
2.375211
2.594682
0.915415
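Worked examples of the conversion, assuming the int_to_decimal_str helper above is in scope:

print(int_to_decimal_str(5))      # "0.05"
print(int_to_decimal_str(1250))   # "12.50"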
int_string = decimal_string.replace('.', '')
int_string = int_string.lstrip('0')
return int(int_string)
def decimal_str_to_int(decimal_string)
Helper to convert a decimal currency string into an integer (cents). WARNING: DO NOT TRY TO DO THIS BY CONVERSION AND MULTIPLICATION, FLOATING POINT ERRORS ARE NO FUN IN FINANCIAL SYSTEMS. @param string The amount in currency with full stop decimal separator @return integer The amount in cents
2.469163
3.092935
0.798324
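And the inverse direction, assuming the decimal_str_to_int helper above is in scope:

print(decimal_str_to_int("0.05"))    # 5
print(decimal_str_to_int("12.50"))   # 1250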
rdf = Graph() for source in sources: if isinstance(source, Graph): for triple in source: rdf.add(triple) continue if source == '-': f = sys.stdin else: f = open(source, 'r') if infmt: fmt = infmt else: # determine format based on file extension fmt = 'xml' # default if source.endswith('n3'): fmt = 'n3' if source.endswith('ttl'): fmt = 'n3' if source.endswith('nt'): fmt = 'nt' logging.debug("Parsing input file %s (format: %s)", source, fmt) rdf.parse(f, format=fmt) return rdf
def read_rdf(sources, infmt)
Read a list of RDF files and/or RDF graphs. May raise an Exception.
2.327431
2.330359
0.998744
ln = localname(uri)
# 1. try to match URI keys
for k, v in mapping.items():
    if k == uri:
        return v
# 2. try to match local names
for k, v in mapping.items():
    if k == ln:
        return v
# 3. try to match local names with * prefix
# try to match longest first, so sort the mapping by key length
l = list(mapping.items())
l.sort(key=lambda i: len(i[0]), reverse=True)
for k, v in l:
    if k[0] == '*' and ln.endswith(k[1:]):
        return v
raise KeyError(uri)
def mapping_get(uri, mapping)
Look up the URI in the given mapping and return the result. Throws KeyError if no matching mapping was found.
3.237261
3.203807
1.010442
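To illustrate the three-step lookup order (exact URI, then local name, then '*' suffix), here is a small sketch; the localname helper below is a stand-in for the module's own helper, which is not shown in this table:

def localname(uri):
    # stand-in: part of the URI after the last '#' or '/'
    return uri.split('#')[-1].split('/')[-1]

mapping = {
    'http://example.org/vocab#altLabel': 'exact URI match',
    'prefLabel': 'local name match',
    '*Label': 'suffix match',
}

print(mapping_get('http://example.org/vocab#altLabel', mapping))     # exact URI match
print(mapping_get('http://example.org/vocab#prefLabel', mapping))    # local name match
print(mapping_get('http://example.org/vocab#hiddenLabel', mapping))  # suffix match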
try:
    val = mapping_get(uri, mapping)
    return True
except KeyError:
    return False
def mapping_match(uri, mapping)
Determine whether the given URI matches one of the given mappings. Returns True if a match was found, False otherwise.
5.295617
5.785168
0.915378
RDFuri = RDF.uri
RDFSuri = RDFS.uri

for ns in (RDFuri, RDFSuri, OWL, SKOS, DC):
    if uri.startswith(ns):
        return True
return False
def in_general_ns(uri)
Return True iff the URI is in a well-known general RDF namespace. URI namespaces considered well-known are RDF, RDFS, OWL, SKOS and DC.
4.774052
2.995034
1.593989
# add explicit type for s, o in rdf.subject_objects(SKOS.inScheme): if not isinstance(o, Literal): rdf.add((o, RDF.type, SKOS.ConceptScheme)) else: logging.warning( "Literal value %s for skos:inScheme detected, ignoring.", o) css = list(rdf.subjects(RDF.type, SKOS.ConceptScheme)) if len(css) > 1: css.sort() cs = css[0] logging.warning( "Multiple concept schemes found. " "Selecting %s as default concept scheme.", cs) elif len(css) == 1: cs = css[0] else: cs = None return cs
def get_concept_scheme(rdf)
Return a skos:ConceptScheme contained in the model. Returns None if no skos:ConceptScheme is present.
2.964571
2.961914
1.000897
# pick a concept conc = rdf.value(None, RDF.type, SKOS.Concept, any=True) if conc is None: logging.critical( "Namespace auto-detection failed. " "Set namespace using the --namespace option.") sys.exit(1) ln = localname(conc) ns = URIRef(conc.replace(ln, '')) if ns.strip() == '': logging.critical( "Namespace auto-detection failed. " "Set namespace using the --namespace option.") sys.exit(1) logging.info( "Namespace auto-detected to '%s' " "- you can override this with the --namespace option.", ns) return ns
def detect_namespace(rdf)
Try to automatically detect the URI namespace of the vocabulary. Return namespace as URIRef.
4.091846
3.977736
1.028687
ont = None if not ns: # see if there's an owl:Ontology and use that to determine namespace onts = list(rdf.subjects(RDF.type, OWL.Ontology)) if len(onts) > 1: onts.sort() ont = onts[0] logging.warning( "Multiple owl:Ontology instances found. " "Creating concept scheme from %s.", ont) elif len(onts) == 1: ont = onts[0] else: ont = None if not ont: logging.info( "No skos:ConceptScheme or owl:Ontology found. " "Using namespace auto-detection for creating concept scheme.") ns = detect_namespace(rdf) elif ont.endswith('/') or ont.endswith('#') or ont.endswith(':'): ns = ont else: ns = ont + '/' NS = Namespace(ns) cs = NS[lname] rdf.add((cs, RDF.type, SKOS.ConceptScheme)) if ont is not None: rdf.remove((ont, RDF.type, OWL.Ontology)) # remove owl:imports declarations for o in rdf.objects(ont, OWL.imports): rdf.remove((ont, OWL.imports, o)) # remove protege specific properties for p, o in rdf.predicate_objects(ont): prot = URIRef( 'http://protege.stanford.edu/plugins/owl/protege#') if p.startswith(prot): rdf.remove((ont, p, o)) # move remaining properties (dc:title etc.) of the owl:Ontology into # the skos:ConceptScheme replace_uri(rdf, ont, cs) return cs
def create_concept_scheme(rdf, ns, lname='')
Create a skos:ConceptScheme in the model and return it.
3.034503
2.947556
1.029498
# check whether the concept scheme is unlabeled, and label it if possible labels = list(rdf.objects(cs, RDFS.label)) + \ list(rdf.objects(cs, SKOS.prefLabel)) if len(labels) == 0: if not label: logging.warning( "Concept scheme has no label(s). " "Use --label option to set the concept scheme label.") else: logging.info( "Unlabeled concept scheme detected. Setting label to '%s'" % label) rdf.add((cs, RDFS.label, Literal(label, language))) if set_modified: curdate = datetime.datetime.utcnow().replace(microsecond=0).isoformat() + 'Z' rdf.remove((cs, DCTERMS.modified, None)) rdf.add((cs, DCTERMS.modified, Literal(curdate, datatype=XSD.dateTime)))
def initialize_concept_scheme(rdf, cs, label, language, set_modified)
Initialize a concept scheme: Optionally add a label if the concept scheme doesn't have a label, and optionally add a dct:modified timestamp.
2.645287
2.622118
1.008836
logging.debug("performing SPARQL Update transformation") if update_query[0] == '@': # actual query should be read from file update_query = file(update_query[1:]).read() logging.debug("update query: %s", update_query) rdf.update(update_query)
def transform_sparql_update(rdf, update_query)
Perform a SPARQL Update transformation on the RDF data.
4.808278
4.128179
1.164746
logging.debug("performing SPARQL CONSTRUCT transformation") if construct_query[0] == '@': # actual query should be read from file construct_query = file(construct_query[1:]).read() logging.debug("CONSTRUCT query: %s", construct_query) newgraph = Graph() for triple in rdf.query(construct_query): newgraph.add(triple) return newgraph
def transform_sparql_construct(rdf, construct_query)
Perform a SPARQL CONSTRUCT query on the RDF data and return a new graph.
3.974929
3.744711
1.061478
# find out all the types used in the model types = set() for s, o in rdf.subject_objects(RDF.type): if o not in typemap and in_general_ns(o): continue types.add(o) for t in types: if mapping_match(t, typemap): newval = mapping_get(t, typemap) newuris = [v[0] for v in newval] logging.debug("transform class %s -> %s", t, str(newuris)) if newuris[0] is None: # delete all instances for inst in rdf.subjects(RDF.type, t): delete_uri(rdf, inst) delete_uri(rdf, t) else: replace_object(rdf, t, newuris, predicate=RDF.type) else: logging.info("Don't know what to do with type %s", t)
def transform_concepts(rdf, typemap)
Transform Concepts into new types, as defined by the config file.
3.891939
3.796669
1.025093
affected_types = (SKOS.Concept, SKOS.Collection, SKOSEXT.DeprecatedConcept) props = set() for t in affected_types: for conc in rdf.subjects(RDF.type, t): for p, o in rdf.predicate_objects(conc): if isinstance(o, Literal) \ and (p in literalmap or not in_general_ns(p)): props.add(p) for p in props: if mapping_match(p, literalmap): newval = mapping_get(p, literalmap) newuris = [v[0] for v in newval] logging.debug("transform literal %s -> %s", p, str(newuris)) replace_predicate( rdf, p, newuris, subjecttypes=affected_types) else: logging.info("Don't know what to do with literal %s", p)
def transform_literals(rdf, literalmap)
Transform literal properties of Concepts, as defined by config file.
4.292887
4.133091
1.038662
affected_types = (SKOS.Concept, SKOS.Collection, SKOSEXT.DeprecatedConcept) props = set() for t in affected_types: for conc in rdf.subjects(RDF.type, t): for p, o in rdf.predicate_objects(conc): if isinstance(o, (URIRef, BNode)) \ and (p in relationmap or not in_general_ns(p)): props.add(p) for p in props: if mapping_match(p, relationmap): newval = mapping_get(p, relationmap) logging.debug("transform relation %s -> %s", p, str(newval)) replace_predicate( rdf, p, newval, subjecttypes=affected_types) else: logging.info("Don't know what to do with relation %s", p)
def transform_relations(rdf, relationmap)
Transform YSO-style concept relations into SKOS equivalents.
4.157026
4.064611
1.022737
if not aggregates: logging.debug("removing aggregate concepts") aggregate_concepts = [] relation = relationmap.get( OWL.equivalentClass, [(OWL.equivalentClass, False)])[0][0] for conc, eq in rdf.subject_objects(relation): eql = rdf.value(eq, OWL.unionOf, None) if eql is None: continue if aggregates: aggregate_concepts.append(conc) for item in rdf.items(eql): rdf.add((conc, SKOS.narrowMatch, item)) # remove the old equivalentClass-unionOf-rdf:List structure rdf.remove((conc, relation, eq)) rdf.remove((eq, RDF.type, OWL.Class)) rdf.remove((eq, OWL.unionOf, eql)) # remove the rdf:List structure delete_uri(rdf, eql) if not aggregates: delete_uri(rdf, conc) if len(aggregate_concepts) > 0: ns = cs.replace(localname(cs), '') acs = create_concept_scheme(rdf, ns, 'aggregateconceptscheme') logging.debug("creating aggregate concept scheme %s", acs) for conc in aggregate_concepts: rdf.add((conc, SKOS.inScheme, acs))
def transform_aggregate_concepts(rdf, cs, relationmap, aggregates)
Transform YSO-style AggregateConcepts into skos:Concepts within their own skos:ConceptScheme, linked to the regular concepts with SKOS.narrowMatch relationships. If aggregates is False, remove all aggregate concepts instead.
3.763691
3.454751
1.089425
deprecated_concepts = []

for conc in rdf.subjects(RDF.type, SKOSEXT.DeprecatedConcept):
    rdf.add((conc, RDF.type, SKOS.Concept))
    rdf.add((conc, OWL.deprecated, Literal("true", datatype=XSD.boolean)))
    deprecated_concepts.append(conc)

if len(deprecated_concepts) > 0:
    ns = cs.replace(localname(cs), '')
    dcs = create_concept_scheme(
        rdf, ns, 'deprecatedconceptscheme')
    logging.debug("creating deprecated concept scheme %s", dcs)
    for conc in deprecated_concepts:
        rdf.add((conc, SKOS.inScheme, dcs))
def transform_deprecated_concepts(rdf, cs)
Transform deprecated concepts so they are in their own concept scheme.
3.32222
3.216187
1.032968
# 1. first enrich mapping relationships (because they affect regular ones) if enrich_mappings: infer.skos_symmetric_mappings(rdf) infer.skos_hierarchical_mappings(rdf, use_narrower) # 2. then enrich regular relationships # related <-> related infer.skos_related(rdf) # broaderGeneric -> broader + inverse narrowerGeneric for s, o in rdf.subject_objects(SKOSEXT.broaderGeneric): rdf.add((s, SKOS.broader, o)) # broaderPartitive -> broader + inverse narrowerPartitive for s, o in rdf.subject_objects(SKOSEXT.broaderPartitive): rdf.add((s, SKOS.broader, o)) infer.skos_hierarchical(rdf, use_narrower) # transitive closure: broaderTransitive and narrowerTransitive if use_transitive: infer.skos_transitive(rdf, use_narrower) else: # transitive relationships are not wanted, so remove them for s, o in rdf.subject_objects(SKOS.broaderTransitive): rdf.remove((s, SKOS.broaderTransitive, o)) for s, o in rdf.subject_objects(SKOS.narrowerTransitive): rdf.remove((s, SKOS.narrowerTransitive, o)) infer.skos_topConcept(rdf)
def enrich_relations(rdf, enrich_mappings, use_narrower, use_transitive)
Enrich the SKOS relations according to SKOS semantics, including subproperties of broader and symmetric related properties. If use_narrower is True, include inverse narrower relations for all broader relations. If use_narrower is False, instead remove all narrower relations, replacing them with inverse broader relations. If use_transitive is True, calculate transitive hierarchical relationships. (broaderTransitive, and also narrowerTransitive if use_narrower is True) and include them in the model.
2.897813
2.701158
1.072804
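As a minimal illustration of the broader-to-narrower part of this enrichment, the sketch below adds the inverse triples directly with rdflib instead of going through the project's infer helpers:

from rdflib import Graph, Namespace, URIRef

SKOS = Namespace("http://www.w3.org/2004/02/skos/core#")
g = Graph()
a = URIRef("http://example.org/a")
b = URIRef("http://example.org/b")
g.add((a, SKOS.broader, b))

# add the inverse skos:narrower relation for every skos:broader triple
for s, o in g.subject_objects(SKOS.broader):
    g.add((o, SKOS.narrower, s))

print((b, SKOS.narrower, a) in g)   # True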
for cs in sorted(rdf.subjects(RDF.type, SKOS.ConceptScheme)): for conc in sorted(rdf.subjects(SKOS.inScheme, cs)): if (conc, RDF.type, SKOS.Concept) not in rdf: continue # not a Concept, so can't be a top concept # check whether it's a top concept broader = rdf.value(conc, SKOS.broader, None, any=True) if broader is None: # yes it is a top concept! if (cs, SKOS.hasTopConcept, conc) not in rdf and \ (conc, SKOS.topConceptOf, cs) not in rdf: if mark_top_concepts: logging.info( "Marking loose concept %s " "as top concept of scheme %s", conc, cs) rdf.add((cs, SKOS.hasTopConcept, conc)) rdf.add((conc, SKOS.topConceptOf, cs)) else: logging.debug( "Not marking loose concept %s as top concept " "of scheme %s, as mark_top_concepts is disabled", conc, cs)
def setup_top_concepts(rdf, mark_top_concepts)
Determine the top concepts of each concept scheme and mark them using hasTopConcept/topConceptOf.
2.478118
2.440583
1.01538
for conc in rdf.subjects(RDF.type, SKOS.Concept):
    # check concept scheme
    cs = rdf.value(conc, SKOS.inScheme, None, any=True)
    if cs is None:  # need to set inScheme
        rdf.add((conc, SKOS.inScheme, defaultcs))
def setup_concept_scheme(rdf, defaultcs)
Make sure all concepts have an inScheme property, using the given default concept scheme if necessary.
3.565294
3.076489
1.158884
for t in (OWL.Class, RDFS.Class): for cl in rdf.subjects(RDF.type, t): # SKOS classes may be safely removed if cl.startswith(SKOS): logging.debug("removing SKOS class definition: %s", cl) replace_subject(rdf, cl, None) continue # if there are instances of the class, keep the class def if rdf.value(None, RDF.type, cl, any=True) is not None: continue # if the class is used in a domain/range/equivalentClass # definition, keep the class def if rdf.value(None, RDFS.domain, cl, any=True) is not None: continue if rdf.value(None, RDFS.range, cl, any=True) is not None: continue if rdf.value(None, OWL.equivalentClass, cl, any=True) is not None: continue # if the class is also a skos:Concept or skos:Collection, only # remove its rdf:type if (cl, RDF.type, SKOS.Concept) in rdf \ or (cl, RDF.type, SKOS.Collection) in rdf: logging.debug("removing classiness of %s", cl) rdf.remove((cl, RDF.type, t)) else: # remove it completely logging.debug("removing unused class definition: %s", cl) replace_subject(rdf, cl, None)
def cleanup_classes(rdf)
Remove unnecessary class definitions: definitions of SKOS classes or unused classes. If a class is also a skos:Concept or skos:Collection, remove the 'classness' of it but leave the Concept/Collection.
2.64004
2.42167
1.090173
for t in (RDF.Property, OWL.DatatypeProperty, OWL.ObjectProperty, OWL.SymmetricProperty, OWL.TransitiveProperty, OWL.InverseFunctionalProperty, OWL.FunctionalProperty): for prop in rdf.subjects(RDF.type, t): if prop.startswith(SKOS): logging.debug( "removing SKOS property definition: %s", prop) replace_subject(rdf, prop, None) continue if prop.startswith(DC): logging.debug("removing DC property definition: %s", prop) replace_subject(rdf, prop, None) continue # if there are triples using the property, keep the property def if len(list(rdf.subject_objects(prop))) > 0: continue logging.debug("removing unused property definition: %s", prop) replace_subject(rdf, prop, None)
def cleanup_properties(rdf)
Remove unnecessary property definitions. Removes SKOS and DC property definitions and definitions of unused properties.
2.430045
2.273691
1.068767
starttime = time.time() # This is almost a non-recursive breadth-first search algorithm, but a set # is used as the "open" set instead of a FIFO, and an arbitrary element of # the set is searched. This is slightly faster than DFS (using a stack) # and much faster than BFS (using a FIFO). seen = set() # used as the "closed" set to_search = set([res]) # used as the "open" set while len(to_search) > 0: res = to_search.pop() if res in seen: continue seen.add(res) # res as subject for p, o in rdf.predicate_objects(res): if isinstance(p, URIRef) and p not in seen: to_search.add(p) if isinstance(o, URIRef) and o not in seen: to_search.add(o) # res as predicate for s, o in rdf.subject_objects(res): if isinstance(s, URIRef) and s not in seen: to_search.add(s) if isinstance(o, URIRef) and o not in seen: to_search.add(o) # res as object for s, p in rdf.subject_predicates(res): if isinstance(s, URIRef) and s not in seen: to_search.add(s) if isinstance(p, URIRef) and p not in seen: to_search.add(p) endtime = time.time() logging.debug("find_reachable took %f seconds", (endtime - starttime)) return seen
def find_reachable(rdf, res)
Return the set of resources reachable from the given resource by graph traversal, including the starting resource itself.
2.275306
2.289805
0.993668
all_subjects = set(rdf.subjects())

logging.debug("total subject resources: %d", len(all_subjects))

reachable = find_reachable(rdf, SKOS.Concept)
nonreachable = all_subjects - reachable

logging.debug("deleting %s non-reachable resources", len(nonreachable))

for subj in nonreachable:
    delete_uri(rdf, subj)
def cleanup_unreachable(rdf)
Remove triples which cannot be reached from the concepts by graph traversal.
4.256314
4.177143
1.018954
starttime = time.time()

if check.hierarchy_cycles(rdf, break_cycles):
    logging.info(
        "Some concepts not reached in initial cycle detection. "
        "Re-checking for loose concepts.")
    setup_top_concepts(rdf, mark_top_concepts)

check.disjoint_relations(rdf, not keep_related)
check.hierarchical_redundancy(rdf, eliminate_redundancy)

endtime = time.time()
logging.debug("check_hierarchy took %f seconds", (endtime - starttime))
def check_hierarchy(rdf, break_cycles, keep_related, mark_top_concepts, eliminate_redundancy)
Check for, and optionally fix, problems in the skos:broader hierarchy using a recursive depth-first search.
:param Graph rdf: An rdflib.graph.Graph object.
:param bool break_cycles: Break skos:broader cycles.
:param bool keep_related: Keep skos:related relations even when they overlap with skos:broaderTransitive (do not remove them).
:param bool mark_top_concepts: Mark loose concepts as top concepts of their scheme.
:param bool eliminate_redundancy: Remove skos:broader between two concepts otherwise connected by skos:broaderTransitive.
5.724908
6.65345
0.860442
cfg = Config() for key in config: if hasattr(cfg, key): setattr(cfg, key, config[key]) config = cfg namespaces = config.namespaces typemap = config.types literalmap = config.literals relationmap = config.relations logging.debug("Skosify starting. $Revision$") starttime = time.time() logging.debug("Phase 1: Parsing input files") try: voc = read_rdf(sources, config.from_format) except: logging.critical("Parsing failed. Exception: %s", str(sys.exc_info()[1])) sys.exit(1) inputtime = time.time() logging.debug("Phase 2: Performing inferences") if config.update_query is not None: transform_sparql_update(voc, config.update_query) if config.construct_query is not None: voc = transform_sparql_construct(voc, config.construct_query) if config.infer: logging.debug("doing RDFS subclass and properties inference") infer.rdfs_classes(voc) infer.rdfs_properties(voc) logging.debug("Phase 3: Setting up namespaces") for prefix, uri in namespaces.items(): voc.namespace_manager.bind(prefix, uri) logging.debug("Phase 4: Transforming concepts, literals and relations") # transform concepts, literals and concept relations transform_concepts(voc, typemap) transform_literals(voc, literalmap) transform_relations(voc, relationmap) # special transforms for labels: whitespace, prefLabel vs altLabel transform_labels(voc, config.default_language) # special transforms for collections + aggregate and deprecated concepts transform_collections(voc) # find/create concept scheme cs = get_concept_scheme(voc) if not cs: cs = create_concept_scheme(voc, config.namespace) initialize_concept_scheme(voc, cs, label=config.label, language=config.default_language, set_modified=config.set_modified) transform_aggregate_concepts( voc, cs, relationmap, config.aggregates) transform_deprecated_concepts(voc, cs) logging.debug("Phase 5: Performing SKOS enrichments") # enrichments: broader <-> narrower, related <-> related enrich_relations(voc, config.enrich_mappings, config.narrower, config.transitive) logging.debug("Phase 6: Cleaning up") # clean up unused/unnecessary class/property definitions and unreachable # triples if config.cleanup_properties: cleanup_properties(voc) if config.cleanup_classes: cleanup_classes(voc) if config.cleanup_unreachable: cleanup_unreachable(voc) logging.debug("Phase 7: Setting up concept schemes and top concepts") # setup inScheme and hasTopConcept setup_concept_scheme(voc, cs) setup_top_concepts(voc, config.mark_top_concepts) logging.debug("Phase 8: Checking concept hierarchy") # check hierarchy for cycles check_hierarchy(voc, config.break_cycles, config.keep_related, config.mark_top_concepts, config.eliminate_redundancy) logging.debug("Phase 9: Checking labels") # check for duplicate labels check_labels(voc, config.preflabel_policy) processtime = time.time() logging.debug("reading input file took %d seconds", (inputtime - starttime)) logging.debug("processing took %d seconds", (processtime - inputtime)) logging.debug("Phase 10: Writing output") return voc
def skosify(*sources, **config)
Convert, extend, and check SKOS vocabulary.
3.602065
3.564422
1.010561
if curie == '':
    return None

if sys.version < '3' and not isinstance(curie, type(u'')):
    # Python 2 ConfigParser gives raw byte strings
    curie = curie.decode('UTF-8')  # ...make those into Unicode objects

if curie.startswith('[') and curie.endswith(']'):
    # decode SafeCURIE
    curie = curie[1:-1]

if ':' in curie:
    ns, localpart = curie.split(':', 1)
elif '.' in curie:
    ns, localpart = curie.split('.', 1)
else:
    return curie

if ns in namespaces:
    return URIRef(namespaces[ns].term(localpart))
else:
    logging.warning("Unknown namespace prefix %s", ns)
    return URIRef(curie)
def expand_curielike(namespaces, curie)
Expand a CURIE (or a CURIE-like string with a period instead of colon as separator) into URIRef. If the provided curie is not a CURIE, return it unchanged.
3.732904
3.556019
1.049743
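For illustration, assuming the namespaces dict maps the prefix 'skos' to an rdflib Namespace for http://www.w3.org/2004/02/skos/core#, inputs would expand roughly as follows:

# 'skos:prefLabel'    -> URIRef('http://www.w3.org/2004/02/skos/core#prefLabel')
# 'skos.prefLabel'    -> same as above (period accepted as separator)
# '[skos:prefLabel]'  -> same as above (SafeCURIE brackets stripped first)
# 'unknown:foo'       -> URIRef('unknown:foo'), plus a warning about the unknown prefix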
vals = [v.strip() for v in val.split(',')]
ret = []
for v in vals:
    inverse = False
    if v.startswith('^'):
        inverse = True
        v = v[1:]
    ret.append((expand_curielike(namespaces, v), inverse))
return ret
def expand_mapping_target(namespaces, val)
Expand a mapping target, expressed as a comma-separated list of CURIE-like strings potentially prefixed with ^ to express inverse properties, into a list of (uri, inverse) tuples, where uri is a URIRef and inverse is a boolean.
3.819134
2.692412
1.41848
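As a worked example under the same assumed 'skos' prefix, the value 'skos:broader, ^skos:narrower' expands to [(SKOS.broader, False), (SKOS.narrower, True)], meaning the second property is applied in the inverse direction.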
if hasattr(file, 'readline'):  # we have a file object
    if sys.version_info >= (3, 2):
        cfgparser.read_file(file)  # Added in Python 3.2
    else:
        cfgparser.readfp(file)  # Deprecated since Python 3.2
else:  # we have a file name
    cfgparser.read(file)
def read_file(self, cfgparser, file)
Read configuration from file.
3.067076
2.801388
1.094842
if fromuri == touri:
    return
for p, o in rdf.predicate_objects(fromuri):
    rdf.remove((fromuri, p, o))
    if touri is not None:
        if not isinstance(touri, (list, tuple)):
            touri = [touri]
        for uri in touri:
            rdf.add((uri, p, o))
def replace_subject(rdf, fromuri, touri)
Replace occurrences of fromuri as subject with touri in given model. If touri=None, will delete all occurrences of fromuri instead. If touri is a list or tuple of URIRefs, all values will be inserted.
2.023335
1.979343
1.022226
if fromuri == touri: return for s, o in rdf.subject_objects(fromuri): if subjecttypes is not None: typeok = False for t in subjecttypes: if (s, RDF.type, t) in rdf: typeok = True if not typeok: continue rdf.remove((s, fromuri, o)) if touri is not None: if not isinstance(touri, (list, tuple)): touri = [touri] for val in touri: if not isinstance(val, tuple): val = (val, False) uri, inverse = val if uri is None: continue if inverse: rdf.add((o, uri, s)) else: rdf.add((s, uri, o))
def replace_predicate(rdf, fromuri, touri, subjecttypes=None, inverse=False)
Replace occurrences of fromuri as predicate with touri in the given model. If touri=None, will delete all occurrences of fromuri instead. If touri is a list or tuple of URIRefs, all values will be inserted. If touri is a list of (URIRef, boolean) tuples, the boolean value will be used to determine whether an inverse property is created (if True) or not (if False). If a subjecttypes sequence is given, modify only those triples where the subject is one of the provided types.
2.229454
2.040603
1.092547
if fromuri == touri: return for s, p in rdf.subject_predicates(fromuri): if predicate is not None and p != predicate: continue rdf.remove((s, p, fromuri)) if touri is not None: if not isinstance(touri, (list, tuple)): touri = [touri] for uri in touri: rdf.add((s, p, uri))
def replace_object(rdf, fromuri, touri, predicate=None)
Replace all occurrences of fromuri as object with touri in the given model. If touri=None, will delete all occurrences of fromuri instead. If touri is a list or tuple of URIRef, all values will be inserted. If predicate is given, modify only triples with the given predicate.
2.042299
2.070318
0.986466
replace_subject(rdf, fromuri, touri) replace_predicate(rdf, fromuri, touri) replace_object(rdf, fromuri, touri)
def replace_uri(rdf, fromuri, touri)
Replace all occurrences of fromuri with touri in the given model. If touri is a list or tuple of URIRef, all values will be inserted. If touri=None, will delete all occurrences of fromuri instead.
1.754033
2.276271
0.770573
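A small sketch of the replace_* helpers on a toy rdflib graph; the URIs are made up for the example.

from rdflib import Graph, Namespace

EX = Namespace('http://example.org/')
g = Graph()
g.add((EX.old, EX.prop, EX.other))
g.add((EX.other, EX.prop, EX.old))

# Rewrite every occurrence of EX.old (as subject, predicate or object) to EX.new.
replace_uri(g, EX.old, EX.new)

assert (EX.new, EX.prop, EX.other) in g
assert (EX.other, EX.prop, EX.new) in g
assert (EX.old, EX.prop, EX.other) not in g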
top_concepts = sorted(rdf.subject_objects(SKOS.hasTopConcept)) status = {} for cs, root in top_concepts: _hierarchy_cycles_visit( rdf, root, None, fix, status=status) # double check that all concepts were actually visited in the search, # and visit remaining ones if necessary recheck_top_concepts = False for conc in sorted(rdf.subjects(RDF.type, SKOS.Concept)): if conc not in status: recheck_top_concepts = True _hierarchy_cycles_visit( rdf, conc, None, fix, status=status) return recheck_top_concepts
def hierarchy_cycles(rdf, fix=False)
Check if the graph contains skos:broader cycles and optionally break these. :param Graph rdf: An rdflib.graph.Graph object. :param bool fix: Fix the problem by removing any skos:broader that overlaps with skos:broaderTransitive.
4.540313
4.669197
0.972397
for conc1, conc2 in sorted(rdf.subject_objects(SKOS.related)): if conc2 in sorted(rdf.transitive_objects(conc1, SKOS.broader)): if fix: logging.warning( "Concepts %s and %s connected by both " "skos:broaderTransitive and skos:related, " "removing skos:related", conc1, conc2) rdf.remove((conc1, SKOS.related, conc2)) rdf.remove((conc2, SKOS.related, conc1)) else: logging.warning( "Concepts %s and %s connected by both " "skos:broaderTransitive and skos:related, " "but keeping it because keep_related is enabled", conc1, conc2)
def disjoint_relations(rdf, fix=False)
Check if the graph contains concepts connected by both of the semantically disjoint relations skos:related and skos:broaderTransitive (S27), and optionally remove the involved skos:related relations. :param Graph rdf: An rdflib.graph.Graph object. :param bool fix: Fix the problem by removing skos:related relations that overlap with skos:broaderTransitive.
2.970752
2.448585
1.213252
for conc, parent1 in rdf.subject_objects(SKOS.broader): for parent2 in rdf.objects(conc, SKOS.broader): if parent1 == parent2: continue # must be different if parent2 in rdf.transitive_objects(parent1, SKOS.broader): if fix: logging.warning( "Eliminating redundant hierarchical relationship: " "%s skos:broader %s", conc, parent2) rdf.remove((conc, SKOS.broader, parent2)) rdf.remove((conc, SKOS.broaderTransitive, parent2)) rdf.remove((parent2, SKOS.narrower, conc)) rdf.remove((parent2, SKOS.narrowerTransitive, conc)) else: logging.warning( "Redundant hierarchical relationship " "%s skos:broader %s found, but not eliminated " "because eliminate_redundancy is not set", conc, parent2)
def hierarchical_redundancy(rdf, fix=False)
Check for and optionally remove extraneous skos:broader relations. :param Graph rdf: An rdflib.graph.Graph object. :param bool fix: Fix the problem by removing skos:broader relations between concepts that are otherwise connected by skos:broaderTransitive.
2.867112
2.565428
1.117596
for s, o in sorted(rdf.subject_objects(prop1)): if (s, prop2, o) in rdf: yield (s, o)
def find_prop_overlap(rdf, prop1, prop2)
Generate (subject, object) pairs connected by both of the two given properties.
3.296544
2.630023
1.253428
for s, o in rdf.subject_objects(SKOS.related): rdf.add((o, SKOS.related, s))
def skos_related(rdf)
Make sure that skos:related is stated in both directions (S23).
3.334433
2.486861
1.34082
for s, o in rdf.subject_objects(SKOS.hasTopConcept): rdf.add((o, SKOS.topConceptOf, s)) for s, o in rdf.subject_objects(SKOS.topConceptOf): rdf.add((o, SKOS.hasTopConcept, s)) for s, o in rdf.subject_objects(SKOS.topConceptOf): rdf.add((s, SKOS.inScheme, o))
def skos_topConcept(rdf)
Infer skos:topConceptOf/skos:hasTopConcept (S8) and skos:inScheme (S7).
1.742532
1.685516
1.033827
if narrower: for s, o in rdf.subject_objects(SKOS.broader): rdf.add((o, SKOS.narrower, s)) for s, o in rdf.subject_objects(SKOS.narrower): rdf.add((o, SKOS.broader, s)) if not narrower: rdf.remove((s, SKOS.narrower, o))
def skos_hierarchical(rdf, narrower=True)
Infer skos:broader/skos:narrower (S25) but only keep skos:narrower on request. :param bool narrower: If set to False, skos:narrower will not be added, but rather removed.
1.895569
2.004913
0.945462
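A minimal sketch of the broader/narrower inference on a two-concept rdflib graph; the concept URIs are illustrative.

from rdflib import Graph, Namespace
from rdflib.namespace import SKOS

EX = Namespace('http://example.org/')
g = Graph()
g.add((EX.child, SKOS.broader, EX.parent))

# Adds the inverse skos:narrower link (S25).
skos_hierarchical(g, narrower=True)
assert (EX.parent, SKOS.narrower, EX.child) in g

# With narrower=False, existing skos:narrower triples are removed instead.
skos_hierarchical(g, narrower=False)
assert (EX.parent, SKOS.narrower, EX.child) not in g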
for conc in rdf.subjects(RDF.type, SKOS.Concept): for bt in rdf.transitive_objects(conc, SKOS.broader): if bt == conc: continue rdf.add((conc, SKOS.broaderTransitive, bt)) if narrower: rdf.add((bt, SKOS.narrowerTransitive, conc))
def skos_transitive(rdf, narrower=True)
Perform transitive closure inference (S22, S24).
2.408727
2.273137
1.059649
for s, o in rdf.subject_objects(SKOS.relatedMatch): rdf.add((o, SKOS.relatedMatch, s)) if related: rdf.add((s, SKOS.related, o)) rdf.add((o, SKOS.related, s)) for s, o in rdf.subject_objects(SKOS.closeMatch): rdf.add((o, SKOS.closeMatch, s)) for s, o in rdf.subject_objects(SKOS.exactMatch): rdf.add((o, SKOS.exactMatch, s))
def skos_symmetric_mappings(rdf, related=True)
Ensure that the symmetric mapping properties (skos:relatedMatch, skos:closeMatch and skos:exactMatch) are stated in both directions (S44). :param bool related: Add the skos:related super-property for all skos:relatedMatch relations (S41).
1.556648
1.548209
1.005451
for s, o in rdf.subject_objects(SKOS.broadMatch): rdf.add((s, SKOS.broader, o)) if narrower: rdf.add((o, SKOS.narrowMatch, s)) rdf.add((o, SKOS.narrower, s)) for s, o in rdf.subject_objects(SKOS.narrowMatch): rdf.add((o, SKOS.broadMatch, s)) rdf.add((o, SKOS.broader, s)) if narrower: rdf.add((s, SKOS.narrower, o)) else: rdf.remove((s, SKOS.narrowMatch, o))
def skos_hierarchical_mappings(rdf, narrower=True)
Infer skos:broadMatch/skos:narrowMatch (S43) and add the super-properties skos:broader/skos:narrower (S41). :param bool narrower: If set to False, skos:narrowMatch will not be added, but rather removed.
1.76186
1.751968
1.005647
# find out the subclass mappings upperclasses = {} # key: class val: set([superclass1, superclass2..]) for s, o in rdf.subject_objects(RDFS.subClassOf): upperclasses.setdefault(s, set()) for uc in rdf.transitive_objects(s, RDFS.subClassOf): if uc != s: upperclasses[s].add(uc) # set the superclass type information for subclass instances for s, ucs in upperclasses.items(): logging.debug("setting superclass types: %s -> %s", s, str(ucs)) for res in rdf.subjects(RDF.type, s): for uc in ucs: rdf.add((res, RDF.type, uc))
def rdfs_classes(rdf)
Perform RDFS subclass inference. Mark all resources with a subclass type with the upper class.
3.707728
3.533092
1.049428
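A sketch of the subclass inference on a toy rdflib graph with made-up URIs.

from rdflib import Graph, Namespace, RDF, RDFS

EX = Namespace('http://example.org/')
g = Graph()
g.add((EX.Dog, RDFS.subClassOf, EX.Animal))
g.add((EX.rex, RDF.type, EX.Dog))

# Every instance of a subclass is also typed with its (transitive) superclasses.
rdfs_classes(g)
assert (EX.rex, RDF.type, EX.Animal) in g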
# find out the subproperty mappings superprops = {} # key: property val: set([superprop1, superprop2..]) for s, o in rdf.subject_objects(RDFS.subPropertyOf): superprops.setdefault(s, set()) for sp in rdf.transitive_objects(s, RDFS.subPropertyOf): if sp != s: superprops[s].add(sp) # add the superproperty relationships for p, sps in superprops.items(): logging.debug("setting superproperties: %s -> %s", p, str(sps)) for s, o in rdf.subject_objects(p): for sp in sps: rdf.add((s, sp, o))
def rdfs_properties(rdf)
Perform RDFS subproperty inference. Add superproperties where subproperties have been used.
3.04879
2.928543
1.04106
bits = token.split_contents() if len(bits) != 2: raise TemplateSyntaxError("'%s' takes one argument" % bits[0]) parent_name = parser.compile_filter(bits[1]) nodelist = parser.parse() if nodelist.get_nodes_by_type(ExtendsNode): raise TemplateSyntaxError("'%s' cannot appear more than once " "in the same template" % bits[0]) return OverExtendsNode(nodelist, parent_name, None)
def overextends(parser, token)
Extended version of Django's ``extends`` tag that allows circular inheritance to occur, e.g. a template can both be overridden and extended at once.
2.281105
2.318023
0.984073
# These imports want settings, which aren't available when this # module is imported to ``add_to_builtins``, so do them here. from django.conf import settings # Find the app_template_dirs (moved in Django 1.8) import django.template.loaders.app_directories as app_directories try: # Django >= 1.8 get_app_template_dirs = app_directories.get_app_template_dirs app_template_dirs = get_app_template_dirs('templates') except AttributeError: # Django <= 1.7 app_template_dirs = app_directories.app_template_dirs # Find the find_template_loader function, and appropriate template # settings (changed in Django 1.8) try: # Django >= 1.8 find_template_loader = context.template.engine.find_template_loader template_dirs = context.template.engine.dirs template_loaders = context.template.engine.loaders except AttributeError: # Django <= 1.7 from django.template.loader import find_template_loader template_dirs = list(settings.TEMPLATE_DIRS) template_loaders = settings.TEMPLATE_LOADERS # Store a dictionary in the template context mapping template # names to the lists of template directories available to # search for that template. Each time a template is loaded, its # origin directory is removed from its directories list. context_name = "OVEREXTENDS_DIRS" if context_name not in context: context[context_name] = {} if name not in context[context_name]: all_dirs = template_dirs + list(app_template_dirs) # os.path.abspath is needed under uWSGI, and also ensures we # have consistent path separators across different OSes. context[context_name][name] = list(map(os.path.abspath, all_dirs)) # Build a list of template loaders to use. For loaders that wrap # other loaders like the ``cached`` template loader, unwind its # internal loaders and add those instead. loaders = [] for loader_name in template_loaders: loader = find_template_loader(loader_name) loaders.extend(getattr(loader, "loaders", [loader])) # Go through the loaders and try to find the template. When # found, removed its absolute path from the context dict so # that it won't be used again when the same relative name/path # is requested. for loader in loaders: dirs = context[context_name][name] if not dirs: break try: source, path = loader.load_template_source(name, dirs) except TemplateDoesNotExist: pass else: # Only remove the absolute path for the initial call in # get_parent, and not when we're peeking during the # second call. if not peeking: remove_path = os.path.abspath(path[:-len(name) - 1]) context[context_name][name].remove(remove_path) return Template(source) raise TemplateDoesNotExist(name)
def find_template(self, name, context, peeking=False)
Replacement for Django's ``find_template`` that uses the current template context to keep track of which template directories it has used when finding a template. This allows multiple templates with the same relative name/path to be discovered, so that circular template inheritance can occur.
3.879037
3.741885
1.036653
parent = self.parent_name.resolve(context) # If parent is a template object, just return it. if hasattr(parent, "render"): return parent template = self.find_template(parent, context) for node in template.nodelist: if (isinstance(node, ExtendsNode) and node.parent_name.resolve(context) == parent): return self.find_template(parent, context, peeking=True) return template
def get_parent(self, context)
Load the parent template using our own ``find_template``, which will cause its absolute path to not be used again. Then peek at the first node, and if its parent arg is the same as the current parent arg, we know circular inheritance is going to occur, in which case we try and find the template again, with the absolute directory removed from the search list.
4.048973
3.394664
1.192746
config = Config() # additional options for command line client only defaults = vars(config) defaults['to_format'] = None defaults['output'] = '-' defaults['log'] = None defaults['debug'] = False options, remainingArgs = get_option_parser(defaults).parse_args() for key in vars(options): if hasattr(config, key): setattr(config, key, getattr(options, key)) # configure logging, messages to stderr by default logformat = '%(levelname)s: %(message)s' loglevel = logging.INFO if options.debug: loglevel = logging.DEBUG if options.log: logging.basicConfig(filename=options.log, format=logformat, level=loglevel) else: logging.basicConfig(format=logformat, level=loglevel) output = options.output to_format = options.to_format # read config file as defaults and override from command line arguments if options.config is not None: config.read_and_parse_config_file(options.config) options, remainingArgs = get_option_parser(vars(config)).parse_args() for key in vars(options): if hasattr(config, key): setattr(config, key, getattr(options, key)) if remainingArgs: inputfiles = remainingArgs else: inputfiles = ['-'] voc = skosify(*inputfiles, **vars(config)) write_rdf(voc, output, to_format)
def main()
Read command line parameters and make a transform based on them.
2.809965
2.819994
0.996443
if self._handler: raise Exception('Handler was already set') if handler: self._handler = async_task(handler, loop=self._loop)
def set_handler(self, handler)
Connect with a coroutine, which is scheduled when the connection is made. This function will create a task; when the connection is closed, the task will be canceled. :param handler: :return: None
6.159292
5.953125
1.034632
yield from _wait_for_events(self._resumed, self._stream_creatable) stream_id = self._conn.get_next_available_stream_id() self._priority.insert_stream(stream_id) self._priority.block(stream_id) self._conn.send_headers(stream_id, headers, end_stream=end_stream) self._flush() return stream_id
def start_request(self, headers, *, end_stream=False)
Start a request by sending given headers on a new stream, and return the ID of the new stream. This may block until the underlying transport becomes writable, and the number of concurrent outbound requests (open outbound streams) is less than the value of peer config MAX_CONCURRENT_STREAMS. The completion of the call to this method does not mean the request is successfully delivered - data is only correctly stored in a buffer to be sent. There's no guarantee it is truly delivered. :param headers: A list of key-value tuples as headers. :param end_stream: To send a request without body, set `end_stream` to `True` (default `False`). :return: Stream ID as an integer, used for further communication.
5.240592
5.349899
0.979568
yield from self._resumed.wait() self._conn.send_headers(stream_id, headers, end_stream=end_stream) self._flush()
def start_response(self, stream_id, headers, *, end_stream=False)
Start a response by sending given headers on the given stream. This may block until the underlying transport becomes writable. :param stream_id: Which stream to send response on. :param headers: A list of key-value tuples as headers. :param end_stream: To send a response without body, set `end_stream` to `True` (default `False`).
6.228616
6.336731
0.982938
try: with (yield from self._get_stream(stream_id).wlock): while True: yield from _wait_for_events( self._resumed, self._get_stream(stream_id).window_open) self._priority.unblock(stream_id) waiter = asyncio.Future() if not self._priority_events: self._loop.call_soon(self._priority_step) self._priority_events[stream_id] = waiter try: yield from waiter data_size = len(data) size = min( data_size, self._conn.local_flow_control_window(stream_id), self._conn.max_outbound_frame_size) if data_size == 0 or size == data_size: self._conn.send_data(stream_id, data, end_stream=end_stream) self._flush() break elif size > 0: self._conn.send_data(stream_id, data[:size]) data = data[size:] self._flush() finally: self._priority_events.pop(stream_id, None) self._priority.block(stream_id) if self._priority_events: self._loop.call_soon(self._priority_step) except ProtocolError: raise exceptions.SendException(data)
def send_data(self, stream_id, data, *, end_stream=False)
Send the request or response body on the given stream. This will block until either the whole data is sent or the stream gets closed. Meanwhile, a paused underlying transport or a closed flow control window will also cause waiting. If the peer increases the flow control window, this method will resume sending automatically. This can be called multiple times, but it must be called after a `start_request` or `start_response` with the returned stream ID, and before any `end_stream` instructions; otherwise it will fail. The given data may be automatically split into smaller frames in order to fit in the configured frame size or flow control window. Each stream can only have one `send_data` running; other callers will be blocked on a per-stream lock (wlock), so that coroutines sending data concurrently won't interfere with each other. Similarly, the completion of the call to this method does not mean the data is delivered. :param stream_id: Which stream to send data on. :param data: Bytes to send. :param end_stream: To finish sending a request or response, set this to `True` to close the given stream locally after data is sent (default `False`). :raise: `SendException` if there is an error sending data. Data left unsent can be found in `data` of the exception.
3.151527
2.977798
1.058341
with (yield from self._get_stream(stream_id).wlock): yield from self._resumed.wait() self._conn.send_headers(stream_id, headers, end_stream=True) self._flush()
def send_trailers(self, stream_id, headers)
Send trailers on the given stream, closing the stream locally. This may block until the underlying transport becomes writable, or other coroutines release the wlock on this stream. :param stream_id: Which stream to send trailers on. :param headers: A list of key-value tuples as trailers.
5.831199
7.167861
0.81352
with (yield from self._get_stream(stream_id).wlock): yield from self._resumed.wait() self._conn.end_stream(stream_id) self._flush()
def end_stream(self, stream_id)
Close the given stream locally. This may block until the underlying transport becomes writable, or other coroutines release the wlock on this stream. :param stream_id: Which stream to close.
7.372466
7.433441
0.991797
rv = [] try: with (yield from self._get_stream(stream_id).rlock): if size is None: rv.append(( yield from self._get_stream(stream_id).read_frame())) self._flow_control(stream_id) elif size < 0: while True: rv.extend(( yield from self._get_stream(stream_id).read_all())) self._flow_control(stream_id) else: while size > 0: bufs, count = yield from self._get_stream( stream_id).read(size) rv.extend(bufs) size -= count self._flow_control(stream_id) except StreamClosedError: pass except _StreamEndedException as e: try: self._flow_control(stream_id) except StreamClosedError: pass rv.extend(e.bufs) return b''.join(rv)
def read_stream(self, stream_id, size=None)
Read data from the given stream. By default (`size=None`), this returns all data left in the current HTTP/2 frame. In other words, the default behavior is to receive frame by frame. If size is a number above zero, the method will try to return as many bytes as possible up to the given size, blocking until enough bytes are ready or the stream is remotely closed. If below zero, it will read until the stream is remotely closed and return everything at hand. `size=0` is a special case that does nothing but return `b''`. The same result `b''` is also returned under other conditions if there is no more data on the stream to receive, even under `size=None` when the peer sends an empty frame - you can use `b''` to safely identify the end of the given stream. Flow control frames will be sent automatically as reading clears the buffer, allowing more data to come in. :param stream_id: Stream to read. :param size: Expected size to read, `-1` for all, default is one frame. :return: Bytes read, or empty if there is no more to expect.
2.740222
2.651911
1.033301
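A hedged usage sketch combining start_request and read_stream; `proto` is assumed to be an already-connected protocol object exposing the coroutines documented above (how the connection is established is outside this snippet).

import asyncio

@asyncio.coroutine
def get_body(proto, path='/'):
    # Open a new stream with the request headers and no body.
    stream_id = yield from proto.start_request(
        [(':method', 'GET'), (':path', path)], end_stream=True)
    # size=-1: read until the stream is remotely closed.
    body = yield from proto.read_stream(stream_id, -1)
    return body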
while not self._is_functional(): self._rtt = None self._ping_index += 1 self._ping_time = self._loop.time() self._conn.ping(struct.pack('Q', self._ping_index)) self._flush() try: yield from asyncio.wait_for(self._functional.wait(), self._functional_timeout) except asyncio.TimeoutError: pass return self._rtt
def wait_functional(self)
Wait until the connection becomes functional. The connection is considered functional if it was active within the last few seconds (defined by `functional_timeout`), where a newly-made connection and received data indicate activeness. :return: Most recently calculated round-trip time, if any.
4.75288
4.407941
1.078254
self._priority.reprioritize(stream_id, depends_on, weight, exclusive)
def reprioritize(self, stream_id, depends_on=None, weight=16, exclusive=False)
Update the priority status of an existing stream. :param stream_id: The stream ID of the stream being updated. :param depends_on: (optional) The ID of the stream that the stream now depends on. If ``None``, will be moved to depend on stream 0. :param weight: (optional) The new weight to give the stream. Defaults to 16. :param exclusive: (optional) Whether this stream should now be an exclusive dependency of the new parent.
3.343041
5.374502
0.622019
actual_decorator = user_passes_test( lambda u: u.is_active and u.is_superuser, login_url=login_url, redirect_field_name=redirect_field_name ) if view_func: return actual_decorator(view_func) return actual_decorator
def superuser_required(view_func=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url='admin:login')
Decorator for views that checks that the user is logged in and is a superuser, redirecting to the login page if necessary.
1.618292
1.729512
0.935693
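Typical decorator usage, sketched with a made-up view; the decorator works both bare and with explicit keyword arguments.

from django.http import HttpResponse

@superuser_required
def secret_dashboard(request):
    # Only reachable by active superusers; everyone else is redirected
    # to the login URL ('admin:login' by default).
    return HttpResponse("for superusers only")

# Equivalent with a custom login URL:
# secret_dashboard = superuser_required(secret_dashboard, login_url='accounts:login')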
''' Return sql for condition. ''' def escape(value): if isinstance(value, bool): value = str(int(value)) if isinstance(value, six.string_types): # Escape params used with LIKE if '%' in value: value = value.replace('%', '%%') # Escape single quotes if "'" in value: value = value.replace("'", "''") # Add single quote to text values value = "'" + value + "'" return value sql, param = self.condition.query.where.as_sql(qn, connection) param = map(escape, param) return sql % tuple(param)
def _condition_as_sql(self, qn, connection)
Return sql for condition.
3.656109
3.353478
1.090244
siteList = SiteList(points) context = Context() voronoi(siteList,context) return (context.vertices, context.edges, context.polygons)
def computeVoronoiDiagram(points)
Takes a list of point objects (which must have x and y fields). Returns a 3-tuple of: (1) a list of 2-tuples, which are the x,y coordinates of the Voronoi diagram vertices (2) a list of 3-tuples (a,b,c) which are the equations of the lines in the Voronoi diagram: a*x + b*y = c (3) a list of 3-tuples, (l, v1, v2) representing edges of the Voronoi diagram. l is the index of the line, v1 and v2 are the indices of the vertices at the end of the edge. If v1 or v2 is -1, the line extends to infinity.
7.077571
8.279893
0.85479
siteList = SiteList(points) context = Context() context.triangulate = True voronoi(siteList,context) return context.triangles
def computeDelaunayTriangulation(points)
Takes a list of point objects (which must have x and y fields). Returns a list of 3-tuples: the indices of the points that form a Delaunay triangle.
8.890203
11.717433
0.758716
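A quick sketch of calling both helpers; Point is a stand-in class, since the functions only require objects with x and y attributes.

import random

class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

points = [Point(random.random(), random.random()) for _ in range(20)]

# Voronoi diagram as the 3-tuple described in the docstring above.
diagram = computeVoronoiDiagram(points)

# Delaunay triangulation: each triangle is a 3-tuple of indices into `points`.
for a, b, c in computeDelaunayTriangulation(points):
    print(points[a].x, points[a].y, points[b].x, points[b].y, points[c].x, points[c].y)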
count_types = collections.Counter( line.get('type') or None for line in messages) count_modules = collections.Counter( line.get('module') or None for line in messages) count_symbols = collections.Counter( line.get('symbol') or None for line in messages) count_paths = collections.Counter( line.get('path') or None for line in messages) return { 'types': count_types, 'modules': count_modules, 'symbols': count_symbols, 'paths': count_paths, }
def build_messages_metrics(messages)
Build the report's metrics.
1.997066
1.910131
1.045513
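An illustration with two fabricated pylint message dicts:

messages = [
    {'type': 'convention', 'module': 'pkg.mod', 'symbol': 'missing-docstring',
     'path': 'pkg/mod.py', 'line': 1},
    {'type': 'error', 'module': 'pkg.mod', 'symbol': 'undefined-variable',
     'path': 'pkg/mod.py', 'line': 10},
]

metrics = build_messages_metrics(messages)
# metrics['types']   -> Counter({'convention': 1, 'error': 1})
# metrics['modules'] -> Counter({'pkg.mod': 2})
# metrics['symbols'] -> Counter({'missing-docstring': 1, 'undefined-variable': 1})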
data = collections.defaultdict(list) for line in messages: module_name = line.get('module') module_path = line.get('path') module_info = ModuleInfo( module_name, module_path, ) data[module_info].append(line) for module, module_messages in data.items(): yield ( module, sorted(module_messages, key=lambda x: x.get('line')))
def build_messages_modules(messages)
Build and yield a sorted list of messages per module. :param list messages: List of dicts of messages :return: Tuple of 2 values: first is the module info, second is the list of messages sorted by line number
2.920592
2.614333
1.117146
statement = stats.get('statement') error = stats.get('error', 0) warning = stats.get('warning', 0) refactor = stats.get('refactor', 0) convention = stats.get('convention', 0) if not statement or statement <= 0: return None malus = float(5 * error + warning + refactor + convention) malus_ratio = malus / statement return 10.0 - (malus_ratio * 10)
def stats_evaluation(stats)
Generate an evaluation for the given pylint ``stats``.
3.839087
3.355745
1.144034
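A worked example of the score formula above, which mirrors pylint's default evaluation 10.0 - ((5 * error + warning + refactor + convention) / statement * 10):

stats = {'statement': 100, 'error': 2, 'warning': 5, 'refactor': 1, 'convention': 4}
# malus = 5*2 + 5 + 1 + 4 = 20 -> score = 10.0 - (20 / 100 * 10) = 8.0
assert round(stats_evaluation(stats), 2) == 8.0

# With no statements counted, the function returns None.
assert stats_evaluation({'statement': 0}) is None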