| _id (string, 2-7 chars) | title (string, 1-88 chars) | partition (3 classes) | text (string, 75-19.8k chars) | language (1 class) | meta_information (dict) |
|---|---|---|---|---|---|
q9100
|
Search.charges
|
train
|
def charges(self, num, charge_id=None, **kwargs):
"""Search for charges against a company by company number.
Args:
num (str): Company number to search on.
charge_id (Optional[str]): Charge ID for a specific charge.
kwargs (dict): additional keywords passed into
requests.session.get params keyword.
"""
baseuri = self._BASE_URI + "company/{}/charges".format(num)
if charge_id is not None:
baseuri += "/{}".format(charge_id)
res = self.session.get(baseuri, params=kwargs)
else:
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res
|
python
|
{
"resource": ""
}
|
q9101
|
Search.officers
|
train
|
def officers(self, num, **kwargs):
"""Search for a company's registered officers by company number.
Args:
num (str): Company number to search on.
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword.
"""
baseuri = self._BASE_URI + "company/{}/officers".format(num)
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res
|
python
|
{
"resource": ""
}
|
q9102
|
Search.disqualified
|
train
|
def disqualified(self, num, natural=True, **kwargs):
"""Search for disqualified officers by officer ID.
Searches for natural disqualifications by default. Specify
natural=False to search for corporate disqualifications.
Args:
num (str): Officer ID to search on.
natural (Optional[bool]): Natural or corporate search
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword.
"""
search_type = 'natural' if natural else 'corporate'
baseuri = (self._BASE_URI +
'disqualified-officers/{}/{}'.format(search_type, num))
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res
|
python
|
{
"resource": ""
}
|
q9103
|
Search.persons_significant_control
|
train
|
def persons_significant_control(self, num, statements=False, **kwargs):
"""Search for a list of persons with significant control.
Searches for persons with significant control of the company identified
by the given company number. Specify statements=True to search only for
persons with statements.
Args:
num (str, int): Company number to search on.
statements (Optional[bool]): Search only for persons with
statements. Default is False.
kwargs (dict): additional keywords passed into requests.session.get
*params* keyword.
"""
baseuri = (self._BASE_URI +
'company/{}/persons-with-significant-control'.format(num))
# Only append statements to the URL if statements is True
if statements is True:
baseuri += '-statements'
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res
|
python
|
{
"resource": ""
}
|
q9104
|
Search.significant_control
|
train
|
def significant_control(self,
num,
entity_id,
entity_type='individual',
**kwargs):
"""Get details of a specific entity with significant control.
Args:
num (str, int): Company number to search on.
entity_id (str, int): Entity id to request details for
entity_type (str, int): What type of entity to search for. Defaults
to 'individual'. Other possible options are
'corporate' (for corporate entities), 'legal' (for legal
persons), 'statements' (for a person with significant control
statement) and 'secure' (for a super secure person).
kwargs (dict): additional keywords passed into requests.session.get
*params* keyword.
"""
# Dict mapping entity_type strings to url strings
entities = {'individual': 'individual',
'corporate': 'corporate-entity',
'legal': 'legal-person',
'statements': 'persons-with-significant-control-statements',
'secure': 'super-secure'}
# Make sure correct entity_type supplied
try:
entity = entities[entity_type]
except KeyError as e:
msg = ("Wrong entity_type supplied. Please choose from " +
"individual, corporate, legal, statements or secure")
raise Exception(msg) from e
# Construct the request and return the result
baseuri = (self._BASE_URI +
'company/{}/persons-with-significant-control/'.format(num) +
'{}/{}'.format(entity, entity_id))
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res
|
python
|
{
"resource": ""
}
|
q9105
|
Search.document
|
train
|
def document(self, document_id, **kwargs):
"""Requests for a document by the document id.
Normally the response.content can be saved as a pdf file
Args:
document_id (str): The id of the document retrieved.
kwargs (dict): additional keywords passed into
requests.session.get *params* keyword.
"""
baseuri = '{}document/{}/content'.format(self._DOCUMENT_URI,
document_id)
res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res
|
python
|
{
"resource": ""
}
|
q9106
|
Plate.get_node_label
|
train
|
def get_node_label(self, model):
"""
Defines how labels are constructed from models.
Default - uses verbose name, with line breaks where sensible
"""
if model.is_proxy:
label = "(P) %s" % (model.name.title())
else:
label = "%s" % (model.name.title())
line = ""
new_label = []
for w in label.split(" "):
if len(line + w) > 15:
new_label.append(line)
line = w
else:
line += " "
line += w
new_label.append(line)
return "\n".join(new_label)
|
python
|
{
"resource": ""
}
|
q9107
|
get_version
|
train
|
def get_version(release_level=True):
"""
Return the formatted version information
"""
vers = ["%(major)i.%(minor)i.%(micro)i" % __version_info__]
if release_level and __version_info__['releaselevel'] != 'final':
vers.append('%(releaselevel)s%(serial)i' % __version_info__)
return ''.join(vers)
|
python
|
{
"resource": ""
}
|
q9108
|
_sslobj
|
train
|
def _sslobj(sock):
"""Returns the underlying PySLLSocket object with which the C extension
functions interface.
"""
if isinstance(sock._sslobj, _ssl._SSLSocket):
return sock._sslobj
else:
return sock._sslobj._sslobj
|
python
|
{
"resource": ""
}
|
q9109
|
_colorize
|
train
|
def _colorize(val, color):
"""Colorize a string using termcolor or colorama.
If any of them are available.
"""
if termcolor is not None:
val = termcolor.colored(val, color)
elif colorama is not None:
val = TERMCOLOR2COLORAMA[color] + val + colorama.Style.RESET_ALL
return val
|
python
|
{
"resource": ""
}
|
q9110
|
TimerPlugin._parse_time
|
train
|
def _parse_time(self, value):
"""Parse string time representation to get number of milliseconds.
Raises the ``ValueError`` for invalid format.
"""
try:
# Default time unit is a second, we should convert it to milliseconds.
return int(value) * 1000
except ValueError:
# Fall back to parsing the time format if the value cannot be cast to int.
m = self.time_format.match(value)
if not m:
raise ValueError("Could not parse time represented by '{t}'".format(t=value))
time = int(m.group('time'))
if m.group('units') != 'ms':
time *= 1000
return time
|
python
|
{
"resource": ""
}
|
q9111
|
TimerPlugin.configure
|
train
|
def configure(self, options, config):
"""Configures the test timer plugin."""
super(TimerPlugin, self).configure(options, config)
self.config = config
if self.enabled:
self.timer_top_n = int(options.timer_top_n)
self.timer_ok = self._parse_time(options.timer_ok)
self.timer_warning = self._parse_time(options.timer_warning)
self.timer_filter = self._parse_filter(options.timer_filter)
self.timer_fail = options.timer_fail
self.timer_no_color = True
self.json_file = options.json_file
# Windows + nosetests does not support colors (even with colorama).
if not IS_NT:
self.timer_no_color = options.timer_no_color
# determine if multiprocessing plugin enabled
self.multiprocessing_enabled = bool(getattr(options, 'multiprocess_workers', False))
|
python
|
{
"resource": ""
}
|
q9112
|
TimerPlugin.report
|
train
|
def report(self, stream):
"""Report the test times."""
if not self.enabled:
return
# if multiprocessing plugin enabled - get items from results queue
if self.multiprocessing_enabled:
for i in range(_results_queue.qsize()):
try:
k, v, s = _results_queue.get(False)
self._timed_tests[k] = {
'time': v,
'status': s,
}
except Queue.Empty:
pass
d = sorted(self._timed_tests.items(), key=lambda item: item[1]['time'], reverse=True)
if self.json_file:
dict_type = OrderedDict if self.timer_top_n else dict
with open(self.json_file, 'w') as f:
json.dump({'tests': dict_type((k, v) for k, v in d)}, f)
total_time = sum([vv['time'] for kk, vv in d])
for i, (test, time_and_status) in enumerate(d):
time_taken = time_and_status['time']
status = time_and_status['status']
if i < self.timer_top_n or self.timer_top_n == -1:
color = self._get_result_color(time_taken)
percent = 0 if total_time == 0 else time_taken / total_time * 100
line = self._format_report_line(
test=test,
time_taken=time_taken,
color=color,
status=status,
percent=percent,
)
_filter = self._COLOR_TO_FILTER.get(color)
if self.timer_filter is None or _filter is None or _filter in self.timer_filter:
stream.writeln(line)
|
python
|
{
"resource": ""
}
|
q9113
|
TimerPlugin._get_result_color
|
train
|
def _get_result_color(self, time_taken):
"""Get time taken result color."""
time_taken_ms = time_taken * 1000
if time_taken_ms <= self.timer_ok:
color = 'green'
elif time_taken_ms <= self.timer_warning:
color = 'yellow'
else:
color = 'red'
return color
|
python
|
{
"resource": ""
}
|
q9114
|
TimerPlugin._colored_time
|
train
|
def _colored_time(self, time_taken, color=None):
"""Get formatted and colored string for a given time taken."""
if self.timer_no_color:
return "{0:0.4f}s".format(time_taken)
return _colorize("{0:0.4f}s".format(time_taken), color)
|
python
|
{
"resource": ""
}
|
q9115
|
TimerPlugin._format_report_line
|
train
|
def _format_report_line(self, test, time_taken, color, status, percent):
"""Format a single report line."""
return "[{0}] {3:04.2f}% {1}: {2}".format(
status, test, self._colored_time(time_taken, color), percent
)
|
python
|
{
"resource": ""
}
|
q9116
|
TimerPlugin.addSuccess
|
train
|
def addSuccess(self, test, capt=None):
"""Called when a test passes."""
time_taken = self._register_time(test, 'success')
if self.timer_fail is not None and time_taken * 1000.0 > self.threshold:
test.fail('Test was too slow (took {0:0.4f}s, threshold was '
'{1:0.4f}s)'.format(time_taken, self.threshold / 1000.0))
|
python
|
{
"resource": ""
}
|
q9117
|
TextRankSummarizer.summarize
|
train
|
def summarize(self, text, length=5, weighting='frequency', norm=None):
"""
Implements the TextRank summarization algorithm, which closely follows the PageRank algorithm for ranking
web pages.
:param text: a string of text to be summarized, path to a text file, or URL starting with http
:param length: the length of the output summary; either a number of sentences (e.g. 5) or a percentage
of the original document (e.g. 0.5)
:param weighting: 'frequency', 'binary' or 'tfidf' weighting of sentence terms ('frequency' by default)
:param norm: if 'l1' or 'l2', normalizes words by the length of their associated sentence to "down-weight"
the voting power of long sentences (None by default)
:return: list of sentences for the summary
"""
text = self._parse_input(text)
sentences, unprocessed_sentences = self._tokenizer.tokenize_sentences(text)
length = self._parse_summary_length(length, len(sentences))
if length == len(sentences):
return unprocessed_sentences
# Compute the word frequency matrix. If norm is set to 'l1' or 'l2' then words are normalized
# by the length of their associated sentences (such that each vector of sentence terms sums to 1).
word_matrix = self._compute_matrix(sentences, weighting=weighting, norm=norm)
# Build the similarity graph by calculating the number of overlapping words between all
# combinations of sentences.
similarity_matrix = (word_matrix * word_matrix.T)
similarity_graph = networkx.from_scipy_sparse_matrix(similarity_matrix)
scores = networkx.pagerank(similarity_graph)
ranked_sentences = sorted(
((score, ndx) for ndx, score in scores.items()), reverse=True
)
top_sentences = [ranked_sentences[i][1] for i in range(length)]
top_sentences.sort()
return [unprocessed_sentences[i] for i in top_sentences]
|
python
|
{
"resource": ""
}
|
q9118
|
Tokenizer.remove_stopwords
|
train
|
def remove_stopwords(self, tokens):
"""Remove all stopwords from a list of word tokens or a string of text."""
if isinstance(tokens, (list, tuple)):
return [word for word in tokens if word.lower() not in self._stopwords]
else:
return ' '.join(
[word for word in tokens.split(' ') if word.lower() not in self._stopwords]
)
|
python
|
{
"resource": ""
}
|
q9119
|
Tokenizer.stem
|
train
|
def stem(self, word):
"""Perform stemming on an input word."""
if self.stemmer:
return unicode_to_ascii(self._stemmer.stem(word))
else:
return word
|
python
|
{
"resource": ""
}
|
q9120
|
Tokenizer.strip_punctuation
|
train
|
def strip_punctuation(text, exclude='', include=''):
"""Strip leading and trailing punctuation from an input string."""
chars_to_strip = ''.join(
set(list(punctuation)).union(set(list(include))) - set(list(exclude))
)
return text.strip(chars_to_strip)
|
python
|
{
"resource": ""
}
|
q9121
|
Tokenizer._remove_whitespace
|
train
|
def _remove_whitespace(text):
"""Remove excess whitespace from the ends of a given input string."""
# while True:
# old_text = text
# text = text.replace(' ', ' ')
# if text == old_text:
# return text
non_spaces = re.finditer(r'[^ ]', text)
if not non_spaces:
return text
first_non_space = non_spaces.next()
first_non_space = first_non_space.start()
last_non_space = None
for item in non_spaces:
last_non_space = item
if not last_non_space:
return text[first_non_space:]
else:
last_non_space = last_non_space.end()
return text[first_non_space:last_non_space]
|
python
|
{
"resource": ""
}
|
q9122
|
Tokenizer.tokenize_sentences
|
train
|
def tokenize_sentences(self, text, word_threshold=5):
"""
Returns a list of sentences given an input string of text.
:param text: input string
:param word_threshold: number of significant words that a sentence must contain to be counted
(to count all sentences set equal to 1; 5 by default)
:return: list of sentences
"""
punkt_params = PunktParameters()
# Not using set literal to allow compatibility with Python 2.6
punkt_params.abbrev_types = set([
'dr', 'vs', 'mr', 'mrs', 'ms', 'prof', 'mt', 'inc', 'i.e', 'e.g'
])
sentence_splitter = PunktSentenceTokenizer(punkt_params)
# 1. TOKENIZE "UNPROCESSED" SENTENCES FOR DISPLAY
# Need to adjust quotations for correct sentence splitting
text_unprocessed = text.replace('?"', '? "').replace('!"', '! "').replace('."', '. "')
# Treat line breaks as end of sentence (needed in cases where titles don't have a full stop)
text_unprocessed = text_unprocessed.replace('\n', ' . ')
# Perform sentence splitting
unprocessed_sentences = sentence_splitter.tokenize(text_unprocessed)
# Now that sentences have been split we can return them back to their normal formatting
for ndx, sentence in enumerate(unprocessed_sentences):
sentence = unicode_to_ascii(sentence) # Sentence splitter returns unicode strings
sentence = sentence.replace('? " ', '?" ').replace('! " ', '!" ').replace('. " ', '." ')
sentence = self._remove_whitespace(sentence) # Remove excess whitespace
sentence = sentence[:-2] if (sentence.endswith(' .') or sentence.endswith(' . ')) else sentence
unprocessed_sentences[ndx] = sentence
# 2. PROCESS THE SENTENCES TO PERFORM STEMMING, STOPWORDS REMOVAL ETC. FOR MATRIX COMPUTATION
processed_sentences = [self.sanitize_text(sen) for sen in unprocessed_sentences]
# Sentences should contain at least 'word_threshold' significant terms
filter_sentences = [i for i in range(len(processed_sentences))
if len(processed_sentences[i].replace('.', '').split(' ')) > word_threshold]
processed_sentences = [processed_sentences[i] for i in filter_sentences]
unprocessed_sentences = [unprocessed_sentences[i] for i in filter_sentences]
return processed_sentences, unprocessed_sentences
|
python
|
{
"resource": ""
}
|
q9123
|
Tokenizer.tokenize_paragraphs
|
train
|
def tokenize_paragraphs(cls, text):
"""Convert an input string into a list of paragraphs."""
paragraphs = []
paragraphs_first_pass = text.split('\n')
for p in paragraphs_first_pass:
paragraphs_second_pass = re.split(r'\s{4,}', p)
paragraphs += paragraphs_second_pass
# Remove empty strings from list
paragraphs = [p for p in paragraphs if p]
return paragraphs
|
python
|
{
"resource": ""
}
|
q9124
|
BaseLsaSummarizer._svd
|
train
|
def _svd(cls, matrix, num_concepts=5):
"""
Perform singular value decomposition for dimensionality reduction of the input matrix.
"""
u, s, v = svds(matrix, k=num_concepts)
return u, s, v
|
python
|
{
"resource": ""
}
|
q9125
|
SepaDD.add_payment
|
train
|
def add_payment(self, payment):
"""
Function to add payments
@param payment: The payment dict
@raise exception: when payment is invalid
"""
if self.clean:
from text_unidecode import unidecode
payment['name'] = unidecode(payment['name'])[:70]
payment['description'] = unidecode(payment['description'])[:140]
# Validate the payment
self.check_payment(payment)
# Get the CstmrDrctDbtInitnNode
if not self._config['batch']:
# Start building the non batch payment
PmtInf_nodes = self._create_PmtInf_node()
PmtInf_nodes['PmtInfIdNode'].text = make_id(self._config['name'])
PmtInf_nodes['PmtMtdNode'].text = "DD"
PmtInf_nodes['BtchBookgNode'].text = "false"
PmtInf_nodes['NbOfTxsNode'].text = "1"
PmtInf_nodes['CtrlSumNode'].text = int_to_decimal_str(
payment['amount'])
PmtInf_nodes['Cd_SvcLvl_Node'].text = "SEPA"
PmtInf_nodes['Cd_LclInstrm_Node'].text = self._config['instrument']
PmtInf_nodes['SeqTpNode'].text = payment['type']
PmtInf_nodes['ReqdColltnDtNode'].text = payment['collection_date']
PmtInf_nodes['Nm_Cdtr_Node'].text = self._config['name']
PmtInf_nodes['IBAN_CdtrAcct_Node'].text = self._config['IBAN']
if 'BIC' in self._config:
PmtInf_nodes['BIC_CdtrAgt_Node'].text = self._config['BIC']
PmtInf_nodes['ChrgBrNode'].text = "SLEV"
PmtInf_nodes['Nm_CdtrSchmeId_Node'].text = self._config['name']
PmtInf_nodes['Id_Othr_Node'].text = self._config['creditor_id']
PmtInf_nodes['PrtryNode'].text = "SEPA"
if 'BIC' in payment:
bic = True
else:
bic = False
TX_nodes = self._create_TX_node(bic)
TX_nodes['InstdAmtNode'].set("Ccy", self._config['currency'])
TX_nodes['InstdAmtNode'].text = int_to_decimal_str(payment['amount'])
TX_nodes['MndtIdNode'].text = payment['mandate_id']
TX_nodes['DtOfSgntrNode'].text = payment['mandate_date']
if bic:
TX_nodes['BIC_DbtrAgt_Node'].text = payment['BIC']
TX_nodes['Nm_Dbtr_Node'].text = payment['name']
TX_nodes['IBAN_DbtrAcct_Node'].text = payment['IBAN']
TX_nodes['UstrdNode'].text = payment['description']
if not payment.get('endtoend_id', ''):
payment['endtoend_id'] = make_id(self._config['name'])
TX_nodes['EndToEndIdNode'].text = payment['endtoend_id']
if self._config['batch']:
self._add_batch(TX_nodes, payment)
else:
self._add_non_batch(TX_nodes, PmtInf_nodes)
|
python
|
{
"resource": ""
}
|
q9126
|
SepaDD._create_TX_node
|
train
|
def _create_TX_node(self, bic=True):
"""
Method to create the blank transaction nodes as a dict. If bic is True,
the BIC node will also be created.
"""
ED = dict()
ED['DrctDbtTxInfNode'] = ET.Element("DrctDbtTxInf")
ED['PmtIdNode'] = ET.Element("PmtId")
ED['EndToEndIdNode'] = ET.Element("EndToEndId")
ED['InstdAmtNode'] = ET.Element("InstdAmt")
ED['DrctDbtTxNode'] = ET.Element("DrctDbtTx")
ED['MndtRltdInfNode'] = ET.Element("MndtRltdInf")
ED['MndtIdNode'] = ET.Element("MndtId")
ED['DtOfSgntrNode'] = ET.Element("DtOfSgntr")
ED['DbtrAgtNode'] = ET.Element("DbtrAgt")
ED['FinInstnId_DbtrAgt_Node'] = ET.Element("FinInstnId")
if bic:
ED['BIC_DbtrAgt_Node'] = ET.Element("BIC")
ED['DbtrNode'] = ET.Element("Dbtr")
ED['Nm_Dbtr_Node'] = ET.Element("Nm")
ED['DbtrAcctNode'] = ET.Element("DbtrAcct")
ED['Id_DbtrAcct_Node'] = ET.Element("Id")
ED['IBAN_DbtrAcct_Node'] = ET.Element("IBAN")
ED['RmtInfNode'] = ET.Element("RmtInf")
ED['UstrdNode'] = ET.Element("Ustrd")
return ED
|
python
|
{
"resource": ""
}
|
q9127
|
SepaTransfer.check_config
|
train
|
def check_config(self, config):
"""
Check the config file for required fields and validity.
@param config: The config dict.
@return: True if valid. Raises an Exception listing the missing
parameters if invalid.
"""
validation = ""
required = ["name", "currency", "IBAN", "BIC"]
for config_item in required:
if config_item not in config:
validation += config_item.upper() + "_MISSING "
if not validation:
return True
else:
raise Exception("Config file did not validate. " + validation)
|
python
|
{
"resource": ""
}
|
q9128
|
SepaTransfer._add_batch
|
train
|
def _add_batch(self, TX_nodes, payment):
"""
Method to add a payment as a batch. The transaction details are already
present. Will fold the nodes accordingly and then call the
_add_to_batch_list function to store the batch.
"""
TX_nodes['PmtIdNode'].append(TX_nodes['EndToEnd_PmtId_Node'])
TX_nodes['AmtNode'].append(TX_nodes['InstdAmtNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['PmtIdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['AmtNode'])
if TX_nodes['BIC_CdtrAgt_Node'].text is not None:
TX_nodes['FinInstnId_CdtrAgt_Node'].append(
TX_nodes['BIC_CdtrAgt_Node'])
TX_nodes['CdtrAgtNode'].append(TX_nodes['FinInstnId_CdtrAgt_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAgtNode'])
TX_nodes['CdtrNode'].append(TX_nodes['Nm_Cdtr_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrNode'])
TX_nodes['Id_CdtrAcct_Node'].append(TX_nodes['IBAN_CdtrAcct_Node'])
TX_nodes['CdtrAcctNode'].append(TX_nodes['Id_CdtrAcct_Node'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['CdtrAcctNode'])
TX_nodes['RmtInfNode'].append(TX_nodes['UstrdNode'])
TX_nodes['CdtTrfTxInfNode'].append(TX_nodes['RmtInfNode'])
self._add_to_batch_list(TX_nodes, payment)
|
python
|
{
"resource": ""
}
|
q9129
|
SepaTransfer._add_to_batch_list
|
train
|
def _add_to_batch_list(self, TX, payment):
"""
Method to add a transaction to the batch list. The correct batch will
be determined by the payment dict and the batch will be created if
it does not exist yet. This will also add the payment amount to the respective
batch total.
"""
batch_key = payment.get('execution_date', None)
if batch_key in self._batches.keys():
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
else:
self._batches[batch_key] = []
self._batches[batch_key].append(TX['CdtTrfTxInfNode'])
if batch_key in self._batch_totals:
self._batch_totals[batch_key] += payment['amount']
else:
self._batch_totals[batch_key] = payment['amount']
|
python
|
{
"resource": ""
}
|
q9130
|
BaseSummarizer._compute_matrix
|
train
|
def _compute_matrix(cls, sentences, weighting='frequency', norm=None):
"""
Compute the matrix of term frequencies given a list of sentences
"""
if norm not in ('l1', 'l2', None):
raise ValueError('Parameter "norm" can only take values "l1", "l2" or None')
# Initialise vectorizer to convert text documents into matrix of token counts
if weighting.lower() == 'binary':
vectorizer = CountVectorizer(min_df=1, ngram_range=(1, 1), binary=True, stop_words=None)
elif weighting.lower() == 'frequency':
vectorizer = CountVectorizer(min_df=1, ngram_range=(1, 1), binary=False, stop_words=None)
elif weighting.lower() == 'tfidf':
vectorizer = TfidfVectorizer(min_df=1, ngram_range=(1, 1), stop_words=None)
else:
raise ValueError('Parameter "method" must take one of the values "binary", "frequency" or "tfidf".')
# Extract word features from sentences using sparse vectorizer
frequency_matrix = vectorizer.fit_transform(sentences).astype(float)
# Normalize the term vectors (i.e. each row adds to 1)
if norm in ('l1', 'l2'):
frequency_matrix = normalize(frequency_matrix, norm=norm, axis=1)
elif norm is not None:
raise ValueError('Parameter "norm" can only take values "l1", "l2" or None')
return frequency_matrix
|
python
|
{
"resource": ""
}
|
q9131
|
SepaPaymentInitn._prepare_document
|
train
|
def _prepare_document(self):
"""
Build the main document node and set xml namespaces.
"""
self._xml = ET.Element("Document")
self._xml.set("xmlns",
"urn:iso:std:iso:20022:tech:xsd:" + self.schema)
self._xml.set("xmlns:xsi",
"http://www.w3.org/2001/XMLSchema-instance")
ET.register_namespace("",
"urn:iso:std:iso:20022:tech:xsd:" + self.schema)
ET.register_namespace("xsi",
"http://www.w3.org/2001/XMLSchema-instance")
n = ET.Element(self.root_el)
self._xml.append(n)
|
python
|
{
"resource": ""
}
|
q9132
|
get_rand_string
|
train
|
def get_rand_string(length=12, allowed_chars='0123456789abcdef'):
"""
Returns a securely generated random string. Taken from the Django project.
A length of 12 with the a-z, A-Z, 0-9 character set returns
a 71-bit value: log_2((26+26+10)^12) =~ 71 bits.
"""
if not using_sysrandom:
# This is ugly, and a hack, but it makes things better than
# the alternative of predictability. This re-seeds the PRNG
# using a value that is hard for an attacker to predict, every
# time a random string is required. This may change the
# properties of the chosen random sequence slightly, but this
# is better than absolute predictability.
random.seed(
hashlib.sha256(
("%s%s" % (
random.getstate(),
time.time())).encode('utf-8')
).digest())
return ''.join([random.choice(allowed_chars) for i in range(length)])
|
python
|
{
"resource": ""
}
|
q9133
|
make_msg_id
|
train
|
def make_msg_id():
"""
Create a semi-random message id by combining a 12-char random hex string
and a timestamp.
@return: string consisting of timestamp, -, random value
"""
random_string = get_rand_string(12)
timestamp = time.strftime("%Y%m%d%I%M%S")
msg_id = timestamp + "-" + random_string
return msg_id
|
python
|
{
"resource": ""
}
|
q9134
|
make_id
|
train
|
def make_id(name):
"""
Create a random id combined with the creditor name.
@return string consisting of name (truncated at 22 chars), -,
12 char rand hex string.
"""
name = re.sub(r'[^a-zA-Z0-9]', '', name)
r = get_rand_string(12)
if len(name) > 22:
name = name[:22]
return name + "-" + r
|
python
|
{
"resource": ""
}
|
q9135
|
mapping_get
|
train
|
def mapping_get(uri, mapping):
"""Look up the URI in the given mapping and return the result.
Throws KeyError if no matching mapping was found.
"""
ln = localname(uri)
# 1. try to match URI keys
for k, v in mapping.items():
if k == uri:
return v
# 2. try to match local names
for k, v in mapping.items():
if k == ln:
return v
# 3. try to match local names with * prefix
# try to match longest first, so sort the mapping by key length
l = list(mapping.items())
l.sort(key=lambda i: len(i[0]), reverse=True)
for k, v in l:
if k[0] == '*' and ln.endswith(k[1:]):
return v
raise KeyError(uri)
|
python
|
{
"resource": ""
}
|
q9136
|
mapping_match
|
train
|
def mapping_match(uri, mapping):
"""Determine whether the given URI matches one of the given mappings.
Returns True if a match was found, False otherwise.
"""
try:
val = mapping_get(uri, mapping)
return True
except KeyError:
return False
|
python
|
{
"resource": ""
}
|
q9137
|
in_general_ns
|
train
|
def in_general_ns(uri):
"""Return True iff the URI is in a well-known general RDF namespace.
URI namespaces considered well-known are RDF, RDFS, OWL, SKOS and DC."""
RDFuri = RDF.uri
RDFSuri = RDFS.uri
for ns in (RDFuri, RDFSuri, OWL, SKOS, DC):
if uri.startswith(ns):
return True
return False
|
python
|
{
"resource": ""
}
|
q9138
|
detect_namespace
|
train
|
def detect_namespace(rdf):
"""Try to automatically detect the URI namespace of the vocabulary.
Return namespace as URIRef.
"""
# pick a concept
conc = rdf.value(None, RDF.type, SKOS.Concept, any=True)
if conc is None:
logging.critical(
"Namespace auto-detection failed. "
"Set namespace using the --namespace option.")
sys.exit(1)
ln = localname(conc)
ns = URIRef(conc.replace(ln, ''))
if ns.strip() == '':
logging.critical(
"Namespace auto-detection failed. "
"Set namespace using the --namespace option.")
sys.exit(1)
logging.info(
"Namespace auto-detected to '%s' "
"- you can override this with the --namespace option.", ns)
return ns
|
python
|
{
"resource": ""
}
|
q9139
|
transform_sparql_update
|
train
|
def transform_sparql_update(rdf, update_query):
"""Perform a SPARQL Update transformation on the RDF data."""
logging.debug("performing SPARQL Update transformation")
if update_query[0] == '@': # actual query should be read from file
update_query = file(update_query[1:]).read()
logging.debug("update query: %s", update_query)
rdf.update(update_query)
|
python
|
{
"resource": ""
}
|
q9140
|
transform_sparql_construct
|
train
|
def transform_sparql_construct(rdf, construct_query):
"""Perform a SPARQL CONSTRUCT query on the RDF data and return a new graph."""
logging.debug("performing SPARQL CONSTRUCT transformation")
if construct_query[0] == '@': # actual query should be read from file
construct_query = file(construct_query[1:]).read()
logging.debug("CONSTRUCT query: %s", construct_query)
newgraph = Graph()
for triple in rdf.query(construct_query):
newgraph.add(triple)
return newgraph
|
python
|
{
"resource": ""
}
|
q9141
|
transform_concepts
|
train
|
def transform_concepts(rdf, typemap):
"""Transform Concepts into new types, as defined by the config file."""
# find out all the types used in the model
types = set()
for s, o in rdf.subject_objects(RDF.type):
if o not in typemap and in_general_ns(o):
continue
types.add(o)
for t in types:
if mapping_match(t, typemap):
newval = mapping_get(t, typemap)
newuris = [v[0] for v in newval]
logging.debug("transform class %s -> %s", t, str(newuris))
if newuris[0] is None: # delete all instances
for inst in rdf.subjects(RDF.type, t):
delete_uri(rdf, inst)
delete_uri(rdf, t)
else:
replace_object(rdf, t, newuris, predicate=RDF.type)
else:
logging.info("Don't know what to do with type %s", t)
|
python
|
{
"resource": ""
}
|
q9142
|
transform_literals
|
train
|
def transform_literals(rdf, literalmap):
"""Transform literal properties of Concepts, as defined by config file."""
affected_types = (SKOS.Concept, SKOS.Collection,
SKOSEXT.DeprecatedConcept)
props = set()
for t in affected_types:
for conc in rdf.subjects(RDF.type, t):
for p, o in rdf.predicate_objects(conc):
if isinstance(o, Literal) \
and (p in literalmap or not in_general_ns(p)):
props.add(p)
for p in props:
if mapping_match(p, literalmap):
newval = mapping_get(p, literalmap)
newuris = [v[0] for v in newval]
logging.debug("transform literal %s -> %s", p, str(newuris))
replace_predicate(
rdf, p, newuris, subjecttypes=affected_types)
else:
logging.info("Don't know what to do with literal %s", p)
|
python
|
{
"resource": ""
}
|
q9143
|
transform_relations
|
train
|
def transform_relations(rdf, relationmap):
"""Transform YSO-style concept relations into SKOS equivalents."""
affected_types = (SKOS.Concept, SKOS.Collection,
SKOSEXT.DeprecatedConcept)
props = set()
for t in affected_types:
for conc in rdf.subjects(RDF.type, t):
for p, o in rdf.predicate_objects(conc):
if isinstance(o, (URIRef, BNode)) \
and (p in relationmap or not in_general_ns(p)):
props.add(p)
for p in props:
if mapping_match(p, relationmap):
newval = mapping_get(p, relationmap)
logging.debug("transform relation %s -> %s", p, str(newval))
replace_predicate(
rdf, p, newval, subjecttypes=affected_types)
else:
logging.info("Don't know what to do with relation %s", p)
|
python
|
{
"resource": ""
}
|
q9144
|
transform_deprecated_concepts
|
train
|
def transform_deprecated_concepts(rdf, cs):
"""Transform deprecated concepts so they are in their own concept
scheme."""
deprecated_concepts = []
for conc in rdf.subjects(RDF.type, SKOSEXT.DeprecatedConcept):
rdf.add((conc, RDF.type, SKOS.Concept))
rdf.add((conc, OWL.deprecated, Literal("true", datatype=XSD.boolean)))
deprecated_concepts.append(conc)
if len(deprecated_concepts) > 0:
ns = cs.replace(localname(cs), '')
dcs = create_concept_scheme(
rdf, ns, 'deprecatedconceptscheme')
logging.debug("creating deprecated concept scheme %s", dcs)
for conc in deprecated_concepts:
rdf.add((conc, SKOS.inScheme, dcs))
|
python
|
{
"resource": ""
}
|
q9145
|
enrich_relations
|
train
|
def enrich_relations(rdf, enrich_mappings, use_narrower, use_transitive):
"""Enrich the SKOS relations according to SKOS semantics, including
subproperties of broader and symmetric related properties. If use_narrower
is True, include inverse narrower relations for all broader relations. If
use_narrower is False, instead remove all narrower relations, replacing
them with inverse broader relations. If use_transitive is True, calculate
transitive hierarchical relationships.
(broaderTransitive, and also narrowerTransitive if use_narrower is
True) and include them in the model.
"""
# 1. first enrich mapping relationships (because they affect regular ones)
if enrich_mappings:
infer.skos_symmetric_mappings(rdf)
infer.skos_hierarchical_mappings(rdf, use_narrower)
# 2. then enrich regular relationships
# related <-> related
infer.skos_related(rdf)
# broaderGeneric -> broader + inverse narrowerGeneric
for s, o in rdf.subject_objects(SKOSEXT.broaderGeneric):
rdf.add((s, SKOS.broader, o))
# broaderPartitive -> broader + inverse narrowerPartitive
for s, o in rdf.subject_objects(SKOSEXT.broaderPartitive):
rdf.add((s, SKOS.broader, o))
infer.skos_hierarchical(rdf, use_narrower)
# transitive closure: broaderTransitive and narrowerTransitive
if use_transitive:
infer.skos_transitive(rdf, use_narrower)
else:
# transitive relationships are not wanted, so remove them
for s, o in rdf.subject_objects(SKOS.broaderTransitive):
rdf.remove((s, SKOS.broaderTransitive, o))
for s, o in rdf.subject_objects(SKOS.narrowerTransitive):
rdf.remove((s, SKOS.narrowerTransitive, o))
infer.skos_topConcept(rdf)
|
python
|
{
"resource": ""
}
|
q9146
|
setup_concept_scheme
|
train
|
def setup_concept_scheme(rdf, defaultcs):
"""Make sure all concepts have an inScheme property, using the given
default concept scheme if necessary."""
for conc in rdf.subjects(RDF.type, SKOS.Concept):
# check concept scheme
cs = rdf.value(conc, SKOS.inScheme, None, any=True)
if cs is None: # need to set inScheme
rdf.add((conc, SKOS.inScheme, defaultcs))
|
python
|
{
"resource": ""
}
|
q9147
|
cleanup_properties
|
train
|
def cleanup_properties(rdf):
"""Remove unnecessary property definitions.
Removes SKOS and DC property definitions and definitions of unused
properties."""
for t in (RDF.Property, OWL.DatatypeProperty, OWL.ObjectProperty,
OWL.SymmetricProperty, OWL.TransitiveProperty,
OWL.InverseFunctionalProperty, OWL.FunctionalProperty):
for prop in rdf.subjects(RDF.type, t):
if prop.startswith(SKOS):
logging.debug(
"removing SKOS property definition: %s", prop)
replace_subject(rdf, prop, None)
continue
if prop.startswith(DC):
logging.debug("removing DC property definition: %s", prop)
replace_subject(rdf, prop, None)
continue
# if there are triples using the property, keep the property def
if len(list(rdf.subject_objects(prop))) > 0:
continue
logging.debug("removing unused property definition: %s", prop)
replace_subject(rdf, prop, None)
|
python
|
{
"resource": ""
}
|
q9148
|
find_reachable
|
train
|
def find_reachable(rdf, res):
"""Return the set of reachable resources starting from the given resource,
excluding the seen set of resources.
Note that the seen set is modified
in-place to reflect the ongoing traversal.
"""
starttime = time.time()
# This is almost a non-recursive breadth-first search algorithm, but a set
# is used as the "open" set instead of a FIFO, and an arbitrary element of
# the set is searched. This is slightly faster than DFS (using a stack)
# and much faster than BFS (using a FIFO).
seen = set() # used as the "closed" set
to_search = set([res]) # used as the "open" set
while len(to_search) > 0:
res = to_search.pop()
if res in seen:
continue
seen.add(res)
# res as subject
for p, o in rdf.predicate_objects(res):
if isinstance(p, URIRef) and p not in seen:
to_search.add(p)
if isinstance(o, URIRef) and o not in seen:
to_search.add(o)
# res as predicate
for s, o in rdf.subject_objects(res):
if isinstance(s, URIRef) and s not in seen:
to_search.add(s)
if isinstance(o, URIRef) and o not in seen:
to_search.add(o)
# res as object
for s, p in rdf.subject_predicates(res):
if isinstance(s, URIRef) and s not in seen:
to_search.add(s)
if isinstance(p, URIRef) and p not in seen:
to_search.add(p)
endtime = time.time()
logging.debug("find_reachable took %f seconds", (endtime - starttime))
return seen
|
python
|
{
"resource": ""
}
|
q9149
|
cleanup_unreachable
|
train
|
def cleanup_unreachable(rdf):
"""Remove triples which cannot be reached from the concepts by graph
traversal."""
all_subjects = set(rdf.subjects())
logging.debug("total subject resources: %d", len(all_subjects))
reachable = find_reachable(rdf, SKOS.Concept)
nonreachable = all_subjects - reachable
logging.debug("deleting %s non-reachable resources", len(nonreachable))
for subj in nonreachable:
delete_uri(rdf, subj)
|
python
|
{
"resource": ""
}
|
q9150
|
replace_subject
|
train
|
def replace_subject(rdf, fromuri, touri):
"""Replace occurrences of fromuri as subject with touri in given model.
If touri=None, will delete all occurrences of fromuri instead.
If touri is a list or tuple of URIRefs, all values will be inserted.
"""
if fromuri == touri:
return
for p, o in rdf.predicate_objects(fromuri):
rdf.remove((fromuri, p, o))
if touri is not None:
if not isinstance(touri, (list, tuple)):
touri = [touri]
for uri in touri:
rdf.add((uri, p, o))
|
python
|
{
"resource": ""
}
|
q9151
|
replace_predicate
|
train
|
def replace_predicate(rdf, fromuri, touri, subjecttypes=None, inverse=False):
"""Replace occurrences of fromuri as predicate with touri in given model.
If touri=None, will delete all occurrences of fromuri instead.
If touri is a list or tuple of URIRef, all values will be inserted. If
touri is a list of (URIRef, boolean) tuples, the boolean value will be
used to determine whether an inverse property is created (if True) or
not (if False). If a subjecttypes sequence is given, modify only those
triples where the subject is one of the provided types.
"""
if fromuri == touri:
return
for s, o in rdf.subject_objects(fromuri):
if subjecttypes is not None:
typeok = False
for t in subjecttypes:
if (s, RDF.type, t) in rdf:
typeok = True
if not typeok:
continue
rdf.remove((s, fromuri, o))
if touri is not None:
if not isinstance(touri, (list, tuple)):
touri = [touri]
for val in touri:
if not isinstance(val, tuple):
val = (val, False)
uri, inverse = val
if uri is None:
continue
if inverse:
rdf.add((o, uri, s))
else:
rdf.add((s, uri, o))
|
python
|
{
"resource": ""
}
|
q9152
|
replace_object
|
train
|
def replace_object(rdf, fromuri, touri, predicate=None):
"""Replace all occurrences of fromuri as object with touri in the given
model.
If touri=None, will delete all occurrences of fromuri instead.
If touri is a list or tuple of URIRef, all values will be inserted.
If predicate is given, modify only triples with the given predicate.
"""
if fromuri == touri:
return
for s, p in rdf.subject_predicates(fromuri):
if predicate is not None and p != predicate:
continue
rdf.remove((s, p, fromuri))
if touri is not None:
if not isinstance(touri, (list, tuple)):
touri = [touri]
for uri in touri:
rdf.add((s, p, uri))
|
python
|
{
"resource": ""
}
|
q9153
|
replace_uri
|
train
|
def replace_uri(rdf, fromuri, touri):
"""Replace all occurrences of fromuri with touri in the given model.
If touri is a list or tuple of URIRef, all values will be inserted.
If touri=None, will delete all occurrences of fromuri instead.
"""
replace_subject(rdf, fromuri, touri)
replace_predicate(rdf, fromuri, touri)
replace_object(rdf, fromuri, touri)
|
python
|
{
"resource": ""
}
|
q9154
|
rdfs_classes
|
train
|
def rdfs_classes(rdf):
"""Perform RDFS subclass inference.
Mark all resources that have a subclass type with the superclass as well."""
# find out the subclass mappings
upperclasses = {} # key: class val: set([superclass1, superclass2..])
for s, o in rdf.subject_objects(RDFS.subClassOf):
upperclasses.setdefault(s, set())
for uc in rdf.transitive_objects(s, RDFS.subClassOf):
if uc != s:
upperclasses[s].add(uc)
# set the superclass type information for subclass instances
for s, ucs in upperclasses.items():
logging.debug("setting superclass types: %s -> %s", s, str(ucs))
for res in rdf.subjects(RDF.type, s):
for uc in ucs:
rdf.add((res, RDF.type, uc))
|
python
|
{
"resource": ""
}
|
q9155
|
rdfs_properties
|
train
|
def rdfs_properties(rdf):
"""Perform RDFS subproperty inference.
Add superproperties where subproperties have been used."""
# find out the subproperty mappings
superprops = {} # key: property val: set([superprop1, superprop2..])
for s, o in rdf.subject_objects(RDFS.subPropertyOf):
superprops.setdefault(s, set())
for sp in rdf.transitive_objects(s, RDFS.subPropertyOf):
if sp != s:
superprops[s].add(sp)
# add the superproperty relationships
for p, sps in superprops.items():
logging.debug("setting superproperties: %s -> %s", p, str(sps))
for s, o in rdf.subject_objects(p):
for sp in sps:
rdf.add((s, sp, o))
|
python
|
{
"resource": ""
}
|
q9156
|
OverExtendsNode.get_parent
|
train
|
def get_parent(self, context):
"""
Load the parent template using our own ``find_template``, which
will cause its absolute path to not be used again. Then peek at
the first node, and if its parent arg is the same as the
current parent arg, we know circular inheritance is going to
occur, in which case we try and find the template again, with
the absolute directory removed from the search list.
"""
parent = self.parent_name.resolve(context)
# If parent is a template object, just return it.
if hasattr(parent, "render"):
return parent
template = self.find_template(parent, context)
for node in template.nodelist:
if (isinstance(node, ExtendsNode) and
node.parent_name.resolve(context) == parent):
return self.find_template(parent, context, peeking=True)
return template
|
python
|
{
"resource": ""
}
|
q9157
|
main
|
train
|
def main():
"""Read command line parameters and make a transform based on them."""
config = Config()
# additional options for command line client only
defaults = vars(config)
defaults['to_format'] = None
defaults['output'] = '-'
defaults['log'] = None
defaults['debug'] = False
options, remainingArgs = get_option_parser(defaults).parse_args()
for key in vars(options):
if hasattr(config, key):
setattr(config, key, getattr(options, key))
# configure logging, messages to stderr by default
logformat = '%(levelname)s: %(message)s'
loglevel = logging.INFO
if options.debug:
loglevel = logging.DEBUG
if options.log:
logging.basicConfig(filename=options.log,
format=logformat, level=loglevel)
else:
logging.basicConfig(format=logformat, level=loglevel)
output = options.output
to_format = options.to_format
# read config file as defaults and override from command line arguments
if options.config is not None:
config.read_and_parse_config_file(options.config)
options, remainingArgs = get_option_parser(vars(config)).parse_args()
for key in vars(options):
if hasattr(config, key):
setattr(config, key, getattr(options, key))
if remainingArgs:
inputfiles = remainingArgs
else:
inputfiles = ['-']
voc = skosify(*inputfiles, **vars(config))
write_rdf(voc, output, to_format)
|
python
|
{
"resource": ""
}
|
q9158
|
H2Protocol.set_handler
|
train
|
def set_handler(self, handler):
"""
Connect with a coroutine, which is scheduled when the connection is made.
This function will create a task, and when the connection is closed,
the task will be canceled.
:param handler:
:return: None
"""
if self._handler:
raise Exception('Handler was already set')
if handler:
self._handler = async_task(handler, loop=self._loop)
|
python
|
{
"resource": ""
}
|
q9159
|
H2Protocol.start_request
|
train
|
def start_request(self, headers, *, end_stream=False):
"""
Start a request by sending given headers on a new stream, and return
the ID of the new stream.
This may block until the underlying transport becomes writable, and
the number of concurrent outbound requests (open outbound streams) is
less than the value of peer config MAX_CONCURRENT_STREAMS.
The completion of the call to this method does not mean the request is
successfully delivered - data is only correctly stored in a buffer to
be sent. There's no guarantee it is truly delivered.
:param headers: A list of key-value tuples as headers.
:param end_stream: To send a request without body, set `end_stream` to
`True` (default `False`).
:return: Stream ID as an integer, used for further communication.
"""
yield from _wait_for_events(self._resumed, self._stream_creatable)
stream_id = self._conn.get_next_available_stream_id()
self._priority.insert_stream(stream_id)
self._priority.block(stream_id)
self._conn.send_headers(stream_id, headers, end_stream=end_stream)
self._flush()
return stream_id
|
python
|
{
"resource": ""
}
|
q9160
|
H2Protocol.start_response
|
train
|
def start_response(self, stream_id, headers, *, end_stream=False):
"""
Start a response by sending given headers on the given stream.
This may block until the underlying transport becomes writable.
:param stream_id: Which stream to send response on.
:param headers: A list of key-value tuples as headers.
:param end_stream: To send a response without body, set `end_stream` to
`True` (default `False`).
"""
yield from self._resumed.wait()
self._conn.send_headers(stream_id, headers, end_stream=end_stream)
self._flush()
|
python
|
{
"resource": ""
}
|
q9161
|
H2Protocol.send_data
|
train
|
def send_data(self, stream_id, data, *, end_stream=False):
"""
Send request or response body on the given stream.
This will block until either the whole data is sent, or the stream gets
closed. A paused underlying transport or a closed flow control
window will also cause it to wait. If the peer increases the flow
control window, this method will resume sending automatically.
This can be called multiple times, but it must be called after a
`start_request` or `start_response` with the returned stream ID, and
before any `end_stream` instructions; otherwise it will fail.
The given data may be automatically split into smaller frames in order
to fit in the configured frame size or flow control window.
Each stream can only have one `send_data` running; other callers
will be blocked on a per-stream lock (wlock), so that coroutines
sending data concurrently won't interfere with each other.
Similarly, the completion of the call to this method does not mean the
data is delivered.
:param stream_id: Which stream to send data on
:param data: Bytes to send
:param end_stream: To finish sending a request or response, set this to
`True` to close the given stream locally after data
is sent (default `False`).
:raise: `SendException` if there is an error sending data. Data left
unsent can be found in `data` of the exception.
"""
try:
with (yield from self._get_stream(stream_id).wlock):
while True:
yield from _wait_for_events(
self._resumed, self._get_stream(stream_id).window_open)
self._priority.unblock(stream_id)
waiter = asyncio.Future()
if not self._priority_events:
self._loop.call_soon(self._priority_step)
self._priority_events[stream_id] = waiter
try:
yield from waiter
data_size = len(data)
size = min(
data_size,
self._conn.local_flow_control_window(stream_id),
self._conn.max_outbound_frame_size)
if data_size == 0 or size == data_size:
self._conn.send_data(stream_id, data,
end_stream=end_stream)
self._flush()
break
elif size > 0:
self._conn.send_data(stream_id, data[:size])
data = data[size:]
self._flush()
finally:
self._priority_events.pop(stream_id, None)
self._priority.block(stream_id)
if self._priority_events:
self._loop.call_soon(self._priority_step)
except ProtocolError:
raise exceptions.SendException(data)
|
python
|
{
"resource": ""
}
|
q9162
|
H2Protocol.send_trailers
|
train
|
def send_trailers(self, stream_id, headers):
"""
Send trailers on the given stream, closing the stream locally.
This may block until the underlying transport becomes writable, or
other coroutines release the wlock on this stream.
:param stream_id: Which stream to send trailers on.
:param headers: A list of key-value tuples as trailers.
"""
with (yield from self._get_stream(stream_id).wlock):
yield from self._resumed.wait()
self._conn.send_headers(stream_id, headers, end_stream=True)
self._flush()
|
python
|
{
"resource": ""
}
|
q9163
|
H2Protocol.end_stream
|
train
|
def end_stream(self, stream_id):
"""
Close the given stream locally.
This may block until the underlying transport becomes writable, or
other coroutines release the wlock on this stream.
:param stream_id: Which stream to close.
"""
with (yield from self._get_stream(stream_id).wlock):
yield from self._resumed.wait()
self._conn.end_stream(stream_id)
self._flush()
|
python
|
{
"resource": ""
}
|
q9164
|
H2Protocol.read_stream
|
train
|
def read_stream(self, stream_id, size=None):
"""
Read data from the given stream.
By default (`size=None`), this returns all data left in the current HTTP/2
frame. In other words, the default behavior is to receive frame by frame.
If size is given a number above zero, this method will try to return as
many bytes as possible up to the given size, blocking until enough bytes
are ready or the stream is remotely closed.
If below zero, it will read until the stream is remotely closed and
return everything at hand.
`size=0` is a special case that does nothing but returns `b''`. The
same result `b''` is also returned under other conditions if there is
no more data on the stream to receive, even under `size=None` when the peer
sends an empty frame - you can use `b''` to safely identify the end of
the given stream.
Flow control frames will be automatically sent while reading clears the
buffer, allowing more data to come in.
:param stream_id: Stream to read
:param size: Expected size to read, `-1` for all, default frame.
:return: Bytes read or empty if there is no more to expect.
"""
rv = []
try:
with (yield from self._get_stream(stream_id).rlock):
if size is None:
rv.append((
yield from self._get_stream(stream_id).read_frame()))
self._flow_control(stream_id)
elif size < 0:
while True:
rv.extend((
yield from self._get_stream(stream_id).read_all()))
self._flow_control(stream_id)
else:
while size > 0:
bufs, count = yield from self._get_stream(
stream_id).read(size)
rv.extend(bufs)
size -= count
self._flow_control(stream_id)
except StreamClosedError:
pass
except _StreamEndedException as e:
try:
self._flow_control(stream_id)
except StreamClosedError:
pass
rv.extend(e.bufs)
return b''.join(rv)
|
python
|
{
"resource": ""
}
|
q9165
|
H2Protocol.wait_functional
|
train
|
def wait_functional(self):
"""
Wait until the connection becomes functional.
The connection is considered functional if it was active within the
last few seconds (defined by `functional_timeout`), where a newly-made
connection and received data indicate activeness.
:return: Most recently calculated round-trip time if any.
"""
while not self._is_functional():
self._rtt = None
self._ping_index += 1
self._ping_time = self._loop.time()
self._conn.ping(struct.pack('Q', self._ping_index))
self._flush()
try:
yield from asyncio.wait_for(self._functional.wait(),
self._functional_timeout)
except asyncio.TimeoutError:
pass
return self._rtt
|
python
|
{
"resource": ""
}
|
q9166
|
H2Protocol.reprioritize
|
train
|
def reprioritize(self, stream_id,
depends_on=None, weight=16, exclusive=False):
"""
Update the priority status of an existing stream.
:param stream_id: The stream ID of the stream being updated.
:param depends_on: (optional) The ID of the stream that the stream now
depends on. If ``None``, will be moved to depend on stream 0.
:param weight: (optional) The new weight to give the stream. Defaults
to 16.
:param exclusive: (optional) Whether this stream should now be an
exclusive dependency of the new parent.
"""
self._priority.reprioritize(stream_id, depends_on, weight, exclusive)
|
python
|
{
"resource": ""
}
|
q9167
|
superuser_required
|
train
|
def superuser_required(view_func=None, redirect_field_name=REDIRECT_FIELD_NAME,
login_url='admin:login'):
"""
Decorator for views that checks that the user is logged in and is a
superuser, redirecting to the login page if necessary.
"""
actual_decorator = user_passes_test(
lambda u: u.is_active and u.is_superuser,
login_url=login_url,
redirect_field_name=redirect_field_name
)
if view_func:
return actual_decorator(view_func)
return actual_decorator
|
python
|
{
"resource": ""
}
|
q9168
|
SqlAggregate._condition_as_sql
|
train
|
def _condition_as_sql(self, qn, connection):
'''
Return sql for condition.
'''
def escape(value):
if isinstance(value, bool):
value = str(int(value))
if isinstance(value, six.string_types):
# Escape params used with LIKE
if '%' in value:
value = value.replace('%', '%%')
# Escape single quotes
if "'" in value:
value = value.replace("'", "''")
# Add single quote to text values
value = "'" + value + "'"
return value
sql, param = self.condition.query.where.as_sql(qn, connection)
param = map(escape, param)
return sql % tuple(param)
|
python
|
{
"resource": ""
}
|
q9169
|
build_messages_metrics
|
train
|
def build_messages_metrics(messages):
"""Build reports's metrics"""
count_types = collections.Counter(
line.get('type') or None
for line in messages)
count_modules = collections.Counter(
line.get('module') or None
for line in messages)
count_symbols = collections.Counter(
line.get('symbol') or None
for line in messages)
count_paths = collections.Counter(
line.get('path') or None
for line in messages)
return {
'types': count_types,
'modules': count_modules,
'symbols': count_symbols,
'paths': count_paths,
}
|
python
|
{
"resource": ""
}
|
q9170
|
build_messages_modules
|
train
|
def build_messages_modules(messages):
"""Build and yield sorted list of messages per module.
:param list messages: List of dict of messages
:return: Tuple of 2 values: first is the module info, second is the list
of messages sorted by line number
"""
data = collections.defaultdict(list)
for line in messages:
module_name = line.get('module')
module_path = line.get('path')
module_info = ModuleInfo(
module_name,
module_path,
)
data[module_info].append(line)
for module, module_messages in data.items():
yield (
module,
sorted(module_messages, key=lambda x: x.get('line')))
|
python
|
{
"resource": ""
}
|
q9171
|
stats_evaluation
|
train
|
def stats_evaluation(stats):
"""Generate an evaluation for the given pylint ``stats``."""
statement = stats.get('statement')
error = stats.get('error', 0)
warning = stats.get('warning', 0)
refactor = stats.get('refactor', 0)
convention = stats.get('convention', 0)
if not statement or statement <= 0:
return None
malus = float(5 * error + warning + refactor + convention)
malus_ratio = malus / statement
return 10.0 - (malus_ratio * 10)
|
python
|
{
"resource": ""
}
|
q9172
|
build_command_parser
|
train
|
def build_command_parser():
"""Build command parser using ``argparse`` module."""
parser = argparse.ArgumentParser(
description='Transform Pylint JSON report to HTML')
parser.add_argument(
'filename',
metavar='FILENAME',
type=argparse.FileType('r'),
nargs='?',
default=sys.stdin,
help='Pylint JSON report input file (or stdin)')
parser.add_argument(
'-o', '--output',
metavar='FILENAME',
type=argparse.FileType('w'),
default=sys.stdout,
help='Pylint HTML report output file (or stdout)')
parser.add_argument(
'-f', '--input-format',
metavar='FORMAT',
choices=[SIMPLE_JSON, EXTENDED_JSON],
action='store',
dest='input_format',
default='json',
help='Pylint JSON Report input type (json or jsonextended)')
return parser
|
python
|
{
"resource": ""
}
|
q9173
|
main
|
train
|
def main():
"""Pylint JSON to HTML Main Entry Point"""
parser = build_command_parser()
options = parser.parse_args()
file_pointer = options.filename
input_format = options.input_format
with file_pointer:
json_data = json.load(file_pointer)
if input_format == SIMPLE_JSON:
report = Report(json_data)
elif input_format == EXTENDED_JSON:
report = Report(
json_data.get('messages'),
json_data.get('stats'),
json_data.get('previous'))
print(report.render(), file=options.output)
|
python
|
{
"resource": ""
}
|
q9174
|
Report.render
|
train
|
def render(self):
"""Render report to HTML"""
template = self.get_template()
return template.render(
messages=self._messages,
metrics=self.metrics,
report=self)
|
python
|
{
"resource": ""
}
|
q9175
|
JsonExtendedReporter.handle_message
|
train
|
def handle_message(self, msg):
"""Store new message for later use.
.. seealso:: :meth:`~JsonExtendedReporter.on_close`
"""
self._messages.append({
'type': msg.category,
'module': msg.module,
'obj': msg.obj,
'line': msg.line,
'column': msg.column,
'path': msg.path,
'symbol': msg.symbol,
'message': str(msg.msg) or '',
'message-id': msg.msg_id,
})
|
python
|
{
"resource": ""
}
|
q9176
|
JsonExtendedReporter.on_close
|
train
|
def on_close(self, stats, previous_stats):
"""Print the extended JSON report to reporter's output.
:param dict stats: Metrics for the current pylint run
:param dict previous_stats: Metrics for the previous pylint run
"""
reports = {
'messages': self._messages,
'stats': stats,
'previous': previous_stats,
}
print(json.dumps(reports, cls=JSONSetEncoder, indent=4), file=self.out)
|
python
|
{
"resource": ""
}
|
q9177
|
triangulate
|
train
|
def triangulate(points):
"""
    Connects an input list of xy tuples with lines forming a set of
    smallest possible Delaunay triangles between them.
Arguments:
- **points**: A list of xy or xyz point tuples to triangulate.
Returns:
- A list of triangle polygons. If the input coordinate points contained
a third z value then the output triangles will also have these z values.
"""
    # Remove duplicate xy points because duplicates would make the Delaunay computation fail,
    # and remember z (if any) for retrieving originals from the index results
seen = set()
uniqpoints = [ p for p in points if str( p[:2] ) not in seen and not seen.add( str( p[:2] ) )]
classpoints = [_Point(*point[:2]) for point in uniqpoints]
# Compute Delauney
triangle_ids = tesselator.computeDelaunayTriangulation(classpoints)
# Get vertices from result indexes
triangles = [[uniqpoints[i] for i in triangle] for triangle in triangle_ids]
return triangles
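
# Hedged usage sketch: triangulate the four corners of a unit square; the
# result should be two triangles sharing a diagonal. Assumes the tesselator
# module used above is importable.
square = [(0, 0), (1, 0), (1, 1), (0, 1)]
for tri in triangulate(square):
    print(tri)  # each tri is a list of three of the input points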
|
python
|
{
"resource": ""
}
|
q9178
|
voronoi
|
train
|
def voronoi(points, buffer_percent=100):
"""
Surrounds each point in an input list of xy tuples with a
unique Voronoi polygon.
Arguments:
- **points**: A list of xy or xyz point tuples to triangulate.
    - **buffer_percent** (optional): Controls how much bigger than
    the original bbox of the input points to make the bbox of the fake points,
    which compensates for missing values around the edges (default is 100 percent).
Returns:
- Returns a list of 2-tuples, with the first item in each tuple being the
original input point (or None for each corner of the bounding box buffer),
    and the second item being the point's corresponding Voronoi polygon.
"""
    # Remove duplicate xy points because duplicates would make the Delaunay computation fail,
    # and remember z (if any) for retrieving originals from the index results
seen = set()
uniqpoints = [ p for p in points if str( p[:2] ) not in seen and not seen.add( str( p[:2] ) )]
classpoints = [_Point(*point[:2]) for point in uniqpoints]
# Create fake sitepoints around the point extent to correct for infinite polygons
# For a similar approach and problem see: http://gis.stackexchange.com/questions/11866/voronoi-polygons-that-run-out-to-infinity
xs,ys = list(zip(*uniqpoints))[:2]
pointswidth = max(xs) - min(xs)
pointsheight = max(ys) - min(ys)
xbuff,ybuff = ( pointswidth / 100.0 * buffer_percent , pointsheight / 100.0 * buffer_percent )
midx,midy = ( sum(xs) / float(len(xs)) , sum(ys) / float(len(ys)) )
#bufferbox = [(midx-xbuff,midy-ybuff),(midx+xbuff,midy-ybuff),(midx+xbuff,midy+ybuff),(midx-xbuff,midy+ybuff)] # corner buffer
bufferbox = [(midx-xbuff,midy),(midx+xbuff,midy),(midx,midy+ybuff),(midx,midy-ybuff)] # mid sides buffer
classpoints.extend([_Point(*corner) for corner in bufferbox])
# Compute Voronoi
vertices,edges,poly_dict = tesselator.computeVoronoiDiagram(classpoints)
# Turn unordered result edges into ordered polygons
polygons = list()
for sitepoint,polyedges in list(poly_dict.items()):
polyedges = [edge[1:] for edge in polyedges]
poly = list()
firststart,firstend = polyedges.pop(0)
poly.append(firstend)
while polyedges:
curend = poly[-1]
for i,other in enumerate(polyedges):
otherstart,otherend = other
if otherstart == curend:
poly.append(otherend)
##print otherstart,otherend
polyedges.pop(i)
break
elif otherend == curend:
##print otherend,otherstart
poly.append(otherstart)
polyedges.pop(i)
break
# Get vertices from indexes
        try:
            sitepoint = uniqpoints[sitepoint]
        except IndexError:
            sitepoint = None  # fake bbox sitepoints shouldn't be in the results
poly = [vertices[vi] for vi in poly if vi != -1]
polygons.append((sitepoint, poly))
# Maybe clip parts of polygons that stick outside screen?
# ...
return polygons
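
# Hedged usage sketch: build Voronoi cells around three points. Tuples whose
# site point is None belong to the fake buffer points added above and can
# usually be ignored.
sites = [(0, 0), (2, 0), (1, 2)]
for site, cell in voronoi(sites, buffer_percent=100):
    if site is not None:
        print(site, len(cell), 'vertices')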
|
python
|
{
"resource": ""
}
|
q9179
|
equals
|
train
|
def equals(val1, val2):
"""
Returns True if the two strings are equal, False otherwise.
The time taken is independent of the number of characters that match.
For the sake of simplicity, this function executes in constant time only
when the two strings have the same length. It short-circuits when they
have different lengths.
"""
if len(val1) != len(val2):
return False
result = 0
for x, y in zip(val1, val2):
result |= ord(x) ^ ord(y)
return result == 0
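
# Hedged usage sketch: equal-length secrets are compared in constant time,
# while mismatched lengths short-circuit to False as documented.
assert equals('s3cret', 's3cret')
assert not equals('s3cret', 's3cres')
assert not equals('short', 'longer')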
|
python
|
{
"resource": ""
}
|
q9180
|
_encode_uvarint
|
train
|
def _encode_uvarint(data, n):
    ''' Encodes an integer in variable-length (varint) format and appends it to data.'''
if n < 0:
raise ValueError('only support positive integer')
while True:
this_byte = n & 0x7f
n >>= 7
if n == 0:
data.append(this_byte)
break
data.append(this_byte | 0x80)
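
# Hedged worked example: 300 (binary 1_0010_1100) is emitted low 7 bits
# first, so the output is 0xAC (0x2C with the continuation bit) then 0x02.
buf = bytearray()
_encode_uvarint(buf, 300)
assert bytes(buf) == b'\xac\x02'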
|
python
|
{
"resource": ""
}
|
q9181
|
BinarySerializer._parse_section_v2
|
train
|
def _parse_section_v2(self, data):
''' Parses a sequence of packets in data.
The sequence is terminated by a packet with a field type of EOS
        :param data: bytes to be deserialized.
:return: the rest of data and an array of packet V2
'''
from pymacaroons.exceptions import MacaroonDeserializationException
prev_field_type = -1
packets = []
while True:
if len(data) == 0:
raise MacaroonDeserializationException(
'section extends past end of buffer')
rest, packet = self._parse_packet_v2(data)
if packet.field_type == self._EOS:
return rest, packets
if packet.field_type <= prev_field_type:
raise MacaroonDeserializationException('fields out of order')
packets.append(packet)
prev_field_type = packet.field_type
data = rest
|
python
|
{
"resource": ""
}
|
q9182
|
BinarySerializer._parse_packet_v2
|
train
|
def _parse_packet_v2(self, data):
''' Parses a V2 data packet at the start of the given data.
The format of a packet is as follows:
field_type(varint) payload_len(varint) data[payload_len bytes]
        apart from EOS, which has no payload_len or data (it's a single zero
byte).
:param data:
:return: rest of data, PacketV2
'''
from pymacaroons.exceptions import MacaroonDeserializationException
ft, n = _decode_uvarint(data)
data = data[n:]
if ft == self._EOS:
return data, PacketV2(ft, None)
payload_len, n = _decode_uvarint(data)
data = data[n:]
if payload_len > len(data):
raise MacaroonDeserializationException(
'field data extends past end of buffer')
return data[payload_len:], PacketV2(ft, data[0:payload_len])
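
# Hedged worked example (illustration only; the PacketV2 field names are
# assumed from the code above): in b'\x02\x03foo' the two leading varints
# decode to field_type=2 and payload_len=3, so parsing yields a packet
# carrying b'foo' and no leftover bytes.
# rest, packet = BinarySerializer()._parse_packet_v2(b'\x02\x03foo')
# assert rest == b'' and packet.field_type == 2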
|
python
|
{
"resource": ""
}
|
q9183
|
Macaroon.prepare_for_request
|
train
|
def prepare_for_request(self, discharge_macaroon):
''' Return a new discharge macaroon bound to the receiving macaroon's
current signature so that it can be used in a request.
This must be done before a discharge macaroon is sent to a server.
:param discharge_macaroon:
:return: bound discharge macaroon
'''
protected = discharge_macaroon.copy()
return HashSignaturesBinder(self).bind(protected)
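
# Hedged usage sketch (standalone, not part of the original class): bind a
# third-party discharge macaroon to a root macaroon before sending both in a
# request. Locations, identifiers and keys below are placeholders.
# from pymacaroons import Macaroon  (needed when run as a separate script)
root = Macaroon(location='example.com', identifier='root-id', key='root-key')
discharge = Macaroon(location='auth.example.com', identifier='caveat-id',
                     key='caveat-key')
bound = root.prepare_for_request(discharge)  # 'discharge' itself is unchanged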
|
python
|
{
"resource": ""
}
|
q9184
|
_caveat_v1_to_dict
|
train
|
def _caveat_v1_to_dict(c):
''' Return a caveat as a dictionary for export as the JSON
macaroon v1 format.
'''
serialized = {}
if len(c.caveat_id) > 0:
serialized['cid'] = c.caveat_id
if c.verification_key_id:
serialized['vid'] = utils.raw_urlsafe_b64encode(
c.verification_key_id).decode('utf-8')
if c.location:
serialized['cl'] = c.location
return serialized
|
python
|
{
"resource": ""
}
|
q9185
|
_caveat_v2_to_dict
|
train
|
def _caveat_v2_to_dict(c):
''' Return a caveat as a dictionary for export as the JSON
macaroon v2 format.
'''
serialized = {}
if len(c.caveat_id_bytes) > 0:
_add_json_binary_field(c.caveat_id_bytes, serialized, 'i')
if c.verification_key_id:
_add_json_binary_field(c.verification_key_id, serialized, 'v')
if c.location:
serialized['l'] = c.location
return serialized
|
python
|
{
"resource": ""
}
|
q9186
|
_read_json_binary_field
|
train
|
def _read_json_binary_field(deserialized, field):
''' Read the value of a JSON field that may be string or base64-encoded.
'''
val = deserialized.get(field)
if val is not None:
return utils.convert_to_bytes(val)
val = deserialized.get(field + '64')
if val is None:
return None
return utils.raw_urlsafe_b64decode(val)
|
python
|
{
"resource": ""
}
|
q9187
|
JsonSerializer.serialize
|
train
|
def serialize(self, m):
'''Serialize the macaroon in JSON format indicated by the version field.
        @param m the macaroon to serialize.
@return JSON macaroon.
'''
from pymacaroons import macaroon
if m.version == macaroon.MACAROON_V1:
return self._serialize_v1(m)
return self._serialize_v2(m)
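
# Hedged usage sketch (standalone; all values are placeholders and the
# imports are assumed to match the installed pymacaroons package).
from pymacaroons import Macaroon
from pymacaroons.macaroon import MACAROON_V2
m = Macaroon(location='example.com', identifier='my-id', key='my-key',
             version=MACAROON_V2)
print(JsonSerializer().serialize(m))  # prints the v2 JSON representation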
|
python
|
{
"resource": ""
}
|
q9188
|
JsonSerializer._serialize_v1
|
train
|
def _serialize_v1(self, macaroon):
'''Serialize the macaroon in JSON format v1.
@param macaroon the macaroon to serialize.
@return JSON macaroon.
'''
serialized = {
'identifier': utils.convert_to_string(macaroon.identifier),
'signature': macaroon.signature,
}
if macaroon.location:
serialized['location'] = macaroon.location
if macaroon.caveats:
serialized['caveats'] = [
_caveat_v1_to_dict(caveat) for caveat in macaroon.caveats
]
return json.dumps(serialized)
|
python
|
{
"resource": ""
}
|
q9189
|
JsonSerializer._serialize_v2
|
train
|
def _serialize_v2(self, macaroon):
'''Serialize the macaroon in JSON format v2.
@param macaroon the macaroon to serialize.
@return JSON macaroon in v2 format.
'''
serialized = {}
_add_json_binary_field(macaroon.identifier_bytes, serialized, 'i')
_add_json_binary_field(binascii.unhexlify(macaroon.signature_bytes),
serialized, 's')
if macaroon.location:
serialized['l'] = macaroon.location
if macaroon.caveats:
serialized['c'] = [
_caveat_v2_to_dict(caveat) for caveat in macaroon.caveats
]
return json.dumps(serialized)
|
python
|
{
"resource": ""
}
|
q9190
|
JsonSerializer.deserialize
|
train
|
def deserialize(self, serialized):
'''Deserialize a JSON macaroon depending on the format.
@param serialized the macaroon in JSON format.
@return the macaroon object.
'''
deserialized = json.loads(serialized)
if deserialized.get('identifier') is None:
return self._deserialize_v2(deserialized)
else:
return self._deserialize_v1(deserialized)
|
python
|
{
"resource": ""
}
|
q9191
|
JsonSerializer._deserialize_v1
|
train
|
def _deserialize_v1(self, deserialized):
'''Deserialize a JSON macaroon in v1 format.
        @param deserialized the macaroon in v1 JSON format, as a dict.
@return the macaroon object.
'''
from pymacaroons.macaroon import Macaroon, MACAROON_V1
from pymacaroons.caveat import Caveat
caveats = []
for c in deserialized.get('caveats', []):
caveat = Caveat(
caveat_id=c['cid'],
verification_key_id=(
utils.raw_b64decode(c['vid']) if c.get('vid')
else None
),
location=(
c['cl'] if c.get('cl') else None
),
version=MACAROON_V1
)
caveats.append(caveat)
return Macaroon(
location=deserialized.get('location'),
identifier=deserialized['identifier'],
caveats=caveats,
signature=deserialized['signature'],
version=MACAROON_V1
)
|
python
|
{
"resource": ""
}
|
q9192
|
JsonSerializer._deserialize_v2
|
train
|
def _deserialize_v2(self, deserialized):
'''Deserialize a JSON macaroon v2.
        @param deserialized the macaroon in JSON v2 format, as a dict.
@return the macaroon object.
'''
from pymacaroons.macaroon import Macaroon, MACAROON_V2
from pymacaroons.caveat import Caveat
caveats = []
for c in deserialized.get('c', []):
caveat = Caveat(
caveat_id=_read_json_binary_field(c, 'i'),
verification_key_id=_read_json_binary_field(c, 'v'),
location=_read_json_binary_field(c, 'l'),
version=MACAROON_V2
)
caveats.append(caveat)
return Macaroon(
location=_read_json_binary_field(deserialized, 'l'),
identifier=_read_json_binary_field(deserialized, 'i'),
caveats=caveats,
signature=binascii.hexlify(
_read_json_binary_field(deserialized, 's')),
version=MACAROON_V2
)
|
python
|
{
"resource": ""
}
|
q9193
|
ProjectPreferences.save_settings
|
train
|
def save_settings(cls, project=None, user=None, settings=None):
"""
Save settings for a user without first fetching their preferences.
        - **user** and **project** can be given either as a :py:class:`.User`
          and a :py:class:`.Project` instance respectively, or as IDs. If
          either argument is given, the other is also required.
- **settings** is a :py:class:`dict` containing the settings to be
saved.
"""
if (isinstance(settings, dict)):
_to_update = settings
if (
isinstance(user, User)
and isinstance(project, Project)
):
_user_id = user.id
_project_id = project.id
elif (
isinstance(user, (int, str,))
and isinstance(project, (int, str,))
):
_user_id = user
_project_id = project
else:
raise TypeError
cls.http_post(
'update_settings',
json={
'project_preferences': {
'user_id': _user_id,
'project_id': _project_id,
'settings': _to_update,
}
}
)
else:
raise TypeError
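
# Hedged usage sketch, written as if in a separate script: save one setting
# using raw IDs. The IDs and the setting key are placeholders, and the call
# hits the live Panoptes API when executed.
# from panoptes_client import ProjectPreferences
# ProjectPreferences.save_settings(
#     project=1234,
#     user=5678,
#     settings={'workflow_id': '999'},
# )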
|
python
|
{
"resource": ""
}
|
q9194
|
Subject.async_saves
|
train
|
def async_saves(cls):
"""
Returns a context manager to allow asynchronously creating subjects.
Using this context manager will create a pool of threads which will
create multiple subjects at once and upload any local files
simultaneously.
The recommended way to use this is with the `with` statement::
with Subject.async_saves():
local_files = [...]
for filename in local_files:
s = Subject()
s.links.project = 1234
s.add_location(filename)
s.save()
Alternatively, you can manually shut down the thread pool::
pool = Subject.async_saves()
local_files = [...]
try:
for filename in local_files:
s = Subject()
s.links.project = 1234
s.add_location(filename)
s.save()
finally:
pool.shutdown()
"""
cls._local.save_exec = ThreadPoolExecutor(
max_workers=ASYNC_SAVE_THREADS
)
return cls._local.save_exec
|
python
|
{
"resource": ""
}
|
q9195
|
Subject.async_save_result
|
train
|
def async_save_result(self):
"""
Retrieves the result of this subject's asynchronous save.
- Returns `True` if the subject was saved successfully.
- Raises `concurrent.futures.CancelledError` if the save was cancelled.
- If the save failed, raises the relevant exception.
- Returns `False` if the subject hasn't finished saving or if the
subject has not been queued for asynchronous save.
"""
if hasattr(self, "_async_future") and self._async_future.done():
self._async_future.result()
return True
else:
return False
|
python
|
{
"resource": ""
}
|
q9196
|
Subject.add_location
|
train
|
def add_location(self, location):
"""
Add a media location to this subject.
- **location** can be an open :py:class:`file` object, a path to a
local file, or a :py:class:`dict` containing MIME types and URLs for
remote media.
Examples::
subject.add_location(my_file)
subject.add_location('/data/image.jpg')
subject.add_location({'image/png': 'https://example.com/image.png'})
"""
if type(location) is dict:
self.locations.append(location)
self._media_files.append(None)
return
elif type(location) in (str,) + _OLD_STR_TYPES:
f = open(location, 'rb')
else:
f = location
try:
media_data = f.read()
if MEDIA_TYPE_DETECTION == 'magic':
media_type = magic.from_buffer(media_data, mime=True)
else:
media_type = imghdr.what(None, media_data)
if not media_type:
raise UnknownMediaException(
'Could not detect file type. Please try installing '
'libmagic: https://panoptes-python-client.readthedocs.'
'io/en/latest/user_guide.html#uploading-non-image-'
'media-types'
)
media_type = 'image/{}'.format(media_type)
self.locations.append(media_type)
self._media_files.append(media_data)
finally:
f.close()
|
python
|
{
"resource": ""
}
|
q9197
|
Exportable.wait_export
|
train
|
def wait_export(
self,
export_type,
timeout=None,
):
"""
Blocks until an in-progress export is ready.
- **export_type** is a string specifying which type of export to wait
for.
- **timeout** is the maximum number of seconds to wait.
If ``timeout`` is given and the export is not ready by the time limit,
:py:class:`.PanoptesAPIException` is raised.
"""
success = False
if timeout:
end_time = datetime.datetime.now() + datetime.timedelta(
seconds=timeout
)
while (not timeout) or (datetime.datetime.now() < end_time):
export_description = self.describe_export(
export_type,
)
if export_type in TALK_EXPORT_TYPES:
export_metadata = export_description['data_requests'][0]
else:
export_metadata = export_description['media'][0]['metadata']
if export_metadata.get('state', '') in ('ready', 'finished'):
success = True
break
time.sleep(2)
if not success:
raise PanoptesAPIException(
'{}_export not ready within {} seconds'.format(
export_type,
timeout
)
)
return export_description
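
# Hedged usage sketch, written as if in a separate script: start a
# classifications export and block until it is ready (up to 30 minutes).
# Requires an authenticated client and a real project; the ID below is a
# placeholder.
# from panoptes_client import Project
# project = Project.find(1234)
# project.generate_export('classifications')
# info = project.wait_export('classifications', timeout=1800)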
|
python
|
{
"resource": ""
}
|
q9198
|
Exportable.generate_export
|
train
|
def generate_export(self, export_type):
"""
Start a new export.
- **export_type** is a string specifying which type of export to start.
Returns a :py:class:`dict` containing metadata for the new export.
"""
if export_type in TALK_EXPORT_TYPES:
return talk.post_data_request(
'project-{}'.format(self.id),
export_type.replace('talk_', '')
)
return self.http_post(
self._export_path(export_type),
json={"media": {"content_type": "text/csv"}},
)[0]
|
python
|
{
"resource": ""
}
|
q9199
|
Exportable.describe_export
|
train
|
def describe_export(self, export_type):
"""
Fetch metadata for an export.
- **export_type** is a string specifying which type of export to look
up.
Returns a :py:class:`dict` containing metadata for the export.
"""
if export_type in TALK_EXPORT_TYPES:
return talk.get_data_request(
'project-{}'.format(self.id),
export_type.replace('talk_', '')
)[0]
return self.http_get(
self._export_path(export_type),
)[0]
|
python
|
{
"resource": ""
}
|