| code (string) | signature (string) | docstring (string) | loss_without_docstring (float64) | loss_with_docstring (float64) | factor (float64) |
|---|---|---|---|---|---|
lenvec_plus = op.join(out_dir, 'genomic_lenvec.plus')
lenvec_minus = op.join(out_dir, 'genomic_lenvec.minus')
compute_genomic_cmd = ("compute_genomic_lenvectors "
"{bam_in} {lenvec_plus} "
"{lenvec_minus} "
"{min_len} "
"{max_len} ")
index_genomic_cmd = ("index_genomic_lenvectors "
"{lenvec} ")
genomic_lenvec = op.join(out_dir, 'genomic_lenvec')
feat_len_file = op.join(out_dir, 'feat_lengths.txt')
compute_locus_cmd = ("compute_locus_lenvectors "
"{loci_file} "
"{genomic_lenvec} "
"{min_len} "
"{max_len} "
"> {feat_len_file}")
cov_S_file = op.join(out_dir, 'loci.cov_anti')
coverage_anti_cmd = ("coverageBed -S -counts -b "
"{bam_in} -a {loci_file} "
"> {cov_S_file}")
feat_posentropy = op.join(out_dir, 'feat_posentropy.txt')
entropy_cmd = ("compute_locus_entropy.rb "
"{counts_reads} "
"> {feat_posentropy}")
with utils.chdir(out_dir):
run(compute_genomic_cmd.format(min_len=min_trimmed_read_len, max_len=max_trimmed_read_len, **locals()), "Run compute_genomic")
run(index_genomic_cmd.format(lenvec=lenvec_plus), "Run index in plus")
run(index_genomic_cmd.format(lenvec=lenvec_minus), "Run index in minus")
run(compute_locus_cmd.format(min_len=min_trimmed_read_len, max_len=max_trimmed_read_len, **locals()), "Run compute locus")
run(coverage_anti_cmd.format(**locals()), "Run coverage antisense")
feat_antisense = _order_antisense_column(cov_S_file, min_trimmed_read_len)
counts_reads = _reads_per_position(bam_in, loci_file, out_dir)
run(entropy_cmd.format(**locals()), "Run entropy")
rnafold = calculate_structure(loci_file, reference)
|
def create_features(bam_in, loci_file, reference, out_dir)
|
Use feature extraction module from CoRaL
| 2.906762
| 2.914707
| 0.997274
|
logger.info("reading sequeces")
data = load_data(args.json)
logger.info("create profile")
data = make_profile(data, os.path.join(args.out, "profiles"), args)
logger.info("create database")
make_database(data, "seqcluster.db", args.out)
logger.info("Done. Download https://github.com/lpantano/seqclusterViz/archive/master.zip to browse the output.")
|
def report(args)
|
Create report in html format
| 6.963284
| 7.073503
| 0.984418
|
previous = peaks[0]
new_peaks = [previous]
for pos in peaks:
if pos > previous + 10:
new_peaks.append(pos)
previous = pos
return new_peaks
|
def _summarize_peaks(peaks)
|
merge peak positions if closer than 10
| 3.71229
| 3.258483
| 1.139269
|
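For illustration, a standalone version of the merge logic above (the gap of 10 and the sample positions are arbitrary): a peak is kept only if it lies more than 10 positions past the last kept peak.

```python
# Standalone sketch of the peak-merging logic above (gap and inputs are illustrative).
def summarize_peaks(peaks, min_gap=10):
    if not peaks:
        return []
    previous = peaks[0]
    new_peaks = [previous]
    for pos in peaks:
        if pos > previous + min_gap:   # far enough from the last kept peak
            new_peaks.append(pos)
            previous = pos
    return new_peaks

print(summarize_peaks([100, 105, 109, 125, 130, 160]))  # [100, 125, 160]
```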
previous = min(y)
peaks = []
intervals = range(x, y, win)
for pos in intervals:
if y[pos] > previous * 10:
previous = y[pos]
peaks.append(pos)
return _summarize_peaks(peaks)
|
def find_mature(x, y, win=10)
|
Window approach to find hills in the expression profile
| 5.57384
| 5.564854
| 1.001615
|
keep = Counter()
with open_fastq(in_file) as handle:
for line in handle:
if line.startswith("@"):
if line.find("UMI") > -1:
logger.info("Find UMI tags in read names, collapsing by UMI.")
return collapse_umi(in_file)
seq = handle.next().strip()
handle.next()
qual = handle.next().strip()
if seq in keep:
keep[seq].update(qual)
else:
keep[seq] = quality(qual)
logger.info("Sequences loaded: %s" % len(keep))
return keep
|
def collapse(in_file)
|
collapse identical sequences and keep Q
| 4.078318
| 4.006186
| 1.018005
|
keep = defaultdict(dict)
with open_fastq(in_file) as handle:
for line in handle:
if line.startswith("@"):
m = re.search('UMI_([ATGC]*)', line.strip())
umis = m.group(0)
seq = handle.next().strip()
handle.next()
qual = handle.next().strip()
if (umis, seq) in keep:
keep[(umis, seq)][1].update(qual)
keep[(umis, seq)][0].update(seq)
else:
keep[(umis, seq)] = [umi(seq), quality(qual)]
logger.info("Sequences loaded: %s" % len(keep))
return keep
|
def collapse_umi(in_file)
|
collapse reads using UMI tags
| 3.343557
| 3.325593
| 1.005402
|
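A small illustration of the UMI extraction used above (the read name is made up). Note that `group(0)` keeps the `UMI_` prefix while `group(1)` would give only the bases:

```python
import re

header = "@NS500:42:H2V:1:11101:1227:1060 UMI_ACGTAC"  # hypothetical FASTQ header
m = re.search('UMI_([ATGC]*)', header.strip())
print(m.group(0))  # 'UMI_ACGTAC' -- what the code above stores as the key
print(m.group(1))  # 'ACGTAC'     -- the bare UMI sequence
```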
_, ext = os.path.splitext(in_file)
if ext == ".gz":
return gzip.open(in_file, 'rb')
if ext in [".fastq", ".fq", ".fasta", ".fa"]:
return open(in_file, 'r')
raise ValueError("File needs to be fastq|fasta|fq|fa [.gz]")
|
def open_fastq(in_file)
|
open a fastq file, using gzip if it is gzipped
from bcbio package
| 3.087684
| 3.081132
| 1.002126
|
try:
umi_fn = args.fastq
if _is_umi(args.fastq):
umis = collapse(args.fastq)
umi_fn = os.path.join(args.out, splitext_plus(os.path.basename(args.fastq))[0] + "_umi_trimmed.fastq")
write_output(umi_fn, umis, args.minimum)
seqs = collapse(umi_fn)
out_file = splitext_plus(os.path.basename(args.fastq))[0] + "_trimmed.fastq"
except IOError as e:
logger.error("I/O error({0}): {1}".format(e.errno, e.strerror))
raise "Can not read file"
out_file = os.path.join(args.out, out_file)
write_output(out_file, seqs, args.minimum)
return out_file
|
def collapse_fastq(args)
|
collapse fastq files after adapter trimming
| 2.749694
| 2.729586
| 1.007367
|
current = 0
num_children = len(doctree.children)
while current < num_children:
child = doctree.children[current]
child.replace_self(
child.traverse(no_autoslides_filter)
)
if len(doctree.children) == num_children:
# nothing removed, increment current
current += 1
else:
# a node was removed; retain current and update length
num_children = len(doctree.children)
|
def filter_doctree_for_slides(doctree)
|
Given a doctree, remove all non-slide related elements from it.
| 4.806735
| 4.400705
| 1.092265
|
parent_title_node = node.parent.next_node(nodes.title)
nextslide_info = getattr(
parent_title_node, 'nextslide_info',
(parent_title_node.deepcopy().children, 1),
)
nextslide_info = (
nextslide_info[0],
nextslide_info[1] + 1,
)
if node.args:
textnodes, messages = node.state.inline_text(
node.args[0],
1,
)
new_title = nodes.title(node.args[0], '', *textnodes)
else:
title_nodes = nextslide_info[0][:]
if 'increment' in node.attributes:
title_nodes.append(
nodes.Text(' (%s)' % nextslide_info[1])
)
new_title = nodes.title(
'', '',
*title_nodes
)
new_title.nextslide_info = nextslide_info
return new_title
|
def _make_title_node(self, node, increment=True)
|
Generate a new title node for ``node``.
``node`` is a ``nextslide`` node. The title will use the node's
parent's title, or the title specified as an argument.
| 3.482329
| 3.227608
| 1.078919
|
if 'theme' in self.attributes:
builder.apply_theme(
self.attributes['theme'],
builder.theme_options,
)
|
def apply(self, builder)
|
Apply the Slide Configuration to a Builder.
| 6.983453
| 6.180555
| 1.129907
|
# set up the default conf
result = {
'theme': builder.config.slide_theme,
'autoslides': builder.config.autoslides,
'slide_classes': [],
}
# now look for a slideconf node in the doctree and update the conf
if doctree:
conf_node = cls.get(doctree)
if conf_node:
result.update(conf_node.attributes)
return result
|
def get_conf(cls, builder, doctree=None)
|
Return a dictionary of slide configuration for this doctree.
| 4.233522
| 3.690017
| 1.147291
|
COPY_LISTS = ('script_files', 'css_files',)
for attr in COPY_LISTS:
if attr in context:
context[attr] = context[attr][:]
return context
|
def __fix_context(context)
|
Return a new context dict based on original context.
The new context will be a copy of the original, and some mutable
members (such as script and css files) will also be copied to
prevent polluting shared context.
| 6.210461
| 4.143929
| 1.498689
|
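A minimal sketch of why the list copies matter (names are illustrative): Sphinx shares the same `script_files` list across page contexts, so appending to it in place would leak into every page, whereas replacing it with a slice copy keeps per-page changes local.

```python
# Illustrative only: two page contexts sharing one mutable list, as Sphinx does.
shared_scripts = ['jquery.js']
page_a = {'script_files': shared_scripts}
page_b = {'script_files': shared_scripts}

def fix_context(context, copy_lists=('script_files', 'css_files')):
    for attr in copy_lists:
        if attr in context:
            context[attr] = context[attr][:]  # private copy, same logic as above
    return context

page_a = fix_context(page_a)
page_a['script_files'].append('slides.js')
print(page_b['script_files'])  # ['jquery.js'] -- unaffected by page_a's change
```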
# Print welcome message
msg = bold('Welcome to the Hieroglyph %s quickstart utility.') % (
version(),
)
print(msg)
msg =
print(msg)
# set a few defaults that we don't usually care about for Hieroglyph
d.update({
'version': datetime.date.today().strftime('%Y.%m.%d'),
'release': datetime.date.today().strftime('%Y.%m.%d'),
'make_mode': True,
})
if 'project' not in d:
print('''
The presentation title will be included on the title slide.''')
sphinx.quickstart.do_prompt(d, 'project', 'Presentation title')
if 'author' not in d:
sphinx.quickstart.do_prompt(d, 'author', 'Author name(s)')
# slide_theme
theme_entrypoints = pkg_resources.iter_entry_points('hieroglyph.theme')
themes = [
t.load()
for t in theme_entrypoints
]
msg =
for theme in themes:
msg += '\n'.join([
bold(theme['name']),
theme['desc'],
'', '',
])
msg +=
print(msg)
sphinx.quickstart.do_prompt(
d, 'slide_theme', 'Slide Theme', themes[0]['name'],
sphinx.quickstart.choice(
*[t['name'] for t in themes]
),
)
# Ask original questions
print("")
sphinx.quickstart.ask_user(d)
|
def ask_user(d)
|
Wrap sphinx.quickstart.ask_user, and add additional questions.
| 5.227936
| 4.780658
| 1.09356
|
return {
'title': self.title,
'level': self.level,
'content': self.content,
'classes': self.classes,
'slide_classes': self._filter_classes(exclude='content-'),
'content_classes': self._filter_classes(include='content-'),
'slide_number': self.slide_number,
'config': self._translator.builder.config,
'id': self.id,
}
|
def get_slide_context(self)
|
Return the context dict for rendering this slide.
| 4.670915
| 4.053182
| 1.152407
|
if self.builder.config.slide_numbers:
self.body.append(
'\n<div class="slide-no">%s</div>\n' % (slide_no,),
)
|
def _add_slide_number(self, slide_no)
|
Add the slide number to the output if enabled.
| 4.756207
| 4.015939
| 1.184332
|
if self.builder.config.slide_footer:
self.body.append(
'\n<div class="slide-footer">%s</div>\n' % (
self.builder.config.slide_footer,
),
)
|
def _add_slide_footer(self, slide_no)
|
Add the slide footer to the output if enabled.
| 3.73829
| 3.142128
| 1.189732
|
# avoid import cycles :/
from hieroglyph import writer
# only reconfigure Sphinx if we're generating HTML
if app.builder.name not in HTML_BUILDERS:
return
if app.config.slide_link_html_to_slides:
# add the slide theme dir as a Loader
app.builder.templates.loaders.append(
SphinxFileSystemLoader(
os.path.join(
os.path.dirname(__file__), 'themes', 'slides',
)
)
)
# add the "show slides" sidebar template
if not app.config.html_sidebars:
# no sidebars explicitly defined, mimic the old style
# behavior + slide links
app.config.html_sidebars = {
'**': [
'localtoc.html',
'relations.html',
'sourcelink.html',
SLIDELINK_TEMPLATE,
'searchbox.html',
],
}
else:
# sidebars defined, add the template if needed
included = False
for glob, templates in app.config.html_sidebars.items():
if SLIDELINK_TEMPLATE in templates:
included = True
break
if not included:
# the slidelink template was not included; append it
# to the list of sidebars for all templates
app.config.html_sidebars.setdefault('**', []).append(
SLIDELINK_TEMPLATE,
)
if app.config.slide_link_html_sections_to_slides:
# fix up the HTML Translator
if sphinx.version_info >= (1, 6, 0):
override_translator = type(
'SlideLinkTranslator',
(app.builder.get_translator_class(), object),
{
'depart_title': writer.depart_title,
},
)
app.set_translator(app.builder, override_translator)
else:
app.builder.translator_class = type(
'SlideLinkTranslator',
(app.builder.translator_class, object),
{
'depart_title': writer.depart_title,
},
)
|
def inspect_config(app)
|
Inspect the Sphinx configuration and update for slide-linking.
If links from HTML to slides are enabled, make sure the sidebar
configuration includes the template and add the necessary theme
directory as a loader so the sidebar template can be located.
If the sidebar configuration already includes ``slidelink.html``
(in any key), the configuration will not be changed. If the
configuration is not specified, we'll attempt to emulate what
Sphinx does by default.
| 4.019293
| 3.590475
| 1.119432
|
return builder.get_relative_uri(
pagename or builder.current_docname,
os.path.join(
builder.app.config.slide_relative_path,
pagename or builder.current_docname,
))
|
def slide_path(builder, pagename=None)
|
Calculate the relative path to the Slides for pagename.
| 4.267953
| 3.66732
| 1.16378
|
return builder.get_relative_uri(
pagename or builder.current_docname,
os.path.join(
builder.app.config.slide_html_relative_path,
pagename or builder.current_docname,
))
|
def html_path(builder, pagename=None)
|
Calculate the relative path to the Slides for pagename.
| 5.022016
| 3.816942
| 1.315717
|
# we can only show the slidelink if we can resolve the filename
context['show_slidelink'] = (
app.config.slide_link_html_to_slides and
hasattr(app.builder, 'get_outfilename')
)
if context['show_slidelink']:
context['slide_path'] = slide_path(app.builder, pagename)
|
def add_link(app, pagename, templatename, context, doctree)
|
Add the slides link to the HTML context.
| 5.701555
| 5.29402
| 1.07698
|
# push the existing values onto the Stack
self._theme_stack.append(
(self.theme, self.theme_options)
)
theme_factory = HTMLThemeFactory(self.app)
theme_factory.load_additional_themes(self.get_builtin_theme_dirs() + self.config.slide_theme_path)
self.theme = theme_factory.create(themename)
self.theme_options = themeoptions.copy()
self.templates.init(self, self.theme)
self.templates.environment.filters['json'] = json.dumps
if self.theme not in self._additional_themes:
self._additional_themes.append(self.theme)
|
def apply_theme(self, themename, themeoptions)
|
Apply a new theme to the document.
This will store the existing theme configuration and apply a new one.
| 5.029773
| 5.326957
| 0.944211
|
super(AbstractSlideBuilder, self).post_process_images(doctree)
# figure out where this doctree is in relation to the srcdir
relative_base = (
['..'] *
doctree.attributes.get('source')[len(self.srcdir) + 1:].count('/')
)
for node in doctree.traverse(nodes.image):
if node.get('candidates') is None:
node['candidates'] = ('*',)
# fix up images with absolute paths
if node['uri'].startswith(self.outdir):
node['uri'] = '/'.join(
relative_base + [
node['uri'][len(self.outdir) + 1:]
]
)
|
def post_process_images(self, doctree)
|
Pick the best candidate for all image URIs.
| 4.803202
| 4.749878
| 1.011226
|
metadata = {}
metadata_lines = section.split('\n')
for line in metadata_lines:
colon_index = line.find(':')
if colon_index != -1:
key = line[:colon_index].strip()
val = line[colon_index + 1:].strip()
metadata[key] = val
return metadata
|
def parse_metadata(section)
|
Given the first part of a slide, returns metadata associated with it.
| 2.013933
| 1.823405
| 1.104491
|
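For example, feeding the first section of a slide source through the parser above yields a plain dict of string values (the input text is made up):

```python
def parse_metadata(section):
    # same logic as the snippet above, repeated so the example runs standalone
    metadata = {}
    for line in section.split('\n'):
        colon_index = line.find(':')
        if colon_index != -1:
            metadata[line[:colon_index].strip()] = line[colon_index + 1:].strip()
    return metadata

print(parse_metadata("title: My Talk\nbuild_lists: true"))
# {'title': 'My Talk', 'build_lists': 'true'} -- values stay strings,
# which is why the HTML post-processing below compares against 'true'
```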
if metadata.get('build_lists') and metadata['build_lists'] == 'true':
html = html.replace('<ul>', '<ul class="build">')
html = html.replace('<ol>', '<ol class="build">')
return html
|
def postprocess_html(html, metadata)
|
Returns processed HTML to fit into the slide template format.
| 3.72619
| 3.427694
| 1.087084
|
if sys.version_info[0] <= 2:
checker = get_checker(linter, checker_method.im_class)
else:
try:
checker = get_checker(linter, checker_method.__self__.__class__)
except AttributeError:
checker = get_checker(linter, get_class(checker_method.__module__, checker_method.__qualname__))
old_method = getattr(checker, checker_method.__name__)
def augment_func(node):
def chain():
old_method(node)
augmentation(chain, node)
setattr(checker, checker_method.__name__, augment_func)
|
def augment_visit(linter, checker_method, augmentation)
|
Augmenting a visit enables additional errors to be raised (although that case is
better served using a new checker) or to suppress all warnings in certain circumstances.
Augmenting functions should accept a 'chain' function, which runs the checker method
and possibly any other augmentations, and secondly an Astroid node. "chain()" can be
called at any point to trigger the continuation of other checks, or not at all to
prevent any further checking.
| 2.690246
| 2.593904
| 1.037142
|
# At some point, pylint started preferring message symbols to message IDs. However this is not done
# consistently or uniformly - occasionally there are some message IDs with no matching symbols.
# We try to work around this here by suppressing both the ID and the symbol, if we can find it.
# This also gives us compatibility with a broader range of pylint versions.
# Similarly, a commit between version 1.2 and 1.3 changed where the messages are stored - see:
# https://bitbucket.org/logilab/pylint/commits/0b67f42799bed08aebb47babdc9fb0e761efc4ff#chg-reporters/__init__.py
# Therefore here, we try the new attribute name, and fall back to the old version for
# compatibility with <=1.2 and >=1.3
msgs_store = getattr(linter, 'msgs_store', linter)
def get_message_definitions(message_id_or_symbol):
if hasattr(msgs_store, 'check_message_id'):
return [msgs_store.check_message_id(message_id_or_symbol)]
# pylint 2.0 renamed check_message_id to get_message_definition in:
# https://github.com/PyCQA/pylint/commit/5ccbf9eaa54c0c302c9180bdfb745566c16e416d
elif hasattr(msgs_store, 'get_message_definition'):
return [msgs_store.get_message_definition(message_id_or_symbol)]
# pylint 2.3.0 renamed get_message_definition to get_message_definitions in:
# https://github.com/PyCQA/pylint/commit/da67a9da682e51844fbc674229ff6619eb9c816a
elif hasattr(msgs_store, 'get_message_definitions'):
return msgs_store.get_message_definitions(message_id_or_symbol)
else:
raise ValueError('pylint.utils.MessagesStore does not have a get_message_definition(s) method')
try:
pylint_messages = get_message_definitions(message_id_or_symbol)
symbols = [symbol
for pylint_message in pylint_messages
for symbol in (pylint_message.msgid, pylint_message.symbol)
if symbol is not None]
except UnknownMessage:
# This can happen due to mismatches of pylint versions and plugin expectations of available messages
symbols = [message_id_or_symbol]
def do_suppress(chain, node):
with Suppress(linter) as s:
if test_func(node):
s.suppress(*symbols)
chain()
augment_visit(linter, checker_method, do_suppress)
|
def suppress_message(linter, checker_method, message_id_or_symbol, test_func)
|
This wrapper allows the suppression of a message if the supplied test function
returns True. It is useful to prevent one particular message from being raised
in one particular case, while leaving the rest of the messages intact.
| 5.82231
| 5.909722
| 0.985209
|
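A hedged usage sketch (the checker, message and predicate are illustrative, not taken from the source): a plugin can silence `no-member` on nodes that a test function recognizes, while leaving the check intact everywhere else.

```python
# Illustrative pylint plugin using the helpers above; the predicate is an assumption.
from pylint.checkers.typecheck import TypeChecker
from pylint_plugin_utils import suppress_message

def _looks_like_orm_manager(node):
    """Hypothetical test: silence the warning on accesses to an 'objects' attribute."""
    return getattr(node, 'attrname', None) == 'objects'

def register(linter):
    # Suppress E1101 (no-member) raised from TypeChecker.visit_attribute
    # whenever the node satisfies the predicate.
    suppress_message(linter, TypeChecker.visit_attribute, 'E1101', _looks_like_orm_manager)
```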
q = '''SELECT threat_type,platform_type,threat_entry_type, expires_at < current_timestamp AS has_expired
FROM full_hash WHERE value IN ({})
'''
output = []
with self.get_cursor() as dbc:
placeholders = ','.join(['?'] * len(hash_values))
dbc.execute(q.format(placeholders), [sqlite3.Binary(hv) for hv in hash_values])
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type, has_expired = h
threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
output.append((threat_list, has_expired))
return output
|
def lookup_full_hashes(self, hash_values)
|
Query DB to see if hash is blacklisted
| 3.786151
| 3.64051
| 1.040006
|
q = '''SELECT value, MAX(negative_expires_at < current_timestamp) AS negative_cache_expired
FROM hash_prefix WHERE cue IN ({}) GROUP BY 1
'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q.format(','.join(['?'] * len(cues))), [sqlite3.Binary(cue) for cue in cues])
for h in dbc.fetchall():
value, negative_cache_expired = h
output.append((bytes(value), negative_cache_expired))
return output
|
def lookup_hash_prefix(self, cues)
|
Lookup hash prefixes by cue (first 4 bytes of hash)
Returns a tuple of (value, negative_cache_expired).
| 4.378923
| 3.475551
| 1.259922
|
log.info('Storing full hash %s to list %s with cache duration %s',
to_hex(hash_value), str(threat_list), cache_duration)
qi = '''INSERT OR IGNORE INTO full_hash
(value, threat_type, platform_type, threat_entry_type, malware_threat_type, downloaded_at)
VALUES
(?, ?, ?, ?, ?, current_timestamp)
'''
qu = "UPDATE full_hash SET expires_at=datetime(current_timestamp, '+{} SECONDS') \
WHERE value=? AND threat_type=? AND platform_type=? AND threat_entry_type=?"
i_parameters = [sqlite3.Binary(hash_value), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type, malware_threat_type]
u_parameters = [sqlite3.Binary(hash_value), threat_list.threat_type,
threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(qi, i_parameters)
dbc.execute(qu.format(int(cache_duration)), u_parameters)
|
def store_full_hash(self, threat_list, hash_value, cache_duration, malware_threat_type)
|
Store full hash found for the given hash prefix
| 2.960198
| 2.971781
| 0.996102
|
q = '''DELETE FROM full_hash WHERE expires_at < datetime(current_timestamp, '-{} SECONDS')
'''
log.info('Cleaning up full_hash entries expired more than {} seconds ago.'.format(keep_expired_for))
with self.get_cursor() as dbc:
dbc.execute(q.format(int(keep_expired_for)))
|
def cleanup_full_hashes(self, keep_expired_for=(60 * 60 * 12))
|
Remove long expired full_hash entries.
| 3.731576
| 3.261279
| 1.144206
|
q = '''SELECT threat_type,platform_type,threat_entry_type FROM threat_list'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type = h
threat_list = ThreatList(threat_type, platform_type, threat_entry_type)
output.append(threat_list)
return output
|
def get_threat_lists(self)
|
Get a list of known threat lists.
| 3.038681
| 2.877722
| 1.055933
|
q = '''SELECT threat_type,platform_type,threat_entry_type,client_state FROM threat_list'''
output = {}
with self.get_cursor() as dbc:
dbc.execute(q)
for h in dbc.fetchall():
threat_type, platform_type, threat_entry_type, client_state = h
threat_list_tuple = (threat_type, platform_type, threat_entry_type)
output[threat_list_tuple] = client_state
return output
|
def get_client_state(self)
|
Get a dict of known threat lists including clientState values.
| 3.618584
| 2.87606
| 1.258174
|
q = '''INSERT OR IGNORE INTO threat_list
(threat_type, platform_type, threat_entry_type, timestamp)
VALUES
(?, ?, ?, current_timestamp)
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
|
def add_threat_list(self, threat_list)
|
Add threat list entry if it does not exist.
| 3.166385
| 2.952126
| 1.072578
|
log.info('Deleting cached threat list "{}"'.format(repr(threat_list)))
q = '''DELETE FROM threat_list
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
|
def delete_threat_list(self, threat_list)
|
Delete threat list entry.
| 3.765919
| 3.670587
| 1.025972
|
q = '''SELECT value FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=?
ORDER BY value
'''
params = [threat_list.threat_type, threat_list.platform_type, threat_list.threat_entry_type]
with self.get_cursor() as dbc:
dbc.execute(q, params)
all_hashes = b''.join(bytes(h[0]) for h in dbc.fetchall())
checksum = hashlib.sha256(all_hashes).digest()
return checksum
|
def hash_prefix_list_checksum(self, threat_list)
|
Returns SHA256 checksum for alphabetically-sorted concatenated list of hash prefixes
| 3.272141
| 3.041309
| 1.075899
|
batch_size = 40
q = '''DELETE FROM hash_prefix
WHERE threat_type=? AND platform_type=? AND threat_entry_type=? AND value IN ({})
'''
prefixes_to_remove = self.get_hash_prefix_values_to_remove(threat_list, indices)
with self.get_cursor() as dbc:
for i in range(0, len(prefixes_to_remove), batch_size):
remove_batch = prefixes_to_remove[i:(i + batch_size)]
params = [
threat_list.threat_type,
threat_list.platform_type,
threat_list.threat_entry_type
] + [sqlite3.Binary(b) for b in remove_batch]
dbc.execute(q.format(','.join(['?'] * len(remove_batch))), params)
|
def remove_hash_prefix_indices(self, threat_list, indices)
|
Remove records matching indices from a lexicographically-sorted local threat list.
| 2.947951
| 2.90148
| 1.016016
|
q = '''SELECT distinct value from hash_prefix'''
output = []
with self.get_cursor() as dbc:
dbc.execute(q)
output = [bytes(r[0]) for r in dbc.fetchall()]
return output
|
def dump_hash_prefix_values(self)
|
Export all hash prefix values.
Returns a list of known hash prefix values
| 4.924798
| 4.693357
| 1.049312
|
used = False
try:
# Process all enqueued events, then exit.
while True:
try:
# Get an event request from the queue.
method, args, kwargs, response_queue = tk.tk._event_queue.get_nowait()
except queue.Empty:
# No more events to process.
break
else:
# Call the event with the given arguments, and then return
# the result back to the caller via the response queue.
used = True
if tk.tk._debug >= 2:
print('Calling event from main thread:', method.__name__, args, kwargs)
try:
response_queue.put((False, method(*args, **kwargs)))
except SystemExit:
raise # Raises original SystemExit
except Exception:
# Calling the event caused an exception; return the
# exception back to the caller so that it can be raised
# in the caller's thread.
from sys import exc_info # Python 2 requirement
ex_type, ex_value, ex_tb = exc_info()
response_queue.put((True, (ex_type, ex_value, ex_tb)))
finally:
# Schedule to check again. If we just processed an event, check
# immediately; if we didn't, check later.
if used:
tk.after_idle(_check_events, tk)
else:
tk.after(tk.tk._check_period, _check_events, tk)
|
def _check_events(tk)
|
Checks events in the queue on a given Tk instance
| 3.72766
| 3.617288
| 1.030513
|
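The snippet above is the consumer half of a cross-thread call protocol; a plausible producer half (sketched here, not part of the source) enqueues `(method, args, kwargs, response_queue)` from a worker thread and blocks on the response queue until the Tk main loop has executed the call:

```python
import queue

def call_in_main_thread(tk, method, *args, **kwargs):
    """Hypothetical worker-thread helper matching the queue format consumed above."""
    response_queue = queue.Queue(maxsize=1)
    tk.tk._event_queue.put((method, args, kwargs, response_queue))
    is_exception, value = response_queue.get()   # blocks until _check_events ran it
    if is_exception:
        ex_type, ex_value, ex_tb = value         # exc_info tuple posted by the consumer
        raise ex_value
    return value
```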
try:
self.storage.cleanup_full_hashes()
self.storage.commit()
self._sync_threat_lists()
self.storage.commit()
self._sync_hash_prefix_cache()
except Exception:
self.storage.rollback()
raise
|
def update_hash_prefix_cache(self)
|
Update locally cached threat lists.
| 5.640396
| 4.143888
| 1.361136
|
client_state = self.storage.get_client_state()
self.api_client.fair_use_delay()
fh_response = self.api_client.get_full_hashes(hash_prefixes, client_state)
# update negative cache for each hash prefix
# store full hash (insert or update) with positive cache bumped up
for m in fh_response.get('matches', []):
threat_list = ThreatList(m['threatType'], m['platformType'], m['threatEntryType'])
hash_value = b64decode(m['threat']['hash'])
cache_duration = int(m['cacheDuration'].rstrip('s'))
malware_threat_type = None
for metadata in m['threatEntryMetadata'].get('entries', []):
k = b64decode(metadata['key'])
v = b64decode(metadata['value'])
if k == 'malware_threat_type':
malware_threat_type = v
self.storage.store_full_hash(threat_list, hash_value, cache_duration, malware_threat_type)
negative_cache_duration = int(fh_response['negativeCacheDuration'].rstrip('s'))
for prefix_value in hash_prefixes:
self.storage.update_hash_prefix_expiration(prefix_value, negative_cache_duration)
|
def _sync_full_hashes(self, hash_prefixes)
|
Download full hashes matching hash_prefixes.
Also update cache expiration timestamps.
| 4.345929
| 4.172034
| 1.041681
|
if type(url) is not str:
url = url.encode('utf8')
if not url.strip():
raise ValueError("Empty input string.")
url_hashes = URL(url).hashes
try:
list_names = self._lookup_hashes(url_hashes)
self.storage.commit()
except Exception:
self.storage.rollback()
raise
if list_names:
return list_names
return None
|
def lookup_url(self, url)
|
Look up specified URL in Safe Browsing threat lists.
| 4.547139
| 4.188996
| 1.085496
|
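For context, the typical end-to-end use of the client these methods belong to looks roughly like this (it mirrors the gglsbl README; the API key and URL are placeholders):

```python
from gglsbl import SafeBrowsingList

sbl = SafeBrowsingList('YOUR-GOOGLE-API-KEY')       # placeholder key
sbl.update_hash_prefix_cache()                      # sync local threat lists first
threat_lists = sbl.lookup_url('http://testsafebrowsing.appspot.com/s/phishing.html')
if threat_lists:
    print('URL is blacklisted in:', threat_lists)
else:
    print('URL looks clean')
```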
full_hashes = list(full_hashes)
cues = [fh[0:4] for fh in full_hashes]
result = []
matching_prefixes = {}
matching_full_hashes = set()
is_potential_threat = False
# First lookup hash prefixes which match full URL hash
for (hash_prefix, negative_cache_expired) in self.storage.lookup_hash_prefix(cues):
for full_hash in full_hashes:
if full_hash.startswith(hash_prefix):
is_potential_threat = True
# consider hash prefix negative cache as expired if it is expired in at least one threat list
matching_prefixes[hash_prefix] = matching_prefixes.get(hash_prefix, False) or negative_cache_expired
matching_full_hashes.add(full_hash)
# if none matches, URL hash is clear
if not is_potential_threat:
return []
# if there is non-expired full hash, URL is blacklisted
matching_expired_threat_lists = set()
for threat_list, has_expired in self.storage.lookup_full_hashes(matching_full_hashes):
if has_expired:
matching_expired_threat_lists.add(threat_list)
else:
result.append(threat_list)
if result:
return result
# If there are no matching expired full hash entries
# and negative cache is still current for all prefixes, consider it safe
if len(matching_expired_threat_lists) == 0 and sum(map(int, matching_prefixes.values())) == 0:
log.info('Negative cache hit.')
return []
# Now we can assume that there are expired matching full hash entries and/or
# cache prefix entries with expired negative cache. Both require full hash sync.
self._sync_full_hashes(matching_prefixes.keys())
# Now repeat full hash lookup
for threat_list, has_expired in self.storage.lookup_full_hashes(matching_full_hashes):
if not has_expired:
result.append(threat_list)
return result
|
def _lookup_hashes(self, full_hashes)
|
Lookup URL hash in blacklists
Returns names of lists it was found in.
| 4.398732
| 4.324175
| 1.017242
|
response = self.service.threatLists().list().execute()
self.set_wait_duration(response.get('minimumWaitDuration'))
return response['threatLists']
|
def get_threats_lists(self)
|
Retrieve all available threat lists
| 7.87182
| 6.618197
| 1.189421
|
request_body = {
"client": {
"clientId": self.client_id,
"clientVersion": self.client_version,
},
"listUpdateRequests": [],
}
for (threat_type, platform_type, threat_entry_type), current_state in client_state.items():
request_body['listUpdateRequests'].append(
{
"threatType": threat_type,
"platformType": platform_type,
"threatEntryType": threat_entry_type,
"state": current_state,
"constraints": {
"supportedCompressions": ["RAW"]
}
}
)
response = self.service.threatListUpdates().fetch(body=request_body).execute()
self.set_wait_duration(response.get('minimumWaitDuration'))
return response['listUpdateResponses']
|
def get_threats_update(self, client_state)
|
Fetch hash prefixes update for given threat list.
client_state is a dict which looks like {(threatType, platformType, threatEntryType): clientState}
| 3.676455
| 2.879906
| 1.276588
|
request_body = {
"client": {
"clientId": self.client_id,
"clientVersion": self.client_version,
},
"clientStates": [],
"threatInfo": {
"threatTypes": [],
"platformTypes": [],
"threatEntryTypes": [],
"threatEntries": [],
}
}
for prefix in prefixes:
request_body['threatInfo']['threatEntries'].append({"hash": b64encode(prefix).decode()})
for ((threatType, platformType, threatEntryType), clientState) in client_state.items():
request_body['clientStates'].append(clientState)
if threatType not in request_body['threatInfo']['threatTypes']:
request_body['threatInfo']['threatTypes'].append(threatType)
if platformType not in request_body['threatInfo']['platformTypes']:
request_body['threatInfo']['platformTypes'].append(platformType)
if threatEntryType not in request_body['threatInfo']['threatEntryTypes']:
request_body['threatInfo']['threatEntryTypes'].append(threatEntryType)
response = self.service.fullHashes().find(body=request_body).execute()
self.set_wait_duration(response.get('minimumWaitDuration'))
return response
|
def get_full_hashes(self, prefixes, client_state)
|
Find full hashes matching hash prefixes.
client_state is a dict which looks like {(threatType, platformType, threatEntryType): clientState}
| 2.245175
| 1.965551
| 1.142263
|
for url_variant in self.url_permutations(self.canonical):
url_hash = self.digest(url_variant)
yield url_hash
|
def hashes(self)
|
Hashes of all possible permutations of the URL in canonical form
| 9.700336
| 5.429233
| 1.786686
|
def full_unescape(u):
uu = urllib.unquote(u)
if uu == u:
return uu
else:
return full_unescape(uu)
def full_unescape_to_bytes(u):
uu = urlparse.unquote_to_bytes(u)
if uu == u:
return uu
else:
return full_unescape_to_bytes(uu)
def quote(s):
safe_chars = '!"$&\'()*+,-./:;<=>?@[\\]^_`{|}~'
return urllib.quote(s, safe=safe_chars)
url = self.url.strip()
url = url.replace(b'\n', b'').replace(b'\r', b'').replace(b'\t', b'')
url = url.split(b'#', 1)[0]
if url.startswith(b'//'):
url = b'http:' + url
if len(url.split(b'://')) <= 1:
url = b'http://' + url
# at python3 work with bytes instead of string
# as URL may contain invalid unicode characters
if self.__py3 and type(url) is bytes:
url = quote(full_unescape_to_bytes(url))
else:
url = quote(full_unescape(url))
url_parts = urlparse.urlsplit(url)
if not url_parts[0]:
url = 'http://{}'.format(url)
url_parts = urlparse.urlsplit(url)
protocol = url_parts.scheme
if self.__py3:
host = full_unescape_to_bytes(url_parts.hostname)
path = full_unescape_to_bytes(url_parts.path)
else:
host = full_unescape(url_parts.hostname)
path = full_unescape(url_parts.path)
query = url_parts.query
if not query and '?' not in url:
query = None
if not path:
path = b'/'
has_trailing_slash = (path[-1:] == b'/')
path = posixpath.normpath(path).replace(b'//', b'/')
if has_trailing_slash and path[-1:] != b'/':
path = path + b'/'
port = url_parts.port
host = host.strip(b'.')
host = re.sub(br'\.+', b'.', host).lower()
if host.isdigit():
try:
host = socket.inet_ntoa(struct.pack("!I", int(host)))
except Exception:
pass
elif host.startswith(b'0x') and b'.' not in host:
try:
host = socket.inet_ntoa(struct.pack("!I", int(host, 16)))
except Exception:
pass
quoted_path = quote(path)
quoted_host = quote(host)
if port is not None:
quoted_host = '{}:{}'.format(quoted_host, port)
canonical_url = '{}://{}{}'.format(protocol, quoted_host, quoted_path)
if query is not None:
canonical_url = '{}?{}'.format(canonical_url, query)
return canonical_url
|
def canonical(self)
|
Convert URL to its canonical form.
| 2.136068
| 2.091283
| 1.021415
|
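A few input/output pairs from the Safe Browsing canonicalization rules that the method above implements (examples taken from the public specification):

```python
# Expected values of URL(raw).canonical for the spec examples below.
expected = {
    'http://host/%25%32%35':         'http://host/%25',
    'http://www.google.com/blah/..': 'http://www.google.com/',
    'http://3279880203/blah':        'http://195.127.0.11/blah',
    'http://www.GOOgle.com/':        'http://www.google.com/',
}
```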
def url_host_permutations(host):
if re.match(r'\d+\.\d+\.\d+\.\d+', host):
yield host
return
parts = host.split('.')
l = min(len(parts), 5)
if l > 4:
yield host
for i in range(l - 1):
yield '.'.join(parts[i - l:])
def url_path_permutations(path):
yield path
query = None
if '?' in path:
path, query = path.split('?', 1)
if query is not None:
yield path
path_parts = path.split('/')[0:-1]
curr_path = ''
for i in range(min(4, len(path_parts))):
curr_path = curr_path + path_parts[i] + '/'
yield curr_path
protocol, address_str = urllib.splittype(url)
host, path = urllib.splithost(address_str)
user, host = urllib.splituser(host)
host, port = urllib.splitport(host)
host = host.strip('/')
seen_permutations = set()
for h in url_host_permutations(host):
for p in url_path_permutations(path):
u = '{}{}'.format(h, p)
if u not in seen_permutations:
yield u
seen_permutations.add(u)
|
def url_permutations(url)
|
Try all permutations of hostname and path which can be applied
to blacklisted URLs
| 2.51395
| 2.462822
| 1.02076
|
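The host-by-path expansion above reproduces the Safe Browsing lookup expressions; for the spec example `http://a.b.c/1/2.html?param=1` it yields, in generation order:

```python
# Expected output of url_permutations('http://a.b.c/1/2.html?param=1')
expected_permutations = [
    'a.b.c/1/2.html?param=1',
    'a.b.c/1/2.html',
    'a.b.c/',
    'a.b.c/1/',
    'b.c/1/2.html?param=1',
    'b.c/1/2.html',
    'b.c/',
    'b.c/1/',
]
```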
def normalize(v):
# strip trailing .0 or .00 or .0.0 or ...
v = re.sub(r'(\.0+)*$', '', v)
result = []
for part in v.split('.'):
# just digits
m = re.match(r'^(\d+)$', part)
if m:
result.append(int(m.group(1)))
continue
# digits letters
m = re.match(r'^(\d+)([a-zA-Z]+)$', part)
if m:
result.append(int(m.group(1)))
result.append(m.group(2))
continue
# digits letters digits
m = re.match(r'^(\d+)([a-zA-Z]+)(\d+)$', part)
if m:
result.append(int(m.group(1)))
result.append(m.group(2))
result.append(int(m.group(3)))
continue
return tuple(result)
n1 = normalize(v1)
n2 = normalize(v2)
return (n1 > n2) - (n1 < n2)
|
def _compare_versions(v1, v2)
|
Compare two version strings and return -1, 0 or 1 depending on the equality
of the subset of matching version numbers.
The implementation is inspired by the top answer at
http://stackoverflow.com/a/1714190/997768.
| 1.831004
| 1.800459
| 1.016966
|
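A few worked calls clarify the ordering (assuming `_compare_versions` as defined above is in scope):

```python
assert _compare_versions('1.2.0', '1.2') == 0    # trailing ".0" is stripped before comparing
assert _compare_versions('1.10', '1.9') == 1     # numeric comparison, not lexicographic
assert _compare_versions('0.28b', '0.28a') == 1  # trailing letters compare alphabetically
```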
m = re.search(r'([<>=]?=?)?\s*([0-9.a-zA-Z]+)', spec)
return m.group(2), m.group(1)
|
def _split_version_specifier(spec)
|
Splits version specifiers in the form ">= 0.1.2" into ('0.1.2', '>=')
| 3.881971
| 3.284639
| 1.181856
|
pkg_config_exe = os.environ.get('PKG_CONFIG', None) or 'pkg-config'
cmd = '{0} --exists {1}'.format(pkg_config_exe, package).split()
return call(cmd) == 0
|
def exists(package)
|
Return True if package information is available.
If ``pkg-config`` is not on path, raises ``EnvironmentError``.
| 3.117239
| 3.255038
| 0.957666
|
_raise_if_not_exists(package)
return _query(package, *_build_options('--libs', static=static))
|
def libs(package, static=False)
|
Return the LDFLAGS string returned by pkg-config.
The static specifier will also include libraries for static linking (i.e.,
includes any private libraries).
| 11.890494
| 14.841365
| 0.801173
|
_raise_if_not_exists(package)
result = _query(package, '--print-variables')
names = (x.strip() for x in result.split('\n') if x != '')
return dict(((x, _query(package, '--variable={0}'.format(x)).strip()) for x in names))
|
def variables(package)
|
Return a dictionary of all the variables defined in the .pc pkg-config file
of 'package'.
| 4.441587
| 3.971601
| 1.118337
|
if not exists(package):
return False
number, comparator = _split_version_specifier(version)
modversion = _query(package, '--modversion')
try:
result = _compare_versions(modversion, number)
except ValueError:
msg = "{0} is not a correct version specifier".format(version)
raise ValueError(msg)
if comparator in ('', '=', '=='):
return result == 0
if comparator == '>':
return result > 0
if comparator == '>=':
return result >= 0
if comparator == '<':
return result < 0
if comparator == '<=':
return result <= 0
|
def installed(package, version)
|
Check if the package meets the required version.
The version specifier consists of an optional comparator (one of =, ==, >,
<, >=, <=) and an arbitrarily long version number separated by dots. The
comparison behaves as you would expect, e.g. for an installed version '0.1.2' of
package 'foo':
>>> installed('foo', '==0.1.2')
True
>>> installed('foo', '<0.1')
False
>>> installed('foo', '>= 0.0.4')
True
If ``pkg-config`` is not on path, raises ``EnvironmentError``.
| 3.094699
| 2.87994
| 1.074571
|
for package in packages.split():
_raise_if_not_exists(package)
out = _query(packages, *_build_options('--cflags --libs', static=static))
out = out.replace('\\"', '')
result = collections.defaultdict(list)
for token in re.split(r'(?<!\\) ', out):
key = _PARSE_MAP.get(token[:2])
if key:
result[key].append(token[2:].strip())
def split(m):
t = tuple(m.split('='))
return t if len(t) > 1 else (t[0], None)
result['define_macros'] = [split(m) for m in result['define_macros']]
# only have members with values not being the empty list (which is default
# anyway):
return collections.defaultdict(list, ((k, v) for k, v in result.items() if v))
|
def parse(packages, static=False)
|
Parse the output from pkg-config about the passed package or packages.
Builds a dictionary containing the 'libraries', the 'library_dirs', the
'include_dirs', and the 'define_macros' that are presented by pkg-config.
*package* is a string with space-delimited package names.
The static specifier will also include libraries for static linking (i.e.,
includes any private libraries).
If ``pkg-config`` is not on path, raises ``EnvironmentError``.
| 4.884425
| 4.60499
| 1.060681
|
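A hedged usage sketch: the dict returned by `parse` maps directly onto `setuptools.Extension` keyword arguments (the package and module names below are only examples, and pkg-config plus the corresponding `.pc` file must be installed):

```python
import pkgconfig
from setuptools import Extension

flags = pkgconfig.parse('libpng')            # example package
ext = Extension(
    'mymodule',                              # hypothetical extension name
    sources=['mymodule.c'],
    include_dirs=flags['include_dirs'],
    library_dirs=flags['library_dirs'],
    libraries=flags['libraries'],
    define_macros=flags['define_macros'],
)
```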
'''Send the data over UDP while taking the sample_rate in account
The sample rate should be a number between `0` and `1` which indicates
the probability that a message will be sent. The sample_rate is also
communicated to `statsd` so it knows what multiplier to use.
:keyword data: The data to send
:type data: dict
:keyword sample_rate: The sample rate, defaults to `1` (meaning always)
:type sample_rate: int
'''
if self._disabled:
self.logger.debug('Connection disabled, not sending data')
return False
if sample_rate is None:
sample_rate = self._sample_rate
sampled_data = {}
if sample_rate < 1:
if random.random() <= sample_rate:
# Modify the data so statsd knows our sample_rate
for stat, value in compat.iter_dict(data):
sampled_data[stat] = '%s|@%s' % (data[stat], sample_rate)
else:
sampled_data = data
try:
for stat, value in compat.iter_dict(sampled_data):
send_data = ('%s:%s' % (stat, value)).encode("utf-8")
self.udp_sock.send(send_data)
return True
except Exception as e:
self.logger.exception('unexpected error %r while sending data', e)
return False
|
def send(self, data, sample_rate=None)
|
Send the data over UDP while taking the sample_rate in account
The sample rate should be a number between `0` and `1` which indicates
the probability that a message will be sent. The sample_rate is also
communicated to `statsd` so it knows what multiplier to use.
:keyword data: The data to send
:type data: dict
:keyword sample_rate: The sample rate, defaults to `1` (meaning always)
:type sample_rate: int
| 3.853312
| 2.193792
| 1.756462
|
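For reference, a standalone sketch of the sampling transformation above: at a 10% sample rate roughly nine out of ten datagrams are dropped, and the ones that are sent carry a `|@0.1` suffix so statsd can scale the value back up.

```python
import random

def sample(data, sample_rate):
    """Return the dict that would actually be sent (possibly empty)."""
    if sample_rate >= 1:
        return dict(data)
    if random.random() > sample_rate:
        return {}                                   # skip this datagram entirely
    # annotate each metric with the rate so statsd applies the multiplier
    return {stat: '%s|@%s' % (value, sample_rate) for stat, value in data.items()}

print(sample({'hits': '1|c'}, 0.1))  # {} most of the time, {'hits': '1|c|@0.1'} otherwise
```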
'''Start the timer and store the start time, this can only be executed
once per instance
It returns the timer instance so it can be chained when instantiating
the timer instance like this:
``timer = Timer('application_name').start()``'''
assert self._start is None, (
'Unable to start, the timer is already running')
self._last = self._start = time.time()
return self
|
def start(self)
|
Start the timer and store the start time, this can only be executed
once per instance
It returns the timer instance so it can be chained when instantiating
the timer instance like this:
``timer = Timer('application_name').start()``
| 9.095167
| 2.612973
| 3.480773
|
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The time delta (time.time() - time.time()) to report
:type delta: float
'''
ms = delta * 1000
if ms > self.min_send_threshold:
name = self._get_name(self.name, subname)
self.logger.info('%s: %0.08fms', name, ms)
return statsd.Client._send(self, {name: '%0.08f|ms' % ms})
else:
return True
|
def send(self, subname, delta)
|
Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The time delta (time.time() - time.time()) to report
:type delta: float
| 4.81349
| 2.886324
| 1.667689
|
'''Send the time that has passed since our last measurement
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
'''
t = time.time()
response = self.send(subname, t - self._last)
self._last = t
return response
|
def intermediate(self, subname)
|
Send the time that has passed since our last measurement
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
| 6.963409
| 2.57597
| 2.703218
|
'''Stop the timer and send the total since `start()` was run
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
'''
assert self._stop is None, (
'Unable to stop, the timer is already stopped')
self._stop = time.time()
return self.send(subname, self._stop - self._start)
|
def stop(self, subname='total')
|
Stop the timer and send the total since `start()` was run
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
| 6.185112
| 2.622652
| 2.358342
|
'''Decorate a function to time the execution
The method can be called with or without a name. If no name is given
the function defaults to the name of the function.
:keyword function_or_name: The name to post to or the function to wrap
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> @timer.decorate
... def some_function():
... # resulting timer name: application_name.some_function
... pass
>>>
>>> @timer.decorate('my_timer')
... def some_other_function():
... # resulting timer name: application_name.my_timer
... pass
'''
if callable(function_or_name):
return self._decorate(function_or_name.__name__, function_or_name)
else:
return partial(self._decorate, function_or_name)
|
def decorate(self, function_or_name)
|
Decorate a function to time the execution
The method can be called with or without a name. If no name is given
the function defaults to the name of the function.
:keyword function_or_name: The name to post to or the function to wrap
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> @timer.decorate
... def some_function():
... # resulting timer name: application_name.some_function
... pass
>>>
>>> @timer.decorate('my_timer')
... def some_other_function():
... # resulting timer name: application_name.my_timer
... pass
| 3.257326
| 1.467835
| 2.219136
|
'''Returns a context manager to time execution of a block of code.
:keyword subname: The subname to report data to
:type subname: str
:keyword class_: The :class:`~statsd.client.Client` subclass to use
(e.g. :class:`~statsd.timer.Timer` or
:class:`~statsd.counter.Counter`)
:type class_: :class:`~statsd.client.Client`
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> with timer.time():
... # resulting timer name: application_name
... pass
>>>
>>>
>>> with timer.time('context_timer'):
... # resulting timer name: application_name.context_timer
... pass
'''
if class_ is None:
class_ = Timer
timer = self.get_client(subname, class_)
timer.start()
yield
timer.stop('')
|
def time(self, subname=None, class_=None)
|
Returns a context manager to time execution of a block of code.
:keyword subname: The subname to report data to
:type subname: str
:keyword class_: The :class:`~statsd.client.Client` subclass to use
(e.g. :class:`~statsd.timer.Timer` or
:class:`~statsd.counter.Counter`)
:type class_: :class:`~statsd.client.Client`
>>> from statsd import Timer
>>> timer = Timer('application_name')
>>>
>>> with timer.time():
... # resulting timer name: application_name
... pass
>>>
>>>
>>> with timer.time('context_timer'):
... # resulting timer name: application_name.context_timer
... pass
| 3.584112
| 1.495703
| 2.396273
|
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The raw value to send
'''
if timestamp is None:
ts = int(dt.datetime.now().strftime("%s"))
else:
ts = timestamp
name = self._get_name(self.name, subname)
self.logger.info('%s: %s %s' % (name, value, ts))
return statsd.Client._send(self, {name: '%s|r|%s' % (value, ts)})
|
def send(self, subname, value, timestamp=None)
|
Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The raw value to send
| 4.652858
| 3.079849
| 1.510742
|
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The gauge value to send
'''
name = self._get_name(self.name, subname)
self.logger.info('%s: %s', name, value)
return statsd.Client._send(self, {name: '%s|g' % value})
|
def _send(self, subname, value)
|
Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The gauge value to send
| 5.875161
| 2.992109
| 1.963552
|
'''Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The gauge value to send
'''
assert isinstance(value, compat.NUM_TYPES)
return self._send(subname, value)
|
def send(self, subname, value)
|
Send the data to statsd via self.connection
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The gauge value to send
| 7.175501
| 2.795959
| 2.566382
|
'''Increment the gauge with `delta`
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The delta to add to the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge.increment('gauge_name', 10)
True
>>> gauge.increment(delta=10)
True
>>> gauge.increment('gauge_name')
True
'''
delta = int(delta)
sign = "+" if delta >= 0 else ""
return self._send(subname, "%s%d" % (sign, delta))
|
def increment(self, subname=None, delta=1)
|
Increment the gauge with `delta`
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The delta to add to the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge.increment('gauge_name', 10)
True
>>> gauge.increment(delta=10)
True
>>> gauge.increment('gauge_name')
True
| 3.967499
| 1.773543
| 2.237047
|
'''Decrement the gauge with `delta`
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The delta to remove from the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge.decrement('gauge_name', 10)
True
>>> gauge.decrement(delta=10)
True
>>> gauge.decrement('gauge_name')
True
'''
delta = -int(delta)
sign = "+" if delta >= 0 else ""
return self._send(subname, "%s%d" % (sign, delta))
|
def decrement(self, subname=None, delta=1)
|
Decrement the gauge with `delta`
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword delta: The delta to remove from the gauge
:type delta: int
>>> gauge = Gauge('application_name')
>>> gauge.decrement('gauge_name', 10)
True
>>> gauge.decrement(delta=10)
True
>>> gauge.decrement('gauge_name')
True
| 4.273195
| 1.768414
| 2.416399
|
'''
Set the data ignoring the sign, ie set("test", -1) will set "test"
exactly to -1 (not decrement it by 1)
See https://github.com/etsy/statsd/blob/master/docs/metric_types.md
"Adding a sign to the gauge value will change the value, rather
than setting it.
gaugor:-10|g
gaugor:+4|g
So if gaugor was 333, those commands would set it to 333 - 10 + 4, or
327.
Note: This implies you can't explicitly set a gauge to a negative
number without first setting it to zero."
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The new gauge value
'''
assert isinstance(value, compat.NUM_TYPES)
if value < 0:
self._send(subname, 0)
return self._send(subname, value)
|
def set(self, subname, value)
|
Set the data ignoring the sign, ie set("test", -1) will set "test"
exactly to -1 (not decrement it by 1)
See https://github.com/etsy/statsd/blob/master/docs/metric_types.md
"Adding a sign to the gauge value will change the value, rather
than setting it.
gaugor:-10|g
gaugor:+4|g
So if gaugor was 333, those commands would set it to 333 - 10 + 4, or
327.
Note: This implies you can't explicitly set a gauge to a negative
number without first setting it to zero."
:keyword subname: The subname to report the data to (appended to the
client name)
:type subname: str
:keyword value: The new gauge value
| 9.087666
| 1.49172
| 6.092073
|
'''Get a (sub-)client with a separate namespace
This way you can create a global/app based client with subclients
per class/function
:keyword name: The name to use, if the name for this client was `spam`
and the `name` argument is `eggs` then the resulting name will be
`spam.eggs`
:type name: str
:keyword class_: The :class:`~statsd.client.Client` subclass to use
(e.g. :class:`~statsd.timer.Timer` or
:class:`~statsd.counter.Counter`)
:type class_: :class:`~statsd.client.Client`
'''
# If the name was given, use it. Otherwise simply clone
name = self._get_name(self.name, name)
# Create using the given class, or the current class
if not class_:
class_ = self.__class__
return class_(
name=name,
connection=self.connection,
)
|
def get_client(self, name=None, class_=None)
|
Get a (sub-)client with a separate namespace
This way you can create a global/app based client with subclients
per class/function
:keyword name: The name to use, if the name for this client was `spam`
and the `name` argument is `eggs` then the resulting name will be
`spam.eggs`
:type name: str
:keyword class_: The :class:`~statsd.client.Client` subclass to use
(e.g. :class:`~statsd.timer.Timer` or
:class:`~statsd.counter.Counter`)
:type class_: :class:`~statsd.client.Client`
| 5.577327
| 1.965579
| 2.837499
|
'''Shortcut for getting an :class:`~statsd.average.Average` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Average)
|
def get_average(self, name=None)
|
Shortcut for getting an :class:`~statsd.average.Average` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
| 6.50479
| 2.714754
| 2.396089
|
'''Shortcut for getting a :class:`~statsd.counter.Counter` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Counter)
|
def get_counter(self, name=None)
|
Shortcut for getting a :class:`~statsd.counter.Counter` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
| 5.658897
| 2.647239
| 2.13766
|
'''Shortcut for getting a :class:`~statsd.gauge.Gauge` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Gauge)
|
def get_gauge(self, name=None)
|
Shortcut for getting a :class:`~statsd.gauge.Gauge` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
| 5.896401
| 2.694381
| 2.188407
|
'''Shortcut for getting a :class:`~statsd.raw.Raw` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Raw)
|
def get_raw(self, name=None)
|
Shortcut for getting a :class:`~statsd.raw.Raw` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
| 6.684026
| 2.744023
| 2.435849
|
'''Shortcut for getting a :class:`~statsd.timer.Timer` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
'''
return self.get_client(name=name, class_=statsd.Timer)
|
def get_timer(self, name=None)
|
Shortcut for getting a :class:`~statsd.timer.Timer` instance
:keyword name: See :func:`~statsd.client.Client.get_client`
:type name: str
| 5.863096
| 2.688876
| 2.1805
|
if v_str is None:
cls._git_version = None
return
v_str = v_str.strip()
try:
version = cls._git_version = tuple(
int(x) for x in v_str.split()[2].split('.')[:3])
except Exception:
raise ValueError("Could not parse git version output %r. Please "
"report this" % v_str)
return version
|
def init_git_version(cls, v_str)
|
r"""Parse git version string and store the resulting tuple on self.
:returns: the parsed version tuple
Only the first 3 digits are kept. This is good enough for the few
version dependent cases we need, and coarse enough to avoid
more complicated parsing.
Some real-life examples::
>>> GitRepo.init_git_version('git version 1.8.5.3')
(1, 8, 5)
>>> GitRepo.init_git_version('git version 1.7.2.5')
(1, 7, 2)
Seen on MacOSX (not on MacPorts)::
>>> GitRepo.init_git_version('git version 1.8.5.2 (Apple Git-48)')
(1, 8, 5)
Seen on Windows (Tortoise Git)::
>>> GitRepo.init_git_version('git version 1.8.4.msysgit.0')
(1, 8, 4)
A compiled version::
>>> GitRepo.init_git_version('git version 2.0.3.2.g996b0fd')
(2, 0, 3)
Rewrapped by `hub <https://hub.github.com/>`_, it has two lines:
>>> GitRepo.init_git_version('git version 1.7.9\nhub version 1.11.0')
(1, 7, 9)
This one does not exist, allowing us to prove that this method
actually governs the :attr:`git_version` property
>>> GitRepo.init_git_version('git version 0.0.666')
(0, 0, 666)
>>> GitRepo('', '').git_version
(0, 0, 666)
Expected exceptions::
>>> try: GitRepo.init_git_version('invalid')
... except ValueError: pass
After playing with it, we must reset it so that tests can run with
the proper detected one, if needed::
>>> GitRepo.init_git_version(None)
| 3.715954
| 4.485227
| 0.828487
|
out = self.log_call(['git', 'ls-remote', remote, ref],
cwd=self.cwd,
callwith=subprocess.check_output).strip()
for sha, fullref in (l.split() for l in out.splitlines()):
if fullref == 'refs/heads/' + ref:
return 'branch', sha
elif fullref == 'refs/tags/' + ref:
return 'tag', sha
elif fullref == ref and ref == 'HEAD':
return 'HEAD', sha
return None, ref
|
def query_remote_ref(self, remote, ref)
|
Query remote repo about given ref.
:return: ``('tag', sha)`` if ref is a tag in remote
``('branch', sha)`` if ref is branch (aka "head") in remote
``(None, ref)`` if ref does not exist in remote. This happens
notably if ref is a commit sha (they can't be queried)
| 4.175377
| 3.793506
| 1.100664
|
logger.log(log_level, "%s> call %r", self.cwd, cmd)
ret = callwith(cmd, **kw)
if callwith == subprocess.check_output:
ret = console_to_str(ret)
return ret
|
def log_call(self, cmd, callwith=subprocess.check_call,
log_level=logging.DEBUG, **kw)
|
Wrap a subprocess call with logging
:param callwith: the calling method to use.
| 4.414177
| 5.433329
| 0.812426
|
logger.info('Start aggregation of %s', self.cwd)
target_dir = self.cwd
is_new = not os.path.exists(target_dir)
if is_new:
self.init_repository(target_dir)
self._switch_to_branch(self.target['branch'])
for r in self.remotes:
self._set_remote(**r)
self.fetch()
merges = self.merges
if not is_new:
# reset to the first merge
origin = merges[0]
merges = merges[1:]
self._reset_to(origin["remote"], origin["ref"])
for merge in merges:
self._merge(merge)
self._execute_shell_command_after()
logger.info('End aggregation of %s', self.cwd)
|
def aggregate(self)
|
Aggregate all merges into the target branch
If the target_dir doesn't exist, create an empty git repo; otherwise
clean it, add all remotes, and merge all merges.
| 4.504878
| 3.79771
| 1.186209
|
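A minimal sketch of driving aggregate() directly from Python rather than via the CLI; the dict below mirrors what get_repos() (further down) produces, and the paths and URLs are illustrative rather than a documented API.

# Hypothetical programmatic aggregation; repo_dict keys mirror get_repos() output.
repo_dict = {
    'cwd': '/tmp/odoo-aggregate',
    'remotes': [{'name': 'oca', 'url': 'https://github.com/OCA/server-tools'}],
    'merges': [{'remote': 'oca', 'ref': '12.0'}],
    'target': {'remote': 'oca', 'branch': 'aggregated'},
}
r = Repo(**repo_dict)   # Repo is the class these methods belong to
r.aggregate()           # init or reset the repo, set remotes, fetch, merge each ref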
logger.info('Checking repo status')
status = self.log_call(
['git', 'status', '--porcelain'],
callwith=subprocess.check_output,
cwd=self.cwd,
)
if status:
raise DirtyException(status)
|
def _check_status(self)
|
Check repo status and raise DirtyException if dirty.
| 6.80686
| 5.02035
| 1.355854
|
cmd = tuple()
for option in FETCH_DEFAULTS:
value = merge.get(option, self.defaults.get(option))
if value:
cmd += ("--%s" % option, str(value))
return cmd
|
def _fetch_options(self, merge)
|
Get the fetch options from the given merge dict.
| 4.764218
| 3.973476
| 1.199005
|
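A small illustration of what _fetch_options produces; it assumes FETCH_DEFAULTS contains 'depth' (the actual contents of that constant are not shown above).

# Illustration only; assumes FETCH_DEFAULTS includes 'depth'.
merge = {'remote': 'oca', 'ref': '12.0', 'depth': 1}
# With self.defaults == {}, _fetch_options(merge) would return:
#     ('--depth', '1')
# and this tuple is appended to the `git fetch` command line.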
remotes = self._get_remotes()
existing_url = remotes.get(name)
if existing_url == url:
    logger.info('Remote already exists %s <%s>', name, url)
    return
if not existing_url:
    logger.info('Adding remote %s <%s>', name, url)
    self.log_call(['git', 'remote', 'add', name, url], cwd=self.cwd)
else:
    logger.info('Replacing remote %s <%s> -> <%s>',
                name, existing_url, url)
    self.log_call(['git', 'remote', 'rm', name], cwd=self.cwd)
    self.log_call(['git', 'remote', 'add', name, url], cwd=self.cwd)
|
def _set_remote(self, name, url)
|
Add remote to the repository. It's equivalent to the command
git remote add <name> <url>
If the remote already exists with another url, it's removed
and added again
| 2.272594
| 2.229442
| 1.019356
|
REPO_RE = re.compile(
'^(https://github.com/|git@github.com:)'
'(?P<owner>.*?)/(?P<repo>.*?)(.git)?$')
PULL_RE = re.compile(
'^(refs/)?pull/(?P<pr>[0-9]+)/head$')
remotes = {r['name']: r['url'] for r in self.remotes}
all_prs = {}
for merge in self.merges:
remote = merge['remote']
ref = merge['ref']
repo_url = remotes[remote]
repo_mo = REPO_RE.match(repo_url)
if not repo_mo:
logger.debug('%s is not a github repo', repo_url)
continue
pull_mo = PULL_RE.match(ref)
if not pull_mo:
            logger.debug('%s is not a github pull request', ref)
continue
pr_info = {
'owner': repo_mo.group('owner'),
'repo': repo_mo.group('repo'),
'pr': pull_mo.group('pr'),
}
pr_info['path'] = '{owner}/{repo}/pulls/{pr}'.format(**pr_info)
pr_info['url'] = 'https://github.com/{path}'.format(**pr_info)
pr_info['shortcut'] = '{owner}/{repo}#{pr}'.format(**pr_info)
r = self._github_api_get('/repos/{path}'.format(**pr_info))
if r.status_code != 200:
logger.warning(
'Could not get status of {path}. '
'Reason: {r.status_code} {r.reason}'.format(r=r, **pr_info)
)
continue
pr_info['state'] = r.json().get('state')
pr_info['merged'] = (
not r.json().get('merged') and 'not ' or ''
) + 'merged'
all_prs.setdefault(pr_info['state'], []).append(pr_info)
return all_prs
|
def collect_prs_info(self)
|
Collect all pending merge PRs info.
:returns: mapping of PRs by state
| 2.309986
| 2.335198
| 0.989204
|
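For readability, the shape of the mapping returned by collect_prs_info(); owners, numbers and states below are made up.

# Illustrative return value of collect_prs_info(); all values are made up.
{
    'open': [{
        'owner': 'OCA', 'repo': 'server-tools', 'pr': '123',
        'path': 'OCA/server-tools/pulls/123',
        'url': 'https://github.com/OCA/server-tools/pulls/123',
        'shortcut': 'OCA/server-tools#123',
        'state': 'open', 'merged': 'not merged',
    }],
    'closed': [
        # same structure, for pull requests that were merged or closed
    ],
}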
all_prs = self.collect_prs_info()
for pr_info in all_prs.get('closed', []):
logger.info(
'{url} in state {state} ({merged})'.format(**pr_info)
)
|
def show_closed_prs(self)
|
Log only closed PRs.
| 5.477186
| 5.35555
| 1.022712
|
for __, prs in self.collect_prs_info().items():
for pr_info in prs:
logger.info(
'{url} in state {state} ({merged})'.format(**pr_info)
)
|
def show_all_prs(self)
|
Log all PRs grouped by state.
| 6.871643
| 6.009881
| 1.143391
|
repo_list = []
for directory, repo_data in config.items():
if not os.path.isabs(directory):
directory = os.path.abspath(directory)
repo_dict = {
'cwd': directory,
'defaults': repo_data.get('defaults', dict()),
'force': force,
}
remote_names = set()
if 'remotes' in repo_data:
repo_dict['remotes'] = []
remotes_data = repo_data['remotes'] or {}
for remote_name, url in remotes_data.items():
if not url:
raise ConfigException(
'%s: No url defined for remote %s.' %
(directory, remote_name))
remote_dict = {
'name': remote_name,
'url': url
}
repo_dict['remotes'].append(remote_dict)
remote_names.add(remote_name)
if not remote_names:
raise ConfigException(
'%s: You should at least define one remote.' % directory)
else:
raise ConfigException('%s: remotes is not defined.' % directory)
if 'merges' in repo_data:
merges = []
merge_data = repo_data.get('merges') or []
for merge in merge_data:
try:
                    # Assume merge is a str
parts = merge.split(' ')
if len(parts) != 2:
raise ConfigException(
'%s: Merge must be formatted as '
'"remote_name ref".' % directory)
merge = {
"remote": parts[0],
"ref": parts[1],
}
except AttributeError:
                    # merge is a dict
try:
merge["remote"] = str(merge["remote"])
merge["ref"] = str(merge["ref"])
except KeyError:
raise ConfigException(
'%s: Merge lacks mandatory '
'`remote` or `ref` keys.' % directory)
# Check remote is available
if merge["remote"] not in remote_names:
raise ConfigException(
'%s: Merge remote %s not defined in remotes.' %
(directory, merge["remote"]))
merges.append(merge)
repo_dict['merges'] = merges
if not merges:
raise ConfigException(
'%s: You should at least define one merge.' % directory)
else:
raise ConfigException(
'%s: merges is not defined.' % directory)
# Only fetch required remotes by default
repo_dict["fetch_all"] = repo_data.get("fetch_all", False)
if isinstance(repo_dict["fetch_all"], string_types):
repo_dict["fetch_all"] = frozenset((repo_dict["fetch_all"],))
elif isinstance(repo_dict["fetch_all"], list):
repo_dict["fetch_all"] = frozenset(repo_dict["fetch_all"])
if 'target' not in repo_data:
raise ConfigException('%s: No target defined.' % directory)
    parts = (repo_data.get('target') or "").split(' ')
if len(parts) != 2:
raise ConfigException(
'%s: Target must be formatted as '
'"remote_name branch_name"' % directory)
remote_name, branch = repo_data.get('target').split(' ')
if remote_name not in remote_names:
raise ConfigException(
'%s: Target remote %s not defined in remotes.' %
(directory, remote_name))
repo_dict['target'] = {
'remote': remote_name,
'branch': branch,
}
commands = []
if 'shell_command_after' in repo_data:
cmds = repo_data['shell_command_after']
# if str: turn to list
if cmds:
if isinstance(cmds, string_types):
cmds = [cmds]
commands = cmds
repo_dict['shell_command_after'] = commands
repo_list.append(repo_dict)
return repo_list
|
def get_repos(config, force=False)
|
Return a :py:obj:`list` of repos from the config file.
:param config: the repos config in :py:class:`dict` format.
:param bool force: Force aggregate dirty repos or not.
:type config: dict
:rtype: list
| 2.095239
| 2.096848
| 0.999232
|
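A hedged example of the config dict that get_repos() accepts and the normalization it performs; the directory, remote names and refs are illustrative.

# Illustrative get_repos() input; paths and URLs are made up.
config = {
    '/tmp/odoo-aggregate': {
        'remotes': {'oca': 'https://github.com/OCA/server-tools'},
        'merges': [
            'oca 12.0',                                      # "remote ref" string form
            {'remote': 'oca', 'ref': 'refs/pull/123/head'},  # dict form
        ],
        'target': 'oca aggregated',
        'shell_command_after': 'echo done',
    },
}
repos = get_repos(config)
# repos[0]['merges'][0] == {'remote': 'oca', 'ref': '12.0'}
# repos[0]['target'] == {'remote': 'oca', 'branch': 'aggregated'}
# repos[0]['shell_command_after'] == ['echo done']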
if not os.path.exists(config):
raise ConfigException('Unable to find configuration file: %s' % config)
file_extension = os.path.splitext(config)[1][1:]
conf = kaptan.Kaptan(handler=kaptan.HANDLER_EXT.get(file_extension))
if expand_env:
with open(config, 'r') as file_handler:
config = Template(file_handler.read())
config = config.substitute(os.environ)
conf.import_config(config)
return get_repos(conf.export('dict') or {}, force)
|
def load_config(config, expand_env=False, force=False)
|
Return repos parsed from a config file.
:param config: path to the config file
:type config: str
:param expand_env: True to expand environment variables in the config.
:type expand_env: bool
:param bool force: True to aggregate even if repo is dirty.
:returns: expanded config dict item
:rtype: iter(dict)
| 4.096
| 3.921625
| 1.044465
|
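A short usage sketch for load_config(); the file name is illustrative, and with expand_env=True any $VARIABLE occurrences in the file are substituted from os.environ via string.Template before parsing.

# Hypothetical call; 'repos.yaml' is an illustrative path.
repos = load_config('repos.yaml', expand_env=True)
for repo_dict in repos:
    print(repo_dict['cwd'], repo_dict['target'])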
if not log:
log = logging.getLogger()
if not log.handlers:
channel = logging.StreamHandler()
if level == logging.DEBUG:
channel.setFormatter(DebugLogFormatter())
else:
channel.setFormatter(LogFormatter())
log.setLevel(level)
log.addHandler(channel)
|
def setup_logger(log=None, level=logging.INFO)
|
Setup logging for CLI use.
:param log: instance of logger
:type log: :py:class:`Logger`
| 2.450916
| 2.635969
| 0.929797
|
main_parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter)
main_parser.add_argument(
'-c', '--config',
dest='config',
type=str,
nargs='?',
help='Pull the latest repositories from config(s)'
).completer = argcomplete.completers.FilesCompleter(
allowednames=('.yaml', '.yml', '.json'), directories=False
)
main_parser.add_argument(
'-p', '--push',
dest='do_push',
action='store_true', default=False,
help='Push result to target',
)
main_parser.add_argument(
'-d', '--dirmatch',
dest='dirmatch',
type=str,
nargs='?',
        help='Pull only from the directories. Accepts fnmatch(1) '
             'patterns.'
)
main_parser.add_argument(
'--log-level',
default='INFO',
dest='log_level',
type=_log_level_string_to_int,
nargs='?',
help='Set the logging output level. {0}'.format(_LOG_LEVEL_STRINGS))
main_parser.add_argument(
'-e', '--expand-env',
dest='expand_env',
default=False,
action='store_true',
help='Expand environment variables in configuration file',
)
main_parser.add_argument(
'-f', '--force',
dest='force',
default=False,
action='store_true',
help='Force cleanup and aggregation on dirty repositories.',
)
main_parser.add_argument(
'-j', '--jobs',
dest='jobs',
default=1,
type=int,
help='Amount of processes to use when aggregating repos. '
'This is useful when there are a lot of large repos. '
'Set `1` or less to disable multiprocessing (default).',
)
main_parser.add_argument(
'command',
nargs='?',
default='aggregate',
help='aggregate (default): run the aggregation process.\n'
'show-all-prs: show GitHub pull requests in merge sections\n'
        ' such pull requests are identified as having\n'
' a github.com remote and a\n'
' refs/pull/NNN/head ref in the merge section.\n'
'show-closed-prs: show pull requests that are not open anymore.\n'
)
return main_parser
|
def get_parser()
|
Return :py:class:`argparse.ArgumentParser` instance for CLI.
| 3.838035
| 3.770102
| 1.018019
|
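To illustrate the parser's behaviour without touching the shell, one can feed argv to it directly; the option names below come from the add_argument calls above, and the file name is illustrative.

# Hypothetical parse of a typical invocation.
parser = get_parser()
args = parser.parse_args(['-c', 'repos.yaml', '--expand-env', '-j', '4', 'aggregate'])
print(args.config, args.expand_env, args.jobs, args.command)
# -> repos.yaml True 4 aggregate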
parser = get_parser()
argcomplete.autocomplete(parser, always_complete_options=False)
args = parser.parse_args()
setup_logger(
level=args.log_level
)
try:
if args.config and \
args.command in \
('aggregate', 'show-closed-prs', 'show-all-prs'):
run(args)
else:
parser.print_help()
except KeyboardInterrupt:
pass
|
def main()
|
Main CLI application.
| 4.605954
| 4.456965
| 1.033428
|
repos = load_config(args.config, args.expand_env)
dirmatch = args.dirmatch
for repo_dict in repos:
r = Repo(**repo_dict)
logger.debug('%s' % r)
if not match_dir(r.cwd, dirmatch):
logger.info("Skip %s", r.cwd)
continue
r.aggregate()
if args.do_push:
r.push()
|
def load_aggregate(args)
|
Load YAML and JSON configs and begin creating/updating, aggregating
and pushing the repos (deprecated in favor of run()).
| 5.196364
| 4.646417
| 1.11836
|
try:
logger.debug('%s' % repo)
dirmatch = args.dirmatch
if not match_dir(repo.cwd, dirmatch):
logger.info("Skip %s", repo.cwd)
return
if args.command == 'aggregate':
repo.aggregate()
if args.do_push:
repo.push()
elif args.command == 'show-closed-prs':
repo.show_closed_prs()
elif args.command == 'show-all-prs':
repo.show_all_prs()
except Exception:
err_queue.put_nowait(sys.exc_info())
finally:
sem.release()
|
def aggregate_repo(repo, args, sem, err_queue)
|
Aggregate one repo according to the args.
Args:
repo (Repo): The repository to aggregate.
args (argparse.Namespace): CLI arguments.
sem (threading.Semaphore): semaphore released once the repo has been processed.
err_queue (Queue): queue receiving sys.exc_info() tuples when aggregation fails.
| 3.212941
| 3.277561
| 0.980284
|
repos = load_config(args.config, args.expand_env, args.force)
jobs = max(args.jobs, 1)
threads = []
sem = threading.Semaphore(jobs)
err_queue = Queue()
for repo_dict in repos:
if not err_queue.empty():
break
sem.acquire()
r = Repo(**repo_dict)
tname = os.path.basename(repo_dict['cwd'])
if jobs > 1:
t = threading.Thread(
target=aggregate_repo, args=(r, args, sem, err_queue))
t.daemon = True
t.name = tname
threads.append(t)
t.start()
else:
with ThreadNameKeeper():
threading.current_thread().name = tname
aggregate_repo(r, args, sem, err_queue)
for t in threads:
t.join()
if not err_queue.empty():
while True:
try:
exc_type, exc_obj, exc_trace = err_queue.get_nowait()
except EmptyQueue:
break
traceback.print_exception(exc_type, exc_obj, exc_trace)
sys.exit(1)
|
def run(args)
|
Load YAML and JSON configs and run the command specified
in args.command
| 3.002118
| 3.018315
| 0.994634
|
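A hedged sketch of invoking run() programmatically with a hand-built namespace; the attribute names mirror those defined in get_parser(), and the config path is illustrative.

# Hypothetical programmatic invocation of run(); 'repos.yaml' is illustrative.
import argparse
import logging

args = argparse.Namespace(
    config='repos.yaml', expand_env=False, force=False, dirmatch=None,
    jobs=2, do_push=False, command='aggregate', log_level=logging.INFO,
)
setup_logger(level=args.log_level)
run(args)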
if state.reporter.get_errors():
state.do_test(incorrect_msg)
return state
|
def has_no_error(
state, incorrect_msg="Your code generated an error. Fix it and try again!"
)
|
Check whether the submission did not generate a runtime error.
Simply use ``Ex().has_no_error()`` in your SCT whenever you want to check for errors.
By default, after the entire SCT finished executing, ``sqlwhat`` will check
for errors before marking the exercise as correct. You can disable this behavior
by using ``Ex().allow_error()``.
Args:
incorrect_msg: If specified, this overrides the automatically generated feedback message
in case the student's code generated an error.
| 10.734056
| 16.467579
| 0.65183
|
# first check if there is no error
has_no_error(state)
if not state.solution_result:
raise NameError(
"You are using has_result() to verify that the student query generated an error, but the solution query did not return a result either!"
)
if not state.student_result:
state.do_test(incorrect_msg)
return state
|
def has_result(state, incorrect_msg="Your query did not return a result.")
|
Checks if the student's query returned a result.
Args:
incorrect_msg: If specified, this overrides the automatically generated feedback message
in case the student's query did not return a result.
| 8.834644
| 8.610726
| 1.026005
|
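A tiny SCT sketch using has_result(); Ex() is the usual sqlwhat entry point seen in the other examples in this file, and the custom message is illustrative.

# Illustrative SCT: fail with a custom message when the query returns nothing.
Ex().has_result(incorrect_msg='Your query should return at least one row.')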
# check that query returned something
has_result(state)
# assumes that columns cannot be jagged in size
n_stu = len(next(iter(state.student_result.values())))
n_sol = len(next(iter(state.solution_result.values())))
if n_stu != n_sol:
_msg = state.build_message(
incorrect_msg, fmt_kwargs={"n_stu": n_stu, "n_sol": n_sol}
)
state.do_test(_msg)
return state
|
def has_nrows(
state,
incorrect_msg="Your query returned a table with {{n_stu}} row{{'s' if n_stu > 1 else ''}} while it should return a table with {{n_sol}} row{{'s' if n_sol > 1 else ''}}.",
)
|
Test whether the student and solution query results have equal numbers of rows.
Args:
incorrect_msg: If specified, this overrides the automatically generated feedback message
in case the number of rows in the student and solution query don't match.
| 4.464823
| 4.724498
| 0.945036
|
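A short SCT sketch for has_nrows(); the solution and submission queries are made up to show when the check fails.

# solution:   SELECT name FROM artists LIMIT 5
# submission: SELECT name FROM artists LIMIT 3
# The SCT below fails because the student result has 3 rows instead of 5.
Ex().has_nrows()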
# check that query returned something
has_result(state)
n_stu = len(state.student_result)
n_sol = len(state.solution_result)
if n_stu != n_sol:
_msg = state.build_message(
incorrect_msg, fmt_kwargs={"n_stu": n_stu, "n_sol": n_sol}
)
state.do_test(_msg)
return state
|
def has_ncols(
state,
incorrect_msg="Your query returned a table with {{n_stu}} column{{'s' if n_stu > 1 else ''}} while it should return a table with {{n_sol}} column{{'s' if n_sol > 1 else ''}}.",
)
|
Test whether the student and solution query results have equal numbers of columns.
Args:
incorrect_msg: If specified, this overrides the automatically generated feedback message
in case the number of columns in the student and solution query don't match.
:Example:
Consider the following solution and SCT: ::
# solution
SELECT artist_id as id, name FROM artists
# sct
Ex().has_ncols()
# passing submission
SELECT artist_id as id, name FROM artists
# failing submission (too few columns)
SELECT artist_id as id FROM artists
# passing submission (two columns, even though not correct ones)
SELECT artist_id, label FROM artists
| 3.626486
| 4.307378
| 0.841924
|
if missing_msg is None:
missing_msg = "The system wants to verify row {{index + 1}} of your query result, but couldn't find it. Have another look."
if expand_msg is None:
expand_msg = "Have another look at row {{index + 1}} in your query result. "
msg_kwargs = {"index": index}
# check that query returned something
has_result(state)
stu_res = state.student_result
sol_res = state.solution_result
n_sol = len(next(iter(sol_res.values())))
n_stu = len(next(iter(stu_res.values())))
if index >= n_sol:
raise BaseException(
"There are only {} rows in the solution query result, and you're trying to fetch the row at index {}".format(
n_sol, index
)
)
if index >= n_stu:
_msg = state.build_message(missing_msg, fmt_kwargs=msg_kwargs)
state.do_test(_msg)
return state.to_child(
append_message={"msg": expand_msg, "kwargs": msg_kwargs},
student_result={k: [v[index]] for k, v in stu_res.items()},
solution_result={k: [v[index]] for k, v in sol_res.items()},
)
|
def check_row(state, index, missing_msg=None, expand_msg=None)
|
Zoom in on a particular row in the query result, by index.
After zooming in on a row, which is represented as a single-row query result,
you can use ``has_equal_value()`` to verify whether all columns in the zoomed in solution
query result have a match in the student query result.
Args:
index: index of the row to zoom in on (zero-based indexed).
missing_msg: if specified, this overrides the automatically generated feedback
message in case the row is missing in the student query result.
expand_msg: if specified, this overrides the automatically generated feedback
message that is prepended to feedback messages that are thrown
further in the SCT chain.
:Example:
Suppose we are testing the following SELECT statements
* solution: ``SELECT artist_id as id, name FROM artists LIMIT 5``
* student : ``SELECT artist_id, name FROM artists LIMIT 2``
We can write the following SCTs: ::
# fails, since row 3 at index 2 is not in the student result
Ex().check_row(2)
# passes, since row 2 at index 1 is in the student result
Ex().check_row(0)
| 4.103746
| 3.983594
| 1.030162
|
if missing_msg is None:
missing_msg = "We expected to find a column named `{{name}}` in the result of your query, but couldn't."
if expand_msg is None:
expand_msg = "Have another look at your query result. "
msg_kwargs = {"name": name}
# check that query returned something
has_result(state)
stu_res = state.student_result
sol_res = state.solution_result
if name not in sol_res:
raise BaseException("name %s not in solution column names" % name)
if name not in stu_res:
_msg = state.build_message(missing_msg, fmt_kwargs=msg_kwargs)
state.do_test(_msg)
return state.to_child(
append_message={"msg": expand_msg, "kwargs": msg_kwargs},
student_result={name: stu_res[name]},
solution_result={name: sol_res[name]},
)
|
def check_column(state, name, missing_msg=None, expand_msg=None)
|
Zoom in on a particular column in the query result, by name.
After zooming in on a column, which is represented as a single-column query result,
you can use ``has_equal_value()`` to verify whether the column in the solution query result
matches the column in student query result.
Args:
name: name of the column to zoom in on.
missing_msg: if specified, this overrides the automatically generated feedback
message in case the column is missing in the student query result.
expand_msg: if specified, this overrides the automatically generated feedback
message that is prepended to feedback messages that are thrown
further in the SCT chain.
:Example:
Suppose we are testing the following SELECT statements
* solution: ``SELECT artist_id as id, name FROM artists``
* student : ``SELECT artist_id, name FROM artists``
We can write the following SCTs: ::
# fails, since no column named id in student result
Ex().check_column('id')
# passes, since a column named name is in student_result
Ex().check_column('name')
| 4.904038
| 4.908629
| 0.999065
|
if too_many_cols_msg is None:
too_many_cols_msg = (
"Your query result contains the column {{col}} but shouldn't."
)
if expand_msg is None:
expand_msg = "Have another look at your query result. "
child_stu_result = {}
child_sol_result = {}
for col in state.solution_result:
child = check_column(state, col)
child_stu_result.update(**child.student_result)
child_sol_result.update(**child.solution_result)
cols_not_in_sol = list(
set(state.student_result.keys()) - set(child_stu_result.keys())
)
if not allow_extra and len(cols_not_in_sol) > 0:
_msg = state.build_message(
"Your query result contains the column `{{col}}` but shouldn't.",
fmt_kwargs={"col": cols_not_in_sol[0]},
)
state.do_test(_msg)
return state.to_child(
append_message={"msg": expand_msg, "kwargs": {}},
student_result=child_stu_result,
solution_result=child_sol_result,
)
|
def check_all_columns(state, allow_extra=True, too_many_cols_msg=None, expand_msg=None)
|
Zoom in on the columns that are specified by the solution
Behind the scenes, this is using ``check_column()`` for every column that is in the solution query result.
Afterwards, it's selecting only these columns from the student query result and stores them in a child
state that is returned, so you can use ``has_equal_value()`` on it.
This function does not allow you to customize the messages for ``check_column()``. If you want to manually
set those, simply use ``check_column()`` explicitly.
Args:
allow_extra: True by default, this determines whether students are allowed to have included
other columns in their query result.
too_many_cols_msg: If specified, this overrides the automatically generated feedback message in
case ``allow_extra`` is False and the student's query returned extra columns
compared to the solution query result.
expand_msg: if specified, this overrides the automatically generated feedback
message that is prepended to feedback messages that are thrown
further in the SCT chain.
:Example:
Consider the following solution and SCT: ::
# solution
SELECT artist_id as id, name FROM artists
# sct
Ex().check_all_columns()
# passing submission
SELECT artist_id as id, name FROM artists
# failing submission (wrong names)
SELECT artist_id, name FROM artists
# passing submission (allow_extra is True by default)
SELECT artist_id as id, name, label FROM artists
| 3.527079
| 3.23195
| 1.091316
|