Dataset columns:
  id_within_dataset     int64    values 1 to 55.5k
  snippet               string   lengths 19 to 14.2k
  tokens                list     lengths 6 to 1.63k
  nl                    string   lengths 6 to 352
  split_within_dataset  string   1 class
  is_duplicated         bool     2 classes
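Each row below pairs a Python snippet with its token list and a short natural-language description. As a minimal sketch of how one might load and inspect rows with this schema, assuming the Hugging Face `datasets` library is available (the dataset path "user/code-nl-pairs" is a hypothetical placeholder for wherever this dump is hosted):

from datasets import load_dataset

# Hypothetical repository name; substitute the actual dataset path.
ds = load_dataset("user/code-nl-pairs", split="train")

row = ds[0]
print(row["id_within_dataset"])  # integer id, e.g. 16569
print(row["snippet"])            # raw source of one Python function
print(row["tokens"][:8])         # first few code tokens
print(row["nl"])                 # natural-language description of the snippet
print(row["is_duplicated"])      # True if the row is flagged as a duplicate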
16,569
def _print_on_nosuchfile(e):
    if (e.errno == 2):
        logging.error("Could not find zic. Perhaps you need to install libc-bin or some other package that provides it, or it's not in your PATH?")
[ "def", "_print_on_nosuchfile", "(", "e", ")", ":", "if", "(", "e", ".", "errno", "==", "2", ")", ":", "logging", ".", "error", "(", "\"Could not find zic. Perhaps you need to install libc-bin or some other package that provides it, or it's not in your PATH?\"", ")" ]
print helpful troubleshooting message e is an exception raised by subprocess .
train
false
16,570
def masi_distance(label1, label2):
    len_intersection = len(label1.intersection(label2))
    len_union = len(label1.union(label2))
    len_label1 = len(label1)
    len_label2 = len(label2)
    if ((len_label1 == len_label2) and (len_label1 == len_intersection)):
        m = 1
    elif (len_intersection == min(len_label1, len_label2)):
        m = 0.67
    elif (len_intersection > 0):
        m = 0.33
    else:
        m = 0
    return ((1 - (len_intersection / float(len_union))) * m)
[ "def", "masi_distance", "(", "label1", ",", "label2", ")", ":", "len_intersection", "=", "len", "(", "label1", ".", "intersection", "(", "label2", ")", ")", "len_union", "=", "len", "(", "label1", ".", "union", "(", "label2", ")", ")", "len_label1", "=", "len", "(", "label1", ")", "len_label2", "=", "len", "(", "label2", ")", "if", "(", "(", "len_label1", "==", "len_label2", ")", "and", "(", "len_label1", "==", "len_intersection", ")", ")", ":", "m", "=", "1", "elif", "(", "len_intersection", "==", "min", "(", "len_label1", ",", "len_label2", ")", ")", ":", "m", "=", "0.67", "elif", "(", "len_intersection", ">", "0", ")", ":", "m", "=", "0.33", "else", ":", "m", "=", "0", "return", "(", "(", "1", "-", "(", "len_intersection", "/", "float", "(", "len_union", ")", ")", ")", "*", "m", ")" ]
distance metric that takes into account partial agreement when multiple labels are assigned .
train
false
16,571
def _find_set_info(set):
    cmd = '{0} list -t {1}'.format(_ipset_cmd(), set)
    out = __salt__['cmd.run_all'](cmd, python_shell=False)
    if (out['retcode'] > 0):
        return False
    setinfo = {}
    _tmp = out['stdout'].split('\n')
    for item in _tmp:
        if (':' in item):
            (key, value) = item.split(':', 1)
            setinfo[key] = value[1:]
    return setinfo
[ "def", "_find_set_info", "(", "set", ")", ":", "cmd", "=", "'{0} list -t {1}'", ".", "format", "(", "_ipset_cmd", "(", ")", ",", "set", ")", "out", "=", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "python_shell", "=", "False", ")", "if", "(", "out", "[", "'retcode'", "]", ">", "0", ")", ":", "return", "False", "setinfo", "=", "{", "}", "_tmp", "=", "out", "[", "'stdout'", "]", ".", "split", "(", "'\\n'", ")", "for", "item", "in", "_tmp", ":", "if", "(", "':'", "in", "item", ")", ":", "(", "key", ",", "value", ")", "=", "item", ".", "split", "(", "':'", ",", "1", ")", "setinfo", "[", "key", "]", "=", "value", "[", "1", ":", "]", "return", "setinfo" ]
return information about the set .
train
true
16,573
def rm_handler(app, handler_name, func, key=None):
    handler_funcs_name = '{0}_funcs'.format(handler_name)
    handler_funcs = getattr(app, handler_funcs_name)
    try:
        handler_funcs.get(key, []).remove(func)
    except ValueError:
        pass
[ "def", "rm_handler", "(", "app", ",", "handler_name", ",", "func", ",", "key", "=", "None", ")", ":", "handler_funcs_name", "=", "'{0}_funcs'", ".", "format", "(", "handler_name", ")", "handler_funcs", "=", "getattr", "(", "app", ",", "handler_funcs_name", ")", "try", ":", "handler_funcs", ".", "get", "(", "key", ",", "[", "]", ")", ".", "remove", "(", "func", ")", "except", "ValueError", ":", "pass" ]
remove a handler from an application .
train
false
16,575
def base_sqs():
    sqs = SearchQuerySet()
    for facet in settings.OSCAR_SEARCH_FACETS['fields'].values():
        options = facet.get('options', {})
        sqs = sqs.facet(facet['field'], **options)
    for facet in settings.OSCAR_SEARCH_FACETS['queries'].values():
        for query in facet['queries']:
            sqs = sqs.query_facet(facet['field'], query[1])
    return sqs
[ "def", "base_sqs", "(", ")", ":", "sqs", "=", "SearchQuerySet", "(", ")", "for", "facet", "in", "settings", ".", "OSCAR_SEARCH_FACETS", "[", "'fields'", "]", ".", "values", "(", ")", ":", "options", "=", "facet", ".", "get", "(", "'options'", ",", "{", "}", ")", "sqs", "=", "sqs", ".", "facet", "(", "facet", "[", "'field'", "]", ",", "**", "options", ")", "for", "facet", "in", "settings", ".", "OSCAR_SEARCH_FACETS", "[", "'queries'", "]", ".", "values", "(", ")", ":", "for", "query", "in", "facet", "[", "'queries'", "]", ":", "sqs", "=", "sqs", ".", "query_facet", "(", "facet", "[", "'field'", "]", ",", "query", "[", "1", "]", ")", "return", "sqs" ]
return the base searchqueryset for haystack searches .
train
false
16,576
def errprint(msg):
    msg = as_unicode(msg)
    if ((not request) or (not (u'cmd' in local.form_dict)) or conf.developer_mode):
        print msg.encode(u'utf-8')
    error_log.append(msg)
[ "def", "errprint", "(", "msg", ")", ":", "msg", "=", "as_unicode", "(", "msg", ")", "if", "(", "(", "not", "request", ")", "or", "(", "not", "(", "u'cmd'", "in", "local", ".", "form_dict", ")", ")", "or", "conf", ".", "developer_mode", ")", ":", "print", "msg", ".", "encode", "(", "u'utf-8'", ")", "error_log", ".", "append", "(", "msg", ")" ]
log error .
train
false
16,577
def count_trackbacks_handler(sender, **kwargs):
    entry = kwargs['entry']
    entry.trackback_count = (F('trackback_count') + 1)
    entry.save(update_fields=['trackback_count'])
[ "def", "count_trackbacks_handler", "(", "sender", ",", "**", "kwargs", ")", ":", "entry", "=", "kwargs", "[", "'entry'", "]", "entry", ".", "trackback_count", "=", "(", "F", "(", "'trackback_count'", ")", "+", "1", ")", "entry", ".", "save", "(", "update_fields", "=", "[", "'trackback_count'", "]", ")" ]
update entry .
train
true
16,578
@bdd.then(bdd.parsers.parse('the completion model should be {model}'))
def check_model(quteproc, model):
    pattern = 'Setting completion model to {} with pattern *'.format(model)
    quteproc.wait_for(message=pattern)
[ "@", "bdd", ".", "then", "(", "bdd", ".", "parsers", ".", "parse", "(", "'the completion model should be {model}'", ")", ")", "def", "check_model", "(", "quteproc", ",", "model", ")", ":", "pattern", "=", "'Setting completion model to {} with pattern *'", ".", "format", "(", "model", ")", "quteproc", ".", "wait_for", "(", "message", "=", "pattern", ")" ]
make sure the completion model was set to something .
train
false
16,579
def if_matplotlib(func):
    @wraps(func)
    def run_test(*args, **kwargs):
        try:
            import matplotlib
            matplotlib.use('Agg', warn=False)
            import matplotlib.pyplot as plt
            plt.figure()
        except ImportError:
            raise SkipTest('Matplotlib not available.')
        else:
            return func(*args, **kwargs)
    return run_test
[ "def", "if_matplotlib", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "run_test", "(", "*", "args", ",", "**", "kwargs", ")", ":", "try", ":", "import", "matplotlib", "matplotlib", ".", "use", "(", "'Agg'", ",", "warn", "=", "False", ")", "import", "matplotlib", ".", "pyplot", "as", "plt", "plt", ".", "figure", "(", ")", "except", "ImportError", ":", "raise", "SkipTest", "(", "'Matplotlib not available.'", ")", "else", ":", "return", "func", "(", "*", "args", ",", "**", "kwargs", ")", "return", "run_test" ]
test decorator that skips test if matplotlib not installed .
train
false
16,580
@contextmanager
def persistent_grades_feature_flags(global_flag, enabled_for_all_courses=False, course_id=None, enabled_for_course=False):
    PersistentGradesEnabledFlag.objects.create(enabled=global_flag, enabled_for_all_courses=enabled_for_all_courses)
    if course_id:
        CoursePersistentGradesFlag.objects.create(course_id=course_id, enabled=enabled_for_course)
    (yield)
[ "@", "contextmanager", "def", "persistent_grades_feature_flags", "(", "global_flag", ",", "enabled_for_all_courses", "=", "False", ",", "course_id", "=", "None", ",", "enabled_for_course", "=", "False", ")", ":", "PersistentGradesEnabledFlag", ".", "objects", ".", "create", "(", "enabled", "=", "global_flag", ",", "enabled_for_all_courses", "=", "enabled_for_all_courses", ")", "if", "course_id", ":", "CoursePersistentGradesFlag", ".", "objects", ".", "create", "(", "course_id", "=", "course_id", ",", "enabled", "=", "enabled_for_course", ")", "(", "yield", ")" ]
most test cases will use a single call to this manager .
train
false
16,581
def update_item_projected_qty(item_code):
    frappe.db.sql(u'update tabItem set\n DCTB DCTB total_projected_qty = ifnull((select sum(projected_qty) from tabBin where item_code=%s), 0)\n DCTB DCTB where name=%s', (item_code, item_code))
[ "def", "update_item_projected_qty", "(", "item_code", ")", ":", "frappe", ".", "db", ".", "sql", "(", "u'update tabItem set\\n DCTB DCTB total_projected_qty = ifnull((select sum(projected_qty) from tabBin where item_code=%s), 0)\\n DCTB DCTB where name=%s'", ",", "(", "item_code", ",", "item_code", ")", ")" ]
set total_projected_qty in item as sum of projected qty in all warehouses .
train
false
16,582
def _get_image(vm_):
    images = avail_images()
    vm_image = str(config.get_cloud_config_value('image', vm_, __opts__, search_global=False))
    if (not vm_image):
        raise SaltCloudNotFound('No image specified for this VM.')
    if (vm_image in images):
        return vm_image
    raise SaltCloudNotFound("The specified image, '{0}', could not be found.".format(vm_image))
[ "def", "_get_image", "(", "vm_", ")", ":", "images", "=", "avail_images", "(", ")", "vm_image", "=", "str", "(", "config", ".", "get_cloud_config_value", "(", "'image'", ",", "vm_", ",", "__opts__", ",", "search_global", "=", "False", ")", ")", "if", "(", "not", "vm_image", ")", ":", "raise", "SaltCloudNotFound", "(", "'No image specified for this VM.'", ")", "if", "(", "vm_image", "in", "images", ")", ":", "return", "vm_image", "raise", "SaltCloudNotFound", "(", "\"The specified image, '{0}', could not be found.\"", ".", "format", "(", "vm_image", ")", ")" ]
return the vms image .
train
false
16,583
def logger(_modem, message_, type_): pass
[ "def", "logger", "(", "_modem", ",", "message_", ",", "type_", ")", ":", "pass" ]
fixture which provides a logger for tests .
train
false
16,584
def max_inputs_to_GpuElemwise(node):
    type_sizes = get_device_type_sizes()
    int_size = type_sizes['int_size']
    gpu_ptr_size = type_sizes['gpu_ptr_size']
    argument_limit = 232
    ndim = node.inputs[0].type.ndim
    size_param_mandatory = int_size
    size_param_mandatory += (int_size * ndim)
    size_param_mandatory += sum(((gpu_ptr_size + (int_size * ndim)) for i in node.outputs))
    nb_bytes_avail = (argument_limit - size_param_mandatory)
    nb_bytes_per_inputs = ((ndim * int_size) + gpu_ptr_size)
    max_nb_inputs = (nb_bytes_avail // nb_bytes_per_inputs)
    if ((node.inputs[0].type.ndim == 1) and (max_nb_inputs > 14)):
        return 14
    return max_nb_inputs
[ "def", "max_inputs_to_GpuElemwise", "(", "node", ")", ":", "type_sizes", "=", "get_device_type_sizes", "(", ")", "int_size", "=", "type_sizes", "[", "'int_size'", "]", "gpu_ptr_size", "=", "type_sizes", "[", "'gpu_ptr_size'", "]", "argument_limit", "=", "232", "ndim", "=", "node", ".", "inputs", "[", "0", "]", ".", "type", ".", "ndim", "size_param_mandatory", "=", "int_size", "size_param_mandatory", "+=", "(", "int_size", "*", "ndim", ")", "size_param_mandatory", "+=", "sum", "(", "(", "(", "gpu_ptr_size", "+", "(", "int_size", "*", "ndim", ")", ")", "for", "i", "in", "node", ".", "outputs", ")", ")", "nb_bytes_avail", "=", "(", "argument_limit", "-", "size_param_mandatory", ")", "nb_bytes_per_inputs", "=", "(", "(", "ndim", "*", "int_size", ")", "+", "gpu_ptr_size", ")", "max_nb_inputs", "=", "(", "nb_bytes_avail", "//", "nb_bytes_per_inputs", ")", "if", "(", "(", "node", ".", "inputs", "[", "0", "]", ".", "type", ".", "ndim", "==", "1", ")", "and", "(", "max_nb_inputs", ">", "14", ")", ")", ":", "return", "14", "return", "max_nb_inputs" ]
return the maximum number of inputs this gpuelemwise apply node can accept .
train
false
16,586
def check_chars_data_fields(header, mapping_data, warnings):
    allowed_data_field_chars = (('+-%./ :,;_' + digits) + letters)
    allowed_sampleid_chars = (('.' + digits) + letters)
    correction = 1
    sample_id_field = 'SampleID'
    fields_to_skip = ['BarcodeSequence', 'LinkerPrimerSequence', 'ReversePrimer']
    for curr_field in range(len(header)):
        if (header[curr_field] in fields_to_skip):
            continue
        if (header[curr_field] == sample_id_field):
            valid_chars = allowed_sampleid_chars
        else:
            valid_chars = allowed_data_field_chars
        for curr_data in range(len(mapping_data)):
            curr_cell = mapping_data[curr_data][curr_field].replace('\n', '')
            for curr_char in curr_cell:
                if (curr_char not in valid_chars):
                    warnings.append(('Invalid characters found in %s DCTB %d,%d' % (mapping_data[curr_data][curr_field].replace('\n', ''), (curr_data + correction), curr_field)))
                    break
    return warnings
[ "def", "check_chars_data_fields", "(", "header", ",", "mapping_data", ",", "warnings", ")", ":", "allowed_data_field_chars", "=", "(", "(", "'+-%./ :,;_'", "+", "digits", ")", "+", "letters", ")", "allowed_sampleid_chars", "=", "(", "(", "'.'", "+", "digits", ")", "+", "letters", ")", "correction", "=", "1", "sample_id_field", "=", "'SampleID'", "fields_to_skip", "=", "[", "'BarcodeSequence'", ",", "'LinkerPrimerSequence'", ",", "'ReversePrimer'", "]", "for", "curr_field", "in", "range", "(", "len", "(", "header", ")", ")", ":", "if", "(", "header", "[", "curr_field", "]", "in", "fields_to_skip", ")", ":", "continue", "if", "(", "header", "[", "curr_field", "]", "==", "sample_id_field", ")", ":", "valid_chars", "=", "allowed_sampleid_chars", "else", ":", "valid_chars", "=", "allowed_data_field_chars", "for", "curr_data", "in", "range", "(", "len", "(", "mapping_data", ")", ")", ":", "curr_cell", "=", "mapping_data", "[", "curr_data", "]", "[", "curr_field", "]", ".", "replace", "(", "'\\n'", ",", "''", ")", "for", "curr_char", "in", "curr_cell", ":", "if", "(", "curr_char", "not", "in", "valid_chars", ")", ":", "warnings", ".", "append", "(", "(", "'Invalid characters found in %s DCTB %d,%d'", "%", "(", "mapping_data", "[", "curr_data", "]", "[", "curr_field", "]", ".", "replace", "(", "'\\n'", ",", "''", ")", ",", "(", "curr_data", "+", "correction", ")", ",", "curr_field", ")", ")", ")", "break", "return", "warnings" ]
checks for valid sampleid and other data field characters header: list of header strings mapping_data: list of lists of raw metadata mapping file data warnings: list of warnings .
train
false
16,587
@task
@timed
def install_python_prereqs():
    if no_prereq_install():
        print NO_PREREQ_MESSAGE
        return
    uninstall_python_packages()
    files_to_fingerprint = list(PYTHON_REQ_FILES)
    files_to_fingerprint.append(sysconfig.get_python_lib())
    src_dir = os.path.join(sys.prefix, 'src')
    if os.path.isdir(src_dir):
        files_to_fingerprint.append(src_dir)
    this_file = __file__
    if this_file.endswith('.pyc'):
        this_file = this_file[:(-1)]
    files_to_fingerprint.append(this_file)
    prereq_cache('Python prereqs', files_to_fingerprint, python_prereqs_installation)
[ "@", "task", "@", "timed", "def", "install_python_prereqs", "(", ")", ":", "if", "no_prereq_install", "(", ")", ":", "print", "NO_PREREQ_MESSAGE", "return", "uninstall_python_packages", "(", ")", "files_to_fingerprint", "=", "list", "(", "PYTHON_REQ_FILES", ")", "files_to_fingerprint", ".", "append", "(", "sysconfig", ".", "get_python_lib", "(", ")", ")", "src_dir", "=", "os", ".", "path", ".", "join", "(", "sys", ".", "prefix", ",", "'src'", ")", "if", "os", ".", "path", ".", "isdir", "(", "src_dir", ")", ":", "files_to_fingerprint", ".", "append", "(", "src_dir", ")", "this_file", "=", "__file__", "if", "this_file", ".", "endswith", "(", "'.pyc'", ")", ":", "this_file", "=", "this_file", "[", ":", "(", "-", "1", ")", "]", "files_to_fingerprint", ".", "append", "(", "this_file", ")", "prereq_cache", "(", "'Python prereqs'", ",", "files_to_fingerprint", ",", "python_prereqs_installation", ")" ]
installs python prerequisites .
train
false
16,588
def signal_committed_filefields(sender, instance, **kwargs):
    for field_name in getattr(instance, '_uncommitted_filefields', ()):
        fieldfile = getattr(instance, field_name)
        if fieldfile:
            signals.saved_file.send_robust(sender=sender, fieldfile=fieldfile)
[ "def", "signal_committed_filefields", "(", "sender", ",", "instance", ",", "**", "kwargs", ")", ":", "for", "field_name", "in", "getattr", "(", "instance", ",", "'_uncommitted_filefields'", ",", "(", ")", ")", ":", "fieldfile", "=", "getattr", "(", "instance", ",", "field_name", ")", "if", "fieldfile", ":", "signals", ".", "saved_file", ".", "send_robust", "(", "sender", "=", "sender", ",", "fieldfile", "=", "fieldfile", ")" ]
a post_save signal handler which sends a signal for each filefield that was committed this save .
train
true
16,589
def get_sort_params(input_params, default_key='created_at', default_dir='desc'):
    params = input_params.copy()
    sort_keys = []
    sort_dirs = []
    while ('sort_key' in params):
        sort_keys.append(params.pop('sort_key').strip())
    while ('sort_dir' in params):
        sort_dirs.append(params.pop('sort_dir').strip())
    if ((len(sort_keys) == 0) and default_key):
        sort_keys.append(default_key)
    if ((len(sort_dirs) == 0) and default_dir):
        sort_dirs.append(default_dir)
    return (sort_keys, sort_dirs)
[ "def", "get_sort_params", "(", "input_params", ",", "default_key", "=", "'created_at'", ",", "default_dir", "=", "'desc'", ")", ":", "params", "=", "input_params", ".", "copy", "(", ")", "sort_keys", "=", "[", "]", "sort_dirs", "=", "[", "]", "while", "(", "'sort_key'", "in", "params", ")", ":", "sort_keys", ".", "append", "(", "params", ".", "pop", "(", "'sort_key'", ")", ".", "strip", "(", ")", ")", "while", "(", "'sort_dir'", "in", "params", ")", ":", "sort_dirs", ".", "append", "(", "params", ".", "pop", "(", "'sort_dir'", ")", ".", "strip", "(", ")", ")", "if", "(", "(", "len", "(", "sort_keys", ")", "==", "0", ")", "and", "default_key", ")", ":", "sort_keys", ".", "append", "(", "default_key", ")", "if", "(", "(", "len", "(", "sort_dirs", ")", "==", "0", ")", "and", "default_dir", ")", ":", "sort_dirs", ".", "append", "(", "default_dir", ")", "return", "(", "sort_keys", ",", "sort_dirs", ")" ]
retrieves sort keys/directions parameters .
train
false
16,590
def test_serie_config():
    chart = Line()
    chart.add('1', s1, stroke=False)
    chart.add('2', s2)
    q = chart.render_pyquery()
    assert (len(q('.serie-0 .line')) == 0)
    assert (len(q('.serie-1 .line')) == 1)
    assert (len(q('.serie-0 .dot')) == 5)
    assert (len(q('.serie-1 .dot')) == 6)
[ "def", "test_serie_config", "(", ")", ":", "chart", "=", "Line", "(", ")", "chart", ".", "add", "(", "'1'", ",", "s1", ",", "stroke", "=", "False", ")", "chart", ".", "add", "(", "'2'", ",", "s2", ")", "q", "=", "chart", ".", "render_pyquery", "(", ")", "assert", "(", "len", "(", "q", "(", "'.serie-0 .line'", ")", ")", "==", "0", ")", "assert", "(", "len", "(", "q", "(", "'.serie-1 .line'", ")", ")", "==", "1", ")", "assert", "(", "len", "(", "q", "(", "'.serie-0 .dot'", ")", ")", "==", "5", ")", "assert", "(", "len", "(", "q", "(", "'.serie-1 .dot'", ")", ")", "==", "6", ")" ]
test per serie configuration .
train
false
16,591
def cifs(registry, xml_parent, data):
    console_prefix = 'CIFS: '
    plugin_tag = 'jenkins.plugins.publish__over__cifs.CifsPublisherPlugin'
    publisher_tag = 'jenkins.plugins.publish__over__cifs.CifsPublisher'
    transfer_tag = 'jenkins.plugins.publish__over__cifs.CifsTransfer'
    plugin_reference_tag = 'jenkins.plugins.publish_over_cifs.CifsPublisherPlugin'
    base_publish_over(xml_parent, data, console_prefix, plugin_tag, publisher_tag, transfer_tag, plugin_reference_tag)
[ "def", "cifs", "(", "registry", ",", "xml_parent", ",", "data", ")", ":", "console_prefix", "=", "'CIFS: '", "plugin_tag", "=", "'jenkins.plugins.publish__over__cifs.CifsPublisherPlugin'", "publisher_tag", "=", "'jenkins.plugins.publish__over__cifs.CifsPublisher'", "transfer_tag", "=", "'jenkins.plugins.publish__over__cifs.CifsTransfer'", "plugin_reference_tag", "=", "'jenkins.plugins.publish_over_cifs.CifsPublisherPlugin'", "base_publish_over", "(", "xml_parent", ",", "data", ",", "console_prefix", ",", "plugin_tag", ",", "publisher_tag", ",", "transfer_tag", ",", "plugin_reference_tag", ")" ]
yaml: cifs upload files via cifs .
train
false
16,593
def get_metadata_from_file(source_path, config=None, lang=None):
    try:
        if (lang and config):
            source_path = get_translation_candidate(config, source_path, lang)
        elif lang:
            source_path += (u'.' + lang)
        with io.open(source_path, u'r', encoding=u'utf-8-sig') as meta_file:
            meta_data = [x.strip() for x in meta_file.readlines()]
        return _get_metadata_from_file(meta_data)
    except (UnicodeDecodeError, UnicodeEncodeError):
        raise ValueError(u'Error reading {0}: Nikola only supports UTF-8 files'.format(source_path))
    except Exception:
        return {}
[ "def", "get_metadata_from_file", "(", "source_path", ",", "config", "=", "None", ",", "lang", "=", "None", ")", ":", "try", ":", "if", "(", "lang", "and", "config", ")", ":", "source_path", "=", "get_translation_candidate", "(", "config", ",", "source_path", ",", "lang", ")", "elif", "lang", ":", "source_path", "+=", "(", "u'.'", "+", "lang", ")", "with", "io", ".", "open", "(", "source_path", ",", "u'r'", ",", "encoding", "=", "u'utf-8-sig'", ")", "as", "meta_file", ":", "meta_data", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "meta_file", ".", "readlines", "(", ")", "]", "return", "_get_metadata_from_file", "(", "meta_data", ")", "except", "(", "UnicodeDecodeError", ",", "UnicodeEncodeError", ")", ":", "raise", "ValueError", "(", "u'Error reading {0}: Nikola only supports UTF-8 files'", ".", "format", "(", "source_path", ")", ")", "except", "Exception", ":", "return", "{", "}" ]
extract metadata from the file itself .
train
false
16,594
def register_date_typecasters(connection):
    def cast_date(value, cursor):
        return value
    cursor = connection.cursor()
    cursor.execute('SELECT NULL::date')
    date_oid = cursor.description[0][1]
    cursor.execute('SELECT NULL::timestamp')
    timestamp_oid = cursor.description[0][1]
    cursor.execute('SELECT NULL::timestamp with time zone')
    timestamptz_oid = cursor.description[0][1]
    oids = (date_oid, timestamp_oid, timestamptz_oid)
    new_type = psycopg2.extensions.new_type(oids, 'DATE', cast_date)
    psycopg2.extensions.register_type(new_type)
[ "def", "register_date_typecasters", "(", "connection", ")", ":", "def", "cast_date", "(", "value", ",", "cursor", ")", ":", "return", "value", "cursor", "=", "connection", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "'SELECT NULL::date'", ")", "date_oid", "=", "cursor", ".", "description", "[", "0", "]", "[", "1", "]", "cursor", ".", "execute", "(", "'SELECT NULL::timestamp'", ")", "timestamp_oid", "=", "cursor", ".", "description", "[", "0", "]", "[", "1", "]", "cursor", ".", "execute", "(", "'SELECT NULL::timestamp with time zone'", ")", "timestamptz_oid", "=", "cursor", ".", "description", "[", "0", "]", "[", "1", "]", "oids", "=", "(", "date_oid", ",", "timestamp_oid", ",", "timestamptz_oid", ")", "new_type", "=", "psycopg2", ".", "extensions", ".", "new_type", "(", "oids", ",", "'DATE'", ",", "cast_date", ")", "psycopg2", ".", "extensions", ".", "register_type", "(", "new_type", ")" ]
casts date and timestamp values to string .
train
false
16,595
@utils.template_doc(**get_config_file())
def mpl_to_plotly(fig, resize=False, strip_style=False, verbose=False):
    if matplotlylib:
        renderer = matplotlylib.PlotlyRenderer()
        matplotlylib.Exporter(renderer).run(fig)
        if resize:
            renderer.resize()
        if strip_style:
            renderer.strip_style()
        if verbose:
            print renderer.msg
        return renderer.plotly_fig
    else:
        warnings.warn("To use Plotly's matplotlylib functionality, you'll need to have matplotlib successfully installed with all of its dependencies. You're getting this error because matplotlib or one of its dependencies doesn't seem to be installed correctly.")
[ "@", "utils", ".", "template_doc", "(", "**", "get_config_file", "(", ")", ")", "def", "mpl_to_plotly", "(", "fig", ",", "resize", "=", "False", ",", "strip_style", "=", "False", ",", "verbose", "=", "False", ")", ":", "if", "matplotlylib", ":", "renderer", "=", "matplotlylib", ".", "PlotlyRenderer", "(", ")", "matplotlylib", ".", "Exporter", "(", "renderer", ")", ".", "run", "(", "fig", ")", "if", "resize", ":", "renderer", ".", "resize", "(", ")", "if", "strip_style", ":", "renderer", ".", "strip_style", "(", ")", "if", "verbose", ":", "print", "renderer", ".", "msg", "return", "renderer", ".", "plotly_fig", "else", ":", "warnings", ".", "warn", "(", "\"To use Plotly's matplotlylib functionality, you'll need to have matplotlib successfully installed with all of its dependencies. You're getting this error because matplotlib or one of its dependencies doesn't seem to be installed correctly.\"", ")" ]
convert a matplotlib figure to plotly dictionary and send .
train
false
16,596
def start_webserver_any_free_port(ip, webroot, handler=WebHandler):
    web_server = HTTPServer((ip, 0), webroot, handler)
    server_thread = threading.Thread(target=web_server.serve_forever)
    server_thread.name = 'WebServer'
    server_thread.daemon = True
    server_thread.start()
    web_server.wait_for_start()
    return (server_thread, web_server.get_port())
[ "def", "start_webserver_any_free_port", "(", "ip", ",", "webroot", ",", "handler", "=", "WebHandler", ")", ":", "web_server", "=", "HTTPServer", "(", "(", "ip", ",", "0", ")", ",", "webroot", ",", "handler", ")", "server_thread", "=", "threading", ".", "Thread", "(", "target", "=", "web_server", ".", "serve_forever", ")", "server_thread", ".", "name", "=", "'WebServer'", "server_thread", ".", "daemon", "=", "True", "server_thread", ".", "start", "(", ")", "web_server", ".", "wait_for_start", "(", ")", "return", "(", "server_thread", ",", "web_server", ".", "get_port", "(", ")", ")" ]
create a http server daemon in any free port available .
train
false
16,597
@must_have_permission(ADMIN)
@must_be_branched_from_node
def get_draft_registration(auth, node, draft, *args, **kwargs):
    return (serialize_draft_registration(draft, auth), http.OK)
[ "@", "must_have_permission", "(", "ADMIN", ")", "@", "must_be_branched_from_node", "def", "get_draft_registration", "(", "auth", ",", "node", ",", "draft", ",", "*", "args", ",", "**", "kwargs", ")", ":", "return", "(", "serialize_draft_registration", "(", "draft", ",", "auth", ")", ",", "http", ".", "OK", ")" ]
return a single draft registration :return: serialized draft registration :rtype: dict .
train
false
16,598
def compact(vertices, indices, tolerance=0.001):
    n = len(vertices)
    V = np.zeros(n, dtype=[('pos', np.float32, 3)])
    V['pos'][:, 0] = vertices[:, 0]
    V['pos'][:, 1] = vertices[:, 1]
    V['pos'][:, 2] = vertices[:, 2]
    epsilon = 0.001
    decimals = int((np.log(epsilon) / np.log((1 / 10.0))))
    V_ = np.zeros_like(V)
    X = V['pos'][:, 0].round(decimals=decimals)
    X[np.where((abs(X) < epsilon))] = 0
    V_['pos'][:, 0] = X
    Y = V['pos'][:, 1].round(decimals=decimals)
    Y[np.where((abs(Y) < epsilon))] = 0
    V_['pos'][:, 1] = Y
    Z = V['pos'][:, 2].round(decimals=decimals)
    Z[np.where((abs(Z) < epsilon))] = 0
    V_['pos'][:, 2] = Z
    (U, RI) = np.unique(V_, return_inverse=True)
    indices = indices.ravel()
    I_ = indices.copy().ravel()
    for i in range(len(indices)):
        I_[i] = RI[indices[i]]
    I_ = I_.reshape((len(indices) / 3), 3)
    return (U.view(np.float32).reshape(len(U), 3), I_, RI)
[ "def", "compact", "(", "vertices", ",", "indices", ",", "tolerance", "=", "0.001", ")", ":", "n", "=", "len", "(", "vertices", ")", "V", "=", "np", ".", "zeros", "(", "n", ",", "dtype", "=", "[", "(", "'pos'", ",", "np", ".", "float32", ",", "3", ")", "]", ")", "V", "[", "'pos'", "]", "[", ":", ",", "0", "]", "=", "vertices", "[", ":", ",", "0", "]", "V", "[", "'pos'", "]", "[", ":", ",", "1", "]", "=", "vertices", "[", ":", ",", "1", "]", "V", "[", "'pos'", "]", "[", ":", ",", "2", "]", "=", "vertices", "[", ":", ",", "2", "]", "epsilon", "=", "0.001", "decimals", "=", "int", "(", "(", "np", ".", "log", "(", "epsilon", ")", "/", "np", ".", "log", "(", "(", "1", "/", "10.0", ")", ")", ")", ")", "V_", "=", "np", ".", "zeros_like", "(", "V", ")", "X", "=", "V", "[", "'pos'", "]", "[", ":", ",", "0", "]", ".", "round", "(", "decimals", "=", "decimals", ")", "X", "[", "np", ".", "where", "(", "(", "abs", "(", "X", ")", "<", "epsilon", ")", ")", "]", "=", "0", "V_", "[", "'pos'", "]", "[", ":", ",", "0", "]", "=", "X", "Y", "=", "V", "[", "'pos'", "]", "[", ":", ",", "1", "]", ".", "round", "(", "decimals", "=", "decimals", ")", "Y", "[", "np", ".", "where", "(", "(", "abs", "(", "Y", ")", "<", "epsilon", ")", ")", "]", "=", "0", "V_", "[", "'pos'", "]", "[", ":", ",", "1", "]", "=", "Y", "Z", "=", "V", "[", "'pos'", "]", "[", ":", ",", "2", "]", ".", "round", "(", "decimals", "=", "decimals", ")", "Z", "[", "np", ".", "where", "(", "(", "abs", "(", "Z", ")", "<", "epsilon", ")", ")", "]", "=", "0", "V_", "[", "'pos'", "]", "[", ":", ",", "2", "]", "=", "Z", "(", "U", ",", "RI", ")", "=", "np", ".", "unique", "(", "V_", ",", "return_inverse", "=", "True", ")", "indices", "=", "indices", ".", "ravel", "(", ")", "I_", "=", "indices", ".", "copy", "(", ")", ".", "ravel", "(", ")", "for", "i", "in", "range", "(", "len", "(", "indices", ")", ")", ":", "I_", "[", "i", "]", "=", "RI", "[", "indices", "[", "i", "]", "]", "I_", "=", "I_", ".", "reshape", "(", "(", "len", "(", "indices", ")", "/", "3", ")", ",", "3", ")", "return", "(", "U", ".", "view", "(", "np", ".", "float32", ")", ".", "reshape", "(", "len", "(", "U", ")", ",", "3", ")", ",", "I_", ",", "RI", ")" ]
compact iterable by removing falsy values .
train
true
16,600
def agent_formatter(metric, value, timestamp, tags, hostname, device_name=None, metric_type=None, interval=None):
    attributes = {}
    if tags:
        attributes['tags'] = list(tags)
    if hostname:
        attributes['hostname'] = hostname
    if device_name:
        attributes['device_name'] = device_name
    if metric_type:
        attributes['type'] = metric_type
    if interval:
        pass
    if attributes:
        return (metric, int(timestamp), value, attributes)
    return (metric, int(timestamp), value)
[ "def", "agent_formatter", "(", "metric", ",", "value", ",", "timestamp", ",", "tags", ",", "hostname", ",", "device_name", "=", "None", ",", "metric_type", "=", "None", ",", "interval", "=", "None", ")", ":", "attributes", "=", "{", "}", "if", "tags", ":", "attributes", "[", "'tags'", "]", "=", "list", "(", "tags", ")", "if", "hostname", ":", "attributes", "[", "'hostname'", "]", "=", "hostname", "if", "device_name", ":", "attributes", "[", "'device_name'", "]", "=", "device_name", "if", "metric_type", ":", "attributes", "[", "'type'", "]", "=", "metric_type", "if", "interval", ":", "pass", "if", "attributes", ":", "return", "(", "metric", ",", "int", "(", "timestamp", ")", ",", "value", ",", "attributes", ")", "return", "(", "metric", ",", "int", "(", "timestamp", ")", ",", "value", ")" ]
formats metrics coming from the metricsaggregator .
train
false
16,601
def reload_(name): term(name)
[ "def", "reload_", "(", "name", ")", ":", "term", "(", "name", ")" ]
refreshes config files by calling service reload .
train
false
16,602
def format_timezone(offset, unnecessary_negative_timezone=False):
    if ((offset % 60) != 0):
        raise ValueError('Unable to handle non-minute offset.')
    if ((offset < 0) or unnecessary_negative_timezone):
        sign = '-'
        offset = (- offset)
    else:
        sign = '+'
    return ('%c%02d%02d' % (sign, (offset / 3600), ((offset / 60) % 60))).encode('ascii')
[ "def", "format_timezone", "(", "offset", ",", "unnecessary_negative_timezone", "=", "False", ")", ":", "if", "(", "(", "offset", "%", "60", ")", "!=", "0", ")", ":", "raise", "ValueError", "(", "'Unable to handle non-minute offset.'", ")", "if", "(", "(", "offset", "<", "0", ")", "or", "unnecessary_negative_timezone", ")", ":", "sign", "=", "'-'", "offset", "=", "(", "-", "offset", ")", "else", ":", "sign", "=", "'+'", "return", "(", "'%c%02d%02d'", "%", "(", "sign", ",", "(", "offset", "/", "3600", ")", ",", "(", "(", "offset", "/", "60", ")", "%", "60", ")", ")", ")", ".", "encode", "(", "'ascii'", ")" ]
format a timezone for git serialization .
train
false
16,603
def get_escalation_policies(profile='pagerduty', subdomain=None, api_key=None): return _list_items('escalation_policies', 'id', profile=profile, subdomain=subdomain, api_key=api_key)
[ "def", "get_escalation_policies", "(", "profile", "=", "'pagerduty'", ",", "subdomain", "=", "None", ",", "api_key", "=", "None", ")", ":", "return", "_list_items", "(", "'escalation_policies'", ",", "'id'", ",", "profile", "=", "profile", ",", "subdomain", "=", "subdomain", ",", "api_key", "=", "api_key", ")" ]
list escalation_policies belonging to this account cli example: salt myminion pagerduty .
train
true
16,604
def test_default_order():
    table = PersonTable(PersonProxy.objects.all())
    Person.objects.create(first_name='Foo', last_name='Bar')
    Person.objects.create(first_name='Bradley', last_name='Ayers')
    table.data.order_by([])
    assert (list(table.rows[0])[1] == 'Ayers')
[ "def", "test_default_order", "(", ")", ":", "table", "=", "PersonTable", "(", "PersonProxy", ".", "objects", ".", "all", "(", ")", ")", "Person", ".", "objects", ".", "create", "(", "first_name", "=", "'Foo'", ",", "last_name", "=", "'Bar'", ")", "Person", ".", "objects", ".", "create", "(", "first_name", "=", "'Bradley'", ",", "last_name", "=", "'Ayers'", ")", "table", ".", "data", ".", "order_by", "(", "[", "]", ")", "assert", "(", "list", "(", "table", ".", "rows", "[", "0", "]", ")", "[", "1", "]", "==", "'Ayers'", ")" ]
if orderable=false .
train
false
16,606
def nutation_matrix(epoch):
    (epsa, dpsi, deps) = nutation_components2000B(epoch.jd)
    return matrix_product(rotation_matrix((- (epsa + deps)), u'x', False), rotation_matrix((- dpsi), u'z', False), rotation_matrix(epsa, u'x', False))
[ "def", "nutation_matrix", "(", "epoch", ")", ":", "(", "epsa", ",", "dpsi", ",", "deps", ")", "=", "nutation_components2000B", "(", "epoch", ".", "jd", ")", "return", "matrix_product", "(", "rotation_matrix", "(", "(", "-", "(", "epsa", "+", "deps", ")", ")", ",", "u'x'", ",", "False", ")", ",", "rotation_matrix", "(", "(", "-", "dpsi", ")", ",", "u'z'", ",", "False", ")", ",", "rotation_matrix", "(", "epsa", ",", "u'x'", ",", "False", ")", ")" ]
nutation matrix generated from nutation components .
train
false
16,607
def getComplexByDictionary(dictionary, valueComplex):
    if ('x' in dictionary):
        valueComplex = complex(euclidean.getFloatFromValue(dictionary['x']), valueComplex.imag)
    if ('y' in dictionary):
        valueComplex = complex(valueComplex.real, euclidean.getFloatFromValue(dictionary['y']))
    return valueComplex
[ "def", "getComplexByDictionary", "(", "dictionary", ",", "valueComplex", ")", ":", "if", "(", "'x'", "in", "dictionary", ")", ":", "valueComplex", "=", "complex", "(", "euclidean", ".", "getFloatFromValue", "(", "dictionary", "[", "'x'", "]", ")", ",", "valueComplex", ".", "imag", ")", "if", "(", "'y'", "in", "dictionary", ")", ":", "valueComplex", "=", "complex", "(", "valueComplex", ".", "real", ",", "euclidean", ".", "getFloatFromValue", "(", "dictionary", "[", "'y'", "]", ")", ")", "return", "valueComplex" ]
get complex by dictionary .
train
false
16,608
def is_primitive_root(r, p, factors, exponents):
    if (p != (prod(factors, exponents) + 1)):
        return False
    for f in factors:
        (q, control) = divmod((p - 1), f)
        if (control != 0):
            return False
        if (pow(r, q, p) == 1):
            return False
    return True
[ "def", "is_primitive_root", "(", "r", ",", "p", ",", "factors", ",", "exponents", ")", ":", "if", "(", "p", "!=", "(", "prod", "(", "factors", ",", "exponents", ")", "+", "1", ")", ")", ":", "return", "False", "for", "f", "in", "factors", ":", "(", "q", ",", "control", ")", "=", "divmod", "(", "(", "p", "-", "1", ")", ",", "f", ")", "if", "(", "control", "!=", "0", ")", ":", "return", "False", "if", "(", "pow", "(", "r", ",", "q", ",", "p", ")", "==", "1", ")", ":", "return", "False", "return", "True" ]
returns true if a is a primitive root of p a is said to be the primitive root of p if gcd == 1 and totient(p) is the smallest positive number s .
train
false
16,611
def bind_parameters(obj, substitutions, fields=None):
    if (fields is None):
        fields = [k for k in obj.__dict__.keys() if (not k.startswith('_'))]
    for field in fields:
        data = getattr(obj, field)
        if isinstance(data, basestring):
            new_data = Template(data).safe_substitute(substitutions)
            if (new_data != data):
                LOG.debug(('Parameterized %s -> %s' % (repr(data), repr(new_data))))
                setattr(obj, field, new_data)
[ "def", "bind_parameters", "(", "obj", ",", "substitutions", ",", "fields", "=", "None", ")", ":", "if", "(", "fields", "is", "None", ")", ":", "fields", "=", "[", "k", "for", "k", "in", "obj", ".", "__dict__", ".", "keys", "(", ")", "if", "(", "not", "k", ".", "startswith", "(", "'_'", ")", ")", "]", "for", "field", "in", "fields", ":", "data", "=", "getattr", "(", "obj", ",", "field", ")", "if", "isinstance", "(", "data", ",", "basestring", ")", ":", "new_data", "=", "Template", "(", "data", ")", ".", "safe_substitute", "(", "substitutions", ")", "if", "(", "new_data", "!=", "data", ")", ":", "LOG", ".", "debug", "(", "(", "'Parameterized %s -> %s'", "%", "(", "repr", "(", "data", ")", ",", "repr", "(", "new_data", ")", ")", ")", ")", "setattr", "(", "obj", ",", "field", ",", "new_data", ")" ]
bind the parameters to the given fields .
train
false
16,612
@contextlib.contextmanager
def disabled_excepthook():
    old_excepthook = sys.excepthook
    sys.excepthook = sys.__excepthook__
    try:
        (yield)
    finally:
        if (sys.excepthook is sys.__excepthook__):
            sys.excepthook = old_excepthook
[ "@", "contextlib", ".", "contextmanager", "def", "disabled_excepthook", "(", ")", ":", "old_excepthook", "=", "sys", ".", "excepthook", "sys", ".", "excepthook", "=", "sys", ".", "__excepthook__", "try", ":", "(", "yield", ")", "finally", ":", "if", "(", "sys", ".", "excepthook", "is", "sys", ".", "__excepthook__", ")", ":", "sys", ".", "excepthook", "=", "old_excepthook" ]
run code with the exception hook temporarily disabled .
train
false
16,616
def _filterNames(names):
    names = [n for n in names if (n not in EXCLUDE_NAMES)]
    for pattern in EXCLUDE_PATTERNS:
        names = [n for n in names if ((not fnmatch.fnmatch(n, pattern)) and (not n.endswith('.py')))]
    return names
[ "def", "_filterNames", "(", "names", ")", ":", "names", "=", "[", "n", "for", "n", "in", "names", "if", "(", "n", "not", "in", "EXCLUDE_NAMES", ")", "]", "for", "pattern", "in", "EXCLUDE_PATTERNS", ":", "names", "=", "[", "n", "for", "n", "in", "names", "if", "(", "(", "not", "fnmatch", ".", "fnmatch", "(", "n", ",", "pattern", ")", ")", "and", "(", "not", "n", ".", "endswith", "(", "'.py'", ")", ")", ")", "]", "return", "names" ]
given a list of file names .
train
false
16,617
def get_gunicorn_workers(): return {'gunicorn_workers': multiprocessing.cpu_count()}
[ "def", "get_gunicorn_workers", "(", ")", ":", "return", "{", "'gunicorn_workers'", ":", "multiprocessing", ".", "cpu_count", "(", ")", "}" ]
this function will return the maximum workers that can be started depending upon number of cpus present on the machine .
train
false
16,619
def islink(p): return _false
[ "def", "islink", "(", "p", ")", ":", "return", "_false" ]
test whether a path is a symbolic link .
train
false
16,621
def filter_label_2(context, label): return False
[ "def", "filter_label_2", "(", "context", ",", "label", ")", ":", "return", "False" ]
test filter label 2 .
train
false
16,622
def encode_csr(csr): return encode_b64jose(OpenSSL.crypto.dump_certificate_request(OpenSSL.crypto.FILETYPE_ASN1, csr.wrapped))
[ "def", "encode_csr", "(", "csr", ")", ":", "return", "encode_b64jose", "(", "OpenSSL", ".", "crypto", ".", "dump_certificate_request", "(", "OpenSSL", ".", "crypto", ".", "FILETYPE_ASN1", ",", "csr", ".", "wrapped", ")", ")" ]
encode csr as jose base-64 der .
train
false
16,623
def _get_so_name(filename):
    cmd = ['objdump', '-p', filename]
    m = re.search('\\s+SONAME\\s+([^\\s]+)', compat.exec_command(*cmd))
    return m.group(1)
[ "def", "_get_so_name", "(", "filename", ")", ":", "cmd", "=", "[", "'objdump'", ",", "'-p'", ",", "filename", "]", "m", "=", "re", ".", "search", "(", "'\\\\s+SONAME\\\\s+([^\\\\s]+)'", ",", "compat", ".", "exec_command", "(", "*", "cmd", ")", ")", "return", "m", ".", "group", "(", "1", ")" ]
return the soname of a library .
train
false
16,624
def test_unit_multiplication_with_string():
    u1 = u.cm
    us = u'kg'
    assert ((us * u1) == (u.Unit(us) * u1))
    assert ((u1 * us) == (u1 * u.Unit(us)))
[ "def", "test_unit_multiplication_with_string", "(", ")", ":", "u1", "=", "u", ".", "cm", "us", "=", "u'kg'", "assert", "(", "(", "us", "*", "u1", ")", "==", "(", "u", ".", "Unit", "(", "us", ")", "*", "u1", ")", ")", "assert", "(", "(", "u1", "*", "us", ")", "==", "(", "u1", "*", "u", ".", "Unit", "(", "us", ")", ")", ")" ]
check that multiplication with strings produces the correct unit .
train
false
16,627
def harness(testdir_from_ns={None: os.curdir}, argv=sys.argv, setup_func=None, default_tags=None):
    if (not logging.root.handlers):
        logging.basicConfig()
    try:
        (log_level, action, tags) = _parse_opts(argv[1:], (default_tags or []))
    except getopt.error:
        (_, ex, _) = sys.exc_info()
        log.error((str(ex) + " (did you need a '--' before a '-TAG' argument?)"))
        return 1
    log.setLevel(log_level)
    if (action == 'help'):
        print __doc__
        return 0
    if (action == 'list'):
        return list_tests(testdir_from_ns, tags)
    elif (action == 'test'):
        result = test(testdir_from_ns, tags, setup_func=setup_func)
        if (result is None):
            return None
        return (len(result.errors) + len(result.failures))
    else:
        raise TestError(("unexpected action/mode: '%s'" % action))
[ "def", "harness", "(", "testdir_from_ns", "=", "{", "None", ":", "os", ".", "curdir", "}", ",", "argv", "=", "sys", ".", "argv", ",", "setup_func", "=", "None", ",", "default_tags", "=", "None", ")", ":", "if", "(", "not", "logging", ".", "root", ".", "handlers", ")", ":", "logging", ".", "basicConfig", "(", ")", "try", ":", "(", "log_level", ",", "action", ",", "tags", ")", "=", "_parse_opts", "(", "argv", "[", "1", ":", "]", ",", "(", "default_tags", "or", "[", "]", ")", ")", "except", "getopt", ".", "error", ":", "(", "_", ",", "ex", ",", "_", ")", "=", "sys", ".", "exc_info", "(", ")", "log", ".", "error", "(", "(", "str", "(", "ex", ")", "+", "\" (did you need a '--' before a '-TAG' argument?)\"", ")", ")", "return", "1", "log", ".", "setLevel", "(", "log_level", ")", "if", "(", "action", "==", "'help'", ")", ":", "print", "__doc__", "return", "0", "if", "(", "action", "==", "'list'", ")", ":", "return", "list_tests", "(", "testdir_from_ns", ",", "tags", ")", "elif", "(", "action", "==", "'test'", ")", ":", "result", "=", "test", "(", "testdir_from_ns", ",", "tags", ",", "setup_func", "=", "setup_func", ")", "if", "(", "result", "is", "None", ")", ":", "return", "None", "return", "(", "len", "(", "result", ".", "errors", ")", "+", "len", "(", "result", ".", "failures", ")", ")", "else", ":", "raise", "TestError", "(", "(", "\"unexpected action/mode: '%s'\"", "%", "action", ")", ")" ]
convenience mainline for a test harness "test .
train
false
16,628
def guess_content_type(filename, default='application/octet-stream'):
    if filename:
        return (mimetypes.guess_type(filename)[0] or default)
    return default
[ "def", "guess_content_type", "(", "filename", ",", "default", "=", "'application/octet-stream'", ")", ":", "if", "filename", ":", "return", "(", "mimetypes", ".", "guess_type", "(", "filename", ")", "[", "0", "]", "or", "default", ")", "return", "default" ]
given a filename .
train
false
16,629
def get_attach_link(doc, print_format):
    return frappe.get_template(u'templates/emails/print_link.html').render({u'url': get_url(), u'doctype': doc.reference_doctype, u'name': doc.reference_name, u'print_format': print_format, u'key': doc.get_parent_doc().get_signature()})
[ "def", "get_attach_link", "(", "doc", ",", "print_format", ")", ":", "return", "frappe", ".", "get_template", "(", "u'templates/emails/print_link.html'", ")", ".", "render", "(", "{", "u'url'", ":", "get_url", "(", ")", ",", "u'doctype'", ":", "doc", ".", "reference_doctype", ",", "u'name'", ":", "doc", ".", "reference_name", ",", "u'print_format'", ":", "print_format", ",", "u'key'", ":", "doc", ".", "get_parent_doc", "(", ")", ".", "get_signature", "(", ")", "}", ")" ]
returns public link for the attachment via templates/emails/print_link .
train
false
16,630
def libvlc_media_parse_with_options(p_md, parse_flag):
    f = (_Cfunctions.get('libvlc_media_parse_with_options', None) or _Cfunction('libvlc_media_parse_with_options', ((1,), (1,)), None, ctypes.c_int, Media, MediaParseFlag))
    return f(p_md, parse_flag)
[ "def", "libvlc_media_parse_with_options", "(", "p_md", ",", "parse_flag", ")", ":", "f", "=", "(", "_Cfunctions", ".", "get", "(", "'libvlc_media_parse_with_options'", ",", "None", ")", "or", "_Cfunction", "(", "'libvlc_media_parse_with_options'", ",", "(", "(", "1", ",", ")", ",", "(", "1", ",", ")", ")", ",", "None", ",", "ctypes", ".", "c_int", ",", "Media", ",", "MediaParseFlag", ")", ")", "return", "f", "(", "p_md", ",", "parse_flag", ")" ]
parse the media asynchronously with options .
train
true
16,632
def fill_section(section, acquire_settings, log_printer, bears):
    prel_needed_settings = {}
    for bear in bears:
        needed = bear.get_non_optional_settings()
        for key in needed:
            if (key in prel_needed_settings):
                prel_needed_settings[key].append(bear.name)
            else:
                prel_needed_settings[key] = [needed[key][0], bear.name]
    needed_settings = {}
    for (setting, help_text) in prel_needed_settings.items():
        if (setting not in section):
            needed_settings[setting] = help_text
    if (len(needed_settings) > 0):
        new_vals = acquire_settings(log_printer, needed_settings, section)
        for (setting, help_text) in new_vals.items():
            section.append(Setting(setting, help_text))
    return section
[ "def", "fill_section", "(", "section", ",", "acquire_settings", ",", "log_printer", ",", "bears", ")", ":", "prel_needed_settings", "=", "{", "}", "for", "bear", "in", "bears", ":", "needed", "=", "bear", ".", "get_non_optional_settings", "(", ")", "for", "key", "in", "needed", ":", "if", "(", "key", "in", "prel_needed_settings", ")", ":", "prel_needed_settings", "[", "key", "]", ".", "append", "(", "bear", ".", "name", ")", "else", ":", "prel_needed_settings", "[", "key", "]", "=", "[", "needed", "[", "key", "]", "[", "0", "]", ",", "bear", ".", "name", "]", "needed_settings", "=", "{", "}", "for", "(", "setting", ",", "help_text", ")", "in", "prel_needed_settings", ".", "items", "(", ")", ":", "if", "(", "setting", "not", "in", "section", ")", ":", "needed_settings", "[", "setting", "]", "=", "help_text", "if", "(", "len", "(", "needed_settings", ")", ">", "0", ")", ":", "new_vals", "=", "acquire_settings", "(", "log_printer", ",", "needed_settings", ",", "section", ")", "for", "(", "setting", ",", "help_text", ")", "in", "new_vals", ".", "items", "(", ")", ":", "section", ".", "append", "(", "Setting", "(", "setting", ",", "help_text", ")", ")", "return", "section" ]
retrieves needed settings from given bears and asks the user for missing values .
train
false
16,633
def load_MNIST_images(filename):
    with open(filename, 'r') as f:
        magic = np.fromfile(f, dtype=np.dtype('>i4'), count=1)
        num_images = np.fromfile(f, dtype=np.dtype('>i4'), count=1)
        num_rows = np.fromfile(f, dtype=np.dtype('>i4'), count=1)
        num_cols = np.fromfile(f, dtype=np.dtype('>i4'), count=1)
        images = np.fromfile(f, dtype=np.ubyte)
        images = images.reshape((num_images, (num_rows * num_cols))).transpose()
        images = (images.astype(np.float64) / 255)
        f.close()
    return images
[ "def", "load_MNIST_images", "(", "filename", ")", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "magic", "=", "np", ".", "fromfile", "(", "f", ",", "dtype", "=", "np", ".", "dtype", "(", "'>i4'", ")", ",", "count", "=", "1", ")", "num_images", "=", "np", ".", "fromfile", "(", "f", ",", "dtype", "=", "np", ".", "dtype", "(", "'>i4'", ")", ",", "count", "=", "1", ")", "num_rows", "=", "np", ".", "fromfile", "(", "f", ",", "dtype", "=", "np", ".", "dtype", "(", "'>i4'", ")", ",", "count", "=", "1", ")", "num_cols", "=", "np", ".", "fromfile", "(", "f", ",", "dtype", "=", "np", ".", "dtype", "(", "'>i4'", ")", ",", "count", "=", "1", ")", "images", "=", "np", ".", "fromfile", "(", "f", ",", "dtype", "=", "np", ".", "ubyte", ")", "images", "=", "images", ".", "reshape", "(", "(", "num_images", ",", "(", "num_rows", "*", "num_cols", ")", ")", ")", ".", "transpose", "(", ")", "images", "=", "(", "images", ".", "astype", "(", "np", ".", "float64", ")", "/", "255", ")", "f", ".", "close", "(", ")", "return", "images" ]
returns a 28x28x[number of mnist images] matrix containing the raw mnist images .
train
false
16,634
def teardown_function(function): slogging.configure(**function.snapshot)
[ "def", "teardown_function", "(", "function", ")", ":", "slogging", ".", "configure", "(", "**", "function", ".", "snapshot", ")" ]
stop the home assistant server .
train
false
16,636
def get_frameworks():
    frameworks = [caffe]
    if torch:
        frameworks.append(torch)
    return frameworks
[ "def", "get_frameworks", "(", ")", ":", "frameworks", "=", "[", "caffe", "]", "if", "torch", ":", "frameworks", ".", "append", "(", "torch", ")", "return", "frameworks" ]
return list of all available framework instances there may be more than one instance per framework class .
train
false
16,638
def _group_players_service(service): _apply_service(service, SonosDevice.group_players)
[ "def", "_group_players_service", "(", "service", ")", ":", "_apply_service", "(", "service", ",", "SonosDevice", ".", "group_players", ")" ]
group media players .
train
false
16,641
def _get_volume_create_az_value(instance):
    if CONF.cinder.cross_az_attach:
        return None
    return instance.availability_zone
[ "def", "_get_volume_create_az_value", "(", "instance", ")", ":", "if", "CONF", ".", "cinder", ".", "cross_az_attach", ":", "return", "None", "return", "instance", ".", "availability_zone" ]
determine az to use when creating a volume uses the cinder .
train
false
16,644
def generate_state_space(data):
    state_space = defaultdict(list)
    for (state_a, state_b, cost) in data:
        state_space[state_a].append((state_b, cost))
        state_space[state_b].append((state_a, cost))
    return state_space
[ "def", "generate_state_space", "(", "data", ")", ":", "state_space", "=", "defaultdict", "(", "list", ")", "for", "(", "state_a", ",", "state_b", ",", "cost", ")", "in", "data", ":", "state_space", "[", "state_a", "]", ".", "append", "(", "(", "state_b", ",", "cost", ")", ")", "state_space", "[", "state_b", "]", ".", "append", "(", "(", "state_a", ",", "cost", ")", ")", "return", "state_space" ]
generate our state space graph given a set of possible actions: data = [ .
train
false
16,645
def py_type_name(type_name):
    return {'blob': 'bytes', 'character': 'string', 'double': 'float', 'long': 'integer', 'map': 'dict', 'structure': 'dict', 'timestamp': 'datetime'}.get(type_name, type_name)
[ "def", "py_type_name", "(", "type_name", ")", ":", "return", "{", "'blob'", ":", "'bytes'", ",", "'character'", ":", "'string'", ",", "'double'", ":", "'float'", ",", "'long'", ":", "'integer'", ",", "'map'", ":", "'dict'", ",", "'structure'", ":", "'dict'", ",", "'timestamp'", ":", "'datetime'", "}", ".", "get", "(", "type_name", ",", "type_name", ")" ]
get the python type name for a given model type .
train
false
16,646
def hsvColor(hue, sat=1.0, val=1.0, alpha=1.0):
    c = QtGui.QColor()
    c.setHsvF(hue, sat, val, alpha)
    return c
[ "def", "hsvColor", "(", "hue", ",", "sat", "=", "1.0", ",", "val", "=", "1.0", ",", "alpha", "=", "1.0", ")", ":", "c", "=", "QtGui", ".", "QColor", "(", ")", "c", ".", "setHsvF", "(", "hue", ",", "sat", ",", "val", ",", "alpha", ")", "return", "c" ]
generate a qcolor from hsva values .
train
false
16,648
def output(): return s3_rest_controller()
[ "def", "output", "(", ")", ":", "return", "s3_rest_controller", "(", ")" ]
default output handler for printing protocol .
train
false
16,649
def _raise_typeerror(X):
    input_type = (X.format if sp.issparse(X) else type(X))
    err = ('Expected a CSR or CSC sparse matrix, got %s.' % input_type)
    raise TypeError(err)
[ "def", "_raise_typeerror", "(", "X", ")", ":", "input_type", "=", "(", "X", ".", "format", "if", "sp", ".", "issparse", "(", "X", ")", "else", "type", "(", "X", ")", ")", "err", "=", "(", "'Expected a CSR or CSC sparse matrix, got %s.'", "%", "input_type", ")", "raise", "TypeError", "(", "err", ")" ]
raises a typeerror if x is not a csr or csc matrix .
train
false
16,650
@gen.coroutine
def VerifyAssetId(client, user_id, device_id, prefix_id, asset_id, has_timestamp):
    try:
        asset_name = IdPrefix.GetAssetName(prefix_id).lower()
        if has_timestamp:
            (truncated_ts, embedded_device_id, uniquifier) = DeconstructTimestampAssetId(prefix_id, asset_id)
        else:
            (embedded_device_id, uniquifier) = DeconstructAssetId(prefix_id, asset_id)
    except:
        raise InvalidRequestError(('%s id "%s" does not have a valid format.' % (asset_name, asset_id)))
    if (embedded_device_id != device_id):
        device = (yield gen.Task(Device.Query, client, user_id, embedded_device_id, None, must_exist=False))
        if (device is None):
            raise PermissionError(('User %d and device %d do not have permission to create %s "%s".' % (user_id, device_id, asset_name, asset_id)))
    if (uniquifier.server_id is not None):
        raise PermissionError(('Clients do not have permission to create %s "%s".' % (asset_name, asset_id)))
[ "@", "gen", ".", "coroutine", "def", "VerifyAssetId", "(", "client", ",", "user_id", ",", "device_id", ",", "prefix_id", ",", "asset_id", ",", "has_timestamp", ")", ":", "try", ":", "asset_name", "=", "IdPrefix", ".", "GetAssetName", "(", "prefix_id", ")", ".", "lower", "(", ")", "if", "has_timestamp", ":", "(", "truncated_ts", ",", "embedded_device_id", ",", "uniquifier", ")", "=", "DeconstructTimestampAssetId", "(", "prefix_id", ",", "asset_id", ")", "else", ":", "(", "embedded_device_id", ",", "uniquifier", ")", "=", "DeconstructAssetId", "(", "prefix_id", ",", "asset_id", ")", "except", ":", "raise", "InvalidRequestError", "(", "(", "'%s id \"%s\" does not have a valid format.'", "%", "(", "asset_name", ",", "asset_id", ")", ")", ")", "if", "(", "embedded_device_id", "!=", "device_id", ")", ":", "device", "=", "(", "yield", "gen", ".", "Task", "(", "Device", ".", "Query", ",", "client", ",", "user_id", ",", "embedded_device_id", ",", "None", ",", "must_exist", "=", "False", ")", ")", "if", "(", "device", "is", "None", ")", ":", "raise", "PermissionError", "(", "(", "'User %d and device %d do not have permission to create %s \"%s\".'", "%", "(", "user_id", ",", "device_id", ",", "asset_name", ",", "asset_id", ")", ")", ")", "if", "(", "uniquifier", ".", "server_id", "is", "not", "None", ")", ":", "raise", "PermissionError", "(", "(", "'Clients do not have permission to create %s \"%s\".'", "%", "(", "asset_name", ",", "asset_id", ")", ")", ")" ]
verifies that "asset_id" conforms to the following requirements: 1 .
train
false
16,652
def get_scripts():
    if OS_WINDOWS:
        batpath = os.path.join('bin', 'windows', 'evennia.bat')
        scriptpath = os.path.join(sys.prefix, 'Scripts', 'evennia_launcher.py')
        with open(batpath, 'w') as batfile:
            batfile.write(('@"%s" "%s" %%*' % (sys.executable, scriptpath)))
        return [batpath, os.path.join('bin', 'windows', 'evennia_launcher.py')]
    else:
        return [os.path.join('bin', 'unix', 'evennia')]
[ "def", "get_scripts", "(", ")", ":", "if", "OS_WINDOWS", ":", "batpath", "=", "os", ".", "path", ".", "join", "(", "'bin'", ",", "'windows'", ",", "'evennia.bat'", ")", "scriptpath", "=", "os", ".", "path", ".", "join", "(", "sys", ".", "prefix", ",", "'Scripts'", ",", "'evennia_launcher.py'", ")", "with", "open", "(", "batpath", ",", "'w'", ")", "as", "batfile", ":", "batfile", ".", "write", "(", "(", "'@\"%s\" \"%s\" %%*'", "%", "(", "sys", ".", "executable", ",", "scriptpath", ")", ")", ")", "return", "[", "batpath", ",", "os", ".", "path", ".", "join", "(", "'bin'", ",", "'windows'", ",", "'evennia_launcher.py'", ")", "]", "else", ":", "return", "[", "os", ".", "path", ".", "join", "(", "'bin'", ",", "'unix'", ",", "'evennia'", ")", "]" ]
get custom npm scripts .
train
false
16,654
def aparam():
    style = ctx.db_module.paramstyle
    if (style == 'qmark'):
        return '?'
    elif (style == 'numeric'):
        return ':1'
    elif (style in ['format', 'pyformat']):
        return '%s'
    raise UnknownParamstyle, style
[ "def", "aparam", "(", ")", ":", "style", "=", "ctx", ".", "db_module", ".", "paramstyle", "if", "(", "style", "==", "'qmark'", ")", ":", "return", "'?'", "elif", "(", "style", "==", "'numeric'", ")", ":", "return", "':1'", "elif", "(", "style", "in", "[", "'format'", ",", "'pyformat'", "]", ")", ":", "return", "'%s'", "raise", "UnknownParamstyle", ",", "style" ]
use in a sql string to make a spot for a db value .
train
false
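a minimal usage sketch for the aparam snippet above: sqlite3 stands in for web.py's ctx.db_module (an assumption for illustration; sqlite3 reports paramstyle 'qmark'), and the py2-only raise statement is swapped for a ValueError:

    import sqlite3

    class _Ctx(object):
        pass

    ctx = _Ctx()
    ctx.db_module = sqlite3  # any DB-API module works; sqlite3 uses 'qmark'

    def aparam():
        style = ctx.db_module.paramstyle
        if style == 'qmark':
            return '?'
        elif style == 'numeric':
            return ':1'
        elif style in ['format', 'pyformat']:
            return '%s'
        raise ValueError('unknown paramstyle: %s' % style)

    print('SELECT * FROM users WHERE id = ' + aparam())
    # SELECT * FROM users WHERE id = ?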
16,655
def normalize_classname(classname): return classname.replace(u'/', u'.')
[ "def", "normalize_classname", "(", "classname", ")", ":", "return", "classname", ".", "replace", "(", "u'/'", ",", "u'.'", ")" ]
ensure a dot-separated class name by replacing slashes with dots .
train
false
16,656
def normalize_index(context, builder, idxty, idx): if (isinstance(idxty, types.Array) and (idxty.ndim == 0)): assert isinstance(idxty.dtype, types.Integer) idxary = make_array(idxty)(context, builder, idx) idxval = load_item(context, builder, idxty, idxary.data) return (idxty.dtype, idxval) else: return (idxty, idx)
[ "def", "normalize_index", "(", "context", ",", "builder", ",", "idxty", ",", "idx", ")", ":", "if", "(", "isinstance", "(", "idxty", ",", "types", ".", "Array", ")", "and", "(", "idxty", ".", "ndim", "==", "0", ")", ")", ":", "assert", "isinstance", "(", "idxty", ".", "dtype", ",", "types", ".", "Integer", ")", "idxary", "=", "make_array", "(", "idxty", ")", "(", "context", ",", "builder", ",", "idx", ")", "idxval", "=", "load_item", "(", "context", ",", "builder", ",", "idxty", ",", "idxary", ".", "data", ")", "return", "(", "idxty", ".", "dtype", ",", "idxval", ")", "else", ":", "return", "(", "idxty", ",", "idx", ")" ]
normalize the index type and value .
train
false
16,660
def test_doesnt_fail_on_curly(): ok = False try: assert False, '}' except AssertionError: ok = True Assert(ok)
[ "def", "test_doesnt_fail_on_curly", "(", ")", ":", "ok", "=", "False", "try", ":", "assert", "False", ",", "'}'", "except", "AssertionError", ":", "ok", "=", "True", "Assert", "(", "ok", ")" ]
ensures that asserting a string with a curly brace doesn't choke up the string formatter .
train
false
16,661
@pytest.mark.integration @pytest.mark.parametrize('style', ['visibility: hidden', 'display: none']) def test_invisible(caret_tester, style): caret_tester.js.load('position_caret/invisible.html', style=style) caret_tester.check()
[ "@", "pytest", ".", "mark", ".", "integration", "@", "pytest", ".", "mark", ".", "parametrize", "(", "'style'", ",", "[", "'visibility: hidden'", ",", "'display: none'", "]", ")", "def", "test_invisible", "(", "caret_tester", ",", "style", ")", ":", "caret_tester", ".", "js", ".", "load", "(", "'position_caret/invisible.html'", ",", "style", "=", "style", ")", "caret_tester", ".", "check", "(", ")" ]
test with hidden text elements .
train
false
16,662
def create_project(**kwargs): defaults = {} defaults.update(kwargs) ProjectTemplateFactory.create(slug=settings.DEFAULT_PROJECT_TEMPLATE) project = ProjectFactory.create(**defaults) project.default_issue_status = IssueStatusFactory.create(project=project) project.default_severity = SeverityFactory.create(project=project) project.default_priority = PriorityFactory.create(project=project) project.default_issue_type = IssueTypeFactory.create(project=project) project.default_us_status = UserStoryStatusFactory.create(project=project) project.default_task_status = TaskStatusFactory.create(project=project) project.save() return project
[ "def", "create_project", "(", "**", "kwargs", ")", ":", "defaults", "=", "{", "}", "defaults", ".", "update", "(", "kwargs", ")", "ProjectTemplateFactory", ".", "create", "(", "slug", "=", "settings", ".", "DEFAULT_PROJECT_TEMPLATE", ")", "project", "=", "ProjectFactory", ".", "create", "(", "**", "defaults", ")", "project", ".", "default_issue_status", "=", "IssueStatusFactory", ".", "create", "(", "project", "=", "project", ")", "project", ".", "default_severity", "=", "SeverityFactory", ".", "create", "(", "project", "=", "project", ")", "project", ".", "default_priority", "=", "PriorityFactory", ".", "create", "(", "project", "=", "project", ")", "project", ".", "default_issue_type", "=", "IssueTypeFactory", ".", "create", "(", "project", "=", "project", ")", "project", ".", "default_us_status", "=", "UserStoryStatusFactory", ".", "create", "(", "project", "=", "project", ")", "project", ".", "default_task_status", "=", "TaskStatusFactory", ".", "create", "(", "project", "=", "project", ")", "project", ".", "save", "(", ")", "return", "project" ]
create a project along with its dependencies .
train
false
16,663
def __py_new(name, data=''): return __get_builtin_constructor(name)(data)
[ "def", "__py_new", "(", "name", ",", "data", "=", "''", ")", ":", "return", "__get_builtin_constructor", "(", "name", ")", "(", "data", ")" ]
return a new hashing object using the named algorithm, optionally initialized with data .
train
false
16,664
def assert_request_user_has_permission(request, permission_type): has_permission = request_user_has_permission(request=request, permission_type=permission_type) if (not has_permission): user_db = get_user_db_from_request(request=request) raise ResourceTypeAccessDeniedError(user_db=user_db, permission_type=permission_type)
[ "def", "assert_request_user_has_permission", "(", "request", ",", "permission_type", ")", ":", "has_permission", "=", "request_user_has_permission", "(", "request", "=", "request", ",", "permission_type", "=", "permission_type", ")", "if", "(", "not", "has_permission", ")", ":", "user_db", "=", "get_user_db_from_request", "(", "request", "=", "request", ")", "raise", "ResourceTypeAccessDeniedError", "(", "user_db", "=", "user_db", ",", "permission_type", "=", "permission_type", ")" ]
check that the currently logged-in user has the specified permission .
train
false
16,665
@flake8ext def check_no_imports_from_tests(logical_line, filename, noqa): msg = 'N343 Production code must not import from neutron.tests.*' if noqa: return if ('neutron/tests/' in filename): return for regex in (tests_imports_dot, tests_imports_from1, tests_imports_from2): if re.match(regex, logical_line): (yield (0, msg))
[ "@", "flake8ext", "def", "check_no_imports_from_tests", "(", "logical_line", ",", "filename", ",", "noqa", ")", ":", "msg", "=", "'N343 Production code must not import from neutron.tests.*'", "if", "noqa", ":", "return", "if", "(", "'neutron/tests/'", "in", "filename", ")", ":", "return", "for", "regex", "in", "(", "tests_imports_dot", ",", "tests_imports_from1", ",", "tests_imports_from2", ")", ":", "if", "re", ".", "match", "(", "regex", ",", "logical_line", ")", ":", "(", "yield", "(", "0", ",", "msg", ")", ")" ]
n343 production code must not import from neutron.tests.* .
train
false
16,668
def get_csv(client, result_response): content = json.loads(result_response.content) assert_true(content['isSuccess']) csv_link = ('/beeswax/download/%s/csv' % content['id']) csv_resp = client.get(csv_link) return ''.join(csv_resp.streaming_content)
[ "def", "get_csv", "(", "client", ",", "result_response", ")", ":", "content", "=", "json", ".", "loads", "(", "result_response", ".", "content", ")", "assert_true", "(", "content", "[", "'isSuccess'", "]", ")", "csv_link", "=", "(", "'/beeswax/download/%s/csv'", "%", "content", "[", "'id'", "]", ")", "csv_resp", "=", "client", ".", "get", "(", "csv_link", ")", "return", "''", ".", "join", "(", "csv_resp", ".", "streaming_content", ")" ]
get the csv for a query result .
train
false
16,669
def dup_monic(f, K): if (not f): return f lc = dup_LC(f, K) if K.is_one(lc): return f else: return dup_exquo_ground(f, lc, K)
[ "def", "dup_monic", "(", "f", ",", "K", ")", ":", "if", "(", "not", "f", ")", ":", "return", "f", "lc", "=", "dup_LC", "(", "f", ",", "K", ")", "if", "K", ".", "is_one", "(", "lc", ")", ":", "return", "f", "else", ":", "return", "dup_exquo_ground", "(", "f", ",", "lc", ",", "K", ")" ]
divide all coefficients by lc(f) in k[x] .
train
false
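a simplified, self-contained sketch of dup_monic: fractions.Fraction stands in for the sympy ground domain K, and the helpers dup_LC and dup_exquo_ground are inlined (both substitutions are assumptions for illustration); coefficients are stored in descending order:

    from fractions import Fraction

    def dup_monic_demo(f):
        # f = [3, 4, 2] represents 3*x**2 + 4*x + 2
        if not f:
            return f
        lc = f[0]  # leading coefficient, i.e. dup_LC(f, K)
        if lc == 1:
            return f
        return [c / lc for c in f]  # exact division, i.e. dup_exquo_ground

    print(dup_monic_demo([Fraction(3), Fraction(4), Fraction(2)]))
    # [Fraction(1, 1), Fraction(4, 3), Fraction(2, 3)]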
16,670
def apply_wrappers_to_content(content, module, request): content = module.system.replace_urls(content) content = module.system.replace_course_urls(content) content = module.system.replace_jump_to_id_urls(content) return make_static_urls_absolute(request, content)
[ "def", "apply_wrappers_to_content", "(", "content", ",", "module", ",", "request", ")", ":", "content", "=", "module", ".", "system", ".", "replace_urls", "(", "content", ")", "content", "=", "module", ".", "system", ".", "replace_course_urls", "(", "content", ")", "content", "=", "module", ".", "system", ".", "replace_jump_to_id_urls", "(", "content", ")", "return", "make_static_urls_absolute", "(", "request", ",", "content", ")" ]
updates a piece of html content with the filter functions stored in its module system .
train
false
16,671
def parameter_space(__fail_fast=False, **params): def decorator(f): argspec = getargspec(f) if argspec.varargs: raise AssertionError("parameter_space() doesn't support *args") if argspec.keywords: raise AssertionError("parameter_space() doesn't support **kwargs") if argspec.defaults: raise AssertionError("parameter_space() doesn't support defaults.") argnames = argspec.args if (argnames[0] == 'self'): argnames = argnames[1:] extra = (set(params) - set(argnames)) if extra: raise AssertionError(('Keywords %s supplied to parameter_space() are not in function signature.' % extra)) unspecified = (set(argnames) - set(params)) if unspecified: raise AssertionError(('Function arguments %s were not supplied to parameter_space().' % extra)) param_sets = product(*(params[name] for name in argnames)) if __fail_fast: @wraps(f) def wrapped(self): for args in param_sets: f(self, *args) return wrapped else: return subtest(param_sets, *argnames)(f) return decorator
[ "def", "parameter_space", "(", "__fail_fast", "=", "False", ",", "**", "params", ")", ":", "def", "decorator", "(", "f", ")", ":", "argspec", "=", "getargspec", "(", "f", ")", "if", "argspec", ".", "varargs", ":", "raise", "AssertionError", "(", "\"parameter_space() doesn't support *args\"", ")", "if", "argspec", ".", "keywords", ":", "raise", "AssertionError", "(", "\"parameter_space() doesn't support **kwargs\"", ")", "if", "argspec", ".", "defaults", ":", "raise", "AssertionError", "(", "\"parameter_space() doesn't support defaults.\"", ")", "argnames", "=", "argspec", ".", "args", "if", "(", "argnames", "[", "0", "]", "==", "'self'", ")", ":", "argnames", "=", "argnames", "[", "1", ":", "]", "extra", "=", "(", "set", "(", "params", ")", "-", "set", "(", "argnames", ")", ")", "if", "extra", ":", "raise", "AssertionError", "(", "(", "'Keywords %s supplied to parameter_space() are not in function signature.'", "%", "extra", ")", ")", "unspecified", "=", "(", "set", "(", "argnames", ")", "-", "set", "(", "params", ")", ")", "if", "unspecified", ":", "raise", "AssertionError", "(", "(", "'Function arguments %s were not supplied to parameter_space().'", "%", "extra", ")", ")", "param_sets", "=", "product", "(", "*", "(", "params", "[", "name", "]", "for", "name", "in", "argnames", ")", ")", "if", "__fail_fast", ":", "@", "wraps", "(", "f", ")", "def", "wrapped", "(", "self", ")", ":", "for", "args", "in", "param_sets", ":", "f", "(", "self", ",", "*", "args", ")", "return", "wrapped", "else", ":", "return", "subtest", "(", "param_sets", ",", "*", "argnames", ")", "(", "f", ")", "return", "decorator" ]
run the decorated test over the cartesian product of the supplied parameter spaces .
train
false
16,672
def test_fix_stim(): raw = read_raw_fif(raw_fname, preload=True) raw._data[raw.ch_names.index('STI 014'), :3] = [0, (-32765), 0] with warnings.catch_warnings(record=True) as w: events = find_events(raw, 'STI 014') assert_true((len(w) >= 1)) assert_true(any((('STI016' in str(ww.message)) for ww in w))) assert_array_equal(events[0], [(raw.first_samp + 1), 0, 32765]) events = find_events(raw, 'STI 014', uint_cast=True) assert_array_equal(events[0], [(raw.first_samp + 1), 0, 32771])
[ "def", "test_fix_stim", "(", ")", ":", "raw", "=", "read_raw_fif", "(", "raw_fname", ",", "preload", "=", "True", ")", "raw", ".", "_data", "[", "raw", ".", "ch_names", ".", "index", "(", "'STI 014'", ")", ",", ":", "3", "]", "=", "[", "0", ",", "(", "-", "32765", ")", ",", "0", "]", "with", "warnings", ".", "catch_warnings", "(", "record", "=", "True", ")", "as", "w", ":", "events", "=", "find_events", "(", "raw", ",", "'STI 014'", ")", "assert_true", "(", "(", "len", "(", "w", ")", ">=", "1", ")", ")", "assert_true", "(", "any", "(", "(", "(", "'STI016'", "in", "str", "(", "ww", ".", "message", ")", ")", "for", "ww", "in", "w", ")", ")", ")", "assert_array_equal", "(", "events", "[", "0", "]", ",", "[", "(", "raw", ".", "first_samp", "+", "1", ")", ",", "0", ",", "32765", "]", ")", "events", "=", "find_events", "(", "raw", ",", "'STI 014'", ",", "uint_cast", "=", "True", ")", "assert_array_equal", "(", "events", "[", "0", "]", ",", "[", "(", "raw", ".", "first_samp", "+", "1", ")", ",", "0", ",", "32771", "]", ")" ]
test fixing stim sti016 for neuromag .
train
false
16,673
def next(iter): return iter.next()
[ "def", "next", "(", "iter", ")", ":", "return", "iter", ".", "next", "(", ")" ]
retrieve the next item from the iterator by calling its next() method .
train
false
16,675
def _GetCallingModuleObjectAndName(): for depth in range(1, sys.getrecursionlimit()): if (not (sys._getframe(depth).f_globals is globals())): globals_for_frame = sys._getframe(depth).f_globals (module, module_name) = _GetModuleObjectAndName(globals_for_frame) if (module_name is not None): return (module, module_name) raise AssertionError('No module was found')
[ "def", "_GetCallingModuleObjectAndName", "(", ")", ":", "for", "depth", "in", "range", "(", "1", ",", "sys", ".", "getrecursionlimit", "(", ")", ")", ":", "if", "(", "not", "(", "sys", ".", "_getframe", "(", "depth", ")", ".", "f_globals", "is", "globals", "(", ")", ")", ")", ":", "globals_for_frame", "=", "sys", ".", "_getframe", "(", "depth", ")", ".", "f_globals", "(", "module", ",", "module_name", ")", "=", "_GetModuleObjectAndName", "(", "globals_for_frame", ")", "if", "(", "module_name", "is", "not", "None", ")", ":", "return", "(", "module", ",", "module_name", ")", "raise", "AssertionError", "(", "'No module was found'", ")" ]
returns the module that's calling into this module .
train
false
16,676
def compare_package(version1, version2): def normalize(v): return [int(x) for x in re.sub('(\\.0+)*$', '', v).split('.')] return cmp(normalize(version1), normalize(version2))
[ "def", "compare_package", "(", "version1", ",", "version2", ")", ":", "def", "normalize", "(", "v", ")", ":", "return", "[", "int", "(", "x", ")", "for", "x", "in", "re", ".", "sub", "(", "'(\\\\.0+)*$'", ",", "''", ",", "v", ")", ".", "split", "(", "'.'", ")", "]", "return", "cmp", "(", "normalize", "(", "version1", ")", ",", "normalize", "(", "version2", ")", ")" ]
compare two package version strings .
train
false
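the snippet relies on python 2's cmp builtin; a sketch that supplies a py3-compatible cmp and shows that trailing '.0' components are stripped before comparison:

    import re

    def cmp(a, b):  # stand-in for the builtin removed in python 3
        return (a > b) - (a < b)

    def compare_package(version1, version2):
        def normalize(v):
            return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split('.')]
        return cmp(normalize(version1), normalize(version2))

    print(compare_package('1.2.0', '1.2'))  # 0, equal once '.0' is stripped
    print(compare_package('1.10', '1.9'))   # 1, compared numerically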
16,678
def setboolean(obj, attr, _bool=None): if (_bool is None): _bool = dict(_boolean_states) res = _bool[getattr(obj, attr).lower()] setattr(obj, attr, res) return res
[ "def", "setboolean", "(", "obj", ",", "attr", ",", "_bool", "=", "None", ")", ":", "if", "(", "_bool", "is", "None", ")", ":", "_bool", "=", "dict", "(", "_boolean_states", ")", "res", "=", "_bool", "[", "getattr", "(", "obj", ",", "attr", ")", ".", "lower", "(", ")", "]", "setattr", "(", "obj", ",", "attr", ",", "res", ")", "return", "res" ]
replace the attribute with a boolean .
train
false
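a runnable sketch; the _boolean_states mapping is not shown in the snippet, so the ConfigParser-style table below is an assumption:

    _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
                       '0': False, 'no': False, 'false': False, 'off': False}

    def setboolean(obj, attr, _bool=None):
        if _bool is None:
            _bool = dict(_boolean_states)
        res = _bool[getattr(obj, attr).lower()]
        setattr(obj, attr, res)  # the string attribute becomes a real bool
        return res

    class Config(object):
        debug = 'Yes'

    cfg = Config()
    print(setboolean(cfg, 'debug'), cfg.debug)  # True True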
16,680
def GeneratePhotoUrl(obj_store, photo_id, suffix): return obj_store.GenerateUrl((photo_id + suffix), cache_control='private,max-age=31536000')
[ "def", "GeneratePhotoUrl", "(", "obj_store", ",", "photo_id", ",", "suffix", ")", ":", "return", "obj_store", ".", "GenerateUrl", "(", "(", "photo_id", "+", "suffix", ")", ",", "cache_control", "=", "'private,max-age=31536000'", ")" ]
generate s3 signed url for the given photo .
train
false
16,682
def crop_hint(photo_file): service = get_service() with open(photo_file, 'rb') as image: image_content = base64.b64encode(image.read()) service_request = service.images().annotate(body={'requests': [{'image': {'content': image_content.decode('UTF-8')}, 'features': [{'type': 'CROP_HINTS'}]}]}) response = service_request.execute() print json.dumps(response, indent=2)
[ "def", "crop_hint", "(", "photo_file", ")", ":", "service", "=", "get_service", "(", ")", "with", "open", "(", "photo_file", ",", "'rb'", ")", "as", "image", ":", "image_content", "=", "base64", ".", "b64encode", "(", "image", ".", "read", "(", ")", ")", "service_request", "=", "service", ".", "images", "(", ")", ".", "annotate", "(", "body", "=", "{", "'requests'", ":", "[", "{", "'image'", ":", "{", "'content'", ":", "image_content", ".", "decode", "(", "'UTF-8'", ")", "}", ",", "'features'", ":", "[", "{", "'type'", ":", "'CROP_HINTS'", "}", "]", "}", "]", "}", ")", "response", "=", "service_request", ".", "execute", "(", ")", "print", "json", ".", "dumps", "(", "response", ",", "indent", "=", "2", ")" ]
run a crop hint request on the image .
train
false
16,684
def survey_get_series_questions_of_type(question_list, qtype): if isinstance(qtype, (list, tuple)): types = qtype else: types = qtype questions = [] for question in question_list: if (question['type'] in types): questions.append(question) elif ((question['type'] == 'Link') or (question['type'] == 'GridChild')): widget_obj = survey_getWidgetFromQuestion(question['qstn_id']) if (widget_obj.getParentType() in types): question['name'] = widget_obj.fullName() questions.append(question) return questions
[ "def", "survey_get_series_questions_of_type", "(", "question_list", ",", "qtype", ")", ":", "if", "isinstance", "(", "qtype", ",", "(", "list", ",", "tuple", ")", ")", ":", "types", "=", "qtype", "else", ":", "types", "=", "qtype", "questions", "=", "[", "]", "for", "question", "in", "question_list", ":", "if", "(", "question", "[", "'type'", "]", "in", "types", ")", ":", "questions", ".", "append", "(", "question", ")", "elif", "(", "(", "question", "[", "'type'", "]", "==", "'Link'", ")", "or", "(", "question", "[", "'type'", "]", "==", "'GridChild'", ")", ")", ":", "widget_obj", "=", "survey_getWidgetFromQuestion", "(", "question", "[", "'qstn_id'", "]", ")", "if", "(", "widget_obj", ".", "getParentType", "(", ")", "in", "types", ")", ":", "question", "[", "'name'", "]", "=", "widget_obj", ".", "fullName", "(", ")", "questions", ".", "append", "(", "question", ")", "return", "questions" ]
get questions of a particular question type .
train
false
16,685
def verify_packages(subset, all_packages): left_out = (set(subset) - set(all_packages)) if left_out: raise ValueError('Unknown packages', sorted(left_out))
[ "def", "verify_packages", "(", "subset", ",", "all_packages", ")", ":", "left_out", "=", "(", "set", "(", "subset", ")", "-", "set", "(", "all_packages", ")", ")", "if", "left_out", ":", "raise", "ValueError", "(", "'Unknown packages'", ",", "sorted", "(", "left_out", ")", ")" ]
verify that a subset of packages are among all packages .
train
false
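a quick sketch of both outcomes, with hypothetical package names:

    def verify_packages(subset, all_packages):
        left_out = set(subset) - set(all_packages)
        if left_out:
            raise ValueError('Unknown packages', sorted(left_out))

    verify_packages(['numpy'], ['numpy', 'scipy'])  # passes silently
    try:
        verify_packages(['pandas'], ['numpy', 'scipy'])
    except ValueError as e:
        print(e)  # ('Unknown packages', ['pandas'])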
16,686
def reverse_usage_url(handler_name, usage_key, kwargs=None): return reverse_url(handler_name, 'usage_key_string', usage_key, kwargs)
[ "def", "reverse_usage_url", "(", "handler_name", ",", "usage_key", ",", "kwargs", "=", "None", ")", ":", "return", "reverse_url", "(", "handler_name", ",", "'usage_key_string'", ",", "usage_key", ",", "kwargs", ")" ]
creates the url for handlers that use usage_keys as url parameters .
train
false
16,687
def route_table_absent(name, region=None, key=None, keyid=None, profile=None): ret = {'name': name, 'result': True, 'comment': '', 'changes': {}} r = __salt__['boto_vpc.get_resource_id']('route_table', name=name, region=region, key=key, keyid=keyid, profile=profile) if ('error' in r): ret['result'] = False ret['comment'] = r['error']['message'] return ret rtbl_id = r['id'] if (not rtbl_id): ret['comment'] = 'Route table {0} does not exist.'.format(name) return ret if __opts__['test']: ret['comment'] = 'Route table {0} is set to be removed.'.format(name) ret['result'] = None return ret r = __salt__['boto_vpc.delete_route_table'](route_table_name=name, region=region, key=key, keyid=keyid, profile=profile) if ('error' in r): ret['result'] = False ret['comment'] = 'Failed to delete route table: {0}'.format(r['error']['message']) return ret ret['changes']['old'] = {'route_table': rtbl_id} ret['changes']['new'] = {'route_table': None} ret['comment'] = 'Route table {0} deleted.'.format(name) return ret
[ "def", "route_table_absent", "(", "name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'result'", ":", "True", ",", "'comment'", ":", "''", ",", "'changes'", ":", "{", "}", "}", "r", "=", "__salt__", "[", "'boto_vpc.get_resource_id'", "]", "(", "'route_table'", ",", "name", "=", "name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "(", "'error'", "in", "r", ")", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "r", "[", "'error'", "]", "[", "'message'", "]", "return", "ret", "rtbl_id", "=", "r", "[", "'id'", "]", "if", "(", "not", "rtbl_id", ")", ":", "ret", "[", "'comment'", "]", "=", "'Route table {0} does not exist.'", ".", "format", "(", "name", ")", "return", "ret", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'comment'", "]", "=", "'Route table {0} is set to be removed.'", ".", "format", "(", "name", ")", "ret", "[", "'result'", "]", "=", "None", "return", "ret", "r", "=", "__salt__", "[", "'boto_vpc.delete_route_table'", "]", "(", "route_table_name", "=", "name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "(", "'error'", "in", "r", ")", ":", "ret", "[", "'result'", "]", "=", "False", "ret", "[", "'comment'", "]", "=", "'Failed to delete route table: {0}'", ".", "format", "(", "r", "[", "'error'", "]", "[", "'message'", "]", ")", "return", "ret", "ret", "[", "'changes'", "]", "[", "'old'", "]", "=", "{", "'route_table'", ":", "rtbl_id", "}", "ret", "[", "'changes'", "]", "[", "'new'", "]", "=", "{", "'route_table'", ":", "None", "}", "ret", "[", "'comment'", "]", "=", "'Route table {0} deleted.'", ".", "format", "(", "name", ")", "return", "ret" ]
ensure the named route table is absent .
train
true
16,688
def new(rsa_key): return PKCS115_SigScheme(rsa_key)
[ "def", "new", "(", "rsa_key", ")", ":", "return", "PKCS115_SigScheme", "(", "rsa_key", ")" ]
return a new pkcs#1 v1.5 signature scheme object for the given rsa key .
train
false
16,689
def get_etag(text): etag = md5() etag.update(text.encode('utf-8')) return ('"%s"' % etag.hexdigest())
[ "def", "get_etag", "(", "text", ")", ":", "etag", "=", "md5", "(", ")", "etag", ".", "update", "(", "text", ".", "encode", "(", "'utf-8'", ")", ")", "return", "(", "'\"%s\"'", "%", "etag", ".", "hexdigest", "(", ")", ")" ]
etag from collection or item .
train
false
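a runnable sketch; the printed digest is the well-known md5 of 'hello':

    from hashlib import md5

    def get_etag(text):
        etag = md5()
        etag.update(text.encode('utf-8'))
        return '"%s"' % etag.hexdigest()  # ETags are quoted per HTTP

    print(get_etag('hello'))
    # "5d41402abc4b2a76b9719d911017c592"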
16,690
def _add_element_attrs(elem, attrs): for (attr, value) in attrs.items(): elem.attrib[attr] = value return elem
[ "def", "_add_element_attrs", "(", "elem", ",", "attrs", ")", ":", "for", "(", "attr", ",", "value", ")", "in", "attrs", ".", "items", "(", ")", ":", "elem", ".", "attrib", "[", "attr", "]", "=", "value", "return", "elem" ]
add attributes to the given element .
train
true
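a sketch with the standard library's ElementTree; any element exposing an attrib dict works:

    import xml.etree.ElementTree as ET

    def _add_element_attrs(elem, attrs):
        for attr, value in attrs.items():
            elem.attrib[attr] = value
        return elem

    node = _add_element_attrs(ET.Element('node'), {'id': '42', 'lang': 'en'})
    print(ET.tostring(node).decode())  # <node id="42" lang="en" />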
16,691
@assert_crypto_availability def decrypt_data(token, secret, data): if data.startswith(ENCRYPT_MARKER): try: encoded_data = data[len(ENCRYPT_MARKER):] aes_key = generate_aes_key(token, secret) decoded_data = base64.b64decode(encoded_data) iv = decoded_data[:16] encrypted_data = decoded_data[16:] cipher = AES.new(aes_key, AES.MODE_CFB, iv) decrypted_data = cipher.decrypt(encrypted_data) return json.loads(decrypted_data) except: raise DecryptError('data appeared to be corrupted') else: return data
[ "@", "assert_crypto_availability", "def", "decrypt_data", "(", "token", ",", "secret", ",", "data", ")", ":", "if", "data", ".", "startswith", "(", "ENCRYPT_MARKER", ")", ":", "try", ":", "encoded_data", "=", "data", "[", "len", "(", "ENCRYPT_MARKER", ")", ":", "]", "aes_key", "=", "generate_aes_key", "(", "token", ",", "secret", ")", "decoded_data", "=", "base64", ".", "b64decode", "(", "encoded_data", ")", "iv", "=", "decoded_data", "[", ":", "16", "]", "encrypted_data", "=", "decoded_data", "[", "16", ":", "]", "cipher", "=", "AES", ".", "new", "(", "aes_key", ",", "AES", ".", "MODE_CFB", ",", "iv", ")", "decrypted_data", "=", "cipher", ".", "decrypt", "(", "encrypted_data", ")", "return", "json", ".", "loads", "(", "decrypted_data", ")", "except", ":", "raise", "DecryptError", "(", "'data appeared to be corrupted'", ")", "else", ":", "return", "data" ]
decrypt the data with the given secret key .
train
false
16,692
def _convert_timestamps_to_datetimes(image_meta): for attr in ['created_at', 'updated_at', 'deleted_at']: if image_meta.get(attr): image_meta[attr] = timeutils.parse_isotime(image_meta[attr]) return image_meta
[ "def", "_convert_timestamps_to_datetimes", "(", "image_meta", ")", ":", "for", "attr", "in", "[", "'created_at'", ",", "'updated_at'", ",", "'deleted_at'", "]", ":", "if", "image_meta", ".", "get", "(", "attr", ")", ":", "image_meta", "[", "attr", "]", "=", "timeutils", ".", "parse_isotime", "(", "image_meta", "[", "attr", "]", ")", "return", "image_meta" ]
returns image with timestamp fields converted to datetime objects .
train
false
16,693
def take_nth(n, seq): return itertools.islice(seq, 0, None, n)
[ "def", "take_nth", "(", "n", ",", "seq", ")", ":", "return", "itertools", ".", "islice", "(", "seq", ",", "0", ",", "None", ",", "n", ")" ]
every nth item in seq .
train
false
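a quick sketch showing the stride semantics (the slice starts at index 0 and is lazy until consumed):

    import itertools

    def take_nth(n, seq):
        return itertools.islice(seq, 0, None, n)

    print(list(take_nth(3, range(10))))  # [0, 3, 6, 9]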
16,694
def get_dhcp_options(dhcp_options_name=None, dhcp_options_id=None, region=None, key=None, keyid=None, profile=None): if (not any((dhcp_options_name, dhcp_options_id))): raise SaltInvocationError('At least one of the following must be specified: dhcp_options_name, dhcp_options_id.') if ((not dhcp_options_id) and dhcp_options_name): dhcp_options_id = _get_resource_id('dhcp_options', dhcp_options_name, region=region, key=key, keyid=keyid, profile=profile) if (not dhcp_options_id): return {'dhcp_options': {}} try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) r = conn.get_all_dhcp_options(dhcp_options_ids=[dhcp_options_id]) except BotoServerError as e: return {'error': salt.utils.boto.get_error(e)} if (not r): return {'dhcp_options': None} keys = ('domain_name', 'domain_name_servers', 'ntp_servers', 'netbios_name_servers', 'netbios_node_type') return {'dhcp_options': dict(((k, r[0].options.get(k)) for k in keys))}
[ "def", "get_dhcp_options", "(", "dhcp_options_name", "=", "None", ",", "dhcp_options_id", "=", "None", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "if", "(", "not", "any", "(", "(", "dhcp_options_name", ",", "dhcp_options_id", ")", ")", ")", ":", "raise", "SaltInvocationError", "(", "'At least one of the following must be specified: dhcp_options_name, dhcp_options_id.'", ")", "if", "(", "(", "not", "dhcp_options_id", ")", "and", "dhcp_options_name", ")", ":", "dhcp_options_id", "=", "_get_resource_id", "(", "'dhcp_options'", ",", "dhcp_options_name", ",", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "if", "(", "not", "dhcp_options_id", ")", ":", "return", "{", "'dhcp_options'", ":", "{", "}", "}", "try", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "r", "=", "conn", ".", "get_all_dhcp_options", "(", "dhcp_options_ids", "=", "[", "dhcp_options_id", "]", ")", "except", "BotoServerError", "as", "e", ":", "return", "{", "'error'", ":", "salt", ".", "utils", ".", "boto", ".", "get_error", "(", "e", ")", "}", "if", "(", "not", "r", ")", ":", "return", "{", "'dhcp_options'", ":", "None", "}", "keys", "=", "(", "'domain_name'", ",", "'domain_name_servers'", ",", "'ntp_servers'", ",", "'netbios_name_servers'", ",", "'netbios_node_type'", ")", "return", "{", "'dhcp_options'", ":", "dict", "(", "(", "(", "k", ",", "r", "[", "0", "]", ".", "options", ".", "get", "(", "k", ")", ")", "for", "k", "in", "keys", ")", ")", "}" ]
return a dict with the current values of the requested dhcp options set .
train
true
16,696
def strip_escapes(text): return _ANSI_ESCAPE_PATTERN.sub('', text)
[ "def", "strip_escapes", "(", "text", ")", ":", "return", "_ANSI_ESCAPE_PATTERN", ".", "sub", "(", "''", ",", "text", ")" ]
removes ansi escape sequences from text .
train
false
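the compiled pattern is not shown in the snippet; the regex below covers the common CSI colour codes and is an assumption for illustration:

    import re

    _ANSI_ESCAPE_PATTERN = re.compile(r'\x1b\[[0-9;]*m')

    def strip_escapes(text):
        return _ANSI_ESCAPE_PATTERN.sub('', text)

    print(strip_escapes('\x1b[31mred\x1b[0m text'))  # red text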
16,700
def sharpe_ratio(returns, periods=252): return ((np.sqrt(periods) * np.mean(returns)) / np.std(returns))
[ "def", "sharpe_ratio", "(", "returns", ",", "periods", "=", "252", ")", ":", "return", "(", "(", "np", ".", "sqrt", "(", "periods", ")", "*", "np", ".", "mean", "(", "returns", ")", ")", "/", "np", ".", "std", "(", "returns", ")", ")" ]
compute the annualised sharpe ratio for the strategy .
train
false
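a sketch with synthetic daily returns; periods=252 annualises a daily series, and the ratio implicitly assumes a zero risk-free rate:

    import numpy as np

    def sharpe_ratio(returns, periods=252):
        return np.sqrt(periods) * np.mean(returns) / np.std(returns)

    rng = np.random.default_rng(0)
    daily = rng.normal(0.0005, 0.01, size=252)  # ~0.05% mean, 1% daily vol
    print(round(sharpe_ratio(daily), 2))  # one year of hypothetical data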
16,701
def CheckComment(line, filename, linenum, next_line_start, error): commentpos = line.find('//') if (commentpos != (-1)): if ((re.sub('\\\\.', '', line[0:commentpos]).count('"') % 2) == 0): if ((not (Match('^.*{ *//', line) and (next_line_start == commentpos))) and (((commentpos >= 1) and (line[(commentpos - 1)] not in string.whitespace)) or ((commentpos >= 2) and (line[(commentpos - 2)] not in string.whitespace)))): error(filename, linenum, 'whitespace/comments', 2, 'At least two spaces is best between code and comments') comment = line[commentpos:] match = _RE_PATTERN_TODO.match(comment) if match: leading_whitespace = match.group(1) if (len(leading_whitespace) > 1): error(filename, linenum, 'whitespace/todo', 2, 'Too many spaces before TODO') username = match.group(2) if (not username): error(filename, linenum, 'readability/todo', 2, 'Missing username in TODO; it should look like "// TODO(my_username): Stuff."') middle_whitespace = match.group(3) if ((middle_whitespace != ' ') and (middle_whitespace != '')): error(filename, linenum, 'whitespace/todo', 2, 'TODO(my_username) should be followed by a space') if (Match('//[^ ]*\\w', comment) and (not Match('(///|//\\!)(\\s+|$)', comment))): error(filename, linenum, 'whitespace/comments', 4, 'Should have a space between // and comment')
[ "def", "CheckComment", "(", "line", ",", "filename", ",", "linenum", ",", "next_line_start", ",", "error", ")", ":", "commentpos", "=", "line", ".", "find", "(", "'//'", ")", "if", "(", "commentpos", "!=", "(", "-", "1", ")", ")", ":", "if", "(", "(", "re", ".", "sub", "(", "'\\\\\\\\.'", ",", "''", ",", "line", "[", "0", ":", "commentpos", "]", ")", ".", "count", "(", "'\"'", ")", "%", "2", ")", "==", "0", ")", ":", "if", "(", "(", "not", "(", "Match", "(", "'^.*{ *//'", ",", "line", ")", "and", "(", "next_line_start", "==", "commentpos", ")", ")", ")", "and", "(", "(", "(", "commentpos", ">=", "1", ")", "and", "(", "line", "[", "(", "commentpos", "-", "1", ")", "]", "not", "in", "string", ".", "whitespace", ")", ")", "or", "(", "(", "commentpos", ">=", "2", ")", "and", "(", "line", "[", "(", "commentpos", "-", "2", ")", "]", "not", "in", "string", ".", "whitespace", ")", ")", ")", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/comments'", ",", "2", ",", "'At least two spaces is best between code and comments'", ")", "comment", "=", "line", "[", "commentpos", ":", "]", "match", "=", "_RE_PATTERN_TODO", ".", "match", "(", "comment", ")", "if", "match", ":", "leading_whitespace", "=", "match", ".", "group", "(", "1", ")", "if", "(", "len", "(", "leading_whitespace", ")", ">", "1", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/todo'", ",", "2", ",", "'Too many spaces before TODO'", ")", "username", "=", "match", ".", "group", "(", "2", ")", "if", "(", "not", "username", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'readability/todo'", ",", "2", ",", "'Missing username in TODO; it should look like \"// TODO(my_username): Stuff.\"'", ")", "middle_whitespace", "=", "match", ".", "group", "(", "3", ")", "if", "(", "(", "middle_whitespace", "!=", "' '", ")", "and", "(", "middle_whitespace", "!=", "''", ")", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/todo'", ",", "2", ",", "'TODO(my_username) should be followed by a space'", ")", "if", "(", "Match", "(", "'//[^ ]*\\\\w'", ",", "comment", ")", "and", "(", "not", "Match", "(", "'(///|//\\\\!)(\\\\s+|$)'", ",", "comment", ")", ")", ")", ":", "error", "(", "filename", ",", "linenum", ",", "'whitespace/comments'", ",", "4", ",", "'Should have a space between // and comment'", ")" ]
checks for common mistakes in comments, including todo comments .
train
true
16,702
def abbrtask(S, max): if (S is None): return u'???' if (len(S) > max): (module, _, cls) = S.rpartition(u'.') module = abbr(module, ((max - len(cls)) - 3), False) return ((module + u'[.]') + cls) return S
[ "def", "abbrtask", "(", "S", ",", "max", ")", ":", "if", "(", "S", "is", "None", ")", ":", "return", "u'???'", "if", "(", "len", "(", "S", ")", ">", "max", ")", ":", "(", "module", ",", "_", ",", "cls", ")", "=", "S", ".", "rpartition", "(", "u'.'", ")", "module", "=", "abbr", "(", "module", ",", "(", "(", "max", "-", "len", "(", "cls", ")", ")", "-", "3", ")", ",", "False", ")", "return", "(", "(", "module", "+", "u'[.]'", ")", "+", "cls", ")", "return", "S" ]
abbreviate task name .
train
false
16,704
def worker_update(context, id, filters=None, orm_worker=None, **values): filters = (filters or {}) query = _worker_query(context, id=id, **filters) _worker_set_updated_at_field(values) reference = (orm_worker or models.Worker) values['race_preventer'] = (reference.race_preventer + 1) result = query.update(values) if (not result): raise exception.WorkerNotFound(id=id, **filters) _orm_worker_update(orm_worker, values) return result
[ "def", "worker_update", "(", "context", ",", "id", ",", "filters", "=", "None", ",", "orm_worker", "=", "None", ",", "**", "values", ")", ":", "filters", "=", "(", "filters", "or", "{", "}", ")", "query", "=", "_worker_query", "(", "context", ",", "id", "=", "id", ",", "**", "filters", ")", "_worker_set_updated_at_field", "(", "values", ")", "reference", "=", "(", "orm_worker", "or", "models", ".", "Worker", ")", "values", "[", "'race_preventer'", "]", "=", "(", "reference", ".", "race_preventer", "+", "1", ")", "result", "=", "query", ".", "update", "(", "values", ")", "if", "(", "not", "result", ")", ":", "raise", "exception", ".", "WorkerNotFound", "(", "id", "=", "id", ",", "**", "filters", ")", "_orm_worker_update", "(", "orm_worker", ",", "values", ")", "return", "result" ]
update a worker with given values .
train
false
16,705
def _inherited_dashboard(dashboard, base_dashboards_from_pillar, ret): base_dashboards = [] for base_dashboard_from_pillar in base_dashboards_from_pillar: base_dashboard = __salt__['pillar.get'](base_dashboard_from_pillar) if base_dashboard: base_dashboards.append(base_dashboard) elif (base_dashboard_from_pillar != _DEFAULT_DASHBOARD_PILLAR): ret.setdefault('warnings', []) warning_message = 'Cannot find dashboard pillar "{0}".'.format(base_dashboard_from_pillar) if (warning_message not in ret['warnings']): ret['warnings'].append(warning_message) base_dashboards.append(dashboard) result_dashboard = {} tags = set() for dashboard in base_dashboards: tags.update(dashboard.get('tags', [])) result_dashboard.update(dashboard) result_dashboard['tags'] = list(tags) return result_dashboard
[ "def", "_inherited_dashboard", "(", "dashboard", ",", "base_dashboards_from_pillar", ",", "ret", ")", ":", "base_dashboards", "=", "[", "]", "for", "base_dashboard_from_pillar", "in", "base_dashboards_from_pillar", ":", "base_dashboard", "=", "__salt__", "[", "'pillar.get'", "]", "(", "base_dashboard_from_pillar", ")", "if", "base_dashboard", ":", "base_dashboards", ".", "append", "(", "base_dashboard", ")", "elif", "(", "base_dashboard_from_pillar", "!=", "_DEFAULT_DASHBOARD_PILLAR", ")", ":", "ret", ".", "setdefault", "(", "'warnings'", ",", "[", "]", ")", "warning_message", "=", "'Cannot find dashboard pillar \"{0}\".'", ".", "format", "(", "base_dashboard_from_pillar", ")", "if", "(", "warning_message", "not", "in", "ret", "[", "'warnings'", "]", ")", ":", "ret", "[", "'warnings'", "]", ".", "append", "(", "warning_message", ")", "base_dashboards", ".", "append", "(", "dashboard", ")", "result_dashboard", "=", "{", "}", "tags", "=", "set", "(", ")", "for", "dashboard", "in", "base_dashboards", ":", "tags", ".", "update", "(", "dashboard", ".", "get", "(", "'tags'", ",", "[", "]", ")", ")", "result_dashboard", ".", "update", "(", "dashboard", ")", "result_dashboard", "[", "'tags'", "]", "=", "list", "(", "tags", ")", "return", "result_dashboard" ]
return a dashboard with properties from parents .
train
true
16,707
def _find_lteq(a, x): i = bisect_left(a, x) if ((i != len(a)) and (a[i] == x)): return i raise ValueError
[ "def", "_find_lteq", "(", "a", ",", "x", ")", ":", "i", "=", "bisect_left", "(", "a", ",", "x", ")", "if", "(", "(", "i", "!=", "len", "(", "a", ")", ")", "and", "(", "a", "[", "i", "]", "==", "x", ")", ")", ":", "return", "i", "raise", "ValueError" ]
locate the leftmost value exactly equal to x .
train
false
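despite the 'lteq' in its name, this is the standard bisect 'index' recipe and only accepts exact matches; a sketch:

    from bisect import bisect_left

    def _find_lteq(a, x):
        i = bisect_left(a, x)  # leftmost insertion point for x
        if i != len(a) and a[i] == x:
            return i
        raise ValueError

    print(_find_lteq([1, 3, 3, 5], 3))  # 1, the leftmost match
    # _find_lteq([1, 3, 3, 5], 4) raises ValueError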
16,709
def decrement_month(date): if (date.day != 1): raise ValueError('Input must be truncated to the 1st of the month.') date -= datetime.timedelta(days=1) return date.replace(day=1)
[ "def", "decrement_month", "(", "date", ")", ":", "if", "(", "date", ".", "day", "!=", "1", ")", ":", "raise", "ValueError", "(", "'Input must be truncated to the 1st of the month.'", ")", "date", "-=", "datetime", ".", "timedelta", "(", "days", "=", "1", ")", "return", "date", ".", "replace", "(", "day", "=", "1", ")" ]
given a truncated datetime, return the 1st of the previous month .
train
false
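a runnable sketch; stepping back one day from the 1st always lands in the previous month, and replace(day=1) re-truncates:

    import datetime

    def decrement_month(date):
        if date.day != 1:
            raise ValueError('Input must be truncated to the 1st of the month.')
        date -= datetime.timedelta(days=1)
        return date.replace(day=1)

    print(decrement_month(datetime.date(2021, 3, 1)))  # 2021-02-01
    print(decrement_month(datetime.date(2021, 1, 1)))  # 2020-12-01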
16,710
def p_testlist(p): if (len(p) == 2): p[0] = p[1] elif isinstance(p[1], list): p[0] = p[1] else: p[0] = [p[1]] if isinstance(p[0], list): p[0] = ast.Tuple(p[0])
[ "def", "p_testlist", "(", "p", ")", ":", "if", "(", "len", "(", "p", ")", "==", "2", ")", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "elif", "isinstance", "(", "p", "[", "1", "]", ",", "list", ")", ":", "p", "[", "0", "]", "=", "p", "[", "1", "]", "else", ":", "p", "[", "0", "]", "=", "[", "p", "[", "1", "]", "]", "if", "isinstance", "(", "p", "[", "0", "]", ",", "list", ")", ":", "p", "[", "0", "]", "=", "ast", ".", "Tuple", "(", "p", "[", "0", "]", ")" ]
testlist : testlist_multi comma | testlist_multi .
train
false