content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def find_true_prev_op(ops, cur_op, var_name):
    """
    Find the true prev op that outputs var_name variable.

    Args:
        ops (list): A list of ops.
        cur_op (Operator): Current operator which has var_name variable.
        var_name (string): Variable name.

    Returns:
        Operator or None: the single op appearing before `cur_op` in `ops`
        that lists `var_name` among its outputs, or None if none does.

    Raises:
        ValueError: if more than one op before `cur_op` outputs `var_name`.
    """
    prev_op = []
    # Only ops strictly *before* cur_op in program order are candidates.
    for op in ops:
        if op == cur_op:
            break
        # NOTE(review): assumes the framework Operator API — op.output_names
        # iterates output slot names and op.output(name) yields variable
        # names; confirm against the actual Operator interface.
        for out_name in op.output_names:
            for out_var_name in op.output(out_name):
                if out_var_name == var_name:
                    prev_op.append(op)
    if prev_op:
        if not len(prev_op) == 1:
            raise ValueError("There must be only one previous op "
                             "that outputs {0} variable".format(var_name))
        else:
            return prev_op[0]
    return None
0bb5c9296a810c8ec8d8c66c8a33d2d915d23280
40,418
def rot32long( w, nLeft ):
    """Rotate the 32-bit value `w` left by `nLeft` bits.

    A simpler, slower reference rotator used to test the tester and to
    compare speeds against rot32(). Works on Python longs internally but
    returns a signed 32-bit int (sign-extended from bit 31).
    """
    w &= 0xffffffff
    nLeft &= 31  # guarantees 0 <= nLeft <= 31
    rotated = ((w << nLeft) | (w >> (32 - nLeft))) & 0xffffffff
    # Reinterpret the unsigned 32-bit result as a signed int.
    if rotated & 0x80000000:
        return rotated - 0x100000000
    return rotated
6b8b8a41d215419a900b408b1a2379731b41a40f
40,419
def canonicalize_node_size(node):
    """Translate a GCE API node-size description into the canonical
    butter format dict (type/memory/cpus/storage/location)."""
    # GCE reports RAM in MB; canonical format wants bytes.
    memory_bytes = int(node.ram * 1000 * 1000)
    storage_mb = node.disk * 1024
    return {
        "type": node.name,
        "memory": memory_bytes,
        "cpus": float(node.extra["guestCpus"]),
        "storage": storage_mb,
        "location": node.extra["zone"].name,
    }
3bc9655234b6f7141eb3b311b3aecd9ba5ec1b99
40,420
def mix_images(background_img, foreground_img):
    """Paste one pillow image centered on top of another.

    Args:
        background_img: pillow image used as the backdrop
        foreground_img: pillow image pasted on top (its alpha is the mask)

    Returns:
        pillow image: the background with the foreground composited in
    """
    background_img = background_img.convert('RGBA')
    foreground_img = foreground_img.convert('RGBA')
    fg_w, fg_h = foreground_img.size
    bg_w, bg_h = background_img.size
    # Center the foreground on the background.
    paste_at = ((bg_w - fg_w) // 2, (bg_h - fg_h) // 2)
    background_img.paste(foreground_img, paste_at, mask=foreground_img)
    return background_img
b86834041c42891b54795b72588f33f1c41e320a
40,421
def _decode_instance(encoded_data, decoded_objects, data_to_decode): """ Decode a data structure Args: encoded_data (:obj:`dict`, :obj:`list`, or scalar): data structure with encoded objects decoded_objects (:obj:`dict`): dictionary that maps the unique ids of encoded objects to dictionaries that represent the decoded objects data_to_decode (:obj:`list`): list of tuples of data structures that still need to decoded. The first element represents the data structure that needs to be decoded. The second element represents the object that will represent the decoded data structure. Returns: :obj:`dict`, :obj:`list`, or scalar: decoded data structure """ if isinstance(encoded_data, dict) and '__type' in encoded_data: obj_type = encoded_data.get('__type') obj = decoded_objects.get(encoded_data['__id'], None) if obj is None: obj = {'__type': obj_type} decoded_objects[encoded_data['__id']] = obj data_to_decode.append((encoded_data, obj)) elif isinstance(encoded_data, list): obj = [] data_to_decode.append((encoded_data, obj)) elif isinstance(encoded_data, dict): obj = {} data_to_decode.append((encoded_data, obj)) else: obj = encoded_data return obj
8e9cb5502aded89cc04268b3098cff9e25fb1a91
40,422
import bisect
def find_min_revision_index(revisions_list, revision):
    """Find the min index for bisection: index of the largest revision
    that is <= the given revision, or None if all revisions are greater."""
    # bisect_left gives the first position whose value is >= revision.
    pos = bisect.bisect_left(revisions_list, revision)
    exact_hit = pos < len(revisions_list) and revisions_list[pos] == revision
    if exact_hit:
        return pos
    # No exact match: the predecessor (if any) is the largest value below.
    return pos - 1 if pos > 0 else None
5a664b74613394a7376a5d2e54333dbf66e83b2c
40,423
def users_to_names(users):
    """Map a list of Users to their display names; None entries become ''."""
    names = []
    for user in users:
        names.append('' if user is None else user.display_name)
    return names
881b6717e11d88971ef307fd6b128f9d83d0868c
40,424
def join(separator=""):
    """Create a generator factory that joins each element of its input
    iterable with `separator`."""
    def _join(input):
        for line in input:
            yield separator.join(line)
    return _join
39892f8ecab08147dcd6dd8f3f6b5e873c7f8d16
40,425
import subprocess
def compute_time_tree(msa_filepath, tree_filepath, num_cpus=8):
    """Run `treetime ancestral` on an alignment + tree (???).

    Args:
        msa_filepath: path to the multiple-sequence alignment file
        tree_filepath: path to the input tree file
        num_cpus: accepted for interface compatibility; currently unused
            by this wrapper

    Returns:
        str: the output directory (<msa dir>/timetree) passed to treetime
    """
    out_path = '/'.join(msa_filepath.split('/')[:-1]) + '/timetree'
    # Pass argv as a list with the default shell=False so paths containing
    # spaces or shell metacharacters cannot be interpreted by a shell
    # (the previous f-string + shell=True form was injection-prone).
    tree_cmd = ["treetime", "ancestral",
                "--aln", msa_filepath,
                "--tree", tree_filepath,
                "--outdir", out_path]
    subprocess.check_call(tree_cmd)
    return out_path
1a2ce9a94fb727ebd53e34bf16d37e0837015878
40,428
def sensibleBulk(Tw, Ta, S, rhoa=1.2, Ch=1.5e-3, cpa=1004.67):
    """Sensible heat flux from water via the bulk exchange formulation.

    Inputs:
        Tw - Water temp [C]
        Ta - Air temp [C]
        S - Wind speed magnitude [m s^-1]
        rhoa - air density [kg m^-3]
        Ch - Stanton number
        cpa - Specific heat of air [J kg-1 K-1]
    """
    # Flux is negative when the water is warmer than the air (heat loss).
    delta_T = Tw - Ta
    return -rhoa * cpa * Ch * S * delta_T
ec7bf965a58704c7cbd5e099f6771fa40698c4e8
40,429
def get_opt_limits_sub_by_keys(keys, opt_limits, limit_offset=0, match_type='>='):
    """Filter limit entries by column names.

    Keeps the entries of `opt_limits` whose configured key set (found at
    `l[limit_offset]`, a mapping) matches the supplied `keys` under the
    chosen comparison:
      '>=' incoming keys must cover the configured keys (WHERE filtering),
      '==' incoming keys must equal the configured keys (INSERT/UPDATE),
      '<=' incoming keys must be a subset of the configured keys.
    An entry with an empty configured key set always matches.

    Returns:
        tuple: (matched entries, danger message — '' when anything matched)
    """
    matched = []
    danger_check = ''
    if not opt_limits:
        return opt_limits, danger_check
    key_set = set(keys)
    for limit in opt_limits:
        limit_keys = limit[limit_offset].keys()
        if match_type == '>=':
            ok = key_set >= set(limit_keys) or not limit_keys
        elif match_type == '==':
            ok = key_set == set(limit_keys) or not limit_keys
        elif match_type == '<=':
            ok = key_set <= set(limit_keys) or not limit_keys
        else:
            # Unknown comparison: nothing matches (mirrors original flow).
            ok = False
        if ok:
            matched.append(limit)
    if not matched:
        danger_check = 'columns number not enough'
    return matched, danger_check
0d5278bac1ec60244ba7ac0f411d6fb6e3cd598a
40,430
def triage_hashes(hash_map):
    """Partition hashed file info into (keep, remove) name lists.

    Per hash bucket (a list of (name, size) pairs):
      0. size zero (checked on the first entry) -> remove every name
      1. unique hash                            -> keep it
      2. exactly two entries                    -> keep both
      3. three or more entries                  -> keep first and last,
                                                   remove the middle ones
    """
    keep, remove = [], []
    for entries in hash_map.values():
        names = [name for name, _ in entries]
        if entries[0][1] == 0:
            # Zero-byte files are dropped regardless of duplication.
            remove.extend(names)
        elif len(names) == 1:
            keep.extend(names)
        else:
            keep.extend([names[0], names[-1]])
            remove.extend(names[1:-1])
    return keep, remove
0d2cb2f6cbff3436b780ac45eb1db0c3b7753488
40,432
def report_mobility(mv, rv, rsv, i):
    """Format a one-line report of mobility, regression and residual values
    at index `i`. `mv` is a mobility vector; whole-number mobilities are
    shown without decimals, others with one decimal."""
    mobility_fmt = "{:.0f}" if mv[i] % 1 == 0 else "{:.1f}"
    template = "$m$ = " + mobility_fmt + ", $r$ = {:.3f}, $s$ = {:.3f}"
    return template.format(mv[i], rv[i], rsv[i])
aca473381d3d9ae4f0c9ae408b73e6b83152470d
40,433
import re
def remove_outer_parens(s):
    """
    If there are outer parens when we don't need them, get rid of them
    Only one set

    >>> a = "(1910-1920)"
    >>> remove_outer_parens(a)
    '1910-1920'

    Strings that are not fully wrapped in parentheses are returned
    unchanged.
    """
    ret_val = s
    # fullmatch (and a raw pattern) so that only a string consisting
    # entirely of one parenthesized group is stripped; the previous
    # unanchored re.match silently discarded any trailing text
    # (e.g. "(abc) extra" -> "abc").
    m = re.fullmatch(r"\s*\((?P<inside_data>.*)\)\s*", s)
    if m:
        ret_val = m.group("inside_data")
    return ret_val
353aa7adb19dd6e521c038acac3dabac72030325
40,434
import re
def expNumRe(text):
    """
    Expand a numeric range expression to a list of names.

    e.g. 'n[01-03],n1[0-1]': ['n01','n02','n03','n10','n11']
    e.g. 'n[09-11].com': ['n09.com','n10.com','n11.com']

    Items without a [lo-hi] range are kept verbatim.
    """
    explist = []
    for regex in text.split(','):
        regex = regex.strip()
        r = re.match(r'(.*)\[(\d+)-(\d+)\](.*)', regex)
        if r:
            h, d1, d2, t = r.group(1), r.group(2), r.group(3), r.group(4)
            # Compare numerically: the previous string comparison broke
            # ranges such as '[9-10]' ('9' > '10' lexically).
            if int(d1) > int(d2):
                d1, d2 = d2, d1
            # Zero-pad to the narrower bound's width ('09'..'11' -> n09..).
            width = min(len(d1), len(d2))
            explist.extend(h + ('%0*d' % (width, c)) + t
                           for c in range(int(d1), int(d2) + 1))
        else:
            # keep original value if not matched
            explist.append(regex)
    return explist
cd42ba0fca726c69a0a3b4335373317467cc9463
40,435
def strip_header(src):
    """
    Strip the header (usually the "def" line(s)) from function source text.

    Assumes the docstring and decorators, if any, were already removed.
    Returns everything from the first non-blank line whose indentation
    differs from the first line's indentation, or '' if none is found.

    Parameters
    ----------
    src : str
        source code
    """
    lines = src.split('\n')
    base_indent = None
    for idx, line in enumerate(lines):
        stripped = line.lstrip()
        indent = len(line) - len(stripped)
        if base_indent is None:
            # First line fixes the reference indentation.
            base_indent = indent
            continue
        if not line:
            # Blank lines carry no indentation information.
            continue
        if indent != base_indent:
            return '\n'.join(lines[idx:])
    return ''
8d9afc2938332d4cc4da9666d2294e646e20bcce
40,436
def visualize_notebook_path(path, notebook_type='jupyter'):
    """Return a Cytoscape network widget for display in a Jupyter notebook.

    :param path object: dash_html_components object wrapping the cytoscape
        network (as returned by get_cytoscape_network())
    :param str notebook_type: notebook flavor; only 'jupyter' is currently
        supported ('jupyterlab' is a stub)
    :return: cyjupyter.cytoscape.Cytoscape object, or None when unsupported

    Example::

        net = get_cytoscape_network(G, identifier='corr', args={'title':'Cytoscape path', 'stylesheet':stylesheet, 'layout': layout})
        visualize_notebook_path(net, notebook_type='jupyter')
    """
    if notebook_type == 'jupyter':
        # The widget sits as the second child of the dash wrapper component.
        return path.children[1]
    # 'jupyterlab' (and anything else) is not implemented yet.
    return None
9823ea51f5fa7678a02e6cf784f304444fb976fc
40,437
import sys
import argparse
def parse_args(args=None):
    """Parse command-line arguments.

    Args:
        args: argument list to parse; defaults to the *current*
            sys.argv[1:]. (The old `args=sys.argv[1:]` default was
            evaluated once at import time, so later changes to sys.argv
            were silently ignored.)

    Returns:
        argparse.Namespace with image, outpath and distortion attributes.
    """
    if args is None:
        args = sys.argv[1:]
    parser = argparse.ArgumentParser(
        description="Apply fish-eye effect to images.",
        prog='python3 fish.py')
    parser.add_argument("-i", "--image", help="path to image file."
                        " If no input is given, the supplied example 'grid.jpg' will be used.",
                        type=str, default="grid.jpg")
    parser.add_argument("-o", "--outpath", help="file path to write output to."
                        " format: <path>.<format(jpg,png,etc..)>",
                        type=str, default="fish.png")
    parser.add_argument("-d", "--distortion",
                        help="The distoration coefficient. How much the move pixels from/to the center."
                        " Recommended values are between -1 and 1."
                        " The bigger the distortion, the further pixels will be moved outwars from the center (fisheye)."
                        " The Smaller the distortion, the closer pixels will be move inwards toward the center (rectilinear)."
                        " For example, to reverse the fisheye effect with --distoration 0.5,"
                        " You can run with --distortion -0.3."
                        " Note that due to double processing the result will be somewhat distorted.",
                        type=float, default=0.5)
    return parser.parse_args(args)
8a6feefecd3bfecd778ed308ce1c579fa55972db
40,438
import shutil
def process_java_resources(target, source, env):
    """SCons action: copy the first source resource file onto the first
    target path (metadata-preserving copy). Always returns None."""
    src_path = str(source[0])
    dst_path = str(target[0])
    shutil.copy2(src_path, dst_path)
    return None
3ee5194703956d43187a0c4f802c3ee4c132c18a
40,440
def function_paths(func, tags):
    """Sorted paths of all source files in `tags` that define function `func`."""
    matching = (tag['file'] for tag in tags if tag['symbol'] == func)
    return sorted(matching)
39931421d8220bd9aa74dc9d5813d29e7e686b5c
40,441
def float_to_string(value: float, replacement: str = "0,00") -> str:
    """Render *value* with two decimals using a comma as the decimal
    separator; return *replacement* when value is None."""
    if value is None:
        return replacement
    return format(value, ".2f").replace('.', ',')
95570ff4fcb78911c9f9a66f5559aea7fa73bbee
40,442
import random
def get_fake_interface_config(iface=None, iface_id=None, state=None, mac=None,
                              iface_type=None, networks=None):
    """Create a random fake interface configuration.

    Returns the serialized and parametrized representation of a node's
    interface configuration. Represents the average amount of data.

    Args:
        iface: interface name (default "eth0")
        iface_id: numeric id; 0 is a legitimate value (default: random)
        state / mac / iface_type / networks: overrides for the
            corresponding fields; falsy non-None values other than
            iface_id keep the original `or` fallback semantics
    """
    if iface_id is None:
        # `iface_id or random...` would also discard a legitimate id of 0,
        # so test for None explicitly.
        iface_id = random.randint(0, 1000)
    return {"name": iface or "eth0",
            "id": iface_id,
            "state": state or "unknown",
            "mac": mac or "08:00:27:a4:01:6b",
            "max_speed": 100,
            "type": iface_type or "ether",
            "current_speed": 100,
            "assigned_networks": networks or [{"id": 1,
                                               "name": "fuelweb_admin"},
                                              {"id": 3,
                                               "name": "management"},
                                              {"id": 4,
                                               "name": "storage"},
                                              {"id": 5,
                                               "name": "fixed"}]}
698c7bdbd328243de3a65024902f6c2f7d5bd75c
40,443
def merge(left, right):
    """Merge two sorted arrays into one sorted list (merge-sort step).

    :param left: sorted sequence
    :param right: sorted sequence
    :return: new sorted list containing all elements of both inputs;
        ties are resolved in favor of `right` first (stable w.r.t. the
        original comparison `left < right`)
    """
    # Debug print() calls removed: a library merge should not write to
    # stdout on every call.
    i_left = 0
    i_right = 0
    res = []
    while i_left < len(left) and i_right < len(right):
        if left[i_left] < right[i_right]:
            res.append(left[i_left])
            i_left += 1
        else:
            res.append(right[i_right])
            i_right += 1
    # Drain whichever side still has elements.
    res.extend(left[i_left:])
    res.extend(right[i_right:])
    return res
c8c3a004a9128be76def4842bed70dd38eab7212
40,444
def is_leap_year(year):
    """Check whether a year is a leap year.

    year - integer > 0

    Return values:
    True if the year is a leap year, false otherwise."""
    # Divisible by 4, except century years not divisible by 400.
    return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
edbad43ba0b4d7c1b8a6580c5e6e9e79582eade9
40,445
def _observation_is_not_empty(o): """ Test if an observation form is empty. Might occur when making list of nests and only pictures are given. Method will need to expand if empty cases occur on other forms. """ if o.RegistrationTID == 10: if o.ObsComment is None and o.ObsHeader is None and o.Comment is None and len(o.URLs) == 0: return False return True
95816b8adaa64c41e7f3dc2eb9ab2ddece14d727
40,446
def get_release_date(data):
    """Get a device's release year from its 'launch' field.

    Expects data['launch'] to be a string starting with the year, e.g.
    "2019 May". Returns the year as an int, or None when the field is
    missing or unparsable.
    """
    try:
        launch = data['launch']
        splt = launch.split(' ')
        return int(float(splt[0]))
    # ValueError covers non-numeric years such as "TBA"; AttributeError and
    # IndexError cover non-string / empty launch values. These previously
    # escaped and crashed the caller.
    except (KeyError, TypeError, ValueError, AttributeError, IndexError):
        return None
e250206326dd1f8d332a55b225e262ddabdd3791
40,447
import glob import re def _find_cpt_base(cpt_base): """ Find checkpoint file base name in current directory :param str cpt_base: Start of checkpoint file name that ends with a number of one to three digits followed by '.cpt' :return: The base name of the checkpoint files (everything but the number and ".cpt") :rtype: str """ possible_matches = glob.glob(cpt_base + "*.cpt") for f_name in possible_matches: match = re.match(r"({}.*?)\d{}\.cpt".format(cpt_base, "{1,3}"), f_name) if match: return match.group(1) raise ValueError( "No checkpoint file name found based on the base " "name {}.".format(cpt_base) )
765bc409c49ffdc9d0574fab93a4e3a8e5660ab2
40,448
def ackermann_no_memo(m, n):
    """Evaluate Ackermann's function without memoization.

    Implicitly returns None when m < 0 (mirrors the original guard set).
    """
    if m == 0:
        return n + 1
    if m > 0 and n == 0:
        return ackermann_no_memo(m - 1, 1)
    if m > 0 and n > 0:
        return ackermann_no_memo(m - 1, ackermann_no_memo(m, n - 1))
7391a64ee7ef4ac7692cd298ea5dd397ca760f31
40,449
import numpy as np
import math
def get_geometric_mean(dataset, metric):
    """
    Habibs geometric mean.

    Computes n/(n+m) * exp((1/(n+m)) * sum(log2(x_i))) over the `metric`
    column of `dataset`, where n counts positive values and m counts
    non-positive values (which are excluded from the log-sum).

    NOTE(review): the log-sum uses np.log2 but the exponential uses
    math.exp (base e); a textbook geometric mean would pair log2 with
    2**x or natural log with exp. Confirm whether this base mixture is
    intended before changing it.

    Args:
        dataset: pandas DataFrame (iterated with .iterrows()).
        metric: column name whose values are aggregated.

    Returns:
        float: the weighted quasi-geometric mean described above.

    Raises:
        ZeroDivisionError: if `dataset` is empty (n + m == 0).
    """
    zeroes = []
    non_zeroes = []
    sum_of_logs = 0.0
    for index, row in dataset.iterrows():
        if row[metric] > 0:
            non_zeroes.append(row[metric])
            sum_of_logs += np.log2(row[metric])
        else:
            zeroes.append(row[metric])
    m = len(zeroes)      # count of non-positive values
    n = len(non_zeroes)  # count of positive values
    nbynplusm = n/(n + m)
    right_side_of_exp = (1/(n + m)) * sum_of_logs
    exp_value = math.exp(right_side_of_exp)
    geometric_mean = nbynplusm * exp_value
    return geometric_mean
9a25218e146c323a76d3cc4fc8388ae946e683ee
40,450
def accum(s):
    """Duplicate each character of `s` by its 1-based position, capitalize
    the first character of each run, and join the runs with '-'.

    Parameters
    ----------
    s = str
        input string (letters only)

    Returns
    -------
    str
    """
    assert s.isalpha(), 'String can only include [a-zA-Z]'
    runs = [(ch * (pos + 1)).title() for pos, ch in enumerate(s)]
    return '-'.join(runs)
4cf5799bccfc605f3045a197cfa9067a3f38c7ed
40,451
def _as_bytes(s): """ Used to ensure string is treated as bytes The output Args: s (str): string to convert to bytes Returns: byte-encoded string Example: >>> str(_as_bytes('Hello, World').decode()) # Duck typing to check for byte-type object 'Hello, World' """ if isinstance(s, bytes): return s return bytes(s, encoding='latin_1')
fb5c2d09a1a1d930e05142ec4644554979156170
40,452
def _maybe_correct_vars(vars): """Change vars from string to singleton tuple of string, if necessary.""" if isinstance(vars, str): return (vars,) else: return vars
1aa46b03988f06a3697b703991c64899e173d0eb
40,453
def column_filter(column):
    """Standardize a column name: trim surrounding whitespace, replace
    spaces and hyphens with underscores, and lowercase.

    The trim now happens *first*: previously leading/trailing spaces were
    converted to underscores before strip() ran, so ' Name ' became
    '_name_' instead of 'name'.
    """
    return column.strip().replace(' ', '_').replace('-', '_').lower()
75b0933f1628e458f58daf8ec282e5545e249f5a
40,455
def record_property(name):
    """A read only record property proxy.

    Returns a property that reads attribute `name` from the instance's
    `_record` object.

    Example::

        class Obj(...):
            foo = record_propery('foo')
    """
    def _get(self):
        return getattr(self._record, name)
    return property(_get)
4caf1a1cf04e8cb19f91b3e27aa388bd519afbc8
40,456
import os
import yaml
def load_yaml():
    """Load the ff_data.yaml that sits next to this module and return it
    as a dict."""
    module_dir = os.path.dirname(os.path.abspath(__file__))
    yaml_path = os.path.join(module_dir, 'ff_data.yaml')
    with open(yaml_path, 'r') as stream:
        return yaml.safe_load(stream)
6367190b008ffecabdfad3bd6d5a72c85f78fcb4
40,458
def decrementing_pattern(size: int) -> bytes:
    """Return `size` bytes where each byte equals its own index mod 256.

    NOTE(review): despite the name, the *values* ascend with index — the
    original merely filled the buffer from the end, which does not change
    the result. Behavior preserved; confirm whether a truly decrementing
    sequence was intended.
    """
    return bytes(i & 0xff for i in range(size))
b33981468c9e23ae09582e873547147609ebd2e2
40,459
from typing import List
import json
def analyze_apache_logs(input_file: str, http_response_code_threshold=0.5) -> List:
    """
    Analyze a parsed Apache access log file to find malicious activity.

    :param input_file: Apache access log file (JSON format)
    :param http_response_code_threshold: HTTP 200 ratio under which a
        client is flagged as enumerating web directories
    :return: list of malicious log dicts (category, client_ip, datetime)
    """
    malicious_logs = []
    http_response_ratios = {}
    with open(input_file, 'r') as f:
        logs = json.load(f)
    # Flag Nmap scans and tally 200 vs error responses per client IP.
    for log in logs:
        if 'Nmap Scripting Engine' in log['user_agent']:
            malicious_logs.append({'category': 'NMAP Scanning',
                                   'client_ip': log['client_ip'],
                                   'datetime': log['datetime']})
        stats = http_response_ratios.setdefault(
            log['client_ip'], {'200': 0, 'error': 0})
        if log['response_code'] != '200':
            stats['error'] += 1
        else:
            stats['200'] += 1
        # Remember the most recent timestamp for reporting.
        stats['datetime'] = log['datetime']
    # Clients whose success ratio falls under the threshold are flagged.
    # (The previous version also wrote 'ratio'/'category' back into the
    # local stats dict — dead mutations with no effect on the result.)
    for client_ip, stats in http_response_ratios.items():
        total = stats['200'] + stats['error']
        ratio = stats['200'] / total
        if ratio < http_response_code_threshold:
            malicious_logs.append({'category': 'Web Directory Enumeration',
                                   'client_ip': client_ip,
                                   'datetime': stats['datetime']})
    return malicious_logs
2255e9ca7c43f93d28f61e6e25687d3b5f61ebd8
40,460
def calcserverusedmem(mems):
    """Return int(100*(MemTotal-MemFree)/MemTotal) from /proc/meminfo.

    `mems` is a sequence where mems[0] is MemTotal and mems[1] is MemFree.
    The int() conversion matches the documented contract; under Python 3
    the bare `/` returned a float (a Python-2 integer-division leftover).
    """
    return int(100 * (mems[0] - mems[1]) / mems[0])
057e35c5ac12e5dbaffd0048c230ec62bd83f162
40,461
def calculate_paye(gross_salary, currency_cost) -> float:
    """Return PAYE (income tax) in the user's currency.

    The base currency is Uganda Shillings; `currency_cost` converts the
    user's gross salary into UGX. Bracket boundaries are now inclusive:
    the previous strict `<`/`>` comparisons sent salaries sitting exactly
    on a threshold (e.g. 235000 UGX) into the unrelated top-bracket
    `else`, producing a wildly wrong tax.
    """
    gross_salary_ugx = gross_salary * currency_cost
    if gross_salary_ugx <= 235000:
        paye = 0
    elif gross_salary_ugx <= 335000:
        paye = 0.1 * (gross_salary - (235000 / currency_cost)) + (10000 / currency_cost)
    elif gross_salary_ugx <= 410000:
        paye = 0.2 * (gross_salary - (335000 / currency_cost)) + (10000 / currency_cost)
    elif gross_salary_ugx < 10000000:
        paye = 0.3 * (gross_salary - (410000 / currency_cost)) + (25000 / currency_cost)
    else:
        # Gross salary of 10 million UGX or more: extra 10% surcharge on
        # the amount above 10 million.
        paye = 0.3 * (gross_salary - (410000 / currency_cost)) + (25000 / currency_cost) + (
            gross_salary - (10000000 / currency_cost)) * 0.1
    return paye
d0a5af7e7ca2a4475fe9c1600145934ffe26c43c
40,462
def build_lookup_dict_snmp_trap(list_content):
    """
    Build a key/value lookup dict specifically for SNMP Traps, keyed by
    "server_ip" + "version".

    :param list_content: List of dicts to derive lookup structs from
    :return: dict mapping "<server_ip>+<version>" to the item id
    """
    lookup_dict = {}
    for item in list_content:
        server_ip = item.get('server_ip')
        version = item.get('version')
        trap_id = item.get('id')
        # id may legitimately be 0, so only None is rejected; the two key
        # components must be truthy.
        if not (server_ip and version) or trap_id is None:
            continue
        lookup_dict["{0}+{1}".format(server_ip, version)] = trap_id
    return lookup_dict
42b38ff7cd26cd5c785f474d131c67c303fbe1ce
40,463
import re
def parse_index(code):
    """Parse current item in ``code`` for an index expression

    Consumes the item only when it looks like an index/slice; emits
    parser errors (via ``code.error``) for redundant forms.

    NOTE(review): the pattern uses single optional digits (``\\d?``), so
    only indices 0-9 are supported; confirm whether multi-digit indices
    should parse. Also assumes the project ``code`` object exposes
    lookahead()/next()/error().

    Returns
    -------
    index : None | int | slice
        The index that was found (None when the item is not an index).
    """
    item = code.lookahead()
    if not item:
        return
    # <digit?><colon?><digit?> — e.g. "3", "1:4", ":5", "2:".
    m = re.match(r"^(\d?)(:?)(\d?)$", item)
    if not m:
        return
    # Only consume the token once we know it is an index expression.
    code.next()
    start, colon, stop = m.groups()
    if start or stop:
        start = int(start) if start else None
        stop = int(stop) if stop else None
        # A slice must span at least 2 elements to be meaningful.
        if stop is not None and stop < (start or 0) + 2:
            raise code.error("Redundant slice definition (length 1)")
    else:
        # Bare ":" selects everything — pointless as an index.
        raise code.error("Index does nothing")
    if colon:
        if start == 0:
            raise code.error("Redundant definition (omit '0' in '0:')")
        return slice(start, stop)
    else:
        assert stop is None
        return start
5f149b301b84c5aad0c2388692caeb7c5f8ebd9b
40,465
def get_mmdet_hash():
    """Return the git hash of the mmdetection revision this code targets.

    NOTE(review): the hash is hard-coded rather than queried from the
    installed repo; it must be updated manually whenever the pinned
    mmdetection version changes.
    """
    return '44a7ef2e80f355defb943d02bbee4a011b362a9d'
35a6a9ad393de948813b499de113468fb331a511
40,466
import math
def format_hash(hash_str: str, hash_len: int, hash_seg_len: int,
                hash_sep: str) -> str:
    """
    Format a hash string: keep only the first hash_len chars, then break
    the result into segments of hash_seg_len joined by hash_sep.

    Ex:
    >>> format_hash('abcdef1232567890', 8, 2, '-')
    'ab-cd-ef-12'
    """
    truncated = hash_str[:hash_len]
    if hash_seg_len >= hash_len:
        return truncated
    segments = (truncated[offset:offset + hash_seg_len]
                for offset in range(0, len(truncated), hash_seg_len))
    return hash_sep.join(segments)
2e7866fcc871bab1c1758403bc198a10c54c1334
40,468
def calc_consumer_sentiment(scenario, years, total_sales):
    """OPPORTUNITY: Consumer Sentiment - Increasing consumer sentiment and increasing sales of products

    Args:
        scenario (object): The farm scenario (currently unused)
        years (int): No. of years for analysis (currently unused)
        total_sales (list): Waste-adjusted yield for crop 1 (currently unused)

    Returns:
        int: always 0 — this is a placeholder. The originally advertised
        sentiment-adjusted sales output is not implemented yet (TODO).
    """
    customer_sentiment = 0
    return customer_sentiment
85a02031fa4285cf92751346f4b0bf2274d98045
40,469
import importlib
def package_is_installed(package_name):
    """Return true iff package can be successfully imported."""
    try:
        importlib.import_module(package_name)
    # Broad on purpose: a package whose import raises *anything* is not
    # usable, so report it as not installed.
    except Exception:
        return False
    return True
eb0c279bd85aae209331d4e6677fb19c31ab037e
40,470
def get_timing(design, workdir, stage):
    """Parse slack and TNS from the <design>.sta.<stage>.summary report.

    Returns:
        tuple(str, str): (slack, tns) formatted to two decimals, or the
        string 'None' for values the report does not contain.

    Fixes two defects of the original: the `while True: f.readline()`
    loop spun forever when the file lacked a 'TNS' line (readline()
    returns '' at EOF, matching neither marker), and the file handle
    leaked on a parse exception. Iterating the file under `with`
    addresses both.
    """
    slack, tns = 'None', 'None'
    summary_path = workdir + '/' + design + '.sta.' + stage + '.summary'
    with open(summary_path, 'r') as f:
        for line in f:
            # Values look like 'Slack: 1,234.56' — drop thousands commas.
            if 'Slack' in line:
                slack = format(float(line.split(':')[-1].strip().replace(',', '')), '.2f')
            elif 'TNS' in line:
                tns = format(float(line.split(':')[-1].strip().replace(',', '')), '.2f')
                break
    return slack, tns
3ff5c70191bfcf8692516098baeeaae11841ec57
40,471
def config_section_data():
    """Produce the default configuration section for app.config,
    when called by `resilient-circuits config [-c|-u]`

    Returns:
        unicode str: an INI-format [fn_mcafee_atd] section with
        placeholder values the operator must fill in.
    """
    # The literal below is emitted verbatim into app.config; keep the
    # placeholder tokens (<your_atd_url> etc.) intact.
    config_data = u"""[fn_mcafee_atd]
# url example: https://127.0.0.1:8888
atd_url=<your_atd_url>
atd_username=<your_atd_username>
atd_password=<your_atd_password>

# Amount of time in minutes before the function quits and throws an error
timeout=30

# Interval in seconds to wait and check if the file has finished being analyzed
polling_interval=60

# parameter with values either 'run_now' or 'add_to_q', defaults to 'add_to_q'
filePriority=add_to_q

# If your ATD server uses a cert which is not automatically trusted by your machine set trust_cert=False
trust_cert=[True|False]
"""
    return config_data
cf211b9b8c41e8206175ce534d8c3c0b6dbc9205
40,472
from typing import List
import difflib
def get_list_difference(list1: List[str], list2: List[str]) -> List[str]:
    """
    Return list of elements that help turn list1 into list2.

    This should be a "minimal" list of differences based on changing one
    list into the other.

    >>> get_list_difference(["a", "b"], ["b", "a"])
    ['b']
    >>> get_list_difference(["a", "b"], ["a", "c", "b"])
    ['c']
    >>> get_list_difference(["a", "b", "c", "d", "e"], ["a", "c", "b", "e"])
    ['c', 'd']

    The last case arises because the matcher asks to insert 'c', then
    delete 'c' and 'd' later.
    """
    # junk detection disabled (first argument None)
    matcher = difflib.SequenceMatcher(None, list1, list2)
    changed = set()
    for tag, i1, i2, j1, j2 in matcher.get_opcodes():
        if tag == "equal":
            continue
        changed.update(list1[i1:i2])
        changed.update(list2[j1:j2])
    return sorted(changed)
e09e87148f4827a766afe42d8878e2eeeb4f3127
40,473
def _lane_detail_to_ss(fcid, ldetail): """Convert information about a lane into Illumina samplesheet output. """ return [fcid, ldetail["lane"], ldetail["name"], ldetail["genome_build"], ldetail["bc_index"], ldetail["description"], "N", "", "", ldetail["project_name"]]
cad5549b67a9147685416e9982b219ed29577190
40,474
import re def _number_captions(md_body: str) -> str: """Incrementally number tables and image captions.""" images = {} tables = {} output = md_body.splitlines() for index, line in enumerate(output): if re.match(r'!\[.+\]\(.+\)', line): images[index] = line if output[index].lower().startswith('table: '): tables[index] = line for index, row in enumerate(tables): output[row] = f'Table: Table {index + 1} - {tables[row].split(": ")[1]}' for index, row in enumerate(images): output[row] = images[row].replace('![', f'![Figure {index + 1} - ') return '\n'.join(output)
694be9b6d37e22e97f21912b0a0469b11006e938
40,475
def nv_compute_capability(dev):
    """If *dev* is an Nvidia GPU :class:`pyopencl.Device`, return a
    tuple *(major, minor)* indicating the device's compute
    capability. Returns None for non-Nvidia devices.
    """
    try:
        return (dev.compute_capability_major_nv,
                dev.compute_capability_minor_nv)
    # Narrowed from a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit. Non-NV devices raise when the NV
    # attributes are queried; any such failure means "not applicable".
    except Exception:
        return None
eb7b6a9386b1f80019e94a0ed697f795d4474d79
40,477
import os
def get_file_name(path: str) -> str:
    """
    Return the basename of *path* truncated at the first dot.

    NOTE(review): 'archive.tar.gz' yields 'archive' (split at the *first*
    '.', not the last) — confirm callers expect this over splitext().

    :param path: the path to the file
    """
    basename = os.path.basename(path)
    return basename.split(".")[0]
800b04ac4348317aeb6d3106bcf5be876898ce48
40,478
def _get_captured_py_code(capfd) -> str: """Reads the Python code which is written to stdout.""" out, err = capfd.readouterr() assert not err py_code_lines = out.split('\n')[4:] py_code = '\n'.join(py_code_lines) return py_code
3cb36642d928746d2492a4d483760e9e9cacb7ac
40,479
import io
import zipfile
import os
def zip_dir_as_bytes(path):
    """
    Zip the contents of a directory and return the archive as bytes.

    :param path: input directory.
    :return: bytes of the ZIP_DEFLATED archive, entries stored with
        paths relative to `path`.
    """
    buffer = io.BytesIO()
    with zipfile.ZipFile(buffer, 'w', zipfile.ZIP_DEFLATED) as archive:
        for root, dirs, files in os.walk(path, topdown=False):
            # Files first, then directory entries (preserves the
            # original archive ordering).
            for name in files + dirs:
                full_path = os.path.join(root, name)
                relative_path = os.path.relpath(full_path, path)
                archive.write(filename=full_path, arcname=relative_path)
    return buffer.getvalue()
a01fca657936da2059025f595abdeabb1e16968e
40,480
def make_mmi_cmd(fa):
    """Return the minimap2 command string that builds the .mmi index
    for FASTA file `fa`."""
    return f'minimap2 -x map-pb -d {fa}.mmi {fa}'
8f03063cb3abcfee66ad364f788f4851c955d0b9
40,481
def _dict_keys_get(d, keys): """Recursively get values from d using `__getitem__` """ d = d for k in keys: d = d[k] return d
d83dfce489ecff1b53eb7434e12d615aaf76def8
40,482
from typing import List
from typing import Set
import os
def get_past_groups(past_groups_directory_path: str) -> List[Set[str]]:
    """Get all past groups of people listed in the input directory.

    Args:
        past_groups_directory_path: Path to a directory of text files
            containing past groups; each line of each file is one group,
            written as email addresses separated by spaces. Hidden files
            (leading '.') are skipped.

    Returns:
        A list of all past groups, each as a set of addresses.
    """
    past_groups = []
    for entry in os.scandir(past_groups_directory_path):
        # BUG FIX: `entry.is_file` without parentheses is a bound method
        # and always truthy, so subdirectories were opened (crashing the
        # scan); the method must be *called*.
        if not entry.name.startswith(".") and entry.is_file():
            with open(entry.path) as group_file:
                for line in group_file:
                    past_groups.append(set(line.split()))
    return past_groups
66bb92b2eab4178e735d68e75b53e93fd9ff77ff
40,483
def get_byte_array(intval):
    """Generate a list of byte values from a numpy integer
    (native byte order)."""
    # Iterating bytes already yields ints; list() materializes them.
    return list(intval.tobytes())
37902aca723abc342413eee7335a3ea86e9dedc2
40,485
def _remove_tokens(tokenized_docs, counts, min_counts, max_counts): """ Words with count < min_counts or count > max_counts will be removed. """ total_tokens_count = sum( count for token, count in counts.most_common() ) print('total number of tokens:', total_tokens_count) unknown_tokens_count = sum( count for token, count in counts.most_common() if count < min_counts or count > max_counts ) print('number of tokens to be removed:', unknown_tokens_count) keep = {} for token, count in counts.most_common(): keep[token] = count >= min_counts and count <= max_counts return [(i, [t for t in doc if keep[t]]) for i, doc in tokenized_docs]
2ef618c0ef7c7180c1426ca99f67ee98862813c8
40,486
def calculate_tf_idf(tf, idf):
    """Calculate TF-IDF values for each character over every movie.

    Args:
        tf: DataFrame of term frequencies, indexed by character, with one
            column per movie (NaN where the character is absent).
        idf: mapping (or Series) of character -> inverse document frequency.

    Returns:
        DataFrame: a copy of `tf` with every non-NaN cell multiplied by
        the character's IDF; NaN cells are left untouched.
    """
    tf_idf = tf.copy()
    for character in tf_idf.index:
        for movie in tf_idf.columns:
            value = tf_idf.loc[character, movie]
            # Keep the original NaN test; only scale real values.
            if str(value) != 'nan':
                # Single .loc[row, col] assignment — the previous chained
                # .loc[character][movie] = ... wrote through a potentially
                # temporary Series (SettingWithCopy) and could be lost.
                tf_idf.loc[character, movie] = value * idf[character]
    return tf_idf
6e7139cc7a3102ff10b60b561066d9f6bc2ce0e3
40,487
def _get_cmd_tree(subcmds): """Convert flat list of subcmd objects into hierarchical dictionary {'command name': {'subcommand name 1': subcmd1, 'subcommand name 2': subcmd2}}""" cmds = {} for sub_cmd in subcmds: cmd_dict = cmds.setdefault(sub_cmd.cmd, {}) cmd_dict[sub_cmd.name] = sub_cmd return cmds
76f44db545d298b94f9eb2323a5ade280b5f0380
40,490
import sys
import os
def isRunningFromCheckout(cScriptDepth = 1):
    """
    Checks if we're running from the SVN checkout or not.

    Walks up cScriptDepth+1 directory levels from this script's location
    looking for a Makefile.kmk or Makefile.kup marker file.

    NOTE(review): when __file__ is available, cScriptDepth is
    deliberately(?) reset to 1, overriding the caller's argument — the
    parameter only takes effect in the sys.argv[0] fallback path.
    Confirm that this is the intended behavior before changing it.
    """
    try:
        sFile = __file__;
        cScriptDepth = 1;
    except:
        # Frozen/embedded interpreters may lack __file__; fall back to
        # the invoked script path.
        sFile = sys.argv[0];

    sDir = os.path.abspath(sFile);
    while cScriptDepth >= 0:
        sDir = os.path.dirname(sDir);
        # Marker files that exist only in a source checkout.
        if os.path.exists(os.path.join(sDir, 'Makefile.kmk')) \
          or os.path.exists(os.path.join(sDir, 'Makefile.kup')):
            return True;
        cScriptDepth -= 1;

    return False;
65c631b1121a9b5a4da680893b8b5594b5916dbe
40,491
import subprocess
import re
def get_local_changes():
    """Determine whether local changes have been made to the current git
    repository.

    Returns:
        bool or None: True when `git status --porcelain` reports tracked
        modifications, False when clean, or a (None, None) tuple when not
        run inside a git repository.

    NOTE(review): the success path returns a single bool while the
    not-a-repo path returns a 2-tuple; untracked files are detected but
    deliberately not reported. Preserved as-is — confirm callers before
    unifying the return shape.
    """
    try:
        raw = subprocess.check_output(["git", "status", "--porcelain"])
    except subprocess.CalledProcessError:
        return None, None
    status = raw.strip().decode()

    local_mods = False
    # Each porcelain line starts with a short status key naming the change.
    for line in status.splitlines():
        if re.match(r"[ MADRCU]{1,5}", line):
            local_mods = True
        elif re.match(r"\?\?", line):
            # Indicates untracked files - we don't need to record this
            continue
        else:
            print("*****")
            print("Unexpected start of line - does this indicate a local modification?")
            print(line)
            print("*****")
            # play it safe and indicate that there are local changes
            local_mods = True
    return local_mods
242703c6eba2775c2dc199ac8f6911dfe314f56f
40,493
def _key_split(matchobj): """Expands a {key a+b+c} syntax into <span class="key">a</span> + ... More explicitly, it takes a regex matching {key ctrl+alt+del} and returns: <span class="key">ctrl</span> + <span class="key">alt</span> + <span class="key">del</span> """ keys = [k.strip() for k in matchobj.group(1).split('+')] return ' + '.join(['<span class="key">%s</span>' % key for key in keys])
519aa2512967aabf266df604280c07d85575a291
40,494
def ping():
    """Health-check endpoint used to monitor the extrapypi server."""
    # Flask-style (body, status-code) response tuple.
    return "pong", 200
ab42f43b56e6ee5982e1fd09b71af703985f89a9
40,495
def is_3d(ds, v):
    """Return True when variable ``v`` of dataset ``ds`` has exactly 3 dimensions."""
    return len(ds[v].dims) == 3
e095561f47f9daeeb57be327a8af93bb4ac2c2f4
40,496
def flat_ind_zp_so3(l, m, n, b):
    """
    Flat index of spectral element (l, m, n) in a zero-padded SO(3) spectrum.

    The SO(3) spectrum consists of matrices f_hat^l of size (2l+1, 2l+1) for
    l = 0, ..., b - 1, stored zero-padded in an array of shape (b, 2b, 2b).
    The zero-based 3D index of (l, m, n) there is (l, b + m, b + n); this
    returns the corresponding index into the flattened length-4b^3 vector,
    i.e. l * 4b^2 + (b + m) * 2b + (b + n).

    :param l, m, n: spectral indices
    :param b: bandwidth (L_max = b - 1)
    :return: flat index of (l, m, n) in the zero-padded vector
    """
    width = 2 * b
    # Horner-style factoring of l*4b^2 + (b+m)*2b + (b+n).
    return (l * width + (b + m)) * width + (b + n)
f1e9327e33ae31fce28c33d18c2c49b72adafb22
40,498
import warnings


def process_interaction_params(parameters):
    """Format and complete interaction parameters.

    Merges caller-supplied values over the defaults and warns on unknown keys.

    Parameters
    ----------
    parameters : dict or None
        May contain: color (r,g,b tuple), headheight, headwidth, zorder,
        direction ('forward'/'reverse'), linewidth, heightskew,
        sending_length_skew, receiving_length_skew, distance_from_baseline.

    Returns
    -------
    dict
        Completed parameter dictionary (zorder amplified by 100 so drawings
        composing the interaction group together on the Z axis).
    """
    merged = {
        'color': (0, 0, 0),
        'headheight': 7.0,
        'headwidth': 7.0,
        'zorder': 0,
        'direction': 'forward',
        'linewidth': 1.0,
        'heightskew': 10.0,
        'sending_length_skew': 0.0,
        'receiving_length_skew': 0.0,
        'distance_from_baseline': 10.0,
    }
    if parameters is None:
        return merged
    for key, value in parameters.items():
        if key in merged:
            merged[key] = value
        else:
            warnings.warn(f"""'{key}' is not a valid interaction parameter.""")
    # Amplify zorder so every drawing in the interaction groups on the Z axis.
    merged['zorder'] *= 100
    return merged
9665eb8ae602e3436609eda4920772c006a82127
40,500
import os
import re


def normpath(file_path):
    """Collapse duplicate slashes and resolve relative references in a path.

    Tapis filePaths are always absolute, so a leading '/' is forced and any
    trailing slashes are stripped.

    Arguments:
        file_path (str): Path to process

    Returns:
        str: Processed file_path
    """
    # Consolidate redundant slashes and relative references
    fp = os.path.normpath(file_path)
    # Tapis filePaths should always be absolute
    if not fp.startswith('/'):
        fp = '/' + fp
    # Strip trailing slash(es)
    fp = fp.rstrip('/')
    # Bug fix: for input "/" the strip left an empty string, and the original
    # then returned os.path.normpath("") == "." - a relative path. Root must
    # stay "/".
    return fp or '/'
2d2a04e378d53a6ed4d70b3ab65eb82f57b272e0
40,503
import torch


def kl_loss_full(mean, var, mean_prior, var_prior):
    """
    Mean KL divergence between two multivariate normal distributions.

    :param mean: mean of the posterior distribution
    :param var: covariance matrix of the posterior distribution
    :param mean_prior: mean of the prior distribution
    :param var_prior: covariance matrix of the prior distribution
    :return: mean of KL(posterior || prior)
    """
    posterior = torch.distributions.MultivariateNormal(
        loc=mean, covariance_matrix=var)
    prior_dist = torch.distributions.MultivariateNormal(
        loc=mean_prior, covariance_matrix=var_prior)
    divergence = torch.distributions.kl_divergence(posterior, prior_dist)
    return divergence.mean()
b3d7e01a37445b354f47daabb0159745d438b1df
40,504
def xml_tree_equivalence(e1, e2):
    """
    Rough XML comparison function based on https://stackoverflow.com/a/24349916/1294458.

    Compares tag, text, tail, attributes and children recursively; children are
    compared after sorting by tag name, so sibling order does not matter
    (strictly, only *leaf-list* elements mandate ordering, and same-tag
    siblings keep their relative order under a stable sort).

    :returns: True when the two trees are structurally equivalent.
    """
    if e1.tag != e2.tag:
        return False
    if e1.text != e2.text:
        return False
    if e1.tail != e2.tail:
        return False
    if e1.attrib != e2.attrib:
        return False
    if len(e1) != len(e2):
        return False
    # Bug fix: Element.getchildren() was removed in Python 3.9; iterating the
    # element itself yields its children.
    e1_children = sorted(e1, key=lambda x: x.tag)
    e2_children = sorted(e2, key=lambda x: x.tag)
    if len(e1_children) != len(e2_children):
        return False
    return all(xml_tree_equivalence(c1, c2) for c1, c2 in zip(e1_children, e2_children))
bdd135de65e0ecdf9f6d9d22f03b4b5dc06c476c
40,505
def flip_check(intron, flip_dict):
    """
    Look up an intron in a dictionary of introns whose scores did not survive
    boundary switching. Returns the stored boundary-switch score when the
    intron's name is present, otherwise None.
    """
    # dict.get already returns None on a missing key.
    return flip_dict.get(intron.get_name())
9b1a392a102c757ccf4938fe0bf3ea6ae4955238
40,507
def correct_title(title):
    """
    Return a properly formatted job title.

    Grad students and professors (of any rank) are normalised to single
    categories; everything else counts as research staff.
    """
    lowered = str(title).lower()
    if "grad student" in lowered:
        return "Grad student"
    if "professor" in lowered:
        # All professor ranks are grouped together.
        return "Professor"
    return "Research staff"
c1ddd3392bc0e1c310d21a2e61086526a086b240
40,508
def merge(user, default):
    """Merge two data structures in place.

    Every key of ``default`` is folded into ``user``: dicts are merged
    recursively, lists are concatenated (user first), and any other clash is
    overwritten by the default value. ``user`` is mutated and returned.
    """
    for key, fallback in default.items():
        if key not in user:
            user[key] = fallback
            continue
        existing = user[key]
        if isinstance(existing, dict) and isinstance(fallback, dict):
            user[key] = merge(existing, fallback)
        elif isinstance(existing, list) and isinstance(fallback, list):
            user[key] = existing + fallback
        else:
            user[key] = fallback
    return user
751e1c4148f355551577b2274ec65479770e8c62
40,510
def _mk_key(srev_info):
    """Return the (ISD-AS, interface id) key for a SignedRevInfo object."""
    rev = srev_info.rev_info()
    return (rev.isd_as(), rev.p.ifID)
780fb59859b514e0f6d4699cb1e6fef1d6e14042
40,511
def clean_copy(model, custom_objects=None):
    """Return a copy of the model without other model uses of its layers.

    The model is rebuilt from its own config (so no layer objects are shared
    with the original) and the weights are then copied across.
    """
    config = model.get_config()
    rebuilt = model.__class__.from_config(config, custom_objects=custom_objects)
    rebuilt.set_weights(model.get_weights())
    return rebuilt
d2bc0801c47b21361e51f61bae1054660abb28e5
40,512
import torch


def _to_leaf(state, latent_vars):
    """
    Re-wrap every latent variable in ``state`` as a fresh leaf tensor with
    gradients enabled, so subsequent autograd runs start from leaf nodes.

    :param state: mapping of variable name -> value; mutated in place
    :param latent_vars: names of the latent variables to convert
    :return: the (mutated) state mapping
    """
    for name in latent_vars:
        state[name] = torch.tensor(state[name], requires_grad=True)
    return state
1fa81646d265fffedcf6b12901890e779bcd58f7
40,513
def get_channels(ifo, plottype):
    """Get a list of channels to plot for a given IFO (one per arm, X and Y).

    Plot Type must be either 'irigb' or 'duotone'; anything else raises
    ValueError.
    """
    if plottype == "irigb":
        template = '{}:CAL-PCAL{}_IRIGB_OUT_DQ'
    elif plottype == "duotone":
        template = '{}:CAL-PCAL{}_FPGA_DTONE_IN1_DQ'
    else:
        raise ValueError("Must specify 'irigb' or 'duotone' for plottype.")
    return [template.format(ifo, arm) for arm in ('X', 'Y')]
d0c46f2a4f39b4a9eeb79a7ea9b43e994c95ad24
40,514
import numpy


def _read_matrix(matrix_file):
    """Load a whitespace-delimited numeric matrix from ``matrix_file`` as floats."""
    return numpy.loadtxt(matrix_file, dtype='float')
87b367a8041887f4f9cf783b92b010534b5b8e7e
40,515
def as_gray(frame):
    """
    Convert a frame to gray scale. This function implements lazy evaluation.

    :param frame: the frame to be converted
    :type frame: ``pims.frame.Frame`` or ``numpy.ndarray``
    :returns: the frame converted in gray scale
    :rtype: ``pims.frame.Frame`` or ``numpy.ndarray``
    """
    # RGB channel order: index 0 = red, 1 = green, 2 = blue.
    # Bug fix: the original swapped the green/blue names, so the luma weights
    # (ITU-R 709: 0.2125 R + 0.7154 G + 0.0721 B) were applied to the wrong
    # channels.
    red = frame[:, :, 0]
    green = frame[:, :, 1]
    blue = frame[:, :, 2]
    rval = 0.2125 * red + 0.7154 * green + 0.0721 * blue
    return rval.astype(frame.dtype)
31c5464adfbc1019a90c5a962c03ad3cb956ceb5
40,516
def use_tpu(tpu_cores: int, tpu_resource: str, tf_version: str):
    """An operator that configures GCP TPU spec in a container op.

    Args:
        tpu_cores: Required. Number of TPU cores, e.g. 8, 32, 128. See
            https://cloud.google.com/tpu/docs/kubernetes-engine-setup#pod-spec.
        tpu_resource: Required. TPU resource name, e.g. 'v2', 'preemptible-v1',
            'v3' or 'preemptible-v3'. See the same page for details.
        tf_version: Required. TensorFlow version for the TPU nodes, e.g.
            '1.12', '1.11', '1.9' or '1.8'. See
            https://cloud.google.com/tpu/docs/supported-versions.
    """
    resource_key = 'cloud-tpus.google.com/{}'.format(tpu_resource)

    def _set_tpu_spec(task):
        # Pin the TF version annotation and attach the TPU resource limit.
        task.add_pod_annotation('tf-version.cloud-tpus.google.com', tf_version)
        task.add_resource_limit(resource_key, str(tpu_cores))
        return task

    return _set_tpu_spec
ce5b9869512119966d59f1c0fb5ec53eeee53237
40,517
def context(text, position, level="paragraph"):
    """Get sentence or paragraph that surrounds the given position.

    NOTE(review): extraction is not implemented yet for either level; the
    function currently always returns an empty string.
    """
    if level == "sentence":
        pass  # TODO: sentence-level extraction not implemented
    elif level == "paragraph":
        pass  # TODO: paragraph-level extraction not implemented
    return ""
9ac8070d6874411677605372b46bbc64e979af44
40,519
from typing import Any
from pathlib import Path
import json


def _save_to_json(data: Any, path: Path) -> Path:
    """
    Save data in JSON format.

    Parameters
    ----------
    data : Any
        The data to serialize.
    path : Path
        Destination file path.

    Returns
    -------
    Path
        The path the data was written to.
    """
    with open(path, "w") as fh:
        json.dump(data, fh)
    return path
d3fb406bc4767e2e5ce88b83126e68b0906850b6
40,520
import uuid


def generate_dcos_engine_template(
    linux_ssh_public_key: str,
    num_masters: int,
    master_vm_size: str,
    num_windows_private_agents: int,
    windows_private_vm_size: str,
    num_windows_public_agents: int,
    windows_public_vm_size: str,
    num_linux_private_agents: int,
    linux_private_vm_size: str,
    num_linux_public_agents: int,
    linux_public_vm_size: str,
    windows_admin_user: str,
    windows_admin_password: str,
    linux_admin_user: str,
    dcos_engine_orchestrator_release: str,
):
    """Generate the cluster-definition template provided to dcos-engine.

    A short random suffix (8 uuid chars + 'dcos') is appended to every DNS
    prefix so that repeated deployments do not collide.
    """
    unique_id = str(uuid.uuid4())[:8] + 'dcos'
    agent_pools = [
        {
            "name": "wpub",
            "count": num_windows_public_agents,
            "vmSize": windows_public_vm_size,
            "osType": "Windows",
            "dnsPrefix": "wpub" + unique_id,
            "ports": [80, 443, 8080, 3389],
        },
        {
            "name": "wpri",
            "count": num_windows_private_agents,
            "vmSize": windows_private_vm_size,
            "osType": "Windows",
        },
        {
            "name": "linpub",
            "count": num_linux_public_agents,
            "vmSize": linux_public_vm_size,
            "osType": "Linux",
            "dnsPrefix": "linpub" + unique_id,
            "ports": [80, 443, 22],
        },
        {
            "name": "linpri",
            "count": num_linux_private_agents,
            "vmSize": linux_private_vm_size,
            "osType": "Linux",
        },
    ]
    return {
        "apiVersion": "vlabs",
        "properties": {
            "orchestratorProfile": {
                "orchestratorType": "DCOS",
                "orchestratorRelease": dcos_engine_orchestrator_release,
            },
            "masterProfile": {
                "count": num_masters,
                "dnsPrefix": "master" + unique_id,
                "vmSize": master_vm_size,
            },
            "agentPoolProfiles": agent_pools,
            "windowsProfile": {
                "adminUsername": windows_admin_user,
                "adminPassword": windows_admin_password,
            },
            "linuxProfile": {
                "adminUsername": linux_admin_user,
                "ssh": {
                    "publicKeys": [
                        {"keyData": linux_ssh_public_key}
                    ]
                },
            },
        },
    }
221703cb1c1c31eb7f4f951c92fe50a76932e60e
40,521
def __remap_path_distances(temporal_distances):
    """
    Map shortest-path temporal distances onto hop distances.

    Distinct reach times are ranked 1, 2, ... in increasing order and each
    node is assigned the rank of its reach time.

    :param temporal_distances: a dictionary of <node_id, reach_time>
    :return: a dictionary <node_id, hop_distance>
    """
    ranks = {t: pos + 1
             for pos, t in enumerate(sorted(set(temporal_distances.values())))}
    return {node: ranks[t] for node, t in temporal_distances.items()}
00fbe17003cddc23253572cfc0c9289078e8eb69
40,523
def ll_intersection(A, B, P, Q):
    """Intersection of the lines through segments AB and PQ.

    Uses the standard determinant (cross-product) formulation. Returns the
    intersection point (x, y); for parallel/coincident lines (zero
    denominator) returns (0.0, 0.0).
    """
    denom = (A[0] - B[0]) * (P[1] - Q[1]) - (A[1] - B[1]) * (P[0] - Q[0])
    if denom == 0:
        # Parallel or degenerate - no unique intersection.
        return 0.0, 0.0
    cross_ab = A[0] * B[1] - B[0] * A[1]
    cross_pq = P[0] * Q[1] - Q[0] * P[1]
    x = (cross_ab * (P[0] - Q[0]) - (A[0] - B[0]) * cross_pq) / denom
    y = (cross_ab * (P[1] - Q[1]) - (A[1] - B[1]) * cross_pq) / denom
    return x, y
96ffca17213b5fbc4ebd9fac338d4de30dd423a7
40,524
import json
import requests


def get_weather():
    """
    Collect and process current weather data, returning the summary string
    used by the main module's weather notifications.

    Reads API key, base URL and city name from ``config.json`` and queries
    the weather API (temperatures arrive in Kelvin and are converted to
    Celsius).
    """
    # Load API settings from the configuration file.
    with open('config.json', 'r') as configfile:
        settings = json.load(configfile)["weather"]
    complete_url = (settings["base_url"] + "appid=" + settings["api_key"]
                    + "&q=" + settings["city_name"])
    info = requests.get(complete_url).json()
    # Extract the relevant fields (Kelvin -> Celsius).
    air_temperature = round(info["main"]["temp"] - 273.15, 2)
    feels_like = round(info["main"]["feels_like"] - 273.15, 2)
    weather_description = info["weather"][0]["description"]
    location_name = info["name"]
    return ("At " + str(location_name) + " the air temperature is " + str(air_temperature)
            + ", but it feels like " + str(feels_like)
            + ". The weather description is " + str(weather_description) + ".")
f931c2ad85ba94951a733eaa1813725071f921bf
40,525
import os


def _get_previous_coverage():
    """Return the last reported coverage value.

    Reads the first (and only) line of the ``coverage`` file that sits next
    to this module and parses it as a float.
    """
    coverage_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'coverage')
    with open(coverage_path) as fh:
        first_line = fh.readline()
    return float(first_line.strip())
cfbfe19f68bdee2503ed61332c1ff7211225dc98
40,527
import json


def output_generic_result_lazily(out_file, name, retrieve_result):
    """Create a deferred writer for result data.

    Nothing is written immediately; the returned zero-argument function, when
    called, fetches the data via ``retrieve_result`` and appends one complete
    JSON document (``{name: result}``) plus a newline to ``out_file``,
    producing JSON Lines output.

    :param out_file: file handle to write data to.
    :param name: key to store the data under in the document.
    :param retrieve_result: no-argument function returning the data; only
        called when the returned function is invoked.
    """
    def output():
        payload = {name: retrieve_result()}
        json.dump(payload, fp=out_file)
        # Newline to generate JSON Lines data, one doc per line.
        out_file.write('\n')

    return output
a208da86f71df0a3a9a9e47be4ecb13b7b265ec4
40,528
def unindent(text, skip1=False):
    """Remove leading spaces that are present in all lines of ``text``.

    Parameters
    ----------
    text : str
        The text from which leading spaces should be removed.
    skip1 : bool
        Ignore the first line when determining the number of spaces to
        unindent, and remove all leading whitespace from it.
    """
    lines = text.splitlines()
    # Leading-space width of every non-blank line considered (bool skip1
    # slices off the first line when True).
    indents = [len(line) - len(line.lstrip(' '))
               for line in lines[skip1:]
               if line.lstrip(' ')]
    # Need strictly more measured lines than skipped ones (bool acts as 0/1).
    if len(indents) > skip1:
        depth = min(indents)
        if depth:
            if skip1:
                # Pad line 0 so the uniform slice below fully lstrips it.
                lines[0] = ' ' * depth + lines[0].lstrip()
            text = '\n'.join(line[depth:] for line in lines)
    return text
c787f5176b7b38ab5e6caec5175c4de3bbf1bbf5
40,529
from typing import IO
import sys


def stderr_input(prompt: str = '', file: IO = sys.stdout) -> str:  # pragma: no cover
    """
    Read a string from standard input, but prompt to ``file``.

    The trailing newline is stripped. If the user hits EOF (Unix:
    :kbd:`Ctrl-D`, Windows: :kbd:`Ctrl-Z+Return`), raise :exc:`EOFError`.
    When ``file`` is stdout this defers to the builtin :func:`input` (so GNU
    readline is used on Unix if enabled). The ``prompt`` string, if given, is
    written to ``file`` without a trailing newline before reading.
    """
    if file is sys.stdout:
        return input(prompt)
    try:
        stdin = sys.stdin
    except AttributeError:
        raise RuntimeError("stderr_input: lost sys.stdin")
    file.write(prompt)
    flush = getattr(file, 'flush', None)
    if flush is not None:
        flush()
    try:
        # Legacy attribute kept for parity with file-like objects that track it.
        file.softspace = 0  # type: ignore
    except (AttributeError, TypeError):
        pass
    line = stdin.readline()
    if not line:
        # An empty read means EOF (an empty input line would be '\n').
        raise EOFError
    if line.endswith('\n'):
        return line[:-1]
    return line
754b92300432951d0e6fd712ee8515682375a6d9
40,530
def scrape_images(url):
    """Scrape images from a website.

    Parameters:
        url (str): url to a website

    Returns:
        image_container (list): list of pd.DataFrame objects containing the
        table data.

    NOTE(review): scraping is not implemented yet; this stub always returns
    None.
    """
    image_container = None  # TODO: implement actual scraping
    return image_container
9664093a7e9ecdb45317a0dd699708842960ac00
40,533
def rc(seq):
    """Return the reverse complement of a DNA sequence (A/C/G/T/N).

    Raises KeyError for any other character, matching the strictness of a
    direct lookup table.
    """
    complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N'}
    return ''.join(complement[base] for base in reversed(seq))
15317f9178e8aae8255014756c58b928c305308c
40,534
def normalize_dict(dict_):
    """
    Replace all values that are single-item iterables with their sole element.

    Strings are left untouched (a one-character string is not unwrapped).

    :param dict dict_: Dictionary to normalize.
    :returns: Normalized dictionary.
    """
    normalized = {}
    for key, value in dict_.items():
        if not isinstance(value, str) and len(value) == 1:
            normalized[key] = value[0]
        else:
            normalized[key] = value
    return normalized
6c56fbd72bbc334cd0586e9caa596c0b26ca5c0d
40,535
def get_snapshot_id(snapshot):
    """Get the backend snapshot id.

    Prefer ``provider_location`` (set for managed snapshots); fall back to
    the snapshot's own ``id`` when it is empty/None.
    """
    provider_location = snapshot['provider_location']
    if provider_location:
        return provider_location
    return snapshot['id']
806132d56e3ba617ffba299a5f766a6c017d7caa
40,536
def get_pk_query_name(model):
    """Return the model's primary-key column qualified with its DB table name."""
    meta = model._meta
    return '{0}.{1}'.format(meta.db_table, meta.pk.column)
34549b6ae93e12c9a613377419935d5c26e173e4
40,537
import sys
import subprocess


def spel1(word,lang='th_TH'):
    """Spell-check a word using the external ``hunspell`` program.

    Takes a str, returns a list of suggested corrections (empty list when
    hunspell recognises the word), or None when the spawned command fails.
    (Docstring translated from Thai.)
    """
    try:
        # SECURITY NOTE(review): `word` is interpolated straight into a shell
        # command line; caller-supplied input could inject shell syntax.
        # Consider subprocess.run([...], shell=False) with a list argv.
        if sys.platform == 'win32':
            cmd = "echo "+word+" | hunspell -d "+lang
        else:
            cmd = 'echo "'+word+'" | hunspell -d '+lang
        getoutput = subprocess.getoutput(cmd)
        del cmd
        get = getoutput.split("\n")
        # First output line is the hunspell version banner - drop it.
        del get[0]
        if get[0] == '*':
            # '*' means hunspell accepted the word: no suggestions.
            getoutput = []
        else:
            if get[1] == "":
                del get[1]
            # Suggestion line looks like "& word n off: s1, s2, ..." -
            # keep only the comma-separated suggestions after the colon.
            get = get[0].split(":")
            del get[0]
            getoutput = get[0].replace(" ","")
            getoutput = getoutput.split(",")
            del get
        return getoutput
    except subprocess.CalledProcessError:
        # NOTE(review): getoutput() swallows exit codes, so this handler is
        # unlikely to trigger; kept as-is from the original.
        print('plase install hunspell')
        return None
a4fc9978a11df7a3632414e0599e26f52416d35b
40,539
def parentList(cursor, cd_tax, includeBaseTax=True):
    """
    Retrieve all the parents of a taxon in the database.

    Parameters:
    ----------
    cursor: Psycopg2 cursor
        cursor for the database connection
    cd_tax: Int
        identifier of the taxon whose parent taxa are searched
    includeBaseTax: bool
        whether cd_tax itself is included in the result

    Returns:
    -------
    all_parents: List(Int)
        sorted list of identifiers of the parent taxa
    """
    query = ("SELECT tp.cd_tax FROM taxon t "
             "JOIN taxon tp ON t.cd_sup=tp.cd_tax WHERE t.cd_tax=%s")
    all_parents = [cd_tax] if includeBaseTax else []
    current = [cd_tax]
    # Walk up the hierarchy one parent at a time until no row comes back.
    while True:
        cursor.execute(query, current)
        current = cursor.fetchone()
        if not current:
            break
        all_parents += current
    all_parents.sort()
    return all_parents
c355d8a2b5fbb6152678359e0290e56e95193ae9
40,540
import math


def clean_file(string):
    """
    Clean unnecessary characters out of a subprocess.Popen() output string.

    Every character except digits, '.' and 'x' is removed. If the remainder
    parses as a float it is floored and returned as an int; otherwise (e.g.
    resolutions like '1920x1080', or an empty remainder) the cleaned string
    is returned unchanged.
    """
    keep = set('0123456789.x')
    cleaned = ''.join(ch for ch in string if ch in keep)
    try:
        return math.floor(float(cleaned))
    except (ValueError, OverflowError):
        # Bug fix: the original bare `except` swallowed every exception.
        # float() raises ValueError for non-numeric remainders and
        # math.floor() raises OverflowError for infinities - nothing else
        # here can legitimately fail.
        return cleaned
510db481bfc42f4a2ff7156b214d77d0d297dc8d
40,542
def bool_filter(val):
    """
    Convert common true/false strings to Python bools.

    Anything outside the recognised spellings is returned unchanged.
    """
    if val in ('True', 'TRUE', 'true', 'yes', 'Yes'):
        return True
    if val in ('False', 'FALSE', 'false', 'no', 'No'):
        return False
    return val
0b71f4b337dedf14b638b7bc2641e365c69d081e
40,543