content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def PIL_thresh(IMG, threshold=191):
    """Binarize a PIL image: pixels above *threshold* become 255, others 0.

    Based on https://stackoverflow.com/a/6490819

    :param IMG: PIL ``Image`` (single-band/grayscale assumed -- TODO confirm)
    :param threshold: cutoff; pixels strictly greater than it map to 255
    :return: new image produced by ``Image.point``
    """
    # The original lambda `p > threshold and 255` evaluated to False (i.e. 0)
    # below the threshold; the conditional expression states that explicitly.
    return IMG.point(lambda p: 255 if p > threshold else 0)
ceb80075040e979b7a4c30235eb8056468751710
16,497
def _set_args(args, line_args, file_name): """ Sets the arg list to contain all of the original CL arguments and the arguments provided in the file. :list args: copy of sys.argv command line args :list args_list: ordered list of key, value args from file :str file_name: name file to remove from args list """ args = args + line_args args.remove(file_name) return args
1c38acd5e34f7ee2e27f2396ec6c917ecfde996a
16,500
import csv


def read_csv(input_file_path, verbose=False, delimiter=','):
    """Read a CSV file and return its rows as a list of lists.

    :param input_file_path: path to csv file
    :param verbose: when True, print the accumulated rows after each record
    :param delimiter: fields separator in csv file
    :return: list of records
    """
    records = []
    with open(input_file_path) as handle:
        for record in csv.reader(handle, delimiter=delimiter):
            records.append(record)
            if verbose:
                print(records)
    return records
57597e820750b11382cd27e31135fa8b6f45153e
16,501
import glob


def get_incr_path(path):
    """Return *path* with the next unused 4-digit incremental suffix."""
    existing = sorted(glob.glob(path + '.*'))
    if existing:
        # Last 4 characters of the highest existing suffix, plus one.
        next_seq = int(existing[-1][-4:]) + 1
    else:
        next_seq = 0
    return '{0}.{1:04d}'.format(path, next_seq)
7991bf4ebbb051633f53871237e140e0b2283ba2
16,502
def get_metric_name_from_task(task: str) -> str:
    """Get the name of the metric for the corresponding GLUE task.

    If using `load_best_model_at_end=True` in TrainingArguments then you need
    `metric_for_best_model=metric_name`. Use this method to get the
    metric_name for the corresponding GLUE task.
    """
    special_metrics = {
        "stsb": "pearson",
        "cola": "matthews_correlation",
    }
    return special_metrics.get(task, "accuracy")
2b911db666e74345a288d1781c2368dfd7a22a74
16,503
import string


def removePunctuationField(myString):
    """Remove a single trailing punctuation character from the date field.

    :param myString: field text; may be empty
    :return: the text without its final char when that char is punctuation
    """
    # Guard against an empty field: the original indexed myString[-1]
    # unconditionally, which raises IndexError on "".
    if myString and myString[-1] in string.punctuation:
        return myString[:-1]
    return myString
4d630d30ce3e92175772ac23ad6ec34a8d852674
16,504
def slice_image(im, dict_obj):
    """Slice the bounding box out of the image and return.

    :param im: array indexed as [rows, cols, channels]
    :param dict_obj: dict with 'left', 'top', 'right', 'bottom' pixel bounds
    :return: the cropped sub-array
    """
    top, bottom = dict_obj['top'], dict_obj['bottom']
    left, right = dict_obj['left'], dict_obj['right']
    return im[top:bottom, left:right, :]
1c4a14386a6d70a922af6bfc2c7415553a48a52d
16,505
from unittest.mock import Mock


def fixture_output_block():
    """Create mock output block."""
    mock_block = Mock()
    return mock_block
add7ad3a896b34a2161c5daa6db26aa61e9df76f
16,506
from typing import Optional


def check_n_jobs(n_jobs: Optional[int] = None):
    """Parse the ``n_jobs`` parameter for multiprocessing.

    None means serial (1 worker); -1 means "all cores" (multiprocessing's
    None); any other value passes through unchanged.
    """
    if n_jobs is None:
        return 1
    if n_jobs == -1:
        return None
    return n_jobs
0d9c67c2e995df9fb3e497db40466eab9b5041d2
16,507
def _exec_query(layout, **query): """raises error if file is not found for the query""" try: res = layout.get(**query)[0] except Exception as e: msg = "could not find file matching these criteria: {q}".format(q=query) raise Exception(msg) from e return res.path
e238d495d566796ce7dc33f45565b838743ba29a
16,508
from datetime import datetime


def convert_to_date(date):
    """Convert to date with '%B %d, %Y' format (e.g. 'January 01, 2020')."""
    fmt = '%B %d, %Y'
    return datetime.strptime(date, fmt)
10db2be45c5cae52365858ed324540486f6e5eff
16,509
from datetime import datetime


def earlier_than_now_timestamp():
    """Return an epoch timestamp 10 seconds in the past."""
    now_ts = int(datetime.timestamp(datetime.now()))
    return now_ts - 10
275b0a0471277ebf464eb95e8a0ba8771f01194d
16,510
def get_dtim(tree, hart):
    """Get the DTIM associated with the hart (None when the hart has none)."""
    dtim_ref = hart.get_field("sifive,dtim")
    if not dtim_ref:
        return None
    return tree.get_by_reference(dtim_ref)
991351235d4179d1e9ac75c3c9746020a81a9cc9
16,511
def _check_startyear(cfgs): """ Check to see that at most one startyear is defined in the config Returns ------- int startyear Raises ------ ValueError if more that one startyear is defined """ first_startyear = cfgs[0].pop("startyear", 1750) if len(cfgs) > 1: for cfg in cfgs[1:]: this_startyear = cfg.pop("startyear", 1750) if this_startyear != first_startyear: raise ValueError("Can only handle one startyear per scenario ensemble") return first_startyear
a43932082bcd128a9badf9b26648e96a9a4ef9bd
16,512
def check_if_point_in_extents(point, extents):
    """Check whether a 2D point lies within a bounding box (inclusive).

    :param point: (x, y) pair
    :param extents: (xmin, ymin, xmax, ymax)
    :return: True when the point is inside or on the boundary
    """
    xmin, ymin, xmax, ymax = extents
    # Chained comparisons replace the bitwise-& chain on booleans; the
    # original's trailing bare `return` was unreachable and is dropped.
    return xmin <= point[0] <= xmax and ymin <= point[1] <= ymax
f25fe2d8b49a44f1866695c749ce1933d6e998fc
16,514
def escape_string(s):
    """
    Escapes special characters and adds quotes for I3.

    :param s: string to be processed
    :return: modified string for I3 consumption
    """
    # Backslash first so the later escapes are not double-escaped.
    n = s.replace('\\', '\\\\')
    n = n.replace('"', '\\"')
    # BUGFIX: the original appended a stray '"' after each of \r, \n and
    # \x00 (e.g. replaced '\r' with '\\r"'), corrupting the output.
    n = n.replace('\r', '\\r')
    n = n.replace('\n', '\\n')
    n = n.replace('\x00', '\\x00')
    return '"' + n + '"'
0c8cfc17ad8f4534d7d00bd7db252a1dce2105a5
16,516
def aggregate_labels(label_list):
    """Aggregate a sequence of labels.

    Flattens the nested label lists and removes duplicates; result order is
    unspecified (set-based, as in the original).
    """
    unique = {label for labels in label_list for label in labels}
    return list(unique)
37e8e3ebe2c6ab9d4a6eee8556605f42aac149b0
16,517
def has_errors(result):
    """This function checks if a GqlResponse has any errors.

    Args:
        result (GqlResponse): [data, errors]

    Returns:
        (boolean): Returns `True` if a transaction has at least one error.
    """
    _data, errors = result
    return len(errors) > 0
15fddcf9b2231c946fabb6603edc2635c8b9478f
16,518
def map_fields(record, field_map):
    """
    Replace field names according to field map. Used to replace ArcGIS Online
    reference feature service field names with database field names.

    Parameters
    ----------
    record : dict
        Source record keyed by service field name.
    field_map : dict
        Mapping holding a "fields" dict of {service name: db name}.

    Returns
    -------
    dict
        Record keyed by mapped names; unmapped keys pass through unchanged.
    """
    mapping = field_map["fields"]
    # `or key` keeps the original key when the mapping is missing or falsy,
    # matching the original truthiness check.
    return {mapping.get(key) or key: value for key, value in record.items()}
4ccf63a62e3df0eb69a9024b9a99064c08d5ae53
16,520
def is_valid_header(val):
    """Header must be a dict carrying all required map metadata keys."""
    required = ('schema', 'homepage', 'map_name', 'map_id', 'map_description')
    if not isinstance(val, dict):
        return False
    return all(key in val for key in required)
f100ce3f1b184a8b79b30095897c3f85b574e4e6
16,521
def get_dimensions_by_order(dims_in, dataset):
    """get dimension

    Parameters
    ----------
    dims_in: int or list of int
        the dimensions by numerical order
    dataset: sidpy.Dataset

    Returns
    -------
    dims_out: list of [index, axis] pairs
    """
    if isinstance(dims_in, int):
        dims_in = [dims_in]
    # Keep only int entries naming an existing axis, paired with the axis.
    return [[idx, dataset._axes[idx]]
            for idx in dims_in
            if isinstance(idx, int) and idx in dataset._axes]
3430f045ed57e3d98aec15ffb7298d1c727bee27
16,522
import pickle


def load_data(pathToPickleFile):
    """
    Read in a pickled file.

    File: ground_truth_dict = load_data('ground_truth.pkl')
    Dir:  ground_truth_dict = load_data(os.path.join(output_dir, 'ground_truth.pkl'))

    :param pathToPickleFile: pickled file to read in, e.g. 'dataset.pkl'
    :return: the data from the pickled file
    """
    with open(pathToPickleFile, 'rb') as pickle_file:
        return pickle.load(pickle_file)
26655cadd9ba4130b9280eaaa97cdc0b05563521
16,523
from typing import Dict


async def health_check() -> Dict[str, str]:
    """
    a call to /health will run a health check against the open-sim-api and
    will return with 200 and an indicator of health if the api is up and
    operating normally
    """
    status = {"message": "The open sim service is running and healthy"}
    return status
d269ecb68df07f449e4125d517aa518dbd770dea
16,526
def coroutine(func):
    """Decorator that primes a generator-based coroutine.

    Calls the generator function, advances it past the first ``yield`` so it
    is immediately ready for ``.send()``, and returns the generator.
    """
    from functools import wraps  # local import keeps the block self-contained

    @wraps(func)  # preserve the wrapped function's name and docstring
    def inner(*args, **kwargs):
        gen = func(*args, **kwargs)
        next(gen)
        return gen
    return inner
e102070e08c763f744ea90c3806e5c114e04be06
16,527
def _measure_bucket_max(table, maxbin): """check for buckets that are too large (_l1_bucket_max)""" okay = True max_bucket = 0 bad_buckets = 0 for k, vl in table.items(): lvl = len(vl) if lvl >= maxbin: if lvl > max_bucket: max_bucket = lvl bad_buckets = bad_buckets + 1 okay = False return okay
29e1f9f82d064ceccf77c6f9e496bdc711f8a153
16,528
import numpy


def degree_correlation_coefficient(graph):
    """
    Parameters
    ----------
    graph : A graph instance whose interface should resemble a
        ``networkx.DiGraph``.

    Returns
    -------
    A pearson-like correlation coefficient that ranges from ``-1`` to ``1``.

    Notes
    -----
    For directed graphs only.
    """
    # NOTE(review): uses ``edges_iter``/``adjacency_iter`` — the networkx 1.x
    # API, removed in networkx >= 2.0. Confirm the pinned networkx version.
    power = numpy.power

    def _undirected_correlation(graph):
        # sum over j*k
        multi_sum = 0.0
        # (sum over j + k)^2
        squared_sum = 0.0
        # sum over j^2 + k^2
        denominator_sum = 0.0
        degree = graph.degree()
        # Each edge contributes the degrees of its two endpoints.
        for (u, v) in graph.edges_iter():
            src_degree = float(degree[u])
            tar_degree = float(degree[v])
            multi_sum += src_degree * tar_degree
            denominator_sum += power(src_degree, 2) + power(tar_degree, 2)
            squared_sum += src_degree + tar_degree
        # normalised by a small factor and the number of edges
        squared_sum = power(squared_sum, 2) / float(graph.size() * 4)
        return (multi_sum - squared_sum) / ((denominator_sum / 2.0) - squared_sum)

    def _directed_correlation(graph):
        # sum over j*k
        multi_sum = 0.0
        # (sum over j + k)^2
        squared_sum = 0.0
        # sum over j^2 + k^2
        denominator_sum = 0.0
        in_degree = graph.in_degree()
        out_degree = graph.out_degree()
        # Each edge (u, v) contributes u's out-degree and v's in-degree.
        for (u, nbrs) in graph.adjacency_iter():
            for v in nbrs:
                src_degree = float(out_degree[u])
                tar_degree = float(in_degree[v])
                multi_sum += src_degree * tar_degree
                denominator_sum += power(src_degree, 2) + power(tar_degree, 2)
                squared_sum += src_degree + tar_degree
        # normalised by a small factor and the number of edges
        squared_sum = power(squared_sum, 2) / float(graph.size() * 4)
        return (multi_sum - squared_sum) / (0.5 * denominator_sum - squared_sum)

    if graph.is_directed():
        return _directed_correlation(graph)
    else:
        return _undirected_correlation(graph)
46d56bf89c0503d5233b96149b60e92ef3964e3d
16,529
def joinParameter(*args):
    """Joins dictionaries in a consistent way.

    For multiple occurrences of a key the value is defined by the first
    key : value pair.

    Arguments:
        *args: parameter dictionaries

    Returns:
        dict: the joined dictionary
    """
    joined = {}
    for param_dict in args:
        for key, value in param_dict.items():
            # First occurrence of a key wins; later duplicates are ignored.
            joined.setdefault(key, value)
    return joined
d990e0b107f95e49937febe5edc8cf906f5d2097
16,530
import contextlib
import socket


def port_available(port):
    """
    Find if a port is in use

    From http://stackoverflow.com/a/35370008

    Args:
        port: The port to be checked.

    Returns:
        True if the port is available; False if it is in use.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with contextlib.closing(sock):
        # connect_ex returns 0 when something is listening on the port.
        return sock.connect_ex(("localhost", port)) != 0
5a29eb8a252591a5a05deb759e3811bd52a9940d
16,531
def format_response_df(df):
    """
    format response df to match output format of name/street/zip/city

    keywords:
    df -- dataframe object from requests response
    """
    df = df[['merchantName', 'street', 'zip', 'city', 'lng', 'lat']].copy()
    df.columns = ['Name', 'StrNr', 'PLZ', 'Stadt', 'lon', 'lat']
    # Canonical store names: (substring to look for, replacement), applied
    # in the same order as the original chain of list comprehensions.
    renames = [
        ('Bio Company', 'Bio Company'),
        ('EDEKA', 'Edeka'),
        ('Rewe', 'REWE'),
        ('REWE', 'REWE'),
        ('ALDI', 'Aldi'),
        ('Netto', 'Netto'),
        ('Nahkauf', 'Nahkauf'),
        ('E center', 'E-Center'),
        ('PENNY', 'Penny'),
        ('Tengelmann', "Kaiser's Tengelmann"),
    ]
    for needle, canonical in renames:
        df.Name = [canonical if needle in name else name for name in df.Name]
    df.Stadt = ['Berlin' if 'berlin' in city.lower() else city for city in df.Stadt]
    # Tengelmann rows are dropped entirely.
    df = df[df.Name != "Kaiser's Tengelmann"]
    return df
e07459df8c4b66a466bc847189930a03239a1756
16,532
import os


def get_flo_files_from_n(dirname, model_name, n_list, include_dirname_in_path=True):
    """get the flo files in ascending order"""
    if dirname == '':
        dirname = os.getcwd()
    names = ['%s_%i.flo' % (model_name, n) for n in n_list]
    if include_dirname_in_path:
        return [os.path.join(dirname, name) for name in names]
    return names
e2f854ddfd02a96a998b85527d688a44a8ee167b
16,534
def _labels_output_string(out_str): """ Read the labels out of a MESS input file """ lbls = [] for line in out_str.splitlines(): if 'T(K)' in line and '->' not in line: rxns = line.strip().split()[1:] line_lbls = [rxn.split('->') for rxn in rxns] line_lbls = [lbl for sublst in line_lbls for lbl in sublst] lbls.extend(line_lbls) # Remove duplicates and make lst a tuple lbls = tuple(set(lbls)) return lbls
ac0127abfe9e45aebdd48e5ea1b214620adcf32a
16,535
def DEFAULT_REPORT_SCRUBBER(raw):
    """Remove breakdown and properties."""
    dropped = ("breakdown", "properties")
    scrubbed = {}
    for key, value in raw.items():
        if key not in dropped:
            scrubbed[key] = value
    return scrubbed
845f1040728b6826ca6f3fe858730085aeb7787d
16,536
def parseTargetPath(targetPath):
    """
    Parse a target path, like 'Jupiter/Voyager1' into parts and return as an array.
    Returns [system,craft,target,camera], with None for unspecified parts.
    Then can take apart result with something like -
        pathparts = parseTargetPath(targetPath)
        pathSystem, pathCraft, pathTarget, pathCamera = pathparts
    Returns None if targetPath is empty or None
    """
    if not targetPath:
        return None
    parts = targetPath.split('/')
    # Pad to at least four slots, then normalise blanks to None.
    parts += [''] * (4 - len(parts))
    return [part.strip() or None for part in parts]
957e5342d19b557d6e001e4115cb5a8097d282b7
16,537
def padded_insert(items, index, value, null_val=None):
    """
    insert value into the items list at given index

    if index is larger than length of list then extend it up to index and
    pad the extra space with null_val
    """
    gap = index - len(items)
    if gap < 0:
        # Index already exists: overwrite in place.
        items[index] = value
    else:
        # Pad up to the index (gap may be 0 for a plain append).
        items.extend([null_val] * gap)
        items.append(value)
    return items
725dc2d0e314e1bc76bdb7f6d778e9b13316b2aa
16,540
def mock_invalid_dropbox_config():
    """Mock invalid Dropbox config."""
    return dict()
5614f08ae435e688b71d0a4ee1000c2e9cb37786
16,541
import re def _alphanum_key(string): """Parse a string into string and integer components.""" parity = int(string[0] in '0123456789') return [ int(x) if i % 2 != parity else x for i, x in enumerate(re.split('([0-9]+)', string)[parity:]) ]
12d3f6761a60442327a52c3c60751fe14fa92545
16,543
import base64


def base64_encode(bytes_data: bytes) -> str:
    """Base64-encode a byte string and return the result as ``str``.

    Binary safe — suitable e.g. for raw image bytes read from disk.
    """
    encoded = base64.b64encode(bytes_data)
    return encoded.decode()
8a339dc791236a9b58ed8fb585a33a3a05417684
16,544
from typing import List


def solution(nums: List[int]) -> bool:
    """Jump Game: can the last index be reached starting from index 0?

    Scans from the back, tracking the left-most index known to reach the
    end; an index reaches the end if it can jump to that target.

    Unlike the original, this does NOT reverse (mutate) the caller's list.
    """
    target_idx = len(nums) - 1
    for idx in range(len(nums) - 2, -1, -1):
        if idx + nums[idx] >= target_idx:
            target_idx = idx
    return target_idx == 0
2d5ce63379c818f1559568dd59082d25b4f03a5d
16,545
def get_registration_response_key(question):
    """
    For mapping schemas to schema blocks: Answer ids will map to the
    user's response
    """
    # Prefer a truthy 'qid'; otherwise fall back to 'id', then ''.
    qid = question.get('qid', '')
    return qid if qid else question.get('id', '')
43176dc20070cf244c779ec6012882d5e72c4a7d
16,546
import numpy


def f(y, t, all_dict, gating):
    """
    Calculate derivatives of state variables between spikes.

    Parameters
    ----------
    y : array
        state variables of synapses
    t : float
        time
    all_dict : dictionary
        parameters necessary to calculate derivatives of states
    gating : dictionary
        specify state variables necessary to be calculated.

    Returns
    -------
    derivs : array
        Derivatives of state variables.
    """
    # Unpack current values of y. NOTE(review): meanings inferred from the
    # flag names below — n: available resources, p: release probability,
    # Tau_r: recovery time constant, p0: baseline release probability,
    # D: desensitisation level. Confirm against the model definition.
    n, p, Tau_r, p0, D = y
    derivs = numpy.zeros(len(y))
    initial_y = n  # kept from the original; unused afterwards
    # Depression: n recovers towards 1. With use-dependent recovery (UR)
    # the dynamic Tau_r state is used, else the fixed Tau_r parameter.
    if gating['Dep'] == 1:
        if gating['UR'] == 1:
            derivs[0] = (1 - n) / Tau_r
        else:
            derivs[0] = (1 - n) / all_dict['Tau_r']
    # Facilitation: p relaxes to baseline (dynamic p0 state when SMR on).
    if gating['Fac'] == 1:
        if gating['SMR'] == 1:
            derivs[1] = (p0 - p) / all_dict['Tau_f']
        else:
            derivs[1] = (all_dict['p0'] - p) / all_dict['Tau_f']
    # Use-dependent recovery: Tau_r relaxes back to Tau_r0.
    if gating['UR'] == 1:
        derivs[2] = (all_dict['Tau_r0'] - Tau_r) / all_dict['Tau_FDR']
    # Slow modulation of release: p0 relaxes to p0bar.
    if gating['SMR'] == 1:
        derivs[3] = (all_dict['p0bar'] - p0) / all_dict['Tau_i']
    # Desensitisation: D recovers towards 1.
    if gating['DSR'] == 1:
        derivs[4] = (1 - D) / all_dict['Tau_D']
    return derivs
4e93533cdb00ce1a413d2b307c7863d30e82ac06
16,547
def split_sentences(txt, splitchar=".", include_splitchar=False):
    """Split sentences of a text based on a given EOS char.

    NOTE(review): ``include_splitchar`` is accepted but never used —
    confirm whether it was meant to keep the EOS char on each sentence.
    """
    sentences = txt.strip().split(splitchar)
    return [sentence.split() for sentence in sentences if len(sentence) > 0]
22b8cc92062fe2dfdbb4cf51b1210ea2aa8b4c59
16,548
def clean_data(df):
    """
    This procedure removes rows containing NAs from the dataframe

    :param df: dataframe loaded from the flat file
    :return: dataframe after removing NAs
    """
    cleaned = df.dropna()
    return cleaned
2839b19c8aff3ce85eb9f9b0716be6a0b85e74c7
16,550
def colorwheel(color_value):
    """
    A colorwheel. ``0`` and ``255`` are red, ``85`` is green, and ``170``
    is blue, with the values between being the rest of the rainbow.

    :param int color_value: 0-255 of color value to return
    :return: tuple of RGB values
    """
    if not 0 <= color_value <= 255:
        return 0, 0, 0
    if color_value < 85:
        ramp = color_value * 3
        return 255 - ramp, ramp, 0
    if color_value < 170:
        ramp = (color_value - 85) * 3
        return 0, 255 - ramp, ramp
    ramp = (color_value - 170) * 3
    return ramp, 0, 255 - ramp
22a65f7846e32e58365e573c73d37fa1272a1fda
16,551
import random def _make_random_sequence(length=50): """ Generate a random string for secret key or password """ chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' return ''.join(random.SystemRandom().choice(chars) for n in range(length))
d69c213b3c2ebfe8fda82400c69a767e91ed9f35
16,552
import requests


def get_pypi_info(package="cellpy"):
    """get version number and sha256 for a pypi package

    Args:
        package (str): name of package

    Returns:
        [version, sha256]
    """
    url = f"https://pypi.org/pypi/{package}/json"
    response = requests.get(url)
    # requests.Response is falsy for 4xx/5xx status codes.
    if not response:
        print(f"url {url} not responding")
        return None, None
    payload = response.json()
    version = payload["info"]["version"]
    latest_release = payload["releases"][version][-1]
    return version, latest_release["digests"]["sha256"]
819fa2c0ab0264455ae91225dff0d1f8fb498820
16,553
def get_res(catalog):
    """ Returns the resolution """
    key = 'ishape_hsm_regauss_resolution'
    return catalog[key]
43bd30401f77de10a41be6ab720626acff53d643
16,555
def _offset_to_pts(F, center, pred, stride, num_points):
    """
    Change from point offset to point coordinate.

    :param F: array/symbol API module (presumably ``mxnet.nd``/``mx.sym``
        given the slice/tile/reshape signatures -- confirm)
    :param center: the initial points center
    :param pred: the predicted point offsets
    :param stride: the stride of the offsets
    :param num_points: the initial number of points for each positions
    :return pts: the predicted point coordinate
    """
    # Drop the last channel, keeping the xy part of each center.
    center_xy = F.slice(center, begin=(None, None, 0), end=(None, None, -1))
    # Repeat the center once per predicted point.
    pts_center = F.tile(center_xy, reps=(1, 1, num_points))
    # Move the channel axis last so each position's offsets are contiguous.
    pred_transpose = F.transpose(pred, axes=(0, 2, 3, 1))
    # Merge spatial dims and split channels into (num_points, 2) pairs.
    pred_reshape = F.reshape(pred_transpose, (0, -3, -4, num_points, 2))
    # Reverse each 2-vector (presumably (y, x) -> (x, y) -- confirm).
    pred_flip = F.flip(pred_reshape, axis=3)
    xy_pts_shift = F.reshape(pred_flip, (0, 0, -3))
    # Scale the offsets by the stride and add them to the tiled centers.
    pts = F.broadcast_add(xy_pts_shift * stride, pts_center)
    return pts
7488e2392e5f66fd3c0fb40e9831f1c6ab10cac3
16,556
import yaml


def load_config(parent_path):
    """Load ``config.yml`` from *parent_path*; empty dict when missing."""
    config_path = parent_path / 'config.yml'
    if not config_path.is_file():
        return {}
    with open(config_path) as handle:
        return yaml.load(handle, Loader=yaml.SafeLoader)
9370d1191e42023aff87f267b46bc024a47efd3e
16,557
import math


def angle_between_vectors(vector1, vector2):
    """Return the signed XZ-plane angle (radians) from *vector1* to *vector2*.

    Only the x and z components are used (y is ignored), so this is the
    difference of the two headings about the Y axis; the result is not
    normalised into (-pi, pi].

        angle_between_vectors((1, 0, 0), (0, 1, 0))  ->  -1.5707963267948966
        angle_between_vectors((1, 0, 0), (1, 0, 0))  ->   0.0
        angle_between_vectors((1, 0, 0), (-1, 0, 0)) ->  -3.141592653589793

    NOTE(review): the original docstring showed positive values for the
    first and third examples, which does not match the implementation.
    """
    return math.atan2(vector2[0], vector2[2]) - math.atan2(vector1[0], vector1[2])
224965990d6880ea5597b7e2522b24a9f0ef6179
16,558
import pkg_resources


def load_resource(resource_path):  # pragma: NO COVER
    """ Gets the content of a resource """
    raw = pkg_resources.resource_string(__name__, resource_path)
    return raw.decode('utf-8')
5afd4c6072f942c865f64dbceb22cf422903c573
16,560
import click


def style_album(artist, album, year):
    """Returns a unified style for albums."""
    separator = click.style(" - ", fg="white")
    pieces = [
        click.style(str(artist), fg="magenta", bold=True),
        click.style(str(album), fg="blue", bold=True),
        click.style(str(year), fg="white", bold=True),
    ]
    return separator.join(pieces)
f673eff3d38ec5933a17be837740e117f103a3f3
16,561
def add_batch_info(experience, batch_info, buffer=()):
    """Add batch_info and rollout_info_field string to experience."""
    if batch_info is not None:
        chosen_buffer = buffer
        # Fall back to the batch_info's own replay buffer when none given.
        if chosen_buffer == () and batch_info.replay_buffer != ():
            chosen_buffer = batch_info.replay_buffer
        experience = experience._replace(
            batch_info=batch_info, replay_buffer=chosen_buffer)
    return experience._replace(rollout_info_field='rollout_info')
639f0c2d629043dba7b48fce2a443f73d313aa7b
16,562
def str_is_parametrized(target_str):
    """
    Determine if there are jinja2 parameters in the string
    :param target_str:
    :return:
    """
    # Markers are assembled by concatenation so this source itself is not
    # mistaken for a template.
    expr_open = '{' + '{'
    stmt_open = '{' + '%'
    return expr_open in target_str or stmt_open in target_str
3d4f79a4631e4e8c979d673a1933a39b193d8ace
16,563
def list_sample(collection, limit=3):
    """
    given an ordered collection (list, tuple, ...), return a string
    representation of the first limit items (or fewer), e.g.
    "itemA, itemB, itemC and 7 more"
    """
    ln = len(collection)
    if ln == 0:
        return ''
    if ln == 1:
        return collection[0]
    if ln <= limit:
        # BUGFIX: the original indexed collection[min(limit, ln)] here,
        # which is out of range when the whole collection fits; join all
        # but the last item and append the last with "and".
        head = ', '.join(str(item) for item in collection[:-1])
        return '%s and %s' % (head, collection[-1])
    shown = ', '.join(str(item) for item in collection[:limit])
    return '%s and %d others' % (shown, ln - limit)
3c0926e75fc58ce68ec1919c7b072b067c6749ce
16,567
def _round_down_to_multiple(num, divisor): """ Round the number down to a multiple of the divisor. :param num: :param divisor: :return: """ return num - (num % divisor)
aefb46ca963924ddbbe0d161e2278af0dbcd665d
16,568
def pull(cards, card):
    """
    Pulls the a card from a hand and returns the new hand.

    Example:
    > pull([4,5,6,7,8], 6)
    [4,5,7,8]
    """
    remaining = list(cards)
    remaining.remove(card)
    return remaining
8cbe3d0178ae886ee44704e07edfd7501216e699
16,570
def MBtokb(megabytes):
    """
    Converts megabytes to kilobits (binary: 1 MB = 1024 kB = 8192 kb).

    :param megabytes: numeric, megabytes
    :return: numeric, kilobits equivalent.
    """
    # The original stored this in a local misleadingly named ``kilobytes``;
    # the multiplier (and therefore behavior) is unchanged.
    kilobits = megabytes * 8192
    return kilobits
2a9dca3f62fcd603ca2bcb36010d14fee86df53e
16,572
import os


def create_directory_and_return_result(folder_name: str) -> bool:
    """
    Create the directory if it does not exist.

    :param folder_name: directory path to create
    :return: True when the directory exists or was created, False when
        creation failed.
    """
    if os.path.exists(folder_name):
        return True
    try:
        os.mkdir(folder_name)
        return True
    except OSError:
        # BUGFIX: the original swallowed the error and still returned True,
        # contradicting its own docstring.
        return False
3ad915b0ca13251e88e4f896a5bdf4eac7cd4c16
16,574
from typing import List


def get_capacity_of_valleys(profile: List[int]) -> int:
    """Return the total water capacity of all valleys in the profile.

    >>> get_capacity_of_valleys([2, 1, 2])
    1
    >>> get_capacity_of_valleys([2, 0, 1, 0, 1, 0, 3])
    8
    >>> get_capacity_of_valleys([3, 2, 1, 0])
    0
    >>> get_capacity_of_valleys([0, 1, 1, 5])
    0
    >>> get_capacity_of_valleys([0, 1, 2, 2, 2, 1, 0])
    0
    >>> get_capacity_of_valleys([2, 1, 3, 0, 2])
    3
    >>> get_capacity_of_valleys([2, 0, 1, 3, 0, 5])
    8
    """
    capacity = 0
    # Forward pass: walk left to right, closing each valley at the first
    # column at least as tall as its left rim.
    maximum = -1
    in_valley = False
    for i in range(1, len(profile)):
        if in_valley:  # searching for the opposite boundary
            if profile[i] >= profile[maximum]:
                capacity += sum(profile[maximum] - height
                                for height in profile[maximum + 1:i])
                in_valley = False
        else:  # searching for a local maximum (left rim of a valley)
            if profile[i - 1] > profile[i]:
                maximum = i - 1
                in_valley = True
    # Backward pass: only needed when the forward scan ended inside an
    # unclosed valley (right rim lower than left rim); rescan that tail
    # from the right so the lower rim bounds the water level.
    if in_valley:
        in_valley = False
        stop = maximum
        for i in range(len(profile) - 2, stop - 1, -1):
            if in_valley:  # searching for the opposite boundary
                if profile[i] >= profile[maximum]:
                    capacity += sum(profile[maximum] - height
                                    for height in profile[i + 1:maximum])
                    in_valley = False
            else:  # searching for a local maximum (right rim of a valley)
                if profile[i + 1] > profile[i]:
                    maximum = i + 1
                    in_valley = True
    return capacity
3e12208971349306d6c0217eb618f2830e8b1d1f
16,575
import argparse


def args_init(args=None):
    """Returns the parsed arguments (an instance of argparse.Namespace).

    Args:
        args (list): A list of program arguments, Defaults to sys.argv.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('program', type=str, metavar='program ...', nargs='?',
                        help='run a program with anything after it used as '
                             'arguments')
    parser.add_argument('arguments', type=str, nargs=argparse.REMAINDER,
                        default=[], help=argparse.SUPPRESS)
    parser.add_argument('--config', metavar='FILE', type=str,
                        help='override config file location')
    parser.add_argument('--reload', action='store_true',
                        help='Reload the config of all CT instances')
    parser.add_argument('--rgb', action='store_true',
                        help='Use RGB colors (default: detect support, '
                             'fallback to xterm-256)')
    return parser.parse_args(args=args)
314abf68d760c6ca46cb1863b5b666370d6511d7
16,576
import logging


def greenlet_exception_logger(logger, level=logging.CRITICAL):
    """
    Return a function that can be used as argument to
    Greenlet.link_exception() that will log the unhandled exception to
    the given logger.
    """
    def exception_handler(greenlet):
        logger.log(
            level,
            "Unhandled exception in greenlet: %s",
            greenlet,
            exc_info=greenlet.exc_info,
        )
    return exception_handler
98f413f5f8432214d051f306490014e6927562f2
16,577
def repeatedString(str, pat):
    """Return 1 if str is one or more whole repeated copies of pat, else 0."""
    n = len(pat)
    # An empty pattern can only "repeat" into an empty string (the original
    # crashed with ZeroDivisionError on a non-empty str here).
    if n == 0:
        return 1 if len(str) == 0 else 0
    # BUGFIX: the original accepted partial trailing copies (e.g. 'aba'
    # for pattern 'ab'); require a whole number of repeats.
    if len(str) < n or len(str) % n != 0:
        return 0
    for j in range(len(str)):
        if str[j] != pat[j % n]:
            return 0
    return 1
1e2fc8916387ba361b37b19f0627951f6f838b00
16,578
def get_data(fname: str) -> tuple:
    """ Read the data file. """
    with open(fname) as f:
        sections = f.read().split('\n\n')

    # Section 0: field rules. Expand every "lo-hi" range into the explicit
    # list of valid values (not space efficient, but the sets are small).
    fields = {}
    for line in sections[0].split('\n'):
        name, spec = line.split(': ')
        valid = fields.get(name, [])
        for interval in spec.split(' or '):
            lo, hi = interval.split('-')
            valid.extend(range(int(lo), int(hi) + 1))
        fields[name] = valid

    # Section 1: my ticket (one header line plus one CSV line).
    _, csv_line = sections[1].split('\n')
    my_ticket = [int(v) for v in csv_line.split(',')]

    # Section 2: nearby tickets, one CSV line each after the header.
    tickets = [[int(v) for v in line.split(',')]
               for line in sections[2].split('\n')[1:]]

    return fields, tickets, my_ticket
90ef3de1c8d2899154709205c9b32085bb4af0e2
16,579
def remove_whitespace_chars(text):
    """
    Remove unnecessary (trailing, double, etc.) whitespace characters from
    a piece of text.

    :param text: A piece of text.
    :return Text without unnecessary whitespace.
    """
    words = text.split()
    return " ".join(words)
40640c421bf6e776001e8cfa443dbb2f7148d6f0
16,580
import textwrap


def _patch_with_http_archive(workspace_content, filename):
    """Replaces local_repository() rules with http_archive() rules.

    :param workspace_content: text of a Bazel WORKSPACE file
    :param filename: archive name substituted into the GitHub URLs
    :return: the rewritten WORKSPACE text
    """
    # Insert the http_archive load statement right after the first line.
    workspace_lines = workspace_content.split('\n')
    http_archive_load = ('load("@bazel_tools//tools/build_defs/repo:http.bzl", '
                         '"http_archive")')
    workspace_content = '\n'.join([workspace_lines[0], http_archive_load] +
                                  workspace_lines[1:])
    # Literal blocks to search for. NOTE(review): `cc` ends in '))' while
    # `base` ends in ')' — if the real WORKSPACE uses ')', this template
    # never matches and the replace below silently no-ops. Verify against
    # the actual WORKSPACE file.
    base = textwrap.dedent(
        '''\
        local_repository(
            name = "tink_base",
            path = "..",
        )
        ''')
    cc = textwrap.dedent(
        '''\
        local_repository(
            name = "tink_cc",
            path = "../cc",
        ))
        ''')
    # Replacement blocks. NOTE(review): both patched templates also end in
    # '))' — confirm the extra paren is intentional.
    base_patched = textwrap.dedent(
        '''\
        # Modified by setup.py
        http_archive(
            name = "tink_base",
            urls = ["https://github.com/google/tink/archive/{}.zip"],
            strip_prefix = "tink-master/",
        ))
        '''.format(filename))
    cc_patched = textwrap.dedent(
        '''\
        # Modified by setup.py
        http_archive(
            name = "tink_cc",
            urls = ["https://github.com/google/tink/archive/{}.zip"],
            strip_prefix = "tink-master/cc",
        ))
        '''.format(filename))
    # str.replace is a no-op when the needle is absent.
    workspace_content = workspace_content.replace(base, base_patched)
    workspace_content = workspace_content.replace(cc, cc_patched)
    return workspace_content
3cd511e82c3609adc1c7ed3ed919e0e410ef73c4
16,581
import re
import os


def process_wolfram_string(text: str, config: dict) -> str:
    """Clean and format an answer from Wolfram into a presentable format.

    Args:
        text: Original answer from Wolfram Alpha
        config: {
            lang: language of the answer
            root_dir: of the Skill to find a regex file
        }
    Returns:
        Cleaned version of the input string.
    """
    # Normalisation passes: collapse whitespace, turn Wolfram separators
    # into commas, spell out factorial.
    substitutions = (
        (r" \s+", r" "),
        (r" \| ", r", "),
        (r"\n", r", "),
        (r"!", r",factorial"),
    )
    for pattern, replacement in substitutions:
        text = re.sub(pattern, replacement, text)

    regex_file_path = os.path.join(
        config["root_dir"], "regex", config["lang"], "list.rx"
    )
    with open(regex_file_path, "r") as regex:
        list_regex = re.compile(regex.readline().strip("\n"))

    match = list_regex.match(text)
    if match:
        text = match.group("Definition")
    return text
883eefed5c872534bc9df611ff58039e619afb82
16,584
def log_config_state_fixture():
    """Return log config state fixture data."""
    state = {
        "enabled": True,
        "level": "info",
        "logToFile": False,
        "filename": "",
        "forceConsole": False,
    }
    return state
8e814edf02d579920e44737ff37a5610203a638a
16,585
import numpy


def argmax(a):
    """
    Return unravelled (multi-dimensional) index of the maximum

    param:
        a: array to be searched
    """
    flat_index = a.argmax()
    return numpy.unravel_index(flat_index, a.shape)
86ce80938471764076290698867bd9d08fead330
16,586
import argparse


def get_args():
    """ Load arguments from cmd """
    parser = argparse.ArgumentParser(
        description="Autocomplete BU's survey and schedule tests."
    )
    # All options are simple boolean flags.
    flags = [
        ("--run", "run the bot now."),
        ("--schedule", "schedule a cron job for the bot to run every day."),
        ("--delete", "delete the cron job scheduled for this file."),
        ("--cron", "the option called from cron to hide the interface."),
    ]
    for flag, help_text in flags:
        parser.add_argument(flag, help=help_text, action="store_true")
    return parser.parse_args()
b8d5ffed51a9d048487a0683e196de69b9ad5856
16,588
def is_function(obj):
    """
    :param obj: the object to check
    :return: whether the object is callable
    """
    # Attribute check (rather than callable()) kept to match the original
    # semantics, which also accepts instances carrying a __call__ attribute.
    has_call = hasattr(obj, '__call__')
    return has_call
0e8c7121ad6477482d94286640e66a79e7d9b375
16,590
import torch
import io
import numpy


def numpy_tensor_deserializer(tensor_bin) -> torch.Tensor:
    """Strategy to deserialize a binary input in npy format into Torch tensor

    Args
        tensor_bin: A binary representation of a tensor

    Returns
        a Torch tensor
    """
    stream = io.BytesIO(tensor_bin)
    array = numpy.load(stream)
    return torch.from_numpy(array)
5b3fc6e633beadb98fb7d49c376ae07d3f9f65dd
16,591
def calculate_bid(player, scotch_pos, first_moves, second_moves):
    """Always bid a constant 10.

    The original kept a sketch of an adaptive strategy in its docstring
    (bid the remaining budget when low, else 6/12 tiers) but deliberately
    returned the constant; that behavior is preserved.
    """
    return 10
6bed0b8ce51d5a86d0831bae798d44d83722c94d
16,592
def find_menu_request(self, menu):
    """Find the captured GET request matching the given menu.

    Exactly one matching request is expected; returns None when absent.
    """
    path = f'info?gatewayCode={menu.code}'
    for request in self.driver.requests:
        if request.method == 'GET' and path in request.url:
            return request
6200c50e88e1b1f8ae45dd873e6dc89e9851b6d3
16,593
def get_dynamic_hasher_names(HMAC_KEYS):
    """
    Return base dynamic hasher names for each entry in HMAC_KEYS (we need to
    create one hasher class for each key).

    Names are sorted to make sure the HMAC_KEYS are tested in the correct
    order and the first one is always the first hasher name returned.
    """
    # Sorting key ids newest-first (reverse lexical order on date-style ids)
    # keeps the current key's hasher at the front of the list.
    # A comprehension replaces the former lambda-assigned-to-a-name (PEP 8 E731).
    return [
        'bcrypt{0}'.format(hmac_id.replace('-', '_'))
        for hmac_id in sorted(HMAC_KEYS.keys(), reverse=True)
    ]
f2243dfdf7c0f56afbdd366fa190d0736dd94323
16,594
import os


def join_path(base, name, ext):
    """Build a valid path of the form ``base/name.ext``."""
    filename = "{}{}".format(name, ext)
    return os.path.join(base, filename)
9bec759bef42bae1c6ca4ec2fc033e05c0d674d3
16,595
def add_etherpad_urls(event_data):
    """Attach etherpad URLs to *event_data* (lookup not implemented yet).

    Reads the event's ``id`` — so a malformed dict fails fast with
    ``KeyError`` — then returns the dict unchanged.
    """
    _ = event_data['id']
    # TODO: query our database, e.g.
    # event_data['etherpad_urls'] = find_etherpad_urls(event_id)
    return event_data
3be307271fe597cf09444e02d519289206c56664
16,596
import subprocess


def get_private_key_from_address(address):
    """Read the private key for an address you control using bitcoind.

    Runs ``bitcoind dumpprivkey <address>`` and returns its stripped stdout
    (bytes). The exit status is deliberately not checked, matching the
    original behaviour: an unknown address yields an empty result rather
    than an exception.
    """
    # subprocess.run replaces the older Popen/communicate pattern.
    result = subprocess.run(
        ['bitcoind', 'dumpprivkey', address],
        stdout=subprocess.PIPE,
    )
    return result.stdout.strip()
31aba10d32aec6e01feb27eb9382ebf2908d3818
16,598
def has_primersearch(coll):
    """Returns True if the passed PDPCollection has primersearch output

    - coll    PDPCollection describing genomes for a run
    """
    results = [genome.primersearch for genome in coll.data]
    return None not in results
4777d11a397ece62c005ab67d4706895c50eea10
16,599
def text_table(data, header=None):
    """Format a list of dicts as a text-mode table.

    Columns default to the sorted keys of the first record; widths fit the
    widest of each heading and its cells. Numbers are right-aligned,
    everything else left-aligned. Returns '' for empty input.
    """
    records = list(data)
    if not records:
        return ''
    columns = header or sorted(records[0].keys())
    # Column width = max of heading width and every cell width in that column.
    widths = [len(col) for col in columns]
    for record in records:
        for i, col in enumerate(columns):
            widths[i] = max(widths[i], len(str(record.get(col, ''))))

    def render_cell(i, value):
        text = str(value)
        if isinstance(value, (int, float)):
            return text.rjust(widths[i])
        return text.ljust(widths[i])

    lines = [' | '.join(render_cell(i, col) for i, col in enumerate(columns))]
    lines.append('-' * len(lines[0]))
    for record in records:
        lines.append(' | '.join(render_cell(i, record.get(col, ''))
                                for i, col in enumerate(columns)))
    return '\n'.join(lines)
331fb293bfb2a86bda730d331c409e5dedec7868
16,600
def reshape_long(df, reportno, grams_name_dict, geo_name_dict):
    """
    For dfs with four quarter, reshape to long

    Reports '1'-'3' have their quarter columns stacked into rows; any other
    report number is returned unchanged.

    :param df: pandas DataFrame; quarter columns are assumed to be the ones
        whose labels start with 'Q' -- TODO confirm against the loader
    :param reportno: report number as a string ('1', '2', '3', ...)
    :param grams_name_dict: maps report number -> name for the stacked
        value column
    :param geo_name_dict: maps report number -> replacement name for the
        'GEO' column
    :return: reshaped (or untouched) DataFrame
    """
    if reportno == '1' or reportno == '2' or reportno == '3':
        # Index columns = everything that is not a quarter column.
        usecols = [x for x in df.columns if not x.startswith('Q')]
        grams_title = grams_name_dict[reportno]
        # Stack the quarter columns into rows; the stacked values land in
        # column 0, which is renamed to the report's measure name.
        df = (df
              .set_index(usecols)
              .stack()
              .reset_index()
              .rename(columns={0: grams_title}))
        # The stacked level gets an auto-generated name, so rename whatever
        # column is neither an index column nor the measure to 'Q'.
        rename_col = {y: 'Q' for y in
                      [x for x in df.columns
                       if x not in usecols + [grams_title]]}
        df = df.rename(columns=rename_col)
        # Keep only the quarter number, e.g. a label like '2019Q1' -> '1'.
        # NOTE(review): assumes every quarter label contains a literal 'Q'.
        df = df.assign(Q=df.Q.str.split('Q').str[1])
        df = df.rename(columns={'GEO': geo_name_dict[reportno]})
    return df
f5072f1597b9393050577a49b84ed55d6ca8e899
16,601
import csv


def get_region_properties(properties_tsv):
    """List of properties used for directly linking Wikidata items to regions.

    e.g., P19: place of birth

    These are compiled based on knowledge of Wikidata and Marc Miquel's excellent work:
    https://github.com/marcmiquel/WDO/blob/e482a2df2b41d389945f3b82179b8b7ca338b8d5/src_data/wikipedia_diversity.py

    :param properties_tsv: path to a TSV file with header ``Property<TAB>Label``
    :return: list of ``(property_id, label)`` tuples, in file order
    :raises AssertionError: if the header row is not the expected one
    """
    expected_header = ['Property', 'Label']
    region_properties = []
    with open(properties_tsv, 'r') as fin:
        tsvreader = csv.reader(fin, delimiter='\t')
        assert next(tsvreader) == expected_header
        for line in tsvreader:
            # `prop_id` avoids shadowing the built-in `property`.
            prop_id = line[0]
            label = line[1]
            region_properties.append((prop_id, label))
    return region_properties
1e47f0020b84a10f449532cd8b3af8512f81dd0e
16,602
def make_special_identifier(ln, ed, ms, aliquot=None):
    """Build a special identifier ``<ln>-<ed>-<ms>[-<aliquot>]``.

    ln: str or int
    ed: int extract device id (ints are zero-padded to two digits)
    ms: int mass spectrometer id (ints are zero-padded to two digits)
    aliquot: optional; appended only when truthy, zero-padded unless
        already a str
    """
    def _pad(value):
        # Integer ids are rendered as two-digit zero-padded strings.
        if isinstance(value, int):
            return "{:02d}".format(value)
        return value

    ident = "{}-{}-{}".format(ln, _pad(ed), _pad(ms))
    if aliquot:
        suffix = aliquot if isinstance(aliquot, str) else "{:02d}".format(aliquot)
        ident = "{}-{}".format(ident, suffix)
    return ident
269113bac788c81ef35b0964d2215c8f0b85e569
16,603
def cond_decorator(flag, dec):
    """Conditionally apply decorator *dec* when *flag* is true.

    :param flag: the boolean flag
    :type flag: boolean
    :param dec: the decorator applied when the flag is set
    """
    def decorate(fn):
        """Return *dec(fn)* when the flag is set, otherwise *fn* untouched."""
        if flag:
            return dec(fn)
        return fn
    return decorate
bcf82a58f213a277941b9e9e4ff3af7dd3be74d8
16,604
def gap_frequency(pileup):
    """returns the frequency of gaps (n. gaps divided by total number of reads, including N)"""
    column_totals = pileup.sum(axis=0)
    # Index 4 of the summed pileup holds the gap counts
    # (assumed channel order; confirm against the pileup builder).
    gap_total = column_totals[4]
    return gap_total / column_totals.sum(axis=0)
9f97e944f06d05dd97517ad80a92dcc235e51bc3
16,605
def scale_list(list_):
    """
    Returns a scaled list with the minimum value subtracted from each element
    of the corresponding list.

    Parameters
    ----------
    list_ : list
        Input list. An empty list yields an empty list.

    Returns
    -------
    scaled_list : list
        Scaled list.

    Examples
    --------
    >>> list_ = [6, 3, 5, 11, 3, 2, 8, 6]
    >>> scale_list(list_)
    [4, 1, 3, 9, 1, 0, 6, 4]
    """
    if not list_:
        # Previously min([]) raised ValueError; an empty result is the
        # natural answer for empty input.
        return []
    # Compute the minimum once instead of once per element (was O(n**2)).
    smallest = min(list_)
    return [value - smallest for value in list_]
ef64ae41ca223bbf6a4b1c8c535bd2072a53a7a1
16,608
def n_letter_words(all_words, n):
    """Return the words of length *n* from a space-separated string.

    @param all_words is the string containing all words
    @param n is the desired word length
    @returns a list of n-letter words, in original order
    """
    return [word for word in all_words.split(" ") if len(word) == n]
ac97c0a90016b022c4323e30445164641600748c
16,609
def checksum(data):
    """Calculates the checksum, given a message without the STX and ETX.

    Returns a bytearray of the two checksum bytes: low byte of the sum
    first, then the next-higher byte.
    """
    total = sum(data)
    first_byte = total & 0xFF
    second_byte = (total >> 8) & 0xFF
    return bytearray([first_byte, second_byte])
c958e6b160a33343fee54de7d2725d93dff9e286
16,611
def get_separation(ref_radec, target_radec):
    """Return the angular separation between two sources, in arcseconds.

    ref_radec : skycoord ra-dec format of the reference source
    target_radec : skycoord ra-dec format of the target source

    The skycoord format is obtained from astropy's coordinates module as
    follows:
    my_sky_coord = astropy.coordinates.SkyCoord(ra, dec, unit='deg', frame='fk5')
    """
    return ref_radec.separation(target_radec).arcsecond
f33ba2aa083d8f2068a12efa394d209604300d97
16,612
def github_disable_two_factor_requirement_org(rec):
    """
    author: @mimeframe
    description: Two-factor authentication requirement was disabled for an org.
    repro_steps:
      (a) Visit /organizations/<org>/settings/security
      (b) Uncheck 'Require two-factor authentication...'
      (c) Click 'Save'
    reference: https://help.github.com/
    articles/requiring-two-factor-authentication-in-your-organization/
    """
    expected_action = 'org.disable_two_factor_requirement'
    return rec['action'] == expected_action
4edab96d9918f4685db74fa32f5cc9ecdc6dccec
16,613
def make_ideal_counter(needed_nurses_per_shift):
    """Build the ideal per-shift counter.

    Parameter: N, the number of nurses one team needs per shift.
    Output: the list ``[0, N, N, N]``.
    """
    n = needed_nurses_per_shift
    return [0, n, n, n]
614412d97854479d380dc8be18c2545162bd4ddf
16,614
def search_sorted(a, v):
    """
    Implementation of searchsorted based on binary search with the same
    semantics as numpy's searchsorted. Used as the basis of the C
    implementation which we use in the simplify algorithm.

    :param a: sorted (non-decreasing) sequence to search
    :param v: value to locate
    :return: an index in [0, len(a)] at which v can be inserted keeping a
        sorted

    NOTE(review): for runs of duplicate values equal to v this returns the
    index of the *last* duplicate, which differs from numpy's default
    side='left' result -- confirm this matches the C port's expectations.
    """
    upper = len(a)
    if upper == 0:
        # Empty sequence: the only valid insertion point is 0.
        return 0
    lower = 0
    # Invariant: the answer lies in [lower, upper]; the window shrinks by
    # half each iteration until a single candidate index remains.
    while upper - lower > 1:
        mid = (upper + lower) // 2
        if v >= a[mid]:
            lower = mid
        else:
            upper = mid
    # `lower` is the last candidate; insert after it only if it is < v.
    offset = 0
    if a[lower] < v:
        offset = 1
    return lower + offset
701da6b53f43a1b1d1046f653a8232b093c61528
16,615
import json


def load_config(path):
    """Load a UTF-8 encoded JSON configuration file and return the parsed object."""
    with open(path, 'rb') as handle:
        raw = handle.read()
    return json.loads(raw.decode('utf-8'))
d057df2578710e2ae644626b1246e04fbe806836
16,616
def contact():
    """Return the static contact-page body."""
    return "Contact info"
8e4ea68944a1d37a5d94596251ed8c1dbd99ab20
16,617
def prob_mass_grey_from_ndvi(ndvi, old_min=0.1, old_max=0.7):
    """
    Calculates probability masses for grey from NDVI values.

    NDVI at or above *old_max* gives 0 (fully green belief), at or below
    *old_min* gives 1, and values in between are linearly interpolated.

    :param ndvi: NDVI value
    :param old_min: NDVI below which grey belief is 1
    :param old_max: NDVI above which grey belief is 0
    :return: probability mass in [0, 1]
    """
    # Guard clauses clamp the two extremes.
    if ndvi > old_max:
        return 0
    if ndvi < old_min:
        return 1
    # Linear rescale of [old_min, old_max] onto [0, 1], inverted.
    return 1 - (ndvi - old_min) / (old_max - old_min)
d880c392cd5cdf20a08f8a6c6bdf8d2694824587
16,618
import csv


def write_table_to_csv_file(file_name, lowfi_table):
    """Write the data from a LowFiTable to a .csv file.

    :param file_name: destination path
    :param lowfi_table: object exposing ``get_headings_and_data_as_list()``
    :return: True on success, False if the file cannot be written or the
        table cannot be serialised
    """
    try:
        # newline='' is how the csv module documents opening output files;
        # without it the explicit '\n' terminator is doubled on Windows.
        with open(file_name, mode='w', newline='') as fp:
            csv_writer = csv.writer(fp, dialect=csv.excel, lineterminator='\n')
            csv_writer.writerows(lowfi_table.get_headings_and_data_as_list())
        return True
    except Exception:
        # Was a bare `except:` -- Exception still covers I/O and data errors
        # but no longer swallows KeyboardInterrupt/SystemExit.
        return False
9fbb1def797df95239b1aa5e1198b6fe9f7da26b
16,619
def nt_flush(nt):
    """Build a callable that flushes NT key notifications.

    The returned function asserts that both the entry-listener and the
    connection-listener queues drain within one second.
    """
    def _flush():
        api = nt._api
        assert api.waitForEntryListenerQueue(1.0)
        assert api.waitForConnectionListenerQueue(1.0)
    return _flush
556f22777748233775bf11315c9753c8ed679498
16,621
def _construct_name(date, n): """Helper method to construct a name including the directory path""" name = "".join((date, "-img-", "{:03d}".format(n), ".jpg")) return name
a1ecdbf6968216453c8cae08bdccf714fb7edde1
16,622
from os import makedirs
from os.path import isdir, join


def process_module(module_file, module_keywords, module_dir):
    """Process a Module file.

    Parameters
    ----------
    module_file : :class:`str`
        A template Module file to process.
    module_keywords : :class:`dict`
        The parameters to use for Module file processing (must contain at
        least ``name`` and ``version``).
    module_dir : :class:`str`
        The directory where the Module file should be installed.

    Returns
    -------
    :class:`str`
        The text of the processed Module file.
    """
    target_dir = join(module_dir, module_keywords['name'])
    # exist_ok avoids the check-then-create race of isdir() + makedirs().
    makedirs(target_dir, exist_ok=True)
    install_module_file = join(target_dir, module_keywords['version'])
    # Fill the template's {placeholders} from the keyword dict.
    with open(module_file) as m:
        mod = m.read().format(**module_keywords)
    with open(install_module_file, 'w') as m:
        m.write(mod)
    return mod
5eb39462f71bc067c2af37e2e9485bb9e7016d30
16,623
def hasValidKey(dict, key):
    """
    Return True if key is in dict and not None, False otherwise.
    """
    if key not in dict:
        return False
    return dict[key] is not None
438ca30f1b133be80389abf8304cd24b09fce1d8
16,624
from datetime import datetime
import requests


def pull_3year():
    """Returns a list (in JSON format) containing all the events from the Penn iCal Calendar.

    List contains events in chronological order. Each element of the list is
    a dictionary, containing:
        - Name of the event 'name'
        - Start date 'start'
        - End date 'end'

    Raises requests.HTTPError if any term's calendar cannot be fetched.
    """
    BASE_URL = "https://www.stanza.co/api/schedules/almanacacademiccalendar/"
    events = []
    # One .ics feed per term for the current calendar year.
    for term in ["fall", "summer", "spring"]:
        url = "{}{}{}term.ics".format(BASE_URL, datetime.now().year, term)
        resp = requests.get(url)
        resp.raise_for_status()
        r = resp.text
        # Minimal line-oriented iCal parse: accumulate fields into `d`
        # between BEGIN:VEVENT / END:VEVENT markers.
        lines = r.split("\n")
        d = {}
        for line in lines:
            if line == "BEGIN:VEVENT":
                d = {}
            elif line.startswith("DTSTART"):
                raw_date = line.split(":")[1]
                start_date = datetime.strptime(raw_date, "%Y%m%d").date()
                d["start"] = start_date.strftime("%Y-%m-%d")
            elif line.startswith("DTEND"):
                raw_date = line.split(":")[1]
                end_date = datetime.strptime(raw_date, "%Y%m%d").date()
                d["end"] = end_date.strftime("%Y-%m-%d")
            elif line.startswith("SUMMARY"):
                name = line.split(":")[1]
                d["name"] = str(name).strip()
            elif line == "END:VEVENT":
                events.append(d)
    # NOTE(review): assumes every VEVENT carries a DTSTART; an event
    # without one would raise KeyError here.
    events.sort(key=lambda d: d["start"])
    return events
14da14a13f184edbd5164271d79e758728b449ca
16,625