content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
def get_workflow_id(identifier: int) -> str:
    """Return an 8-character, zero-padded, uppercase hex string for *identifier*.

    Parameters
    ----------
    identifier: int
        Workflow identifier

    Returns
    -------
    string
    """
    raw_hex = hex(identifier)[2:]
    return raw_hex.upper().zfill(8)
fb1767279bd6291c3805f7589638cea8b68156fd
100,502
import tkinter


def make_canvas(width, height, title):
    """
    Creates and returns a drawing canvas of the given int size with a
    blue border, ready for drawing.
    """
    root = tkinter.Tk()
    root.minsize(width=width, height=height)
    root.title(title)
    # One extra pixel in each dimension so the full drawing area is visible.
    canvas = tkinter.Canvas(root, width=width + 1, height=height + 1)
    canvas.pack()
    return canvas
851224228c507426197d7fc0037b6fa63ae46b37
100,503
import re


def generalize_sql(sql):
    """Strip variable data out of an SQL query for aggregation/logging.

    String literals become ``X``, numbers become ``N``, and ``IN (...)``
    lists collapse to ``IN (XYZ)``. Based on Mediawiki's
    DatabaseBase::generalizeSQL. Returns None when given None.
    """
    if sql is None:
        return None
    # Strip MW-style comments, e.g. /* CategoryDataService::getMostVisited N.N.N.N */
    sql = re.sub(r'\s?/\*.+\*/', '', sql)
    # Remove escaped backslashes and escaped quotes first, so the literal
    # matchers below do not terminate early on an escaped quote.
    sql = re.sub(r"\\\\", '', sql)
    sql = re.sub(r"\\'", '', sql)
    sql = re.sub(r'\\"', '', sql)
    # Collapse single- and double-quoted string literals to X.
    sql = re.sub(r"'[^\']+'", 'X', sql)
    sql = re.sub(r'"[^\"]+"', 'X', sql)
    # All newlines, tabs, etc replaced by single space
    sql = re.sub(r'\s+', ' ', sql)
    # All numbers => N (must run after literal collapsing)
    sql = re.sub(r'-?[0-9]+', 'N', sql)
    # Collapse IN lists, e.g. WHERE foo IN ('880987','882618','708228','522330')
    sql = re.sub(r' IN\s*\([^)]+\)', ' IN (XYZ)', sql)
    return sql.strip()
45e473273571a445b17e918fc5afb97f99e46a8b
100,505
def ResolveAutoscalingStatusForMig(client, mig):
    """Resolves 'autoscaled' property for MIG.

    Uses 'autoscaler' property of a MIG to resolve 'autoscaled' property for
    output. Mutates ``mig`` in place by setting ``mig['autoscaled']`` to
    'yes', 'yes (*)' (autoscaler in ERROR state) or 'no'.

    Args:
      client: a GCE client (provides the Autoscaler status enum via
        client.messages).
      mig: IGM resource as a dict.

    Returns:
      Status of autoscaler if MIG is autoscaled. None otherwise.
    """
    if 'autoscaler' in mig and mig['autoscaler'] is not None:
        # status is present in autoscaler iff the Autoscaler message has an
        # embedded StatusValueValuesEnum defined.
        if (hasattr(mig['autoscaler'], 'status') and mig['autoscaler'].status ==
                client.messages.Autoscaler.StatusValueValuesEnum.ERROR):
            # Mark with (*) so callers can surface that the autoscaler errored.
            mig['autoscaled'] = 'yes (*)'
            return mig['autoscaler'].status
        else:
            # Assume it to be ACTIVE when no error status is reported.
            mig['autoscaled'] = 'yes'
            return client.messages.Autoscaler.StatusValueValuesEnum.ACTIVE
    else:
        mig['autoscaled'] = 'no'
        return None
c10c05ee3ad5c3dbdab78a3e23e841b468842294
100,508
def get_power(x, y, serial_number):
    """Compute the fuel-cell power level at (x, y) for the given serial number.

    Equivalent single-expression formula: hundreds digit of
    ((x+10)*y + serial) * (x+10), minus 5.
    """
    rack_id = x + 10
    raw = (rack_id * y + serial_number) * rack_id
    hundreds_digit = (raw // 100) % 10
    return hundreds_digit - 5
6d9329ae94b61989c2001d2d7498007561eabca9
100,509
def _task_thunk(task): # pragma: no cover """Thunk for executing tasks, used by MultiProcessTaskExecutor. This is called from separate processes so do not access any global state. Args: task: Task to execute. Returns: The result of the task execution. This is passed to the deferred. """ try: result = task.execute() return result except Exception as e: return e
dfaab923b8ae0a61a3e10535df87514fc0fabd15
100,516
def const(row, args, multiple=False):
    """A constant value (returns itself).

    ``row`` and ``multiple`` are accepted for interface compatibility and
    ignored; the constant is the first element of ``args``.
    """
    value = args[0]
    return value
1277f26d81fa435bb6efe24f777cbb11f2a230ee
100,520
def is_empty(pq):
    """Return True when the priority queue ``pq`` holds no items (falsy)."""
    return False if pq else True
d96746ea94800cb53463b05b2d917cc6b8854f76
100,521
import csv


def layer_to_grid(layer_filename):
    """
    Converts a TMX layer (CSV file) to a matrix indicating whether a tile
    is absent (1, negative tile id) or present (0). This provides the
    barriers needed for pathfinding.
    """
    with open(layer_filename, newline='') as handle:
        raw_rows = list(csv.reader(handle))
    # Negative tile ids mean "no tile here" -> barrier value 1.
    return [[1 if int(cell) < 0 else 0 for cell in row] for row in raw_rows]
8419015b67a65f51f3aaedd1dbddac067772b736
100,522
def format_years( year_numbers):
    """ format a collection of numerical years into a summarized string

    Consecutive runs are rendered as "start-end"; isolated years stand alone.

    >>> format_years( {1901})
    '1901'
    >>> format_years( {1750, 1751})
    '1750-1751'
    >>> format_years( {1750, 1752})
    '1750, 1752'
    >>> format_years( {1992, 1994, 1995})
    '1992, 1994-1995'
    >>> format_years( {2000, 2001, 2002, 2004})
    '2000-2002, 2004'
    """
    ordered = sorted(year_numbers)
    if not ordered:
        return ''
    pieces = []
    run_start = run_end = ordered[0]
    for year in ordered[1:]:
        if year == run_end + 1:
            # extend the current consecutive run
            run_end = year
            continue
        # run broken: flush it, then start a new one
        pieces.append(str(run_start) if run_start == run_end
                      else '%d-%d' % (run_start, run_end))
        run_start = run_end = year
    pieces.append(str(run_start) if run_start == run_end
                  else '%d-%d' % (run_start, run_end))
    return ', '.join(pieces)
0532867c88c43cbbf3bb64ff5cb85c432f588d4f
100,524
def rectanglesOverlap(r1, r2):
    """Check whether axis-aligned rectangles r1 and r2 (x, y, w, h) overlap.

    Touching edges do not count as overlap.
    """
    # De Morgan form of "not separated on either axis".
    overlap_x = r1[0] < r2[0] + r2[2] and r2[0] < r1[0] + r1[2]
    overlap_y = r1[1] < r2[1] + r2[3] and r2[1] < r1[1] + r1[3]
    return overlap_x and overlap_y
bf1022196956dd01694084c65ba54a7b597b0fd0
100,527
import re


def parse_example(txt):
    """ Parse a block of text linked to a function, looking for an example
    tag of the form ``\\example: name``. Return None if no example was found;
    if several are present a warning is printed and the first is returned.
    """
    matches = re.findall(r'\\example\s*:\s*(\w+)', txt, re.DOTALL)
    if not matches:
        return None
    if len(matches) > 1:
        print("warning: only zero or one examples authorized for each function")
    return matches[0]
4655ba8e32c028cda9718f445057f0a12777fefb
100,531
def bounding_boxes_xyxy2xywh(bbox_list):
    """ Transform bounding boxes coordinates.
    :param bbox_list: list of coordinates as: [[xmin, ymin, xmax, ymax], ...]
    :return: list of coordinates as: [[xmin, ymin, width, height], ...]
    """
    return [
        [box[0], box[1], box[2] - box[0], box[3] - box[1]]
        for box in bbox_list
    ]
da55bf46bc65bffb999ab7de9ddaedb333718f53
100,532
def _NtfsFileReference(ino: int) -> str: """Returns an NTFS file reference representation of the inode value.""" # https://flatcap.org/linux-ntfs/ntfs/concepts/file_reference.html record = ino & ((1 << 48) - 1) sequence = ino >> 48 return f"{record}-{sequence}"
cc7e52bd7d3d1ebc6d536a8e48d913336ca49c46
100,533
def handle_interrupt(func):
    """Decorator which ensures that keyboard interrupts are handled properly.

    The wrapped callable's falsy return value is coerced to exit code 0; a
    KeyboardInterrupt prints an error and yields exit code 1.
    """
    def wrapper():
        try:
            outcome = func()
        except KeyboardInterrupt:
            print('\n\033[31mError:\033[0m Aborted.')
            return 1
        return outcome or 0
    return wrapper
e8f6e43942fb3c703c399f7140c7d0f575d3e52c
100,546
def un_javascript(s):
    """Input string assignment code, return what's between the double quotes.

    Requires exactly two double quotes in ``s``.
    """
    assert s.count('"') == 2
    _, _, after_first_quote = s.partition('"')
    value, _, _ = after_first_quote.partition('"')
    return value
a1914c3be1dc0071f20b9f0e178bcebac23351fd
100,547
def is_left(p, p1, p2):
    """ Tests if point p is to the left of a line segment between p1 and p2

    Output (cross product z-component):
      0   the point is on the line
      >0  p is to the left of the line
      <0  p is to the right of the line
    """
    line_dx = p2.x - p1.x
    line_dy = p2.y - p1.y
    return line_dx * (p.y - p1.y) - line_dy * (p.x - p1.x)
75b87ee8c3f22727b3cae5055c3c9452822be70f
100,552
import six
import re


def ToUnderscore(obj):
    """Converts a string, list, or dict from camelCase to lower_with_underscores.

    Descends recursively into lists and dicts, converting all dict keys.
    Returns a newly allocated object of the same structure as the input;
    anything that is not a string, list or dict is returned unchanged.
    """
    if isinstance(obj, six.string_types):
        # Insert an underscore before each run of capitals (except at start).
        return re.sub('(?!^)([A-Z]+)', r'_\1', obj).lower()
    if isinstance(obj, list):
        return [ToUnderscore(entry) for entry in obj]
    if isinstance(obj, dict):
        return {
            ToUnderscore(key): (ToUnderscore(value)
                                if isinstance(value, (list, dict)) else value)
            for key, value in six.iteritems(obj)
        }
    return obj
cb48644f90e2b3e60ab6c31d13a2078085399344
100,554
import collections
from typing import List


def make_list_layer_band(imagery_layers: collections.OrderedDict, count: int) -> List:
    """ Makes list of all output bands and their respective provenance.

    :param imagery_layers: All imagery_layers included in output file and attributes
    :param count: The count of all bands in output file
    :return: A list of [band_number, layer_name, band_within_layer] triples
    """
    source_layer: List[str] = []
    source_band: List[int] = []
    for name, attrs in imagery_layers.items():
        band_count = attrs["bands_count"]
        source_layer.extend([name] * band_count)
        source_band.extend(range(1, band_count + 1))
    return [
        [band_number, source_layer[band_number - 1], source_band[band_number - 1]]
        for band_number in range(1, count + 1)
    ]
2206650e54edcf2364e415b5645cac0ea6f95a05
100,555
def get_domain_notes(domain):
    """ Combine notes from all four endpoint variants of a domain into a
    single '; '-separated string, with commas normalized to semicolons. """
    parts = (domain.http.notes, domain.httpwww.notes,
             domain.https.notes, domain.httpswww.notes)
    combined = "; ".join(parts)
    return combined.replace(',', ';')
daa05bb221ab7d00783c98eb08a245a5a718d4c0
100,558
def append_default_to_args(arguments, default_arguments):
    """Returns a new dict of ``arguments`` with any missing keys filled in
    from ``default_arguments``.

    Keys already present in ``arguments`` always win; neither input is
    modified.

    Args:
        arguments: A dictionary mapping argument name to value.
        default_arguments: A dictionary mapping argument name to default value.

    Returns:
        A newly created dictionary with the default_arguments appended to
        the arguments.
    """
    combined = dict(arguments)
    for name, value in default_arguments.items():
        if name not in combined:
            combined[name] = value
    return combined
5b776f4b6e49a3904d6b660716b4af905195c093
100,562
def has_keys(needles, haystack):
    """ Searches for the existence of a set of needles in a haystack """
    for needle in needles:
        if needle not in haystack:
            return False
    return True
2be0b190f203d8564035938a7a3b0e96e5300a15
100,565
def get_number_of_usable_hosts_from_raw_address(raw_address: str) -> int:
    """ Return number of usable host ip addresses for a CIDR address.

    >>> get_number_of_usable_hosts_from_raw_address("192.168.1.15/24")
    254
    >>> get_number_of_usable_hosts_from_raw_address("91.124.230.205/30")
    2
    """
    prefix_length = int(raw_address.split('/')[1])
    # Subtract network and broadcast addresses.
    return 2 ** (32 - prefix_length) - 2
be06cb197dd3aad98974ee006a93738f085a5c05
100,570
def ListPairs(list, num_pairs):
    """\
    Return every window of 'num_pairs' consecutive elements of list,
    as a list of lists.

    Example:
    list = ['a', 'b', 'c', 'd', 'e']
    ListPairs(list, 3) -> [['a','b','c'], ['b','c','d'], ['c','d','e']]
    """
    window_count = len(list) - num_pairs + 1
    return [list[start:start + num_pairs] for start in range(window_count)]
86e416d7a0f4fba9b066d44be24c6e308759af70
100,571
def rabin_karp_find_substring(string, substring, base=256, prime_modulus=487):
    """ Finds the first occurrence of a substring in a string.

    Uses the Rabin-Karp rolling hash: a hash of a sliding window over
    ``string`` is updated in O(1) per shift, and only on a hash match are
    the characters compared (to guard against collisions).

    Args:
        string: the string that is being looked in
        substring: the string to search for
        base: the base used to calculate hashes
        prime_modulus: positive prime number used to bound the hash results

    Returns:
        Index of the beginning of the first occurrence of the substring
        within the string, or -1 if not found. A substring longer than the
        string returns -1 (previously this raised IndexError).
    """
    m = len(substring)
    n = len(string)
    # Bug fix: the original indexed string[i] for i in range(len(substring)),
    # which raised IndexError when the substring was longer than the string.
    if m > n:
        return -1
    # base^(m-1) mod p, used to remove the leading window character.
    base_n = pow(base, m - 1, prime_modulus) if m > 0 else 0
    substring_hash = 0
    rolling_hash = 0
    # Initial hashes of substring and the first window of string.
    for i in range(m):
        rolling_hash = (base * rolling_hash + ord(string[i])) % prime_modulus
        substring_hash = (base * substring_hash + ord(substring[i])) % prime_modulus
    for i in range(n - m + 1):
        # Verify character-by-character only on a hash match (collisions).
        if rolling_hash == substring_hash and string[i:i + m] == substring:
            return i
        if i < n - m:
            # Slide the window: drop string[i], append string[i + m].
            # (The original applied % only to ord(string[i + m]) due to
            # operator precedence; the whole update is reduced here.)
            rolling_hash = ((rolling_hash - base_n * ord(string[i])) * base
                            + ord(string[i + m])) % prime_modulus
    return -1
d0069831ff34eb0b03c4af376b0534ae68756a5f
100,574
def threshold_mean(img, thresh_multiplier=0.95, color1=255, color2=0):
    """Threshold a greyscale image in place around its scaled mean.

    Pixels strictly above mean * thresh_multiplier become color1, then pixels
    strictly below it become color2. The two assignments are applied
    sequentially (order matters when color1 itself falls below the cutoff);
    pixels exactly equal to the cutoff are left unchanged.
    """
    cutoff = img.mean() * thresh_multiplier
    img[img > cutoff] = color1
    img[img < cutoff] = color2
    return img
65a4779264bcd068dd1661431f70a6d7f3402eef
100,575
def create_report(info):
    """Create a report with a list of auto included zcml.

    ``info`` maps zcml filenames to lists of dotted package names.
    meta.zcml entries are reported first; an empty ``info`` yields a single
    xml comment line.
    """
    if not info:
        # Return a comment. Maybe someone wants to automatically include this
        # in a zcml file, so make it a proper xml comment.
        return ["<!-- No zcml files found to include. -->"]
    # meta.zcml must be processed before everything else.
    ordered = sorted(info, key=lambda fn: fn != "meta.zcml")
    lines = []
    for filename in ordered:
        for dotted_name in info[filename]:
            if filename == "overrides.zcml":
                lines.append(' <includeOverrides package="%s" file="%s" />'
                             % (dotted_name, filename))
            elif filename == "configure.zcml":
                lines.append(' <include package="%s" />' % dotted_name)
            else:
                lines.append(' <include package="%s" file="%s" />'
                             % (dotted_name, filename))
    return lines
924436603fd8a744058da5fd61cc3860c5aeed2b
100,577
import math


def PKCS1(message: int, size_block: int) -> int:
    """ PKCS1 padding function the format of this padding is :
    0x02 | 0x00 | [0xFF...0xFF] | 0x00 | [message]

    The padding is built as an integer mask that is OR-ed onto the message.
    NOTE(review): uses log2(message-1) to size the message in bytes — this
    looks off-by-one for exact powers of two and fails for message <= 1;
    confirm intended input range with callers.
    """
    # compute the length in bytes of the message
    length = math.ceil(math.ceil(math.log2(message-1)) / 8)
    # '02 00' header, then a run of FF bytes filling the block
    template = "0200"
    # generate a template 0xFFFFF.....FF of size_block bytes
    for i in range(size_block-2):
        template = template + 'FF'
    template = int(template,16)
    # Clear the low bytes covering the message plus one separator byte:
    # XOR-ing FF over an FF byte zeroes it, producing the trailing 00 + room
    # for the message itself.
    for i in range(length+1) :
        template = template ^ (0xFF << i*8)
    # add the padding to the original message (message occupies the low bytes)
    message = message | template
    return message
33b65d1a0304f3d205d0fd2e4c52d4675d5b6ca0
100,578
import json


def get_json_result(status):
    """returns a json status based on the status given

    Parameters:
        status (boolean): boolean indicating a good status or bad status

    Returns:
        json: {'status': 'ok'} for good (True), {'status': 'ko'} for bad (False)
    """
    label = 'ok' if status else 'ko'
    return json.dumps({'status': label})
0e8d01db0382bf81039800a2dabe9227f97d8c90
100,580
import itertools


def default_calculate_required_lengths(sizes, intersections):
    """Return (nnodes, nlayers) required of an acceptable induced subgraph.

    Parameters
    ----------
    sizes : list of ints > 0
        How many nodes should be on each layer of an acceptable induced
        subgraph; one integer per layer.
    intersections : list of ints >= 0
        How many nodes should be shared between sets of layers, ordered by
        combination size (all two-layer intersections first, then
        three-layer, etc., following the order of layers in sizes). Must
        contain exactly 2**len(sizes) - len(sizes) - 1 entries.

    Returns
    -------
    nnodes, nlayers : ints
        nlayers is len(sizes); nnodes is the cardinality of the union of
        the per-layer node sets, computed with the inclusion-exclusion
        principle (subtract even-sized intersections, add odd-sized ones).
    """
    assert sizes != [], "Empty layer size list"
    assert len(intersections) == 2**len(sizes)-len(sizes)-1, "Wrong number of intersections"
    assert all(i>=1 and isinstance(i,int) for i in sizes) and all(j>=0 and isinstance(j,int) for j in intersections), "Inappropriate intersections or sizes"
    if not intersections:
        return sizes[0], 1
    nlayers = len(sizes)
    nnodes = sum(sizes)
    position = 0
    for combo_size in range(2, nlayers + 1):
        # Inclusion-exclusion: even-sized combos subtract, odd-sized add.
        sign = -1 if combo_size % 2 == 0 else 1
        combo_count = sum(1 for _ in itertools.combinations(sizes, combo_size))
        for _ in range(combo_count):
            nnodes += sign * intersections[position]
            position += 1
    return nnodes, nlayers
ffa8db953cc567a231e580eb0378ff4efba74d5f
100,582
def get_audio_itag(stream_lst, abr='128kbps', subtype='mp4'):
    """ Return the `itag` of a YouTube audio stream with the specified
    bitrate (`abr`) and subtype.

    If the desired `abr` is not available, the user is prompted
    (interactively, via input()) to pick one from the available list and
    the lookup recurses with that choice.

    Input:
        stream_lst: list of available media formats (pytube-style stream
            objects with includes_audio_track / includes_video_track /
            abr / subtype / itag attributes)
        abr: desired bit rate, string of the form `xxxkbps`
        subtype: desired subtype, string -- e.g. `mp4` (default) or `webm`
    Output:
        `itag` of the first matching audio-only stream
    """
    # Keep only audio-only streams (audio track present, no video track).
    audio_streams = [stream for stream in stream_lst if stream.includes_audio_track == True and stream.includes_video_track == False]
    audio_abrs = [stream.abr for stream in audio_streams if stream.subtype == subtype]
    if abr not in audio_abrs:
        # Desired bitrate unavailable: ask the user and retry recursively.
        print('Select a new abr variable from the following list: ', audio_abrs)
        new_abr = input()
        return get_audio_itag(stream_lst, new_abr, subtype)
    # NOTE(review): the abr match below does not re-check subtype, so the
    # first stream with this bitrate may have a different subtype — confirm.
    itag = [stream.itag for stream in audio_streams if stream.abr == abr]
    audio_itag = itag[0]
    return audio_itag
186efb0873f8168e2e6d49514269023ce79c3f44
100,592
def seq_to_value(sequence):
    """Converts a sequence of base 4 digits (most-significant first) to an
    integer, via Horner's method."""
    value = 0
    for digit in sequence:
        value = value * 4 + digit
    return value
d229c1819d96af3da3e2a28b32da2a1875a17cb6
100,593
import re


def has_timestamp(text):
    """Return True if text has a timestamp of this format: 2014-07-03T23:30:37

    The original pattern used ``\\w{5}`` for the day/hour segment, which
    also matched arbitrary word characters (e.g. "ABCDE"); this version
    requires ``DD T HH`` digits explicitly.
    """
    pattern = re.compile(r"2\d{3}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}")
    return pattern.search(text) is not None
b0e65c3bd0f23819c3ffddaf3d39f686743f0b17
100,600
import json


def build_error_json(errorCode, errorType, message, httpCode):
    """Builds the error json string.

    When an error is encountered during execution, this function is called
    by the error_handler function to build a properly formatted json string
    error.

    Parameters
    -----------
    errorCode : string
        The original error code created when the error was encountered.
    errorType : string
        Reserved for future use.
    message : string
        More details about the error which should be included in the error
        object returned.
    httpCode : int
        The 3 digit HTTP code associated with this error. (200, 300, 400)

    Returns
    -----------
    json string
        The error message in a properly formatted, pretty-printed json string.
    """
    # Build the object directly and let json.dumps serialize it. The
    # original concatenated strings, which raised TypeError for an int
    # httpCode (despite the documented int type) and produced invalid JSON
    # whenever message contained a double quote.
    payload = {
        "errorType": errorType,
        "errorCode": errorCode,
        # Keep the documented output shape: httpCode is emitted as a string.
        "httpCode": str(httpCode),
        "message": message,
    }
    return json.dumps(payload, indent=2)
55d460804e374e0b7662727768192c08c7ce4865
100,607
def rowadd(matrix, index1, index2, k):
    """ In-place elementary row operation: row[index1] += k * row[index2].

    Returns the (mutated) matrix for convenience.
    """
    increment = k * matrix[index2, :]
    matrix[index1, :] += increment
    return matrix
4cbb5e3ed3b26a93ff4908dafedf55b772dd792d
100,614
def cat_dict_tuples(d1, d2):
    """Concatenate tuples across two dictionaries when keys match.

    Returns a new dict based on a copy of ``d1`` where each value is
    ``d1[key] + d2[key]`` for every key in ``d2``. A key present in ``d2``
    but not in ``d1`` raises KeyError (unchanged from the original
    behavior).
    """
    res = d1.copy()
    # Bug fix: dict.iteritems() is Python 2 only; use items() on Python 3.
    for key, value in d2.items():
        res[key] = res[key] + value
    return res
3d12f626ece411fe067b6d3303e5078b9a623e46
100,620
def _get_groups_hosts_dict(dynamic_inventory, top_level_group='all'): """Get a dictionary of groups and hosts. Hosts will be listed under their lowest level group membership only. Args: dynamic_inventory (dict): Dynamic inventory dictionary top_level_group (str): Name of top level group Returns: dict: Dictionary containing groups with lists of hosts """ groups_hosts_dict = {} if 'hosts' in dynamic_inventory[top_level_group]: if top_level_group not in groups_hosts_dict: groups_hosts_dict[top_level_group] = [] groups_hosts_dict[top_level_group] += ( dynamic_inventory[top_level_group]['hosts']) if 'children' in dynamic_inventory[top_level_group]: for child in dynamic_inventory[top_level_group]['children']: groups_hosts_dict.update(_get_groups_hosts_dict(dynamic_inventory, child)) return groups_hosts_dict
29ff4213319abe9253bff64702c81fac7c0a5a18
100,622
def convert_csv_to_list(string):
    """ Convert a comma-separated single string to a list of fields. """
    separator = ","
    return string.split(separator)
3235f60df3c73f18f7ffd8ebdb28474d9a1f5043
100,623
def nova_one_import_per_line(logical_line):
    """ nova HACKING guide recommends one import per line:

    Do not import more than one module per line

    Examples:
    BAD: from nova.rpc.common import RemoteError, LOG
    BAD: from sqlalchemy import MetaData, Table
    N301
    """
    comma_pos = logical_line.find(',')
    if comma_pos == -1:
        return None
    # Short-circuit ordering matters: split()[2] is only evaluated for
    # "from ..." lines, matching the original behavior.
    multi_import = (logical_line.startswith("import ")
                    or (logical_line.startswith("from ")
                        and logical_line.split()[2] == "import"))
    if multi_import:
        return comma_pos, "NOVA N301: one import per line"
1737e7fc44ed45fa45c64c76458f85d8a0ae67fa
100,625
def run_task(
        ecs_client,
        cluster_name,
        task_definition,
        started_by,
        container_name="app",
        command=None):
    """Run a given command against a named container in a task definition
    on a particular cluster.

    Args:
        ecs_client: boto3 ECS client.
        cluster_name: Name of the cluster to run on.
        task_definition: Task definition to launch.
        started_by: Tag recorded as the task starter.
        container_name: Container within the task to override.
        command: Command list for the container; defaults to an empty list.
            (Bug fix: the original used a mutable ``[]`` default, which is
            shared across calls; None is used as the sentinel instead.)

    Returns the response from calling run_task.
    """
    return ecs_client.run_task(
        cluster=cluster_name,
        taskDefinition=task_definition,
        overrides={
            'containerOverrides': [
                {
                    'name': container_name,
                    'command': command if command is not None else []
                },
            ]
        },
        count=1,
        startedBy=started_by,
    )
95238051a2b023c24a144328d95c56ac899cb33a
100,627
def has_no_numbers(value):
    """Checks if the string contains no digit characters.

    Non-string inputs always yield False.
    """
    if not isinstance(value, str):
        return False
    return all(not ch.isdigit() for ch in value)
97428385a68bd461d5cad3528e38ecc1861b2828
100,638
def count_entities(doc, ent_label):
    """Counts the number of entities in doc that are of type ent_label.

    Bug fix: the original hardcoded 'PERSON' in the comparison, ignoring
    the ent_label argument entirely.

    Keyword arguments
    doc -- a spacy.tokens.doc.Doc object
    ent_label -- the entity label to be checked, e.g. "PERSON"
    """
    return sum(1 for ent in doc.ents if ent.label_ == ent_label)
6582f1cfda730ba8313a0a855658a299a65a7a4a
100,643
def always_true(*args):
    """ A predicate function that ignores all arguments and is always truthy. """
    del args  # intentionally unused
    return True
b08c44467cd14b572b5a7e404da441e9b74b0e26
100,644
def coordinate(latitude, longitude):
    """Coordinate data model.

    Parameters
    ----------
    latitude : float
        Decimal degree coordinate in EPSG:4326 projection.
    longitude : float
        Decimal degree coordinate in EPSG:4326 projection.

    Returns
    -------
    coordinate : dict
        Coordinate data model as a dictionary.
    """
    return dict(latitude=latitude, longitude=longitude)
8762c4f6b4784408e7bebc48587c1495a93d52c8
100,645
import pickle


def load_sim_data(filename):
    """ Load a pickled dataset (one or more sequentially dumped objects)
    and return the objects as a list, in dump order. """
    records = []
    with open(filename, 'rb') as handle:
        while True:
            try:
                records.append(pickle.load(handle))
            except EOFError:
                # End of the stream of pickled objects.
                break
    return records
628e8ae3523b91a306a5e6b556790625feb6637e
100,646
def get_predictor_cost(x, y, rho, sens, spec, cov):
    """ Calculate the predictor's cost on a point (x, y) based on rho and
    its sensitivity, specificity and coverage. """
    positive_term = x * (rho * cov * (1 - sens) + cov - 1)
    negative_term = y * ((1 - rho) * cov * (1 - spec) + cov - 1)
    return positive_term + negative_term + 1 - cov
f3e88b9cd16a26e0de8f5c54ae12fb0800b1e17d
100,651
def process_selection(selected):
    """ Processes a field-line selection.

    Returns a list for a single int or an iterable of ints, a range for a
    "from:to(:by)" string, or None for the string ':' (meaning "all").
    Raises ValueError for a malformed string and TypeError for non-int
    iterable elements or any other unexpected input type.
    """
    if isinstance(selected, int):
        # a single selection becomes a one-element list
        return [selected]
    elif isinstance(selected, str):
        # a string is interpreted as a range spec, fields separated by ":"
        if selected == ':':
            return None
        selected = selected.split(':')
        try:
            first = int(selected[0])
            last = int(selected[1])
        except (ValueError, IndexError):
            raise ValueError('Input string should be of format from:to(:by).')
        if len(selected) == 3:
            # the step between elements is optional
            step = int(selected[2])
            return range(first, last, step)
        elif len(selected) > 3:
            raise ValueError('Input string should be of format from:to(:by).')
        else:
            return range(first, last)
    elif hasattr(selected, '__iter__'):
        # any other iterable: copy its (int-only) elements into a list
        a = []
        for x in selected:
            if isinstance(x, int):
                a.append(x)
            else:
                raise TypeError('Field line selection should only have integers.')
        return a
    else:
        raise TypeError('Wrong field line selection format.')
518acd5582aada399f4ad550859e0359f2c7e285
100,653
import requests


def fetch_page_as_text(session: requests.Session, page_url: str) -> str:
    """ Get the English translated text of the page at the received URL.

    Raises requests.HTTPError (via raise_for_status) on a bad response.
    """
    response = session.get(page_url, headers={'Accept-Language': 'en-US'})
    response.raise_for_status()
    return response.text
0710acbaa8c0352982b7cc38a5ed88a5135e3fc4
100,654
import re


def wrap_text(text, lilen=80, indent=""):
    """Wrap text to lines of at most lilen characters.

    All whitespace (including newlines and tabs) is collapsed to single
    spaces first; continuation lines are prefixed with ``indent``.
    """
    collapsed = " ".join(text.replace("\n", " ").replace("\t", " ").split())
    chunks = re.findall(r".{1,%s}(?:\s+|$)" % lilen, collapsed)
    joiner = "\n%s" % indent
    return joiner.join(chunk.strip() for chunk in chunks)
4ae85455b0e2576e7f160c0c879db6e3395007f2
100,660
from typing import Callable


def mask(lis: list, key: Callable, keep_last=None) -> list:
    """ Filter `lis` by evaluating `key` on each item against the last kept
    item. The first element is always kept; each later item is kept only
    when key(item, last_kept) is truthy.

    Arguments:
        keep_last (boolean): When True, a rejected item overwrites the
            last kept item (so the final item of a rejected run survives).
    """
    kept = [lis[0]]
    for candidate in lis[1:]:
        if key(candidate, kept[-1]):
            kept.append(candidate)
        elif keep_last is True:
            # replace the tail so the last rejected item wins
            kept[-1] = candidate
    return kept
77bd7a28fb60a7ede4d615439019ccf918c8c7a6
100,665
def get_alt_specific_variable_name(var_name, alt_name):
    """ Gets the alternative specific variable: a string of the variable
    name and the alternative name joined by an underscore. """
    return "_".join((var_name, alt_name))
1a08cc4502ee971be24e69f0b18080cc43564b0e
100,674
from typing import Dict


def get_event_body() -> Dict[str, str]:
    """ Create the default event body payload. """
    body: Dict[str, str] = {"action": "usage"}
    return body
d05fb4a924de8dfd9bce96855d9bbd865789e647
100,677
import csv


def read_annotation(filename, column):
    """Read tab-separated annotation data; return a dict keyed by each
    row's value in ``column`` (later rows overwrite earlier duplicates)."""
    with open(filename, newline='') as handle:
        rows = csv.DictReader(handle, delimiter="\t")
        return {row[column]: row for row in rows}
c05c94d3fe0cbed781adb7e5ed5fc401586aad9a
100,679
def sublists(superlist, n):
    """Breaks a list into a list of sublists, each of length no more than n."""
    return [superlist[start:start + n] for start in range(0, len(superlist), n)]
6d315eea180c24fc54b714293b70dad0e5489757
100,682
def count_increased(values):
    """ Count how many measurements increased from the previous value

    :param values: List of values (must be non-empty)
    :returns: Number of measurements that increased from the previous value
    """
    increased = 0
    prev = values[0]
    for current in values[1:]:
        # booleans add as 0/1
        increased += current > prev
        prev = current
    return increased
794333a9ce4749a7a1832e36aa30ce8baa1690bf
100,683
def type_name(cls):
    """Return 'module.ClassName' for cls, or '(none)' when cls is falsy."""
    if not cls:
        return '(none)'
    return f"{cls.__module__}.{cls.__name__}"
708ef5940d415500789dc64d5308632ed9676a7e
100,685
def _label_fn_engchin(fpath): """ Function for labeling the data for the language determination model. """ if "EN_" in fpath: return 0 else: return 1
c31deb4990e1689b139cc3633e9dc6c1fbb655de
100,689
def PercentileToPosition(percentile, field_size):
    """Converts from percentile to hypothetical position in the field.

    percentile: 0-100
    field_size: int
    Returns a float position (1 = first place).
    """
    runners_beaten = percentile * field_size / 100.0
    return field_size - runners_beaten + 1
b1a822625a7a63b67e999b00dac15d757dc9fbb4
100,696
import re


def parse_show_vlan_summary(raw_result):
    """ Parse the 'show vlan summary' command raw output.

    :param str raw_result: vtysh raw result string.
    :rtype: dict
    :return: The parsed result of the show vlan summary command in a \
        dictionary of the form:

     ::

        {
            'vlan_count': '4'
        }
    """
    pattern = r'Number\s+of\s+existing\s+VLANs:\s+(?P<vlan_count>\d+)'
    match = re.search(pattern, raw_result)
    assert match
    return match.groupdict()
77f834e088c40abcb901f65d7ba36165bcc01e93
100,698
from typing import Tuple


def output(count: int, route: Tuple[list, float]) -> str:
    """Format the route display line for option *count*."""
    stops = ', '.join(route[0])
    return f"Option {count}: {stops}\n"
0efb9dd2fa6018f8dadbfd9923c6a859e988f527
100,701
from datetime import datetime


def update_first_notification_object(first_notification_object, assignment_id, notifcation_type, time):
    """ Update and persist a FirstAssignmentOfCourse object.

    :param first_notification_object: FirstAssignmentOfCourse object that
        needs to be updated.
    :param assignment_id: ID of the assignment the notification is for.
    :param notifcation_type: Type of planned notification ('early' or 'late').
    :param time: Unix timestamp of when the notification will need to be
        sent, or None.
    :return: The updated (and saved) FirstAssignmentOfCourse object.
    """
    record = first_notification_object
    record.assignment_id = assignment_id
    record.notification_type = notifcation_type
    if time is None:
        record.time = None
    else:
        record.time = datetime.fromtimestamp(time)
    record.save()
    return record
37d0ee55a8764e20193380708f05c880ee359df3
100,703
def splitDAGPath(path):
    """Split a Maya DAG path into (namespace, component names).

    The path may take the form ``<namespace>:<path>`` where ``<path>`` is a
    '|'-separated sequence of names. The namespace is None when no ':' is
    present; an empty namespace string results from a leading ':'.

    Examples:
      :time1    -> ('', ['time1'])
      |foo|bar  -> (None, ['', 'foo', 'bar'])
      foo|bar   -> (None, ['foo', 'bar'])
    """
    if not isinstance(path, str):
        raise ValueError("string type expected as path argument, got %s"%type(path))
    namespace = None
    sep = path.find(":")
    if sep != -1:
        namespace, path = path[:sep], path[sep + 1:]
    return namespace, path.split("|")
0ecaff70ac4d41b6871fa76f5be59c31471f045b
100,705
def extract_cds_lines(all_bed_lines):
    """Keep only bed lines whose 4th (name) field ends with '_CDS'.

    Empty lines are skipped; the result is newline-joined with a
    trailing newline.
    """
    kept = [
        bed_line
        for bed_line in all_bed_lines.split("\n")
        if bed_line and bed_line.split("\t")[3].endswith("_CDS")
    ]
    return "\n".join(kept) + "\n"
945fcad32ed25b97fdd83b4aecef36530880dc4c
100,706
import torch


def transpose(input_, axes=None):
    """Wrapper of `torch.transpose` that swaps two dimensions.

    Parameters
    ----------
    input_ : DTensor
        Input tensor.
    axes : list of ints, optional
        The two dimensions to swap; defaults to (0, 1) when omitted.
    """
    dim0, dim1 = (0, 1) if axes is None else (axes[0], axes[1])
    return torch.transpose(input_, dim0, dim1)
b42cb2ee334b9c90fbfe53804f6d83183632455a
100,707
def _as_inline_code(text): """Apply inline code markdown to text Wrap text in backticks, escaping any embedded backticks first. E.g: >>> print(_as_inline_code("foo [`']* bar")) `foo [\\`']* bar` """ escaped = text.replace("`", r"\`") return f"`{escaped}`"
52b01b0e6eeb30ea7845c19006375d6c04748770
100,710
import json


def get_tour(tours, tour_id, session):
    """Fetch and attach coordinates for the tour at index `tour_id`.

    The coordinates URL is taken from the tour's HAL links, fetched via
    the provided `session`, and stored under tour['coordinates'].
    """
    tour = tours[tour_id]
    coords_url = tour["_links"]["coordinates"]["href"]
    response = session.get(coords_url, headers={"onlyprops": "true"})
    tour['coordinates'] = json.loads(response.text)['items']
    return tour
838ada048dc98d83b7e4b0d9167cbe72d383e90d
100,713
def lin_rodnitsky_ratio(avg_cost_per_conversion_all_queries, avg_cost_per_conversion_queries_with_one_conversion_or_more):
    """Compute the Lin-Rodnitsky Ratio for paid-search account quality.

    Args:
        avg_cost_per_conversion_all_queries (float): Average cost per
            conversion across the whole PPC account.
        avg_cost_per_conversion_queries_with_one_conversion_or_more (float):
            Average cost per conversion for queries with >= 1 conversion.

    Returns:
        float: the ratio. Interpretation bands:
            1.0-1.5 too conservative; 1.5-2.0 well-managed;
            2.0-2.5 too aggressive; >= 2.5 mismanaged.
    """
    numerator = avg_cost_per_conversion_all_queries
    denominator = avg_cost_per_conversion_queries_with_one_conversion_or_more
    return numerator / denominator
415a0bf2fdaca924c219ae70cfb9fa1fddd84a87
100,715
def mb_n_bl(n_tr):
    """Number of baselines for `n_tr` tracks: n*(n-1)/2 (returned as float)."""
    pair_count = n_tr * (n_tr - 1)
    return pair_count / 2
a5c2d875404bbf09ac06b5646b3485e984efb01d
100,716
def extract_span_from_django_request(request, *args, **kwargs):
    """Safely pull ``current_span`` off an ``HttpRequest``.

    Never raises: any failure (including exotic __getattr__ behavior on
    the request) yields None. Compatible with the ``@trace`` decorator.
    """
    try:
        span = getattr(request, 'current_span', None)
    except Exception:  # pragma: no cover
        span = None
    return span
b629285f96aab0b41f5a2b5ef94161885dd46f1d
100,721
def _resolve_rest_format(mode, user_format): """Resolve format of submission to JPred REST interface based on provided mode and user format. :param str mode: Submission mode, possible values: `single`, `batch`, `msa`. :param str user_format: Submission format, possible values: `raw`, `fasta`, `msf`, `blc`. :return: Format for JPred REST interface. :rtype: :py:class:`str` """ if user_format == "raw" and mode == "single": rest_format = "seq" elif user_format == "fasta" and mode == "single": rest_format = "seq" elif user_format == "fasta" and mode == "msa": rest_format = "fasta" elif user_format == "msf" and mode == "msa": rest_format = "msf" elif user_format == "blc" and mode == "msa": rest_format = "blc" elif user_format == "fasta" and mode == "batch": rest_format = "batch" else: raise ValueError("""Invalid mode/format combination. Valid combinations are: --mode=single --format=raw --mode=single --format=fasta --mode=msa --format=fasta --mode=msa --format=msf --mode=msa --format=blc --mode=batch --format=fasta""") return rest_format
9ddde46ad08c0c4be7310eec73433984ef02fe16
100,723
def list_insert_list(l, to_insert, index):
    """Return a copy of `l` with the items of `to_insert` spliced in at `index`.

    The original list is not modified.

    Example:
        list_insert_list(["I", "rad", "list"], ["am", "a"], 1)
        -> ['I', 'am', 'a', 'rad', 'list']
    """
    result = list(l)
    result[index:index] = list(to_insert)
    return result
3a14dab4a78798e2a04452024cabc7b2a7a0b567
100,724
def convert_timedelta(duration):
    """Break a timedelta into non-negative days, hours, minutes, seconds.

    :param duration: a datetime.timedelta
    :return: (days, hours, minutes, seconds) with each value clamped at 0
    """
    # .seconds is always within [0, 86399]; only .days can go negative.
    hours, remainder = divmod(duration.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    days = duration.days
    return (max(0, days), max(0, hours), max(0, minutes), max(0, seconds))
239ba75891dbc85da2e4d2f41205d7ce9b66e364
100,725
import itertools


def grouper(iterable, n, fillvalue=None):
    """Chunk an iterable into n-tuples, padding the last with fillvalue.

    grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')
    """
    # n references to the SAME iterator advance it in lockstep.
    slots = [iter(iterable)] * n
    return itertools.zip_longest(*slots, fillvalue=fillvalue)
0919bafbcfbd55cb0d4b32831834e28719155ea1
100,726
def merge_dicts(*args):
    """Merge multiple dictionaries into one.

    Later dictionaries win on duplicate keys (same as repeated update).

    :param args: dictionaries to merge
    :return: a single merged dictionary
    """
    merged = {}
    for mapping in args:
        merged.update(mapping)
    return merged
34768ac3bd0a79fa9d1a4939122262a6fbd186fc
100,727
def get_user_group(dest):
    """Split the owning user and group out of a late-bind config dest dict.

    Args:
        dest: dict with a "user_group" entry formatted "user:group",
              e.g. dest["user_group"] = "Bob:devops".

    Returns:
        [user, group] as produced by str.split(":").
    """
    user_group_spec = dest["user_group"]
    return user_group_spec.split(":")
8268034fabf7a5f5831b1a41d6543d48bea1759d
100,732
def column_list(cursor):
    """Return the column names of the cursor's current result set.

    Reads the first element of each entry in DB-API `cursor.description`.
    """
    return [description[0] for description in cursor.description]
7ff3f1b6f61d06b7ed18e23b68f2464ab34f68d6
100,738
import json


def to_json(text, *args, **kwargs):
    """Build the JSON payload i3blocks reads (format=json) to set state.

    `text` becomes the displayed "full_text"; extra (name, value) pairs
    given positionally or as keyword arguments become environment
    variables for the next run.

    e.g. print(to_json("Spam!", state=state))
    """
    payload = dict(kwargs)
    payload["full_text"] = text
    payload.update(args)
    return json.dumps(payload)
e35a6e46a69ee3be05863ec69d8f0301c30f7180
100,739
import re


def parse_version(version):
    """Parse a setuptools_scm-style version into a comparable int tuple.

    Handles final versions plus alpha ('a'), beta ('b') and release
    candidate ('rc') prereleases; anything after that is ignored. The
    tuple ends with a sentinel so relational comparison orders
    prereleases before finals: finals end in -1, prereleases end in a
    type code (-4 alpha, -3 beta, -2 rc) followed by the prerelease
    number.

    This tuple format is part of the remote protocol; do not change it
    in breaking ways.

    :raises ValueError: if the string does not start with X.Y.Z.
    """
    version_re = r"""
    (?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)  # version, e.g. 1.2.33
    (?P<prerelease>(?P<ptype>a|b|rc)(?P<pnum>\d+))?  # optional prerelease, e.g. a1 or b2 or rc33
    """
    match = re.match(version_re, version, re.VERBOSE)
    if match is None:
        raise ValueError('Invalid version string %s' % version)
    parts = match.groupdict()
    result = [int(parts['major']), int(parts['minor']), int(parts['patch'])]
    # lastgroup is the outermost group matched last: 'prerelease' iff present.
    if match.lastgroup == 'prerelease':
        type_codes = {'a': -4, 'b': -3, 'rc': -2}
        result.append(type_codes[parts['ptype']])
        result.append(int(parts['pnum']))
    else:
        result.append(-1)
    return tuple(result)
e03e67728990957fdcd5689b4743fc2717900c92
100,742
def _scale(value, source, destination): """ Linear map a value from a source to a destination range. :param int value: original value :param tuple source: source range :param tuple destination: destination range :rtype: float """ return ( ((value - source[0]) / (source[1]-source[0])) * (destination[1]-destination[0]) + destination[0] )
8c449200a485c324f4b2661a5aecb95c86197108
100,743
from pathlib import Path


def fixture_sample_file() -> Path:
    """Return the path of the sample.txt fixture next to this module."""
    return Path(__file__).with_name("sample.txt")
765dbf2283f2f21c705ce9a8eac1728f4cbed7ea
100,744
def get_closest_level_below(search_coord, lvl_elev_dict):
    """Return the level at the highest elevation strictly below a coordinate.

    :param search_coord: object exposing a .Z elevation attribute.
    :param lvl_elev_dict: mapping of elevation -> level.
    :return: the level whose elevation is the greatest one < search_coord.Z.
    """
    best_elev = None
    # Elevations are scanned in ascending order; stop at the first one
    # at or above the coordinate.
    for elev in sorted(lvl_elev_dict):
        if elev >= search_coord.Z:
            break
        best_elev = elev
    return lvl_elev_dict[best_elev]
991d592674602e7a9f0489fab86cdba7aacd8eea
100,745
def linear_extrap(x1, x2, x3, y1, y2):
    """Extrapolate the line through (x1, y1), (x2, y2) to x3.

    Returns y3 satisfying (y2 - y1)/(x2 - x1) = (y3 - y2)/(x3 - x2).
    """
    rise = y2 - y1
    run = x2 - x1
    return (x3 - x2) * rise / run + y2
9f45b7443ded484c2699e9bd545a3d551a39313e
100,750
def _get_binding(pdb_filename): """ Get binding mode of pdb file. x if none. e.g. 11as_r_u.pdb would give u """ if "_u" in pdb_filename: return "u" elif "_b" in pdb_filename: return "b" else: return "x"
177273f3eb69498f760050e9c366859527082c24
100,753
import math


def euclidean_dist(point1, point2):
    """Compute the Euclidean distance between two 2-D points.

    Parameters
    ----------
    point1, point2 : 2-tuples of float
        The input points.

    Returns
    -------
    d : float
        The distance between the input points.

    Examples
    --------
    >>> euclidean_dist((1.0, 2.0), (4.0, 6.0))
    5.0
    """
    (ax, ay), (bx, by) = point1, point2
    return math.sqrt((ax - bx) ** 2 + (ay - by) ** 2)
16e9bab3129a870d73f7bcb4bb060899604e29b3
100,755
def fibonacci_recur(n):
    """Return the n-th Fibonacci number by naive recursion.

    - Time complexity: O(2^n)
    - Space complexity: O(n) call-stack depth.
    """
    if n > 1:
        return fibonacci_recur(n - 2) + fibonacci_recur(n - 1)
    return n
97959fe0dbffa13e0198561048fb2d02d95e6e70
100,758
def title_to_snake_case(text):
    """Convert "Column Title" to column_title (spaces/hyphens -> underscores)."""
    lowered = text.lower()
    return lowered.replace(' ', '_').replace('-', '_')
72893c308cd772cc972f76199ee0e32ce5f7c92b
100,759
def get_level_structure(level_id, schematic):
    """Fetch a DAG level's structure (e.g. DAG or TaskGroup) from the schematic."""
    return schematic[level_id]["structure"]
3d4e7475c3b1de9d6458f5779f71e67f7c729fba
100,761
import math


def conditional(model, player, state_set, x):
    """Conditional probability of `state_set` given the states `player`
    considers possible at `x` (the neighbors in the player's graph).

    The doctests below depend on project modules (simple_model,
    thomas_model, extended_thomas_model) not available here.

    >>> from simple_model import *
    >>> m = SimpleModel(0.1, 0.95)
    >>> conditional(m, 0, set([(1, 1, 1)]), (1,1,0))
    0.95
    >>> conditional(m, 0, set([(1, 1, 1)]), (1,0,0))
    0.0
    >>> conditional(m, 0, get_satisfying_states(m), (1,0,0))
    0.10000000000000002
    >>> from thomas_model import *
    >>> m = ThomasModel(0.1, 0.25)
    >>> conditional(m, 0, get_satisfying_states(m), (1,1,1,0,0))
    1.0
    >>> conditional(m, 0, set([(1, 1, 1, 0, 0), (1, 1, 1, 0, 1), (1, 1, 1, 1, 1), (1, 1, 1, 1, 0)]), (1,1,1,0,0))
    0.25000000000000017
    >>> conditional(m, 0, set([(1, 1, 1, 0, 0), (1, 1, 1, 0, 1), (1, 1, 1, 1, 1), (1, 1, 1, 1, 0)]), (1,1,1,1,1))
    1.0
    >>> from extended_thomas_model import *
    >>> m = ExtendedThomasModel(0.1, 0.25, 2)
    >>> conditional(m, 0, get_satisfying_states(m), (1,1,0,0,0,0,0,0,0,0,0))
    1.0
    """
    # States the player considers possible at x, per its accessibility graph.
    possible_states = set(model.graphs[player][x])
    # Restrict the event to the possible states before normalizing.
    state_set = state_set.intersection(possible_states)
    # Normalizing constant: total probability mass over the possible states.
    norm = sum([model.probs[x] for x in possible_states])
    if len(state_set) > 0:
        p = sum([model.probs[x] for x in state_set])
        # Division performed in log space; the doctest values above pin this
        # exact floating-point behavior (e.g. 0.25000000000000017), so do not
        # replace with a plain p / norm.
        p = math.exp( math.log(p) - math.log(norm) )
    else:
        p = 0.0
    # NOTE(review): this guard fires only after norm has already been used
    # as a divisor above (math.log(norm) raises first if norm <= 0).
    assert norm > 0
    return p
3f5625f38b7246d6498d7320637e332ccd7ce693
100,765
def is_quantity_of_tickets_valid(num_tickets):
    """Check the ticket quantity against the specification.

    :param num_tickets: the quantity of tickets to be tested
    :returns: True when the quantity is strictly between 0 and 100
    """
    return num_tickets > 0 and num_tickets < 100
4cd9d1e6b078daab22b703579deef278600117db
100,769
import re


def convert_emea_date_to_amer_date(date):
    """Rewrite dd/mm/yyyy (EMEA format) as mm/dd/yyyy (AMER format).

    Non-matching input is returned unchanged.
    """
    emea_pattern = r"(\d{2})(\/)(\d{2})\2(\d{4})"
    amer_layout = r"\3\2\1\2\4"
    return re.sub(emea_pattern, amer_layout, date)
6cd2a1c194d01f670fdb436d0bb64df0d26d3446
100,771
from typing import Dict from typing import Any def _find_host_port(ports: Dict[str, Any], container_port: int) -> str: """Find host port from container port mappings. `ports` is a nested dictionary of the following structure: {'8500/tcp': [{'HostIp': '0.0.0.0', 'HostPort': '32769'}], '8501/tcp': [{'HostIp': '0.0.0.0', 'HostPort': '32768'}]} Args: ports: Dictionary of docker container port mapping. container_port: Corresponding container port you're looking for. Returns: A found host port. Raises: ValueError: No corresponding host port was found. """ mappings = ports.get('{}/tcp'.format(container_port)) if mappings: return mappings[0].get('HostPort') else: raise ValueError( 'No HostPort found for ContainerPort={} (all port mappings: {})' .format(container_port, ports))
516db707bf0474842c3732b34a522dcc725a1213
100,776
def betabin_expval(alpha, beta, n):
    """Expected value of the beta-binomial distribution: n*alpha/(alpha+beta)."""
    numerator = n * alpha
    return numerator / (alpha + beta)
a106fabc20be2333f052ee90536afc1224803a31
100,782
def build_input_string(tokens):
    """Assemble *tokens* into one space-separated input string."""
    separator = " "
    return separator.join(tokens)
b69ad46ce6503236c44810d423deb06330e59d6a
100,784
def manhattan_distance(c1, c2):
    """Manhattan (L1) distance between two 2-D coordinates."""
    (ax, ay), (bx, by) = c1, c2
    return abs(ax - bx) + abs(ay - by)
ee2ef9bfac9d25fc6e880c054bf9c3ca4033a21a
100,785
def concatenate_or_append(value, output):  # O(1)
    """Keep `output` a flat list of lists while absorbing `value`.

    When `value` is already a list of lists its items are concatenated;
    when it is a single flat list it is appended as one element.

    >>> concatenate_or_append([42], [])
    [[42]]
    >>> concatenate_or_append([[42, 49]], [[23, 35]])
    [[23, 35], [42, 49]]
    """
    if isinstance(value[0], list):
        output.extend(value)
    else:
        output.append(value)
    return output
3123dc5b57fdb5792a5d65647088e1aea45abf19
100,786
def to_str(n: float) -> str:
    """Return the string representation of the number *n*."""
    rendered = str(n)
    return rendered
752356de8c02236889d0a9af961faf07c8153f5e
100,789
def akaike(LnL, k):
    """Akaike Information Criterion: 2k - 2 ln(L).

    `k` is the number of estimated model parameters and `LnL` the
    model's maximum log-likelihood.
    """
    penalty = 2 * k
    goodness_of_fit = 2 * LnL
    return penalty - goodness_of_fit
51ef4a7307ffa85997ad60e661c41c3829c7c423
100,790
from typing import Optional


def extract_ext(filename: Optional[str]) -> str:
    """Extract the (lowercased) extension from a filename.

    Returns '' for None/empty input or names without a dot.

    >>> extract_ext('test.txt')
    'txt'
    >>> extract_ext('test')
    ''
    >>> extract_ext('test.txt.bak')
    'bak'
    """
    if not filename:
        return ''
    _, dot, ext = filename.rpartition('.')
    if not dot:
        return ''
    return ext.lower()
dc1a4de3b446229dac141c8f492b6f7e1c399eeb
100,791
def replace_data(original_string, off, old_len, string_to_insert):
    """Replace a substring with a new string (lengths may differ).

    :param original_string: original string
    :param off: offset at which the replacement starts
    :param old_len: length of the substring being replaced
    :param string_to_insert: replacement text
    :return: a new string with the substring at [off, off+old_len) replaced
    """
    prefix = original_string[:off]
    suffix = original_string[off + old_len:]
    return prefix + string_to_insert + suffix
9b16609007a3879f6ee3eadab8b225324ae64df1
100,795
def split(items, partitions):
    """Split `items` into consecutive chunks of length `partitions`.

    NOTE: despite the parameter name, `partitions` is the *chunk size*,
    not the number of partitions produced (range() steps by it). The
    final chunk holds the remainder and may be shorter. A non-positive
    `partitions` yields an empty list.

    The parameter name is kept for caller compatibility; the previous
    docstring ("divide a collection into multiple partitions") and the
    local variable shadowing the function's own name were misleading.

    :param items: sequence to split (must support len() and slicing).
    :param partitions: chunk size; <= 0 returns [].
    :return: list of slices of `items`.
    """
    chunks = []
    if partitions > 0:
        for start in range(0, len(items), partitions):
            chunks.append(items[start:start + partitions])
    return chunks
4920b5bd7704ce9fc094f8afd04afd5f66d79896
100,797