content: string (39 to 9.28k characters)
sha1: string (40 characters)
id: int64 (8 to 710k)
def load_cutout_list(cutoutlist):
    """Load the galaxy cutouts from a list of files.

    Parameters
    ----------
    cutoutlist : string
        The filename containing the list of paths to image cutouts.

    Returns
    -------
    fnames : list(string)
        List containing the path to a cutout as each element.
    """
    with open(cutoutlist) as fin:
        fnames = fin.readlines()
    fnames = [d_.strip('\n') for d_ in fnames]
    return fnames
8c854d497edb3b41092eea114504c2ce206728ae
586,673
def _force(**kwargs):  # {{{
    """
    Determine the force. See Load.__init__()

    Returns:
        The force as a float.

    Examples:
        >>> _force(kg=1)
        -9.81
    """
    if "force" in kwargs:
        force = float(kwargs["force"])
    elif "kg" in kwargs:
        force = -9.81 * float(kwargs["kg"])
    else:
        raise KeyError("No 'force' or 'kg' present")
    return force
e4233e12130d612e3a964c380e626471e70008ea
421,050
def simple_substitute(text, alphabet, code):
    """
    Used to encode or decode the given text based on the provided alphabet.

    PARAMS:
        plaintext (string): The message you want to encode
        alphabet (dictionary[char] = char): Key is plaintext char, value is the
            substitute. Enter the same alphabet for both encode and decode
        code (int): Whether to encode (0) or decode (1)

    RETURNS:
        string: The decoded plaintext or encoded ciphertext
    """
    if code == 0:
        plaintext = text
        ciphertext = ""
        for plainChar in plaintext:
            ciphertext += alphabet[plainChar]
        return ciphertext
    elif code == 1:
        ciphertext = text
        plaintext = ""
        # Reverting alphabet
        decodeAlphabet = {}
        for key in alphabet.keys():
            decodeAlphabet[alphabet[key]] = key
        for cipherChar in ciphertext:
            plaintext += decodeAlphabet[cipherChar]
        return plaintext
e34643ef45eb6ceb493c35c43a302e661e0933a9
669,721
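A minimal usage sketch for simple_substitute above; the three-letter alphabet is illustrative only, not taken from the dataset:

alphabet = {'a': 'x', 'b': 'y', 'c': 'z'}
cipher = simple_substitute("abc", alphabet, 0)   # -> "xyz"
plain = simple_substitute(cipher, alphabet, 1)   # -> "abc"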
import socket

def get_host_by_name(dnsHostname: str) -> str:  # pragma: no cover - we're just proxying
    """get host address by its name

    Args:
        dnsHostname (str): host name

    Returns:
        str: host address
    """
    return socket.gethostbyname(dnsHostname)
354caa06810b5ab07239f35e06117558aae97023
515,072
import itertools

def global_integration_cli_args(integration_config_paths):
    """
    The first arguments to pass to a cli command for integration test configuration.
    """
    # List of config files in order.
    return list(itertools.chain(*(('--config_file', f) for f in integration_config_paths)))
ee0c552453ed18197890a2666e0a6ccdbb6c53a1
646,362
def _build_atom_unique_id(atom_line):
    """Returns a unique identifying tuple from an ATOM line"""
    # unique_id: (name, altloc, resi, insert, chain, segid)
    unique_id = (atom_line[12:16], atom_line[16], int(atom_line[22:26]),
                 atom_line[26], atom_line[21], atom_line[72:76].strip())
    return unique_id
8eca206b237fd4eecf6010306adb85c45af17c52
634,570
def get_command(message, prefixes):
    """
    Gets a command (first word after the prefix) and stores it in the message

    :param message: MessageWrapper to extract from
    :param prefixes: list of valid prefixes or string of valid prefix
    :return: command name
    """
    args = message.content.split(' ')
    if isinstance(prefixes, list):
        for sign in prefixes:
            if message.content.startswith(sign):
                message.prefix = sign
                message.command = (args[0])[len(sign):]
                return message.command
    elif isinstance(prefixes, str):
        if message.content.startswith(prefixes):
            message.prefix = prefixes
            message.command = (args[0])[len(prefixes):]
            return message.command
    return None
387702ab02a2d976e805525941d083e2ec32d1b3
349,388
import json

def load_resource(filename):
    """
    Open a file, and return the contents as JSON.

    Usage:
        from pkg_resources import resource_filename
        load_resource(resource_filename(__name__, "resources/file.json"))
    """
    with open(filename, "r") as f:
        return json.load(f)
4202bbc73d91fce1dd40987647c18dbe54d63f65
482,292
def __determine_wedge_range_indicator_step(max_range):
    """Determines step size between range indicators on a wedge

    Args:
        max_range: Maximum range of the wedge
    """
    if max_range <= 200:
        return 25
    elif max_range <= 400:
        return 50
    else:
        return 100
fb01e3c124357b1a047eb531c13f740c83c1b314
282,040
import torch

def random_float_tensor(seed, size, a=22695477, c=1, m=2 ** 32):
    """
    Generates random tensors given a seed and size
    https://en.wikipedia.org/wiki/Linear_congruential_generator
    X_{n + 1} = (a * X_n + c) % m
    Using Borland C/C++ values

    The tensor will have values between [0,1)

    Inputs:
        seed (int): an int
        size (Tuple[int]): the size of the output tensor
        a (int): the multiplier constant to the generator
        c (int): the additive constant to the generator
        m (int): the modulus constant to the generator
    """
    num_elements = 1
    for s in size:
        num_elements *= s
    arr = [(a * seed + c) % m]
    for i in range(num_elements - 1):
        arr.append((a * arr[i] + c) % m)
    return torch.tensor(arr).float().view(size) / m
30075f376d24b86f60ca8ca4ed9379d853b3c913
282,115
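A short usage sketch of the LCG tensor generator above; the seed and shape are arbitrary illustration values:

t = random_float_tensor(seed=42, size=(2, 3))
print(t.shape)                     # torch.Size([2, 3])
print(t.min() >= 0, t.max() < 1)   # values lie in [0, 1)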
def getSeqMotifDict(fimoDict):
    """
    Make a dict between the seq names and list of motifs that occur in it

    Args:
        fimoDict: dict between motif names and the seqs it hits

    Returns:
        a dict between seq names and a list of motif IDs that hit it
    """
    seqMotifDict = {}
    tmpCount = 0
    for motifId, seqList in fimoDict.items():
        for seqName in seqList:
            if seqName not in seqMotifDict:
                seqMotifDict[seqName] = []
            if motifId not in seqMotifDict[seqName]:
                seqMotifDict[seqName].append(motifId)
    return seqMotifDict
622c056d9a0870bd6bf0c7961ea18d1be291f2bc
279,426
def read_nchars(string, n=1):
    """
    Read n characters from string

    @param string str: string you are reading.
    @param n int: number of characters to read.
    """
    return string[:n]
55217416c1e37ef31aa27c2ebb78f6a7f51a7b02
183,752
def remove_multi_whitespace(string_or_list):
    """ Cleans redundant whitespace from extracted data """
    if type(string_or_list) == str:
        return ' '.join(string_or_list.split())
    return [' '.join(string.split()) for string in string_or_list]
a284eb1ea685fb55afeefe78d863a716475a9182
708,809
def decorate_pid_for_redirect(pid, redirect_code=303, redirect_n=3):
    """Return a PID that will trigger redirects."""
    return "<REDIRECT:{}:{}>{}".format(redirect_code, redirect_n, pid)
a1244a29638901f6309e94fcd3dee3c4cde2293b
513,268
def gen_team(name):
    """Generate a dict to represent a soccer team"""
    return {'name': name, 'avg_height': 0, 'players': []}
e4e064158e5791e2fde66dcd22ac2206f7ed421f
637,180
import codecs

def _convert_text_eb2asc(value_to_convert):
    """
    Converts a string from ebcdic to ascii

    :param value_to_convert: The ebcdic value to convert
    :return: converted ascii text
    """
    val = codecs.encode(codecs.decode(value_to_convert, "cp500"), "latin-1")
    return val
1f74909f8f615fdbf9431e4eb759bad778290b88
687,154
def polyXY3(x, y, coeff):
    """XY cubic with cross-terms

       | - | x | x2| x3|
    ---+---+---+---+---+
     - | a | b | d | g |
     y | c | e | h | k |
     y2| f | i | l | n |
     y3| j | m | o | p |
    """
    a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p = coeff
    xy = x * y
    z = a
    z += x * (b + x * (d + x * g))
    z += y * (c + y * (f + y * j))
    z += xy * (e + x * (h + x * k) + y * (i + y * m) + xy * (l + x * n + y * o + xy * p))
    return z
26145e411fc1821775d4669029493f262805b9fc
344,317
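A small check of the coefficient layout in polyXY3 (illustrative values only): with only b = 1 set, the polynomial should reduce to z = x.

coeff = [0.0] * 16
coeff[1] = 1.0   # 'b' multiplies the plain x term
assert polyXY3(2.5, -3.0, coeff) == 2.5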
def post_gene_data(client, data):
    """Helper function that returns ReturnValue.data from a given post
    to the "/" endpoint
    """
    with client:
        rv = client.post('/', data=data, follow_redirects=True)
    return rv.data
d3700eb465a6a114e92ad029a6422802439bdfd3
442,825
from pathlib import Path

def _sqlite_uri_to_path(uri: str) -> Path:
    """Convert a SQLite URI to a pathlib.Path"""
    return Path(uri.split(':')[-1]).resolve()
1cd71c8f6957f84818086f3382449c55c2004f2c
652,476
from pathlib import Path

def get_name(path):
    """Gets the name of the dynophore trajectory without the .pml extension

    Args:
        path (str): File path to the dynophore trajectory

    Returns:
        str: dynophore_pml
    """
    file = Path(path).stem
    return file
bef9912cc39702070d160839a9abd102c709deea
632,959
def add_newlines_by_spaces(string, line_length):
    """Return a version of the passed string where spaces (or hyphens) get replaced
    with a new line if the accumulated number of characters has exceeded the
    specified line length"""
    replacement_indices = set()
    current_length = 0
    for i, char in enumerate(string):
        current_length += 1
        if current_length > line_length and (char == " " or char == "-"):
            replacement_indices.add(i)
            current_length = 0
    return "".join([("\n" if i in replacement_indices else char)
                    for i, char in enumerate(string)])
326d59fe3d9db67545ed3b9fd4cff487cff51af1
676,518
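A quick illustration of add_newlines_by_spaces with an arbitrary input string:

text = "wrap this short sentence at ten chars"
print(add_newlines_by_spaces(text, 10))
# wrap this short
# sentence at
# ten chars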
def rule_applicable(rule: dict, monthly: dict) -> bool:
    """Helper function to return whether a rule is applicable for the given
    month's spendings.

    Args:
    - monthly: dictionary containing total monthly spendings at merchants in a month
    - rule: the rule to check compatibility with

    Returns:
    - app: True if rule is applicable, False otherwise
    """
    app = True
    for shop in rule.keys():
        if shop not in monthly.keys():
            app = False
    return app
1bfc85fcb45b975ac36e17ac7b682b52302643a2
328,757
import csv

def read_pf_solution_file(sol_file):
    """
    Reads contents of power flow solution file.

    Parameters
    ----------
    sol_file : path to solution file.

    Returns
    -------
    sol_data : solution data (dictionary)
    """
    try:
        bus_data = {}
        sol_data = {'v_mag_tol': 0., 'v_ang_tol': 0., 'bus_data': bus_data}
        f = open(sol_file)
        reader = csv.reader(f, delimiter=',')
        v_mag_tol, v_ang_tol = list(map(float, next(reader)))
        sol_data['v_mag_tol'] = v_mag_tol
        sol_data['v_ang_tol'] = v_ang_tol
        next(reader)  # header
        for row in reader:
            bus_number, code, v_mag, v_ang = int(row[0]), int(row[1]), float(row[2]), float(row[3])
            bus_data[bus_number] = {'v_mag': v_mag,  # p.u.
                                    'v_ang': v_ang}  # degrees
        return sol_data
    except IOError:
        return None
1e635004a24569d4144da4f5022f44f9f7992731
435,275
def to_lowercase_all(df):
    """
    This function transforms all strings in the dataframe to lowercase

    Args:
        df (pd.DataFrame): Raw dataframe with some text columns.

    Returns:
        pd.DataFrame: Dataframe with lowercase standardization.
    """
    return df.applymap(lambda s: s.lower() if type(s) == str else s)
131b10e97d2a882774b410eae7ab5b8c2c5f6946
648,643
def is_chinese(char: str) -> bool:
    """
    Checks if a given character is a Chinese character.

    :param char: Character to check
    :return: Whether character is a Chinese character
    """
    char_ord = ord(char)
    if char_ord < 0x3400:
        return False
    # CJK Unified Ideographs
    if (
        0x4e00 <= char_ord <= 0x9fff        # Unified Ideographs
        or 0x3400 <= char_ord <= 0x4dbf     # Extension A
        or 0x20000 <= char_ord <= 0x2a6df   # Extension B
        or 0xf900 <= char_ord <= 0xfaff     # CJK Compat
        or 0x2f800 <= char_ord <= 0x2fa1f   # Compat Supplement
    ):
        return True
    # Rare characters are omitted for the sake of speed
    return False
b315352f1062bdce76c233066eb927ebd76632a7
204,106
def create_bzip2(archive, compression, cmd, verbosity, interactive, filenames):
    """Create a BZIP2 archive."""
    cmdlist = [cmd, 'a']
    if not interactive:
        cmdlist.append('-y')
    cmdlist.extend(['-tbzip2', '-mx=9', '--', archive])
    cmdlist.extend(filenames)
    return cmdlist
f63f10e034407e3a92e0177b039b22e44f221455
607,324
def path_to_params(path):
    """Take a path name of the form `param1_value1/param2_value2/...`
    and returns a dictionary."""
    params = {}
    for name in path.split("/"):
        if "_" in name:
            k, v = name.split("_", maxsplit=1)
            params[k] = v
    return params
269fadf333c23350fb24edb566bf5072162e3ff2
485,052
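For example, applied to a hypothetical results directory name:

params = path_to_params("lr_0.01/batch_32/seed_7")
print(params)   # {'lr': '0.01', 'batch': '32', 'seed': '7'}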
import json

def read_json(json_file):
    """ (file) -> dict

    Read in json_file, which is in json format, and output a dict with its contents
    """
    with open(json_file) as data_file:
        data = json.load(data_file)
    return data
c49067c54a04d7760a468298052d62c8cd536bb5
654,492
def seen(params):
    """'.seen' & user || Report last time Misty saw a user."""
    msg, user, channel, users = params
    if msg.startswith('.seen'):
        return "core/seen.py"
    else:
        return None
2ddcf05ebb0edd83ac45574a9a682635b7914c94
656,211
def video(data_type, stream_id, data, control, timestamp):
    """
    Construct a video message to send video data to the server.

    :param data_type: int the RTMP datatype.
    :param stream_id: int the stream which the message will be sent on
                      (same as the publish StreamID).
    :param data: bytes the raw video data.
    :param control: bytes in hex the control type to send the data as.
    :param timestamp: int the timestamp of the packet.
    """
    msg = {'msg': data_type,
           'stream_id': stream_id,
           'timestamp': timestamp,
           'body': {'control': control,
                    'data': data}}
    return msg
9cca14cfa408db616087dee5dc1a20bdfff57282
334,808
def hex_to_rgb(hex):
    """Transform colors from hex to rgb"""
    hex = hex.lstrip('#')
    hlen = len(hex)
    return tuple(int(hex[i:i + hlen // 3], 16) for i in range(0, hlen, hlen // 3))
25113cba6a3b677996aa7ef521f46cb27a3ab2dd
56,981
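A quick usage sketch of hex_to_rgb with arbitrary colors:

print(hex_to_rgb('#ff8000'))   # (255, 128, 0)
print(hex_to_rgb('1a2b3c'))    # (26, 43, 60)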
def image_name(image):
    """Find an appropriate image name for the run command."""
    if image.find(':') > 0:
        return image.strip()
    else:
        return "escadrille:" + image.strip()
79e97c6380c54e29ad3c2b419105b19e45d904de
431,818
def even(n):
    """
    A function that determines if a number is even.

    :param n: The number
    :return: True if the number is even, False otherwise.
    """
    # If the remainder after dividing by two is zero, then the number is even.
    return n % 2 == 0
625e31f6c1ad3a6f71d6bec9d0a0bf9d2a4831e1
133,748
def sum_amount_previous_loan(df):
    """
    sum previous loans amount (demanded amount and obtained amount)

    :param df: previous loans application dataframe
    :return dataframe: SK_ID_CURR plus feature PREV_AMT_APP_SUM & PREV_AMT_CRED_SUM
    """
    df_out = df.groupby(['SK_ID_CURR'])[['AMT_APPLICATION', 'AMT_CREDIT']].sum().reset_index(). \
        rename(columns={'AMT_APPLICATION': 'PREV_AMT_APP_SUM',
                        'AMT_CREDIT': 'PREV_AMT_CRED_SUM'})
    return df_out
8e5c7f9e077628f0368af1119676dd9c46075772
154,555
from typing import List

def find_unused_bank(edge_banks: List[int]) -> int:
    """
    Given a list of banks used in one clock, find a bank index that isn't used,
    or -1 if all are used

    :param edge_banks:
    :return:
    """
    for i in list(range(len(edge_banks))):
        if i not in edge_banks:
            return i
    return -1
b59e622fde507ae13231c02f29334760796091d8
603,381
def get_words(match):
    """A selector function for all the words in the match"""
    return match[0]
72bed3261f59aeaa6076e458d854c5c3dc84b02b
313,602
from pathlib import Path

def oss_installer() -> Path:
    """
    Return the path to an installer for DC/OS OSS master.
    """
    return Path('/tmp/dcos_generate_config.sh')
9377070a5ac7529ac1e32fd70b2ffd7cac74ad56
323,569
from operator import getitem

def get_path(event, path):
    """Extract an item at `path` from an event which is usable as a nested Python
    mapping (i.e., using `getitem` for each level in `path`).

    Parameters
    ----------
    event : possibly-nested mapping
    path : iterable of strings

    Returns
    -------
    node
        Whatever lives at the specified `path` (could be a scalar, array,
        another mapping, etc.)
    """
    if isinstance(path, str):
        path = [path]
    node = event
    for subpath in path:
        node = getitem(node, subpath)
    return node
cdb460cb95ba080b91972a19977ddf197037a358
374,276
import json

def decode(body):
    """decode string to object"""
    if not body:
        return None
    return json.loads(body)
2663a3d742b6f5e17d5b0aed876f136b30fdde1c
15,893
def get_entity_targets(test_data):
    """Extracts entity targets from the test data."""
    return [e.get("entities", []) for e in test_data.training_examples]
381b94f8720c4d62f8216bee0815874893005a29
397,045
import ipaddress

def is_ip_address(x: str):
    """ Checks if x is a valid ip address. """
    try:
        ipaddress.ip_address(x)
        return True
    except ValueError:
        return False
0b41d172776e758d37314bb44aeed67116fac702
336,382
def _byte_string(s):
    """Cast a string or byte string to an ASCII byte string."""
    return s.encode('ASCII')
db048e47ebbb964ee6d14d2c32a8d4dcd3b45ad4
659,150
def get_regression_coefs(train_array):
    """ Get regression coefficients for the training period """
    # Code from Google Earth Engine tutorial:
    # https://developers.google.com/earth-engine/reducers_regression

    # Define the axes of variation in the collection array.
    imageAxis = 0
    bandAxis = 1

    # Check the length of the image axis (number of images).
    arrayLength = train_array.arrayLength(imageAxis)
    # Update the mask to ensure that the number of images is greater than or
    # equal to the number of predictors (the linear model is solveable).
    train_array = train_array.updateMask(arrayLength.gt(3))

    # Get slices of the array according to positions along the band axis.
    predictors = train_array.arraySlice(bandAxis, 0, 3)
    response = train_array.arraySlice(bandAxis, 3)

    # coefficients = predictors.matrixSolve(response)
    coefficients = predictors.matrixPseudoInverse().matrixMultiply(response)

    # Turn the results into a multi-band image.
    coefficientsImage = coefficients.arrayProject([0]).arrayFlatten(
        [['coef_constant', 'coef_sin', 'coef_cos']])

    return coefficientsImage
35ec1c0ab154ef60b1e00423916a8bb468890922
294,467
def split_comments(line, comment_char=';'):
    """
    Splits `line` at the first occurrence of `comment_char`.

    Parameters
    ----------
    line: str
    comment_char: str

    Returns
    -------
    tuple[str, str]
        `line` before and after `comment_char`, respectively. If `line` does not
        contain `comment_char`, the second element will be an empty string.
    """
    split = line.split(comment_char, 1)
    data = split[0].strip()
    if len(split) == 1:
        return data, ''
    else:
        return data, split[1].strip()
8c87e8576e097d34299251c8e41a871ae81cc4fa
126,849
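For instance, on an INI-style line with a trailing comment (example strings are illustrative):

print(split_comments("key = value ; units are mm"))
# ('key = value', 'units are mm')
print(split_comments("no comment here"))
# ('no comment here', '')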
def convert_new_lines(text):
    """ Convert new lines to a common format. """
    return text.replace('\r\n', '\n').replace('\r', '\n')
75934d4f18b7a3a760c11fded959145509825cbe
343,087
def gap_detector(data, mask, start_pixel, width_gap):
    """
    Reproduce a detector gap in reciprocal space data and mask.

    :param data: the 3D reciprocal space data
    :param mask: the corresponding 3D mask
    :param start_pixel: pixel number where the gap starts
    :param width_gap: width of the gap in pixels
    :return: data and mask array with a gap
    """
    if data.ndim != 3 or mask.ndim != 3:
        raise ValueError("data and mask should be 3d arrays")
    if data.shape != mask.shape:
        raise ValueError("data and mask should have the same shape")

    data[:, :, start_pixel : start_pixel + width_gap] = 0
    data[:, start_pixel : start_pixel + width_gap, :] = 0
    mask[:, :, start_pixel : start_pixel + width_gap] = 1
    mask[:, start_pixel : start_pixel + width_gap, :] = 1
    return data, mask
0e480886ea812071726372a9d4a3052cb577c05d
625,958
def get_tables(c, verbose=False):
    """Get all table names in the database."""
    c.execute("SELECT name FROM sqlite_master WHERE type='table';")
    tabs = c.fetchall()
    if verbose:
        print(tabs)
    return tabs
ac8fab6c628319b236b7d6491bd87344c3a77a9f
60,304
def join_places_building_data(places_proj, buildings_proj):
    """
    Add summary building data onto city blocks.

    Requires columns to be present in the gdfs generated by other functions in osmuf.

    Parameters
    ----------
    places_proj : geodataframe
    buildings_proj : geodataframe

    Returns
    -------
    GeoDataFrame
    """
    building_areas_by_place = buildings_proj[['footprint_m2', 'total_GEA_m2']].groupby(
        buildings_proj['city_block_id']).sum()

    # if there are buildings not associated with a city_block they aggregate under 0
    # if this happens remove them from the dataframe
    if 0 in building_areas_by_place.index:
        building_areas_by_place = building_areas_by_place.drop([0])

    places_proj = places_proj.merge(building_areas_by_place, on='city_block_id')

    places_proj['net_GSI'] = (places_proj['footprint_m2'] / places_proj.area).round(decimals=3)
    places_proj['net_FSI'] = (places_proj['total_GEA_m2'] / places_proj.area).round(decimals=3)
    places_proj['gross_GSI'] = (places_proj['footprint_m2'] / places_proj['gross_area_m2']).round(decimals=3)
    places_proj['gross_FSI'] = (places_proj['total_GEA_m2'] / places_proj['gross_area_m2']).round(decimals=3)
    places_proj['avg_building:levels'] = (places_proj['total_GEA_m2'] / places_proj['footprint_m2']).round(decimals=1)

    return places_proj
375ee92e7913ba20ed782a5e99c5113d1226f601
59,126
def bytes_to_string(byte_count):
    """Converts a byte count to a string (in KB, MB...)"""
    suffix_index = 0
    while byte_count >= 1024:
        byte_count /= 1024
        suffix_index += 1
    return '{:.2f}{}'.format(byte_count,
                             [' bytes', 'KB', 'MB', 'GB', 'TB'][suffix_index])
2cd7cebc52bca9de7f57e6a23d34476eb8f99cc8
543,031
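A couple of illustrative calls to bytes_to_string:

print(bytes_to_string(512))             # 512.00 bytes
print(bytes_to_string(1536))            # 1.50KB
print(bytes_to_string(5 * 1024 ** 3))   # 5.00GB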
import json

def create_package_json(directory, version):
    """Create `package.json` in `directory` with a specified version of `version`."""
    package_json = directory / "package.json"
    package_json.write_text(json.dumps({"version": version}), encoding="utf-8")
    return package_json
c0d2baebb8673243d7af54835f2b8f0ae46432f0
309,000
import math

def angle_to_comp(n, deg=False):
    """Returns the complex number with a magnitude of 1 that forms an angle of n
    with the real axis

    n: angle as float \\
    deg: bool (if ```True```, n is taken to be in degrees, if ```False```, n is
    taken to be in radians)"""
    if deg:
        n = math.radians(n)
    return complex(math.cos(n), math.sin(n))
a6e873b7d3bf382d3ea7077bca0a89d436742336
39,988
def positive(value):
    """Type check value is a natural number (positive nonzero integer)."""
    value = int(value)
    if value < 1:
        raise ValueError()
    return value
17448c1bda114a8608cb859a37c76c4a8bd5bf2f
276,610
def binary_search(list_obj, value, low, high):
    """
    Recursive Binary Search

    Algorithm: Using Divide and Conquer design paradigm to reduce the complexity
    and increase the efficiency

    Arguments:
        list_obj: represent list object
        value: an object whose value we are looking for in the list
        low: an integer object; defaults to the first index of the list
        high: an integer object; defaults to the last index of the list
    """
    if low > high:
        return False
    if low == high:
        return list_obj[low] == value
    middle = (low + high) // 2
    if value == list_obj[middle]:
        return True
    elif value > list_obj[middle]:
        return binary_search(list_obj, value, middle + 1, high)
    else:
        return binary_search(list_obj, value, low, middle - 1)
bd1bad68b0fa32e3528dee036f9dcd8e92c6b58e
592,623
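A quick check of the recursive search on a sorted list (bounds passed explicitly, as the signature requires; the list is an illustrative example):

nums = [2, 5, 8, 13, 21]
print(binary_search(nums, 13, 0, len(nums) - 1))   # True
print(binary_search(nums, 4, 0, len(nums) - 1))    # False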
import math

def get_total_page(total, per):
    """
    Get the page count.

    :param total: total count
    :param per: count per page
    :return: page count int
    """
    return int(math.ceil(1.0 * total / per))
0307fe68dc47f804b39f55ba7d8374ef5dca7468
363,704
from typing import List

def is_supported_filetype(file_name: str, ext_list: List[str]) -> bool:
    """Check if filename has one of the allowed extensions from the list."""
    ext_list_lower = [ext.lower() for ext in ext_list]
    fn_lower = file_name.lower()
    return fn_lower.endswith(tuple(ext_list_lower))
ed5714b948e9715750b0d7c05be030346bb56042
241,510
def _multihex(blob):
    """Prepare a hex dump of binary data, given in any common form including
    [[str]], [[list]], [[bytes]], or [[bytearray]]."""
    return ' '.join(['%02X' % b for b in bytearray(blob)])
b5f7698275e9e71b6e1c70bf728277a27c78c30e
660,152
def parse_single_alignment(string, reverse=False, one_add=False, one_indexed=False):
    """
    Given an alignment (as a string such as "3-2" or "5p4"), return the index pair.
    """
    assert '-' in string or 'p' in string

    a, b = string.replace('p', '-').split('-')
    a, b = int(a), int(b)

    if one_indexed:
        a = a - 1
        b = b - 1
    if one_add:
        a = a + 1
        b = b + 1
    if reverse:
        a, b = b, a
    return a, b
0f62309de36ae9850a7be6d1cd0c740f5a014b2d
172,542
def my_pow(b, e, m):
    """ Computes b^e mod m using the square and multiply algorithm"""
    if e == 0:
        print('exponent is zero')
        return 1
    ## enter your source code here
    r = 1
    bn = bin(e)[2:]  # convert the exponent to binary format, discard the prefix '0b'
    i = 0
    while (i < len(bn)):
        r = (r * r) % m  # square is done automatically for every exponent digit
        if int(bn[i]) != 0:  # multiply is done only if the exponent is 1
            r = (r * b) % m
        i = i + 1
    return r
64b0c3a8282daf0ef5a69e9568116878ee58b7d5
500,045
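A sanity check of the square-and-multiply routine against Python's built-in three-argument pow (the test values are illustrative):

assert my_pow(7, 560, 561) == pow(7, 560, 561) == 1   # 561 is a Carmichael number
assert my_pow(5, 117, 19) == pow(5, 117, 19)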
def nonzero_index_set(arr):
    """
    Returns a set of indices corresponding to non-zero entries in a numpy array
    (or other list-like).
    """
    res = set()
    for i, val in enumerate(arr):
        if val > 0:
            res.add(i)
    return res
c562145fc416a5c5429847250a511eb45cccf69f
215,008
def _mul_with_int(integer, residue):
    """
    Return k * IntegerResidueClass(n, m)
    """
    return residue.__class__(integer * residue.n, residue.m)
4bc51121934f435fcbd391430e57f41ee1e2330d
510,553
from io import StringIO

def dump_data(data):
    """
    Print hex value of data in form 'hex/decimal'

    :param data: bytes data
    :return: pretty printed hex/decimal string of data
    """
    buf = StringIO()
    for index in range(len(data)):
        buf.write("%x/%d " % (data[index], data[index]))
        if (index + 1) % 8 == 0:
            buf.write("\t")
        if (index + 1) % 16 == 0:
            buf.write("\n")
    if len(data) > 0 and buf.getvalue()[-1] != '\n':
        buf.write("\n")
    return buf.getvalue()
e229d70882bf5ba0d0025731d10e5c2a3659c713
506,379
def transform_input_ids(input_ids_0, input_ids_1, tokenizer_func):
    """
    Take the input ids for sequences 0 and 1 (claim and evidence) and a tokenizer
    function. Apply function to tuples of claim-evidence.

    Return list of token type ids.
    """
    transformed_ids = list(map(
        lambda ids_tuple: tokenizer_func(ids_tuple[0], ids_tuple[1]),
        zip(input_ids_0, input_ids_1)
    ))
    return transformed_ids
c94dbac49972ce060a684aced18d2e5b3513d042
517,998
def get_number_of_auctions_to_run(q: int, t: int, lmbda: int) -> int:
    """
    Summary line.

    Extended description of function.

    Parameters
    ----------
    q: description
    t: description
    lmbda: description

    Returns
    -------
    The appropriate integer for that selection.
    """
    retval: int = 0
    if t <= 2:
        retval = 2400
    elif t <= 8:
        retval = 1600
    elif t <= 18:
        retval = 1200
    elif t <= 60:
        retval = 800
    elif t <= 108:
        retval = 600
    elif t <= 144:
        retval = 500
    else:
        retval = 400
    if (q == 1) or (lmbda == 1):
        retval *= 2
    return retval
7a7cd95c1134766bfc6ce3696937c9deadcd690c
151,783
def prune_completions(prefix, all_test_names):
    """Filter returning only items that will complete the current prefix."""
    completions = set()
    for test_name in all_test_names:
        if test_name.startswith(prefix):
            next_break = test_name.find('.', len(prefix) + 1)
            if next_break >= 0:
                # Add only enough to complete this level; don't drown
                # the user with all the possible completions from
                # here.
                completions.add(test_name[:next_break])
            else:
                # If there are no more levels, then add the full name
                # of the leaf.
                completions.add(test_name)
    return completions
ad75ecc065dadddfb277329320ae888cf1e3535a
25,481
def filter_by_term(course: tuple[str, str, set], term: str) -> tuple[str, str, set]:
    """Return a copy of the given course with only sections that meet in the given term.

    The returned tuple has the same course code and title as the given course, and
    its sections set is a subset of the original.

    Note that a 'Y' section meets in BOTH 'F' and 'S' terms, and so should always
    be included in the returned course tuple.

    Preconditions:
        - term in {'F', 'S'}
    """
    return course[0], course[1], {s for s in course[2] if s[1] == 'Y' or s[1] == term}
382629f891af5b1744591a1465989068ed9adc22
626,171
def rho_dust(f):
    """ Dust density """
    return f['u_dustFrac'] * f['rho']
3c4d990b9f2fcb01c0183cedd7b66a2721001de7
654,320
def is_empty_json_response_from_s3(context):
    """Check if the JSON response from S3 is empty (but not None)."""
    return context.s3_data == {}
6ec3a41646de74d82f3786e772228da55d91b63a
703,423
def has_tag(obj, tag):
    """Helper function that tests to see if the obj is a dictionary and contains a
    particular key/tag.

    >>> obj = {'test': 1}
    >>> has_tag(obj, 'test')
    True
    >>> has_tag(obj, 'fail')
    False
    >>> has_tag(42, 'fail')
    False
    """
    return type(obj) is dict and tag in obj
5f69fba5b2f37c7566be1e5d63f2c91d1a8727b7
526,506
from typing import Any
import re

def validate_timestamp(timestamp: Any) -> bool:
    """
    Helper function to validate the input timestamp format.

    Cymulate API can return empty timestamp or an invalid string (for example the
    string 'no timestamp').

    Args:
        timestamp: input timestamp

    Returns:
        bool: True if the input is in valid format, else False.
    """
    try:
        if re.match(r'\d{4}-\d{2}-\d{2}', timestamp):
            return True
    except Exception:  # pylint: disable=broad-except
        return False
    return False
2e3549270ddd9759f48038059a8f4d9569460fac
552,456
def versionFromCommitNo(commitNo):
    """Generate a version string from a numerical commit no"""
    return "0.0.0-dev%d" % commitNo
6c0ece8021cbe301fdbb561ce5f3ba46e0cc2596
380,675
def user_exists(client, username):
    """
    Checks if user exists.

    Takes:
        * keystone client object
        * username

    Returns bool
    """
    return len(client.users.list(name=username)) != 0
37b6180c95a4410427a9c133bb120ed18807b572
139,857
from typing import Tuple
from typing import Any

def replace_value_by_index(xs: Tuple[Any, ...], pos: int, value: Any) -> Tuple[Any, ...]:
    """
    Return a new instance of the tuple replacing the specified position with the
    new value.

    :param xs: A tuple.
    :param pos: Zero-based index of the item which should be replaced.
    :param value: New value.
    """
    return tuple(value if idx == pos else elem for idx, elem in enumerate(xs))
7f47b606772e8f666bedc40ef206c93de9e7523b
53,069
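For example, replacing one element of an existing tuple (values are illustrative):

t = ('a', 'b', 'c')
print(replace_value_by_index(t, 1, 'B'))   # ('a', 'B', 'c')
print(t)                                   # ('a', 'b', 'c') -- original untouched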
def post_slash(path):
    """
    Convenience function to ensure postpended slash to a path
    """
    if path == '':
        path = "/"
    elif path[-1] != '/':
        path = path + '/'
    return path
29beb83210a86a3c7c3eb417020b6e2026142232
250,232
from typing import Dict
from typing import Any

def clear_per_step_extra_state(extra_state: Dict[str, Any]) -> Dict[str, Any]:
    """
    Clear values in extra_state that are technically only true for a specific step
    (ex: the eval tune loss calculated after 5 train steps is no longer accurate
    after 7 train steps, but might not get updated since we might not be doing
    eval after every step).
    """
    extra_state["tune_eval"]["loss"] = None
    extra_state["tune_eval"]["perplexity"] = None
    extra_state["tune_bleu"]["current"] = None
    return extra_state
d7f561c612a8d52e1cf8a215b414e5c8297c6a24
58,279
import dataclasses

def field(pytree_node=True, serialize=True, cache=False, **kwargs):
    """Mark a field of a dataclass to be:

    Args:
        pytree_node: a leaf node in the pytree representation of this dataclass.
            If False this must be hashable
        serialize: If True the node is included in the serialization.
            In general you should not specify this.
        cache: If True this node is a cache and will be reset every time
            fields are modified.
    """
    return dataclasses.field(
        metadata={"pytree_node": pytree_node, "serialize": serialize, "cache": cache},
        **kwargs,
    )
8c2170b742273dc89f4e4a4b7d035ca496971653
612,360
def _byte_to_int(data: bytes) -> int:
    """
    Returns integer value of little endian byte data (byte 0 is least significant)

    :param data:
    :return: Integer value
    :rtype: int
    """
    return (ord(data[0:1]) + (ord(data[1:2]) * 0x100)
            + (ord(data[2:3]) * 0x10000) + (ord(data[3:4]) * 0x1000000))
2899da967637a86ec43bdba0c1c41bdce5699df8
304,817
from typing import Dict
from typing import Mapping

def deep_update(original: Dict, updates: Mapping) -> Dict:
    """
    Update a nested dictionary with new values, leaving unupdated values in place.

    Modifies original in place.

    :param original: the dictionary whose values will be updated
    :param updates: the dictionary with the values you want to insert into original
    """
    for key, value in updates.items():
        if isinstance(value, Mapping) and value:
            returned = deep_update(original.get(key, {}), value)
            original[key] = returned
        else:
            original[key] = updates[key]
    return original
f8011ef14dcd0833818b62442c13d20401b687fe
490,062
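A brief illustration of the nested-merge behaviour of deep_update (the config dict is a made-up example):

config = {"db": {"host": "localhost", "port": 5432}, "debug": False}
deep_update(config, {"db": {"port": 5433}, "debug": True})
print(config)   # {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}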
import itertools

def get_subdirectories(rates, num_subs):
    """
    Get the list of expected subdirectories in every directory.

    The expected subdirectories are obtained from the Cartesian product of the
    elements of rates and num_subs.

    :param rates (list): The sending rates
    :param num_subs (list): All the different number of subscribers.
    :return: A list containing the path of all the subdirectories.
    """
    # Get subdirectory names
    combinations = list(itertools.product(rates, num_subs))
    sub_dirs = []
    for combination in combinations:
        sub_dirs.append('rate_{}/subs_{}'.format(combination[0], combination[1]))
    return sub_dirs
48f139c106757d4be3192acd28e2f198e96775e8
488,022
def add_page_num(request, page):
    """
    Update page number variable to a url that may or may not have other GET variables
    """
    vars = request.GET.copy()
    vars["page"] = page
    var_string = "&".join([f"{k}={v}" for k, v in vars.items()])
    return f"{request.path}?{var_string}"
1d4fa1c503869eb110ed27b4b17aacd713e9e6e7
267,709
from typing import Dict
from typing import Union
import json
import logging

def load_json_params(param_fname: str, **kwargs) -> Dict[str, Union[int, float, str]]:
    """Load in the param_fname, overriding with given kwargs"""
    with open(param_fname) as source:
        params = json.load(source)
    for k, v in kwargs.items():
        if k not in params:
            logging.warning(f"Key {k} not in original parameters")
        params[k] = v
    return params
c82f498fc93ca3ac805e0710c973af4ce37946dd
266,820
def _lpips_wrapper(sample, gt, lpips_model):
    """
    Computes the frame-wise LPIPS between two videos.

    Parameters
    ----------
    sample : torch.*.Tensor
        Tensor representing a video, of shape (length, batch, channels, width, height)
        and with float values lying in [0, 1].
    gt : torch.*.Tensor
        Tensor representing a video, of shape (length, batch, channels, width, height)
        and with float values lying in [0, 1]. Its shape should be the same as sample.

    Returns
    -------
    torch.*.Tensor
        Tensor of frame-wise LPIPS between the input videos, of shape (length, batch).
    """
    nt, bsz = sample.shape[0], sample.shape[1]
    img_shape = sample.shape[2:]
    # Switch to three color channels for grayscale videos
    if img_shape[0] == 1:
        sample_ = sample.repeat(1, 1, 3, 1, 1)
        gt_ = gt.repeat(1, 1, 3, 1, 1)
    else:
        sample_ = sample
        gt_ = gt
    lpips = lpips_model(sample_.view(nt * bsz, 3, *img_shape[1:]),
                        gt_.view(nt * bsz, 3, *img_shape[1:]))
    return lpips.view(nt, bsz)
30961e84cd73fa21842daba27c293d8e485ed32f
576,840
def to_secs(delta):
    """Convert a :py:class:`datetime.timedelta` to a number of seconds.

    (This is basically a backport of
    :py:meth:`datetime.timedelta.total_seconds`.)
    """
    return (delta.days * 86400.0 +
            delta.seconds +
            delta.microseconds / 1000000.0)
1ff9c8b2a857f713d7bda2691b0b2cfe8a1611dc
647,801
import six

def csv_data_with_term(term):
    """
    Data where each line is terminated by term
    """
    stream = six.StringIO()
    data = [
        'idx,data',
        '1,one',
        '2,two',
        '3,three',
    ]
    for line in data:
        stream.write(line + term)
    stream.seek(0)
    return stream
99ff582dc810d9a1c0d4ef63417f9f1331623364
345,490
def _is_throttled(event):
    """Checks if the message has been throttled already.

    Args:
        event: The pub/sub event object

    Returns:
        True if contains an attribute called "forwarded"
        False in any other case
    """
    return ((event.get('attributes') is not None) and
            (event.get('attributes').get('forwarded') is not None))
7246fa743d723a82fb9742acfc8be9bb32bd279c
396,628
def bad_acq_stars(stars):
    """
    Return mask of 'bad' stars, by evaluating AGASC star parameters.

    :param stars: astropy table-compatible set of agasc records of stars. Required
        fields are ['CLASS', 'MAG_ACA_ERR', 'ASPQ1', 'ASPQ2', 'ASPQ3', 'VAR', 'POS_ERR']
    :returns: boolean mask true for 'bad' stars
    """
    return ((stars['CLASS'] != 0) |
            (stars['MAG_ACA_ERR'] > 100) |
            (stars['POS_ERR'] > 3000) |
            (stars['ASPQ1'] > 0) |
            (stars['ASPQ2'] > 0) |
            (stars['ASPQ3'] > 999) |
            (stars['VAR'] > -9999))
c33c09fbac780f3d430975df2471e384e62fdae5
444,691
import re

def graphite_safe(string):
    """
    Sanitizes a string so that it can be used as part of a Graphite line.
    It does this by replacing non-alphanumeric characters with underscores.

    :param string: Your string to sanitize
    :return: Your sanitized string
    """
    # Convert whitespaces to underscores
    string = re.sub(r'\s', '_', string)
    # Convert non-alphanumeric characters to underscores
    string = re.sub(r'[^\w]', '_', string)
    # Collapse repeating underscores into one
    while '__' in string:
        string = string.replace('__', '_')
    return string
c21e63f7db13e01fd255325b458cf52ca129e0f6
283,040
def BFS_search(gr, u, v):
    """
    Find the shortest path between two nodes (source and target) using the
    Breadth-First Search (BFS) algorithm.

    @type gr: tlp.Graph
    @param gr: Tulip graph
    @type u: tlp.node
    @param u: first node of interest (source)
    @type v: tlp.node
    @param v: Second node of interest (target)
    """
    explored = []
    queue = []
    queue.append([u])
    while queue:
        path = queue.pop(0)
        node = path[-1]
        if node not in explored:
            for n in gr.getInOutNodes(node):
                new_path = list(path)
                new_path.append(n)
                queue.append(new_path)
                if n == v:
                    new_path.pop()
                    del new_path[0]
                    return new_path
            explored.append(node)
9349ee5d4869b572d360644b4895ca6acc47f3d0
72,135
from typing import Optional

def parse_url_params(params: Optional[dict]) -> Optional[dict]:
    """Generate parameter dict and filter Nones."""
    params = params or {}
    return {k: v for k, v in params.items() if v is not None} or None
deacafdec7b2efd8640dc0abf8dfdc7a3e32c4ff
127,892
from typing import Tuple

def create_columns(column_def: Tuple[str, str]) -> str:
    """
    Prepare columns for table create as follows:
        - types copied from target table
        - no indexes or constraints (no serial, no unique, no primary key etc.)
    """
    return (','.join(f'{k} {v}' for k, v in column_def)
            .replace('bigserial', 'bigint')
            .replace('smallserial', 'smallint')
            .replace('serial', 'integer')
            )
787ce1c3fc5e37c6055f48424e92eec84eb05448
653,938
import collections

def parse_billing_data(billing_data):
    """
    parse the billing data and store it in a hash

    args:
        billing_data: CSV object of billing data

    returns:
        user_dict: dict, keyed by AWS ID, containing name, user total for all
            services, and currency
        currency: string, currency used (ie: USD)
        month: billing month (for CSV output)
        year: billing year (for CSV output)
    """
    user_dict = collections.defaultdict(dict)
    currency = ''
    month = ''
    year = ''

    for row in billing_data:
        if len(row) < 4:
            continue
        if row[3] == 'AccountTotal':
            if not currency:
                currency = row[23]
            if not month or not year:
                date = row[6]
                month = date[5:7]
                year = date[0:4]
            acct_num = row[2]
            user_dict[acct_num]['name'] = row[9]
            user_dict[acct_num]['total'] = float(row[24])
            user_dict[acct_num]['currency'] = row[23]

    return user_dict, currency, month, year
470ba2ce69dff81bc6f0ed5ced4e89c67013661d
602,695
def hs2_parquet_constraint(v):
    """Constraint function, used to only run HS2 against Parquet format, because
    file format and the client protocol are orthogonal."""
    return (v.get_value('protocol') == 'beeswax' or
            v.get_value('table_format').file_format == 'parquet' and
            v.get_value('table_format').compression_codec == 'none')
80b51fc37092d12a1061db436e5f2ec85516ff06
498,107
def get_tokens(tagger, content):
    """
    Return the list of nouns that appear in the document.
    """
    tokens = []  # List holding the nouns found in this article.
    node = tagger.parseToNode(content)
    while node:
        # node.feature is a comma-separated string, so split() it and assign the
        # first two items to category and sub_category.
        category, sub_category = node.feature.split(',')[:2]
        # Only append to tokens if the token is a proper noun or a common noun.
        # (MeCab POS tags: '名詞' = noun, '固有名詞' = proper noun, '一般' = general.)
        if category == '名詞' and sub_category in ('固有名詞', '一般'):
            tokens.append(node.surface)
        node = node.next
    return tokens
fa43ddaf6cb554e437a80664ed713a348653db06
635,059
def basic_src(model, u, **kwargs):
    """
    Basic source for linearized modeling

    Parameters
    ----------
    u: TimeFunction or Tuple
        Forward wavefield (tuple of fields for TTI or dft)
    model: Model
        Model containing the perturbation dm
    """
    w = -model.dm * model.irho
    if model.is_tti:
        return (w * u[0].dt2, w * u[1].dt2)
    return w * u[0].dt2
fb932ff5c0debf7ce3e26b7c7bc336468465167c
194,417
def is_int(string_to_check):
    """
    Checks if string is an integer value.

    :param string_to_check: String to check
    :return: True if string is an integer, False otherwise
    """
    try:
        int(string_to_check)
        result = True
    except ValueError:
        result = False
    return result
1c554719456fc5fa284b51bc66dbd09940e238fc
468,041
from typing import Collection
from typing import Tuple
import statistics

def calc_lenient_interval_stats(all_intervals: Collection[float]) -> Tuple[float, float, float]:
    """Calculate min, max and mean values for the given time intervals, in a lenient
    manner (discarding outliers). Returned in the order (max, mean, min)."""
    # We need to be a little lenient on the min and max intervals: we are trying to
    # achieve a constant rate rather than exact intervals.
    # To achieve this leniency, I'm going to sort the intervals and discard outliers
    # (the highest and lowest 5% of values)
    discard_count = max(1, len(all_intervals) // 20)
    sorted_intervals = sorted(all_intervals)
    intervals = sorted_intervals[discard_count:-discard_count]
    min_interval = intervals[0]
    max_interval = intervals[-1]
    mean_interval = statistics.mean(intervals)
    return max_interval, mean_interval, min_interval
c1b0c0281c1bc208084d0ebbbe334cd6f2aa602e
262,613
def _to_np_mode(mode):
    """Convert padding modes from `ndi.correlate` to `np.pad`."""
    mode_translation_dict = dict(nearest='edge',
                                 reflect='symmetric',
                                 mirror='reflect')
    if mode in mode_translation_dict:
        mode = mode_translation_dict[mode]
    return mode
d568cbb7624c9eb5a8a8939480aeaa2194d91bdf
624,928
def collect_ids_recursive(root):
    """
    Return a list of ids of all leaves of the given tree
    """
    if root.is_leaf():
        return [root.id]
    return collect_ids_recursive(root.left) + collect_ids_recursive(root.right)
4ff0356c0af45209224993a4eae91e479beeb800
492,504
def canonical_order(*corners):
    """return corners of enclosing rectangle in their canonical order (clockwise
    starting from bottom left). can be used to simply reorder rectangle nodes."""
    xmin = min(x[0] for x in corners)
    xmax = max(x[0] for x in corners)
    ymin = min(x[1] for x in corners)
    ymax = max(x[1] for x in corners)
    return (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)
8d9d34d662dcd961ea7a7cb262c1b6bdfcde03d3
177,185
from operator import eq

def eq_zero(a):
    """
    Check if a value is close enough to 0

    :param a:
    :return:
    """
    return eq(a, 0.0)
f79167f622906cd25d7fae7987a66c09cff237fb
383,311
import configparser

def to_dict(cfg: configparser.ConfigParser) -> dict:
    """Converts ConfigParser object into dictionary"""
    return {s.lower(): dict(cfg[s]) for s in cfg.sections()}
45833955bcfb32c6b0cadc9ad6027686da300d1a
621,597