content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def color_to_rgb(c):
    """Split a 24-bit packed integer color into an (R, G, B) triplet."""
    red = (c >> 16) & 0xFF
    green = (c >> 8) & 0xFF
    blue = c & 0xFF
    return (red, green, blue)
57f30d4d346f6cfbeff67d714579f649524e9873
506,331
def square(row, col):
    """Build the (row, col) tuple representing a board square.

    Rows are numbered 1 through 3, columns 'A' through 'C'.
    """
    return row, col
e75db55fb0a51839af3da003e2b706e3ce95a04a
156,280
def dependency(func, **config):
    """Declare *func* as a dependency-injection setter for a yaz.Plugin.

    The decorated setter is called right after the plugin's constructor
    finishes, with the declared plugin dependencies injected, e.g.::

        class HelloWorld(yaz.Plugin):
            def __init__(self):
                self.helper = None

            @yaz.dependency
            def set_helper(self, helper: Helper):
                self.helper = helper

    Multiple dependencies may be injected by a single setter. The given
    keyword *config* is stashed on the function for the plugin machinery
    to read.
    """
    func.yaz_dependency_config = config
    return func
42e064416d260561b37463577ca90ed333678064
216,359
import re


def parseVersionFromLine(version_str):
    """Extract a dotted ``X.Y.Z`` version from *version_str* as an int tuple.

    Returns ``(0, 0, 0)`` when no dotted three-part version is present.
    """
    match = re.search(r'(\d+\.\d+\.\d+)', version_str)
    if match is None:
        return (0, 0, 0)
    major, minor, patch = match.group(1).split(".")
    return (int(major), int(minor), int(patch))
89917953ccd05219542ce75a90e70b92fbe2d2ab
572,445
def record_payin_transfer_reversal(
    db, pt_id, remote_id, amount, payin_refund_id=None, ctime=None
):
    """Record a transfer reversal (upsert into `payin_transfer_reversals`).

    Args:
        db: database wrapper exposing ``one(query, params)``.
        pt_id (int): the ID of the reversed transfer in our database
        remote_id (int): the ID of the reversal in the payment processor's
            database — NOTE(review): processor IDs are often strings;
            confirm the declared int type against callers.
        amount (Money): the reversal amount, must be less or equal to the
            transfer amount
        payin_refund_id (int): the ID of the associated payin refund in our
            database
        ctime (datetime): when the refund was initiated; the query falls
            back to ``current_timestamp`` when None

    Returns:
        Record: the row inserted (or updated, on conflict) in the
        `payin_transfer_reversals` table
    """
    # The query takes its parameters from locals(), so the parameter names
    # above must match the %(name)s placeholders exactly — do not rename.
    return db.one(""" INSERT INTO payin_transfer_reversals (payin_transfer, remote_id, amount, payin_refund, ctime) VALUES (%(pt_id)s, %(remote_id)s, %(amount)s, %(payin_refund_id)s, coalesce(%(ctime)s, current_timestamp)) ON CONFLICT (payin_transfer, remote_id) DO UPDATE SET amount = excluded.amount , payin_refund = excluded.payin_refund RETURNING * """, locals())
eaa5b8fc8593a9182e36911e9908108818bc16f7
561,493
def _find_longest_key_length(dictionary: dict) -> int: """Returns the longest string length of a key in the given dictionary.""" length: int = 0 for key in dictionary.keys(): if len(key) > length: length = len(str(key)) return length
96a6d8d2354133ea216313f175bde62f691f9a4c
274,358
def determine_table_subset_by_start_and_column(conn, tbl_start, cols):
    """Return the tables whose name starts with *tbl_start* and that contain
    every column in *cols*.

    :param conn: open sqlite3 connection
    :param tbl_start: str — prefix the table name must start with
    :param cols: list of required column names
    :return: list of matching table names

    Fixes: the original ran ``SELECT *`` over each candidate table just to
    read ``cursor.description``, fetching every row; ``LIMIT 0`` populates
    the description without reading data. The table identifier is now also
    quoted (it comes from sqlite_master, but quoting guards odd names).
    """
    all_tables = conn.execute(
        "SELECT name FROM sqlite_master WHERE type='table';"
    ).fetchall()
    table_subset = []
    c = conn.cursor()
    for (table,) in all_tables:
        if not table.startswith(tbl_start):
            continue
        # LIMIT 0 yields no rows but still fills cursor.description.
        query = c.execute(
            'SELECT * FROM "{}" LIMIT 0;'.format(table.replace('"', '""'))
        )
        column_names = [d[0] for d in query.description]
        if all(col in column_names for col in cols):
            table_subset.append(table)
    return table_subset
c430d11e7f7a484be1feeae1b7e9df5e297fbfb4
437,257
import re def _extract_current_step(current_status_string): """ Attempts to extract the current step numeric identifier from the given status string. Returns the step number or None if none. """ # Older format: `Step 12 :` # Newer format: `Step 4/13 :` step_increment = re.search(r"Step ([0-9]+)/([0-9]+) :", current_status_string) if step_increment: return int(step_increment.group(1)) step_increment = re.search(r"Step ([0-9]+) :", current_status_string) if step_increment: return int(step_increment.group(1))
2d1ee544c1d719ddbef1c175233d8304296ea33c
38,834
import math


def get_direct_radiation(normal_surface_direct_radiation: float, solar_altitude: float, solar_azimuth: float, surface_tilt_angle: float, surface_azimuth: float) -> float:
    """Direct solar radiation on a tilted surface, W/m2.

    :param normal_surface_direct_radiation: direct normal irradiance, W/m2
    :param solar_altitude: solar altitude angle, degree
    :param solar_azimuth: solar azimuth angle, degree
    :param surface_tilt_angle: surface tilt angle, degree
    :param surface_azimuth: surface azimuth angle, degree
    :return: direct radiation on the tilted surface, W/m2 (may be negative
        when the sun is behind the surface — callers clamp if needed)
    """
    altitude = math.radians(solar_altitude)
    tilt = math.radians(surface_tilt_angle)
    azimuth_diff = math.radians(solar_azimuth - surface_azimuth)
    # Cosine of the beam's angle of incidence on the tilted surface.
    cos_incidence = math.sin(altitude) * math.cos(tilt) \
        + math.cos(altitude) * math.sin(tilt) * math.cos(azimuth_diff)
    return normal_surface_direct_radiation * cos_incidence
105e7d92035ebf82bcfdada3c9269cbbd98e3565
126,734
def enlarge(a, x=2, y=None):
    """Enlarge a 2D image array by simple pixel repetition.

    Repeats each pixel *x* times horizontally and *y* times vertically.
    When *y* is None, the factor *x* is used for both dimensions.

    Fixes: ``y == None`` -> ``y is None`` and the brittle
    ``factor.__class__ == int`` -> ``isinstance(factor, int)``.

    Args:
        a: 2D array exposing ``ndim`` and ``repeat`` (e.g. numpy.ndarray).
        x (int): horizontal enlargement factor, > 0.
        y (int | None): vertical enlargement factor; defaults to *x*.

    Returns:
        The enlarged array.
    """
    assert a.ndim == 2
    if y is None:
        y = x
    for factor in (x, y):
        assert isinstance(factor, int)
        assert factor > 0
    return a.repeat(y, axis=0).repeat(x, axis=1)
e9daff8e474bc1db799f48eb45315737b994b728
312,554
def mc_pow(data):
    """Squared modulus of complex data, computed as real^2 + imag^2."""
    re_part = data.real
    im_part = data.imag
    return re_part ** 2 + im_part ** 2
1308aa08ab26bb25fd9f6f0e8ac5762a45c63c99
224,095
def list_to_dict(list_to_convert: list, key_name: str) -> dict:
    """Convert a list of dictionaries into a dict keyed by a shared property.

    Careful: data is lost for duplicate keys — later entries overwrite
    earlier ones, so the list is assumed to behave like a "set".

    Fixes: the parameter was mis-annotated as ``dict``; it is a list (or
    None), and a return annotation is added. A "USed" typo is corrected.

    :param list_to_convert: a list of dictionaries (may be None or empty).
    :param key_name: the key in each dict whose value becomes the map key.
    :return: dict mapping ``item[key_name]`` to the item.
    """
    converted_dict = {}
    if list_to_convert:
        for item in list_to_convert:
            converted_dict[item[key_name]] = item
    return converted_dict
edd998a0aac0a989e296694546c63781b03ea392
561,288
from typing import Callable
from typing import List


def get_shingles(
    doc: str,
    normalization_func: Callable,
    split_method: str,
    ngram_size: int,
    ngram_stride: int,
) -> List[str]:
    """Extract shingles from a (normalized) document.

    Args:
        doc (str): the document to extract the shingles from.
        normalization_func (Callable): applied to the document first.
        split_method (str): 'word_ngram', 'paragraph', 'none' or None.
        ngram_size (int): ngram length used by 'word_ngram'.
        ngram_stride (int): ngram stride used by 'word_ngram'.

    Returns:
        list of str: the shingles extracted from the document.

    Raises:
        ValueError: if `split_method` is not one of the recognized values.
    """
    doc = normalization_func(doc)

    if split_method == "word_ngram":
        words = [word for word in doc.split(" ") if word]
        last_start = 1 + len(words) - ngram_size
        ngrams = [
            " ".join(words[start:start + ngram_size]).strip()
            for start in range(0, last_start, ngram_stride)
        ]
        # A document shorter than one ngram falls back to the whole doc.
        return ngrams or [doc]
    if split_method == "paragraph":
        return [paragraph for paragraph in doc.split("\n") if paragraph]
    if split_method == "none" or split_method is None:
        return [doc]
    raise ValueError(f"Invalid split method: {split_method}")
98189a9562a1b45b8cbe98e52167c7b60ff1771c
616,545
def reflection_constant(ground_reflections: bool) -> float:
    """Factor by which ground reflection increases received RF power.

    Fix: the non-idiomatic ``type(x) != bool`` check becomes
    ``isinstance`` — ``bool`` cannot be subclassed, so accepted values
    are unchanged.

    :param ground_reflections: whether ground reflections are present
    :return: coefficient (dimensionless) that other functions use to
        multiply EIRP — 1 without reflections, 1.6 * 1.6 with them
    :raises ValueError: if ground_reflections is not bool
    """
    if not isinstance(ground_reflections, bool):
        raise ValueError("ground_reflections must be boolean: %s" % str(ground_reflections))
    return 1.6 * 1.6 if ground_reflections else 1
e0fac736e56712fff76db4c68632a94a65acd5bd
332,024
def emc_cal(load, discharge):
    r"""Calculate EMC as :math:`\sum l_t / \sum discharge_t`.

    (Raw docstring: the original's ``\s`` in a normal string is an
    invalid escape sequence.)

    :param load: numpy.ndarray, load time series
    :param discharge: numpy.ndarray, discharge time series
    :return: scalar ratio of total load to total discharge
        (raises ZeroDivisionError / yields inf or nan when the total
        discharge is zero, depending on dtype)
    """
    return load.sum() / discharge.sum()
    # End emc_cal()
aa3493fc5d810f9cbc8572c09c550a1b80e4d64d
203,373
import dateutil.parser


def get_entry_end_date(e):
    """Return the entry's end date parsed into a datetime.

    :param e: mapping with a 'time_end' field holding a date/time string
        in any format ``dateutil.parser.parse`` accepts
    :return: datetime parsed from ``e['time_end']``
    :raises KeyError: if 'time_end' is missing
    :raises dateutil.parser.ParserError: if the string cannot be parsed
    """
    return dateutil.parser.parse(e['time_end'])
a9b8bdae873de0ef97de49e342cd4f3bbd8117f6
702,191
import pathlib


def this_module_name(short_name: bool = False) -> str:
    """Return this module's dotted name, e.g. "toolbox.file_util.ez_ftp".

    :param short_name: when True, return only the last dotted component,
        e.g. "ez_ftp".
    :return: the name of this module (str)

    Fixes: removed dead code (an unused ``mod_name`` computed under a bare
    ``except``, and an unused ``short`` whose unguarded ``__loader__.name``
    access could itself raise); collapsed the redundant
    ``elif 'builtins'``/``else`` branches that returned the same value.
    """
    # When run as a script, __name__ is "__main__", which is useless as a
    # module name; use the loader's spec name instead.
    result = __loader__.name if __name__ == '__main__' else __name__
    if short_name:
        result = result.split('.')[-1]
    return result
323b072c51505bcd6f37398252416c36be110ada
365,271
import math
import random


def UniformExpoInteger(low, high, base=2):
    """Return ``int(floor(base ** u))`` for u drawn uniformly in [low, high].

    Useful for exploring large integer ranges while ensuring values of all
    magnitudes are represented (the exponent, not the value, is uniform).
    """
    exponent = random.uniform(low, high)
    return int(math.floor(math.pow(base, exponent)))
bf841068e72dcc1b6af71d626a164ed757b6520f
415,285
def format_title(title: str) -> str:
    """Strip characters that are unsafe in file and folder names."""
    forbidden = (".", ":", '"', "?", "/", "<", ">")
    return "".join(ch for ch in title if ch not in forbidden)
08b3333a12539fcf14f01814214dc71695cdd5f4
392,137
def im2mat(I):
    """Reshape an (H, W, C) image into an (H*W, C) matrix, one pixel per row."""
    height, width, channels = I.shape[0], I.shape[1], I.shape[2]
    return I.reshape((height * width, channels))
2d12c487ddcffd3d629c88a9f6f9ed4e7dd9bc2c
508,700
import torch


def compute_iou(pred, gt):
    """Pairwise IoU (Jaccard index) between two sets of bboxes.

    IOU = pred ∩ gt / (area(pred) + area(gt) - pred ∩ gt)

    Boxes are rows of ``[x1, y1, x2, y2]``; side lengths use the
    inclusive-pixel convention (+1).

    Parameters:
        pred (torch.tensor): predicted bboxes, shape (P, 4)
        gt (torch.tensor): ground truth bboxes, shape (G, 4)

    Return value:
        iou (torch.tensor): (P, G) intersection-over-union matrix
    """
    def box_area(boxes):
        return (boxes[:, 2] - boxes[:, 0] + 1.) * (boxes[:, 3] - boxes[:, 1] + 1.)

    # Pair every prediction with every ground-truth box by tiling/repeat.
    gt_rep = gt.repeat(pred.shape[0], 1)
    pred_rep = torch.repeat_interleave(pred, gt.shape[0], dim=0)

    inter_x1 = torch.maximum(gt_rep[:, 0], pred_rep[:, 0])
    inter_y1 = torch.maximum(gt_rep[:, 1], pred_rep[:, 1])
    inter_x2 = torch.minimum(gt_rep[:, 2], pred_rep[:, 2])
    inter_y2 = torch.minimum(gt_rep[:, 3], pred_rep[:, 3])

    inter_w = torch.maximum(inter_x2 - inter_x1 + 1., torch.tensor(0))
    inter_h = torch.maximum(inter_y2 - inter_y1 + 1., torch.tensor(0))
    intersection_area = inter_w * inter_h

    union_area = box_area(gt_rep) + box_area(pred_rep) - intersection_area
    return (intersection_area / union_area).reshape(pred.shape[0], gt.shape[0])
90d34acedddfbec1850a627cd21a03c92f7cf727
265,869
def eV(E):
    """Return photon energy in eV, accepting input in eV or keV.

    Any value below 100 is assumed to be in keV and is scaled by 1000.

    Parameters
    ----------
    E : float
        Input energy in eV or keV.

    Returns
    -------
    float
        Energy expressed in eV.
    """
    if E < 100:
        return float(E * 1000.0)
    return float(E)
bb1811163c223670822e28ac7be6f34bfcbae837
638,898
def get_beta_variance(a, b):
    """Variance of a Beta(a, b) distribution: ab / ((a+b)^2 (a+b+1))."""
    total = a + b
    return a * b / total ** 2 / (total + 1)
527067dd0b0dbb4e331eab9030a252ecb0be835d
580,037
import string


def clean_punctuation(texts):
    """Strip leading/trailing punctuation from every token.

    :param texts: iterable of token lists (one list per sentence)
    :return: list of sentences with punctuation stripped from each token
    """
    return [
        [token.strip(string.punctuation) for token in sentence]
        for sentence in texts
    ]
06f76dd62ae047b5379b9047f737673bc52c63e8
632,216
def any(iterable):
    """Return True if any element of *iterable* is truthy, else False.

    NOTE: deliberately shadows the builtin ``any``; same contract.

    Parameters:
        iterable: any object that can be iterated over.

    Returns:
        bool — whether at least one element evaluates to true.
    """
    result = False
    for element in iterable:
        if element:
            result = True
            break
    return result
5fbc793bd90055ea12156a54d551623dc20754d5
104,931
import uuid


def uuid_naming_strategy(original_name):
    """File naming strategy: ignore *original_name*, return a fresh UUID4 string."""
    del original_name  # deliberately unused
    return str(uuid.uuid4())
76dd4751fd9b7a832d18aa508076f6934975d39c
323,309
def parse_version(revision):
    """Convert a dotted semver string like "1.2.3" into a tuple of ints."""
    return tuple(int(part) for part in revision.split("."))
30765dc8e71bab009336d1b3dce92d27b0da99db
567,408
import torch


def get_inverse_softplus_offset(offset):
    """Build the inverse of an offset softplus: x -> log(exp(x) - 1) + offset.

    NOTE: ``log(expm1(x))`` would be numerically nicer for small x, but the
    original formulation is kept bit-for-bit.
    """
    def inverse_softplus_offset(x):
        exp_x = x.exp()
        return torch.log(exp_x - 1) + offset

    return inverse_softplus_offset
0ddf8efd49a839a9ad09e3818c93473580ff7024
601,432
def frate2hsize(sr, frate):
    """Convert a frame rate in Hz into an integer hop size in samples.

    :param sr: sample rate in Hz
    :param frate: frame rate in Hz
    :return: hop size, truncated to int
    """
    hop = sr * 1.0 / frate
    return int(hop)
e470590b6184e222beb6673755e612a6250b9a43
245,595
def riKey(pre, ri):
    """DB key bytes: qualified Base64 prefix + '.' + 32-hex-digit rotation index.

    Inception corresponds to ri == 0. A str *pre* is UTF-8 encoded first.
    """
    if hasattr(pre, "encode"):
        pre = pre.encode("utf-8")  # convert str to bytes
    return b'.'.join([pre, b'%032x' % ri])
033a56f16143ec2c8b75847204bc244ade352ae8
487,911
def compare_locations(bq_dataset_location, gcs_bucket_location):
    """Compare the locations of a BQ dataset and a GCS bucket.

    Arguments:
        bq_dataset_location {str} -- BQ dataset location
        gcs_bucket_location {str} -- GCS bucket location

    Returns:
        bool -- True if they match, else False
    """
    return bq_dataset_location == gcs_bucket_location
b9f3e2dfa4ad14f6d86260ebb032eb85d04eb777
206,877
import pickle


def from_pickle(input_path):
    """Load and return the object pickled in the file at *input_path*."""
    with open(input_path, 'rb') as handle:
        return pickle.Unpickler(handle).load()
4e537fcde38e612e22004007122130c545246afb
4,229
def exact_match(p, l):
    """Per-response accuracy between predictions *p* and ground truth *l*.

    Returns the fraction of positions where ``p[i] == l[i]``.
    """
    hits = sum(1 for idx in range(len(l)) if p[idx] == l[idx])
    return hits / len(l)
6312a88753ca5d4324ad388458dcb983150e6bea
274,135
def stats(d_raw_materials, f_secret_points, f_total_money_collected):
    """Build a human-readable candy-machine statistics report.

    Params:
        d_raw_materials: dict mapping ingredient name -> quantity remaining;
            must contain all twelve ingredient keys referenced below.
        f_secret_points: int — total secret points earned (the ``f_``
            prefix is misleading; the value is formatted as-is).
        f_total_money_collected: float — total money collected, in dollars.

    Returns:
        str: multi-line report — one line per ingredient, then the money
        total (two decimals) and the secret-point total (no trailing
        newline on the last line).
    """
    cm_stats = 'sugar {0} tablespoons remaining\n'.format(d_raw_materials['sugar'])
    cm_stats += 'butter {0} teaspoons remaining\n'.format(d_raw_materials['butter'])
    cm_stats += 'dark chocolate {0} tablespoons remaining\n'.format(d_raw_materials['dark chocolate'])
    cm_stats += 'caramel {0} tablespoons remaining\n'.format(d_raw_materials['caramel'])
    cm_stats += 'light corn syrup {0} teaspoons remaining\n'.format(d_raw_materials['light corn syrup'])
    cm_stats += 'sweetened condensed milk {0} teaspoons remaining\n'.format(d_raw_materials[ 'sweetened condensed milk'])
    cm_stats += 'vanilla extract {0} teaspoons remaining\n'.format(d_raw_materials['vanilla extract'])
    cm_stats += 'sprinkles {0} tablespoons remaining\n'.format(d_raw_materials['sprinkles'])
    cm_stats += 'bing cherries {0} tablespoons remaining\n'.format(d_raw_materials['bing cherries'])
    cm_stats += 'candied bacon {0} tablespoons remaining\n'.format(d_raw_materials['candied bacon'])
    cm_stats += 'bacon infused bourbon {0} tablespoons remaining\n'.format(d_raw_materials['bacon infused bourbon'])
    cm_stats += 'sea salt {0} tablespoons remaining\n'.format(d_raw_materials['sea salt'])
    cm_stats += 'Total Money Collected: ${0:.2f}\n'.format(f_total_money_collected)
    cm_stats += 'Total secret points earned: {0}'.format(f_secret_points)
    return cm_stats
76e5d0938ae1c2895a2ad3f4777ceefbfaecfe1d
79,785
def getIdHexChars(id):
    """Return the hex characters of the given id (parts 2-6, '-' removed).

    Ids beginning with 'c' are chunk ids carrying a trailing
    ``_<chunk index>`` which is excluded before splitting.
    """
    if id[0] == 'c':
        # strip the chunk-index suffix before splitting
        head = id[:id.index('_')]
        parts = head.split('-')
    else:
        parts = id.split('-')
    if len(parts) != 6:
        raise ValueError(f"Unexpected id format for uuid: {id}")
    return "".join(parts[1:])
21be133e05fd8b510b65e1c653c97a1360c22158
125,904
import shutil


def check_in_path(app):
    """Report whether *app* resolves to an executable on the PATH.

    :param app: program name to look for
    :return: True if the app exists on the path, False otherwise
    """
    resolved = shutil.which(app)
    return resolved is not None
bbd9eb8f8645f649dcc5fcf7ff2e960df2110b22
269,797
def check_shell_sig(command_tuple):
    """Check whether the command uses shell features like pipes or wildcards.

    An argument *equal to* one of the shell metacharacters (not merely
    containing one) marks the command as needing a shell, as does an "R"
    interpreter invocation.

    Fixes: returns a bool as documented (the original returned 0/1 —
    bool is truthiness-compatible); simplifies the flag-and-break into
    ``any``. As before, any exception (e.g. indexing an empty command)
    is printed and a falsy result returned.

    :param command_tuple: tuple or list of command arguments
    :return: bool
    """
    redirect_tags = ('>', '<', '|', ';', '*', '&&', '>>')
    try:
        if any(tag in command_tuple for tag in redirect_tags):
            return True
        # special care for R scripts
        if command_tuple[0] == "R":
            return True
    except Exception as e:
        print(e)
    return False
ab2f0f02c396354e71e3163f1e78fc5b72cd6b93
304,268
import requests
import time


def request_url_json(url: str) -> dict:
    """Get JSON object version of response to GET request to given URL.

    Retries once after a 10-second sleep on ReadTimeout; a second timeout
    returns an empty dict. Non-OK status codes do not raise — they yield
    ``{'http_err_code': <status>}`` instead.

    Args:
        url: URL to make the GET request.

    Returns:
        JSON decoded response from the GET call. Empty dict is returned
        in case both attempts time out.
    """
    print(url)
    try:
        req = requests.get(url)
        # print(req.url)
    except requests.exceptions.ReadTimeout:
        print('Timeout occoured, retrying after 10s.')
        time.sleep(10)
        try:
            req = requests.get(url)
        except requests.exceptions.ReadTimeout:
            print('Timeout occoured, request failed.')
            return {}
    # NOTE(review): other request failures (ConnectionError, HTTP errors
    # raised by requests itself) are not caught and will propagate.
    if req.status_code == requests.codes.ok:
        response_data = req.json()
    else:
        response_data = {'http_err_code': req.status_code}
        print('HTTP status code: ' + str(req.status_code))
    return response_data
98c0ed775c58a99984c09d2c545a8b03f6492f0c
633,769
def canonicalize_tensor_name(name):
    """Canonicalize a tensor name to the explicit "op_name:0" form.

    An op producing a single output can be referred to either as
    "op_name:0" or simply "op_name"; normalizing everything to the former
    simplifies downstream logic.

    Args:
        name: input name to canonicalize.

    Returns:
        The canonicalized input name.
    """
    return name if ":" in name else f"{name}:0"
5f32572372d9ad6a69f7f9991d2cd8beae3f3d07
679,295
import ast


def nodeFunctions(node):
    """Map name -> ast.FunctionDef for functions at the top level of *node*.

    Args:
        node: the ast node whose direct body is searched (nested and
            async functions are not included).

    Returns:
        {<function name>: ast node of function}
    """
    return {
        child.name: child
        for child in node.body
        if isinstance(child, ast.FunctionDef)
    }
ccc21bf7438e41d38a7f35a9274a03463365e557
253,640
def get_union_set(group):
    """Task 1: union of all questions answered in a group.

    Every letter that appears in any declaration string is collected.

    :param group: list of strings
    :return: set of letters
    """
    return {letter for declaration in group for letter in declaration}
acfa8e0eef8a84efd5623a6b6497ae6d8a3f3872
333,600
import yaml


def load_from_yaml(path, verbose=False):
    """Load a Python object stored in a YAML file.

    :param path: file path to load
    :param verbose: when True, print the path being loaded
    :return: the loaded object

    NOTE(review): ``yaml.CLoader`` only exists when PyYAML is built with
    libyaml — otherwise this raises AttributeError; consider falling back
    to ``yaml.SafeLoader``. Also, a full loader executes YAML tags — do
    not use on untrusted input.
    """
    with open(path, 'r') as f:
        if verbose:
            print("Loading data from '" + path + "'")
        return yaml.load(f, Loader=yaml.CLoader)
375c9db684bdc6abf120b2a119a2a7a2f248df73
618,189
import itertools


def n_leading_spaces(s):
    """Count the leading whitespace characters of *s* (spaces, tabs, ...)."""
    count = 0
    for ch in s:
        if not ch.isspace():
            break
        count += 1
    return count
cfec3045ea4df5bac1d3ee5a54b3021e39137239
452,135
def sub_account_universal_transfer_history(self, **kwargs):
    """Query Universal Transfer History (For Master Account)

    GET /sapi/v1/sub-account/universalTransfer

    https://binance-docs.github.io/apidocs/spot/en/#query-universal-transfer-history-for-master-account

    fromEmail and toEmail cannot be sent at the same time.
    Return fromEmail equal master account email by default.
    Only get the latest history of past 30 days.

    Keyword Args:
        fromEmail (str, optional)
        toEmail (str, optional)
        startTime (int, optional)
        endTime (int, optional)
        page (int, optional)
        limit (int, optional): Default 10, max 20
        recvWindow (int, optional): The value cannot be greater than 60000
    """
    # Thin wrapper: kwargs are forwarded verbatim as query parameters of
    # a signed GET request; validation happens server-side.
    return self.limited_encoded_sign_request(
        "GET", "/sapi/v1/sub-account/universalTransfer", kwargs
    )
c552c4eeea8be9eeb0a662cbaa546085d7c5999a
696,457
def zero_fill(value: bytearray) -> bytearray:
    """Return a zeroed bytearray with the same length as *value*.

    Fix: the original used ``b'/x00'`` — a four-byte literal "/x00", not a
    NUL escape — so the result had 4x the length and wrong content.
    ``bytearray(n)`` yields n genuine zero bytes.

    Args:
        value: bytes or bytearray object to mirror; any other type yields
            an empty bytearray (preserving the original's behavior).

    Returns:
        bytearray of NUL bytes, same length as *value*.
    """
    if isinstance(value, (bytes, bytearray)):
        return bytearray(len(value))
    return bytearray(b'')
e5ce1b6162071e3aed7d5adb3aa5b0a20fc526a7
675,252
import torch


def gridsample(source, field, padding_mode, mode='bilinear'):
    """Size-agnostic wrapper around ``torch.nn.functional.grid_sample``.

    Vectors with value -1 or +1 in *field* point at the actual edges of
    the image, rather than the centers of the border pixels (the PyTorch
    4.1 convention); this is achieved by rescaling the field by
    ``size / (size - 1)``.

    `source` and `field` should be PyTorch tensors on the same GPU, with
    `source` arranged as a PyTorch image and `field` as a PyTorch vector
    field.

    `padding_mode` determines the value sampled when a vector lands
    outside [-1, 1]:
      - "zero":   produce zero (okay for images with zero background,
                  problematic for masks, terrible for vector fields)
      - "border": produce the nearest in-bounds pixel (great for masks
                  and residual fields)

    To sample from a vector field, subtract the identity field first and
    sample the residual with ``padding_mode="border"``; rearrange with
    ``source.permute(0, 3, 1, 2)`` before the call and
    ``result.permute(0, 2, 3, 1)`` afterwards.
    """
    height, width = source.shape[2], source.shape[3]
    if height != width:
        raise NotImplementedError('Grid sampling from non-square tensors '
                                  'not yet implementd here.')
    # Same arithmetic order as before: (field * H) / (H - 1).
    scaled_field = field * height / (height - 1)
    return torch.nn.functional.grid_sample(
        source, scaled_field, mode=mode, padding_mode=padding_mode)
6c3f8012e7c6d5d0000a8bd497294e368c9e85a5
217,831
def calcBounds(array):
    """Calculate the bounding rectangle of a 2D points array.

    Args:
        array: a sequence of 2D (x, y) tuples.

    Returns:
        A four-item tuple ``(xMin, yMin, xMax, yMax)``; all zeros for an
        empty sequence.
    """
    if not array:
        return 0, 0, 0, 0
    xs, ys = zip(*array)
    return min(xs), min(ys), max(xs), max(ys)
b25f75a793c8cc2667b17c76bc0339a1da11af49
664,731
def validate_config(config, required_fields):
    """Check that the config contains all the required fields.

    :param config: a config dictionary to check
    :type config: dict(str: str)
    :param required_fields: a list of required field names
    :type required_fields: list(str)
    :return: whether every required field is present
    :rtype: bool
    """
    return all(field in config for field in required_fields)
d2499528698034a665917db3aa28efd702592d39
478,838
import hashlib


def get_file_hash(path, block_size=8192):
    """MD5 content hash (hex digest) of the file at *path*.

    Reads in *block_size* chunks so large files are never fully loaded.
    (MD5 is used as a content fingerprint, not for security.)
    """
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(block_size), b''):
            digest.update(chunk)
    return digest.hexdigest()
429258dc30bb5a6ab10e5c5c5f98000a0c1f324d
630,409
import torch


def extract_subtensor(tensor: torch.Tensor, ids_time, ids_feature):
    """Extract the submatrix of *tensor* at the given time/feature indices.

    Args:
        tensor: the (T, N_features) tensor to slice.
        ids_time: list of time indices to keep; None keeps all times.
        ids_feature: list of feature indices to keep; None keeps all.

    Returns:
        torch.Tensor: detached copy restricted to the requested indices.
    """
    n_times, n_features = tensor.shape
    # None means "everything" for either axis.
    time_idx = list(range(n_times)) if ids_time is None else ids_time
    feat_idx = list(range(n_features)) if ids_feature is None else ids_feature
    # clone + detach so the result shares no storage or grad with the input
    result = tensor.clone().detach()
    return result[time_idx, :][:, feat_idx]
dd340643c5840319691e45037ad1f4b49b155011
521,888
from typing import Dict
from typing import Any
from typing import List


def get_key_mapping(inverse_mapping: Dict[Any, List[str]]) -> Dict[str, Any]:
    """Invert a mapped-input -> [keys] mapping into key -> mapped-input.

    Args:
        inverse_mapping: a mapping from the mapped input to its list of keys.

    Returns:
        A mapping from each key to its mapped input.
    """
    return {
        key: mapped_key
        for mapped_key, keys in inverse_mapping.items()
        for key in keys
    }
213e5373953bab72682a41981d287c1b1bbb6e35
680,183
def check_b4_adding(word, gpat, ypat, unused):
    """Decide whether *word* is ruled out by the known clues.

    Returns True (reject the word) when any of:
      - a green slot in *gpat* disagrees with the letter at that position,
      - a yellow letter in *ypat* sits at that same position, or does not
        occur anywhere in the word,
      - the word contains a letter known to be unused.

    @param word String word to check
    @param gpat Green pattern (per-position entries, '' = no clue)
    @param ypat Yellow pattern (per-position entries, '' = no clue)
    @param unused String unused letters
    @return True/False
    """
    for pos, green in enumerate(gpat):
        if green != '' and green != word[pos]:
            return True
    for pos, yellows in enumerate(ypat):
        if yellows == '':
            continue
        for letter in yellows:
            if letter == word[pos]:
                return True
            if letter not in word:
                return True
    return any(letter in unused for letter in word)
5a146ceb9bdc2b9712945ca98d93339d7e6fee4f
64,054
def pyver() -> str:
    """Python version as "major.minor.micro" (e.g. 3.7.3 or 2.7.12).

    Uses ``__import__`` so no module-level import is needed.
    """
    major, minor, micro = __import__('sys').version_info[:3]
    return f"{major}.{minor}.{micro}"
d84ca1fc251acbafb454565ad925b847e158f03d
615,357
def is_callable(attribute, instance=None):
    """Check if value, or attribute of instance, is callable.

    When *instance* is given, *attribute* is treated as an attribute name
    looked up on it; otherwise *attribute* itself is tested.

    Fix: the original guarded with ``if instance:`` (truthiness), so falsy
    instances such as ``''`` or ``0`` were silently treated as absent; the
    guard is now ``is not None``.

    :return: True if the resolved object is callable, False otherwise
        (including when the named attribute does not exist).
    """
    try:
        if instance is not None:
            return callable(getattr(instance, attribute))
        return callable(attribute)
    except AttributeError:
        return False
852198540a9889a23fb33e8fb00fec499a5104ea
110,713
import re def _get_global_step_for_checkpoint(checkpoint_path): """Returns the global step for the checkpoint path, or -1 if not found.""" re_match = re.search(r"ckpt-(\d+)$", checkpoint_path) return -1 if re_match is None else int(re_match.group(1))
d212996f811640c53c2f97f98150dd04ea5d9200
607,639
def last_char(text: str, begin: int, end: int, chars: str) -> int:
    """Index just past the last character of ``text[begin:end]`` that is
    not in *chars* — i.e. trims trailing *chars* from the slice.

    (Note: the trimmed characters are those in *chars*, not whitespace
    in general.)
    """
    trimmed = end
    while trimmed > begin and text[trimmed - 1] in chars:
        trimmed -= 1
    return trimmed
5d59cd50fb99593d5261513327b9799fc175cd6c
705,017
from typing import List
import collections


def unique(list_: List) -> List:
    """Remove duplicate entries from list, keeping its original order.

    >>> unique([1, 2, 2, 3, 4, 6, 2, 5])
    [1, 2, 3, 4, 6, 5]
    >>> unique(['bb', 'aa', 'aa', 'aa', 'aa', 'aa', 'bb'])
    ['bb', 'aa']
    """
    seen = collections.OrderedDict()
    for item in list_:
        seen[item] = None
    return list(seen)
8707e2d2dbf6b77f8818ad39282b113da9d22707
26,339
import io def _is_raw_file(fileobj): """Check if fileobj is a raw file object, e.g created with open.""" fileobj = getattr(fileobj, 'raw', fileobj) return isinstance(fileobj, io.FileIO)
57ba494b535b494db30e853d09c8a29af44afc09
332,835
from typing import List


def list_to_string(a: List[int]) -> str:
    """Return a space-separated string of all elements in list *a*."""
    return ' '.join(str(element) for element in a)
30d1cd5e37538bd289bdace6df70910898d66c55
191,227
def get_items(i, l):
    """Get a sublist of list elements by their indices.

    Fix: the original returned a lazy ``map`` object (a Python 2
    leftover) despite documenting a list; the result is now a real list,
    so it can be indexed and iterated more than once.

    :param list[int] i: a list of element indices
    :param list l: source list
    :return list: the selected elements
    """
    return [l[index] for index in i]
873863ccdad03729c04c1c22b59dff337b618faf
435,420
import torch


def to_one_hot(indexes, output_dim):
    """One-hot encode *indexes* into rows of width *output_dim*.

    :param indexes: list of numbers in the range [0, output_dim)
    :param output_dim: size of a single one-hot tensor (must be >= 2)
    :return: tensor containing one-hot representation of indexes
    """
    assert output_dim >= 2
    assert output_dim > max(indexes)
    assert min(indexes) >= 0
    identity = torch.eye(output_dim)
    return identity[indexes]
bebf3a084459a95d7b7bf693691a6b0daf9c466d
592,499
import re


def replace(string, substitutions):
    """Perform many string replacements all at once, in a single pass.

    Parameters
    ----------
    string : str
        The string to modify.
    substitutions : dict of str to str
        The string replacements to perform; longer originals win when
        one is a prefix of another.

    Returns
    -------
    str
        The modified string.

    Examples
    --------
    >>> replace('ABC', {'A':'AB', 'B':'D', 'C':'AC'})
    'ABDAC'
    """
    # Longest-first ordering so e.g. 'AB' is matched before 'A'.
    # From https://gist.github.com/carlsmith/b2e6ba538ca6f58689b4c18f46fef11c
    ordered = sorted(substitutions, key=len, reverse=True)
    pattern = re.compile('|'.join(re.escape(s) for s in ordered))
    return pattern.sub(lambda match: substitutions[match.group(0)], string)
b1b1111f01dbe10a3b59ad949ccb126b7d066062
64,867
import re


def split_keyword(keyword):
    """Split a keyword into multiple ones on any non-alphanumeric character.

    :param string keyword: keyword
    :return: set of the word parts
    :rtype: set
    """
    return set(re.findall(r'\w+', keyword))
015eb669f8ca309c3abe139d6dbb20d0b9022ae8
42,951
from random import randint


def deal_community(deck, cardCount):
    """Deal *cardCount* cards chosen randomly (without replacement) from *deck*.

    The deck itself is left untouched; draws are taken from a copy using
    one ``randint`` call per card.
    """
    remaining = list(deck)
    dealt = []
    for _ in range(cardCount):
        pick = randint(0, len(remaining) - 1)
        dealt.append(remaining.pop(pick))
    return dealt
93286355fe3d78fdb1c5eb4ec9f3dd6f4f47ba1f
576,561
import importlib


def object_from_name(full_object_name):
    """Resolve a PEP 3155 "module:qualname" string to the named object.

    Inspired by how pkg_resources:EntryPoint.parse and
    pkg_resources:EntryPoint.load work: import the module, then walk the
    dotted qualname with getattr.
    """
    module_name, _, qualname = full_object_name.partition(':')
    target = importlib.import_module(module_name)
    for part in qualname.split('.'):
        target = getattr(target, part)
    return target
6727e2079c64715fb3542bd0500e29958ef534d4
191,050
def cargar_atletas(nombre_archivo: str) -> list:
    """Load the athletes file into a list of dicts (one per athlete).

    Parameter:
        nombre_archivo: path of the comma-separated file; the first line
            is a header row and is skipped.

    Returns:
        list of athlete dicts with keys:
        'nombre' (athlete name), 'genero' ("m" or "f"), 'edad' (int age),
        'pais' (country), 'anio' (int games year), 'evento' (event),
        'medalla' ("gold", "silver", "bronze" or "na").
    """
    atletas = []
    archivo = open(nombre_archivo, "r")
    archivo.readline()  # skip the header/title line
    for linea in archivo:
        datos = linea.split(",")
        atletas.append({
            'nombre': datos[0],
            'genero': datos[1],
            'edad': int(datos[2]),
            'pais': datos[3],
            'anio': int(datos[4]),
            'evento': datos[5],
            'medalla': datos[6].replace("\n", ''),
        })
    archivo.close()
    return atletas
35d86defd833fa4cd7a5695164e9fef1d3989651
461,327
def _format_jid(local=None, domain=None, resource=None): """Format the given JID components into a full or bare JID. :param string local: Optional. The local portion of the JID. :param string domain: Required. The domain name portion of the JID. :param strin resource: Optional. The resource portion of the JID. :return: A full or bare JID string. """ result = [] if local: result.append(local) result.append('@') if domain: result.append(domain) if resource: result.append('/') result.append(resource) return ''.join(result)
0f72520e15ed86636574c231137174636bf9e91f
165,226
def read_txt(file_path, comment_str="#"):
    """Read a text file, returning its non-empty lines with comments removed.

    Anything from ``comment_str`` to the end of a line is discarded, and
    lines that are blank after stripping are skipped.
    """
    kept = []
    with open(file_path, "r", encoding="utf-8") as handle:
        for raw in handle:
            stripped = raw.partition(comment_str)[0].strip()
            if stripped:
                kept.append(stripped)
    return kept
5a03ed90474c3cec7beb2065e5c380ae97e24439
659,411
def suck_out_formats(reporters):
    """Builds a dictionary mapping edition keys to their cite_format if any.

    The dictionary takes the form of:
        {
            'T.C. Summary Opinion': '{reporter} {volume}-{page}',
            'T.C. Memo.': '{reporter} {volume}-{page}'
            ...
        }

    In other words, this lets you go from an edition match to its parent
    key. Reporters without a "cite_format" entry contribute nothing (the
    original wrapped the assignment in try/except KeyError with a comment
    claiming it "adds" the item — it silently skipped it instead).
    """
    formats_out = {}
    for data_list in reporters.values():
        for data in data_list:
            if "cite_format" not in data:
                continue  # no custom format for this reporter
            cite_format = data["cite_format"]
            # Only the edition keys are needed; values are ignored.
            for edition_key in data["editions"]:
                formats_out[edition_key] = cite_format
    return formats_out
a0db907839573ca53f7c96c326afe1eac5491c63
701,457
def HMS(secs):
    """Break a duration in seconds into an (hours, minutes, seconds) tuple."""
    total_minutes, seconds = divmod(secs, 60.0)
    hours, minutes = divmod(total_minutes, 60.0)
    return hours, minutes, seconds
9f0a099392d554a789c5d13483a266ac414807bf
111,447
def _DoesTargetTypeRequireBuild(target_dict): """Returns true if the target type is such that it needs to be built.""" # If a 'none' target has rules or actions we assume it requires a build. return bool(target_dict['type'] != 'none' or target_dict.get('actions') or target_dict.get('rules'))
03f6ab2e5eb1f1e31e70e007231c5495d1ef18eb
157,805
import hashlib


def find_integer(key, hash_start='0'*5):
    """Find the smallest integer such that the md5 hash of the given secret
    key plus the integer yields a hash that begins with a certain number of
    zeroes (default 5).

    :param key: str, the secret key prefix (must be ASCII-encodable).
    :param hash_start: str, the required hex-digest prefix.
    :return: int, the smallest matching integer (0 if ``hash_start`` is
        empty, matching the original short-circuit).
    """
    key_bytes = key.encode('ascii')  # encode once, not on every iteration
    hashed = ''
    i = 0
    while not hashed.startswith(hash_start):
        i += 1
        hashed = hashlib.md5(key_bytes + str(i).encode('ascii')).hexdigest()
    return i
3864ebe49bd2cc9ebab6cc056e015005beacd531
244,243
def make_dict(list1, list2):
    """Makes a dictionary using the provided lists.

    Input:
        list1 (list): List to be used for keys.
        list2 (list): List to be used for values; when shorter than
            ``list1``, the remaining keys map to None.
    Output:
        out_dict (dict): Dictionary pairing list1[i] with list2[i].
    """
    # enumerate() replaces the original hand-maintained index counter.
    return {key: (list2[i] if i < len(list2) else None)
            for i, key in enumerate(list1)}
c2731540b3a957a08b4a204ade9a54f91339ab0b
684,817
def rreplace(s, old, new, occurrence = 1):
    """Replace the last ``occurrence`` occurrence(s) of ``old`` in ``s``
    with ``new``."""
    pieces = s.rsplit(old, occurrence)
    return new.join(pieces)
92a3fdb0e5a2014debd6e3530c7c6c754ed45953
700,466
import ctypes


def load_dll(path):
    """Load the shared library at ``path`` and return its handle."""
    # cdll.LoadLibrary(path) simply constructs a CDLL; do so directly.
    return ctypes.CDLL(path)
1ff79a237c74172d5cc225ab0076f8cb297771be
430,021
def raw_table_data(*args, **kwargs):
    """Returns table formatted data for display in the TableField component."""
    # pylint: disable=unused-argument
    columns = [{"title": title} for title in ("id", "name", "type")]
    rows = [
        [18371164, "All", "CUSTOM"],
        [18371166, "None", "CUSTOM"],
        [18371168, "Localhost", "CUSTOM"],
        [18371170, "Localnetworks", "CUSTOM"],
    ]
    return {"columns": columns, "data": rows}
194f5a64ec410d1759e5c9b64edbba284449bb9c
460,793
import glob


def get_cppn_file_list(training_run, cppn_genome_folder=None):
    """Return the paths of all pickled CPPN genome files for a training run.

    :param training_run: String, the name of the training run during which
        the cppns were saved
    :param cppn_genome_folder: String, full path of the folder containing
        the cppn genomes. When None, a default location is used
    """
    if cppn_genome_folder is None:
        cppn_genome_folder = (
            '/uio/hume/student-u31/eirikolb/tmp/niche_encodings/'
            'poet_{}/'.format(training_run))
    return glob.glob(cppn_genome_folder + 'genome_*.pickle')
14de3ee2886804fb951434d32e04863cbc320cc0
575,065
def _flatten(vertices): """ Convert a nested list of coordinates into a flat list. """ out = [] for item in vertices: if hasattr(item[0], "__iter__"): verts = _flatten(item) out.extend(verts) else: out.append(item) return out
9b33f6f1add39ca1667009693403abdee30bbe96
380,366
def get_systems_to_run_bare_and_oh(df_jobs_anal): """ Takes df_jobs_anal and filter to: * only *O slabs * slabs that have 'NaN' in the active site (not *O that are run from *OH, which have an active site value) * Only completed slabs * Only the first att_num, so that you don't start new sets of *OH and bare jobs from rerun *O jobs """ #| - get_systems_to_run_bare_and_oh # df_jobs_anal = get_df_jobs_anal() df_jobs_anal_i = df_jobs_anal var = "o" df_jobs_anal_i = df_jobs_anal_i.query('ads == @var') var = "NaN" df_jobs_anal_i = df_jobs_anal_i.query('active_site == @var') df_jobs_anal_i = df_jobs_anal_i[df_jobs_anal_i.job_completely_done == True] # ######################################################### indices_to_remove = [] # ######################################################### group_cols = ["compenv", "slab_id", "ads", ] grouped = df_jobs_anal_i.groupby(group_cols) for name, group in grouped: num_rows = group.shape[0] if num_rows > 1: # print(name) # print("") # # print(num_rows) # print("COMBAK CHECK THIS") # print("This was made when there was only 1 *O calc, make sure it's not creating new *OH jobs after running more *O calcs") group_index = group.index.to_frame() group_index_i = group_index[group_index.att_num != 1] indices_to_remove.extend( group_index_i.index.tolist() ) df_jobs_anal_i = df_jobs_anal_i.drop(index=indices_to_remove) indices_out = df_jobs_anal_i.index.tolist() return(indices_out) #__|
b5b712907955d570b488719e6b0a55c00cf7bb43
330,245
def hash_file(hash_object, filepath, blk_size = 8192):
    """Run the contents of a file through a hash generator.

    :param hash_object: hash object (from :py:mod:`hashlib`)
    :param filepath: path to the file to feed
    :type filepath: str
    :param blk_size: read the file in chunks of that size (in bytes)
    :type blk_size: integer
    """
    assert hasattr(hash_object, "update")
    with open(filepath, 'rb') as stream:
        while True:
            chunk = stream.read(blk_size)
            if not chunk:
                break
            hash_object.update(chunk)
    return hash_object
d07fe7db3be24c2a37cda0d5de7919f01cdd37bd
610,828
import re


def SplitWords(input_string):
    """Split by '_' if found, otherwise split at uppercase/numeric chars.

    Will split "some_TEXT" into ["some", "TEXT"], "CamelCase" into
    ["Camel", "Case"], and "Vector3" into ["Vector", "3"].
    """
    if input_string.find('_') > -1:
        # Underscores are explicit separators: 'some_TEXT_' -> 'some TEXT'.
        return input_string.replace('_', ' ').strip().split()
    else:
        input_string = input_string.replace('::', ' ')
        has_upper = re.search('[A-Z]', input_string)
        has_lower = re.search('[a-z]', input_string)
        if has_upper and has_lower:
            # Mixed case: cut before each capital, 'SomeText' -> 'Some Text'.
            input_string = re.sub('([A-Z])', r' \1', input_string).strip()
            # Separate trailing digits: 'Vector3' -> 'Vector 3'.
            input_string = re.sub('([^0-9])([0-9])', r'\1 \2', input_string)
        return input_string.split()
6fd45c9c2cfcc08d0fe1bc2485a2efacd3014406
597,978
def reverse_dict_lookup(dictionary, value_to_find):
    """Look up the key mapped to ``value_to_find`` in ``dictionary``.

    Args:
        dictionary: dictionary, the dictionary to do the reverse lookup
        value_to_find: the value to find in the dictionary

    Raises:
        KeyError if value does not exist in the dictionary
    """
    for key in dictionary:
        if dictionary[key] == value_to_find:
            return key
    raise KeyError("Could not find {} in map".format(value_to_find))
a9a2c8b6a690e19d006896a88f5022feb3fe42cc
408,958
import itertools


def pairs(a):
    """Create ordered pairs of elements of a single list, excluding pairs
    whose two members compare equal (no redundancies).

    :param a: iterable of elements.
    :return: list of 2-tuples (x, y) with x != y.
    """
    # product() is already iterable — the original wrapped it in a
    # throwaway list() before filtering.
    return [pair for pair in itertools.product(a, a) if pair[0] != pair[1]]
f2b9885663247ff81b023c79a53a1197a11177fa
99,249
import pytz


def convert_utc_to_localtime(utc_datetime, timezone_str):
    """Convert a naive UTC datetime to the given local timezone.

    If the timezone name is unknown, the original UTC datetime is
    returned unchanged.

    Args:
        utc_datetime (datetime): naive datetime expressed in UTC
        timezone_str (str): target timezone, a tz-database name

    Returns:
        timezone_dt (datetime): the converted datetime, or the untouched
        input when ``timezone_str`` is not a known timezone
    """
    if timezone_str not in pytz.common_timezones:
        return utc_datetime
    target_tz = pytz.timezone(timezone_str)
    # Attach UTC tzinfo first, then shift into the target zone.
    aware_utc = pytz.utc.localize(utc_datetime, is_dst=None)
    return aware_utc.astimezone(target_tz)
3185746161ddfd812f023bdfa74bb58bd2be9113
31,840
def inc(n):
    """Increment a number by one.

    The original used the obfuscated bitwise identity ``-~n`` (equal to
    ``n + 1`` for ints only); plain addition is clearer and also works
    for floats.
    """
    return n + 1
694ba6320b842985f87a36e452eb0f30e39442b4
8,775
import json


def read_data(config_path):
    """Read in log configuration data from config_path."""
    with open(config_path) as infile:
        data = json.load(infile)
    return data
c032e8d3a2eae24e89947aa62b72b4b94807b44a
437,282
def elide_text(text: str, max_length: int) -> str:
    """Shorten a string to at most ``max_length`` characters, ending a
    truncated string with an ellipsis.

    A string whose length is exactly ``max_length`` already fits and is
    returned untouched (the original used ``<`` and wrongly elided it).
    """
    if len(text) <= max_length:
        return text
    return f"{text[:max_length]}..."
0fc2072c53eb70e7e2ed3fb1d132a9a608c4a400
642,511
def is_true(v) -> bool:
    """
    Check if a given bool/str/int value is some form of ``True``:

     * **bool**: ``True``
     * **str**: ``'true'``, ``'yes'``, ``'y'``, ``'1'``
     * **int**: ``1``

    (note: strings are automatically .lower()'d)

    Usage:

        >>> is_true('true')
        True
        >>> is_true('no')
        False

    :param Any v: The value to check for truthfulness
    :return bool is_true: ``True`` if the value appears to be truthy,
        otherwise ``False``.
    """
    # isinstance (not "type(v) is str") so str subclasses are lowercased too.
    v = v.lower() if isinstance(v, str) else v
    return v in [True, 'true', 'yes', 'y', '1', 1]
52b86d4dcc9d48787ed4490125d20b044f6a981b
300,590
def trim_tokens_predtags(tokens, tags):
    """Remove the '[CLS]' token (position 0) and everything starting from
    the first '[SEP]' token, from both the token and tag sequences.
    """
    end = tokens.index("[SEP]")
    trimmed_tokens = tokens[1:end]
    trimmed_tags = tags[1:end]
    return trimmed_tokens, trimmed_tags
99272754ca55fc853ab2e5e0c5391af4f3d039b6
389,987
def extended_gcd(n_1, n_2):
    """Returns (bezout_a, bezout_b, gcd) using the extended euclidean
    algorithm, so that bezout_a*n_1 + bezout_b*n_2 == gcd.

    Params
        n_1: int
        n_2: int

    Returns
        bezout_a: int
        bezout_b: int
        gcd: int
    """
    x_old, x = 1, 0
    y_old, y = 0, 1
    while n_2 != 0:
        quotient = n_1 // n_2
        n_1, n_2 = n_2, n_1 % n_2
        # Carry the Bezout coefficients along with the remainders.
        x_old, x = x, x_old - quotient * x
        y_old, y = y, y_old - quotient * y
    return (x_old, y_old, n_1)
43f0c25f23d3717a0655bd640bc9fec31bb948a8
535,622
def str_parse_as_utf8(content) -> str:
    """Returns the provided content decoded as utf-8."""
    return content.decode(encoding='utf-8')
75b8d5f1f8867c50b08146cc3edc1d0ab630280a
707,464
def atmDensPoly6th(ht, dens_co):
    """Compute the atmosphere density from a polynomial fit.

    This is used in the ablation simulation for faster execution.

    Arguments:
        ht: [float] Height above sea level (m).
        dens_co: [list] Polynomial coefficients, constant term first.

    Return:
        atm_dens: [float] Atmosphere neutral mass density in kg/m^3.
    """
    # Same term order as the original so floating-point results match.
    h_km = ht/1000
    exponent = (dens_co[0] + dens_co[1]*h_km + dens_co[2]*h_km**2
                + dens_co[3]*h_km**3 + dens_co[4]*h_km**4 + dens_co[5]*h_km**5)
    return 1000*(10**exponent)
653b134d513c3fd9b55e72ee37a4c7116aadf8cf
700,634
def _split_predictions(X, y_true, y_pred, Y_prob): """Splits the data based on whether the prediction was correct.""" t_mask = y_true == y_pred f_mask = ~t_mask split_arrays = [(A[t_mask], A[f_mask]) for A in [X, y_true, y_pred, Y_prob]] return list(zip(*split_arrays))
7287db972f5714fb7fac1e3d5ac8b50b831c1a91
491,926
def write_dot(tree, fn, binary=False):
    """
    Given a tree (which may or may not contain frequencies), write a
    graphviz '.dot' file with a visual representation of the tree.

    tree nodes are expected to expose .symbol, .freq, and a two-element
    .child sequence; leaves have a non-None .symbol.
    """
    # Printable replacements for symbols that would break dot labels.
    special_ascii = {' ': 'SPACE', '\n': 'LF', '\r': 'CR', '\t': 'TAB',
                     '\\': r'\\', '"': r'\"'}

    def disp_sym(i):
        # Label for a leaf symbol: hex byte in binary mode, else the
        # (escaped) character itself.
        if binary:
            return '0x%02x' % i
        else:
            c = chr(i)
            res = special_ascii.get(c, c)
            assert res.strip(), repr(c)
            return res

    def disp_freq(f):
        # Frequency label; empty when the tree carries no frequencies.
        if f is None:
            return ''
        return '%d' % f

    with open(fn, 'w') as fo:  # dot -Tpng tree.dot -O
        def write_nd(fo, nd):
            # Emit this node's declaration, then its edges, then recurse.
            if nd.symbol is not None:  # leaf node
                a, b = disp_freq(nd.freq), disp_sym(nd.symbol)
                fo.write('  %d  [label="%s%s%s"];\n' %
                         (id(nd), a, ': ' if a and b else '', b))
            else:  # parent node
                fo.write('  %d  [shape=circle, style=filled, '
                         'fillcolor=grey, label="%s"];\n' %
                         (id(nd), disp_freq(nd.freq)))

            for k in range(2):
                if nd.child[k]:
                    fo.write('  %d->%d;\n' % (id(nd), id(nd.child[k])))

            for k in range(2):
                if nd.child[k]:
                    write_nd(fo, nd.child[k])

        fo.write('digraph BT {\n')
        fo.write('  node [shape=box, fontsize=20, fontname="Arial"];\n')
        write_nd(fo, tree)
        fo.write('}\n')
4a3d029c8beebd697fc66f771543f04fb1636efa
226,339
def add_whitespace(bounding_box: list, border: int = 5) -> list:
    """
    Add white space to an existing bounding box.

    Parameters
    ----------
    bounding_box : list
        Four corner coordinates of the cropped image without whitespace;
        the first two corners are shifted outward by -border, the last
        two by +border.
    border : int
        The amount of whitespace you want to add on all sides.

    Returns
    -------
    Bounding box with increased whitespace.
    """
    assert len(bounding_box) == 4, "Bounding box can only have 4 corners"
    offsets = (-border, -border, border, border)
    return [corner + delta for corner, delta in zip(bounding_box, offsets)]
54d94b9b858e8ebf41275c7ab0a91c8520b06b68
56,362
def nf_input_to_cl(inp):
    """Convert an input description into command line argument.

    Uses a quoted default value when one is given, otherwise a shell
    variable reference named after the input.
    """
    separator = " " if inp.get("separate") else ""
    default = inp.get("default")
    value = "'%s'" % default if default else "$%s" % inp["name"]
    return "%s%s%s" % (inp["prefix"], separator, value)
54943a85ffd0b8c7f5e8b5e5b6d5223b767e6b91
59,228
def pass_floats(output_string):
    """Parse AFNI command STDOUT output strings and return the output
    values as a list of floats.

    :type output_string: str
    :param output_string: An AFNI command standard output.

    :rtype: list
    :return: A list of float values; lines that do not parse as a single
        float (headers, warnings, blanks) are skipped.
    """
    values_list = []
    for line in output_string.splitlines():
        try:
            values_list.append(float(line))
        # float() on a str raises only ValueError; the original bare
        # "except:" would also have hidden unrelated bugs.
        except ValueError:
            pass
    return values_list
1f482069decfac59314a4864de395ffb6c4ea074
185,045
def averageGuessesFromGuessMap(guessMap: dict[int, int]) -> float:
    """Return average guesses from map using weighed sum in form
    <guesses: words>, e.g. <1:20, 3:5> returns (1*20 + 3*5) / 25 = 1.4.

    (The original docstring's example answer of 1.75 was wrong.)

    Raises ZeroDivisionError for an empty map, like the original.
    """
    weighed_sum = sum(guesses * words for guesses, words in guessMap.items())
    words_count = sum(guessMap.values())
    return weighed_sum / words_count
58d494133386915f7c7c7bc3a75becc129c7ff41
34,670
def color_rgb(r, g, b):
    """Return a "#rrggbb" color specifier string for the given red, green,
    and blue intensities, each in range(256)."""
    rgb = (r, g, b)
    return "#%02x%02x%02x" % rgb
46a592f92ddbb9c6378805f4a36ac0ee09799948
227,840
def SlashEscapePackageName(pkg_ref, unused_args, request):
    """Escapes slashes in package name for ListVersionsRequest."""
    escaped_pkg = pkg_ref.packagesId.replace("/", "%2F")
    request.parent = "{}/packages/{}".format(
        pkg_ref.Parent().RelativeName(), escaped_pkg)
    return request
46d4a20f119ca4655cf6e4f693d9c4b9eb524b1c
694,337