content: string, lengths 35 to 416k
sha1: string, lengths 40 to 40
id: int64, 0 to 710k
import torch


def max_over_grammatrix(inputs, l1, l2):
    """
    :param inputs: [T2 * T1 * B * D]
    :param l1: per-batch lengths along the T1 axis
    :param l2: per-batch lengths along the T2 axis
    :return: max-pooled features for each batch element, concatenated
    """
    batch_size = inputs.size(2)
    max_out_list = []
    for b in range(batch_size):
        b_gram_matrix = inputs[:l2[b], :l1[b], b, :]
        dim = b_gram_matrix.size(-1)
        b_max, _ = torch.max(b_gram_matrix.contiguous().view(-1, dim), dim=0)
        max_out_list.append(b_max)
    max_out = torch.cat(max_out_list, dim=0)
    return max_out
e2f4ad15a942d5222ed0607f75a1f85a6f3b98b5
23,606
def children(letter, x, y):
    """Gives the indices of the "children" of the variables describing the
    neighbours of a cell, according to the scheme described by Knuth"""
    assert letter in ["a", "b", "c", "d", "e", "f", "g"], \
        "Letter does not have children in Knuth's scheme"
    if letter == "a":
        return ("b", x, y | 1, "c", x, y)
    elif letter == "b":
        return ("d", x - (x & 2), y, "e", x + (x & 2), y)
    elif letter == "c":
        if y % 2 == 0:
            return ("f", x - (x & 1), y, "g", x, y)
        else:
            return ("f", (x - 1) | 1, y + 2, "g", x, y)
    elif letter == "d":
        return (None, x + 1, y - 1, None, x + 1, y)
    elif letter == "e":
        return (None, x - 1, y - 1, None, x - 1, y)
    elif letter == "f":
        return (None, x, y - 1, None, x + 1, y - 1)
    elif letter == "g":
        if y % 2 == 0:
            return (None, x, y + 1 - ((y & 1) << 1), None, x - 1 + ((x & 1) << 1), y - 1)
        else:
            return (None, x, y + 1 - ((y & 1) << 1), None, x ^ 1, y + 1)
3b6ea470003581a93f1a32f01228f00837cfc3b3
23,608
def in_cksum_done(s):
    """Fold and return Internet checksum."""
    while (s >> 16):
        s = (s >> 16) + (s & 0xffff)
    return (~s & 0xffff)
25011c254e89179fe4232ad0ecfa0a847bf0b30b
23,609
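A quick usage sketch for in_cksum_done above; the partial-sum value is illustrative, not from the original:

partial_sum = 0x1a2b3                  # illustrative 32-bit partial sum
checksum = in_cksum_done(partial_sum)
# fold: 0x1a2b3 -> 0x1 + 0xa2b3 = 0xa2b4; one's complement -> 0x5d4b
assert checksum == 0x5d4b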
import requests


def cvr_to_q(cvr):
    """Convert CVR to Wikidata ID.

    Parameters
    ----------
    cvr : str or int
        CVR identifier.

    Returns
    -------
    q : str or None
        Strings with Wikidata IDs. None is returned if the CVR is not found.

    Examples
    --------
    >>> cvr_to_q("10007127") == 'Q45576'
    True

    >>> cvr_to_q(10007127) == 'Q45576'
    True

    """
    query = 'select ?company where {{ ?company wdt:P1059 "{cvr}" }}'.format(
        cvr=cvr)
    url = 'https://query.wikidata.org/sparql'
    params = {'query': query, 'format': 'json'}
    response = requests.get(url, params=params)
    data = response.json()
    qs = [item['company']['value'][31:]
          for item in data['results']['bindings']]
    if len(qs) > 0:
        return qs[0]
    else:
        return None
287896d4193fd1427fdcf03f729b979488210c71
23,611
import ast


def empty_list(lineno=None, col=None):
    """Creates the AST node for an empty list."""
    return ast.List(elts=[], ctx=ast.Load(), lineno=lineno, col_offset=col)
2def486baf5537d2754312c6234a7908c4aa46dd
23,612
import numpy


def participation_index(W, Ci):
    """
    based on participation_coefficient.m from MATLAB Brain Connectivity Toolbox

    W: adjacency matrix
    Ci: community labels
    """
    ## n=length(W); %number of vertices
    n = len(Ci)
    ## Ko=sum(W,2); %(out)degree
    Ko = numpy.sum(W, 1)
    ## Gc=(W~=0)*diag(Ci); %neighbor community affiliation
    Gc = (W > 0).dot(numpy.diag(Ci))
    ## Kc2=zeros(n,1); %community-specific neighbors
    Kc2 = numpy.zeros(n)
    ## for i=1:max(Ci);
    ##     Kc2=Kc2+(sum(W.*(Gc==i),2).^2);
    ## end
    # note: [1:] skips the lowest community label, mirroring the MATLAB loop
    # over 1:max(Ci) when labels start at 0
    for i in numpy.unique(Ci)[1:]:
        Kc2 = Kc2 + (numpy.sum(W * (Gc == i), 1) ** 2)
    ## P=ones(n,1)-Kc2./(Ko.^2);
    P = numpy.ones(n) - Kc2 / (Ko ** 2)
    P[Ko == 0] = 0  # %P=0 for nodes with no (out)neighbors
    return P
75110a1b5792d175d319407c46b2b29109dfe3fc
23,613
import torch


def quat2rotmat(quat: torch.Tensor) -> torch.Tensor:
    """Form a rotation matrix from a unit length quaternion.

    Note that quaternion ordering is 'wxyz'.
    """
    assert quat.shape[-1] == 4
    qw, qx, qy, qz = quat[..., 0], quat[..., 1], quat[..., 2], quat[..., 3]
    qx2, qy2, qz2 = qx * qx, qy * qy, qz * qz

    # Form the matrix
    R00 = 1. - 2. * (qy2 + qz2)
    R01 = 2. * (qx * qy - qw * qz)
    R02 = 2. * (qw * qy + qx * qz)

    R10 = 2. * (qw * qz + qx * qy)
    R11 = 1. - 2. * (qx2 + qz2)
    R12 = 2. * (qy * qz - qw * qx)

    R20 = 2. * (qx * qz - qw * qy)
    R21 = 2. * (qw * qx + qy * qz)
    R22 = 1. - 2. * (qx2 + qy2)

    R0 = torch.stack([R00, R01, R02], dim=-1)
    R1 = torch.stack([R10, R11, R12], dim=-1)
    R2 = torch.stack([R20, R21, R22], dim=-1)
    mat = torch.stack([R0, R1, R2], dim=-2)
    return mat
d27590acc4124ef3b8e1c322c6d2faf8ceecb321
23,614
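A sanity check for quat2rotmat above: the identity quaternion (w=1, in 'wxyz' order) should map to the 3x3 identity matrix.

q = torch.tensor([1., 0., 0., 0.])  # identity rotation
assert torch.allclose(quat2rotmat(q), torch.eye(3))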
import argparse


def get_options(cmd_args=None):
    """ Argument Parser. """
    parser = argparse.ArgumentParser(
        prog='generateParkingAreaRerouters.py', usage='%(prog)s [options]',
        description='Generate parking area rerouters from the parking area definition.')
    parser.add_argument(
        '-a', '--parking-areas', type=str, dest='parking_area_definition', required=True,
        help='SUMO parkingArea definition.')
    parser.add_argument(
        '-n', '--sumo-net', type=str, dest='sumo_net_definition', required=True,
        help='SUMO network definition.')
    parser.add_argument(
        '--max-number-alternatives', type=int, dest='num_alternatives', default=10,
        help='Rerouter: max number of alternatives.')
    parser.add_argument(
        '--max-distance-alternatives', type=float, dest='dist_alternatives', default=500.0,
        help='Rerouter: max distance for the alternatives.')
    parser.add_argument(
        '--min-capacity-visibility-true', type=int, dest='capacity_threshold', default=25,
        help='Rerouter: parking capacity for the visibility threshold.')
    parser.add_argument(
        '--max-distance-visibility-true', type=float, dest='dist_threshold', default=250.0,
        help='Rerouter: parking distance for the visibility threshold.')
    parser.add_argument(
        '--processes', type=int, dest='processes', default=1,
        help='Number of processes spawned to compute the distance between parking areas.')
    parser.add_argument(
        '-o', type=str, dest='output', required=True,
        help='Name for the output file.')
    parser.add_argument(
        '--tqdm', dest='with_tqdm', action='store_true',
        help='Enable TQDM feature.')
    parser.set_defaults(with_tqdm=False)
    return parser.parse_args(cmd_args)
a2a622e61a32452d2adb12b25aff61fb33c32733
23,617
def flip_transpose(arr):
    """ Flip a 2D-list (i.e. transpose). """
    m = len(arr)
    n = len(arr[0])
    # the result has n rows and m columns; the original sizing only worked
    # for square inputs
    res = [[-1 for _ in range(m)] for _ in range(n)]
    for i in range(m):
        for j in range(n):
            res[j][i] = arr[i][j]
    return res
ad0bb8b1e3a67cc0323425b5cba26d66665336e0
23,618
from typing import Dict
from typing import Union


def filter_level0(
        pairs: Dict[str, str]) -> Dict[str, Union[str, Dict[str, str]]]:
    """ Filter out key, value pairs of form parent.child: value"""
    result = {}
    for k, v in pairs.items():
        key_parts = k.split('.')
        if len(key_parts) > 1:
            # has form parent.children
            parent = key_parts[0]
            # create the new key
            subkey = '.'.join(key_parts[1:])
            parent_dict: Dict[str, str] = result.get(parent, {})  # type: ignore
            parent_dict[subkey] = v
            result[parent] = parent_dict  # type: ignore
        else:
            result[k] = v
    return result
a8aa3ee0b2a9e7714ee4dcf7bc1f505702e77d27
23,619
def _mean(items):
    """Return average (aka mean) for sequence of items."""
    return sum(items) / len(items)
49fd8e1b42e3b454103074b512e32c1c443f974f
23,620
import shutil
import os


def unpack_smplh(smplh_dir):
    """`smplx.create` expects pkl files to be in `smplh` directory"""
    # unpack `smplh.tar.xz`
    target = smplh_dir / "smplh"
    archive_file = smplh_dir / "smplh.tar.xz"
    if not target.exists():
        target.mkdir(exist_ok=True)
        shutil.unpack_archive(archive_file, extract_dir=target)
        for gender in ["male", "female", "neutral"]:
            subdir = target / gender
            for model_loc in subdir.glob("*.npz"):
                smplx_expects = target / f"SMPLH_{gender.upper()}.npz"
                os.rename(model_loc, smplx_expects)
                print(f"{model_loc} -> {smplx_expects}")
    return [p for p in target.glob("*.npz")]
0a36312795acfa90bd73a643b809769b013b4b4f
23,621
import logging


def safe_get(ds, key):
    """Safely gets the tag value if present from the Dataset and logs failure.

    The safe get method of dict works for str, but not the hex key. The added
    benefit of this function is that it logs the failure to get the keyed value.

    Args:
        ds (Dataset): pydicom Dataset
        key (hex | str): Hex code or string name for a key.
    """
    try:
        return ds[key].value
    except KeyError as e:
        logging.error("Failed to get value for key: {}".format(e))
        return ""
15d3640410e7af9924fba512ca1c6da83a26b9f0
23,622
import random


def rand_pl(m: int) -> int:
    """
    Random integer distributed by a power law in the limit of the parameter m

    E.g.:
        With m = 2
            returns 1 80% of the time
            returns 2 20% of the time

        With m = 3
            returns 1 73.47% of the time
            returns 2 18.37% of the time
            returns 3  8.16% of the time
    """
    weight = (i ** -2 for i in range(1, m + 1))
    chs = random.choices(range(1, m + 1), tuple(weight))
    return chs[0]
ab48cf84ba3cf1d62ffcac0d2e702e7936d573b2
23,623
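A quick empirical check of rand_pl above; the sample size and tolerance are illustrative choices, and the counts are approximate by nature:

from collections import Counter

counts = Counter(rand_pl(2) for _ in range(10_000))
# with m=2 the weights are 1 and 1/4, so roughly 80% of draws should be 1
assert 0.75 < counts[1] / 10_000 < 0.85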
def createWhereCondition(attributes):
    """
    Creates the where portion of filtering conditions.
    (So far, can only be used reliably for PlayerStats,
    but, it should work for most tables.)
    :param attributes: dict
    :return: str
    NEEDS AGGREGATION
    """
    where = " WHERE "
    orderBy = ""
    if len(attributes):
        keys = []
        for key, value in attributes.items():
            key = key.split("/")  # split on '/'
            if len(key) == 1:
                per = ""
            else:
                per = key[1]
            key = key[0]
            if key in ("Pos", "name"):
                where += " " + key + " LIKE $$" + value + "%$$ "
                keys.append(key)
            elif key == "teamID":
                where = ", TeamInfo as t " + where
                where += " t.team_name LIKE $$" + value + "%$$ AND t.team_id = teamID "
                keys.append(key)
            elif key == "start_year":
                where += " year >= " + value + " "
            elif key == "end_year":
                where += " year <= " + value + " "
            elif per == "G":
                where += " " + key + " / G::numeric >= " + value + " "
                keys.append(key + " / G::numeric")
            elif per == "36":
                where += " " + key + " / MP::numeric * 36 >= " + value + " "
                keys.append(key + " / MP::numeric * 36")
            elif key != "csrfmiddlewaretoken" and key != "sort":
                where += " " + key + " >= " + value + " "
                keys.append(key)
            where += "AND"
            if key == "sort":
                where = where[:-3]
                if value == "TRUE":
                    orderBy += " ORDER BY "
        where = where[:-4] + orderBy
        if orderBy == " ORDER BY ":
            for key in keys:
                where += key + " DESC, "
            where = where[:-2] + ";"
    return where
b175cd287dbcade622a8b47b700c7a6e0ab02234
23,624
def format_duration(secs):
    """
    >>> format_duration(0)
    '00:00'
    >>> format_duration(1)
    '00:01'
    >>> format_duration(100)
    '01:40'
    >>> format_duration(10000)
    '02:46:40'
    >>> format_duration(1000000)
    '277:46:40'
    >>> format_duration(0.0)
    '00:00.000'
    >>> format_duration(0.5)
    '00:00.500'
    >>> format_duration(12345.6789)
    '03:25:45.679'
    >>> format_duration(-1)
    '-00:01'
    >>> format_duration(-10000)
    '-02:46:40'
    """
    if secs < 0:
        return '-' + format_duration(-secs)
    else:
        s = int(secs) % 60
        m = int(secs) // 60 % 60
        h = int(secs) // 60 // 60
        res = ':'.join('%02.0f' % x for x in (
            [m, s] if h == 0 else [h, m, s]
        ))
        if isinstance(secs, float):
            ms = round(secs % 1, 3)
            res += ('%.3f' % ms)[1:]
        return res
444a65d11f54b090f9d03252c3720ef2dabb3062
23,625
import os
import shutil


def form_results(results_path='./Results', model_type=None, dataset=None):
    """
    Forms folders for each run to store the tensorboard files, saved models
    and the log files.
    :return: two strings pointing to the tensorboard and log paths respectively.
    """
    if not os.path.exists(results_path):
        os.mkdir(results_path)
    folder_name = "/{0}_{1}_model".format(model_type, dataset)
    tensorboard_path = results_path + folder_name + '/Tensorboard'
    log_path = results_path + folder_name + '/log'
    if os.path.exists(results_path + folder_name):
        shutil.rmtree(results_path + folder_name)
    if not os.path.exists(results_path + folder_name):
        os.mkdir(results_path + folder_name)
        os.mkdir(tensorboard_path)
        os.mkdir(log_path)
    return tensorboard_path, log_path
107803807d37f427fdc7340a7d365e529c616f9a
23,626
from typing import Optional


def fmac_cisco(mac: str) -> Optional[str]:
    """
    Given a string representation of a MAC address in a common format, return
    it in Cisco format.
    """
    # Fast-like remove ":", ".", and "-" in one go
    mac = mac.translate({58: None, 45: None, 46: None}).lower()
    if len(mac) != 12:
        return None
    return f"{mac[:4]}.{mac[4:8]}.{mac[8:12]}"
4c1eb0a0f2b5dcf715c2653a224a18ecf979ac4b
23,627
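A usage sketch for fmac_cisco above; the MAC values are illustrative:

assert fmac_cisco("00:1A:2B:3C:4D:5E") == "001a.2b3c.4d5e"
assert fmac_cisco("00-1a-2b-3c-4d-5e") == "001a.2b3c.4d5e"
assert fmac_cisco("not-a-mac") is None   # wrong length after stripping separators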
def plusminus(n):
    """Get a number of +s or -s corresponding to n's value.

    If n == 0, returns "".

    Arg:
        n: An int

    Returns:
        A str, possibly empty.
    """
    return ("-", "+")[n > 0] * abs(n)
26ddf2f90fff6ad3a24aca5ff8e3d3e26c2a7a2e
23,628
def get_console_domain(region: str) -> str:
    """
    Get the domain for the AWS management console based on the region
    """
    if region.startswith('us-gov'):
        return "console.amazonaws-us-gov.com"
    if region.startswith('cn'):
        return "console.amazonaws.cn"
    if region.startswith('us-iso'):
        raise ValueError("AWS ISO regions are not supported")
    return "console.aws.amazon.com"
05c3f313616b4d71a59e4d0bdf5151e509939d87
23,629
def world_point_to_pixel(x, y, intr):
    """
    Feed a point through the OpenCV distortion model to get its pixel coordinates.
    :param x: x coordinate of the normalized world point
    :param y: y coordinate of the normalized world point
    :param intr: camera intrinsics (fx, fy, cx, cy, k1-k3, p1, p2)
    :return: (u, v) pixel coordinates
    """
    r2 = x * x + y * y
    radial_distort = (1 + intr.k1 * r2 + intr.k2 * r2 * r2 + intr.k3 * r2 * r2 * r2)
    x_distort = x * radial_distort + (2 * intr.p1 * x * y + intr.p2 * (r2 + 2 * x * x))
    y_distort = y * radial_distort + (intr.p1 * (r2 + 2 * y * y) + 2 * intr.p2 * x * y)
    return intr.fx * x_distort + intr.cx, intr.fy * y_distort + intr.cy
debb763ef22970a6744fe8cd91df3f19e8c7ce4b
23,631
def _translate_virVcpuState(state):
    """
    Return human readable virtual vcpu state string.
    """
    states = {}
    states[0] = 'Offline'
    states[1] = 'Running'
    states[2] = 'Blocked'
    states[3] = 'Last'
    return states[state]
1e65b55b946413e0a684f37cfd83fc2bf7f51761
23,632
import re


def IsCommitPosition(regex_match):
    """Checks if match is correct revision(Cp number) format."""
    matched_re = re.match(r'^[0-9]{6}$', regex_match)
    if matched_re:
        return True
    return False
b659215fb5107a5303b91d1d9fe9f4f368a1c280
23,633
def circulation_patron_exists(patron_pid):
    """Check if user exists."""
    return True
0617d16e19269ce62b44bb341e857ffe67da364d
23,634
import re


def build_doc_index(doc):
    """ Given a document string, construct index.

    Args:
        doc - a string to be indexed

    Returns:
        a dictionary with key being each distinct word in the doc string
        and value being a list of positions where this word occurs
    """
    doc = doc.lower()
    index = {}
    overallIndex = 0
    splitRegex = r'(\W+)'
    for word in re.split(splitRegex, doc):
        if (len(word) >= 2 and not re.match(splitRegex, word)):
            if word in index:
                index[word].append(overallIndex)
            else:
                index[word] = [overallIndex]
        overallIndex += len(word)
    return index
5fa34a3b9978b40d846e4a6d3da60c5a7da0633d
23,636
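A worked example for build_doc_index above; the sample sentence is illustrative:

index = build_doc_index("to be or not to be")
# positions are character offsets into the lowercased string;
# one-character tokens and separator runs are skipped but still advance the offset
assert index == {"to": [0, 13], "be": [3, 16], "or": [6], "not": [9]}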
from dateutil import tz
from datetime import datetime


def tofrom_utc(timestamp, parseformat, from_utc=True):
    """
    Convert a timestamp to/from UTC time

    :param str timestamp: Date/time to modify
    :param str parseformat: Format of the timestamp to parse
    :param bool from_utc: True if source stamp is UTC; otherwise False
    :return: Converted timestamp
    :rtype: str
    """
    utc_zone = tz.tzutc()
    local_zone = tz.tzlocal()

    time_obj = datetime.strptime(timestamp, parseformat)
    new_time = time_obj.replace(tzinfo=(local_zone, utc_zone)[from_utc])
    new_time = new_time.astimezone((utc_zone, local_zone)[from_utc])
    return new_time.strftime(parseformat)
feaf28653500bf9df58f73e86d19690098f1951d
23,637
def is_list_like(value):
    """Whether value is tuple or list"""
    return isinstance(value, (tuple, list))
71bbb42fff718e5f8ce7186116941cd9a6cd6465
23,639
def reorder_array(array, old_ids, new_ids):
    """Reorders a numpy array based on two lists of ids.

    The old_ids contains the ids of the elements currently in array.
    new_ids is the desired order. Elements and sizes must match!
    """
    if type(old_ids) not in [list, tuple]:
        old_ids = list(old_ids)  # Needs to have the index method
    permut = [old_ids.index(ni) for ni in new_ids]
    return array[permut]
365789b959a785985c0e4d071c674069a2221949
23,640
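A usage sketch for reorder_array above; the arrays and ids are illustrative:

import numpy as np

arr = np.array([10, 20, 30])
old_ids = ["a", "b", "c"]
new_ids = ["c", "a", "b"]
assert (reorder_array(arr, old_ids, new_ids) == np.array([30, 10, 20])).all()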
import os
import subprocess


def get_program_dir(program):
    """Get directory path of the external program.

    :param program: name of program, e.g. 'ls' or 'cat'
    :returns: None if it wasn't found, '/path/to/it/' if found
    """
    devnull = open(os.devnull, 'w')
    try:
        path = subprocess.check_output(["which", program], stderr=devnull)
        return os.path.dirname(path.strip())
    except subprocess.CalledProcessError:
        return None
25be5c66b505ab622ca0702fbe1d486a107c20f9
23,642
from unittest.mock import Mock


def fake_oauth_token(token, token_secret):
    """Return a mock OAuth token object."""
    return Mock(key=token, secret=token_secret)
608d2aaf1163f35091b9f221043fab2d16af7fb3
23,643
def calc_sum_mem_dict(mem_dict):
    """
    Calculates sum of values stored in memory.

    :param mem_dict: dictionary with memory address as key and stored value as value
    :return: int
    """
    return sum(mem_dict.values())
de0ac4f2fc5f04d7e2e1bd8edd73fef2fd5f0b50
23,644
def get_weights(cbf):
    """Retrieve the latest gain corrections and their corresponding update times."""
    weights, times = {}, {}
    for sensor_name in cbf.sensor:
        if sensor_name.endswith('_gain_correction_per_channel'):
            sensor = cbf.sensor[sensor_name]
            input_name = sensor_name.split('_')[1]
            reading = sensor.get_reading()
            weights[input_name] = reading.value
            times[input_name] = reading.timestamp
    return weights, times
84887af7eda90fccb46242051a0f5b1d91a8e983
23,645
def allowed_image(filename):
    """
    Check each uploaded image to ensure a permissible filetype and filename

    Takes full filename as input, returns boolean if filename passes the check
    """
    allowed_img_ext = ["JPEG", "JPG", "HEIC"]

    # Ensure file has a . in the name
    if not "." in filename:
        return False

    # Split the extension and the file name
    exten = filename.rsplit(".", 1)[1]

    # Check if the extension is in ALLOWED_IMAGE_EXTENSIONS
    if exten.upper() in allowed_img_ext:
        return True
    else:
        return False
6dfdd37587ffa7abd98209c2cb965e78d808c229
23,646
def merge(default, config):
    """ Override default dict with config dict. """
    merged = default.copy()
    merged.update({
        k: v for k, v in config.items()
        if v and v != 'prompt'})
    return merged
be204000174ea69007b536a2309b56b144cde5be
23,648
def batch_checker(predicted_quizzes):
    """
    Checks each quiz in the batch and reports whether it is right or wrong.

    Parameter
    ---------
    predicted_quizzes (np.array), shape (?, 9, 9)

    Return
    ------
    checked_quizzes : list of True or False for each quiz in predicted_quizzes
    """
    checked_quizzes = []
    for quiz in predicted_quizzes:
        right = True
        for i in range(9):
            for j in range(9):
                # a digit repeated in a row or a column makes the quiz wrong
                if list(quiz[i]).count(j + 1) > 1 or list(quiz.T[i]).count(j + 1) > 1:
                    right = False
                    break
            if not right:
                break
        checked_quizzes.append(right)
    return checked_quizzes
c86ae0c00cc5828b65ea991cf05e0274b8b676f6
23,649
import logging
import yaml


def get_yaml_config(config_file):
    """Return configuration from YAML file.

    :param config_file: Configuration file name
    :type config_file: string
    :returns: Dictionary of configuration
    :rtype: dict
    """
    # Note in its original form get_mojo_config it would do a search pattern
    # through mojo stage directories. This version assumes the yaml file is in
    # the pwd.
    logging.info('Using config %s' % (config_file))
    return yaml.safe_load(open(config_file, 'r').read())
07d90588b753a8ddecbca681a94f7ef5ca25fc27
23,650
def cut_list(listing):
    """creates another list of words between ".." and nests it at the original place"""
    anf = []
    end = []
    for i in range(len(listing)):  # finds where ".. begins and ends .."
        if len(listing[i]) > 1:  # used to handle single "
            if listing[i][0] == '"' and listing[i][len(listing[i]) - 1] != '"':
                anf.append(i)
            elif listing[i][0] != '"' and listing[i][len(listing[i]) - 1] == '"':
                end.append(i + 1)
        elif listing[i] == '"':
            anf.append(i)
    # print(anf)
    # print(end)
    for i in reversed(range(len(anf))):  # iterates through anf from the back to avoid shifting of numbers in list
        nested_list = listing[anf[i]:end[i]]  # creates a list of ".."
        del listing[anf[i]:end[i]]
        listing.insert(anf[i], nested_list)  # insert that list into the first list
    return listing
415c3af8cc66961589cb85f26e60242d043e0d29
23,651
def get_player():
    """
    get_player is a function which takes the top 100 players of team and solo
    and concatenates them into a bigger list. It only accepts one name once in
    case a player plays both. Then we are sending the list to the input field
    for the user to select players.
    :return: a list of all available players
    """
    # getting players from team chess
    with open('top100players_ffa.txt') as f1:
        teams = f1.read().splitlines()
    # getting players from solo chess
    with open('top100players_solo.txt') as f2:
        solo = f2.read().splitlines()
    # adding both lists
    list_of_players_available = teams + solo
    # to delete duplicates
    list_of_players_available = list(dict.fromkeys(list_of_players_available))
    return list_of_players_available
2a20ab01242cd75446812874dd4862638c37fbed
23,654
def parse_nx(nx_object, directed):
    """
    Core parser for networkx objects

    Args:
        nx_object: a networkx graph
        directed: whether the graph is directed
    """
    return (nx_object, None)
b973e73bcdbd3d98ca96d5dcebc17cb4e92ad600
23,655
def avg(vals, count=None):
    """
    Returns the average value

    Args:
        vals: List of numbers to calculate average from.
        count: Int of total count that vals was part of.

    Returns:
        Float average value throughout a count.
    """
    total = 0
    for v in vals:
        total += v
    if count is None:
        count = len(vals)
    return float(total) / count
dbb7d7d9cacb635b702c842aeaeb55194f7fcb50
23,656
def sum_swap_sorted(a, b):
    """O(A + B) time and O(1) space."""
    if len(a) == 0:
        raise ValueError("array a must not be empty")
    if len(b) == 0:
        raise ValueError("array b must not be empty")

    sum_a = sum(a)
    sum_b = sum(b)

    # exit if difference is odd; sums cannot be balanced!
    if (sum_a - sum_b) % 2 != 0:
        return None
    target = int((sum_a - sum_b) / 2)

    i = 0
    j = 0
    while i < len(a) and j < len(b):
        diff = a[i] - b[j]
        if diff == target:
            return a[i], b[j]
        if diff > target:
            j += 1
        else:
            i += 1
    return None
717922fb2f62ce120f62c36738cf63d80bd1f926
23,658
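A worked example for sum_swap_sorted above; swapping the returned pair balances the two sums (the input arrays are illustrative and sorted, as the function expects):

a = [1, 1, 2, 2, 4]   # sum 10
b = [3, 3, 6]         # sum 12
# target = (10 - 12) / 2 = -1, satisfied by the pair (2, 3)
assert sum_swap_sorted(a, b) == (2, 3)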
def north_south_drift(lat, lon):
    """North south trend depending linearly on latitude."""
    return lat
f716c353b947f2d7079d7da663e46da5da9f0cab
23,659
import os


def normpath(path):
    """Normalize a path.

    Parameters
    ----------
    path : str
        The path to normalize.

    Returns
    -------
    npath : str
        The normalized path.
    """
    if "~" in path:
        out = os.path.abspath(os.path.expanduser(path))
    else:
        out = os.path.abspath(path)
    return out
ea051cada6fd855e72b6f36a892bbdf04fe11463
23,660
def format_file_name(input_file, output_file=None):
    """
    Determine the name of the file to write to disk.

    If the user has specified an output file name and extension, use this to
    write the file. If they haven't, append "_no_grave" onto the name of the
    input file and add the .h5m extension.

    Input:
    ______
    input_file: str
        User supplied data file location.
    output_file: str
        Optional user supplied output file name and extension.

    Returns:
    ________
    file_name: str
        The name of the file to write to disk.
    """
    if output_file is None:
        input_list = input_file.split("/")
        file_name = '.'.join(input_list[-1].split(".")[:-1])
        output_file = file_name + "_no_grave.h5m"
    return output_file
bd13b2d3b5957df67f1a7d14a6189a12fe4ba3e8
23,661
def _convert_to_minutes(time_in):
    """
    :in: time_in (str) HH:MM format; UTC
    :out: _minutes_out (int) minutes from midnight that day (00:00)
    """
    hours_str, minutes_str = time_in.split(':')
    hours, minutes = int(hours_str), int(minutes_str)
    return hours * 60 + minutes
4713194b337857fa98660beee5c18359b3cf6701
23,662
async def combine_records(record_a, record_b, join_fields=None):
    """
    Combines unique information from two records into 1.

    Args:
        record_a (``dictionary``): New airtable record.
        record_b (``dictionary``): Old airtable record (This will dictate the ``id``)
    Kwargs:
        join_fields (``list``, optional): list of fields(``string``) to combine.
    Returns:
        record (``dictionary``): If succesful, the combined ``record``, else ``record_a``.
    """  # noqa
    try:
        record = {"id": record_b["id"], "fields": {}}
        if join_fields:
            keys = join_fields
        else:
            keys = record_a["fields"]
        for key in keys:
            field = record_a["fields"][key]
            if isinstance(field, list):
                field = record_a["fields"][key]
                for item in record_b["fields"][key]:
                    if item not in record_a["fields"][key]:
                        field.append(item)
            elif isinstance(field, str):
                field = (
                    record_a["fields"][key] + ", " + record_b["fields"][key]
                )
            elif isinstance(field, int) or (
                isinstance(field, float) or isinstance(field, tuple)
            ):
                field = record_a["fields"][key] + record_b["fields"][key]
            record["fields"][key] = field
        return record
    except Exception:
        return record_a
324d6683870b87bed54f2ec94b1d7363b99d295e
23,664
def expand(self, nrepeat="", hindex="", icsys="", sctang="", phase="", **kwargs):
    """Displays the results of a modal cyclic symmetry analysis.

    APDL Command: EXPAND

    Parameters
    ----------
    nrepeat
        Number of sector repetitions for expansion. The default is 0 (no
        expansion).

    modal
        Specifies that the expansion is for a modal cyclic symmetry analysis.

    hindex
        The harmonic index ID for the results to expand.

    icsys
        The coordinate system number used in the modal cyclic symmetry
        solution. The default is the global cylindrical coordinate system
        (specified via the CSYS command where KCN = 1).

    sctang
        The sector angle in degrees, equal to 360 divided by the number of
        cyclic sectors.

    --
        This field is reserved for future use.

    phase
        The phase angle in degrees to use for the expansion. The default is
        0. Typically, the value is the peak displacement (or stress/strain)
        phase angle obtained via the CYCPHASE command.

    Notes
    -----
    Issue this command to display the results of a modal cyclic symmetry
    analysis.

    When you issue the EXPAND,Nrepeat command, subsequent SET commands read
    data from the results file and expand them to Nrepeat sectors. As long as
    no entities have been modified, this expansion can be negated (that is,
    reverted to single sector) by issuing EXPAND with no arguments. If you
    modify entities and wish to return to the partial model, use the Session
    Editor (see Restoring Database Contents in the Operations Guide).

    EXPAND displays the results and allows you to print them, as if for a
    full model. The harmonic index (automatically retrieved from the results
    file) appears in the legend column.

    When plotting or printing element strain energy (SENE), the EXPAND
    command works with brick or tet models only. Element kinetic energy
    (KENE) plotting or printing is not supported.

    EXPAND is a specification command valid only in POST1. It is
    significantly different from the /CYCEXPAND command in several respects,
    (although you can use either command to display the results of a modal
    cyclic symmetry analysis):

    EXPAND has none of the limitations of the /CYCEXPAND command.

    EXPAND changes the database by modifying the geometry, the nodal
    displacements, and element stresses as they are read from the results
    file, whereas the /CYCEXPAND command does not change the database.

    Caution:: : The EXPAND command creates new nodes and elements; therefore,
    saving (or issuing the /EXIT, ALL command) after issuing the EXPAND
    command can result in large databases.

    Distributed ANSYS Restriction: This command is not supported in
    Distributed ANSYS.
    """
    command = f"EXPAND,{nrepeat},{hindex},{icsys},{sctang},{phase}"
    return self.run(command, **kwargs)
56045aa943d0e0ad14c3262430e52debe874bb28
23,665
def part1_and_2(lines, draw_diagonal=False):
    """
    Part1: Consider only horizontal and vertical lines.
    Part2: Consider horizontal, vertical, *and* diagonal lines.
           All diagonal lines will be exactly 45 degrees
    At how many points do at least two lines overlap?
    """
    # create the empty graph
    graph = dict()
    for y in range(0, 1000):
        graph[y] = [0 for x in range(1000)]

    # draw lines:
    for line in lines:
        x1, y1, x2, y2 = line[0], line[1], line[2], line[3]
        # vertical line:
        if x1 == x2:
            for i in range(min(y1, y2), max(y1, y2) + 1):
                graph[i][x1] += 1
        # horizontal line:
        elif y1 == y2:
            for i in range(min(x1, x2), max(x1, x2) + 1):
                graph[y1][i] += 1
        # everything else must be a diagonal line:
        elif draw_diagonal:
            if x1 > x2:
                # ensure x increases from x1 to x2
                x1, y1, x2, y2 = line[2], line[3], line[0], line[1]
            while x1 <= x2:
                graph[y1][x1] += 1
                x1 += 1
                if y1 < y2:
                    # downhill slope
                    y1 += 1
                else:
                    # uphill slope
                    y1 -= 1

    # count the number of crossing lines
    crossing_lines = 0
    for y in graph:
        for spot in graph[y]:
            if spot > 1:
                crossing_lines += 1
    return crossing_lines
92501f82ab84a0b7dcbbc13b74ac5a8189fdf518
23,666
from typing import Optional
from typing import Union
from typing import Tuple


def _normalize_keep(keep: Optional[Union[str, Tuple[Optional[str], Optional[str]]]]) -> Tuple[Optional[str], Optional[str]]:
    """Convert a value passed for the 'keep' parameter to a normalized form."""
    if keep is None:
        return (None, None)
    elif isinstance(keep, (str, bytes)):
        if keep == 'first' or keep == 'last':
            return (keep, keep)
        else:
            raise ValueError(f"Unsupported value '{keep}' passed for the `keep` parameter.")
    elif isinstance(keep, tuple):
        if len(keep) == 2:
            return keep
        else:
            raise ValueError(f"Invalid tuple (length={len(keep)}) passed for the `keep` parameter.")
    else:
        raise ValueError(f"Invalid argument value passed for the `keep` parameter.")
4e91c1c25ffab6b0488e37105d1a2ca5d5b3f849
23,667
def concatranknames(group):
    """helper function"""
    group['autnames'] = "%s" % ', '.join(group['namerank'][group['country'] == 'AUT'])
    return group
1999aa2473cc14816d26d2283a466de38652859a
23,668
import sys
import argparse


def parse_args():
    """Parse command line arguments"""
    if not sys.argv[1:]:
        sys.argv.append('-h')
    parser = argparse.ArgumentParser(description='parse command line options for input')
    # input files
    parser.add_argument('-x', '--xlsx',
                        help='xlsx template for input with links to DMP-csv files')
    args = parser.parse_args()
    return args
933268db5c790350f4eaf91000f83694cd47e640
23,669
def validate_dict(in_dict, **kwargs):
    """
    Returns Boolean of whether given dict conforms to type specifications
    given in kwargs.
    """
    if not isinstance(in_dict, dict):
        raise ValueError('requires a dictionary')
    for key, value in kwargs.items():
        if key == 'required':
            for required_key in value:
                if required_key not in in_dict:
                    return False
        elif key not in in_dict:
            continue
        elif value == bool:
            in_dict[key] = (True
                            if str(in_dict[key]).lower() == 'true'
                            else False)
        else:
            if (isinstance(in_dict[key], list) and
                    len(in_dict[key]) == 1 and value != list):
                in_dict[key] = in_dict[key][0]
            try:
                if key in in_dict:
                    in_dict[key] = value(in_dict[key])
            except ValueError:
                return False
    return True
2b816a440437272c1c84b8abf55d78ddbea29912
23,670
import copy


def copy_face_features(feats: list):
    """
    Performs deep copy of feats

    :param feats: list of features
    :return: deep-copied features
    """
    return copy.deepcopy(feats)
47c37b528bbb63fe8d123bb8ebcf619d033ee310
23,671
def _get_dict_from_longtolongmap(proto_map):
    """
    Convert the ProtoLongToLongMap_pb2 type to a simple dict.
    """
    if len(proto_map.keys.elements) != len(proto_map.values.elements):
        raise IndexError('array length mismatch')
    new_dict = {}
    for key, value in zip(proto_map.keys.elements, proto_map.values.elements):
        new_dict[key] = value
    return new_dict
07502282c5d000a74d0b24eace4dffcbe3dd81ae
23,672
import torch


def gram_matrix(x):
    """
    Calculates the Gram matrix for the feature maps contained in x.

    Parameters:
        x: feature maps

    Returns:
        G: gram matrix
    """
    b, c, h, w = x.size()
    F = x.view(b, c, h * w)
    G = torch.bmm(F, F.transpose(1, 2))
    G.div_(h * w)
    return G
69789d925fcd84d3d9dff93f70e41c1f8ae9d3e6
23,673
def search(d, key, default=None):
    """Return the (possibly nested) dict within dictionary d that contains
    the specified key. If there is no item with that key, return default.
    """
    stack = [d]
    while stack:
        cur_d = stack[-1]
        stack.pop()
        for k, v in cur_d.items():
            if k == key:
                return cur_d
            elif isinstance(v, dict):
                stack.append(v)
    return default
cd96fda8462b4fe6904f138cc3fc25a83faca802
23,674
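A usage sketch for search above; the nested dict is illustrative:

d = {"a": 1, "b": {"c": {"target": 42}}}
assert search(d, "target") == {"target": 42}      # the enclosing dict is returned
assert search(d, "missing", default={}) == {}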
def _count_running_workers(cluster):
    """ Local replacement for the late `._count_active_workers` class method """
    return len(cluster.scheduler_info.get('workers'))
f4d23f0ac7b5ecf07b67cd0c9050c6e1d743e902
23,675
def reverse_label(single_npy):
    """
    reverse the normal/abnormal label for shanghaitech
    :return: the flipped label array, with the first and last entries dropped
    """
    reverse_npy = 1 - single_npy
    reverse_npy = reverse_npy[1:-1]
    return reverse_npy
e8529fde8cbcd13b0114d6d4d0a1d29972cc2c87
23,676
import locale
import codecs


def get_locale_codec():
    """
    Is Very Very Useful
    :return: the name of the codec for the preferred locale encoding
    """
    return codecs.lookup(locale.getpreferredencoding()).name
dae9877892856f2c5565b3bcbc3801200e94fb40
23,677
def compute_route_cost(dist, route):
    """Compute the cost of a route."""
    N = len(route)
    assert N == len(dist) + 1
    assert route[0] == route[-1]

    cost = 0
    for i in range(1, len(route)):
        u = route[i - 1]
        v = route[i]
        c = dist[u][v]
        assert c != 0
        cost += c
    return cost
8a1f8ce83ac0f2f990dbed08019fa04a97b1b725
23,678
import re


def _prefix_only_url_replace_regex(pattern):
    """
    Match urls in quotes pulling out the fields from pattern
    """
    return re.compile("""
        (?x)                  # flags=re.VERBOSE
        (?P<quote>\\\\?['"])  # the opening quotes
        {}
        (?P=quote)            # the first matching closing quote
        """.format(pattern))
ba095fa91f26a37e212d0f60639bfdd569111928
23,679
def compute_cis(series, confidence_level):
    """ Compute confidence intervals given confidence level """
    sorted_perfs = series.sort_values()
    lower_index = int(confidence_level / 2 * len(sorted_perfs)) - 1
    upper_index = int((1 - confidence_level / 2) * len(sorted_perfs)) - 1
    lower = sorted_perfs.iloc[lower_index].round(3)
    upper = sorted_perfs.iloc[upper_index].round(3)
    mean = round(sorted_perfs.mean(), 3)
    return lower, mean, upper
34451cbb9b4fb3160243a09c748ab6c1c1f8843b
23,680
def is_hello_message(message: str) -> bool:
    """Checks if a message is a hello message."""
    if "Hello" in message:
        return True
    return False
1acebc9ee74d05e3e1bb9913f68a6aaf6b48faa2
23,681
def get_markers_args_using_get_marker(node, mark_name):
    """Deprecated on pytest>=3.6"""
    return getattr(node.get_marker(mark_name), 'args', ())
7a2e0affb7d338cff60aae0bbaeb886700c06b1e
23,683
import os


def load_ids_from_text_files(directory, training_set):
    """Given a directory where raw ProteinNet records are stored along with
    .ids files, reads and returns the contents of those files. Effectively
    returns a list of IDs associated with the training, validation, and test
    sets.
    """
    with open(os.path.join(directory, f"training_{training_set}_ids.txt"),
              "r") as trainf, open(
            os.path.join(directory, "validation_ids.txt"),
            "r") as validf, open(
            os.path.join(directory, "testing_ids.txt"), "r") as testf:
        train_ids = trainf.read().splitlines()
        valid_ids = validf.read().splitlines()
        test_ids = testf.read().splitlines()
        return train_ids, valid_ids, test_ids
fe79fb7d204cceddda313ff3914f66e577ecd963
23,684
def find_output_value(name, outputs):
    """ Finds a specific output within a collection. """
    return next(
        output['value'] for output in outputs
        if output['name'] == name
    )
29ab594f969757ce9e8aab79ced58b285b9e49c2
23,686
def get_score_strings(path):
    """get scores from test output file, which lists per line the predicted
    and original scores:
    predicted: [0.3563531] - orig: 0.3676470588235294
    predicted: [0.737128] - orig: 0.7205882352941176
    """
    of = open(path)
    lines = of.readlines()
    scores = lines[:-1]  # last line is the loss, mae, mse line and has to be excluded
    pred, orig = zip(*[i.split(" - ") for i in scores])
    pred = enumerate([float((string.split("[")[1]).split("]")[0]) for string in pred])
    pred_list = list(pred)
    orig = enumerate([float(i.split(": ")[1].replace("\\n", "")) for i in orig])
    orig_list = list(orig)
    return pred_list, orig_list
3ae2262db1012824d28ae59964ec54ea37aa8dac
23,687
import random
import json


def generate_random_client(field, **kwargs):
    """
    Generate clients from 0 to 3 entries.

    If field is not required, return an empty result.

    Args:
        field: The field object.

    Keyword Arguments:
        only_required: The argument to generate only required fields.
        fulldate: A boolean to decide if must be a timestamp or time.
        index: The index that indicate the record line on CSV.

    Returns:
        A random json list with random client values.
    """
    if not field['required'] and kwargs.get("only_required"):
        return ''
    # Generate a number between 0 and 3 to define the number of clients.
    clients_number = random.randint(0, 3)
    clients = []
    # If no clients, check if it will return an empty list or empty value.
    if clients_number == 0:
        if bool(random.getrandbits(1)):
            return ''
        return json.dumps(clients)
    for i in range(clients_number):
        json_loaded = json.loads(field['format'])
        # Generate the client id and name.
        json_loaded['clientId'] = str(random.randint(999999, 99999999))
        json_loaded['name'] = 'Client Name {}'.format(i)
        clients.append(json_loaded)
    return json.dumps(clients)
c27845923e64c8e1075ff893508750c2bbb661a2
23,688
import hashlib


def get_sha256_hash(key, size=None):
    """
    Provide a SHA256 hash based on the supplied key values.

    :param key: An iterable of key values.
    :param size: The size of the returned hash. Defaults to full hash. If
        size provided is greater than the hash size the full hash is returned.

    :returns: a SHA256 hash for the key values supplied.
    """
    partition_hash = hashlib.sha256()
    for part in key:
        partition_hash.update(str(part).encode('utf-8'))
    sha256_hash = partition_hash.hexdigest()
    if not size or size > len(sha256_hash):
        size = len(sha256_hash)
    return sha256_hash[:size]
311c751d5c64eb9bef3a297760922654958d58cc
23,689
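A usage sketch for get_sha256_hash above; the key values are illustrative:

h = get_sha256_hash(["user", 42], size=8)
assert len(h) == 8
# hashing is deterministic, so the same key always yields the same prefix
assert h == get_sha256_hash(["user", 42], size=8)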
def _queue_number_order_priority(v):
    """Returns the number to be used as a comparison for priority.

    Lower values are more important. The queue priority is the lowest 31
    bits, of which the top 9 bits are the task priority, and the rest is the
    timestamp which may overflow in the task priority.
    """
    return v.queue_number & 0x7FFFFFFF
fd32678eb1984d2fcf9392467722cc43f72f64d9
23,690
def create_segment_allele_counts(segment_data, allele_data):
    """ Create a table of total and allele specific segment counts

    Args:
        segment_data (pandas.DataFrame): counts of reads in segments
        allele_data (pandas.DataFrame): counts of reads in segment haplotype blocks with phasing

    Returns:
        pandas.DataFrame: output segment data

    Input segment_counts table is expected to have columns 'chromosome',
    'start', 'end', 'readcount'.

    Input phased_allele_counts table is expected to have columns 'chromosome',
    'start', 'end', 'hap_label', 'is_allele_a', 'readcount'.

    Output table will have columns 'chromosome', 'start', 'end', 'readcount',
    'major_readcount', 'minor_readcount', 'major_is_allele_a'
    """
    # Calculate allele a/b readcounts
    allele_data = (
        allele_data
        .set_index(['chromosome', 'start', 'end', 'hap_label', 'is_allele_a'])['readcount']
        .unstack(fill_value=0)
        .reindex(columns=[0, 1])
        .fillna(0.0)
        .astype(int)
        .rename(columns={0: 'allele_b_readcount', 1: 'allele_a_readcount'})
    )

    # Merge haplotype blocks contained within the same segment
    allele_data = allele_data.groupby(level=[0, 1, 2])[['allele_a_readcount', 'allele_b_readcount']].sum()

    # Reindex and fill with 0
    allele_data = allele_data.reindex(segment_data.set_index(['chromosome', 'start', 'end']).index, fill_value=0)

    # Calculate major and minor readcounts, and relationship to allele a/b
    allele_data['major_readcount'] = allele_data[['allele_a_readcount', 'allele_b_readcount']].apply(max, axis=1)
    allele_data['minor_readcount'] = allele_data[['allele_a_readcount', 'allele_b_readcount']].apply(min, axis=1)
    allele_data['major_is_allele_a'] = (allele_data['major_readcount'] == allele_data['allele_a_readcount']) * 1

    # Merge allele data with segment data
    segment_data = segment_data.merge(allele_data, left_on=['chromosome', 'start', 'end'], right_index=True)

    return segment_data
f27b8e925d58ea70806c90ad2d3d5144e7690812
23,691
def update_trace_vector(agent, method, state, action=None):
    """Updates agent's trace vector (z) with the current state (or
    state-action pair) using the given method. Returns the updated vector."""
    assert method in ['replace', 'replace_reset', 'accumulating'], 'Invalid trace update method.'

    # Trace step
    z = agent._γ * agent._λ * agent._z

    # Update last observations components
    if action is not None:
        x_ids = agent.get_active_features(state, action)  # x(s,a)
    else:
        x_ids = agent.get_active_features(state)  # x(s)

    if method == 'replace_reset':
        for a in agent._all_actions:
            if a != action:
                x_ids2clear = agent.get_active_features(state, a)  # always x(s,a)
                for id_w in x_ids2clear:
                    z[id_w] = 0

    for id_w in x_ids:
        if (method == 'replace') or (method == 'replace_reset'):
            z[id_w] = 1
        elif method == 'accumulating':
            z[id_w] += 1

    return z
6ce4cd27e91dfca044b94c9b12a3aabdc788a5e5
23,692
def parse_state(value):
    """
    Parse state from LEA code.
    """
    return value[0:2]
8814cc94785674f411afe7ba54802891babb20a7
23,694
import math


def get_ue_sig_power(ue_ap_distance):
    """
    Function to calculate signal power between the UE and AP
    """
    # To avoid ZeroDivisionError
    if ue_ap_distance:
        distance = (10 * math.log10(1 / math.pow(ue_ap_distance, 2)))
        # discretizing the distance
        distance /= 10
        return round(distance)
1239e60153c397871e7a8b37d1b48bba39f41bee
23,695
def get_from_box_model(self, name):
    """ Property getter for all attributes that come from the box model. """
    return getattr(self._box_model, name)
183ed25a55dd9b8c0ac6b8082f7be99a1ff78c25
23,696
import re


def extract_timestamp(line):
    """Extract timestamp from log item.

    :param line: log item.
    :type line: str
    :return: timestamp or empty string
    :rtype: str
    """
    rex = r"(\d{4}\-\d\d\-\d\d\s\d\d:\d\d:\d\d[\,\d]*[\s\w]*)"
    match = re.search(rex, line)
    if match:
        return match.group(1)
    return ""
2f8efdb9bdc95bf511d2f225ab42e3e489a61677
23,698
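A usage sketch for extract_timestamp above; the log line is illustrative:

line = "2021-03-05 12:34:56,789 - starting worker"
# the trailing [\s\w]* also swallows the whitespace run up to the first '-'
assert extract_timestamp(line) == "2021-03-05 12:34:56,789 "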
import os


def get_resources_directory():
    """Get imagebuilder resources directory."""
    current_dir = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(current_dir, "..", "pcluster", "resources")
139664f6dbe540c12225625eb39e54220c7b42aa
23,699
def test_multi_plugin_sending(basicApp, EmptyPlugin):
    """
    Nearly the same test as "test_signal_handling_via_plugins", but with an
    adjustable amount of plugins for sending and receiving.

    Registers receivers before the signal gets registered itself and afterwards.
    Checks if all receivers get correctly called, if signal is sent.

    This test will normally fail, if WeakNamespace is used in groundwork/signals.py:
    from blinker import WeakNamespace as Namespace

    So use always Namespace and if needed do clean ups on groundwork site.
    """
    amount_pre_plugins = 30
    amount_post_plugins = 10
    amount_send_plugins = 30

    def create_receiver_function(start):
        def func_template(*args, **kwargs):
            return ["data_{0}_A".format(start), "data_{0}_B".format(start)]
        return func_template

    # PRE receivers
    plugin_receive_pre = []
    for i in range(0, amount_pre_plugins):
        plugin_receive_pre.append(EmptyPlugin(app=basicApp, name="Plugin_receive_pre_{0}".format(i)))
        plugin_receive_pre[i].activate()
        plugin_receive_pre[i].signals.connect("sig_reg_pre_receiver_{0}".format(i), "signal_test",
                                              create_receiver_function(i),
                                              "receiver signal_test_{0} for test".format(i))

    # Count gw internal registered receivers
    amount = 0
    for receiver in basicApp.signals.receivers.keys():
        if "sig_reg_pre_receiver" in receiver:
            amount += 1
    assert amount == amount_pre_plugins

    # Senders
    plugin_send = []
    for i in range(0, amount_send_plugins):
        plugin_send.append(EmptyPlugin(app=basicApp, name="Plugin_send_{0}".format(i)))
        plugin_send[i].activate()
        if i == 0:
            # We only need to register our signal once
            plugin_send[0].signals.register("signal_test", "signal_test")
            assert amount_pre_plugins == len(basicApp.signals.signals["signal_test"]._signal.receivers)

    # Check, if for our signal all receivers have been registered
    print("Registered receivers for signal_test: {0}".format(
        len(basicApp.signals.signals["signal_test"]._signal.receivers)))
    assert amount_pre_plugins == len(basicApp.signals.signals["signal_test"]._signal.receivers)

    # Send signal
    for index, plugin in enumerate(plugin_send):
        print("  {0} sending...".format(index))
        return_values = plugin.signals.send("signal_test")

        # check return length and content
        assert len(return_values) == amount_pre_plugins
        for i in range(0, amount_pre_plugins):
            found = False
            for value in return_values:
                if value[1][0] == "data_{0}_A".format(i) and value[1][1] == "data_{0}_B".format(i):
                    found = True
                    break
            assert found is True

    # Register POST receivers
    plugin_receive_post = []
    for i in range(0, amount_post_plugins):
        plugin_receive_post.append(EmptyPlugin(app=basicApp, name="Plugin_receive_post_{0}".format(i)))
        plugin_receive_post[i].activate()
        plugin_receive_post[i].signals.connect("sig_reg_post_receiver_{0}".format(i), "signal_test",
                                               create_receiver_function(amount_pre_plugins + i),
                                               "receiver signal_test_{0} for test".format(i))

    # Send again a signal and check return values
    for index, plugin in enumerate(plugin_send):
        print("  {0} sending again...".format(index))
        return_values = plugin.signals.send("signal_test")

        # check return length and content
        assert len(return_values) == amount_pre_plugins + amount_post_plugins
        for i in range(0, amount_pre_plugins + amount_post_plugins):
            found = False
            for value in return_values:
                if value[1][0] == "data_{0}_A".format(i) and value[1][1] == "data_{0}_B".format(i):
                    found = True
                    break
            assert found is True
6ac9a88cd4d37d79f86f057fc8f3c8b1049b5b65
23,700
from typing import Union


def is_not_selected(*args: Union[str, None]) -> bool:
    """Check if field not selected."""
    for arg in args:
        if arg in ("", None):
            return True
    return False
ece53a2dafbed06bc4635e0193305e405b68c21c
23,701
def is_unique(sentence):
    """
    1.1 Is Unique: Implement an algorithm to determine if a string has all
    unique characters. What if you cannot use additional data structures?

    Complexity: O(n) time, O(n) space
    """
    h = set([])
    for c in sentence:
        if c in h:
            return False
        h.add(c)
    return True
5c69a4217803c7ab88bb20b641dcd8726d29cb1e
23,702
import binascii


def str2bytes(s: str) -> str:
    """Converts string to hex representations.

    :param str s: A string to convert
    :return: Hexadecimal string
    :rtype: str
    """
    assert isinstance(s, str), 'Expected string got {}'.format(s)
    ss = str(binascii.hexlify(s.encode()))[1:]
    return ss.replace("'", "")
f616dc0c34208aa789acfbeff8b84baacac61487
23,703
def get_most_similar_factors(n):
    """Factorize n into two numbers.

    Returns the best pair, in the sense that the numbers are the closest to
    each other."""
    i = int(n**0.5 + 0.5)
    while n % i != 0:
        i -= 1
    # integer division is exact here because i divides n
    return i, n // i
ea45874901031a95ba103b5f6bf89c743d8f65c3
23,705
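A usage sketch for get_most_similar_factors above:

assert get_most_similar_factors(12) == (3, 4)
assert get_most_similar_factors(36) == (6, 6)
assert get_most_similar_factors(13) == (1, 13)   # primes fall back to 1 x n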
import numpy


def mp1(p, f, h):
    """Return the 1st order energy."""
    ec1 = numpy.tensordot(h, p, axes=([0, 1], [0, 1]))
    ec2 = numpy.tensordot(f, p, axes=([0, 1], [0, 1]))
    return 0.5 * (ec1 - ec2)
40bf1008a36574381e436cfef236c9845c5e225e
23,706
import click


def get_short_help_str(command, limit=45):
    """
    Gets short help for the command or makes it by shortening the long help string.
    """
    return (command.short_help or
            command.help and click.utils.make_default_short_help(command.help, limit) or
            '')
e123db3a912f1da13b7afd94fb8759a18237c36b
23,707
def countMorphemes(morphlist):
    """
    Counts the number of occurrences of each label

    :param morphlist: list of BIO labels
    :return: dictionary with the labels as keys and the number of occurrences as values
    """
    counts = {}
    for morpheme in morphlist:
        label = morpheme[0][2:]
        counts[label] = counts.get(label, 0) + 1
    return counts
6cd4aa59b7c41cc416693c3287297570b94197fe
23,710
def sort_file(fh):
    """
    sort the contents of a file handle.
    """
    lst = list(fh.readlines())
    lst.sort()
    return lst
8a8bf189e4294414024285187c66cd303dad2768
23,711
import functools


def make_html(func):
    """Wraps text in html tags"""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return '<strong>' + func(*args, **kwargs) + '</strong>'
    return wrapper
42ee87d1f33fa9aca8acf8e1751399b2a2fbc45e
23,712
def str_to_int(s):
    """
    Work around for converting str to int in numba.
    See https://github.com/numba/numba/issues/5650
    """
    result: int = 0
    final_index: int = len(s) - 1
    for i, v in enumerate(s):
        result += (ord(v) - 48) * (10 ** (final_index - i))
    return result
4192b8c9cb7930778d04a6e15da7967c210e1a96
23,713
def has_no_end_date(effect):
    """ has no end date"""
    return not effect.instance.end
e6023fc531f38069f01d41c2b53e49f676700f2b
23,715
from typing import Union
from typing import Dict
from typing import Any


def dict_to_txt(dict_val: Union[str, Dict[str, Any]]) -> str:
    """
    Return string of "key:val" pairs, one pair per line, from `dict_val`.

    Parameters
    ----------
    dict_val : Union[str, Dict[str, Any]]
        Dict of key/val pairs or string of single key/value

    Returns
    -------
    str
        str with one "key:val" pair per line
    """
    if isinstance(dict_val, str):
        if not dict_val:
            return ""
        if ":" in dict_val:
            key, val = dict_val.split(":", maxsplit=1)
        else:
            key, val = dict_val, ""
        return f"{key}:{val}"
    if isinstance(dict_val, dict):
        return "\n".join(f"{key}:{val}" for key, val in dict_val.items())
    return ""
116aeb9236466e71db5f84651d4cb36d3da05422
23,716
from pathlib import Path
from typing import TextIO
import gzip


def tsv_opener(path: Path) -> TextIO:
    """
    Open a TSV (either text file or gzip-compressed text file).

    Args:
        path : The path to the TSV file.
    """
    if path.suffix == ".gz":
        fh = gzip.open(path, "rt")
    else:
        fh = open(path, "r")
    return fh
7e5186138f9331e27b35458dc0f33b268dc48582
23,717
def reg_iou(x1, y1, x2, y2, dx1, dy1, dx2, dy2):
    """Bounding box regression function"""
    pred_x1 = x1 + dx1
    pred_y1 = y1 + dy1
    pred_x2 = x2 + dx2
    pred_y2 = y2 + dy2
    return pred_x1, pred_y1, pred_x2, pred_y2
3ea7229136800f448d3731209c63bbeec4aab81d
23,718
def gpsWeekCheck(t):
    """Makes sure the time is in the interval [-302400, 302400] seconds,
    which corresponds to the number of seconds in the GPS week"""
    if t > 302400.:
        t = t - 604800.
    elif t < -302400.:
        t = t + 604800.
    return t
acec8cff009f8dac53363a4686d869f4d5054b8d
23,719
import six
import binascii


def _uvarint(buf):
    """Reads a varint from a bytes buffer and returns the value and # bytes"""
    x = 0
    s = 0
    for i, b_str in enumerate(buf):
        if six.PY3:
            b = b_str
        else:
            b = int(binascii.b2a_hex(b_str), 16)
        if b < 0x80:
            if i > 9 or (i == 9 and b > 1):
                raise ValueError("Overflow")
            return (x | b << s, i + 1)
        x |= (b & 0x7f) << s
        s += 7
    return 0, 0
825921b72501436ca52dff498c76c43c0f5f48ca
23,720
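A worked example for _uvarint above: the LEB128-style varint encoding of 300 is the two bytes 0xac 0x02 (0x2c with the continuation bit set, then 0x02).

value, nbytes = _uvarint(b"\xac\x02")
assert (value, nbytes) == (300, 2)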
import shutil
import os


def patch_rom(src, dst, patch_data):
    """Patch src with the contents of patch_data, saving to dst."""
    succeeded = True

    # If we can work on a copy, copy the ROM so we can work on it
    if src != dst:
        shutil.copyfile(src, dst)

    with open(dst, "r+b") as f:
        for offset in patch_data:
            # Unpack the tuple stored in patch_data
            expected = patch_data.get(offset)[0]
            new = patch_data.get(offset)[1]

            # Store the bytes we need for comparison
            f.seek(offset, 0)
            old_value = f.read(len(expected))

            if old_value == expected:
                f.seek(-len(expected), 1)
                f.write(new)
                print("[ I ] At 0x{0:08x}: {1} -> {2}".format(offset, expected.hex(), new.hex()))
            else:
                succeeded = False
                break

    # Cleanup in case of failure; remove dst only if it's a copy
    if src != dst and not succeeded:
        os.unlink(dst)

    return succeeded
c62c5a22fccbe7a1d7d66b914a9305c166c27a69
23,721
import torch


def get_default_config():
    """The default configs."""
    use_cuda = torch.cuda.is_available()
    save_model = False
    # node_state_dim = 32  # number of features for a node
    # graph_rep_dim = 128  # number of features of a graph representation
    # graph_embedding_net_config = dict(
    #     node_state_dim=node_state_dim,
    #     edge_feat_dim=1,
    #     # sizes of the hidden layers of the edge message nets.
    #     edge_hidden_sizes=[node_state_dim * 2, node_state_dim * 2],
    #     # sizes of the hidden layers of the node update nets.
    #     node_hidden_sizes=[node_state_dim * 2],
    #     n_prop_layers=5,  # number of graph propagation layers.
    #     # set to False to not share parameters across message passing layers
    #     share_prop_params=True,
    #     # initialize message MLP with small parameter weights to prevent
    #     # aggregated message vectors blowing up, alternatively we could also use
    #     # e.g. layer normalization to keep the scale of these under control.
    #     edge_net_init_scale=0.1,
    #     # other types of update like `mlp` and `residual` can also be used here.
    #     node_update_type='gru',
    #     # set to False if your graph already contains edges in both directions.
    #     use_reverse_direction=True,
    #     # set to True if your graph is directed
    #     reverse_dir_param_different=False,
    #     # we didn't use layer norm in our experiments but sometimes this can help.
    #     layer_norm=False)
    # graph_matching_net_config = graph_embedding_net_config.copy()
    # graph_matching_net_config['similarity'] = 'dotproduct'

    # config for ubuntu dialogue corpus
    ubuntu_dialog_corpus_config = dict(
        data_size=345000,
        vocab_size=51165,
        data_pre="/home/liang/Workspace/#ubuntu_test/",
        data_path=None,
        eval_data_path=None,
        test_data_path=None,
        vocab_path=None,
        use_glove=False,
        glove_path="/home/liang/Workspace/glove.840B.300d.txt",
    )

    graph_structure_config = dict(
        branch_batch_size=100,
        sen_batch_size=9,
        emb_dim=300,
        sen_hidden_dim=300,
        branch_hidden_dim=300,
        max_enc_steps=50,
        max_dec_steps=50,
        min_dec_steps=5,
        dropout=1.0,
        positional_enc=True,  # use the positional encoding tricks
        positional_enc_dim=64,  # dimension of word embeddings
        n_gram=3,  # number of n_gram
        use_norm=True,  # use norm
        norm_alpha=0.25,  # norm_alpha
        user_struct=True,  # use the struct of user relation
        long_attn=False,  # use the struct of all sent attn
    )

    return dict(
        # encoder=dict(node_feat_dim=1,  # GraphEditDistance task only cares about graph structure.
        #              edge_feat_dim=1,
        #              node_hidden_sizes=[node_state_dim],
        #              edge_hidden_sizes=None),
        # aggregator=dict(node_input_size=node_state_dim,
        #                 node_hidden_sizes=[graph_rep_dim],
        #                 graph_transform_sizes=[graph_rep_dim],
        #                 gated=True,
        #                 aggregation_type='sum'),
        # graph_embedding_net=graph_embedding_net_config,
        # graph_matching_net=graph_matching_net_config,
        # # Set to `embedding` to use the graph embedding net.
        # model_type='matching',
        graph_structure_net=graph_structure_config,
        # data=dict(
        #     problem='graph_edit_distance',
        #     dataset_params=dict(
        #         # always generate graphs with 20 nodes and p_edge=0.2.
        #         n_nodes_range=[20, 20],
        #         p_edge_range=[0.2, 0.2],
        #         n_changes_positive=1,
        #         n_changes_negative=2,
        #         validation_dataset_size=1000)),
        data=ubuntu_dialog_corpus_config,
        training=dict(
            batch_size=20,
            learning_rate=1e-3,
            mode='pair',
            loss='margin',
            margin=1.0,
            # A small regularizer on the graph vector scales to avoid the graph
            # vectors blowing up. If numerical issues is particularly bad in the
            # model we can add `snt.LayerNorm` to the outputs of each layer, the
            # aggregated messages and aggregated node representations to
            # keep the network activation scale in a reasonable range.
            graph_vec_regularizer_weight=1e-6,
            # Add gradient clipping to avoid large gradients.
            clip_value=10.0,
            # Increase this to train longer.
            n_training_steps=10000,
            # Print training information every this many training steps.
            print_after=100,
            # Evaluate on validation set every `eval_after * print_after` steps.
            eval_after=10),
        evaluation=dict(batch_size=20),
        mode="train",
        seed=8,
        use_cuda=use_cuda,
        save_model=save_model,
    )
ac340f7d2886a168c349e48bb1b3daa0bf48307e
23,723
def findWellsWithGivenTopsCurves(
    wells, wells_with_all_given_tops, wellsWithNeededCurvesList_real
):
    """
    NOTE: THIS FUNCTION MAY NOT BE USED DUE TO OTHER CHANGES IN THE CODE.
    It was created to deal with wanting to find the intersection of a list of
    wells with SITEID only and a list of wells with UWI only.
    """
    new_wells = wells.set_index("SitID").T.to_dict("list")
    # print("new_wells", new_wells[0])
    for key in new_wells:
        new_wells[key].append(new_wells[key][1].replace("/", "-") + ".LAS")
    print("new_wells", new_wells)
    print(len(new_wells))
    new_wells_with_all_given_tops = []
    for well in wells_with_all_given_tops:
        print("well in wells_with_all_given_tops:", well)
        new_wells_with_all_given_tops.append(new_wells[well][2])
    return list(
        set(new_wells_with_all_given_tops).intersection(wellsWithNeededCurvesList_real)
    )
6f76cdb3866a0b87f3231aae09158bd788dea47e
23,724
def binarize_ic50(ic50, ic50_threshold):
    """ Binarize ic50 based on a threshold """
    if ic50 <= ic50_threshold:
        return 1
    return 0
d1512f790dfad4fb3f85f4757184ceb7d21fc56a
23,725
def filter_rows_by_value(data, min):
    """Select rows where each value is greater than the given min threshold."""
    rows = data[data > min].dropna().astype(int)
    return rows
ec49892ea47256fe8d80063c2cdbcd161872ceb7
23,727
def integer(number, *args):
    """In Python 3 int() is broken.

    >>> int(bytearray(b'1_0'))
    Traceback (most recent call last):
        ...
    ValueError:
    """
    num = int(number, *args)
    if isinstance(number, str) and '_' in number or \
            isinstance(number, (bytes, bytearray)) and b'_' in number:
        raise ValueError()
    return num
e24f208db97be51ee535ad93cb795958848dd18f
23,728