content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def find_point_of_region(vor, ireg):
    """Return the index of the Voronoi input point that owns region ireg.

    Args:
        vor (Voronoi): Voronoi object.
        ireg (int): index of region.

    Returns:
        int or None: index of the Voronoi point for region ireg, or None
        if no point maps to that region.
    """
    matches = (ip for ip, region in enumerate(vor.point_region) if region == ireg)
    return next(matches, None)
7ea3fd05edf5331e16f913e909c995911555fc67
474,791
import torch


def angle_axis_to_rotation_matrix(angle_axis: torch.Tensor) -> torch.Tensor:
    """Convert 3d vector of axis-angle rotation to 4x4 rotation matrix

    Args:
        angle_axis (torch.Tensor): tensor of 3d vector of axis-angle rotations.

    Returns:
        torch.Tensor: tensor of 4x4 rotation matrices.

    Shape:
        - Input: :math:`(N, 3)`
        - Output: :math:`(N, 4, 4)`

    Example:
        >>> input = torch.rand(1, 3)  # Nx3
        >>> output = kornia.angle_axis_to_rotation_matrix(input)  # Nx4x4
    """
    def _compute_rotation_matrix(angle_axis, theta2, eps=1e-6):
        # We want to be careful to only evaluate the square root if the
        # norm of the angle_axis vector is greater than zero. Otherwise
        # we get a division by zero.
        k_one = 1.0
        theta = torch.sqrt(theta2)
        # eps keeps the division finite when theta is tiny
        wxyz = angle_axis / (theta + eps)
        wx, wy, wz = torch.chunk(wxyz, 3, dim=1)
        cos_theta = torch.cos(theta)
        sin_theta = torch.sin(theta)
        # Rodrigues' formula entries: R = cos(t) I + sin(t) [w]_x + (1-cos(t)) w w^T
        r00 = cos_theta + wx * wx * (k_one - cos_theta)
        r10 = wz * sin_theta + wx * wy * (k_one - cos_theta)
        r20 = -wy * sin_theta + wx * wz * (k_one - cos_theta)
        r01 = wx * wy * (k_one - cos_theta) - wz * sin_theta
        r11 = cos_theta + wy * wy * (k_one - cos_theta)
        r21 = wx * sin_theta + wy * wz * (k_one - cos_theta)
        r02 = wy * sin_theta + wx * wz * (k_one - cos_theta)
        r12 = -wx * sin_theta + wy * wz * (k_one - cos_theta)
        r22 = cos_theta + wz * wz * (k_one - cos_theta)
        rotation_matrix = torch.cat(
            [r00, r01, r02, r10, r11, r12, r20, r21, r22], dim=1)
        return rotation_matrix.view(-1, 3, 3)

    def _compute_rotation_matrix_taylor(angle_axis):
        # First-order Taylor expansion for tiny angles: R ~= I + [w]_x,
        # avoids dividing by a near-zero theta.
        rx, ry, rz = torch.chunk(angle_axis, 3, dim=1)
        k_one = torch.ones_like(rx)
        rotation_matrix = torch.cat(
            [k_one, -rz, ry, rz, k_one, -rx, -ry, rx, k_one], dim=1)
        return rotation_matrix.view(-1, 3, 3)

    # stolen from ceres/rotation.h
    # theta2: per-row squared norm of the axis-angle vectors, shape (N, 1)
    _angle_axis = torch.unsqueeze(angle_axis, dim=1)
    theta2 = torch.matmul(_angle_axis, _angle_axis.transpose(1, 2))
    theta2 = torch.squeeze(theta2, dim=1)

    # compute rotation matrices with both formulas; blended per row below
    rotation_matrix_normal = _compute_rotation_matrix(angle_axis, theta2)
    rotation_matrix_taylor = _compute_rotation_matrix_taylor(angle_axis)

    # create mask to handle both cases: full formula for large-enough
    # angles, Taylor fallback otherwise
    eps = 1e-6
    mask = (theta2 > eps).view(-1, 1, 1).to(theta2.device)
    mask_pos = (mask).type_as(theta2)
    mask_neg = (mask == False).type_as(theta2)  # noqa

    # create output pose matrix (homogeneous 4x4; last row/col stay identity)
    batch_size = angle_axis.shape[0]
    rotation_matrix = torch.eye(4).to(angle_axis.device).type_as(angle_axis)
    rotation_matrix = rotation_matrix.view(1, 4, 4).repeat(batch_size, 1, 1)

    # fill output matrix with masked values
    rotation_matrix[..., :3, :3] = \
        mask_pos * rotation_matrix_normal + mask_neg * rotation_matrix_taylor
    return rotation_matrix
461779558e8cf06e05a018253961e4756ada1a47
52,576
def promptChoice(prompt):
    """Ask the user with prompt and return their choice as an int.

    Returns None when the input is not parseable as an integer.
    """
    raw = input(prompt)
    try:
        choice = int(raw)
    except ValueError:
        return None
    return choice
6301bda8ebecfa49e7451e6a996607fcae7bca9c
559,797
def gradient_descent(f, initial_guess, n_steps, step_size, plotting_cb):
    """Run plain gradient descent on the cost function f.

    args:
        f: function taking a single array of decision variables and
           returning a tuple (cost, cost_gradient)
        initial_guess: starting point for the decision variables
        n_steps: number of gradient descent steps to run
        step_size: step size (learning rate) for gradient descent
        plotting_cb: callback(iteration, decision_vars, cost_trace);
           may plot results but should not block. Return value unused.
    returns:
        tuple of (optimized decision variables, list of costs at each
        step, list of decision-variable arrays at each step)
    """
    costs = []
    iterates = []
    x = initial_guess.clone()
    for step in range(n_steps):
        # Evaluate, record, report, then take the step.
        cost, grad = f(x)
        costs.append(cost)
        iterates.append(x)
        plotting_cb(step, x, costs)
        x = x - step_size * grad
    # One last evaluation at the final point so traces include it.
    final_cost, _ = f(x)
    costs.append(final_cost)
    iterates.append(x)
    plotting_cb(n_steps, x, costs)
    return x, costs, iterates
99a22cf3f93a2c54d4d39f16f2f29374cf20c5c1
180,074
def get_spectral_w(w_pars, energy):
    """Return the spectral weight of an event.

    Parameters
    ----------
    w_pars: parameters obtained with get_spectral_w_pars()
        (E0, index, index_w, R, N_)
    energy: energy of the event in GeV

    Returns
    -------
    float
        w, the spectral weight
    """
    E0, index, index_w, R, N_ = (w_pars[i] for i in range(5))
    return ((energy / E0) ** (index_w - index)) * R / N_
9a9d9479a8f62ac007dddee1f1bf742ddce44bec
649,878
from datetime import datetime


def date_delta(y1, m1, d1, y2, m2, d2):
    """Return the (non-negative) timedelta between two calendar dates.

    e.g. d = date_delta(2015, 3, 2, 2016, 3, 2)
    """
    first = datetime(int(y1), int(m1), int(d1))
    second = datetime(int(y2), int(m2), int(d2))
    # Order-independent: always the absolute difference.
    return second - first if second > first else first - second
f54f55cbfc8558fc5d1e7077e0b29b5d023fafbb
118,864
import torch


def DotScorer(dec_out, enc_outs, **kwargs):
    """Score each query decoder state against each encoder state by dot product.

    dec_out: (trg_seq_len x batch x hid_dim)
    enc_outs: (src_seq_len x batch x hid_dim)
    output: (trg_seq_len x batch x src_seq_len)
    """
    enc_batch_first = enc_outs.transpose(0, 1)              # (batch x src_seq_len x hid_dim)
    dec_batch_first = dec_out.transpose(0, 1).transpose(1, 2)  # (batch x hid_dim x trg_seq_len)
    score = torch.bmm(enc_batch_first, dec_batch_first)     # (batch x src_seq_len x trg_seq_len)
    # rearrange to (trg_seq_len x batch x src_seq_len)
    return score.transpose(0, 1).transpose(0, 2)
79a56c45d83ca3f525191c089900c49095917fa3
529,998
from typing import Optional


def optional(variable: Optional[object], default: object):
    """Return variable, falling back to default when variable is None."""
    if variable is None:
        return default
    return variable
57ec55f83a89f558ea3fbec35e820e3950f88af5
398,875
import struct


def two_ints_to_long(intl, inth):
    """Interpret two unsigned 32-bit ints as one signed 64-bit value.

    inth is the high word, intl the low word (big-endian layout).
    """
    combined = struct.pack(">II", inth, intl)
    return struct.unpack(">q", combined)[0]
c503e00f1ed934ad22590b942be85b7fd877bb47
693,756
def get_shapefile_record_name_from_location(shapefile_record_names_df, location):
    """Get the shapefile record name for the given location.

    Args:
        shapefile_record_names_df (pandas DataFrame): dataframe of shapefile
            record names and the associated locations
        location (str): name of the location

    Returns:
        str: Record name for the location as listed in the shapefiles used.
    """
    matching_rows = shapefile_record_names_df.loc[
        shapefile_record_names_df['location'] == location
    ]
    return matching_rows['record_name'].values[0]
46e52c302736a77a549046cb15243ff375136cb6
97,060
def parse_comma_list(s):
    """Parse a comma-separated env-var string into a list of stripped items."""
    if not s.strip():
        return []
    return [item.strip() for item in s.split(',')]
d1fbe2e4ad0b0cfb84636798fa5c146142c9114a
354,172
def 取最大数(数值列表):
    """Return the largest number in the given sequence.

    :param 数值列表: a sequence of numbers, e.g. (1, 2, 3)
    :return: the maximum value, e.g. 3
    """
    最大值 = max(数值列表)
    return 最大值
111eba5c9c37e7656ca410447e1ac5a3f0041ea7
40,239
import random


def uniform_crossover(dna1, dna2):
    """Randomly cross over genes between dna1 and dna2.

    Each position is swapped between the two children with probability 0.5.
    Returns a tuple (child1, child2).
    """
    child1, child2 = [], []
    for ind, gene_a in enumerate(dna1):
        gene_b = dna2[ind]
        # coin flip decides which child inherits which parent's gene
        if random.random() < 0.5:
            child1.append(gene_a)
            child2.append(gene_b)
        else:
            child1.append(gene_b)
            child2.append(gene_a)
    return (child1, child2)
9c72b7b43f41f76c3d00c3430f398ae7e44a0e70
433,511
def get_prefix_length(oracle):
    """Find the length of the prefix mysterious_encrypt prepends before encrypting.

    Encrypts two one-byte plaintexts that differ; since the stream cipher
    encrypts position-by-position, the ciphertexts agree exactly on the
    prefix bytes and first diverge where the plaintext starts.
    """
    ct_a = oracle.encrypt(b'A')
    ct_b = oracle.encrypt(b'B')
    length = 0
    while ct_a[length] == ct_b[length]:
        length += 1
    return length
9f884797c2cdc25fca0b8bde39f5b9700eb5372b
597,284
def config_file(tmpdir_factory):
    """Creates a sample looker.ini file and returns it

    Writes three sections: a fully-populated [Looker] section, a
    [NO_CREDENTIALS] section with only a base_url, and an
    [EMPTY_STRING_CREDENTIALS] section whose client id/secret are blank.
    Returns the py.path object for the written file.
    """
    filename = tmpdir_factory.mktemp("settings").join("looker.ini")
    filename.write(
        """
[Looker]
# API version is required
api_version=3.1
# Base URL for API. Do not include /api/* in the url
base_url=https://host1.looker.com:19999
# API 3 client id
client_id=your_API3_client_id
# API 3 client secret
client_secret=your_API3_client_secret
# Set to false if testing locally against self-signed certs. Otherwise leave True
verify_ssl=True
looker_url=https://webserver.looker.com:9999
redirect_uri=https://alice.com/auth

[NO_CREDENTIALS]
base_url=https://host1.looker.com:19999

[EMPTY_STRING_CREDENTIALS]
base_url=https://host1.looker.com:19999
client_id=
client_secret=
"""
    )
    return filename
749e80a61af42a7940b9251c7f326185224d8160
243,642
def listDistInts(refSeqs, querySeqs, self=True):
    """Gets the ref and query ID for each row of the distance matrix

    Yields (ref, query) index pairs, one per distMat row.

    Args:
        refSeqs (list)
            List of reference sequence names.
        querySeqs (list)
            List of query sequence names.
        self (bool)
            Whether a self-comparison, used when constructing a database.
            Requires refSeqs == querySeqs. Default is True

    Returns:
        ref, query (int, int)
            Iterable of tuples with ref and query indexes for each distMat row.

    Raises:
        RuntimeError: if self is True but refSeqs != querySeqs.
    """
    # NOTE: the original version eagerly built a `comparisons` list that was
    # never yielded and only "returned" via StopIteration — dead code, removed.
    num_ref = len(refSeqs)
    num_query = len(querySeqs)
    if self:
        if refSeqs != querySeqs:
            raise RuntimeError('refSeqs must equal querySeqs for db building (self = true)')
        # upper-triangle pairs only (each unordered pair once)
        for i in range(num_ref):
            for j in range(i + 1, num_ref):
                yield (j, i)
    else:
        # full cross-product of query x ref
        for i in range(num_query):
            for j in range(num_ref):
                yield (j, i)
da589e1f24bcacdcc607bf889a1fba0a2ceb25dc
558,381
def parse_sam_region(string):
    """Parse a 1-based 'chrom[:start[-end]]' region string into a 0-based tuple.

    Returns () for empty input, (chrom,) when no positions are given, and
    (chrom, start-1, [end]) otherwise — only the start is shifted to 0-based.
    """
    if not string:
        return ()
    parts = string.strip().split(":")
    chrom = parts[0]
    if len(parts) == 1:
        return (chrom,)
    bounds = [int(p) for p in parts[1].split("-")]
    bounds[0] -= 1  # convert 1-based start to 0-based offset
    return (chrom,) + tuple(bounds)
5c3a0c1b4033e02c2e94b10933ae96bcbca27b00
265,909
def recover_bad_token(stream: str) -> str:
    """Pull the leading token (everything before the first space) off the stream."""
    token, _, _ = stream.partition(' ')
    return token
f3726aa6826b741eaedcdc9be430efbfa5e83ca2
427,176
import re


def split_alpha_number(symbol):
    """Split a letters-then-digits symbol; trailing characters are dropped.

    print(split_alpha_number("TF1703.CFE"))
    ('TF', '1703')

    :param symbol: symbol string such as "TF1703.CFE"
    :return: ('TF', '1703')-style tuple, or None if the symbol does not
        start with letters followed by digits.
    """
    match = re.match(r'([A-Za-z]+)(\d+)', symbol)
    if match is None:
        return None
    return (match.group(1), match.group(2))
70688acbe7c6a09504dd3b1c4881b24f2ee64715
91,257
import base64


def backend_internal_api_token(testconfig):
    """Return the token for the internal api.

    The token is 'username:password' encoded in base64, where username and
    password come from the backend-internal-api secret in testconfig.
    """
    secret = testconfig["threescale"]["backend_internal_api"]
    username = secret["username"].decode("ascii")
    password = secret["password"].decode("ascii")
    credentials = f"{username}:{password}".encode("ascii")
    return base64.b64encode(credentials).decode("ascii")
8e6c78327ef79ecf3e0893fb9b715c3e7d456337
196,810
def _element_in_child_template(root, e): """ detect if the element is trapped inside a nested bind: tag relative to e :type root: UIElement :type e: UIElement :rtype: bool """ return any(x.typeid.startswith('bind:template') for x in root.path_to(e))
05d1497653766f057771d923276f5c586834bac6
459,284
def set_default_schedule(chronos_job):
    """Rewrite a chronos job's schedule so it runs now, exactly once.

    :param chronos_job: a chronos job dictionary suitable for POSTing to Chronos
    :returns: the same chronos_job dict, with 'schedule' set to run the job
        now and only once. The interval component is irrelevant but required
        by Chronos.
    """
    run_once_now = 'R1//PT1M'
    chronos_job['schedule'] = run_once_now
    return chronos_job
900617b0befdf6e31b8e84714e2dba4604eea762
329,168
def are_all_0(lists, index):
    """Check whether the values at index are 0 in every list.

    :param list lists: a list of lists to check the value in.
    :param int index: the index of the values to check in the lists.
    :returns: True if all the values at the index are 0 (vacuously True for
        no lists), False if at least one of them is not 0.
    """
    return all(seq[index] == 0 for seq in lists)
1fe4f8777618eed459907b2995170691be639e5b
47,506
def get_damage(attacker, defender):
    """Return damage dealt by attacker to defender: Damage minus Armor, minimum 1."""
    dealt = attacker['Damage'] - defender['Armor']
    return max(dealt, 1)
3bb570cb84b2a54a9c6e209eb0db152eae6b17c8
599,079
def diminuir(num, porcentagem):
    """Return num reduced by the given percentage.

    :param num: base value the percentage is taken from
    :param porcentagem: percentage to subtract
    :return: num minus porcentagem% of num
    """
    desconto = num * (porcentagem / 100)
    return num - desconto
c4515886adb6be846710b964dccc6c55ce982ab9
588,644
def cents_to_dollars(cents):
    """Convert cents to dollars, rounded to two decimal places.

    :param cents: Amount in cents
    :type cents: int
    :return: float
    """
    dollars = cents / 100.0
    return round(dollars, 2)
dc0d33c34de70b1591a82d8195848418a6ef2fed
495,833
def conv_lin(a, b=1.0, c=0.0, inverse=False):
    """Simple linear transform (forward a*b + c, inverse (a-c)/b).

    Will I think store parameters against each sensor then they are handy.

    >>> conv_lin(4,2,3)
    11
    >>> conv_lin(11,2,3.0,True)
    4.0
    >>>"""
    # NOTE: identity comparison with False is preserved from the original —
    # anything other than the singleton False selects the inverse branch.
    forward = inverse is False
    return a * b + c if forward else (a - c) / b
a41d3254c933f59a5fb24253dcebc684a4211108
676,303
import random


def random_index(seed, N):
    """Return a random index in [0, N) different from seed.

    Args: seed - initial index, N - maximum index
    Return: A random index between [0, N) excluding seed
    """
    shift = random.randint(1, N - 1)
    candidate = (seed + shift) % N
    # a nonzero shift modulo N can never land back on seed
    assert(seed != candidate)
    return candidate
72394a6687265234b131524fdb37583c6f56105b
393,527
def plur_sing(count, word, suffix=None):
    """Format count and word, appending suffix (default 's') when count != 1."""
    suffix = "s" if suffix is None else suffix
    ending = suffix if count != 1 else ""
    return f"{count} {word}{ending}"
519651c1b020619cc3c0ace4d76e47fb57eae17b
571,206
def date_convert_RU(date, short=False):
    """Convert date object to string in Russian with month name: '2018-01-06' -> '6 января'.

    :param date: date object <datetime.date>
    :param short: set to True to get abbreviated month name, use for graph labels.
    :return: date <string>
    """
    # FIX: June was misspelled 'юня' (missing leading 'и') — corrected to 'июня'.
    months_ru = ['января', 'февраля', 'марта', 'апреля', 'мая', 'июня',
                 'июля', 'августа', 'сентября', 'октября', 'ноября', 'декабря']
    months_ru_short = ['янв', 'фев', 'мар', 'апр', 'мая', 'июн',
                       'июл', 'авг', 'сент', 'окт', 'нояб', 'дек']
    names = months_ru_short if short else months_ru
    # direct index instead of the original linear scan over enumerate()
    return "{} {}".format(date.day, names[date.month - 1])
cc7a292dd95d8a328d1fe87c7f8dce3c55ba471d
231,934
def process_data(data: str) -> tuple[str, dict[str, str]]:
    """Process one row of template, then an empty line, then rows of rules.

    Returns:
        tuple[str, dict[str, str]]: (template, rules-dict) where rules-dict
        looks like {'CH':'B', 'HH':'N', ...}
    """
    template, _, rule_block = data.partition('\n\n')
    rules: dict[str, str] = {}
    for raw_line in rule_block.splitlines():
        if not raw_line:
            continue
        pair, element = raw_line.split(" -> ")
        rules[pair] = element
    return template, rules
a0c66cf7d1c12edaa1610fea8bdd43e61c5ba0ca
500,112
def _is_water_file(f): """ Is this the filename of a water file? :type f: str :rtype: bool >>> _is_water_file('LS7_ETM_WATER_144_-037_2007-11-09T23-59-30.500467.tif') True >>> _is_water_file('createWaterExtents_r3450_3752.log') False >>> _is_water_file('LC81130742014337LGN00_B1.tif') False >>> _is_water_file('LS8_OLITIRS_OTH_P51_GALPGS01-032_113_074_20141203') False >>> # We only currently care about the Tiffs: >>> _is_water_file('LS_WATER_150_-022_1987-05-27T23-23-00.443_2014-03-10T23-55-40.796.nc') False """ return 'WATER' in f and f.endswith('.tif')
2a4171c5a121a16d635ab7c7469f1092310c7d60
508,778
import asyncio


async def with_ratelimit(client, method, *args, **kwargs):
    """Call a client method, retrying with a 3s backoff while rate limited.

    FIXES over the original:
    - time.sleep blocked the whole event loop; replaced with asyncio.sleep.
    - the recursive retry was returned without await, handing the caller a
      bare coroutine instead of the response; now awaited.

    :param client: object providing the API methods
    :param method: name of the client method to call
    :returns: the (awaited) response from the client method
    """
    func = getattr(client, method)
    response = await func(*args, **kwargs)
    if getattr(response, "status_code", None) == "M_LIMIT_EXCEEDED":
        await asyncio.sleep(3)
        return await with_ratelimit(client, method, *args, **kwargs)
    return response
7ba1d295b770d217a10e6d74a7ea9e47c4c60615
211,576
from typing import Any


def validate_data_type(point_type: int, value: Any) -> bool:
    """Validate data type, 1:number, 2:str, 3:boolean, 4:datetime, 5:location.

    Unknown point types validate as True. Note: bool passes the numeric
    checks for types 1/4 since bool subclasses int (original behavior).

    :param point_type: data point type
    :param value: value
    :return: True if valid else False
    """
    if point_type in (1, 4):
        return isinstance(value, (int, float))
    strict_types = {2: str, 3: bool, 5: float}
    expected = strict_types.get(point_type)
    if expected is not None:
        return isinstance(value, expected)
    return True
72aa4dc642a7a9a8d8e365a176a880ae732fb31f
155,518
def rivers_with_station(stations):
    """Return the distinct rivers on which the given stations are located.

    Args:
        stations: iterable of station objects exposing a .river attribute.

    Returns:
        list[str]: river names in alphabetical order. (The previous
        docstring claimed a set, but a sorted list has always been
        returned; docstring corrected to match behavior.)
    """
    return sorted({station.river for station in stations})
d0c6579906a7d4063ee979aa219d273cfc5b6099
566,289
def doc_label_name(context_type):
    """Return the standard metadata label name for a context_type.

    Useful for figuring out the specific label name in metadata given the
    context_type.

    :param context_type: A type of tokenization.
    :type context_type: string
    :returns: label name for `context_type` as string.
    """
    return ''.join([str(context_type), '_label'])
da6e18ad15a40c7898b9617208038ca7a8528973
288,150
def can_loop_over(maybe):
    """Test value to see if it is list like (iterable).

    Returns 1 if iter(maybe) succeeds, else 0, preserving the original
    int-valued interface. FIX: the bare except (which swallowed even
    KeyboardInterrupt) is narrowed to TypeError, which is what iter()
    raises for non-iterables.
    """
    try:
        iter(maybe)
    except TypeError:
        return 0
    return 1
7dc0b759f7287c7722c98687aa715b768c7b47ad
526,925
def list_eval_keys(prefix='kitti'):
    """List the keys of the dictionary returned by the evaluate function."""
    suffixes = ['EPE(occ)', 'EPE(noc)', 'ER(occ)', 'ER(noc)',
                'inf-time(ms)', 'eval-time(s)', 'occl-f-max',
                'best-occl-thresh']
    return ['{}-{}'.format(prefix, suffix) for suffix in suffixes]
1d3b195a95789d7ec5c818056072a60dc1e0b16b
143,961
import copy


def resample_list(spec_to_resample, specList, **kwargs):
    """Resample a single spectrum onto every order of an echelle spectrum list.

    Useful for converting models into echelle spectra with multiple orders.

    Parameters
    ----------
    spec_to_resample: EchelleSpectrum or specutils Spectrum1D object
        Object storing the spectrum (typically of a model) to be resampled
        onto the same grid as specList.
    specList: EchelleSpectrumList object
        Echelle spectrum (multiple orders) supplying the wavelength grid
        each order of the output is resampled to.
    **kwargs: optional
        Extra arguments passed through to spec_to_resample.resample for
        each order.
    """
    resampled = copy.deepcopy(specList)
    for order_index in range(len(specList)):
        resampled[order_index] = spec_to_resample.resample(
            specList[order_index], **kwargs)
    return resampled
4ef582f164965189d40f01185143069b5bdf551c
382,042
def value_to_bitval(value, bits=8, min=0, max=1):
    """Convert a value in [min, max] to a bits-bit integer.

    NOTE: the min/max parameter names shadow builtins but are part of the
    public keyword interface and therefore kept.

    :param value: (float) value to convert
    :param bits: (int) number of bits of resolution
    :param min: (float) minimum of range
    :param max: (float) maximum of range
    :return: (int) value on the bits-bit scale (e.g. 8-bit: 0 to 2^8 - 1)
    """
    fraction = (value - min) / (max - min)
    full_scale = 2 ** bits - 1
    return int(fraction * full_scale)
48979fdf737e5c4ab3dcde72104b7a7035f6173f
260,817
def calc_doublelayer_dielectric_capacitance(eps_fluid, lamb, Cdl):
    """Total capacitance of Debye layer in series with the surface capacitance
    (Stern layer or dielectric coating). Units: F/m^2.

    Notes: Adjari, 2006 - "The overall capacitance per unit area in the
    Debye-Huckel limit"

    Inputs:
        eps_fluid = permittivity of the fluid (F/m)
        lamb = Debye length (m)
        Cdl = Capacitance due to Stern/dielectric layer (F/m^2)
    """
    debye_capacitance = eps_fluid / lamb
    series_factor = 1 / (1 + (debye_capacitance / Cdl))
    C_total_Adjari = series_factor * debye_capacitance
    return C_total_Adjari
95f4c5abf7eaf4478fc120f633ca9c7f32d88a1e
532,804
def submit(jobs, job_attributes=None, serial=False, dump_job=True):
    """Submit multiple jobs, either serial to each other or in parallel.

    FIX: job_attributes previously used a mutable default argument ({});
    replaced with the None sentinel idiom.

    Args:
        jobs (:obj:`list` or `tuple` of :obj:`Job`): jobs to submit
        job_attributes (dict, optional): effective overrides to all job attributes
        serial (bool): if True jobs will be serial to each other
        dump_job (bool): if True it will store the job as alf file.

    Returns:
        list: job ids
    """
    if job_attributes is None:
        job_attributes = {}
    ids = []
    parent_id = None
    for job in jobs:
        job.job_attributes.update(job_attributes)
        if serial and parent_id:
            # setting serial relationship: this job waits on the previous one
            job.job_attributes["afterjids"] = [parent_id]
        parent_id = job.submit(dump_job=dump_job, **job.job_attributes)
        ids.append(parent_id)
    return ids
d9eb4ca69d4fd6528385a14c21729d987fe21cd9
185,739
import random


def generate_number() -> int:
    """Generate a random number from 1 to 10 (inclusive).

    Returns:
        int: Random number.
    """
    return random.randrange(1, 11)
f9d4c000bc1e0103f0548924b326c0fa7c27700c
107,982
def pad_bounds(bounds, percent=0):
    """Pad the bounds outward by a percentage of each dimension's extent.

    Parameters
    ----------
    bounds : list-like of (west, south, east, north)
    percent : int, optional
        percent to pad the bounds

    Returns
    -------
    (west, south, east, north)
    """
    xmin, ymin, xmax, ymax = bounds
    fraction = percent / 100
    dx = (xmax - xmin) * fraction
    dy = (ymax - ymin) * fraction
    return [xmin - dx, ymin - dy, xmax + dx, ymax + dy]
b2c94bb5da16efef629bde0d61d361a005b06565
612,773
def has_ext(filename, ext):
    """
    >>> has_ext('test.mp3',['opus','mp3','aac'])
    True
    >>> has_ext('test.mp4',['opus','mp3','aac'])
    False
    >>> has_ext('test.opus.gz',['opus','mp3','aac'])
    False
    >>> has_ext('test.1.OPUS',['opus','mp3','aac'])
    True
    """
    # Only the final extension counts, compared case-insensitively.
    final_part = filename.rsplit(".", 1)[-1]
    return final_part.lower() in ext
672c77eab94a15bee226fce9037feea73a83584c
518,451
def is_prefix(x, pref) -> bool:
    """Check if pref is a (strictly shorter) prefix of x.

    Args:
        x: Label ID sequence.
        pref: Prefix label ID sequence.

    Returns:
        : Whether pref is a proper prefix of x (False when pref is as long
        as or longer than x, including the equal case).
    """
    if len(pref) >= len(x):
        return False
    return all(p == item for p, item in zip(pref, x))
ec34e3e08d9ea5822481d1eaf70f1c72caf2e6d2
516,739
import re


def remove_whitespace(some_string):
    """Remove whitespace and punctuation characters from a string (lower-cased)."""
    lowered = some_string.lower()
    # character class of separators/punctuation to strip (kept verbatim)
    splitter = r'[\; \, \* \n \.+\- \( \) - \/ : \? \ — \' \’]'
    pieces = re.split(splitter, lowered)
    return ''.join(pieces)
929d702e7c96830c867a928d3461b09e7b41f1d0
201,055
def lauegroup_to_lattice(lauegroup):
    """Convert a Laue group representation (from pointless, e.g. I m m m)
    to something useful, like the implied crystal lattice (in this case, oI.)

    Tokens equal to "1" are dropped before lookup (e.g. 'P 1 2/m 1' -> 'P2/m').
    """
    # this has been calculated from the results of Ralf GK's sginfo and a
    # little fiddling...
    #
    # 19/feb/08 added mI record as pointless has started producing this -
    # why??? this is not a "real" spacegroup... may be able to switch this
    # off... # 'I2/m': 'mI',
    # (renamed from 'lauegroup_to_lattice' so the table no longer shadows
    # this function's own name)
    lattice_by_laue = {
        "Ammm": "oA",
        "C2/m": "mC",
        "Cmmm": "oC",
        "Fm-3": "cF",
        "Fm-3m": "cF",
        "Fmmm": "oF",
        "H-3": "hR",
        "H-3m": "hR",
        "R-3:H": "hR",
        "R-3m:H": "hR",
        "I4/m": "tI",
        "I4/mmm": "tI",
        "Im-3": "cI",
        "Im-3m": "cI",
        "Immm": "oI",
        "P-1": "aP",
        "P-3": "hP",
        "P-3m": "hP",
        "P2/m": "mP",
        "P4/m": "tP",
        "P4/mmm": "tP",
        "P6/m": "hP",
        "P6/mmm": "hP",
        "Pm-3": "cP",
        "Pm-3m": "cP",
        "Pmmm": "oP",
    }
    updated_laue = "".join(token for token in lauegroup.split() if token != "1")
    return lattice_by_laue[updated_laue]
57d7e6851675785b55339046416af35b5cecf6e0
240,668
def _plural_s(cnt, word): """Return word in plural if cnt is not equal 1""" if cnt == 1: return word return '%ss' % word
fa9f5a319caacfaacf1acf4ae02942691a34d0c0
515,636
from typing import List


def make_ngrams(text: str, n: int) -> List[str]:
    """Turn a term string into a list of ngrams of size n.

    The text is wrapped with '#' boundary markers before slicing.

    :param text: a text string
    :type text: str
    :param n: the ngram size
    :type n: int
    :return: a list of ngrams (empty when n exceeds len(text))
    :rtype: List[str]
    :raises TypeError: when text is not a str or n is not an int
    :raises ValueError: when n < 1
    """
    if not isinstance(text, str):
        raise TypeError('text must be a string')
    if not isinstance(n, int):
        raise TypeError('n must be a positive integer')
    if n < 1:
        raise ValueError('n must be a positive integer')
    # NOTE: length check is against the *unpadded* text (original behavior)
    if n > len(text):
        return []
    padded = "#{t}#".format(t=text)
    return [padded[start:start + n] for start in range(len(padded) - n + 1)]
a574e78a9873a6f2dbbc3643197a9f23d72d84ab
83,913
def _divisible_slice(perm, divisor, start_index): """Check if a three digit slice of a permutation is divisible by divisor""" perm_slice = perm[start_index:start_index + 3] number = int(''.join(perm_slice)) return number % divisor == 0
2698381efd0e7f720f474eb57dc25bc6b8f25dac
476,468
def calculate_reliability(data):
    """Return the (grade, color) reliability rating of the smartcab during testing.

    A perfect success ratio earns "A+"; otherwise the grade falls through
    thresholds at 0.90/0.80/0.70/0.60 down to "F".
    """
    success_ratio = data['success'].sum() * 1.0 / len(data)
    if success_ratio == 1:  # Always meets deadline
        return ("A+", "green")
    grade_table = [
        (0.90, ("A", "green")),
        (0.80, ("B", "green")),
        (0.70, ("C", "#EEC700")),
        (0.60, ("D", "#EEC700")),
    ]
    for threshold, rating in grade_table:
        if success_ratio >= threshold:
            return rating
    return ("F", "red")
d1c9ad7bba220beeae06c568cfd269aaaebfb994
1,545
import tempfile


def get_temp_filename(mode="w+b", buffering=-1, encoding=None, newline=None, suffix=None, prefix=None, dir=None) -> str:
    """Get temp filename
    e.g j.sals.fs.get_temp_filename(dir="/home/rafy/") -> '/home/rafy/tmp6x7w71ml'

    NOTE(review): the NamedTemporaryFile is created with the default
    delete=True and never kept, so the underlying file is removed as soon
    as the object is garbage collected — only the *name* is returned.
    Confirm callers expect a name rather than an existing file.

    Args:
        mode (str, optional): [description]. Defaults to "w+b".
        buffering (int, optional): buffering. Defaults to -1.
        encoding ([type], optional): encoding . Defaults to None.
        newline ([type], optional): Defaults to None.
        suffix ([type], optional): ending suffix. Defaults to None.
        prefix ([type], optional): prefix . Defaults to None.
        dir ([type], optional): where to create the file. Defaults to None.

    Returns:
        [str]: temp filename
    """
    return tempfile.NamedTemporaryFile(mode, buffering, encoding, newline, suffix, prefix, dir).name
6ee213a5d2acf782b4ac24b2581bfcd733a23156
116,559
def last_elem_in_list(working_list):
    """Return the last element of a list (IndexError on empty input)."""
    final_element = working_list[-1]
    return final_element
c6bb6c538b2474a504358dc4a51f6a3537e11388
242,592
def parse_email_remediation_key(key):
    """Return the (message_id, recipient) tuple for a key created using
    create_email_remediation_key.

    FIX: the docstring always promised a tuple but a list was returned;
    the result is now an actual tuple (splitting on the first ':' only).
    """
    return tuple(key.split(':', 1))
495eff306e89ada4430bc4995d96fb88e0c4f032
645,900
def decdeg2dms(dd):
    """Convert decimal degrees to a (degrees, minutes, seconds) tuple.

    NOTE(review): for -1 < dd < 0 the sign is lost, because degrees is 0
    and minutes/seconds are always positive — confirm whether callers
    rely on this original behavior.
    """
    sign = 1 if dd >= 0 else -1
    magnitude = abs(dd)
    minutes, seconds = divmod(magnitude * 3600, 60)
    degrees, minutes = divmod(minutes, 60)
    return (sign * degrees, minutes, seconds)
f369e4f17960238cbb070e6e000dbb24dcba9011
582,167
def getAllContribsOutputStrFromSpectraOutput( spectraOutput, energyFmt=None, intensityFmt=None ):
    """ Gets a str to write all contributions to spectraOutput (e.g. fragA-S-3p contribution)

    Args:
        spectraOutput: (GenSpectraOutput object) This contains all information for a generated spectrum
        energyFmt: (Str, optional) The format string for the energies. Default = "{:.9g}"
        intensityFmt: (Str, optional) The format string for the intensities. Default = "{:.9g}"

    Returns
        outStr: (Str) String containing data on the contributions to the spectrum

    """
    energyFmt = "{:.9g}" if energyFmt is None else energyFmt
    intensityFmt = "{:.9g}" if intensityFmt is None else intensityFmt
    labelList = spectraOutput.label
    dataList = spectraOutput.spectralContributions
    #Get the labels: one "frag-ele-ao" tag per contribution column
    labelStrs = ", ".join( ["{}-{}-{}".format(x.fragKey, x.eleKey, x.aoKey) for x in labelList] )
    labelStrs = "#labels = " + labelStrs
    outStrList = [labelStrs]
    outStrList.append( "#Energy, Intensities" )
    # Row format: the energy, then one intensity field per contribution
    dataStrFmt = energyFmt + ", " + ", ".join( [intensityFmt for x in range(len(dataList))] )
    # Iterate over the energy grid of the first contribution; all
    # contributions are indexed by the same idx — presumably they share
    # one energy grid (TODO confirm with GenSpectraOutput).
    for idx,x in enumerate(dataList[0]):
        energy = x[0]
        currData = [ a[idx][1] for a in dataList ]
        outStrList.append( dataStrFmt.format( energy, *currData) )
    return "\n".join(outStrList)
03b10fa3bcdc9a0458478da0b602a8eaaa770d33
51,031
def find_indexes(s, ch='\n'):
    """Return the indexes of every occurrence of ch in s."""
    hits = []
    for position, letter in enumerate(s):
        if letter == ch:
            hits.append(position)
    return hits
b94bfcf277de7e10f2adee0aabc441b20bc18467
434,008
def comma_synonyms(f, e):
    """Expand comma-separated words in e into separate (f, word) synonym pairs.

    For example: ('house, home') -> ('house'), ('home')

    :param f: key kept for every expanded pair
    :param e: value, possibly containing comma-separated synonyms
    :return: list of (f, part) tuples, or [(f, e)] when no expansion applies
    """
    if ',' not in e:
        return [(f, e)]
    parts = [piece.strip() for piece in e.split(',') if piece.strip()]
    if len(parts) > 1:
        print(">COMMA_SYNS", f, e, '-->', parts)
        return [(f, p) for p in parts]
    return [(f, e)]
0c5966c191fc87c01d0bd7dccf6f071d0672f5b7
532,515
def hash_table_size(item, tablesize):
    """Hash a string into a table slot.

    A hashing technique that involves
    1. Converting the characters in a string to their ordinal values
    2. Summing them
    3. Taking the remainder modulo tablesize

    item - string
    tablesize - number of slots in the table
    """
    return sum(map(ord, item)) % tablesize
cf47a023c35693681485331878dfd3eb9164a7bf
9,448
def _isIterable(maybeIterable): """Is the argument an iterable object? Taken from the Python Cookbook, recipe 1.12""" try: iter(maybeIterable) except: return False else: return True
bd1e60aa5eba5dcbfe3433531d26a671dd65f9eb
218,088
def _grad_nll(p, gp, y): """ Given parameters and data, compute the gradient of the negative log likelihood of the data under the george Gaussian process. Parameters ---------- p : array GP hyperparameters gp : george.GP y : array data to condition GP on Returns ------- gnll : float gradient of the negative log-likelihood of y under gp """ gp.set_parameter_vector(p) return -gp.grad_log_likelihood(y, quiet=True)
600f22945036454621860d2289fcfdd9a29fdd1e
637,081
def get_doc_info(result):
    """Get the index, doc_type, and id associated with a document.

    Parameters
    ----------
    result : dict
        A document from an Elasticsearch search result.

    Returns
    -------
    dict
        A dictionary with keys 'index', 'doc_type', and 'id', containing
        the name of the index in which the document resides, the doc_type
        of the document, and its id.
    """
    field_map = {'index': '_index', 'doc_type': '_type', 'id': '_id'}
    return {name: result[raw_key] for name, raw_key in field_map.items()}
f4eb4b235c75d53862974e8937ca5c845b2734c5
145,383
def format_percentage(number):
    """Format a number (assumed between 0 and 1) as a whole-percent string.

    :param number: a number assumed to be between 0 and 1
    :return: str, e.g. 0.25 -> '25%'
    """
    percent = round(number * 100)
    return f'{percent}%'
5736b141b316d2de63804ddb248a45d9e4748ba4
669,440
import re


def you_to_yall(text: str) -> str:
    """Convert every standalone 'you' to \"y'all\", keeping the leading letter's case."""
    # group 1 keeps the original 'y'/'Y'; 'ou' is replaced by 'all'
    return re.sub(r'\b(y)(ou)\b', r"\1'all", text, flags=re.IGNORECASE)
1a3ee7ebf2394f84ad296e18da789dff8ca50a12
7,860
import re


def run(text, meta):
    """Remove refs from the text.

    Uses meta's 'ref_regex' pattern when present, otherwise a default
    matching things like '[ref:1a2b]'. Returns (substituted text, META
    unchanged).
    """
    pattern = meta.get("ref_regex", "[[(]*ref:[0-9a-f]+[])]*")
    cleaned = re.compile(pattern).sub("", text)
    return cleaned, meta
5ff0034a9269594b994819f7c9df2719f9d009f1
583,530
import hashlib


def md5hash(filename, size=50000):
    """
    Calculate the MD5 hash of the file contents (up to a given number of bytes).

    FIX: the file handle is now managed by a context manager, so it is
    closed even if the read raises (the original leaked the handle then).

    @param filename file path of file to process
    @param size the maximum number of bytes to read
    """
    with open(filename, 'rb') as f:
        data = f.read(size)
    return hashlib.md5(data).hexdigest()
59891ca50f72e8d3c62504887b0c1480a0753345
613,975
def is_scheduler_like(thing):
    """
    Test if an object can be used as a scheduler.

    :meta private:
    """
    # Pytorch has no unified base class for schedulers, so duck-type:
    # anything exposing a callable "step" attribute counts.
    step = getattr(thing, "step", None)
    return callable(step)
224c1696798df17b11812aa1cfcf0cd79f3f3afe
516,674
from typing import Any from typing import Union from typing import List from typing import Sequence def _list_convert(x: Any) -> Union[Any, List[Any]]: """Converts argument to list if not already a sequence.""" return [x] if not isinstance(x, Sequence) else x
79a913305a931378e2cb2b8f46a74f1381850ac4
44,112
def clean_string(s):
    """
    Clean all the HTML/Unicode nastiness out of a string.
    Replaces newlines with spaces.
    """
    # One C-level pass: drop '\r', turn '\n' and NBSP into spaces.
    table = str.maketrans({'\r': '', '\n': ' ', '\xa0': ' '})
    return s.translate(table).strip()
04568d67be1c966eec1ba7cb09ac7303667f66d6
152,224
def get_expected_log_files_dict(base_out):
    """
    :param base_out: Base path structure for log files. For example, if the
    expected path for the log is 'work/step.path/log/step.conda_info.txt',
    the argument should be 'work/step.path/log/step'.
    :type base_out: str

    :return: Returns dictionary with expected path for log files based on
    the provided input.
    """
    # Each expected entry is base_out plus a fixed suffix.
    suffixes = (
        ("conda_info", ".conda_info.txt"),
        ("conda_info_md5", ".conda_info.txt.md5"),
        ("conda_list", ".conda_list.txt"),
        ("conda_list_md5", ".conda_list.txt.md5"),
        ("log", ".log"),
        ("log_md5", ".log.md5"),
    )
    return {key: base_out + suffix for key, suffix in suffixes}
95046fca053fc3da9c03989e79a88e88d4daaec8
562,319
from pathlib import Path


def data_paths(root):
    """Get the paths for the processed data

    Note:
        This first requires that you use `frame`'s cli to preprocess the
        framenet data into json files.

    Args:
        root: root path to the json preprocessed data

    Raises:
        `ValueError`: if the files do not exist in the `root` directory.
            Make sure to prepocess the data using
            `frame`'s frame.cli:preprocess-framenet before loading the data.
    """
    paths = [str(entry) for entry in Path(root).glob("**/*")]
    if not paths:
        raise ValueError(
            f"Preprocessed data not found at <{root}>! Use frame.cli:preprocess-framenet"
        )
    return paths
4d5a0d460b7af8de2d22fc1a709974163dd8c23b
496,886
def is_string(value):
    """Check if the value is actually a string or not

    Anything parseable as a float, or spelling "true"/"false" in any
    case, is not considered a string.
    """
    try:
        float(value)
    except ValueError:
        # Not numeric: still reject boolean-looking literals.
        return value.lower() not in ("true", "false")
    return False
7cb61dcb81c6baa824a1537fd6c94f2b237c0669
128,917
def is_present(marks, text):
    """
    return True, if all marks present in given text

    Uses a generator so all() can short-circuit on the first missing
    mark (the original built a full list before testing).
    """
    return all(mark in text for mark in marks)
0ea198520b1e46f1c9b26a9533e0785140348776
70,873
def get_frame(path):
    """Get frame for animation from text file."""
    with open(path) as frame_file:
        contents = frame_file.read()
    return contents
4b5d7a55d85004f757acdedaf3bdc68d59d84cf8
543,553
def format_timestamp(timestamp):
    """
    Format a timestamp as 'YYYY-MM-DD HH:MM:SS'.

    Fixes the original format string, which used '%M' (minute)
    where the month directive '%m' was intended, producing e.g.
    '2021-30-05' for a 12:30 timestamp.
    """
    return timestamp.strftime('%Y-%m-%d %H:%M:%S')
fad6ae95f4745e4a13bd5ea9667ed0dc370413d8
380,108
def join(string, iterable, *, on_empty=None):
    """
    A functional version of ``str.join()`` providing more flexibility via
    the ``on_empty`` parameter.

    Arguments are not checked.

    :param string:
    :param iterable:
    :param on_empty: 'drop' skips falsy items; 'abort' yields '' if any
        item is ''; anything else joins as-is.
    :return:
    """
    if on_empty == 'abort':
        if '' in iterable:
            return ''
        return string.join(iterable)
    if on_empty == 'drop':
        return string.join(filter(None, iterable))
    return string.join(iterable)
8e61d544914f76b5b8f1c2023fe6c3d1d5fa1106
535,043
def snake_case_to_camel_case(string):
    """Converts a snake case string to a camel case string.

    Only the first character of each underscore-separated part is
    upper-cased; the remaining characters keep their original case.
    """
    parts = string.split('_')
    return ''.join(part[:1].upper() + part[1:] for part in parts)
fa34d39760a3f7760b94b3bdb3bf4fd6cf9f5262
173,154
def Kml(nodes):
    """Returns a tuple representing a KML Document node.

    Args:
        nodes: Iterable of child nodes representing top-level KML objects.

    Returns:
        Tuple for pyfo to produce the <kml> node.
    """
    document = ('Document', nodes)
    attrs = {'xmlns': 'http://www.opengis.net/kml/2.2'}
    return ('kml', document, attrs)
c8863135e7ea2e806da4070766d5fe510e1bc87e
557,964
def count_vars_vcf(vcf_path):
    """
    Counts the number of variants in a VCF file.

    Header lines (starting with '#') are skipped. The file is opened via
    a context manager so the handle is always closed (the original never
    closed it).
    """
    num_records = 0
    with open(vcf_path) as in_file:
        for line in in_file:
            if not line.startswith('#'):
                num_records += 1
    return num_records
053316c4aae21fe75e96d31a9563dc499b5dea71
113,612
def row_check_tile_direction(row, direction, index):
    """Return the first non-blank tile met when scanning *row* from
    *index* in *direction*, together with its position.

    Walking past either end reports a wall: the tile "-1" with position
    -1 on the left, or len(row) on the right. Directions other than
    "left"/"right" return None.
    """
    width = len(row)
    if direction == "left":
        pos = index - 1
        while pos >= 0 and row[pos] == " ":
            pos -= 1
        if pos < 0:
            return ("-1", -1)
        return (row[pos], pos)
    if direction == "right":
        pos = index + 1
        while pos < width and row[pos] == " ":
            pos += 1
        if pos >= width:
            return ("-1", width)
        return (row[pos], pos)
dbbdfa03639eafd2a784eb65553282706bde729a
309,900
from typing import IO
import csv


def startfile_csv(ARG_filewriter: IO):
    """Writes the header for the .csv suncalc results file.

    Parameters
    ----------
    ARG_filewriter : IO
        The file writing IO object.

    Returns
    -------
    csvwriter : csv._writer
        The IO object formatted specifically for this csv file.
    """
    header = ["DAY", "HOUR24", "MINUTE", "FRACTIONAL DAY", "DATESTRING",
              "ALTITUDE", "AZIMUTH"]
    csvwriter = csv.writer(ARG_filewriter, lineterminator="\n",
                           quoting=csv.QUOTE_NONNUMERIC)
    csvwriter.writerow(header)
    return csvwriter
f28eb1466b8945acc424635a5c1cd56d2a0d7cd3
592,140
def get_jssimporter(recipe):
    """Return the JSSImporter processor section or None.

    None is returned both when no JSSImporter processor exists and when
    more than one does (ambiguous).
    """
    matches = [proc for proc in recipe["Process"]
               if proc.get("Processor") == "JSSImporter"]
    return matches[0] if len(matches) == 1 else None
79135db06159fcf3d30bc2f0c1dbf6493a475fd3
302,891
def catalan_dynamic(n: int) -> int:
    """
    Returns the nth catalan number with polynomial time complexity.
    >>> catalan_dynamic(5)
    42
    >>> catalan_dynamic(10)
    16796
    >>> catalan_dynamic(0)
    1
    >>> catalan_dynamic(-5)
    1
    >>> catalan_dynamic(1.5)
    -1
    """
    if not isinstance(n, int):
        return -1
    if n <= 1:
        return 1
    # catalan[i] = sum of catalan[j] * catalan[i-1-j] for j in 0..i-1
    catalan = [1, 1] + [0] * (n - 1)
    for i in range(2, n + 1):
        catalan[i] = sum(catalan[j] * catalan[i - 1 - j] for j in range(i))
    return catalan[n]
0b33dd7e9d32653d7c3109232de3a127baf4c88b
472,550
def epsIndicator(frontOld, frontNew):
    """
    This function computes the epsilon indicator

    :param frontOld: Old Pareto front
    :type frontOld: list
    :param frontNew: New Pareto front
    :type frontNew: list
    :return: epsilon indicator between the old and new Pareto fronts
    :rtype: float
    """
    epsInd = 0
    firstValueAll = True
    for indNew in frontNew:
        tempEpsInd = 0
        firstValue = True
        for indOld in frontOld:
            # NOTE(review): individuals are assumed to carry exactly three
            # objective values (DEAP-style .fitness.values) -- confirm.
            (aOld, bOld, cOld) = indOld.fitness.values
            (aNew, bNew, cNew) = indNew.fitness.values
            # Worst per-objective gap between this old and this new individual.
            compare = max(aOld-aNew, bOld-bNew, cOld-cNew)
            # Running minimum of `compare` over all old individuals; the
            # first-iteration flag seeds it (0 is not a safe initial value
            # since gaps may all be positive or all negative).
            if firstValue:
                tempEpsInd = compare
                firstValue = False
            if compare < tempEpsInd:
                tempEpsInd = compare
        # Keep the maximum of the per-new-individual minima; the outer
        # first-value flag seeds it on the first new individual.
        # NOTE(review): placement of this block after the inner loop follows
        # the standard additive-epsilon-indicator definition -- confirm
        # against the original layout.
        if firstValueAll:
            epsInd = tempEpsInd
            firstValueAll = False
        if tempEpsInd > epsInd:
            epsInd = tempEpsInd
    return epsInd
8ec7b18b36a32b963c148aa7592557c2724bde0d
686,534
import string


def extract_prefix(name):
    """Return the library or sample name prefix

    Arguments:
      name: the name of a sample or library

    Returns:
      The prefix consisting of the name with trailing ASCII digits
      removed, e.g. 'LD_C' for 'LD_C1'
    """
    text = str(name)
    return text.rstrip(string.digits)
892fe3b74ee42918244aa4bb83e362d7f316d050
391,598
def positive_cap(num):
    """
    Cap a number to ensure positivity

    :param num: positive or negative number
    :returns: (overflow, capped_number)
    """
    if num >= 0:
        return num, 0
    return 0, -num
577f1b82979424e405e9e7b7ce06a905619b96e7
549,237
from typing import Sequence
from typing import Tuple
from typing import Optional


def get_fan_in_fan_out(
    shape: Sequence[int]) -> Tuple[Optional[int], Optional[int]]:
    """Returns (fan_in, fan_out) of a weight variable of the given shape.

    An empty shape yields (None, None). The dead `len(shape) < 1` branch
    from the original (unreachable after the emptiness check) has been
    removed.
    """
    if not shape:
        return None, None
    if len(shape) == 1:
        # Following _compute_fans() from TF's init_ops.py.
        return shape[0], shape[0]
    # Conv-style kernels: all leading dims form the receptive field.
    receptive_field_size = 1
    for s in shape[:-2]:
        receptive_field_size *= s
    fan_in = shape[-2] * receptive_field_size
    fan_out = shape[-1] * receptive_field_size
    return fan_in, fan_out
231372e76b549ffd44482b0835634bbb29c07a49
574,492
def get_end_connection(road):
    """
    Return the intersection connected to the end of *road*.

    :param road: The road whose end connection is requested
    :type road: Road
    :return: the intersection that is connected to the end of the road
    """
    end_connection = road.get_end_connection()
    return end_connection
795a672d8dc297fbcf1a36e10761e032ca2a7f1f
182,596
import struct


def inet_aton(string):
    """inet_aton(string) -> packed 32-bit IP representation

    Convert an IP address in string format (123.45.67.89) to the 32-bit
    packed binary format used in low-level network functions."""
    # Unpacking into four names preserves the ValueError raised for a
    # malformed address with the wrong number of dots.
    a, b, c, d = (int(part) for part in string.split(".", 3))
    return struct.pack("BBBB", a, b, c, d)
0dddf2bcd48eb1596c6a9677ac589998c472fba0
165,837
def validate_sequence_characters(sequence):
    """
    Validates that the sequence passed into it consists only of
    A, T, G and Cs (case-insensitive).

    Fixes the original implementation, which returned True/False after
    inspecting only ONE arbitrary element of the character set (the
    `return True` sat inside the loop), so invalid characters could pass
    validation depending on set iteration order; it also returned None
    for an empty sequence. An empty sequence now validates as True.
    """
    allowed = {'A', 'T', 'G', 'C'}
    return set(sequence.upper()) <= allowed
9fa5e8aa14685c4e98d7cec1df264968137f8257
516,492
import logging def _check_downsampling_mismatch(downsample, num_pooled_features, output_layer_size): """ If downsample is flagged True, but no downsampling size is given, then automatically downsample model. If downsample flagged false, but there is a size given, set downsample to true. Parameters: ---------- downsample : bool Boolean flagging whether model is being downsampled num_pooled_features : int the desired number of features to downsample to output_layer_size : int number of nodes in the output layer being downsampled Returns: ------- downsample : boolean Updated boolean flagging whether model is being downsampled num_pooled_features : int Updated number of features model output is being downsample to """ # If num_pooled_features left uninitialized, and they want to downsample, # perform automatic downsampling if num_pooled_features == 0 and downsample: if output_layer_size % 2 == 0: num_pooled_features = output_layer_size // 2 logging.warning('Automatic downsampling to {}. If you would like to set custom ' 'downsampling, pass in an integer divisor of {} to ' 'num_pooled_features.'.format(num_pooled_features, output_layer_size)) else: raise ValueError('Sorry, no automatic downsampling available for this model.') # If they have initialized num_pooled_features, but not turned on # downsampling, downsample to what they entered elif num_pooled_features != 0 and not downsample: logging.info('Downsampling to {}.'.format(num_pooled_features)) downsample = True return downsample, num_pooled_features
3ecd17596cccf3c84542dcd8c7ab7c9e35c2e65f
438,287
def fastaRecordSizer(recordLines):
    """
    Returns the number of charcters in every line excluding:
        the first (header) line
        whitespace at the start and end of lines

    Rewritten from a manual index loop to a generator sum (idiomatic and
    equally O(n)).
    """
    return sum(len(line.strip()) for line in recordLines[1:])
328b2e6fca07098616ba305520c7c64d754d4225
625,387
def get_accel_y(fore_veh, back_veh, ego_vy, idm_model):
    """
    A wrapper around IDM: given a front and a back vehicle (and the ego's
    absolute speed), ask the longitudinal driver model for the back
    vehicle's desired acceleration.

    Arguments
      fore_veh: A vehicle object for the vehicle currently in front.
      back_veh: A vehicle object for the vehicle behind fore_veh.
      ego_vy: A float, the absolute speed of the ego vehicle. Needed
        because the vehicle objects only carry relative speeds.
      idm_model: A longitudinal driver model exposing propagate().
    """
    back_speed = back_veh.rel_vy + ego_vy
    headway = fore_veh.rel_y - back_veh.rel_y
    closing_speed = back_veh.rel_vy - fore_veh.rel_vy  # positive -> back veh approaching
    return idm_model.propagate(back_speed, headway, closing_speed)
847cc4c7ed02bb27dcfb0bfed966ad2699cd90b6
645,215
def check_direct(element, list_to_check):
    """
    Returns a boolean indicating whether the direct should be updated.

    Inputs:
        - element: direct that needs to be checked;
        - list_to_check: (mode-or-pb_type name, port name) pairs selecting
          the direct to update.
    """
    # Walk up: direct -> interconnect -> mode/pb_type.
    mode_or_pb_type = element.getparent().getparent()
    if mode_or_pb_type.tag not in ('mode', 'pb_type'):
        return False
    parent_name = mode_or_pb_type.attrib['name']
    port_name = element.attrib['name']
    return any(parent_name == want_parent and port_name == want_port
               for want_parent, want_port in list_to_check)
4ace09e83cbdbf9999402373e986916fa5592905
366,986
def ceildiv(a: int, b: int) -> int:
    """Safe integer ceil function (no floating point, correct for all
    sign combinations)."""
    quotient, remainder = divmod(a, b)
    # Floor division rounds toward -inf; any nonzero remainder means the
    # true quotient was above the floor, so bump up by one.
    return quotient + (1 if remainder else 0)
9dabd15a396fd1e3c743074d56081d5181a17775
528,125
import pathlib


def unique_directories(files):
    """Returns a list of directories (pathlib.Path objects) for the files
    passed without repetitions."""
    parents = set()
    for entry in files:
        parents.add(pathlib.Path(entry).parent)
    return list(parents)
326ed8b251b21fb36f03c5ddc7edd0b18918e868
49,297
def delete_nth(order, max_e):
    """Create new list that contains each number at most N times."""
    seen = {}
    kept = []
    for value in order:
        occurrences = seen.get(value, 0)
        if occurrences < max_e:
            seen[value] = occurrences + 1
            kept.append(value)
    return kept
4125728945022026e8969c47c50b5312b651e0c3
542,801
def blank_line(sROIID_, sComment_, iIndexCount_):
    """
    Return correctly sized filler line for when extract is not possible:
    the quoted ROI id, 2*iIndexCount_+1 empty comma-separated fields,
    then the comment.
    """
    filler = "," * (2 * iIndexCount_ + 1)
    return '"' + sROIID_ + '"' + filler + sComment_
f86a4296c34bf71d527f9d34264a8c1730ef3a15
491,401
def dict_map_leaves(nested_dicts, func_to_call):
    """
    Applies a given callable to the leaves of a given tree / set of nested
    dictionaries, returning the result (without modifying the original
    dictionary).

    :param nested_dicts: Nested dictionaries to traverse
    :param func_to_call: Function to call on the 'leaves' of those nested
        dictionaries
    :return: Result of the transformation (dict)
    """
    # Recurse into sub-dicts; apply the callable to everything else.
    return {
        key: (dict_map_leaves(value, func_to_call)
              if isinstance(value, dict)
              else func_to_call(value))
        for key, value in nested_dicts.items()
    }
42139741a5a0e3dca7b0662086e0dbe1c16f6c8b
543,309