content
stringlengths
39
14.9k
sha1
stringlengths
40
40
id
int64
0
710k
import re
import shlex


def command_fix(kernel_command):
    """
    Check the command for anything that might cause upset when it gets run
    by bash.

    Current checks are:
        IRkernel::main() needs escaping.
        Warn for any brackets.
        Warn for unpaired quotes.

    Parameters
    ----------
    kernel_command : str
        The kernel command that is run by bash.

    Returns
    -------
    fixed_kernel_command : str
        The kernel command with any fixes applied.
    """
    # IRKernel::main() fix
    # if not escaped or quoted then bash trips up on the brackets
    if " IRkernel::main()" in kernel_command:
        kernel_command = kernel_command.replace(
            " IRkernel::main()", " 'IRkernel::main()'"
        )
        print("Escaping IRkernel::main().")
    # Unescaped brackets: warn when a bracket appears outside any quoted span.
    if re.search(r"[^\\][()]", kernel_command) and not re.search(
        r"[\'\"].*[^\\][()].*[\'\"]", kernel_command
    ):
        print("Warning: possibly unescaped brackets in the kernel command.")
    # shlex raises ValueError on unbalanced quoting.
    try:
        shlex.split(kernel_command)
    except ValueError:
        print("Kernel command may be missing quotation marks.")
    return kernel_command
79f32651a4c30911bd81bcb67c8b2e3b7a2dc8e2
113,499
def split_paragraphs(string):
    """
    Split `string` into a list of paragraphs.

    A paragraph is delimited by one or more empty lines (or lines containing
    only whitespace characters). Runs of consecutive blank lines act as a
    single separator; previously each extra blank line produced a spurious
    empty-string "paragraph" in the result.
    """
    para_list = []
    curr_para = []
    for line in string.splitlines(keepends=True):
        if line.strip():
            curr_para.append(line)
        elif curr_para:
            # Blank line ends the current paragraph; skip repeated blanks.
            para_list.append(''.join(curr_para))
            curr_para = []
    if curr_para:
        para_list.append(''.join(curr_para))
    return para_list
4e321d263beecf37efe1da34c503da314a97b894
113,500
def _wordwrap(text, chars_per_line=80): """Split the lines of a text between whitespaces when a line length exceeds the specified number of characters. Newlines already present in text are kept. """ text_ = text.split('\n') text = [] for l in text_: if len(l) > chars_per_line: l = l.split() c = 0 i = 0 _prev_i = 0 while i < len(l): while c <= chars_per_line and i < len(l): c += len(l[i]) if i < (len(l) - 1): c += 1 # whitespace char i += 1 if c > chars_per_line: i -= 1 text.append(' '.join(l[_prev_i:i])) _prev_i = i c = 0 else: text.append(l) # drop any trailing empty lines while not text[-1].strip(): text.pop() return '\n'.join(text)
075aaa0a057648342e4826025febe99aaf476d87
113,501
def fluctuations(N_avg, N2_avg, **kwargs):
    """Return the variance of N, i.e. <N^2> - <N>^2, from the two averages."""
    mean_squared = N_avg ** 2
    return N2_avg - mean_squared
d4be9a3d67c99a20a4227796af6bfc6fab36aa57
113,504
import logging


def make_consolehandler(formatter, level=logging.INFO):
    """Build a StreamHandler wired up with *formatter* and *level*."""
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    handler.setLevel(level)
    return handler
e295a3616c9510be9a77a3ac3470d2c9f62134cf
113,506
from collections import deque


def single_source_shortest_path(graph, node):
    """
    Find shortest paths from a single source to every reachable node in an
    unweighted graph using BFS.

    Parameters
    ----------
    graph : mapping of node -> iterable of neighbours
    node : source node

    Returns
    -------
    nodes : list
        Visited nodes in order of nondecreasing distance from the source.
    paths : dict
        node -> list of shortest-path predecessors.
    sigma : dict
        node -> number of distinct shortest paths from the source (float).
    """
    nodes = []
    paths = {v: [] for v in graph}
    sigma = dict.fromkeys(graph, 0.0)
    distances_dict = {}
    sigma[node] = 1.0
    distances_dict[node] = 0
    # deque gives O(1) popleft; the original list.pop(0) was O(n) per dequeue.
    remaining_nodes = deque([node])
    while remaining_nodes:  # BFS
        v = remaining_nodes.popleft()
        nodes.append(v)
        distance_v = distances_dict[v]
        sigma_v = sigma[v]
        for edge in graph[v]:
            if edge not in distances_dict:
                remaining_nodes.append(edge)
                distances_dict[edge] = distance_v + 1
            if distances_dict[edge] == distance_v + 1:
                # edge lies on a shortest path through v: count it
                sigma[edge] += sigma_v
                paths[edge].append(v)  # predecessors
    return nodes, paths, sigma
4b73ee197ae29aa8408c5a981ff147fd517fce7b
113,509
def splitSocketError(error):
    """Break a socket.error into a message and an "everything else" tuple.

    Returns (args, msg) where msg is the human-readable message and args
    holds any remaining positional arguments (typically the errno).

    Fix: Python 3 exceptions are not subscriptable, so ``error[0]`` raised
    TypeError; index ``error.args`` instead.
    """
    if len(error.args) > 1:
        msg = error.args[1]
        args = (error.args[0],)
    else:
        msg = error.args[0]
        args = ()
    return args, msg
45215ba0a2219a64cdb74f832531c29ae1d4952b
113,510
import pathlib


def get_basename_without_extension(path_file: pathlib.Path) -> str:
    """
    Convenience wrapper around ``Path.stem``: the final path component with
    its last suffix removed.

    >>> get_basename_without_extension(pathlib.Path('//main/xyz/test.txt'))
    'test'
    >>> get_basename_without_extension(pathlib.Path('//main/xyz/test'))
    'test'
    >>> get_basename_without_extension(pathlib.Path('//main/xyz/test.txt.back'))
    'test.txt'
    >>> get_basename_without_extension(pathlib.Path('//main/xyz/.test'))
    '.test'
    >>> get_basename_without_extension(pathlib.Path('//main/xyz/.test.txt'))
    '.test'
    """
    return path_file.stem
abe5b06490bac276295547a96263a7fe53596b44
113,519
import glob


def find_bams(project, stats_base_dir):
    """Locate every .bam for *project* below *stats_base_dir*.

    Returns a dict mapping "igo_id" -> list of bam paths.
    """
    bam_unix_regex = stats_base_dir + '/**/*_IGO_' + project + '_*.bam'
    print("Searching for all .bams for project {} starting in folder {} matching glob {}".format(project, stats_base_dir, bam_unix_regex))
    # e.g. /igo/staging/stats/DIANA_0479_BHM2NVDSX3/RNA/GA28_ot_IGO_12785_H_1.bam
    # recursive=True makes the search slow (can exceed a minute).
    project_bams = glob.glob(bam_unix_regex, recursive=True)
    print("Total bams found {}".format(len(project_bams)))
    bamdict = {}
    for bam in project_bams:
        # Everything after "_IGO_" minus the ".bam" suffix is the IGO id.
        igo_id = bam.split("_IGO_")[1].replace(".bam", "")
        print("Adding IGO ID {} bam {} to bam dictionary".format(igo_id, bam))
        bamdict.setdefault(igo_id, []).append(bam)
    return bamdict
2c5d3c3ffd31d92a38fc99e9c46d764a553ba058
113,526
def convert_flags_to_boolean_dict(flags):
    """Map every flag in *flags* to ``True`` in a new dict."""
    return dict.fromkeys(flags, True)
3dcf2c4cfa08b0a76859e4a98f5fcf92e9d63fec
113,530
def count_bits(bits):
    """Count '1's and '0's at each bit position across *bits*.

    Parameters
    ----------
    bits : sequence of equal-length strings of '0'/'1' characters.

    Returns
    -------
    dict mapping position -> {'1': count, '0': count}. Empty input yields an
    empty dict (previously raised IndexError on ``bits[0]``).
    """
    if not bits:
        return {}
    counted_bits = {
        position: {'1': 0, '0': 0} for position in range(len(bits[0]))
    }
    for line in bits:
        for position, bit in enumerate(line):
            counted_bits[position][bit] += 1
    return counted_bits
267d065cbe6b58a6c6a4c77132ad4763bda3e8c4
113,531
def edges_to_graph(input_edges):
    """Convert a list of edges into an undirected adjacency-set graph.

    Vertices without an edge are not considered.

    Parameters
    ----------
    input_edges : list of tuples (a, b)
        List of edges.

    Returns
    -------
    dict
        Each endpoint maps to the set of its adjacent vertices.
    """
    graph = dict()
    for a, b in input_edges:
        # setdefault creates the adjacency set on first sight of a vertex.
        graph.setdefault(a, set()).add(b)
        graph.setdefault(b, set()).add(a)
    return graph
0ef846ab9b2f84b42f20f2f25cabfe3acf74b47a
113,533
def split_dataset(dataframe, set_size):
    """
    Split *dataframe* in two contiguous parts.

    :param dataframe: Dataframe (or any sliceable sequence) to split.
    :param set_size: Fraction (0..1) of rows for the trailing set.
    :return: (head, tail) slices, or (None, None) when set_size is falsy.
    """
    if not set_size:
        return None, None
    split_point = int(len(dataframe) * (1 - set_size))
    return dataframe[:split_point], dataframe[split_point:]
4275bf613990c15cd11535a5aa21e61215a40bd2
113,535
def get_str_indices(container, string):
    """Return every index at which ``string`` occurs in ``container``.

    Args:
        container (list or tuple): container of strings
        string (str): the string to locate

    Returns:
        list
    """
    assert isinstance(container, (list, tuple))
    return [i for i, item in enumerate(container) if item == string]
ceea7c5c7a511498da4b13e9fb53cde5d0c4c3ab
113,538
def reserve_margin_constraint_rule(backend_model, carrier):
    """
    Enforces a system reserve margin per carrier: total supply capacity at
    the timestep of maximum demand must exceed that demand (converted to a
    per-timestep power figure) scaled up by (1 + reserve_margin).

    .. container:: scrolling-wrapper

        .. math::

            \\sum_{loc::tech::carrier \\in loc\\_tech\\_carriers\\_supply\\_all}
            energy_{cap}(loc::tech::carrier, timestep_{max\\_demand})
            \\geq
            \\sum_{loc::tech::carrier \\in loc\\_tech\\_carriers\\_demand}
            carrier_{con}(loc::tech::carrier, timestep_{max\\_demand})
            \\times -1 \\times \\frac{1}{time\\_resolution_{max\\_demand}}
            \\times (1 + reserve\\_margin)
    """
    # Model data attached to the Pyomo backend object by Calliope.
    model_data_dict = backend_model.__calliope_model_data["data"]
    reserve_margin = model_data_dict["reserve_margin"][carrier]
    # Timestep at which demand for this carrier peaks (precomputed).
    max_demand_timestep = model_data_dict["max_demand_timesteps"][carrier]
    max_demand_time_res = backend_model.timestep_resolution[max_demand_timestep]
    # loc_tech_carrier strings look like "loc::tech::carrier"; rsplit drops the
    # carrier suffix to index energy_cap, while split(...)[-1] filters by carrier.
    return sum(  # Sum all supply capacity for this carrier
        backend_model.energy_cap[loc_tech_carrier.rsplit("::", 1)[0]]
        for loc_tech_carrier in backend_model.loc_tech_carriers_supply_conversion_all
        if loc_tech_carrier.split("::")[-1] == carrier
    ) >= sum(  # Sum all demand for this carrier and timestep
        backend_model.carrier_con[loc_tech_carrier, max_demand_timestep]
        for loc_tech_carrier in backend_model.loc_tech_carriers_demand
        if loc_tech_carrier.split("::")[-1] == carrier
    ) * -1 * (  # carrier_con is negative by convention, hence the sign flip
        1 / max_demand_time_res
    ) * (
        1 + reserve_margin
    )
c2db318967e7409af302cec1da626d7b01b0ae72
113,542
def contar_caracteres(s):
    """Count the occurrences of each character in a string.

    e.g.:
    >>> contar_caracteres('ola')
    {'o': 1, 'l': 1, 'a': 1}
    >>> contar_caracteres('maca')
    {'m': 1, 'a': 2, 'c': 1}

    :param s: string whose characters are counted
    """
    resultado = {}
    for caracter in s:
        if caracter in resultado:
            resultado[caracter] += 1
        else:
            # first occurrence of this character
            resultado[caracter] = 1
    return resultado
9a7dea4e30052b2f5a1255bcb3db57c8c98246c4
113,543
def campaign_media(instance, filename):
    """Return an upload file path for a campaign media attachment.

    Saves the parent campaign first when it has no id yet, so the path
    always contains a real primary key.
    """
    campaign = instance.campaign
    if not campaign.id:
        campaign.save()
    return 'campaign/{0}/{1}'.format(campaign.id, filename)
9508020322b758c0dfc138c5d1782315971c6efe
113,559
import random def _course_cmp(x, y): """A comparator function for courses. Redacted courses are randomly sorted towards the bottom of the list. Otherwise the original ordering is preserved. Args: x: The first course to be compared. y: The second course to be compared. Returns: -1 if x is 'Redacted,' 1 if y is 'Redacted,' 0 otherwise. """ if x.code == 'Redacted' and y.code == 'Redacted': return random.randint(-1, 1) elif x.code == 'Redacted': return 1 elif y.code == 'Redacted': return -1 return 0
6694a3ecb09685c728d553bb1c24c7c921e4eb18
113,560
def is_nucleotide(sequence):
    """
    Check that *sequence* consists only of the 4 nucleotides A, C, G and T.

    :param sequence: a DNA sequence
    :return: True if every character is a valid nucleotide, else False
    """
    valid_bases = {"A", "C", "G", "T"}
    return all(letter in valid_bases for letter in sequence)
7a9237be30cd2780f3f2c2ecc264958ea0362f5b
113,563
def get_longitudes(pnt, lon, rad=3.0):
    """
    Select pointings lying within *rad* degrees of galactic longitude *lon*.

    Parameters
    ----------
    pnt : list of dict
        Pointings (each with an 'l' longitude key, deg)
    lon : float
        Galactic longitude (deg)
    rad : float, optional
        Selection radius (deg)

    Returns
    -------
    list of dict
        Pointings within the radius.
    """
    selected = []
    for p in pnt:
        # Wrap the longitude difference into (-180, 180].
        dl = p['l'] - lon
        if dl > 180.0:
            dl -= 360.0
        elif dl < -180.0:
            dl += 360.0
        if abs(dl) < rad:
            selected.append(p)
    return selected
b329476449a7aa28f6ac5f836f3f7d994239b121
113,565
import logging


def log_this(chrom, run_start, run_end):
    """Log a coordinate range, then return it as a tuple."""
    region_size = run_end - run_start
    logging.info("\tAccessible region %s:%d-%d (size %d)",
                 chrom, run_start, run_end, region_size)
    return (chrom, run_start, run_end)
be3a951cd1552e0d49a565b144b6adcc449682a7
113,567
def get_gcp_firewall_ingress_rules(project_id, compute):
    """
    Fetch ingress firewall rules for a GCP project.

    :param project_id: The project ID to get firewalls for
    :param compute: The compute resource object created by
        googleapiclient.discovery.build()
    :return: Firewall response object
    """
    request = compute.firewalls().list(
        project=project_id, filter='(direction="INGRESS")'
    )
    response = request.execute()
    return response
b91160ef429079f6466fc8a5cb8e8cb4c7389319
113,569
import re def _str_is_bool(data): """Verify if data is boolean.""" return re.match(r"^(true|false)$", str(data), flags=re.IGNORECASE)
b153cc3c62fbf3cb5c0729261e81df99b9ab0c5a
113,574
def _take_nearest_pair(values, target): """Given a sorted, monotonic list of values and a target value, returns the closest two pairs of numbers in the list to the given target. The first being the closest number less than the target, the second being the closest number greater than the target. For example:: >>> _take_nearest_pair([1, 2, 3], 2.5) (2, 3) If the target is not part of the continuous range of the values, then both numbers will either be the minimum or maximum value in the list. For example:: >>> _take_nearest_pair([1, 2, 3], 10) (3, 3) """ low = values[0] high = values[0] for value in values: if value <= target and value >= low: low = value if value > target: high = value break else: # If we never found a value higher than # the target, the the target is outside # of the range of the list. Therefore, # the highest close number is also the # lowest close number. high = low return low, high
339e30bfe22bd4f9bb5c5f155689e59162c75283
113,575
def de_normalize(feat_data, norm_params, norm_type='mean_var'):
    """
    De-normalize data using the method named by ``norm_type``.

    :param feat_data: data to de-normalize, shape (M, N)
    :param norm_params: a numpy array of shape (4, N): rows are min, max,
        mean and variance for each of the N feature dimensions
    :param norm_type: str, 'min_max' or 'mean_var'
    :return: numpy array, sharing same shape with input data
    """
    # Feature dimension must match the per-dimension parameter count.
    assert feat_data.shape[1] == norm_params.shape[1]
    assert norm_type in ['min_max', 'mean_var']
    if norm_type == 'min_max':
        # Inverse of a min-max squash into [0.01, 0.99].
        min_val, min_target = norm_params[0], 0.01
        max_val, max_target = norm_params[1], 0.99
        # NOTE(review): the +0.001 widens the range slightly — presumably it
        # mirrors an epsilon used by the paired forward normalization to
        # avoid a zero denominator; confirm against that code.
        return (max_val - min_val + 0.001) * (feat_data - min_target) / (max_target - min_target) + min_val
    else:
        # Inverse of z-scoring: x * var + mean.
        # NOTE(review): row 3 is multiplied back directly, so it must hold
        # whatever the forward pass divided by (std vs. variance) — verify
        # which quantity "variance" really is here.
        mean_val = norm_params[2]
        variance = norm_params[3]
        return feat_data * variance + mean_val
6314cdd29bf857e7bebd6dabf22ec8220150ae19
113,576
def chunkify(seq, n):
    """Split *seq* into n roughly equally sized lists.

    https://stackoverflow.com/questions/2130016/splitting-a-list-of-arbitrary-size-into-only-roughly-n-equal-parts
    """
    chunk_size = len(seq) / float(n)
    chunks = []
    start = 0.0
    while start < len(seq):
        # Float cursor keeps the chunk sizes balanced across the sequence.
        chunks.append(seq[int(start):int(start + chunk_size)])
        start += chunk_size
    return chunks
53cb06231230a930516b5b4a67c131ddcce42d15
113,577
def map_type_list_field(old_type):
    """
    Map a list type onto the scalar field type able to hold one element.

    Mappings
        - list:reference <table> --> reference <table>
        - list:integer           --> integer
        - list:string            --> string

    Returns None for unrecognized types.

    Fix: the original used ``old_type.strip("list:")``, which strips the
    *characters* l/i/s/t/: from both ends and so mangled reference table
    names ending in those letters (e.g. "... result" -> "... resu").
    """
    if old_type == "list:integer":
        return "integer"
    if old_type == "list:string":
        return "string"
    if old_type.startswith("list:reference"):
        # Slice off the exact "list:" prefix instead of strip().
        return old_type[len("list:"):]
    return None
496e238c48ed72d0713b4b17a89b9133e3cef8e0
113,579
import struct


def xtea_encrypt(key, block, n=32, endian="!"):
    """Encrypt a 64-bit data block using the XTEA block cipher.

    * key    = 128 bit (16 byte) key material
    * block  = 64 bit (8 byte) plaintext block
    * n      = rounds (default 32)
    * endian = byte order for packing/unpacking (see 'struct' docs;
               default big/network). Only change it when talking to an
               implementation with the opposite endianness.

    Returns the 8-byte ciphertext block.
    """
    v0, v1 = struct.unpack(endian + "2L", block)
    subkeys = struct.unpack(endian + "4L", key)
    acc, delta, mask = 0, 0x9e3779b9, 0xffffffff
    for _ in range(n):
        v0 = (v0 + (((v1 << 4 ^ v1 >> 5) + v1) ^ (acc + subkeys[acc & 3]))) & mask
        acc = (acc + delta) & mask
        v1 = (v1 + (((v0 << 4 ^ v0 >> 5) + v0) ^ (acc + subkeys[acc >> 11 & 3]))) & mask
    return struct.pack(endian + "2L", v0, v1)
6a404810f908dfe40c5efa0345cd106d98e1fd32
113,585
def get_discipline_area(label):
    """
    Search a PDS4 label for a Discipline_Area.

    Parameters
    ----------
    label : Label or ElementTree Element
        Full label for a PDS4 product with-in which to look for a
        discipline area.

    Returns
    -------
    Label, ElementTree Element or None
        Found Discipline_Area section with same return type as *label*,
        or None if not found.
    """
    # Discipline_Area sits one level below the root (e.g. Observation_Area).
    discipline_area = label.find('*/Discipline_Area')
    return discipline_area
f0464491efe722f981c6dadc117c62eb18479abc
113,587
import codecs
import gc
import ast


def loads(serialized_bytes):
    """Deserialize bytes back to an object tree.

    Uses ast.literal_eval (safe). GC is paused during parsing as a speed
    optimization and restored afterwards.

    Fix: the original unconditionally re-enabled GC in ``finally``, silently
    turning collection back on for callers that had deliberately disabled it;
    now the caller's prior GC state is preserved.

    Raises ValueError when the payload contains NUL bytes.
    """
    serialized = codecs.decode(serialized_bytes, "utf-8")
    if '\x00' in serialized:
        raise ValueError(
            "The serpent data contains 0-bytes so it cannot be parsed by ast.literal_eval. Has it been corrupted?")
    gc_was_enabled = gc.isenabled()
    try:
        gc.disable()
        return ast.literal_eval(serialized)
    finally:
        if gc_was_enabled:
            gc.enable()
ce34a0b2caa09b3bbee5d3438e6bf16736dbddc2
113,589
def pg_conn(pg_fresh, request):
    """Hand out the fresh connection, registering a rollback finalizer so
    the database is rolled back when each test function finishes."""
    request.addfinalizer(pg_fresh.rollback)
    return pg_fresh
c4ee419dacf8470f83f562c53a66d2e7f87b5873
113,597
def listify(x):
    """
    Coerce method input to a list: lists pass through unchanged, anything
    else is wrapped in a one-element list.
    """
    # isinstance() is preferred over type() for type tests.
    return x if isinstance(x, list) else [x]
f34b6a99879961957a31eef99dfd907100c159fc
113,603
def count_gender(data_list):
    """
    Count genders in a list of records.

    Args:
        data_list: full list of records, gender at index 6 of each row

    Returns:
        [male_count, female_count]
    """
    counts = [0, 0]  # [male, female]
    for item in data_list:
        gender = item[6]
        if gender == "Male":
            counts[0] += 1
        elif gender == "Female":
            counts[1] += 1
    return counts
ad119f7cd84494087e174ca51ff2780594f83044
113,605
def increment(string):
    """
    Increment a string of lowercase letters, e.g. aaa -> aab, aza -> azb,
    az -> ba. All-'z' strings now roll over with a carry ('z' -> 'aa',
    'zz' -> 'aaa'); previously they raised IndexError once the recursion
    reached the empty string. (The old docstring's "aza -> baa" example was
    also wrong: only the last character is incremented when it is not 'z'.)
    """
    letters = 'abcdefghijklmnopqrstuvwxyz'
    if not string:
        # Carry propagated past the leftmost character: grow the string.
        return 'a'
    if string[-1] != 'z':
        return string[:-1] + letters[letters.index(string[-1]) + 1]
    # Last char wraps to 'a'; carry into the prefix.
    return increment(string[:-1]) + 'a'
0d05663872aaf89077a703b47587abf71fc8127e
113,608
def count_vars_vcf(vcf_path):
    """
    Count the number of variant records in a VCF file.

    Header lines (starting with '#') are skipped. The file handle is now
    closed deterministically via a context manager (it was previously
    leaked).
    """
    num_records = 0
    with open(vcf_path) as in_file:
        for line in in_file:
            if not line.startswith('#'):
                num_records += 1
    return num_records
053316c4aae21fe75e96d31a9563dc499b5dea71
113,612
def _fixed_readline(self): """Read line from remote.""" line = [] while True: char = self.sslobj.read(1) line.append(char) if char in ("\n", ""): return ''.join(line)
0f25e15e3bce6ba30c1492305624d0a2bcba89d6
113,616
def find_tree(gi, found_lib, folders):
    """
    Look for a directory structure in the given library.

    Returns the id of the deepest folder of the tree if the full path was
    found; raises Exception at the first missing component.
    """
    # Index existing folders by their full path name.
    existing = {f['name']: f for f in gi.libraries.get_folders(found_lib)}
    path = ""
    last_f_id = None
    for folder_name in folders:
        path += "/" + folder_name
        if path not in existing:
            raise Exception("Did not find folder '" + folder_name + "'")
        print("Found folder " + folder_name)
        last_f_id = existing[path]['id']
    return last_f_id
5c1f2719ebfe6488dd7da0b5864b1851cb53d99d
113,617
import json


def write_json(filename, src_dict):
    """Serialize *src_dict* to *filename* as indented JSON."""
    with open(filename, 'w') as fp:
        json.dump(src_dict, fp, indent=2)
f70fafc24a850756bbb84a58897eccaa73777e29
113,618
def inversion_indices(df1, df2, column):
    """Return the indices in which df1[column] > df2[column]

    :param df1: A data frame
    :param df2: Another data frame
    :param column: A shared column
    :return: The indices where df1[column] > df2[column]
    """
    mask = df1[column] > df2[column]
    return df1[mask].index
172fd4d59970f139beda66e4ccd6dc0db57ddeb1
113,619
def is_dict(d):
    """Return True when *d* is a dict (or any dict subclass) instance."""
    return isinstance(d, dict)
520c1065fd7cef6bf4eb2abb9129ae49829c23fb
113,620
def folder_path_cleaning(folder):
    """
    Normalize a folder path: convert Windows backslashes to Unix forward
    slashes and guarantee a trailing ``/``.

    Parameters
    ----------
    folder : str
        The folder path.

    Returns
    -------
    folder_path : str
        The normalized folder path. An empty input yields '/' (previously
        it raised IndexError on ``folder_path[-1]``).
    """
    folder_path = folder.replace('\\', '/')
    if not folder_path.endswith('/'):
        folder_path += '/'
    return folder_path
37a67eaad1d61f6222f3e1bc7a0e5ec475cee7e4
113,621
def escape_string(string):
    """Return *string* with backslashes and double quotes escaped."""
    # Single-pass character translation: each original char is mapped once,
    # so newly inserted backslashes are never re-escaped.
    table = str.maketrans({'\\': '\\\\', '"': '\\"'})
    return string.translate(table)
35df1d49e87f8796fdf81ce9757ccb62a6e4dc8b
113,626
import math


def angle_difference(a1, a2):
    """
    Return the minimal angle difference between two orientations, also
    considering the orientation flipped by pi.

    Parameters
    ----------
    a1 : float
        An angle in radians
    a2 : float
        Another angle in radians

    Returns
    -------
    angle : float
        The minimal angle difference in radians
    """
    # Wrapped difference between the angles as given.
    direct = abs(math.pi - abs(abs(a1 - a2) - math.pi))
    # Difference with the smaller angle shifted by pi.
    if a1 < math.pi:
        shifted = abs(math.pi - abs(abs((a1 + math.pi) - a2) - math.pi))
    elif a2 < math.pi:
        shifted = abs(math.pi - abs(abs(a1 - (a2 + math.pi)) - math.pi))
    else:
        return direct
    return min(direct, shifted)
597ba70c1137decd47c68a2012486becc3e4347e
113,627
import torch


def sample_reparameterize(mean, std):
    """
    Draw a sample from N(mean, std^2) via the reparameterization trick.

    Inputs:
        mean - Tensor of arbitrary shape and range: distribution means.
        std  - Tensor of the same shape with strictly positive values:
               distribution standard deviations.
    Outputs:
        z - One sample per element, same shape as the inputs, with gradients
            flowing to both mean and std (the randomness lives in the
            detached epsilon noise).
    """
    noise = torch.randn(std.shape, device=mean.device)
    return mean + std * noise
33e44a3ed3d3a12774250b2f9c3a6d019abd0ca2
113,629
import string
import random


def id_generator(size=16, chars=string.ascii_uppercase + string.digits):
    """Generate a random ID string.

    Params:
        size: length of the string to generate (default 16)
        chars: pool of characters to draw from

    Returns:
        A random string of *size* characters from *chars*.
    """
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
10b9647a0a42d9040700bff2122b2973c399752c
113,633
def serialize_grades(grades):
    """
    Render *grades* as an XML string.

    :param grades: mapping of student_id -> grade object (with a ``grade``
        attribute)
    :return: the XML representation as a string
    """
    parts = ['<scores>\n']
    for student_id, grade in grades.items():
        parts.append("\t<score>\n\t\t<studentId>" + str(student_id) + "</studentId>\n")
        parts.append("\t\t<value>" + str(grade.grade) + "</value>\n\t</score>\n")
    parts.append('</scores>')
    return ''.join(parts)
359fa9f4788c71bbd89406a0dc59f6f32c026058
113,635
def str2bool(val):
    """Return True when *val* spells an affirmative boolean string."""
    truthy = {"yes", "true", "t", "1"}
    return val.lower() in truthy
32ad4d7a79e016a95f41b8ce1f58f2b71f91cfae
113,636
def scale_identity(h, D, delta):
    """Identity scaling: return *h* unchanged (D and delta are ignored,
    kept only to match the common scaling-function signature)."""
    return h
1dfaedc131b411fa7105bf4d494505fa94c65447
113,637
def encode_entry(entry):
    """Serialize *entry* into its JSON-ready list form:
    [key, settings, constraints-as-json]."""
    constraints_json = entry.constraints.to_json()
    return [entry.key, entry.settings, constraints_json]
bc11ce2444077b9b78e74a8a98156e25680446b2
113,647
def review_rating(review):
    """Return the star rating (1 to 5) stored at index 1 of *review*."""
    stars = review[1]
    return stars
5d6acc2743e347b2c817acfd52314eee7c332ee3
113,655
def complexify_string(line):
    """
    Convert a string of the form "(real, imag)" into a complex number.

    :type line: str
    :param line: String in the form ``"(real, imag)"``.

    :rtype: complex
    :return: Complex number.

    .. rubric:: Example

    >>> complexify_string("(1,2)")
    (1+2j)
    >>> complexify_string(" ( 1 , 2 ) ")
    (1+2j)
    """
    real_txt, imag_txt = line.split(',')
    real = float(real_txt.strip()[1:])    # drop the leading '('
    imag = float(imag_txt.strip()[:-1])   # drop the trailing ')'
    return complex(real, imag)
87e984680760cdee991112401d71d19e92b9476b
113,656
import re


def detect_messenger_sentence(text):
    """Ban French sentences created by Messenger (like make someone admin, changing nickname, polls...)."""
    ban_words = ['administrateur', 'sondage', 'discussion vidéo', 'a voté pour',
                 'a rejoint l’appel', 'a surnommé',
                 'a commencé à partager une vidéo', 'a changé la photo du groupe']
    # True as soon as any banned phrase occurs anywhere in the text.
    return any(re.search(phrase, text) for phrase in ban_words)
dea4539ffa1a4a07b5dc16f2e290306a7bd7d3c4
113,658
import random


def particle_movement_y(time):
    """
    Random walk along the Y axis.

    Parameter:
        time (int): number of unit steps

    Return:
        y (int): final Y position after *time* random +/-1 steps
    """
    steps = [1, -1]
    position = 0
    for _ in range(time):
        position += random.choice(steps)
    return position
c577b620bf60749df640f0cbb8190790019b061b
113,660
def get_network(network_arg, task_idx):
    """
    Pick the network file for a task from the --networks argument list.

    Parameters
    ----------
    network_arg: list of network filenames as parsed by argparse (--networks)
    task_idx: int
        index of the task/network

    Returns
    ------
    Path to the network for task task_idx. A single-element list is shared
    by all tasks.
    """
    if len(network_arg) == 1:
        return network_arg[0]
    # task_idx == 0 also lands on network_arg[0], as before.
    return network_arg[task_idx]
eac76c0c70e5dec1dccf8e3c3adea8a199997c10
113,661
from typing import Optional


def get_protocol(uri: str) -> Optional[str]:
    """
    Extract the protocol from a URI by cutting at the first ``://``.

    :param uri: The uri
    :return: The protocol (if any) else None
    """
    scheme, separator, _ = uri.partition("://")
    return scheme if separator else None
6cbba8579951e4c523ee92f2ea35af228bd57254
113,665
import requests


def fetch(url, payload, auth):
    """Fetch a resource from the groups.io API.

    :param url: endpoint URL
    :param payload: query parameters dict
    :param auth: requests auth object/tuple
    :return: the Response on success
    :raises requests.exceptions.HTTPError: on a 4xx/5xx response

    Fix: the old ``try/except HTTPError as e: raise e`` wrapper immediately
    re-raised the very exception raise_for_status() threw, adding nothing
    but an extra traceback frame; it was removed.
    """
    r = requests.get(url, params=payload, auth=auth)
    r.raise_for_status()
    return r
ce18fcf55e243c382c19ade5eb4ebc2205657cc1
113,674
def make_article_json(a):
    """Convert an Article object into a plain dict, including the optional
    subsection_name/imgurl keys only when they are set."""
    payload = {
        'snippet': a.snippet,
        'aid': a.aid,
    }
    if a.subsection_name:
        payload['subsection_name'] = a.subsection_name
    if a.imgurl:
        payload['imgurl'] = a.imgurl
    return payload
98cf8964826220dad368b290a61a80c4576e649b
113,679
def coordinateSelection(backyardWidth, backyardHeight):
    """
    Prompt the user for a "<row> <column>" guess and validate it against the
    backyard dimensions.

    Valid data is two integers separated by whitespace representing
    <row> <column>; "-1" quits.

    Returns:
        "quit"      when the user enters -1,
        "invalid"   for any malformed or out-of-range input,
        [row, column] (ints) for a valid guess.

    Fix: the old ``isalpha()`` check only caught input that was entirely
    alphabetic, so mixed input like "1 a" or "1.5 2" crashed with
    ValueError at int(); all non-integer tokens are now rejected cleanly.
    """
    print("\nTo do so, please, enter the row and the column number of a cell in which you suspect a bone is buried \n(e.g., 0 3 if you suspect that part of a bone is buried on the first row at the 4th column).")
    userInput = input("Enter -1 to quit : ")
    # check if user gave no input
    if len(userInput) == 0:
        print("***You have not entered anything. You need to enter a valid row and the column number!\n")
        return "invalid"
    tokens = userInput.split()
    try:
        numbers = [int(tok) for tok in tokens]
    except ValueError:
        # any non-integer token (letters, floats, stray symbols)
        print("***You have entered %s. You need to enter a valid row and the column number!\n" % userInput)
        return "invalid"
    # check if user wants to quit
    if numbers[0] == -1:
        return "quit"
    # check if user entered only 1 value
    if len(numbers) == 1:
        print("***You have entered only 1 value. You need to enter a valid row and the column number!\n")
        return "invalid"
    row, column = numbers[0], numbers[1]
    # check if user's input is out of range
    if row >= backyardHeight or column >= backyardWidth or row < 0 or column < 0:
        print("You needed to enter a row and column number of a cell that is within the backyard!\n")
        return "invalid"
    return [row, column]
81ac16eb74408ce06a676684e67fb76f84dd7563
113,681
def ro(formation_factor, rw):
    """
    Archie Ro - resistivity of the fully water-saturated formation (ohm.m).

    Parameters
    ----------
    formation_factor : float
        Archie Formation Factor
    rw : float
        Resistivity of formation water (ohm.m)

    Returns
    -------
    float
        Resistivity of the water-saturated formation (ohm.m)
    """
    return rw * formation_factor
4e52008f45d3da92e83ec243ceb078df4dc10c57
113,684
def list_size_reducer(reduction_factor, your_list):
    """
    Optionally reduce the length of the long inspiral output lists (not the
    short merger lists) to save storage space.

    NOTES: The typical reduction factor used in our research is 100. The
    inspiral lists consumed by the matching/merger code are realtimes,
    omega, i_phase and i_amp, so if you reduce one you should reduce all
    of them.

    Parameters
    ----------
    reduction_factor: int
        The factor you want to reduce the list length by.
    your_list: list
        The list you want to reduce.

    Returns
    -------
    reduced_list: list
        Every reduction_factor-th element of your_list, starting with the
        first. (An empty input now yields [] instead of raising IndexError.)
    """
    # input type checking (isinstance per convention; was type() ==)
    assert isinstance(reduction_factor, int), 'reduction_factor should be an int.'
    assert isinstance(your_list, list), ('The thing to be reduced needs to be a '
                                         'list.')
    # [0::n] reproduces the old "first element plus every n-th" loop exactly,
    # and handles the empty list that previously crashed on your_list[0].
    return your_list[::reduction_factor]
377aa817fd172fad24ea1caf55f81a14e2fe3785
113,686
import torch


def conv1x1(in_planes, out_planes, init='no', cnv_args=None, std=0.01):
    """Build a 1x1 convolution layer.

    Parameters
    ----------
    in_planes, out_planes : int
        Input/output channel counts.
    init : str
        'no' leaves torch's default init; 'normal0.01' draws weights from
        N(0, std) and zeroes the bias. Anything else raises ValueError
        (previously a bare ``assert False``, which is silently skipped
        under ``python -O``).
    cnv_args : dict, optional
        Extra kwargs for torch.nn.Conv2d. Defaults to
        {'bias': True, 'kernel_size': 1}. (Was a shared mutable default
        argument; now rebuilt per call.)
    std : float
        Standard deviation for the 'normal0.01' init.
    """
    if cnv_args is None:
        cnv_args = {'bias': True, 'kernel_size': 1}
    cnv = torch.nn.Conv2d(in_planes, out_planes, **cnv_args)
    # init weights ...
    if init == 'no':
        pass
    elif init == 'normal0.01':
        cnv.weight.data.normal_(0., std)
        if cnv.bias is not None:
            cnv.bias.data.fill_(0.)
    else:
        raise ValueError("unknown init: {!r}".format(init))
    return cnv
15d54e38b1574cfed3659642a3d86e635c124e20
113,689
def get_max_week_number(course):
    """
    Get the max week number from a course object.

    :param course: course object (dict with a "Weeks" string like "1-3, 5, 7-9")
    :return: last week number included in the course
    """
    last_period = course.get("Weeks").split(', ')[-1]
    # Works for both forms: "7-9" -> "9", "5" -> "5".
    return int(last_period.split('-')[-1])
0497948bdd6b6fb29a134b04df197ed4eb4ac202
113,690
def unbinize(u=u''):
    """
    Return the integer n equivalent of binary unicode string u.

    example: unbinize(u'1011') returns 11
    """
    n = 0
    for bit in u:
        # shift left and OR in the next bit (any nonzero digit counts as 1)
        n = (n << 1) | (1 if int(bit) else 0)
    return n
6291416ff6c47194df4029bde25535b16e3f9b45
113,691
def total_annualized_returns(P_rtn, N):
    """
    Annualize a strategy's total return.

    :param P_rtn: total strategy return over the period (e.g. 0.10 for 10%)
    :param N: number of trading days the strategy ran (250 days per year)
    :return: annualized return
    """
    # (1 + r_total)^(250/N) - 1
    return (1 + P_rtn) ** (250 / N) - 1
31c26abfa39d7bc570b4ae710cd47cf52fb4540e
113,694
import torch
from typing import Tuple


def do_rnnt_pruning(
    am: torch.Tensor, lm: torch.Tensor, ranges: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Prune RNNT encoder (am) and prediction-network (lm) outputs.

    Args:
      am:
        Encoder output, shape (B, T, C).
      lm:
        Prediction network output, shape (B, S + 1, C).
      ranges:
        Symbol indexes to keep per frame, shape (B, T, s_range); see
        `get_rnnt_prune_ranges` for details.
    Returns:
      The pruned am and lm, each of shape (B, T, s_range, C).
    """
    assert ranges.shape[0] == am.shape[0]
    assert ranges.shape[0] == lm.shape[0]
    assert am.shape[1] == ranges.shape[1]
    batch, frames, s_range = ranges.shape
    symbols_plus_one = lm.shape[1]   # S + 1
    channels = lm.shape[2]

    # Broadcast each encoder frame across its s_range kept symbols.
    am_pruning = am.unsqueeze(2).expand((batch, frames, s_range, channels))

    # For every (b, t), gather the s_range symbol rows named by `ranges`.
    index = ranges.reshape((batch, frames, s_range, 1)).expand(
        (batch, frames, s_range, channels)
    )
    lm_pruning = torch.gather(
        lm.unsqueeze(1).expand((batch, frames, symbols_plus_one, channels)),
        dim=2,
        index=index,
    )
    return am_pruning, lm_pruning
59a4ab6abfead63510e51cd58d7aa154704a31fe
113,700
def findClosest(target, l):
    """Return the index of the element of *l* closest to *target*
    (the first such index on ties)."""
    best_idx = 0
    best_diff = abs(target - l[0])
    for idx, elem in enumerate(l):
        diff = abs(target - elem)
        if diff < best_diff:
            best_diff = diff
            best_idx = idx
    return best_idx
417508ed425a030e75243aaf9dd7f05b74c7d765
113,702
def tick(price, tick_size=0.05):
    """Round *price* to the nearest multiple of *tick_size*."""
    n_ticks = round(price / tick_size)
    return n_ticks * tick_size
cc238dc898ba922fde2a3aaa7a76d9218f8d6d06
113,713
import requests
import tempfile


def download_file(target_url):
    """Download ``target_url`` to a temporary file and return its path.

    The file is kept on disk after the handle is closed (``delete=False``);
    the caller is responsible for removing it when done.

    :param target_url: URL of the resource to fetch.
    :return: filesystem path of the downloaded temporary file.
    :raises requests.HTTPError: if the server responds with an error status.
    """
    # A timeout prevents hanging forever on an unresponsive server.
    resp = requests.get(target_url, timeout=60)
    # Fail loudly instead of silently saving an error page to disk.
    resp.raise_for_status()
    with tempfile.NamedTemporaryFile('wb+', delete=False) as f:
        f.write(resp.content)
        return f.name
b0893f8f40fd83f34ea38dbe620402dd3739ab1f
113,717
import math


def is_missing(x):
    """Test for missing values.

    Missing values are: None, empty str/dict/list, and float NaN.

    Parameters
    ----------
    x : object
        The value to test.

    Returns
    -------
    bool
        True if the value is missing.
    """
    if x is None:
        return True
    if isinstance(x, float):
        return math.isnan(x)
    if isinstance(x, (str, dict, list)):
        return len(x) == 0
    return False
4ca1f0c4616984c08ff3121bf1c08701dd841ac5
113,719
import zipfile
import tarfile


def is_archive(filename):
    """Test if a given file is an archive (zip or tar).

    :param filename: file to test.
    :return: `True` if it is an archive.
    """
    if zipfile.is_zipfile(filename):
        return True
    return tarfile.is_tarfile(filename)
820f4feaa9a214db9c799a981402711e7a7cb873
113,720
import pathlib


def path_to_mrl(file_path):
    """Convert a filesystem path to an MRL (media resource locator).

    Args:
        file_path (pathlib.Path or str): Path to the resource.

    Returns:
        str: ``file://`` URI pointing at the resource.
    """
    path_obj = pathlib.Path(file_path)
    return path_obj.as_uri()
8d43091027d7d585b0a8aa59eb8279e02aa3ef8d
113,722
from pathlib import Path
import json


def get_settings(f_settings: Path = Path() / 'settings.json') -> dict:
    """Load the Gmail username/password settings from a JSON file.

    :param f_settings: path of the settings file; defaults to
        ``settings.json`` in the current directory.
    :return: parsed settings as a dict.
    """
    print('Getting Gmail credentials')
    with f_settings.open() as fh:
        return json.load(fh)
4ae358739ac3a6acb93bf5fc73e00069ea21bf88
113,723
def get_ext_info(hdu):
    """Summarize one extension of a multi-extension FITS HDU.

    :param hdu: a single extension of a multiextension HDU item
    :type hdu: astropy.io.fits.hdu.image class
    :return: 3-element dictionary with text values for extension name,
        extension type, and extension size.
    """
    info = {}
    info["name"] = hdu.name
    # Recover the class name from repr(), e.g. "<...fits.ImageHDU object ...>".
    info["type"] = str(hdu).split(" object")[0].split(".")[-1]
    if info["type"] == 'ImageHDU' and hdu.shape:
        # NOTE(review): assumes a 2-D image extension — confirm for cubes.
        info["size"] = "{} x {}".format(hdu.shape[0], hdu.shape[1])
    elif "table" in info["type"].lower():
        info["size"] = "{}R x {}C".format(len(hdu.data), len(hdu.data[0]))
    else:
        info["size"] = "()"
    return info
67b98d488ee4b2fabc05856c60049356d8586df1
113,726
def std_atomic_number(value):
    """Return the true atomic number from a NAT-form value.

    In the NAT convention the real atomic number is the remainder of the
    stored value when divided by 100.
    """
    return divmod(value, 100)[1]
0d439ec9294311b0d83220a6d254c9b054944fe0
113,737
def suite_name_func(cls_name, suite):
    """Return a customized "<class> -- <value>" name for a testsuite."""
    return f"{cls_name} -- {suite.val}"
8b8fa31d3c605cedd3413772369c12f1c0d8af0e
113,745
import torch


def normalize_coords(coords_2D, width, height):
    """Normalize pixel coordinates (BW x N x 2) into the range [-1, 1].

    Args:
        coords_2D (torch.tensor): (b*w, N, 2) pixel coordinates (u, v)
        width (float): width of the image in pixels
        height (float): height of the image in pixels

    Returns:
        torch.tensor: (b*w, N, 2) coordinates normalized to [-1, 1]
    """
    n_batch = coords_2D.size(0)
    u = coords_2D[:, :, 0].reshape(n_batch, -1)
    v = coords_2D[:, :, 1].reshape(n_batch, -1)
    # Map [0, dim-1] -> [-1, 1] independently per axis.
    u_norm = 2 * u / (width - 1) - 1
    v_norm = 2 * v / (height - 1) - 1
    return torch.stack((u_norm, v_norm), dim=2)
e0eb12e27affdf9dd1e2da05e21283600e51d4ac
113,748
def findnode(nodes, name):
    """Return the node in ``nodes`` whose ``.name`` equals ``name``.

    Returns None when no node matches.
    """
    return next((node for node in nodes if node.name == name), None)
6887d59e2587ab7bf53f80d91c32422da87a244e
113,756
def _get_random_subset(x, size, rand_state): """Get a random subset of items from a list or array, without replacement. Parameters ---------- x : list or np.ndarray One-dimensional array of items to sample from. size : int The number of items to sample. Must be less than the number of input items. rand_state : np.random.RandomState A random state object to use for random number generation. Returns ------- sample : list A random subset of the input items. Notes ----- This function generates random subsets identical to the internal ``randsample`` function in MATLAB PREP's ``findNoisyChannels.m``, allowing the same random seed to produce identical results across both PyPREP and MATLAB PREP. """ sample = [] remaining = list(x) for val in rand_state.rand(size): index = round(1 + (len(remaining) - 1) * val) - 1 pick = remaining.pop(index) sample.append(pick) return sample
a4d64cd4caf26d05c635e231d292235ac54fd7ab
113,759
def read_image_bytes(path):
    """Read raw image bytes from disk.

    We need this because we use a lot of BytesIO for manipulation and
    getting the bytes via the PIL image API is ~10x slower.

    Args:
        path (Path): image path

    Returns:
        bytes: image bytes
    """
    # `with` guarantees the handle is closed even if read() raises,
    # unlike the previous manual open()/close() pair.
    with open(path, 'rb') as f:
        return f.read()
a0d8f10924def45a6aa27933fec77ef2ce44549d
113,761
def get_highest_state(states):
    """Return the most severe state among ``states``.

    Severity order (low to high): UNDEF < OK < WARNING < CRITICAL.
    """
    levels = ['UNDEF', 'OK', 'WARNING', 'CRITICAL']
    return max(states, key=levels.index)
33540ea2cb35c50e5b9a2cacb78efbc9b2361017
113,763
def check_alphabet_records(records, alphabet):
    """Filter out records not consistent with ``alphabet``.

    Each record's sequence is expected at index 1; a record passes when
    every symbol in its sequence is in the alphabet. Returns a generator.
    """
    allowed = set(alphabet)
    return (record for record in records if set(record[1]) <= allowed)
cf1871226863534c4dd9c0655ea3d5a35c38b5e7
113,768
def post_float(message):
    """Prompt repeatedly until the user enters a positive number.

    :param message: prompt shown to the user.
    :return: the entered value as a positive float.
    """
    while True:
        try:
            value = float(input(message))
        except ValueError:
            # Input was not parseable as a number at all.
            print("Input is not a number. Try again.")
            continue
        if value > 0:
            return value
        print("Not a positive number. Try again.")
304b38b66b0edf67e0cccb1bcde46762d583d24f
113,771
def is_qemu(td, qemu):
    """Check whether the test MACHINE is a qemu machine.

    True when ``qemu`` occurs in MACHINE or MACHINE starts with 'qemu'.
    """
    machine = td.get('MACHINE', '')
    return qemu in machine or machine.startswith('qemu')
9f3cc6e483cea879a0ce85a5770fc2ab8a671df3
113,773
def make_games_distribution(games_db):
    """Return the probability distribution of owning / wanting to play a game.

    Based on the proportion of 'owned' copies reported on BGG.
    """
    total = sum(game['owned'] for game in games_db)
    return [game['owned'] / total for game in games_db]
d9cfbf3886ae3e133542cb5125e88f5b5903416f
113,779
def adjacent(cell, size=None):
    """Return the coordinates of the four cells adjacent to a given cell.

    :param cell: (row, column) pair.
    :param size: optional [nlines, ncolumns]; when given, out-of-bound
        coordinates are filtered out.
    :return: list of (row, column) neighbour coordinates.
    """
    row, col = cell
    neighbours = [
        (row - 1, col),
        (row, col - 1),
        (row + 1, col),
        (row, col + 1),
    ]
    # Bug fix: compare against None with `is`, not `==` (PEP 8 idiom).
    if size is None:
        return neighbours
    return [
        (r, c)
        for r, c in neighbours
        if 0 <= r < size[0] and 0 <= c < size[1]
    ]
7e0dd4be231e0cb3d46c6a6717f348424c8ece9f
113,790
def set_size(width, fraction=1, subplots=(1, 1)):
    """Set figure dimensions to avoid scaling in LaTeX.

    :param width: document text width (in the figure's unit, e.g. inches).
    :param fraction: fraction of ``width`` the figure should occupy.
    :param subplots: (rows, cols) grid; height scales with rows/cols.
    :return: (fig_width, fig_height) tuple.
    """
    golden_ratio = 1.618
    # Bug fix: `fraction` was accepted but never applied to the width.
    fig_width = width * fraction
    fig_height = (fig_width / golden_ratio) * (subplots[0] / subplots[1])
    return fig_width, fig_height
109f185ba76081532b2be9c9945625f13da99fc8
113,791
def none_or_str(x):
    """Coerce ``x`` to str, except None (and str), which pass through."""
    if x is None or isinstance(x, str):
        return x
    return str(x)
53371f0a7b261fcc92e681b3c69a7683a6f45d6d
113,792
from typing import Optional


def node_query(
    node_name: Optional[str] = None,
    node_type: Optional[str] = None,
    node_id: Optional[str] = None,
) -> str:
    """Create a Cypher node query.

    Parameters
    ----------
    node_name :
        The name of the node. Optional.
    node_type :
        The type of the node. Optional.
    node_id :
        The identifier of the node. Optional.

    Returns
    -------
    :
        A Cypher node query as a string.
    """
    query = node_name if node_name else ""
    if node_type:
        query += f":{node_type}"
    if node_id:
        # Separate the id clause from a preceding name/type with a space.
        separator = " " if query else ""
        query += f"{separator}{{id: '{node_id}'}}"
    return query
6487f1cf1ee54c0b13dd536e13ddb95b22496f75
113,794
import re


def simple_preprocess(text_list: list) -> list:
    """Perform simple preprocessing on the list of texts.

    Lowercases, replaces non-word characters with spaces, and tokenizes.

    Args:
        text_list (list): list of texts

    Returns:
        list: list of preprocessed texts in tokens format
    """
    processed = []
    for text in text_list:
        cleaned = re.sub(r'[^\w]', ' ', text.lower())
        processed.append(cleaned.split())
    return processed
1fc7c55ec067666c7cf7f0617065100f4ec69a0c
113,796
import torch


def get_device(gpus: int) -> torch.device:
    """
    Return the device type to use.

    If the user requested GPUs (gpus > 0) and CUDA is actually available,
    the device type will be 'cuda'. Otherwise, the device type will be
    'cpu' which will be significantly slower.

    Parameters
    ----------
    gpus : int
        An ``int`` of the number of GPUs to use during training.

    Returns
    -------
    torch.device
        Returns the specific type of device to use for computations where
        applicable.
    """
    # Bug fix: previously returned 'cuda' without checking availability,
    # which crashes later on CUDA-less hosts.
    if gpus > 0 and torch.cuda.is_available():
        return torch.device('cuda')
    return torch.device('cpu')
3fa7bbf5a39914428f215bce2b7dab9a06b6a4ba
113,797
from typing import List


def pascal_case(word: str) -> str:
    """Convert the given word to pascal case.

    >>> pascal_case("someWord")
    "SomeWord"
    """
    segments = word.split("_")
    if len(segments) > 1:
        # snake_case input: title-case every non-empty segment.
        return "".join(seg.title() for seg in segments if seg)
    # camelCase / single word: only upper-case the first character.
    pieces: List[str] = [seg[0].upper() + seg[1:] for seg in segments if seg]
    return "".join(pieces)
dc840f4cd3831463b79196bd53a3b36f62f4c9b6
113,798
def notas(*notas: float, situação: bool = False):
    """Summarize a set of student grades.

    Reports the number of grades, the highest and lowest grade, the class
    average and, optionally, the class situation.

    :param notas: one or more student grades
    :param situação: whether to include the class situation (optional)
    :return: dict with all the information described above
    """
    # Sentinels assume grades in [0, 10] — kept for behavioral parity.
    highest = 0
    lowest = 10
    total = 0
    for grade in notas:
        if grade > highest:
            highest = grade
        if grade < lowest:
            lowest = grade
        total += grade
    summary = dict()
    summary['quantidade'] = len(notas)
    summary['maiornota'] = highest
    summary['menornota'] = lowest
    summary['media'] = total / len(notas)
    if situação:
        if summary['media'] < 4:
            summary['situação'] = 'horrivel'
        elif summary['media'] > 7:
            summary['situação'] = 'otima'
        else:
            summary['situação'] = 'rasoavel'
    return summary
c4617eea28de0c58ae5b677c9991afd1d0185c61
113,810
def parse_references(article):
    """
    Parse list of references from a given BeautifulSoup of an article.

    :param article: BeautifulSoup tree of a GROBID-style TEI article.
    :return: list of dicts with keys 'title', 'journal', 'year', 'authors'.
    """
    references = article.find('text').find('div', attrs={'type': 'references'})
    references = references.find_all('biblstruct') if references is not None else []
    reference_list = []
    for reference in references:
        title = reference.find('title', attrs={'level': 'a'})
        if title is None:
            title = reference.find('title', attrs={'level': 'm'})
        title = title.text if title is not None else ''

        journal = reference.find('title', attrs={'level': 'j'})
        journal = journal.text if journal is not None else ''
        # Bug fix: `journal is ''` compared object identity, not equality
        # (a SyntaxWarning on modern Python and unreliable for non-interned
        # strings); use equality so the publisher fallback actually fires.
        if journal == '':
            journal = reference.find('publisher')
            journal = journal.text if journal is not None else ''

        year = reference.find('date')
        year = year.attrs.get('when') if year is not None else ''

        authors = []
        for author in reference.find_all('author'):
            firstname = author.find('forename', {'type': 'first'})
            firstname = firstname.text.strip() if firstname is not None else ''
            middlename = author.find('forename', {'type': 'middle'})
            middlename = middlename.text.strip() if middlename is not None else ''
            lastname = author.find('surname')
            lastname = lastname.text.strip() if lastname is not None else ''
            # Bug fix: same `is not ''` identity-comparison issue as above.
            if middlename != '':
                authors.append(firstname + ' ' + middlename + ' ' + lastname)
            else:
                authors.append(firstname + ' ' + lastname)
        authors = '; '.join(authors)

        reference_list.append({
            'title': title,
            'journal': journal,
            'year': year,
            'authors': authors,
        })
    return reference_list
c55d24ca90565750aded2161efbace40499c4175
113,813
def get_cache_key(endpoint, identity, page, per_page, sort, route_arg=None):
    """Build a cache key for the endpoint.

    Falsy identities share the "anonymous" namespace.
    """
    who = identity if identity else "anonymous"
    key = f"{endpoint}_{who}_{page}_{per_page}_{sort}"
    if route_arg:
        key = f"{key}_{route_arg}"
    return key
f6837b1b3fc9ff420896937f3d1330fd4fc157a7
113,819
from typing import Dict
import yaml


def manifest_reader(file_path: str) -> Dict[str, Dict]:
    """Read a YAML manifest file into a dictionary and return it.

    :param file_path: path of the manifest file.
    :return: parsed manifest contents.
    """
    with open(file_path, "r") as fh:
        # safe_load refuses arbitrary Python object construction; it parses
        # the same plain-data manifests FullLoader did, without that risk.
        return yaml.safe_load(fh)
99f08f1b2dd4491030bef72317a7c9f95e5dc6e5
113,826
def replace_all(text, replace_dict):
    """
    For every key, value pair in 'replace_dict', replace every occurrence
    of the key in 'text' with the value. Return the resulting text.

    Note: replacements are applied sequentially in dict order, so a later
    replacement can rewrite the output of an earlier one.
    """
    # Bug fix: dict.iteritems() is Python 2 only and raises AttributeError
    # on Python 3; use items() instead.
    for old, new in replace_dict.items():
        text = text.replace(old, new)
    return text
52396d57b76ff929fa981fb3493a2d8a46bcf28b
113,828
def cli(ctx, history_id, contents=False, deleted="", visible="", details="", types=""):
    """Get details of a given history.

    By default, just get the history meta information.

    Output:

        details of the given history
    """
    filters = dict(
        contents=contents,
        deleted=deleted,
        visible=visible,
        details=details,
        types=types,
    )
    return ctx.gi.histories.show_history(history_id, **filters)
95697f3b6d5bd3126a4f7eaa2c4c8fa256ce7401
113,830
def Udrift(amp, gAbs, c, d):
    """Calculate the 2nd order Stokes drift for a linear mode.

    Parameters
    ----------
    amp : float
        Description: Wave amplitude
    gAbs : float
        Magnitude of gravitational acceleration
    c : float
        Wave celerity
    d : float
        Water depth

    Returns
    -------
    float
        Magnitude of the mean velocity drift
    """
    # Same left-to-right operation order as the textbook form g*a^2/(2*c*d).
    numerator = 0.5 * gAbs * amp * amp
    return numerator / c / d
65baa9182e0730fe0d73dac498b3973cff1e8d32
113,831
def _is_prefix(prefix, name): """Determines whether one fullname is a prefix of another. Args: prefix: The fullname that might be a prefix. name: The entire fullname. Returns: A boolean indicating whether or not the first fullname is a prefix of the second. """ prefix_parts = prefix.split('.') if prefix else [] name_parts = name.split('.') if name else [] return name_parts[:len(prefix_parts)] == prefix_parts
f2b05f5f4e9d2f72d49f0dad0ba9d063b726f721
113,834
def aa_precision_recall_with_threshold(correct_aa_confidences, all_aa_confidences, num_original_aa, threshold):
    """
    Calculate precision and recall for the given amino acid confidence
    score threshold.

    Parameters
    ----------
    correct_aa_confidences : list
        List of confidence scores for correct amino acid predictions
    all_aa_confidences : list
        List of confidence scores for all amino acid predictions
    num_original_aa : int
        Number of amino acids in the predicted peptide sequences
    threshold : float
        Amino acid confidence score threshold

    Returns
    -------
    aa_precision : float
        Correct aa predictions above threshold divided by all predictions
        above threshold (0.0 when no prediction passes the threshold)
    aa_recall : float
        Correct aa predictions above threshold divided by all original aa
    """
    correct_aa = sum(conf >= threshold for conf in correct_aa_confidences)
    predicted_aa = sum(conf >= threshold for conf in all_aa_confidences)
    # Bug fix: guard against ZeroDivisionError when nothing passes the
    # threshold (e.g. a very high threshold value).
    aa_precision = correct_aa / predicted_aa if predicted_aa else 0.0
    aa_recall = correct_aa / num_original_aa
    return aa_precision, aa_recall
51cde7bf97af7e4ac74414d3c90ed7afdc25d408
113,835