content: string (lengths 39 to 14.9k)
sha1: string (length 40)
id: int64 (0 to 710k)
import re

def level_3(text):
    """Find the lowercase letters in text that are surrounded by exactly 3 uppercase letters."""
    pattern = re.compile(r'[a-z][A-Z]{3}([a-z])[A-Z]{3}[a-z]')
    return ''.join(pattern.findall(text))
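A quick sanity check of the pattern above (expected outputs worked out by hand from the regex, not taken from the source):

# 'e' has exactly three uppercase letters on each side, guarded by lowercase.
print(level_3("aBCDeFGHi"))   # -> 'e'
# 'f' is preceded by four uppercase letters, so the lowercase guard fails.
print(level_3("aBCDEfGHIj"))  # -> ''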
295ef38565945ee37bbf05311b0b7e8df0e93ada
79,119
from typing import List, Tuple

def read_input_file(filename: str) -> Tuple[List, List]:
    """
    Read IDs and SMILES from a file with expected structure:
    ID1,SMILES1\n
    ID2,SMILES2\n
    (...)

    Args:
        filename (str): Input file name

    Returns:
        Tuple[List, List]: List of IDs, List of SMILES
    """
    ids = []
    smiles = []
    with open(filename, "r") as fp:
        for line in fp.readlines():
            ids.append(line.strip("\n").split(",")[0])
            smiles.append(line.strip("\n").split(",")[1])
    return ids, smiles
81aa0eefeae578566d227a97cfbbbd53486f3a9d
79,120
def should_import(managedcluster):
    """
    should_import returns True if the input managedCluster should be
    imported, and False otherwise.

    :param managedcluster: name of managedCluster to import
    :return: bool
    """
    conditions = managedcluster['status'].get('conditions', [])
    for condition in conditions:
        if condition['type'] == 'ManagedClusterJoined':
            return False
    return True
cf02849967d275d3c98e8408fc83aaff9bf52107
79,123
import re

def read_keywords(keywords_file):
    """
    Parses a Processing keywords.txt file, returning a dictionary of all
    keywords in the file, indexed by keyword type.

    Arguments:
    keywords_file -- full path to a Processing keywords.txt file
    """
    keywords = {}
    for line in open(keywords_file):
        # Skip blank lines and comments
        if re.match(r'^\s*$', line):
            continue
        if re.match(r'^\s*#', line):
            continue
        match = re.match(r'^(\w+)\s*\t(\w+)\t?(\w*)', line)
        if match:
            (kw_name, kw_type, kw_loc) = match.groups()
            if kw_type in keywords:
                keywords[kw_type].add(kw_name)
            else:
                keywords[kw_type] = set([kw_name])
    return keywords
18dee5b3480a5adfba50cc87cbc4020b97ac7a13
79,124
def polygon_area(coords):
    """ Return the area of a closed polygon """
    Xs = coords[:, 0]
    Ys = coords[:, 1]
    # Shoelace formula; abs() ignores orientation
    return 0.5 * abs(sum(Xs[:-1] * Ys[1:] - Xs[1:] * Ys[:-1]))
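A minimal check of the shoelace computation above, assuming coords is a NumPy array whose last vertex repeats the first (a closed polygon):

import numpy as np

# Unit square, explicitly closed by repeating the first vertex.
square = np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]], dtype=float)
print(polygon_area(square))  # -> 1.0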
477cd44440d4909b39fc38b162c2ae19b512ef6a
79,125
def frac_to_rainbow_colour(frac):
    """
    frac is a number from 0 to 1. Map to a 3-tuple representing a rainbow colour.
    0   -> (0, 1, 0)      # green
    1/6 -> (0, 0.5, 0.5)  # cyan
    2/6 -> (0, 0, 1)      # blue
    3/6 -> (0.5, 0, 0.5)  # magenta
    4/6 -> (1, 0, 0)      # red
    5/6 -> (0.5, 0.5, 0)  # yellow
    6/6 -> (0, 1, 0)      # green again
    """
    assert frac >= 0 and frac < 1
    # Position within the current sixth, rescaled to [0, 1) so each branch
    # interpolates all the way to the next anchor colour.
    interp = (frac % (1. / 6)) * 6
    if frac < 1. / 6:    # green to cyan
        to_return = (0, 1 - 0.5 * interp, 0.5 * interp)
    elif frac < 2. / 6:  # cyan to blue
        to_return = (0, 0.5 - 0.5 * interp, 0.5 + 0.5 * interp)
    elif frac < 3. / 6:  # blue to magenta
        to_return = (0.5 * interp, 0, 1 - 0.5 * interp)
    elif frac < 4. / 6:  # magenta to red
        to_return = (0.5 + 0.5 * interp, 0, 0.5 - 0.5 * interp)
    elif frac < 5. / 6:  # red to yellow
        to_return = (1 - 0.5 * interp, 0.5 * interp, 0)
    else:                # yellow to green
        to_return = (0.5 - 0.5 * interp, 0.5 + 0.5 * interp, 0)
    return to_return
5bb7b82673127b04de683a413f2b3ac2fb29adff
79,127
def get_predictions(decode_steps):
    """Returns predictions dict given DecodeSteps."""
    return dict(
        predicted_action_types=decode_steps.action_types,
        predicted_action_ids=decode_steps.action_ids)
200d41b36201bb4c62c5e0c9174973b7de98ace9
79,128
def person_case_is_migrated(case):
    """
    Applies to both person cases representing mothers and person cases
    representing children. Returns True if the person is marked as having
    migrated to another AWC, otherwise False.
    """
    return case.get_case_property('migration_status') == 'migrated'
994003ec2c94537666a2dc6e47e333dbc846de57
79,132
async def get_authenticated_user(*, app, logger):
    """Get information about the authenticated GitHub user.

    This function wraps the `GET /user
    <https://developer.github.com/v3/users/#get-the-authenticated-user>`_
    method.

    Parameters
    ----------
    app : `aiohttp.web.Application`
        The app instance.
    logger
        A `structlog` logger instance with bound context related to the
        Kafka event.

    Returns
    -------
    response : `dict`
        The parsed JSON response body from GitHub.
    """
    ghclient = app["root"]["templatebot/gidgethub"]
    response = await ghclient.getitem("/user")
    return response
b7577fbe203b08080dd2c2e9c9f3af132095e4d7
79,140
def is_special(name):
    """
    Return True if the name starts and ends with a double-underscore.
    Such names typically have special meaning to Python, e.g. :meth:`__init__`.
    """
    return name.startswith('__') and name.endswith('__')
56eb52d619347fdabbf2a87a9da442daf659b67c
79,141
def join(*functions, extension=None):
    """
    Returns a function that joins the given functions' outputs.

    >>> join(components(-1), parameters('page'))('http://example.com/api/planning.json?page=1')
    'planning-page-1'
    """
    def wrapper(url):
        value = '-'.join(function(url) for function in functions)
        if extension:
            return f'{value}.{extension}'
        return value
    return wrapper
567a77dc0183ff340a2778d59272748cba64a599
79,145
def decorator_with_args(decorator_to_enhance):
    """Decorate a decorator that takes arguments, so that it can be defined
    as a single flat function instead of two nested levels.

    Instead of this definition:

        def decorator(*outer_args, **outer_kwargs):
            def __inner_decorate(func):
                def __inner_func(*args, **kwargs):
                    kwargs.update(outer_kwargs)
                    return do_whatever(args + outer_args, **kwargs)
                return __inner_func
            return __inner_decorate

    You can use this form:

        def decorator(func, *outer_args, **outer_kwargs):
            def __inner_func(*args, **kwargs):
                kwargs.update(outer_kwargs)
                return do_whatever(args + outer_args, **kwargs)
            return __inner_func
    """
    def decorator_maker(*args, **kwargs):
        def decorator_wrapper(func):
            return decorator_to_enhance(func, *args, **kwargs)
        return decorator_wrapper
    return decorator_maker
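A hedged usage sketch; the decorator tag_result is hypothetical, introduced only to illustrate the calling convention:

@decorator_with_args
def tag_result(func, prefix):
    # 'func' is the decorated function, 'prefix' the decorator argument.
    def wrapper(*args, **kwargs):
        return '{}:{}'.format(prefix, func(*args, **kwargs))
    return wrapper

@tag_result("result")
def add(a, b):
    return a + b

print(add(1, 2))  # -> 'result:3'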
0c8df023b940afdbbbf185682b92e4b517f7ff31
79,149
def escape_desc(desc):
    """Escape `desc` suitable for a doc comment."""
    if desc is not None:
        return desc.replace("[", "\\[").replace("]", "\\]")
    else:
        return ""
47feb8d4b37fabcf5196a57bb10c356ee78420ec
79,151
def callback_to_list(callback):
    """Cast callback to list.

    Parameters
    ----------
    callback : callable or list of callables
        Callable object(s).

    Returns
    -------
    list
        List of callable objects.
    """
    check_callable = True
    # convert callback to list
    if not isinstance(callback, list):
        if callback is None:
            callback = []
            check_callable = False
        elif callable(callback):
            callback = [callback]
        else:
            raise TypeError("'callback' must be callables or list of "
                            "callables.")
    # check if all callbacks are callable
    if check_callable is True:
        for c in callback:
            if callable(c) is False:
                raise TypeError("'callback' is not callable.")
    return callback
53ce8aa556015fdac4e0b5729d54617c904b6c7c
79,153
def parse_csv_data(filename_csv: str = 'nation_2021-10-28.csv') -> list:
    """Function takes a csv file and returns a list of strings for the rows
    in the file."""
    with open(filename_csv, "r") as file:
        list_of_lines = file.readlines()
    return list_of_lines
fedb8af215e528ecaa9b4340077cff6491f13e9f
79,154
def utf_decode(data):
    """ Decode UTF-8 string """
    if isinstance(data, str):
        return data
    try:
        decoded = str(data.decode('utf-8'))
        return decoded
    except ValueError:
        return data
ff774011cf1d663a7a5021b827327a02b4a0c7ae
79,157
def path_from_file_uri(uri: str) -> str:
    """Convert a file URI to a local path. Idempotent on regular paths.
    Raises if presented with a scheme besides file://
    """
    if uri.startswith("file://"):
        return uri.replace("file://", "")
    if "://" in uri:
        raise ValueError(f"Cannot convert URI to path: {uri}")
    return uri
9b4241754e93c62c7c984460a165fac41a14de6f
79,159
def covert_idx_to_hashable_tuple(idx):
    """Converts idxs to hashable type for set (slice is not hashable)"""
    return (idx[0], idx[1], idx[2], str(idx[3]), str(idx[4]))
484575e455ca3a3eb5eba7c18bb02a6bbf5cfdc8
79,162
def is_less_than(val_a, val_b):
    """Return True if val_a is less than val_b, where both are integers
    stored as digit arrays with the least-significant digit first."""
    size = len(val_a)
    if size != len(val_b):
        return False
    # Compare from the most significant digit (highest index) down.
    for i in reversed(range(size)):
        if val_a[i] < val_b[i]:
            return True
        elif val_a[i] > val_b[i]:
            return False
    return False
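Assuming the little-endian digit-array convention read from the loop above, a small check:

print(is_less_than([3, 2, 1], [4, 2, 1]))  # 123 < 124 -> True
print(is_less_than([4, 2, 1], [3, 2, 1]))  # 124 < 123 -> False
print(is_less_than([1, 2], [1, 2, 3]))     # different lengths -> False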
0cf361a88a29cebb4bba713c054dac4e57c4da73
79,173
from datetime import datetime

def create_filename(filename, path=None, file_extension=None, append_timestamp=False):
    """Helper method to create a filename based on name, path, file extension
    and an option to append a timestamp."""
    if append_timestamp is True:
        timestamp = "_" + datetime.now().strftime("%Y%m%d_%H%M%S")
    else:
        timestamp = ""
    if file_extension is None:
        file_end = ""
    else:
        file_end = "." + file_extension
    if path is None:
        path_save = ""
    else:
        path_save = path + "\\"
    return path_save + filename + timestamp + file_end
97bec768a4b11b05edc9b2aca1b3a04c6825c604
79,186
import math
import random

def random_shell_coords(radius):
    """
    given a shell radius, return a random shell coordinate centred around (0,0,0)
    :param radius: radius of shell
    :return: 3-tuple shell coordinate
    """
    if radius < 0:
        raise ValueError("Cannot have negative radius")
    theta = math.radians(random.uniform(0.0, 360.0))
    phi = math.radians(random.uniform(0.0, 360.0))
    x = radius * math.cos(theta) * math.sin(phi)
    y = radius * math.sin(theta) * math.sin(phi)
    z = radius * math.cos(phi)
    return x, y, z
290244a6d9c8eafd84c2ac39ec7327b936b3121c
79,187
def restore_full_from_partial(x, partial):
    """
    Restore full riemannian gradient from its partial representation at
    ManifoldElement x

    Parameters
    ----------
    x : ManifoldElement, shape (M, N)
        point at which partial gradient was computed
    partial : tuple of ManifoldElements of shapes (M, N)
        matrices M, U_p and V_p^* as partial riemannian gradient

    Returns
    -------
    out : ManifoldElement
        riemannian gradient at x
    """
    mid_proj, u_proj, v_proj = partial
    return mid_proj.rdot(x.u).dot(x.v) + u_proj.dot(x.v) + v_proj.rdot(x.u)
398f8da5e3943d83b808912e14e42121be216941
79,189
import hashlib

def calculate_file_md5(fp, chunk_len=2 ** 16):
    """
    Return the md5 digest of the content of the open file fp as a string
    with only hexadecimal digits.

    :fp: file (an open file object)
    :chunk_len: int (number of file bytes read per cycle - default = 2^16)
    """
    h = hashlib.md5()
    while True:
        chunk = fp.read(chunk_len)
        if chunk:
            h.update(chunk)
        else:
            break
    res = h.hexdigest()
    fp.seek(0)
    return res
2b59a3286a1aa42c87bb8104a2d80b3d4b7098be
79,190
def valid_interface_data(samples):
    """Check if samples are valid InterfaceData."""
    return (isinstance(samples, (tuple, list)) and
            len(samples) == 2 and
            all(isinstance(sample, (tuple, list, float, int))
                for sample in samples) and
            (isinstance(samples[0], float) or
             len(samples[0]) == len(samples[1])))
d5d24931223ab91decabfbdd5cf5581bce7a9aed
79,192
import math

def fpIsInf(x):
    """
    Checks whether the argument is a floating point infinity.
    """
    return math.isinf(x)
ab9e80f5f971a959c07f59aba203928158aa4351
79,199
import math

def isinf(x):
    """Return True if the real or the imaginary part of x is positive or
    negative infinity."""
    return math.isinf(x.real) or math.isinf(x.imag)
3f399fd95f52b35ebb3eb82aa5e7d570a4fe8416
79,201
def celsius(value: float, target_unit: str) -> float:
    """
    Utility function for converting Celsius to Kelvin or to Fahrenheit

    :param value: temperature
    :param target_unit: Celsius, Kelvin or Fahrenheit
    :return: value converted to the right scale
    """
    if target_unit == "K":
        # Convert to the Kelvin scale
        return value + 273.15
    else:
        # Convert to the Fahrenheit scale
        return value * 1.8 + 32
3caec1966d4bd700563b2086bef37095c3c97964
79,203
def validate_boolean(setting, value, option_parser, config_parser=None,
                     config_section=None):
    """Check/normalize boolean settings:
         True:  '1', 'on', 'yes', 'true'
         False: '0', 'off', 'no', 'false', ''
    """
    if isinstance(value, bool):
        return value
    try:
        return option_parser.booleans[value.strip().lower()]
    except KeyError:
        raise LookupError('unknown boolean value: "%s"' % value)
eb0a7d8bdfa62d91e7da755a1db5ff8c332d3ec1
79,211
def tuplize_2d_array(arr):
    """Returns a list of tuples, each tuple being one row of arr."""
    return [tuple(row) for row in arr]
32266a3098ef3749f377df83f126b7c370ca20e0
79,213
import six

def pad(data, bs=16):
    """PKCS#7 Padding. Takes a bytestring data and an optional blocksize 'bs'"""
    length = bs - (len(data) % bs)
    data += six.int2byte(length) * length
    return data
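A worked example of the padding rule (pad length = bs - len(data) % bs, so an exact multiple gains a whole extra block):

print(pad(b"hello", bs=8))     # -> b'hello\x03\x03\x03'
print(pad(b"12345678", bs=8))  # -> b'12345678' + b'\x08' * 8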
e5a1c422f4021da2e7afc1af6483c97e99e6b6a6
79,219
def remove_topk_nodes(adj, node_importance_rank, topk):
    """
    Remove top k important nodes by removing all the links connecting to
    the nodes.

    Args:
        adj: The adjacency matrix for the graph.
        node_importance_rank: The sorted indices of nodes following the
            node importance (descending) order.
        topk: The max number of nodes to remove.

    Returns:
        The modified adjacency matrix.
    """
    for idx in node_importance_rank[:topk]:
        for i in range(adj.shape[0]):
            adj[i, idx] = 0
            adj[idx, i] = 0
    return adj
d06050056e1cf93b42bf4660660926643af01bc7
79,221
def _get_operational_state(resp, entity):
    """
    The member name is either:
        'operationState' (NS)
        'operational-status' (NSI)
        '_admin.operationalState' (other)
    :param resp: descriptor of the get response
    :param entity: can be NS, NSI, or other
    :return: status of the operation
    """
    if entity == 'NS' or entity == 'NSI':
        return resp.get('operationState')
    else:
        return resp.get('_admin', {}).get('operationalState')
35eeb4aa94b8cdb42ad56b8cb410a0a36acb4a3e
79,225
def do_show_hide(n_clicks):
    """
    Shows/hides the sidebar on toggle click

    :param n_clicks: num clicks on toggle button
    :return: className of sidebar defining collapsed state
    """
    if n_clicks is None:
        return "sidebar"
    if n_clicks % 2:
        return "sidebar collapse"
    else:
        return "sidebar"
296faa295a4ae02136c8e3e837b9dd5e31a2c7fe
79,232
def range_validator(min_value, max_value):
    """
    A parameter validator that checks range constraint on a parameter.

    :param min_value: The minimum limit of the range, inclusive. None for
        no minimum limit.
    :param max_value: The maximum limit of the range, inclusive. None for
        no maximum limit.
    :return: A new range validator
    """
    def _range_validator(instance, attribute, value):
        if ((min_value is not None) and (value < min_value)) or \
                ((max_value is not None) and (value > max_value)):
            raise ValueError("{} must be in range [{}, {}]".format(
                attribute.name, min_value, max_value))
    return _range_validator
f1c98116057e8fe05a0228c682387abd615ed089
79,234
import json

def read_settings(path):
    """
    Reads a dictionary of settings from a json file.

    :param path: Filepath to settings file.
    :return: Dictionary of settings
    """
    try:
        with open(path, 'r') as file:
            settings = json.load(file)
    except FileNotFoundError:
        settings = dict()
    return settings
0867bf304a42c87e54cdbaeebdf2e08548dc81ee
79,236
def join_columns(df, column_names, delim='_'):
    """Join columns with a delimiter"""
    new_column = delim.join(column_names)
    df = df.copy()
    df[new_column] = df[column_names[0]].map(str)
    for c in column_names[1:]:
        df[new_column] += delim + df[c].map(str)
    return df
88d7b7553d0bd9f19921ef124a67eb76b6295c8e
79,241
def shuffle(x, order=None):
    """Reorganizes the given string according to the order, a list of
    character indices.
    """
    if order is None:
        return x
    res = ""
    for o in order:
        res += x[o]
    return res
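For example (each index in order picks the position in x of the next output character):

print(shuffle("abcd", [2, 0, 3, 1]))  # -> 'cadb'
print(shuffle("abcd"))                # no order given -> 'abcd'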
daa1c33160f82917be40b082e6a0df70fdfd49c8
79,243
import string

def ascii_freq(a: bytearray) -> float:
    """Return the frequency of valid ascii characters in a bytearray.

    Arguments:
        a {bytearray} -- Bytearray to test against

    Returns:
        float -- Frequency of valid ascii in a bytearray
    """
    total = len(a)
    ascii_chars = 0
    for i in range(total):
        if chr(a[i]) in string.ascii_letters:
            ascii_chars += 1
    return ascii_chars / total
e6b0e4cfdabb0181faf9aa2f1bba747db2c7d15a
79,245
def add_time(start, duration, day=None):
    """Adds a time duration to a start time.

    Parameters
    ----------
    start : str
        Start time in the format "11:05 AM".
    duration : str
        Duration to add in hours:minutes, e.g. "15:27".
    day : str, default=None
        Full name of day of the week for the start time (case insensitive),
        e.g. "tuesday"

    Returns
    -------
    str
        Time when duration ends including the number of days passed, if any.
        If option `day` is provided, also returns the day the duration ends.
    """
    start_time, start_ampm = start.split()
    start_hour, start_min = start_time.split(":")
    dur_hour, dur_min = duration.split(":")

    # Convert the start hour to 24-hour time; 12 AM is hour 0, 12 PM is hour 12.
    start_hour = int(start_hour)
    if start_ampm == "PM" and start_hour != 12:
        start_hour += 12
    elif start_ampm == "AM" and start_hour == 12:
        start_hour = 0

    new_hour = start_hour + int(dur_hour)
    new_min = int(start_min) + int(dur_min)
    if new_min >= 60:
        new_hour += new_min // 60
        new_min = new_min % 60
    add_days = new_hour // 24
    new_hour = new_hour % 24

    new_ampm = "AM"
    if new_hour == 12:
        new_ampm = "PM"
    elif new_hour > 12:
        new_hour -= 12
        new_ampm = "PM"
    elif new_hour == 0:
        new_hour = 12

    extra = ""
    if add_days == 1:
        extra = " (next day)"
    elif add_days > 1:
        extra = f" ({add_days} days later)"

    days_of_week = ["monday", "tuesday", "wednesday", "thursday", "friday",
                    "saturday", "sunday"]
    if day:
        if day.lower() in days_of_week:
            day_index = days_of_week.index(day.lower()) + add_days
            new_day = days_of_week[day_index % 7]
            extra = f", {new_day.capitalize()}" + extra
        else:
            return f"Error: '{day}' is not a day of the week."

    return f"{new_hour}:{new_min:02} {new_ampm}{extra}"
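A few hedged usage checks (expected outputs computed by hand from the docstring's conventions, not taken from the source):

print(add_time("3:00 PM", "3:10"))             # -> '6:10 PM'
print(add_time("11:30 AM", "2:32", "Monday"))  # -> '2:02 PM, Monday'
print(add_time("10:10 PM", "3:30"))            # -> '1:40 AM (next day)'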
4daf2dc2ab6411d533704047a799aa3fbd3a097a
79,247
def read_textfile(f_path):
    """ Open a textfile and return contents as a stripped list."""
    with open(f_path) as f:
        rows = f.readlines()
    return [row.strip() for row in rows]
bfe45e565e8012cd9468e66ab71c9dfa70bb3954
79,248
import requests

def url_shorten(long_url):
    """Shorten a long url with git.io service"""
    req = requests.post('http://git.io', data={'url': long_url})
    return req.headers['location']
4fd24f2fa0531de8fdb34de3ff712d2fa0624f35
79,253
def getXYZ(atoms, cart_coords):
    """
    Return a string of the molecule in the XYZ file format.
    """
    natom = len(atoms)
    xyz = ''
    for i in range(natom):
        xyz += '{:s} {:.10f} {:.10f} {:.10f}'.format(
            atoms[i], cart_coords[3 * i], cart_coords[3 * i + 1],
            cart_coords[3 * i + 2])
        if i != natom - 1:
            xyz += '\n'
    return xyz
eb701a932260adfcbaa9777dc051bd3185cdf6e1
79,254
def get_num_samples_per_frame(allsamples, num_channels, data_type):
    """
    Based on the number of channels and the data type (complex or real),
    get the number of samples per frame.

    Parameters
    ----------
    allsamples : array-like
        all sample components in the frame.
    num_channels : int
        number of channels in the frame.
    data_type : int
        0 for real, 1 for complex.

    Returns
    -------
    tot_samples_per_channel_and_frame : int
        number of sample components per channel per frame (i.e. R, I).
    tot_samples_per_channel_and_frame_full : int
        number of full samples per channel per frame (i.e. R or R+jI).
    """
    # number of sample components per channel
    tot_samples_per_channel_and_frame = len(allsamples) // num_channels
    # number of full samples (real and imag for complex)
    if data_type == 1:
        tot_samples_per_channel_and_frame_full = tot_samples_per_channel_and_frame // 2
    else:
        tot_samples_per_channel_and_frame_full = tot_samples_per_channel_and_frame
    return [tot_samples_per_channel_and_frame,
            tot_samples_per_channel_and_frame_full]
77c0a879e21bf2c8dc9346b634ee1c46bedc69ca
79,255
def read_bytes(iterable, n=0):
    """Read n bytes from the iterable and return them as a bytearray"""
    iterator = iter(iterable)
    value = bytearray()
    for i in range(n):
        nextByte = next(iterator)
        if isinstance(nextByte, int):
            value.append(nextByte)
        elif isinstance(nextByte, bytes):
            value += nextByte
    return value
89f7304651f5a44187f3a73900ae3f4113767e70
79,256
import textwrap

def pyinstrument_magic(rest):
    """
    Profile the code with pyinstrument.
    """
    res = """\
from pyinstrument import Profiler as _Profiler
try:
    with _Profiler() as _profiler:
        pass
{rest}
    _profiler.open_in_browser()
finally:
    del _profiler, _Profiler
"""
    return res.format(rest=textwrap.indent(rest, ' ' * 8))
d1dda0a27ddf1125f4479601f32b0875ce38d077
79,258
import glob

def dodo_filename(path):
    """
    Find a 'DODO*' file in *path* and return its full pathname
    """
    globble = glob.glob("{}/DODO*".format(path))
    if globble:
        return globble[0]
    else:
        return None
8fdd21071303aec16b8e3268e7322a8d82483970
79,260
def predict(model, X_test):
    """Return prediction list.

    Args:
        model: lightgbm.Booster
        X_test: DataFrame

    Returns:
        np.ndarray
    """
    y_pred = model.predict(X_test)
    return y_pred
e8b90d0b8ac55d33a07c1fb5b7479159c2c1f4c6
79,262
import torch

def inflate_tensor(tensor, times, dim):
    """This function inflates the tensor for times along dim.

    Arguments
    ---------
    tensor : torch.Tensor
        The tensor to be inflated.
    times : int
        The tensor will inflate for this number of times.
    dim : int
        The dim to be inflated.

    Returns
    -------
    torch.Tensor
        The inflated tensor.

    Example
    -------
    >>> tensor = torch.Tensor([[1,2,3], [4,5,6]])
    >>> new_tensor = inflate_tensor(tensor, 2, dim=0)
    >>> new_tensor
    tensor([[1., 2., 3.],
            [1., 2., 3.],
            [4., 5., 6.],
            [4., 5., 6.]])
    """
    return torch.repeat_interleave(tensor, times, dim=dim)
718cef4db9076922030909b7ee7aaa1073baf400
79,263
def getVolume(flaeche, druck, rundheit):
    """
    Function for computing the volume from area, pressure and roundness

    :param flaeche: area of the segmented BLOB in pixels
    :param druck: unconverted relative pressure (i.e. a value between 0 and 1024)
    :param rundheit: roundness
    :return: volume in µl
    """
    offset = -20.105
    koeffFlaeche = 0.011523
    koeffDruck = -0.064727
    koeffRundheit = 0.014931
    koeffFlaecheDruck = 0.0002656
    vol = (offset + koeffFlaeche * flaeche + koeffDruck * druck
           + koeffRundheit * rundheit + koeffFlaecheDruck * flaeche * druck)
    if vol < 0:
        return 0
    return vol
587a136108f6890577cc40395ec7d457f64f3350
79,264
def from_saved_model(layer):
    """Returns whether the layer is loaded from a SavedModel."""
    return layer.__module__.find('keras.saving.saved_model') != -1
2616dc31d2a6523304259664ce73f0f57a81ebd9
79,268
def prod(x):
    """
    Computes the product of the elements of an iterable

    :param x: iterable
    :type x: iterable
    :return: product of the elements of x
    """
    ret = 1
    for item in x:
        ret = item * ret
    return ret
b7fbc55730426061db305346ddc48da8c07e0daf
79,271
import six

def to_bytes(content, encoding='utf-8', errors='strict'):
    """Convert a sequence to a bytes type.

    Borrowed from `PySerial <https://github.com/pyserial/pyserial>`_
    since it is now optional.
    """
    if isinstance(content, bytes):
        # Python 2: isinstance('', bytes) is True
        return bytes(content)
    elif isinstance(content, bytearray):
        return bytes(content)
    elif isinstance(content, memoryview):
        return content.tobytes()
    elif isinstance(content, six.string_types):
        return bytes(content.encode(encoding, errors))
    else:
        # handle list of integers and bytes (one or more items)
        # for Python 2 and 3
        return bytes(bytearray(content))
7b2af76458f30f296e36e60841fb660f1dd78bee
79,278
def data_resample(df, sample_time='1T'):
    """
    This function downsamples a sample-time indexed pandas dataframe
    containing measurement channel values based on the sample time supplied.
    It uses the mean of the values within the resolution interval, via the
    pandas `df.resample` method.

    Parameters
    ----------
    df :
        `df` is a sample-time indexed pandas dataframe containing
        measurement values from the different channels of each meter.
    sample_time :
        `sample_time` determines the desired resolution of the downsampled
        data. For 1 minute - 1T, 1 hour - 1H, 1 month - 1M, 1 Day - 1D etc.
        The default chosen here is 1 minute.

    Returns
    -------
    Dataframe
        Resampled-time indexed pandas dataframe containing downsampled
        measurement values from the given dataframe.
    """
    # Downsample using the mean of the values within each interval; the mean
    # provides the average/expected value of the measurement in that range.
    df_resampled = df.resample(sample_time, closed="left", label="right").mean()
    return df_resampled
5373ec8edeb3c1295530f5eaea74dae69f9377b1
79,282
def get_multi_machine_types(machinetype):
    """
    Converts machine type string to list based on common delimiters
    """
    machinetypes = []
    machine_type_deliminator = [',', ' ', '\t']
    for deliminator in machine_type_deliminator:
        if deliminator in machinetype:
            machinetypes = machinetype.split(deliminator)
            break
    if not machinetypes:
        machinetypes.append(machinetype)
    return machinetypes
046c4256f7c993580ddd9db9fa6f195f8f837da0
79,283
import json

def create_return_body(mobile_number, message, colour="#FFFFFF"):
    """
    Create jsonified return response body

    Parameters
    ----------
    mobile_number: str
        User mobile number of the format "+91XXXXXXXXXX"
    message: str
        User status message
    colour: str
        User status colour hex code
    """
    body = {"mobile_number": mobile_number, "message": message,
            "colour": colour}
    return json.dumps(body)
9b784c18aafdc2f04570e5981d03f5dabf50c84e
79,284
def _ctmp_err_rate_2_q(gen, g_mat_dict) -> float:
    """Compute the 2 qubit error rate for a given generator."""
    # pylint: disable=invalid-name
    g_mat = g_mat_dict[gen]
    b, a, _ = gen
    r = g_mat[int(b, 2), int(a, 2)]
    return r
92dfb65087458faa87a15db65d91744044e01352
79,285
def probability_with_pseudocount(text, motif_profile):
    """Calculate the probability of a string (i.e. text) given a motif
    profile matrix, with pseudocounts.

    Args:
        text (string): the text string whose probability is calculated
            from the profile matrix.
        motif_profile (dict): represents the probability of occurrence of
            each nucleotide for each column.

    Returns:
        Float, the probability of text calculated from profile.
    """
    probability = 1
    for i in range(len(text)):
        probability *= motif_profile[text[i]][i]
    return probability
35f45cd8561a1f8c0ad3c3cb86d600b725c81039
79,287
def get_encounter(repeater, encounter_uuid):
    """
    Fetches an Encounter by its UUID

    :raises RequestException: If response status is not in the 200s
    :raises ValueError: If the response body does not contain valid JSON.
    :return: Encounter dict
    """
    response = repeater.requests.get(
        '/ws/rest/v1/bahmnicore/bahmniencounter/' + encounter_uuid,
        {'includeAll': 'true'},
        raise_for_status=True
    )
    return response.json()
37d53b6e0deecd76e69c55a2eded6a3bb2ddadf1
79,295
from typing import Any, List

def _prepare_args(args: List[Any]) -> List[str]:
    """Prepare args

    This does the following conversions:
        [['a']] -> ['a']
        [1] -> ['1']
        [None] -> ['']
    """
    if not args:
        return args
    result = []
    for arg in args:
        if not isinstance(arg, list):
            arg = [arg]
        result += ['' if a is None else str(a) for a in arg]
    return result
a74adb83c6847ca004a3b60df806f61a5e293b53
79,297
from typing import Tuple

def get_input() -> Tuple[int, int, list, set, set]:
    """Returns input for challenge "No Idea!".

    Returns:
        Tuple[int, int, list, set, set]: n, m, array, A, B
    """
    n, m = (int(x) for x in input().split())
    arr = input().split()
    a = set(input().split())
    b = set(input().split())
    return n, m, arr, a, b
c110894f1a0775a4b173f128a201f90d9cb87ce8
79,300
def f_string_3(value):
    """Round value with 3 decimals."""
    return f'{value:.3f}'
db47fec9d708954d66c4f6b444f6a298aec477ad
79,307
def get_centroid(bounding_box):
    """
    Calculate the centroid of a bounding box.

    Parameters
    ----------
    bounding_box : list
        list of bounding box coordinates of top-left and bottom-right
        (xlt, ylt, xrb, yrb)

    Returns
    -------
    centroid : tuple
        Bounding box centroid pixel coordinates (x, y).
    """
    xlt, ylt, xrb, yrb = bounding_box
    centroid_x = int((xlt + xrb) / 2.0)
    centroid_y = int((ylt + yrb) / 2.0)
    return centroid_x, centroid_y
be8be4602acca78b9943cca5608544ca84dde274
79,308
import torch

def point_cloud_from_depth(depth, K):
    """Transform depth image pixels into a point cloud in the camera frame.

    depth: [N, H, W] depth image
    K: [N, 3, 3] intrinsic camera matrix

    returns: [N, 3, H*W] point cloud
    """
    batch_size, H, W = depth.shape
    # Create 3D grid data
    u_img_range = torch.arange(0, W, device=depth.device)
    v_img_range = torch.arange(0, H, device=depth.device)
    u_grid, v_grid = torch.meshgrid(u_img_range, v_img_range)
    u_grid = u_grid.t()[None, :].repeat(batch_size, 1, 1)
    v_grid = v_grid.t()[None, :].repeat(batch_size, 1, 1)
    u_img, v_img, d = (
        u_grid.reshape(batch_size, -1),
        v_grid.reshape(batch_size, -1),
        depth.reshape(batch_size, -1),
    )
    # homogeneous coordinates
    uv = torch.stack((u_img, v_img, torch.ones_like(u_img)), dim=1).float()
    # get the unscaled position for each of the points in the image frame
    unscaled_points = torch.linalg.inv(K) @ uv
    # scale points by their depth value
    return d[:, None] * unscaled_points
6d8b1f29fdd8997e5e003ea85c0920b791853fc1
79,309
def merge_list(master_dict, parts):
    """Given a dict and a list of elements, recursively create sub-dicts
    to represent each "row" """
    if parts:
        head = parts.pop(0)
        master_dict[head] = merge_list(master_dict.setdefault(head, dict()),
                                       parts)
    return master_dict
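For example, merging two paths into the same tree (note that parts.pop(0) consumes the input list):

tree = {}
merge_list(tree, ["a", "b", "c"])
merge_list(tree, ["a", "b", "d"])
print(tree)  # -> {'a': {'b': {'c': {}, 'd': {}}}}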
aa109eebe242948892c8aa7025ef7dbadc4ae0b6
79,315
import torch

def extract_local_patterns(
        fnn12: torch.Tensor,
        fnn_to_seed_local_consistency_map_corr: torch.Tensor,
        k1: torch.Tensor,
        k2: torch.Tensor,
        im1seeds: torch.Tensor,
        im2seeds: torch.Tensor,
        scores: torch.Tensor):
    """
    Prepare local neighborhoods around each seed for the parallel RANSACs.
    This involves two steps:
        1) Collect all selected keypoints and refer them with respect to
           their seed point
        2) Sort keypoints by score for the progressive sampling to pick
           the best samples first

    fnn12: Matches between keypoints of I_1 and I_2.
        The i-th entry of fnn12 is j if and only if keypoint k_i in image
        I_1 is matched to keypoint k_j in image I_2
    fnn_to_seed_local_consistency_map_corr: Boolean matrix of size
        (num_seeds, num_keypoints). Entry (i, j) is True iff keypoint j was
        assigned to seed i.
    k1: Keypoint locations in image I_1
    k2: Keypoint locations in image I_2
    im1seeds: Keypoint index of chosen seeds in image I_1
    im2seeds: Keypoint index of chosen seeds in image I_2
    scores: Scores to rank correspondences by confidence. Lower scores are
        assumed to be more confident, consistently with Lowe's ratio scores.
        Note: scores should be between 0 and 1 for this function to work as
        expected.

    Returns:
        All information required for running the parallel RANSACs. Data is
        formatted so that all inputs for different RANSACs are concatenated
        along the same dimension to support different input sizes.

        im1loc: Keypoint locations in image I_1 for each RANSAC sample.
        im2loc: Keypoint locations in image I_2 for each RANSAC sample.
        ransidx: Integer identifier of the RANSAC problem. This allows to
            distinguish inputs belonging to the same problem.
        tokp1: Index of the original keypoint in image I_1 for each RANSAC
            sample.
        tokp2: Index of the original keypoint in image I_2 for each RANSAC
            sample.
    """
    # first get an indexing representation of the assignments:
    # - ransidx holds the index of the seed for each assignment
    # - tokp1 holds the index of the keypoint in image I_1 for each assignment
    ransidx, tokp1 = torch.where(fnn_to_seed_local_consistency_map_corr)
    # - and of course tokp2 holds the index of the corresponding keypoint
    #   in image I_2
    tokp2 = fnn12[tokp1]

    # Now take the locations in the image of each considered keypoint ...
    im1abspattern = k1[tokp1]
    im2abspattern = k2[tokp2]
    # ... and subtract the location of its corresponding seed to get
    # relative coordinates
    im1loc = im1abspattern - k1[im1seeds[ransidx]]
    im2loc = im2abspattern - k2[im2seeds[ransidx]]

    # Finally we need to sort keypoints by scores in a way that assignments
    # to the same seed are close together. To achieve this we assume scores
    # lie in (0, 1) and add the integer index of the corresponding seed
    expanded_local_scores = scores[tokp1] + ransidx.type(scores.dtype)
    sorting_perm = torch.argsort(expanded_local_scores)

    im1loc = im1loc[sorting_perm]
    im2loc = im2loc[sorting_perm]
    tokp1 = tokp1[sorting_perm]
    tokp2 = tokp2[sorting_perm]

    return im1loc, im2loc, ransidx, tokp1, tokp2
5a39473ab42b1f69c77613545ba573eebdcde7e3
79,322
def count_at_centroid(line_geometry, trajectories_gdf):
    """
    Given a LineString geometry, it counts all the geometries in a
    LineString GeoDataFrame (the GeoDataFrame containing GPS trajectories)
    that intersect it. This function should be executed per row by means of
    the df.apply(lambda row: ..) function.

    Parameters
    ----------
    line_geometry: LineString
        A street segment geometry
    trajectories_gdf: LineString GeoDataFrame
        A set of GPS tracks

    Returns
    -------
    int
    """
    intersecting_tracks = trajectories_gdf[
        trajectories_gdf.geometry.intersects(line_geometry)]
    return len(intersecting_tracks)
a65c7f12f7a84adc9d8e0154ba7be423bae53a53
79,324
def all_notes_line_up(a_list, b_list):
    """
    Takes two lists of NoteNode objects. These may be NoteList objects.
    Returns 2 lists. The first list contains the NoteNode objects in a_list
    that are unmatched in b_list. The second list contains the NoteNode
    objects in b_list that are unmatched in a_list.
    """
    a_list = [x for x in a_list if not x.is_rest]  # copy NoteList to list
    b_list = [x for x in b_list if not x.is_rest]  # copy NoteList to list

    # remove matched notes
    for a_note in a_list[:]:
        for b_note in b_list[:]:
            if (a_note.start, a_note.end) == (b_note.start, b_note.end):
                # remove the matched pair from their respective lists
                a_list.remove(a_note)
                b_list.remove(b_note)
                break
    return a_list, b_list
6d38de53025f6cfeb6b5e665a6ed9c535cf95792
79,327
def whose_player_list(players_data, teams_data):
    """Returns a list of tuples; each tuple has information about a player
    who is on a team ranked in the top 20, plays less than 200 minutes and
    makes more than 100 passes; the format for each tuple is
    (player's surname, team played for, team ranking, minutes played,
    number of passes)
    """
    team = []
    for i in range(20):
        team.append(teams_data[i]["team"])
    players_data_tuple = []
    for i in range(len(players_data)):
        # Convert minutes and passes to integers before comparing to values
        if (players_data[i]["team"] in team) and \
                (int(players_data[i]["minutes"]) < 200) and \
                (int(players_data[i]["passes"]) > 100):
            players_data_tuple.append(
                [players_data[i]["surname"], players_data[i]["team"],
                 team.index(players_data[i]["team"]),
                 players_data[i]["minutes"], players_data[i]["passes"]])
    return tuple(players_data_tuple)
e13761531cf350fc0131c716f9cde72f6b23cf68
79,329
def blend_color(a, b):
    """
    Blends two colors together by their alpha values.

    Args:
        a(tuple): the color to blend on top of b
        b(tuple): the color underneath a

    Return:
        The blended color.
    """
    if len(a) == 3:
        a = (*a, 255)
    if len(b) == 3:
        b = (*b, 255)
    barf = b[3] / 255          # fraction contributed by b
    brem = (255 - b[3]) / 255  # remaining fraction contributed by a
    fred = int(b[0] * barf + a[0] * brem)
    fgreen = int(b[1] * barf + a[1] * brem)
    fblue = int(b[2] * barf + a[2] * brem)
    falpha = min(int(a[3]) + int(b[3]), 255)
    return (fred, fgreen, fblue, falpha)
ad14f00a5d0d9033ad71184bb0757fdb0b0770cf
79,332
import torch

def get_device(request_gpu):
    """
    Return torch.device depending on request_gpu and GPU availability

    :param request_gpu: Set to True for requesting use of GPU
    :return torch.device:
    """
    if request_gpu:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        if device == torch.device("cpu"):
            print("WARN: GPU requested, but is not available. Use CPU instead.")
    else:
        device = torch.device("cpu")
    return device
823740ee40cd118a801624201fa0b58922af3e1d
79,337
def optimize_beta(beta, bottleneck_loss):
    """
    perform a step for updating the adaptive beta

    :param beta: old value of beta
    :param bottleneck_loss: current value of bottleneck loss
    :return: beta_new => updated value of beta
    """
    # please refer to section 4 of the vdb_paper in literature
    # for more information about this.
    # this performs gradient ascent over the beta parameter
    bottleneck_loss = bottleneck_loss.detach()
    beta_new = max(0, beta + (1e-6 * bottleneck_loss))  # alpha: 1e-6
    # return the updated beta value:
    return beta_new
edc57ac8d7469a5f345ee23586a565fa9610aa75
79,342
def split_filename(filename):
    """Get the different parts of the filename."""
    filename_split = filename.split("_")
    return filename_split
6677e060d9fae3e506c434a866c052b488eb1b01
79,344
def toVTKString(str):
    """Convert unicode string into 8-bit encoded ascii string.
    Unicode characters without an ascii equivalent will be stripped out.
    """
    vtkStr = ""
    for c in str:
        try:
            # "ignore" drops characters that cannot be encoded
            cc = c.encode("latin1", "ignore").decode("latin1")
        except UnicodeError:
            cc = "?"
        vtkStr = vtkStr + cc
    return vtkStr
047ab0b65ac1d3796eab85eaadcf9874d8ee2ba3
79,346
import math

def _scale_uniform_param(snr, data):
    """
    A helper function to convert given uniform parameters in SNR dB
    according to desired standard deviation, and scale to correct values
    according to given data set features.
    """
    return math.sqrt(3 * data.std() ** 2 / math.pow(10, snr / 10.0))
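The formula follows from two facts: a uniform distribution on [-a, a] has variance a**2 / 3, and an SNR of snr dB means the noise variance is data.std()**2 / 10**(snr / 10). Solving a**2 / 3 = data.std()**2 / 10**(snr / 10) for the half-width a gives the returned expression. A quick numeric check (the _Data stub is hypothetical, standing in for any object with a std() method):

import math

class _Data:
    def std(self):
        return 1.0

# At 0 dB the noise power equals the signal power, so a = sqrt(3) * sigma.
assert math.isclose(_scale_uniform_param(0, _Data()), math.sqrt(3))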
445b0354aac52e3764d8f9f4d87296a505b20eca
79,347
def GetTaxa(tree):
    """retrieve all taxa of leaves in a tree."""
    return [tree.node(x).get_data().taxon for x in tree.get_terminals()]
c35e4fdf76e49b59aa25990a0ff104fed24aa0e2
79,348
def get_mse(X, Y, axis=None):
    """
    Return Mean Square Error (MSE) between two time series sequences.
    """
    return ((X - Y) ** 2).mean(axis=axis)
6e3ee9fb970c142c06a4428a6502904db1515f12
79,357
import requests

def get_image_base_url(api_key):
    """Returns base image url string along with an image size retrieved
    from the configuration API
    """
    example_sizes = ['w342', 'w185', 'w154']
    url = 'https://api.themoviedb.org/3/configuration'
    payload = {'api_key': api_key}
    r = requests.get(url, params=payload)
    r.raise_for_status()
    json = r.json()
    images_json = json['images']
    base_url = images_json['base_url']
    available_sizes = images_json['poster_sizes']
    poster_size = None
    for size in example_sizes:
        if size in available_sizes:
            poster_size = size
            break
    return base_url + poster_size
30efc4179db0d16ffd49ef539d95af40ec861072
79,359
def resolveOverwrite(fileName, strPrefix="new-"):
    """
    resolveOverwrite resolves overwrite collisions

    Args:
        fileName (Path): desired file name to use
        strPrefix (str, optional): prefix to use for new name.
            Defaults to "new-".

    Returns:
        Path: Path object with the new file name.
    """
    fileNameTmp = fileName
    # Check if destination file exists and add prefix if it does
    if fileNameTmp.is_file():
        strSuffix = ""
        n = 1
        while True:
            fileNameTmp = fileNameTmp.parent.joinpath(
                strPrefix + fileName.stem + strSuffix + fileName.suffix
            )
            if fileNameTmp.is_file():
                strSuffix = " ({})".format(n)
                n += 1
            else:
                break
    return fileNameTmp
b5d10b8d1beb8238b27a6b5e8e5bdc334e1dfe4e
79,363
def bulleted_list(items):
    """Format a bulleted list of values."""
    return "\n".join(map(" - {}".format, items))
ec7ed3c48e7add1d48c12613014e0a6fa2c42957
79,364
def is_significant_arg(arg):
    """
    None and empty string are deemed insignificant.
    """
    return (
        arg is not None
        and arg != ''
    )
816183d481aa0e3a378fe2cf6132dc14cc46f232
79,368
def kadane_track_indices(arr):
    """
    Implementation of Kadane's algorithm for finding the sub-array with the
    largest sum. Tracks the indices of the sub-array.
    Note: Doesn't handle duplicate max values.
    O(n) Time Complexity, O(1) Space Complexity.
    """
    # stores max sum sub-array found so far
    max_so_far = 0
    # stores max sum of sub-array ending at current position
    max_ending_here = 0
    # start and end points of max sum sub-array
    start = 0
    end = 0
    # start index of positive sum sequence
    beg = 0

    # if all array elements are negative, the answer is the single largest
    # element
    if max(arr) < 0:
        max_so_far = max(arr)
        start = arr.index(max_so_far)
        end = start + 1
    else:
        for idx in range(len(arr)):
            # Update max sum sub-array ending at index idx by adding the
            # current element to the max sum ending at the previous idx.
            max_ending_here = max_ending_here + arr[idx]
            if max_ending_here < 0:
                max_ending_here = 0
                # reset start of positive sum sequence
                beg = idx + 1
            if max_so_far < max_ending_here:
                # update the max sum and the sub-array indices
                max_so_far = max_ending_here
                start = beg
                end = idx + 1

    print("The sum of the contiguous sub-array with the largest sum is %d"
          % max_so_far)
    print("The contiguous sub-array with the largest sum is", arr[start:end])
    return max_so_far
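A check on the classic test vector (the expected sub-array is [4, -1, 2, 1] with sum 6):

best = kadane_track_indices([-2, 1, -3, 4, -1, 2, 1, -5, 4])
# prints the sum (6) and the sub-array [4, -1, 2, 1]
assert best == 6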
13a93b24b2460b97d6bc0e544a2b1ddb7cf96f4d
79,369
def get_artist_tags(connect, artist_mbid, maxtags=20):
    """
    Get the musicbrainz tags and tag count given a musicbrainz artist.
    Returns two lists of length at most 'maxtags'.
    Always returns two lists, possibly empty.
    """
    if artist_mbid is None or artist_mbid == '':
        return [], []
    # find all tags
    q = "SELECT tag.name,artist_tag.count FROM artist"
    q += " INNER JOIN artist_tag ON artist.id=artist_tag.artist"
    q += " INNER JOIN tag ON tag.id=artist_tag.tag"
    q += " WHERE artist.gid='" + artist_mbid + "'"
    q += " ORDER BY count DESC LIMIT " + str(maxtags)
    res = connect.query(q)
    rows = res.getresult()
    if len(rows) == 0:
        return [], []
    return [x[0] for x in rows], [x[1] for x in rows]
beb31247fae0729d3e5e0cc3791da352b5759f54
79,370
def flatten(pb_value, selectors):
    """
    Get a flattened tuple from pb_value. Selectors is a list of sub-fields.

    Usage:
    For a pb_value of:
        total_pb = {
            me: { name: 'myself' }
            children: [{ name: 'child0' }, { name: 'child1' }]
        }
    my_name, child0_name = flatten(total_pb, ['me.name', 'children[0].name'])
    # You get (my_name='myself', child0_name='child0')

    children_names = flatten(total_pb, 'children.name')
    # You get (children_names=['child0', 'child1'])
    """
    def __select_field(val, field):
        if hasattr(val, '__len__'):
            # Flatten repeated field.
            return [__select_field(elem, field) for elem in val]
        if not field.endswith(']'):
            # Simple field.
            return val.__getattribute__(field)
        # field contains index: "field[index]".
        field, index = field.split('[')
        val = val.__getattribute__(field)
        index = int(index[:-1])
        return val[index] if index < len(val) else None

    def __select(val, selector):
        for field in selector.split('.'):
            val = __select_field(val, field)
            if val is None:
                return None
        return val

    # Return the single result for single selector.
    if type(selectors) is str:
        return __select(pb_value, selectors)
    # Return tuple result for multiple selectors.
    return tuple(__select(pb_value, selector) for selector in selectors)
e3eb5b163363953e88fb3cfae76f456eb54fa4b0
79,371
import weakref

def _try_weakref(arg, remove_callback):
    """Return a weak reference to arg if possible, or arg itself if not."""
    try:
        arg = weakref.ref(arg, remove_callback)
    except TypeError:
        # Not all types can have a weakref. That includes strings
        # and floats and such, so just pass them through directly.
        pass
    return arg
bf55443b1ee5aa88f2dff65eb5fe02dd2e85f18d
79,372
def diff(x, axis):
    """Take the finite difference of a tensor along an axis.

    Args:
        x: Input tensor of any dimension.
        axis: Axis on which to take the finite difference.

    Returns:
        d: Tensor with size less than x by 1 along the difference dimension.

    Raises:
        ValueError: Axis out of range for tensor.
    """
    shape = x.shape
    if axis >= len(shape):
        raise ValueError('Invalid axis index: %d for tensor with only %d '
                         'axes.' % (axis, len(shape)))
    # Slice off the first / last element along `axis` and subtract.
    slice_front = [slice(None)] * len(shape)
    slice_back = [slice(None)] * len(shape)
    slice_front[axis] = slice(1, None)
    slice_back[axis] = slice(None, -1)
    d = x[tuple(slice_front)] - x[tuple(slice_back)]
    return d
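A small NumPy check of the finite difference along each axis:

import numpy as np

x = np.array([[1, 3, 6],
              [0, 2, 5]])
print(diff(x, axis=1))  # -> [[2 3], [2 3]]
print(diff(x, axis=0))  # -> [[-1 -1 -1]]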
9a2cc40828e2affbb0b11f0ff32be581c18143b9
79,373
from datetime import datetime

def timestamp_to_milliseconds(obj):
    """Convert an object into a timestamp in milliseconds.

    Parameters
    ----------
    obj
        A :class:`~datetime.datetime` object, an ISO-8601 formatted
        :class:`str`, a :class:`float` in seconds, or an :class:`int`
        in milliseconds.

    Returns
    -------
    :class:`int`
        The timestamp in milliseconds.
    """
    if isinstance(obj, int):  # in milliseconds
        return obj
    if isinstance(obj, float):  # in seconds
        return round(obj * 1e3)
    if isinstance(obj, str):  # an ISO-8601 string
        string = obj.replace('T', ' ')
        fmt = '%Y-%m-%d %H:%M:%S'
        if '.' in string:
            fmt += '.%f'
        obj = datetime.strptime(string, fmt)
    return round(obj.timestamp() * 1e3)
ecfebc78bb7438a216b783b35ba64ac97e5ee356
79,375
def boost_tpm(group_tpm, tpm, med_tpm):
    """
    Return a fraction of boost based on the expression of the IAR.

    :param float group_tpm: The TPM of the IAR.
    :param float tpm: The total boost amount allotted to this criterion.
    :param float med_tpm: The median TPM for all expressed transcripts in
        the patient.
    :returns: The percent of boost provided by this criterion.
    :rtype: float

    >>> boost_tpm(10, 1, 20)
    0.0
    >>> boost_tpm(10, 1, 10)
    0.35
    >>> boost_tpm(10, 1, 5)
    0.6
    >>> boost_tpm(10, 1, 2)
    0.75
    >>> boost_tpm(10, 1, 1)
    1.0
    """
    return round(tpm * ((group_tpm >= med_tpm) * 0.35 +
                        (group_tpm >= 2 * med_tpm) * 0.25 +
                        (group_tpm >= 5 * med_tpm) * 0.15 +
                        (group_tpm >= 10 * med_tpm) * 0.25), 2)
2e609bbbd4d05a836093158e517cb8d359766679
79,388
def identity(x):
    """
    A no-op link function.
    """
    return x
1eb3eb382fc2f9d23e36b1f4bcb51a73a1992f9d
79,391
def parse_maddr_str(maddr_str):
    """
    The following line parses a row like:
        {/ip6/::/tcp/37374,/ip4/151.252.13.181/tcp/37374}
    into:
        ['/ip6/::/tcp/37374', '/ip4/151.252.13.181/tcp/37374']
    """
    return maddr_str.replace("{", "").replace("}", "").split(",")
1a1ca1d846c650a3c01dca04a3debf921186d1a7
79,392
def homogeneous(T):
    """Returns the 4x4 homogeneous transform corresponding to T"""
    (R, t) = T
    return [[R[0], R[3], R[6], t[0]],
            [R[1], R[4], R[7], t[1]],
            [R[2], R[5], R[8], t[2]],
            [0., 0., 0., 1.]]
16e5ddb5d217b60181b1aa6520fd164cd3829a41
79,396
def get_num_days(dataset):
    """
    Parse the time:calendar attribute of a dataset and get the number of
    days a year has
    """
    if "time" in dataset:
        # get the max days from calendar
        calendar = dataset["time"].attrs['calendar']
        max_days = int(calendar.replace("_day", ""))
        return max_days
    else:
        return len(dataset["doy"])
4ce428b8554fd4049e2d3ad5786885b731504903
79,397
import collections

def parse_step_options(opts_seq):
    """
    Parse the steps options passed with -o command line parameters.

    :param opts_seq: sequence of command line options as returned by
        argparse. Their format is:
        <name or category pattern>.<key>=<value>
        If <name or cat pattern> is missing, '*' is assumed. It will also
        be passed to all functions. The pattern is matched using fnmatch.
        If <value> is missing, True is assumed.

    :returns: An OrderedDict that maps steps name pattern to a dict of key
        and values.
    """
    # Use an OrderedDict so that the command line order is used to know what
    # option overrides which one.
    options_map = collections.OrderedDict()
    for opt in opts_seq:
        step_k_v = opt.split('=', 1)
        step_k = step_k_v[0]
        step_k = step_k.split('.', 1)
        if len(step_k) >= 2:
            step_pattern, k = step_k
        else:
            # If no pattern was specified, assume '*'
            step_pattern = '*'
            k = step_k[0]
        # Replace '-' by '_' in key names.
        k = k.replace('-', '_')
        v = step_k_v[1] if len(step_k_v) >= 2 else ''
        options_map.setdefault(step_pattern, dict())[k] = v
    return options_map
ccede66f58b70556838c6f617416ef4b2643300b
79,399
def defect(p=1/3, n=1):
    """
    return the probability that the 1st defect is found during the nth
    inspection
    """
    return (1 - p) ** (n - 1) * p
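This is the geometric distribution, so the probabilities over all n sum to 1:

print(defect(1/3, 1))  # -> 0.333...
print(defect(1/3, 2))  # -> 0.222... (one pass, then a defect)
print(sum(defect(1/3, n) for n in range(1, 200)))  # -> ~1.0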
793c1355eec65944ca31422ec35009844513ace5
79,406
def init_sim(sim, data, cid):
    """
    Loads a world into a client and returns a sim map

    :param sim: A `Sim` class
    :param data: json-serialized scene data to be parsed by `sim`
    :param cid: The client id to use for `sim`
    """
    s = sim(data, cid)
    return s.serialize()
895f96cadf180434beed0f78a2c151e617eddc7d
79,407
def bin2str(s1):
    """
    Convert a binary string to the corresponding ASCII string
    """
    for i in s1:
        assert i == "0" or i == "1"
    bin_list = [int(s1[i:i + 8], 2) for i in range(0, len(s1), 8)]
    for i in bin_list:
        assert i < 256
    bin_list = [chr(i) for i in bin_list]
    return "".join(bin_list)
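For example, two 8-bit groups decode to two ASCII characters:

print(bin2str("0100100001101001"))  # 01001000 -> 'H', 01101001 -> 'i', so 'Hi'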
937de190b65c8a2b86986b26e6e5ae01a87f8a83
79,411
import re

def chuvaSateliteONS(nomeArquivo):
    """
    Reads text files with satellite-verified rainfall. These files are
    made available by ONS.

    Argument
    --------
    nomeArquivo : full path to the file with satellite-verified rainfall.

    Returns
    -------
    Plain lists with longitudes, latitudes and rainfall values. These lists
    need further processing if 'contour'-style maps are desired. The 'xy'
    map type is recommended for this kind of data.
    """
    # Lists to hold the data read from the ONS txt file.
    chuva = []
    lons = []
    lats = []
    try:
        with open(nomeArquivo, 'r') as f:
            for line in f:
                # nomePosto = line[0:10]  # future use
                valores = line[10:]
                tmp = re.findall(r'[-+]?\d*\.\d+|\d+', valores)
                lons.append(float(tmp[1]))
                lats.append(float(tmp[0]))
                chuva.append(float(tmp[2]))
    except:
        raise NameError('Error trying to open/access file: {}'.format(nomeArquivo))
    return lons, lats, chuva
6254962976cb04c2eecf5aceaef45d4566cadfd9
79,415
def class_name(obj):
    """Returns the class name of the object."""
    return type(obj).__name__
9b2493e9b647503d065da2ab62b406b1476723bc
79,418
def failure(msg):
    """ Standard failure result """
    return dict(result='usererror', message=msg)
50de925f3e4c842ed8bc78a184c9fb85bbcdd38d
79,419
def get_results_from_file(filename):
    """
    Reads accuracy results from file and returns them.

    @param filename: the name of the file to get accuracies from.
    @return: list of accuracies.
    """
    results = []
    # for all lines in the given filename
    for line in open(filename).readlines():
        # if we have a line containing results
        if "{F" in line:
            # store all accuracy observations of this threshold.
            results.append(float(line.split(" ")[-2]))
    return results
7e7bcae770b9b2af73aec0ed14d62af612f586ca
79,433
import random

def InitializeClient(db_obj, client_id=None):
    """Initializes a test client.

    Args:
        db_obj: A database object.
        client_id: A specific client id to use for initialized client. If
            none is provided a randomly generated one is used.

    Returns:
        A client id for initialized client.
    """
    if client_id is None:
        client_id = "C."
        for _ in range(16):
            client_id += random.choice("0123456789abcdef")
    db_obj.WriteClientMetadata(client_id, fleetspeak_enabled=False)
    return client_id
5d5c72b539303c3c12abc1d4ffb78e0521b494d7
79,434