content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
def human_readable_size(byte_count, unit="B", binary_marker='i'):
    """Convert a number of bytes into a human-readable size string.

    Args:
        byte_count: Number of bytes; ``None`` is treated as missing data.
        unit: Base unit label appended after the prefix (default "B").
        binary_marker: Marker inserted after the scale prefix (the 'i'
            in "kiB"); pass '' for SI-style "kB".

    Returns:
        str: e.g. "2.0 kiB" — or the int 0 when ``byte_count`` is None
        (kept for backward compatibility with existing callers).
    """
    SUFFIXES = {
        0: "",
        1: "k" + binary_marker,
        2: "M" + binary_marker,
        3: "G" + binary_marker,
        4: "T" + binary_marker,
        5: "P" + binary_marker,
    }
    if byte_count is None:
        return 0
    suffix_order = 0
    # Stop at the largest known suffix: the original loop kept dividing
    # and raised KeyError for values >= 1024**6.
    while byte_count >= 1024 and suffix_order < max(SUFFIXES):
        suffix_order += 1
        byte_count /= 1024
    return "{} {}{}".format(byte_count, SUFFIXES[suffix_order], unit)
593e96802e2045f63b0140aeba1e59b063cc5f7a
333,234
def const(x, *args, **kwargs):
    """Return ``x`` unchanged, ignoring every other argument."""
    return x
fd5a9490b81e5c3fa1cca0cd4cfa197ca0903cde
266,112
def _cleanSimData(simData):
    """ Remove unnecessary columns

    Args:
        simData (pandas data frame): Simulation data

    Returns:
        pandas data frame: Data frame with sim data
    """
    # Drops the calendar helper columns *in place*: the caller's frame is
    # mutated, and the same (modified) object is returned for chaining.
    # Raises KeyError if any of these columns is absent.
    simData.drop(columns=["doy", "weekDaySLP", "summer", "winter", "intermediate"], inplace=True)
    return simData
20acbb8b43cbb79956b1a60e72daeee297fcf9eb
551,932
def pixel_perfect_figsize(image, dpi=80):
    """Return the Matplotlib figsize (width, height), in inches, that maps
    one array element of `image` to one screen pixel at the given dpi.

    Parameters
    ----------
    image : array, shape (M, N[, 3])
        The image to be plotted.
    dpi : int, optional
        The desired figure dpi.

    Returns
    -------
    figsize : tuple of float
        The desired figure size.

    Examples
    --------
    >>> image = np.empty((768, 1024))
    >>> pixel_perfect_figsize(image)
    (12.8, 9.6)
    """
    height_px = image.shape[0]
    width_px = image.shape[1]
    return width_px / dpi, height_px / dpi
a5b6cf66a02792e90fabe67d3b42e392724f1a6f
336,253
def instance_with_scenario_name(instance_name, scenario_name):
    """Format instance name that includes scenario."""
    return "{}-{}".format(instance_name, scenario_name)
2ce13b41e604e7caaa2d9ccc1a7f042dfa223244
661,261
def nz(df, y=0):
    """Replace NaN values in a series with ``y`` (zero by default).

    Mirrors the nz() function of Pinescript.
    """
    return df.fillna(value=y)
5d0b940b88d7c8dfb7109d5e1a2538784231db1c
682,489
def get_job_main_info(job):
    """Collect the job fields that are shown on the terminal.

    :param job: A Job object provided by ScrapingHub
    :return: A dictionary with the job's display fields
    """
    details = job.info
    result = {
        'id': job.id,
        'spider': details.get('spider'),
    }
    for field in ('state', 'items_scraped', 'errors_count'):
        result[field] = details.get(field, '')
    result['tags'] = ', '.join(details.get('tags', []))
    for field in ('version', 'started_time', 'close_reason'):
        result[field] = details.get(field, '')
    return result
b23679cfe8e4996d120e958929f7663d5662d1cf
669,475
from typing import Any
import json


def from_json(s: str, **kwargs: Any) -> object:
    """Deserialize a JSON string into a Python object.

    For usage and keyword arguments, see json.loads(...).
    """
    return json.loads(s, **kwargs)
c82bbb38dc0300ddfbc2cba140536886ddbd9b14
617,261
def ensure_directory(file_path):
    """Create the directory at ``file_path`` if it is not already present.

    Args:
        file_path: a pathlib.Path

    Returns:
        The OSError if the directory could not be created (e.g. a
        permission issue), else None.
    """
    if not file_path.exists():
        try:
            # exist_ok guards against a race where the directory appears
            # between the exists() check and mkdir(); the except clause is
            # narrowed from a blanket Exception to OSError, which covers
            # permission and path errors that mkdir can raise.
            file_path.mkdir(exist_ok=True)
        except OSError as err:
            return err
384f6f40b3af97b3b00cc5e9957badc24c4396b3
587,936
def original_options(options):
    """
    Return the input options with their original names.

    This is used to undo the name change the click package applies
    automatically before passing the options to the function that was
    decorated with 'click.option()'. The original names are needed in
    case there is special processing of the options on top of
    'options_to_properties()'.

    The original names are constructed by replacing any underscores '_'
    with hyphens '-'. This approach may not be perfect in general, but it
    works for the zhmc CLI because the original option names do not have
    any underscores.

    Parameters:

      options (dict): The click options dictionary as passed to the
        decorated function by click (key: option name as changed by
        click, value: option value).

    Returns:

      dict: Options with their original names.
    """
    # Python-2-era six.iteritems() replaced with the native dict.items();
    # this drops the obsolete `six` dependency without changing behavior.
    org_options = {}
    for name, value in options.items():
        org_options[name.replace('_', '-')] = value
    return org_options
4a0e0b0b294710cbf8b88da6e0fdbc844b429db6
476,384
def compare_str(str1: str, str2: str) -> bool:
    """Compare two strings, ignoring trailing whitespace such as '\\n'."""
    left = str1.rstrip()
    right = str2.rstrip()
    return left == right
de0203d12ef009016e07fdf3ba4de6cd64f7a3e2
267,929
def compute_min_refills(distance: int, tank: int, stops: list):
    """
    Computes the minimum number of gas station pit stops.

    Greedy strategy: from the current position, drive to the farthest
    reachable stop on one tank; count a refill only when the trip is not
    yet over. Returns -1 when two consecutive positions are farther
    apart than a full tank.

    >>> compute_min_refills(950, 400, [200, 375, 550, 750])
    2
    >>> compute_min_refills(10, 3, [1, 2, 5, 9])
    -1

    Example 3:
    >>> compute_min_refills(200, 250, [100, 150])
    0
    """
    previous, current = 0, 0
    # Bracket the stop list with the start (0) and the destination so
    # indices 0..len(stops)+1 cover the whole route.
    positions = [0] + stops + [distance]
    num_refills, cur_position = 0, 0
    while current <= len(stops):
        previous = current
        # Advance `current` to the farthest position reachable from
        # `previous` on a single tank.
        while current <= len(stops) and (
            positions[current + 1] - positions[previous]
        ) <= tank:
            current += 1
        cur_position = positions[current]
        # No progress means the next hop exceeds the tank range.
        if current == previous:
            return -1  # destination not possible
        # Refill only if we stopped short of the destination.
        if cur_position < distance:
            num_refills += 1
    return num_refills
41dff6085f3b46b191c40c3dde9b68ee3ee41e3e
705,036
def isCircleCrossingSquare(square, circle):
    """Detect the collision of a square and a circle.

    Axis-aligned test: on each axis the center distance is compared with
    half the square side plus the circle radius.
    """
    sq_x, sq_y, sq_side = square
    c_x, c_y, c_r = circle
    reach = sq_side / 2 + c_r
    return abs(sq_x - c_x) <= reach and abs(sq_y - c_y) <= reach
1db8decb040db04003f71374b1339d381fcd0b3e
544,301
def room_url_for_room_id(roomid):
    """Build the room schedule url from a room id."""
    base = ('https://www-sbhome1.zv.uni-wuerzburg.de/qisserver/rds'
            '?state=wplan&act=Raum&pool=Raum&raum.rgid={}')
    return base.format(roomid)
85dc6cba30c1a11044f4aabd15fda9747e971e41
356,518
from typing import Optional


def fillna(raw_text: Optional[str]) -> str:
    """Return raw_text, or the empty string when it is None or empty.

    >>> fillna("I'm")
    "I'm"
    >>> fillna("")
    ''
    >>> fillna(None)
    ''
    """
    return raw_text or ""
ed195af8a55589150b36eb4ee630e6b7b9af7d3f
356,129
def dunder_partition(key):
    """Splits a dunderkey into 2 parts

    The first part is everything before the final double underscore; the
    second part is after the final double underscore. When the key has no
    '__' at all, the second part is None.

    >>> dunder_partition('a__b__c')
    ('a__b', 'c')
    >>> dunder_partition('abc')
    ('abc', None)
    """
    # rsplit with maxsplit=1 keeps everything before the *last* '__'.
    parts = key.rsplit('__', 1)
    return tuple(parts) if len(parts) > 1 else (parts[0], None)
afd33d6e03de982bb7b71b0e7817fc5f1ae7207f
667,993
def name(path):
    """Extract the resource name (the segment after the last '/')."""
    return path.rsplit('/', 1)[-1]
7e8448e5b1c62c30e7ae28c80580731aa9f6f9fb
10,250
def read_landmarks(f_landmarks):
    """Read a landmarks file into a lookup table.

    Each line is expected tab-separated as "<image name>  x1  y1 ... xK yK".

    :param f_landmarks: text file with image list and corresponding landmarks
    :return: dict mapping image name -> list of int coordinates
    """
    landmarks_lut = {}
    with open(f_landmarks) as fh:
        for raw_line in fh:
            fields = raw_line.replace("\n", "").split("\t")
            landmarks_lut[fields[0]] = [int(v) for v in fields[1:]]
    return landmarks_lut
9c4bf6b405f4fb49ace8badb4b3151cc457bbf84
701,094
import requests


def _get_repo_list(token: str) -> list:
    """
    Retrieve the list of repositories for the authenticated user.

    :param token: The GitLab access token
    :return: A JSON object (list) with the repositories
    :raises requests.HTTPError: if the API call fails
    """
    url = 'https://gitlab.com/api/v4/projects'
    headers = {
        'Private-Token': token
    }
    params = {
        'simple': True,
        'owned': True
    }
    response = requests.get(url, params=params, headers=headers)
    # Bug fix: `requests.codes` stores codes as *attributes*, so
    # `requests.codes.get('ok')` returned None and the old comparison
    # `status_code != None` was always true. raise_for_status() is a
    # no-op on 2xx responses, so calling it unconditionally is both
    # correct and simpler.
    response.raise_for_status()
    return response.json()
56b0c2e7c4ffc57abfc2b5c75ba99edbfd0d38bc
430,060
def _reduce_sampled_fluxes(sampled_fluxes, reactions):
    """
    Reduces the input dataframe of sampled fluxes to a subset of
    selected reactions.

    Parameters
    ----------
    sampled_fluxes : pandas.Dataframe
        The calculated fluxes, output of sampling.
        For each reaction, n fluxes will be calculated
        (n = number of samples taken).
    reactions : list
        List of reactions, e.g. output of `get_subsytem_reactions()`.

    Returns
    -------
    re_arranged_df : pandas.Dataframe
        Sampled fluxes reduced to a specified set of reactions, in long
        format with "Reaction" and "Sampled Fluxes" columns.
    """
    # Boolean column mask: keep only columns named in `reactions`.
    reactions_mask = [
        True if col in reactions else False for col in sampled_fluxes.columns
    ]
    subsystem_df = sampled_fluxes[sampled_fluxes.columns[reactions_mask]]
    # Wide -> long: stack() yields one row per (sample, reaction) pair;
    # the reaction name moves from the columns into column "level_1".
    re_arranged_df = subsystem_df.stack().reset_index(
        level=[1], name="Sampled Fluxes"
    )
    re_arranged_df = re_arranged_df.rename(
        columns={"level_1": "Reaction"}
    ).reset_index(drop=True)
    return re_arranged_df
b6d6a618a0435b9e4f28fe4b10c611e9d269b698
631,515
def pearsonr(pred, target):
    """
    Pearson correlation between target and prediction.
    Mimics `scipy.stats.pearsonr`.

    Parameters:
        pred (Tensor): prediction of shape :math: `(N,)`
        target (Tensor): target of shape :math: `(N,)`
    """
    centered_pred = pred - pred.float().mean()
    centered_target = target - target.float().mean()
    unit_pred = centered_pred / centered_pred.norm(2)
    unit_target = centered_target / centered_target.norm(2)
    return unit_pred @ unit_target
533cd8ce0e99186907bd7ed9d0a5728c57049080
201,491
def indent(data: str, spaces: int) -> str:
    """Indent every line in 'data' by 'spaces' space characters."""
    pad = " " * spaces
    return "\n".join(pad + line for line in data.splitlines())
3d83311d46b98aa277fe86a782c0391d1cd83827
346,931
def create_contours_obj(canvas, contour_groups, colors=None, **kwargs):
    """Create and return a compound object for ginga `canvas`, consisting
    of a number of contour polygons.  `contour_groups` is a list of
    numpy arrays of points representing polygons, such as returned by
    calc_contours() or get_contours().  `colors` (if provided) is a list
    of colors for each polygon.
    Any other keyword parameters are passed on to the Polygon class.
    """
    if colors is None:
        colors = ['black']
    Polygon = canvas.get_draw_class('polygon')
    Compound = canvas.get_draw_class('compoundobject')
    # NOTE(review): the nested comprehension iterates *within* each group
    # (one Polygon per `contour` inside `contours`), while the color index
    # `i` is the group index — so all contours of a group share one color.
    # That reads as if contour_groups were a list of groups-of-polygons,
    # which conflicts with the docstring's "list of numpy arrays of
    # points"; confirm the expected shape against calc_contours().
    objs = [Polygon(contour, color=colors[i % len(colors)], **kwargs)
            for i, contours in enumerate(contour_groups)
            for n, contour in enumerate(contours)
            ]
    contours_obj = Compound(*objs)
    return contours_obj
996b0082d3db00460fdfd93b7cec14474d618a41
308,990
def make_ordinal(num):
    """
    Create an ordinal (1st, 2nd, etc.) from a number.

    The teens are detected modulo 100 so 111, 212, 1013, ... correctly
    get "th" (the original only special-cased the exact values 11-13,
    producing e.g. "111st").
    """
    base = num % 10
    if base in (0, 4, 5, 6, 7, 8, 9) or num % 100 in (11, 12, 13):
        ext = "th"
    elif base == 1:
        ext = "st"
    elif base == 2:
        ext = "nd"
    else:
        ext = "rd"
    return str(num) + ext
d92069f2d1a88adeb1c72e7551adc75af84beb31
89,183
from typing import Dict


def sra_id_to_app_input(sra_id: str) -> Dict:
    """Generate the sra_fastq_importer app input for an SRA accession.

    split_files is set to False so that no merging is needed.

    Args:
        sra_id: SRA accession string.

    Returns:
        dict with "accession" and "split_files" keys.
    """
    payload = {"accession": sra_id}
    payload["split_files"] = False
    return payload
bf1ca62df98932a05cb6fce476a361273f86c35e
690,645
def __update_agent_state(current_agent_state, transition_probability, rng):
    """
    Get agent state for next time step

    Parameters
    ----------
    current_agent_state : int
        Current agent state.
    transition_probability : ndarray
        Transition probability vector corresponding to current state.
    rng : numpy random number generator

    Returns
    -------
    agent_state : int
        Agent state at next time step.
    """
    choice = rng.uniform()
    agent_state = current_agent_state
    # Keep a running cumulative sum instead of re-summing the prefix on
    # every iteration: the original `sum(transition_probability[0:i+1])`
    # made the loop O(n^2) in the vector length. The left-to-right
    # accumulation order (and thus float rounding) is unchanged.
    cumulative = 0.0
    for i, prob in enumerate(transition_probability):
        cumulative += prob
        if choice < cumulative:
            agent_state = i
            break
    return agent_state
4d447f196ac6a326720cdf69c67286fe5053e5fc
19,597
import time


def unix_time_millis(dt):
    """ Convert datetime to unix timestamp

    NOTE(review): despite the name, this returns whole *seconds*, not
    milliseconds — confirm what callers expect before renaming the
    function or scaling by 1000.
    NOTE(review): time.mktime interprets dt in the *local* timezone; for
    naive UTC datetimes calendar.timegm would be needed — verify intent.

    :param dt: a datetime.datetime instance (anything with .timetuple())
    :return: int epoch timestamp in seconds
    """
    return int(time.mktime(dt.timetuple()))
88e306d345452505b94693984da6df7fe292277d
416,315
def get_sorted_paths(paths, highest_first=True):
    """Return the items of 'paths' sorted, highest first if
    `highest_first` is True.

    NOTE(review): the original docstring promised a generator, but
    sorted() returns a list.
    NOTE(review): the key is ``x[0]`` — only the *first element (or
    first character)* of each item decides the order; items sharing a
    first element keep their input order (sorted is stable). Presumably
    the items are tuples keyed on position 0 — confirm with callers.

    This will work for numbers as well as strings."""
    return sorted(paths, key=lambda x: x[0], reverse=highest_first)
17e2280dfa51b01aac8ec261e59cc7699b9e11a7
477,343
def text_to_list_of_lines(text):
    """Convert text into a list of lines, each being a list of words."""
    lines = text.strip().split('\n')
    return [current.split() for current in lines]
cbc7647e4c12432488aa7e59ff99e34a6c6e8c92
502,296
def parse_int(value):
    """Parse an int, interpreting a blank/empty string as 0."""
    if value.strip() == '':
        return 0
    return int(value)
6f0a4360e3273fea848ee11e691bc45a0cd61813
225,324
def countissue(s):
    """Count number of issues.

    Returns len(s) for a non-empty value, and 0 when s is falsy
    (None / empty) or the literal string 'None'.
    """
    if not s or s == 'None':
        return 0
    return len(s)
e1960898476b7a20377293d413b10c9f0ab9b1bb
678,270
def unique_name(name, nlist, max=1000):
    """return name so that is is not in list, by appending _1, _2, ...
    as necessary up to a max suffix

    NOTE(review): `max` shadows the builtin, but renaming it would change
    the keyword interface, so it is kept. If every suffix up to `max` is
    already taken, the last candidate is returned even though it is still
    present in `nlist`.

    >>> unique_name('foo', ['bar', 'baz'])
    'foo'
    >>> unique_name('foo', ['foo', 'bar', 'baz'])
    'foo_1'
    """
    out = name
    if name in nlist:
        # Try name_1, name_2, ... until a free one is found.
        for i in range(1, max+1):
            out = "%s_%i" % (name, i)
            if out not in nlist:
                break
    return out
c22b93846882cd98ade73c91bdb6a8dc23931e9a
512,571
def list_records_params(req):
    """
    Extract the appropriate request parameters for a ListRecords request

    :param req: flask request object
    :return: a dictionary of parameters
    """
    values = req.values
    return {
        "from_date": values.get("from"),
        "until_date": values.get("until"),
        "oai_set": values.get("set"),
        "resumption_token": values.get("resumptionToken"),
        "metadata_prefix": values.get("metadataPrefix"),
    }
712482b49406f12fa761d19d90c1b13cb3d9b9ee
484,525
from typing import Dict
from typing import Any


def get_network_config(network_name: str) -> Dict[str, Any]:
    """Get the network configuration

    Args:
        network_name (str): The name of the network to get the
            configuration for

    Returns:
        Dict[str, Any]: The network configuration

    Raises:
        ValueError: if the network name is not recognized.
    """
    # Shared settings for the two square-error networks.
    square_common = {
        'loss_function': 'square_error',
        'learning_rate': 5,
        'epochs': 5000,
        'print_every': 500,
    }
    configs = {
        '1x1_net': {
            'neurons_per_layer': [1],
            'activations': ['logistic'],
            **square_common,
        },
        '2x1_net': {
            'neurons_per_layer': [2, 1],
            'activations': ['logistic', 'logistic'],
            **square_common,
        },
        '2x2_net': {
            'neurons_per_layer': [2, 2],
            'activations': ['logistic', 'logistic'],
            'loss_function': 'cross_entropy',
            'learning_rate': 0.5,
            'epochs': 100,
            'print_every': 100,
        },
    }
    try:
        return configs[network_name]
    except KeyError:
        raise ValueError(f"Network name {network_name} not recognized.")
1f76fdf249e942a149cb6b5febedeb85a22c3077
497,717
def attr_populated(obj, attr):
    """Return True if attr was populated in obj from source JSON."""
    return bool(getattr(obj, '_populated_' + attr, False))
5a37a1c07c0aaa87409fbc7fe0a8298c0e266859
592,990
import string


def simplestring(length, offset=0):
    """
    Deterministically generates a string.

    Args:
        length: Length of the string
        offset: Offset into the alphabet (wraps around)

    Returns:
        A string formed of lowercase ASCII characters.
    """
    alphabet = string.ascii_lowercase
    return "".join(
        alphabet[(pos + offset) % len(alphabet)] for pos in range(length)
    )
8e1fb53b7790d8a9b95815cc861c47d5f5281c1d
588,572
def get_singleton_scaffolds(scaffoldgraph):
    """Get singleton scaffolds within a scaffold graph.

    Singleton scaffolds represent scaffolds that are direct members of
    only one compound in the current collection.

    Parameters
    ----------
    scaffoldgraph : ScaffoldGraph
        A ScaffoldGraph object to query

    Returns
    -------
    list
        A list of scaffold node keys corresponding to singleton scaffolds.
    """
    def _molecule_count(scaffold):
        # Number of direct successors of this scaffold typed 'molecule'.
        return sum(
            1 for succ in scaffoldgraph.successors(scaffold)
            if scaffoldgraph.nodes[succ].get('type') == 'molecule'
        )

    return [
        scaffold for scaffold in scaffoldgraph.get_scaffold_nodes()
        if _molecule_count(scaffold) == 1
    ]
6847bd77c6ee0be6e5e16aa6885c826309db9554
475,053
def point_distance(p1, p2):
    """Return the Euclidean distance between two points.

    Parameters
    ----------
    p1 : (float, float)
        (x,y) values of point 1
    p2 : (float, float)
        (x,y) values of point 2

    Returns
    -------
    float
        the distance between the two points
    """
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    return (dx * dx + dy * dy) ** 0.5
373ca3c1d3ead67e49e892e3a01e3c522b8b6710
548,324
def get_backend_properties_url(config, backend_type, hub=None):
    """Build the backend-properties URL.

    A 'hub' entry in `config` takes precedence over the `hub` argument;
    with no hub at all the plain /Backends form is used.
    """
    hub = config.get('hub', hub)
    if hub:
        return f'/Network/{hub}/devices/{backend_type}/properties'
    return f'/Backends/{backend_type}/properties'
001194c1ae2fa924a14d6abf215427f00161ecca
353,120
def matches_value_in_range(range_start, range_stop, range_step, value):
    """Match `value` with the nearest value in the given range, clamping
    to the range edges.

    :param int range_start: The start of the range.
    :param int range_stop: The end of the range.
    :param int range_step: The gap between two values in the range.
    :param int value: The value to process.
    :rtype: int

    >>> matches_value_in_range(0, 100, 10, 40)
    40
    >>> matches_value_in_range(0, 100, 10, 42)
    40
    >>> matches_value_in_range(0, 1000, 100, 51)
    100
    >>> matches_value_in_range(42, 1000, 100, 150)
    142
    >>> matches_value_in_range(500, 1000, 100, 100)
    500
    >>> matches_value_in_range(500, 1000, 100, 4000)
    1000
    """
    # Clamp to the range boundaries first.
    if value <= range_start:
        return range_start
    if value >= range_stop:
        return range_stop
    offset = (value - range_start) % range_step
    if offset == 0:
        # Already on a grid point.
        return value
    rounded_down = value - offset
    # Round to the nearest grid point (ties go up).
    return rounded_down if offset < range_step / 2 else rounded_down + range_step
db4dce08873ebe3ba98ffedf8f3e4177a9ebe74b
234,775
def define_limit_offset(request):
    """Extract pagination limit and offset from request.args.

    Falls back to the defaults (limit=12, offset=0) when the args are
    absent, a key is missing, or a value is not a valid integer.

    :param request: flask request object
    :return: (limit, offset) tuple of ints
    """
    limit, offset = 12, 0  # defaults
    if request.args:
        try:
            limit = int(request.args['limit'])
            offset = int(request.args['offset'])
        except (KeyError, TypeError, ValueError):
            # Narrowed from a bare `except:` — only missing keys or
            # non-numeric values fall back; both values revert together,
            # matching the original behavior.
            limit, offset = 12, 0
    return (limit, offset)
31ef7fbc70ec67c0d646024c580b591238cfecbf
47,591
def create_instance(cls_info):
    """
    Instantiate the class described by cls_info.

    cls_info's format is (class_name, args, kwargs), where None stands
    for "no arguments". e.g.: create_instance((dict, None, None))
    """
    cls, args, kwargs = cls_info
    return cls(*(args if args is not None else []),
               **(kwargs if kwargs is not None else {}))
04e24d26e07a4f1117dd6ef049fc172c15ca7c13
387,387
def _zone_group_topology_location_to_ip(location): """Takes a <ZoneGroupMember Location=> attribute and returns the IP of the player.""" # Assume it is of the form http://ip:port/blah/, rather than supporting # any type of URL and needing `urllib.parse`. (It is available for MicroPython # but it requires a million dependencies). scheme_prefix = 'http://' assert location.startswith(scheme_prefix) location = location[len(scheme_prefix):] port_idx = location.find(':') return location[:port_idx]
e3b038b92d4fcb24650dd6905847c9ca3f1fa13e
35,628
import logging


def bad_request(err):
    """Return a custom 400 error response tuple, logging the cause."""
    logging.warning(err)
    message = ('The browser (or proxy) sent a request that this server '
               'could not understand.')
    return message, 400
f5f9ef65d58226f079512bb08f8b57918cc9446c
127,359
def is_string_type(s):
    """
    True iff s is a string (including str subclasses).
    """
    # isinstance() replaces the exact-type comparison `type(s) == type('')`
    # so that str subclasses are also recognized, matching the documented
    # contract "True iff s is a string".
    return isinstance(s, str)
4865ed3806057fca5a96663d40c6ddcdb2be333c
119,140
def references(name, tag):
    """Returns references for space weather dataset

    Parameters
    ----------
    name : string
        Name of space weather index, eg, dst, f107, kp
    tag : string
        Tag of the space weather index

    Raises
    ------
    KeyError
        If the (name, tag) combination is unknown.
    """
    # Typo fix in the user-facing text: "referenece" -> "reference".
    refs = {'dst': {'noaa': ''.join([
        'See reference list and publication at: Sugiura M. and T. Kamei, '
        'http://wdc.kugi.kyoto-u.ac.jp/dstdir/dst2/onDstindex.html, ',
        'last updated June 1991, accessed Dec 2020'])}}
    return refs[name][tag]
0734d643ba0cb385e1cc33f22d336b88dfe8e1f3
508,007
def get_site_by_request(request):
    """
    Return the current site from the given request

    :param request: request to get the site from
    :return: the request's `site` attribute, or None when absent
    """
    return getattr(request, 'site', None)
bb862c66833e369ce015e41187d08695b5494c88
144,834
def skip(s, n):
    """
    Returns a copy of s, only including positions that are multiples of n

    A position is a multiple of n if pos % n == 0.

    Examples:
        skip('hello world',1) returns 'hello world'
        skip('hello world',2) returns 'hlowrd'
        skip('hello world',3) returns 'hlwl'
        skip('hello world',4) returns 'hor'

    Parameter s: the string to copy
    Precondition: s is a nonempty string

    Parameter n: the letter positions to accept
    Precondition: n is an int > 0
    """
    assert type(s) == str and len(s) != 0
    assert type(n) == int and n > 0
    # Extended slicing with step n keeps exactly positions 0, n, 2n, ...
    return s[::n]
d08feaaac4fc1afa3f61d160845a4cfb45feb2a6
174,725
def floating_range(buckets):
    """
    Computes a equal distributed list of bucket thresholds in [0, 1].

    Args:
        buckets (int): Number of buckets; must be >= 2
            (NOTE(review): buckets == 1 divides by zero, and
            buckets > 101 makes int(100/(buckets-1)) zero so range()
            raises ValueError — confirm callers never pass these).

    Returns:
        List: List of bucket thresholds. NOTE(review): when
        (buckets - 1) does not divide 100 evenly the list can contain
        more than `buckets` thresholds (e.g. buckets=4 yields 5 values)
        — verify callers tolerate this.
    """
    return [x / 100 for x in list(range(0, 100, int(100 / (buckets - 1)))) + [100]]
85378532714d73c40ac6b657c12edab9936e582c
390,889
def join(*args):
    """Form a 'join' instruction from (input, ...) argument tuples."""
    return {
        'cmd': 'join',
        'inputs': [pair[0] for pair in args],
        'args': args,
    }
cb20ae8629ef6d36678bc3b16849302456570a4d
645,012
import textwrap


def wrap_headlines(dbhead, width=75):
    """
    wraps lines of a restraint header to prevent too long lines in SHELXL.
    wrapping is done with = at the end of a line and ' ' at start of the
    next line

    :param dbhead: header with restraints
    :param width: wrap after width characters

    >>> line = ['foo bar this is text to wrap. blah bub']
    >>> wrap_headlines(line, 15)
    ['foo bar this is =\\n text to wrap. =\\n blah bub\\n']
    >>> wrap_headlines(['SADI C1 C2 C3 C4'], 10)
    ['SADI C1 C2 =\\n C3 C4\\n']
    """
    # First pass: wrap each header line; every wrapped fragment except
    # the last gets the SHELXL continuation marker ' =\n'. The list is
    # mutated in place.
    for num, line in enumerate(dbhead):
        line = textwrap.wrap(line, width, subsequent_indent=' ')
        if len(line) > 1:
            newline = []
            for n, l in enumerate(line):
                if n < len(line) - 1:
                    l += ' =\n'
                newline.append(l)
            dbhead[num] = ' '.join(newline)
    # Second pass: strip outer whitespace and terminate every line with
    # a single '\n'. Note split(' ')/join(' ') preserves interior spacing.
    for num, line in enumerate(dbhead):
        line = ' '.join(line.strip().split(' '))
        dbhead[num] = line + '\n'
    return dbhead
ecbcede24ad7c02f9ce0fc5a076464811d9f76d8
310,948
def is_ascii(string):
    """
    Returns true if the string only contains ASCII characters, False
    otherwise.
    """
    try:
        string.encode('ascii')
    except UnicodeEncodeError:
        return False
    else:
        return True
defafb1561e078e0356bbc9099d90b796659760a
310,329
import base64


def b64_encode(value: bytes) -> bytes:
    """
    URL safe base 64 encoding of a value, with the '=' padding removed.

    :param value: bytes
    :return: bytes
    """
    # Padding characters only ever appear at the end, so rstrip suffices.
    return base64.urlsafe_b64encode(value).rstrip(b"=")
40a7dfbec7ec390a71cdacc5ab54ce8e2a092754
28,084
def create_exp_id(exp_network, num_layers, num_neurons, batch_size,
                  num_epochs, learning_method, regularization):
    """Create identifier for particular experiment.

    Parameters
    ----------
    exp_network : string
        RNN/Feedforward
    num_layers : int
        number of feedforward hidden layers
    num_neurons : int
        number of neurons for each hidden layer
    batch_size : int
        size of each mini-batch
    num_epochs : int
        total number of training epochs
    learning_method : string
        SGD, momentum SGD, AdaGrad, RMSprop
    regularization : string
        Dropout / L2 regularization

    Returns
    -------
    exp_id : string
        experiment identifier (fields joined with underscores)
    """
    parts = (exp_network, num_layers, num_neurons, batch_size,
             num_epochs, learning_method, regularization)
    return "_".join(str(part) for part in parts)
4791c6cb0c0d60f00da83a75af237dc113f565a3
422,608
import codecs
import json


def get_pyobj_from_json(str_or_path):
    """
    Load JSON from either a file path or a raw JSON string.

    The argument is first treated as a path; if the file cannot be
    opened, or it does not contain valid JSON, the argument itself is
    parsed as JSON text.

    :param str_or_path: path to a JSON file, or a JSON document string
    :return: the decoded Python object
    """
    try:
        # The context manager guarantees the handle is closed (the
        # original leaked it), and the Py2-era `encoding` kwarg of
        # json.load was dropped — it raises TypeError on Python 3.9+.
        # The bare except is narrowed: OSError covers "not a path",
        # ValueError (JSONDecodeError's base) covers "not valid JSON".
        with codecs.open(str_or_path, mode="r", encoding="utf-8") as fp:
            return json.load(fp)
    except (OSError, ValueError):
        return json.loads(str_or_path)
413b06385c6f50d90f94638bf301bc184a5fd574
220,117
def sanitize_code(code):
    """
    Sanitize code removing unnecessary characters

    Hyphens are stripped and the result is right-padded with '0' to at
    least 7 characters. Falsy input (None / '') is returned unchanged.

    Params:
        code (str): target code

    Returns:
        the sanitized code
    """
    if not code:
        return code
    return code.replace('-', '').ljust(7, '0')
714a081ab390d85e9dfb961ba8a89c2abe0cab9a
621,164
def is_external_artifact(label):
    """Determines whether a label corresponds to an external artifact."""
    # Label.EXTERNAL_PATH_PREFIX is due to change from 'external' to '..' in Bazel 0.4.5.
    # This code is for forwards and backwards compatibility.
    # Remove the 'external' check when Bazel 0.4.4 and earlier no longer need to be supported.
    return label.workspace_root.startswith(("external", ".."))
f0c7025c940532ec6d08a29bdb440f43c27312a9
566,898
def inverse_datum_def(datum_def: list):
    """
    Reverse the order of the datum definition list and prepend 'inv' to
    each layer.

    Parameters
    ----------
    datum_def
        A list describing the layers of a datum definition.

    Returns
    -------
    list
        The provided list reversed; layers already containing '+inv'
        have the '+inv ' marker removed, all others get it prepended.
    """
    return [
        layer.replace('+inv ', '') if '+inv' in layer else '+inv ' + layer
        for layer in reversed(datum_def)
    ]
5b39761d3799a605d6386ca612a55aefe8ec40b4
565,891
import json


def read_config(config_file):
    """
    Read parameters from config file.

    Keyword arguments:
    config_file -- json file containing input parameters

    Return:
    coef -- dictionary containing coefficients
    input_conf -- dictionary containing input parameters
    output_conf -- dictionary containing output parameters
    """
    with open(config_file, "r") as handle:
        cfg = json.load(handle)
    return cfg['coef'], cfg['input'], cfg['output']
30622fbbf542528254742848aaff47d8fa5e40c5
369,851
import json


def load_json(file_source, file_name):
    """
    Load a JSON data dictionary.

    Parameters
    ----------
    file_source, file_name : str
        Where is the file stored and what is its name. The two are
        concatenated verbatim, so `file_source` must carry any trailing
        path separator.

    Returns
    -------
    data : dict
        Loaded json dictionary.
    """
    # 'with' closes the handle even when json.load raises — the original
    # open()/close() pair leaked the file object on a parse error.
    with open(file_source + file_name) as f:
        return json.load(f)
cf548b21c59abfadcf23350784945c488d653816
406,769
def map_or_apply(obj, fn):
    """
    If the first argument is iterable, map the function across each item
    in it and return the result. If it looks like a queryset or manager,
    call `.all()` and map the function across the result of that. If it's
    is a single item, just call the function on that item and return the
    result.
    """
    if obj is None:
        return None
    try:
        # Is the object itself iterable?
        # NOTE(review): a TypeError raised by fn(item) *during* the map
        # also lands here and wrongly triggers the `.all()` fallback —
        # confirm fn cannot raise TypeError for valid items.
        return [fn(item) for item in iter(obj)]
    except TypeError:
        try:
            # Does the object have a `.all()` method (is it a manager?)
            return [fn(item) for item in obj.all()]
        except AttributeError:
            # It must be a single object
            return fn(obj)
64d06b611142e2578402f1ed4015d6558bc116e9
521,142
def coco_categories_dict_from_df(df, category_id_col, category_name_col,
                                 supercategory_col=None):
    """Extract category IDs, category names, and supercat names from df.

    Arguments
    ---------
    df : :class:`pandas.DataFrame`
        A :class:`pandas.DataFrame` of records to filter for category info.
    category_id_col : str
        The name for the column in `df` that contains category IDs.
    category_name_col : str
        The name for the column in `df` that contains category names.
    supercategory_col : str, optional
        The name for the column in `df` that contains supercategory names,
        if one exists. If not provided, supercategory is left out of the
        output.

    Returns
    -------
    :class:`list` of :class:`dict` s
        A :class:`list` of :class:`dict` s that contain category records
        per the `COCO dataset specification`_ .
    """
    # Insertion order of this mapping doubles as the column order.
    rename_map = {category_id_col: 'id', category_name_col: 'name'}
    if supercategory_col is not None:
        rename_map[supercategory_col] = 'supercategory'
    cat_df = (df[list(rename_map)]
              .rename(columns=rename_map)
              .drop_duplicates())
    return cat_df.to_dict(orient='records')
3fcf4cf8bac900ea7aa48a5918d15eb70feb9c3a
532,631
def markov_forward(p0, A):
    """Calculate the forward predictive distribution in a discrete Markov chain

    Args:
        p0 (numpy vector): a discrete probability vector
        A (numpy matrix): the transition matrix, A[i,j] means the prob.
            to switch FROM i TO j

    Returns:
        p1 (numpy vector): the predictive probabilities in next time step
    """
    return A.T @ p0
c4bb47422833055a9f6683917a17d868850e0b52
599,963
import torch


def get_laplacian_kernel_3x3(alt=False) -> torch.Tensor:
    """
    Utility function that returns a laplacian kernel of 3x3

    https://academic.mu.edu/phys/matthysd/web226/Lab02.htm
    http://homepages.inf.ed.ac.uk/rbf/HIPR2/log.htm

    With ``alt=False`` the kernel has a -4 center and +1 cross; with
    ``alt=True`` the Kornia-style variant with an 8 center and -1 ring
    is returned (https://kornia.readthedocs.io/en/latest/filters.html).
    Reversing all signs gives the equivalent kernel of opposite
    convention; either is a valid Laplacian.
    """
    if alt:
        rows = [
            [-1, -1, -1],
            [-1, 8, -1],
            [-1, -1, -1],
        ]
    else:
        rows = [
            [0, 1, 0],
            [1, -4, 1],
            [0, 1, 0],
        ]
    return torch.tensor(rows)
c74816a855e95d72ae877cda38bd8799f2b4a3f9
319,711
import torch


def default_optimizer(params, nb_iters, learning_rate=0.5):
    """
    Create a default optimizer for :class:`trw.train.MeaningfulPerturbation`

    Args:
        params: the parameters to optimize
        nb_iters: the number of iterations
        learning_rate: the default learning rate

    Returns:
        a tuple (:class:`torch.optim.Optimizer`,
        :class:`torch.optim.lr_scheduler._LRScheduler`)
    """
    opt = torch.optim.Adam(params, lr=learning_rate)
    # The LR is decayed by 10x at each third of the iteration budget.
    sched = torch.optim.lr_scheduler.StepLR(
        opt, step_size=nb_iters // 3, gamma=0.1)
    return opt, sched
d633d9bb1a7fdeecb2aaa7405cf03c46f2fbc69a
160,500
def get_layer_nr_id(name):
    """
    For the given full layer parameter name, e.g.
    language_model.model.encoder.layer.11.attention.output.dense.bias
    return the numeric layer number (11) and the id
    ("attention.output.dense.bias").

    Returns (None, None) when the name does not start with the expected
    prefix.
    """
    # len(prefix) replaces the magic offset 35, and partition() replaces
    # the manual index() slicing — same result, no hard-coded lengths.
    prefix = "language_model.model.encoder.layer."
    if not name.startswith(prefix):
        return None, None
    layer_nr_str, _, layer_id = name[len(prefix):].partition(".")
    return int(layer_nr_str), layer_id
2690e23f069413e357bfd7644ae679696f2e7d8c
423,489
def yields_from_leung_nomoto_2018_table10(feh):
    """
    Supernova data source: Leung & Nomoto, 2018, ApJ, Volume 861, Issue 2,
    Id 143, Table 10/11

    The seven datasets are provided for Z/Zsun values of 0, 0.1, 0.5, 1, 2,
    3 and 5. Using Zsun = 0.0169 the corresponding FeH values are -1,
    -0.301, 0.0, 0.301, 0.4771 and 0.69897. We use seven intervals
    delimited by midpoints of those values.

    NOTE(review): the meaning and ordering of the 11 entries per list is
    not stated here (presumably per-element yields from the table) —
    confirm against the paper before reordering or editing values.
    """
    # Interval boundaries below are the midpoints between consecutive
    # FeH values listed in the docstring.
    if feh <= -1.65:
        return [0.0, 5.48e-4, 1.3e-11, 2.15e-9, 3.46e-2, 1.63e-4, 2.50e-3, 1.72e-1, 1.14e-1, 2.55e-2, 7.57e-1]
    elif -1.65 < feh <= -0.65:
        return [0.0, 5.44e-4, 1.54e-12, 4.34e-10, 3.81e-2, 1.63e-4, 1.84e-3, 1.79e-1, 1.12e-1, 2.24e-2, 7.60e-1]
    elif -0.65 < feh <= -0.15:
        return [0.0, 5.88e-4, 3.24e-12, 2.94e-10, 4.85e-2, 6.58e-4, 1.69e-3, 2.30e-1, 1.14e-1, 1.84e-2, 7.20e-1]
    elif -0.15 < feh <= 0.15:
        return [0.0, 5.82e-4, 6.45e-12, 3.69e-10, 4.90e-2, 6.56e-4, 1.22e-3, 2.8e-1, 1.9e-1, 1.59e-2, 6.81e-1]
    elif 0.15 < feh <= 0.39:
        return [0.0, 5.71e-4, 1.62e-11, 5.52e-10, 4.94e-2, 6.46e-4, 8.41e-4, 2.13e-1, 9.81e-2, 1.26e-2, 6.44e-1]
    elif 0.39 < feh <= 0.59:
        return [0.0, 5.47e-4, 5.54e-11, 9.20e-10, 6.23e-2, 6.82e-4, 7.57e-4, 2.21e-1, 9.27e-2, 1.11e-2, 5.87e-1]
    elif 0.59 <= feh:
        return [0.0, 5.36e-4, 8.29e-11, 7.60e-10, 7.54e-2, 2.81e-4, 8.39e-4, 2.25e-1, 8.00e-2, 8.93e-3, 4.99e-1]
4a03971e14c80d013259afefdbece6e2c67ccdf8
19,349
def stringified(value, converter=None, none="None"):
    """
    Coerce any object to a string.

    Args:
        value: object to turn into a string (str returned as-is, bytes
            decoded as UTF-8)
        converter (callable | None): optional converter applied to non-string
            objects; a str result is returned directly, a non-None result
            replaces `value`
        none (str | bool | None): representation of `None` ("" or False
            renders None as the empty string; True renders "None")

    Returns:
        (str): string form of `value`
    """
    if isinstance(value, str):
        return value
    if isinstance(value, bytes):
        return value.decode("utf-8")
    if converter is not None:
        outcome = converter(value)
        if isinstance(outcome, str):
            return outcome
        if outcome is not None:
            value = outcome
    if value is None:
        # bool is never a str, so the order of these checks is immaterial
        if none is True:
            return "None"
        if none is False:
            return ""
        if isinstance(none, str):
            return none
        value = none
    return "{}".format(value)
5518dd946c3670e79a81a47182b94bda6c3c52f3
514,614
def flatten(x):
    """Recursively flatten arbitrarily nested iterables into one list.

    Tuples (including tuple subclasses) and strings are treated as atoms
    and preserved as-is.
    """
    out = []
    for item in x:
        # isinstance covers both tuple itself and tuple subclasses
        iterable_to_expand = (
            hasattr(item, "__iter__") and not isinstance(item, (str, tuple))
        )
        if iterable_to_expand:
            out.extend(flatten(item))
        else:
            out.append(item)
    return out
9040facd784890f5dd96cc4c69637c9d5883cc67
347,442
def asn1_length(n):
    """Return a string encoding a field length in ASN.1 (X.690 BER/DER) form.

    Short form: lengths 0..127 are a single octet holding the value.
    Long form: lengths >= 128 are encoded as one octet of 0x80 | k, where k
    is the number of following big-endian value octets.

    Fixes: the original used ``n < 0x7f`` (excluding 127 from the short
    form) and omitted the mandatory long-form prefix octet.
    """
    assert n >= 0
    if n < 0x80:
        # short form: the length fits in one octet with the high bit clear
        return chr(n)
    r = ""
    while n > 0:
        r = chr(n & 0xff) + r
        n >>= 8
    # long form: first octet = 0x80 | number-of-length-octets
    return chr(0x80 | len(r)) + r
8d6d555089d823cb39bcdc3fe728c1b1fab01cff
81,307
def compose_slice_query(search_type, search_term):
    """Build a filter query from a search type and term.

    Args:
        search_type(str): example -> "case:"
        search_term(str): example -> "17867"

    Returns:
        slice_query(str | None): e.g. "case:17867", or None when either
        part is empty/missing
    """
    if not (search_term and search_type):
        return None
    return search_type + search_term
e24b4a05fbfbe44e73c903375a22b51cae91b895
86,137
import json
from collections import OrderedDict


def load_json_string(json_input):
    """Parse a JSON string, preserving object key order.

    Fixes: OrderedDict was imported from ``typing`` (a deprecated
    annotation-only alias) and instantiated; the concrete class lives in
    ``collections``.

    Args:
        json_input (str): JSON document to parse.

    Returns:
        The parsed value, with JSON objects materialized as OrderedDict.
    """
    return json.loads(json_input, object_pairs_hook=OrderedDict)
3c715575fffa51d94a4faaf5c3b29e0181da3548
499,115
def get_alarm_descriptions(
    self,
    data_format: str,
    default_values: bool,
) -> list:
    """Get alarm descriptions and type details.

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - alarm
          - GET
          - /alarm/description2

    :param data_format: If ``csv``, ask browser to download a file of
        Orchestrator alarm descriptions in .csv format.
    :type data_format: str
    :param default_values: If ``True``, export alarm descriptions with
        default values; when ``False``, export alarm descriptions with
        customized values.
    :type default_values: bool
    :return: List of alarm description objects, each with keys such as
        ``typeId``, ``name``, ``severity``, ``description``,
        ``recommendedAction``, ``serviceAffecting``, ``source``,
        ``systemType`` (``0`` = Appliance, ``100`` = Orchestrator),
        ``sourceType`` (``1`` = Tunnel, ``2`` = Traffic Class,
        ``3`` = Equipment, ``4`` = Software, ``5`` = Threshold) and
        ``alarmType``.
    :rtype: list
    """
    path = "/alarm/description2"
    # Collect whichever query parameters were supplied, then join them.
    query_parts = []
    if data_format is not None:
        query_parts.append("format={}".format(data_format))
    if default_values is not None:
        query_parts.append("default={}".format(default_values))
    if query_parts:
        path += "?" + "&".join(query_parts)
    return self._get(path)
df329965c15a7c9ecc5c81b2005216c40df98a1c
305,100
import typing


def flatten_list(list_of_lists: typing.List[typing.List[typing.Any]]) -> typing.List[typing.Any]:
    """Collapse a two-level nested list into a single flat list.

    Parameters
    ----------
    list_of_lists: typing.List[typing.List[typing.Any]]
        2 level nested list

    Returns
    -------
    typing.List[typing.Any]
        the unrolled list
    """
    flat: typing.List[typing.Any] = []
    for sublist in list_of_lists:
        flat.extend(sublist)
    return flat
eab91121f66cefc1ab7e43f5644544360f1e678c
659,135
from typing import Iterable
from typing import Any
from typing import Tuple


def tuple_from_iterable(val: Iterable[Any]) -> Tuple[Any, ...]:
    """Materialize any iterable into a tuple.

    Workaround for https://github.com/python-attrs/attrs/issues/519
    """
    return (*val,)
7880b1395f14aa690f967b9548456105b544d337
3,308
def find_data_in_soup(soup, tag: str, class_id: str) -> str:
    """Return the text of the first *tag* element with class *class_id*
    inside a BeautifulSoup object.
    """
    match = soup.find(tag, class_=class_id)
    return match.get_text()
cfd7adaef45f21e1a945b293e5b767a6a0753677
434,157
def add_args(parser):
    """
    Create parser for command line utility.

    Registers six argument groups (data, projection, contact, interaction,
    training, output/device) on the given argparse parser and returns it.

    :param parser: argparse.ArgumentParser to populate
    :return: the same parser, with all arguments added

    :meta private:
    """
    data_grp = parser.add_argument_group("Data")
    proj_grp = parser.add_argument_group("Projection Module")
    contact_grp = parser.add_argument_group("Contact Module")
    inter_grp = parser.add_argument_group("Interaction Module")
    train_grp = parser.add_argument_group("Training")
    misc_grp = parser.add_argument_group("Output and Device")

    # Data
    data_grp.add_argument(
        "--train", required=True, help="list of training pairs"
    )
    data_grp.add_argument(
        "--test", required=True, help="list of validation/testing pairs"
    )
    data_grp.add_argument(
        "--embedding",
        required=True,
        help="h5py path containing embedded sequences",
    )
    data_grp.add_argument(
        "--no-augment",
        action="store_true",
        help="data is automatically augmented by adding (B A) for all pairs (A B). Set this flag to not augment data",
    )

    # Embedding model
    proj_grp.add_argument(
        "--input-dim",
        type=int,
        default=6165,
        help="dimension of input language model embedding (per amino acid) (default: 6165)",
    )
    proj_grp.add_argument(
        "--projection-dim",
        type=int,
        default=100,
        help="dimension of embedding projection layer (default: 100)",
    )
    proj_grp.add_argument(
        "--dropout-p",
        type=float,
        default=0.5,
        help="parameter p for embedding dropout layer (default: 0.5)",
    )

    # Contact model
    contact_grp.add_argument(
        "--hidden-dim",
        type=int,
        default=50,
        help="number of hidden units for comparison layer in contact prediction (default: 50)",
    )
    contact_grp.add_argument(
        "--kernel-width",
        type=int,
        default=7,
        help="width of convolutional filter for contact prediction (default: 7)",
    )

    # Interaction Model
    inter_grp.add_argument(
        "--no-w",
        action="store_true",
        help="don't use weight matrix in interaction prediction model",
    )
    inter_grp.add_argument(
        "--no-sigmoid",
        action="store_true",
        help="don't use sigmoid activation at end of interaction model",
    )
    inter_grp.add_argument(
        "--do-pool",
        action="store_true",
        help="use max pool layer in interaction prediction model",
    )
    inter_grp.add_argument(
        "--pool-width",
        type=int,
        default=9,
        help="size of max-pool in interaction model (default: 9)",
    )

    # Training
    train_grp.add_argument(
        "--num-epochs",
        type=int,
        default=10,
        help="number of epochs (default: 10)",
    )
    train_grp.add_argument(
        "--batch-size",
        type=int,
        default=25,
        help="minibatch size (default: 25)",
    )
    train_grp.add_argument(
        "--weight-decay",
        type=float,
        default=0,
        help="L2 regularization (default: 0)",
    )
    train_grp.add_argument(
        "--lr",
        type=float,
        default=0.001,
        help="learning rate (default: 0.001)",
    )
    # --lambda is stored under a different dest because "lambda" is a
    # reserved word in Python
    train_grp.add_argument(
        "--lambda",
        dest="interaction_weight",
        type=float,
        default=0.35,
        help="weight on the similarity objective (default: 0.35)",
    )

    # Output
    misc_grp.add_argument(
        "-o", "--output", help="output file path (default: stdout)"
    )
    misc_grp.add_argument(
        "--save-prefix", help="path prefix for saving models"
    )
    misc_grp.add_argument(
        "-d", "--device", type=int, default=-1, help="compute device to use"
    )
    misc_grp.add_argument(
        "--checkpoint", help="checkpoint model to start training from"
    )

    return parser
7c44797757db29883990f1d4ac4fe4cb8ba9ed7a
228,607
def validate_ecoli(seq_list, metadata_reports):
    """
    Check the combinedMetadata sheets for the uidA marker and vt markers and
    record their presence per Seq ID as a (uida_present, verotoxigenic)
    tuple of booleans.

    :param seq_list: List of OLC Seq IDs
    :param metadata_reports: Dictionary retrieved from get_combined_metadata()
    :return: Dictionary mapping Seq ID -> (uidA present, vt present);
             both False when the genus is not Escherichia
    """
    ecoli_seq_status = {}
    for seqid in seq_list:
        print('Validating {} uidA and vt marker detection'.format(seqid))
        report = metadata_reports[seqid]
        row = report.loc[report['SeqID'] == seqid]
        genus = row['Genus'].values[0]
        uida_present = False
        verotoxigenic = False
        # Marker checks only apply when the observed genus is Escherichia
        if genus == 'Escherichia':
            uida_present = 'uidA' in row['GeneSeekr_Profile'].values[0]
            verotoxigenic = 'vt' in row['Vtyper_Profile'].values[0]
        ecoli_seq_status[seqid] = (uida_present, verotoxigenic)
    return ecoli_seq_status
2d038799d6cce9588215893835ea6ae62139d07f
97,136
def get_suit_of_card(position_of_card, deck):
    """Return the suit name of the card at the given position in the deck.

    Fixes: the original only handled suit codes 0 and 1 and silently
    returned None for 2 and 3.

    :param position_of_card: index of the card in ``deck``
    :param deck: sequence of cards where card[0] is the suit code
    :return: suit name string, or None for an unknown suit code
    """
    # NOTE(review): names for codes 2/3 assumed from the conventional
    # Spades/Hearts/Diamonds/Clubs ordering — confirm against the deck
    # builder's encoding.
    suit_names = {0: "Spades", 1: "Hearts", 2: "Diamonds", 3: "Clubs"}
    suit_int = deck[position_of_card][0]
    return suit_names.get(suit_int)
0ebef9b82530684d6056ec2d0f11a9ab4babdbe8
98,709
def format_pylist_to_mathematica_legend(labelname_syntax, pylabel_list):
    """
    Helper function: render a python list of labels as a Wolfram Mathematica
    legend option string.

    Parameters
    -------------
    labelname_syntax : str
        Legend option name in Wolfram Mathematica syntax.
    pylabel_list : list
        The list of names in python.

    Returns
    -------------
    labels : str
        Mathematica syntax, e.g. "PlotLegends -> {a, b}".
    """
    raw = '%s -> %s' % (labelname_syntax, pylabel_list)
    # One pass: map [ -> {, ] -> }, and drop single quotes
    return raw.translate(str.maketrans("[]", "{}", "'"))
26d42e5330c89dac5351507ca3a37e4d2f8e03c8
607,790
def select_by_hour(df, hour="All"):
    """
    Filter movie-showtime rows to a named time window.

    Required argument:
        - df: pandas dataframe, contains movie showtimes

    Optional argument:
        - hour: string, designates a time window; valid options are:
            "All", "9-12am", "12-3pm", "3-6pm", "6-9pm", "9-12pm".
            Defaults to "All". Any unrecognized value falls back to the
            late-evening window (21:00-23:59), matching the original
            else branch.

    Returns:
        - df: pandas dataframe filtered to showtimes within the window
          (inclusive on both ends).
    """
    if hour == "All":
        return df
    windows = {
        "9-12am": ('09:00:00', '12:00:00'),
        "12-3pm": ('12:00:00', '15:00:00'),
        "3-6pm": ('15:00:00', '18:00:00'),
        "6-9pm": ('18:00:00', '21:00:00'),
    }
    start, end = windows.get(hour, ('21:00:00', '23:59:00'))
    return df[df["dt_showtime"].between(start, end)]
c79c596d75b0c33c68ecb30efdf6031abb340ab0
579,765
import re

# Compiled once: matches any single whitespace character
_WHITESPACE_RE = re.compile(r"\s")


def clean_whitespace(text):
    """Replace each whitespace character with a simple space.

    Parameters
    ----------
    text : str
        The text to clean

    Returns
    -------
    clean_text : str or None
        The stripped text with whitespace replaced, or None for non-str input
    """
    if isinstance(text, str):
        return _WHITESPACE_RE.sub(" ", text.strip())
    return None
81425799e7e68a7d7c42204e1b9578a7133d0378
379,967
def get_object_dir_prefix(objhash):
    """Return the object directory prefix: the hash's first two characters
    followed by a slash (e.g. "ab/")."""
    prefix = objhash[:2]
    return prefix + "/"
0b44010ce1a2b5fb25667e85103815667afa95c3
342,013
def does_contain_unicode(name: str) -> bool:
    """
    Report whether *name* contains any non-ASCII character.

    >>> does_contain_unicode('hello_world1')
    False
    >>> does_contain_unicode('')
    False
    >>> does_contain_unicode('привет_мир1')
    True
    >>> does_contain_unicode('russian_техт')
    True
    """
    try:
        name.encode('ascii')
        return False
    except UnicodeEncodeError:
        return True
0470aa98e257585e981ef4e850ed3ce98c72b479
119,749
def tlbr2bbox(top, left, bottom, right, oper=int):
    """Convert tlbr = [top, left, bottom, right] coordinates to
    bbox = [x(left), y(top), width, height].

    :param oper: callable applied to every output value (default: int)
    """
    return [
        oper(left),            # x
        oper(top),             # y
        oper(right - left),    # width
        oper(bottom - top),    # height
    ]
6234cf0296095c3b0eeb8e3e00667c60cfbe361f
440,993
def sphere_sre(solution):
    """
    Variant of the sphere function. Dimensions except the first 10 ones
    have limited impact on the function value (their squared deviations
    are averaged over the full dimensionality rather than summed).

    Fixes: removed the dead local ``a = 0``.

    :param solution: object exposing get_x() -> list of coordinates
    :return: float objective value
    """
    bias = 0.2
    x = solution.get_x()
    head = x[:10]
    tail = x[10:]
    value1 = sum([(i - bias) * (i - bias) for i in head])
    value2 = 1 / len(x) * sum([(i - bias) * (i - bias) for i in tail])
    return value1 + value2
38987c77a6586a0bfab1d94cc4a1511e9418349f
697,867
import re

# Any run of characters that are not allowed in a hostname label
_INVALID_RUN = re.compile('[^A-Za-z0-9]+')


def sanitize_host_name(name):
    """Return a sanitized version of the given name, suitable for use as a
    hostname: invalid runs become single dashes, length is capped at 63,
    and leading/trailing dashes are stripped."""
    collapsed = _INVALID_RUN.sub('-', name)
    return collapsed[:63].strip('-')
8997b036ced083fb0910a64ec48d6feb45aa4667
388,543
def age_format(value):
    """Format an age given in days as a string in years, e.g. "1.00 years"."""
    years = value / 365.2524
    return "{:,.2f} years".format(years)
a3297c2e7d5f16eacd0de485cd0296fda524577b
487,101
def insertIntoPath(original, insertion='rest'):
    """
    Insert a string after the first block in a path, for example
    /my/original/path, insertion -> /my/INSERTION/original/path
    """
    # Position of the slash that ends the first path component
    cut = original.index('/', 1)
    return original[:cut] + '/' + insertion + original[cut:]
d43a5ea7c428202da9058d45bab289027a873e46
363,896
import dill


def run_dill_encoded(what):
    """
    Decode a dill-serialized (function, args) pair and invoke it.

    This enables multiprocessing with callables that plain pickle cannot
    serialize. http://stackoverflow.com/a/24673524
    """
    func, func_args = dill.loads(what)
    return func(*func_args)
70e5ccd8fa8e2b369cb45f80993bb6196287fc28
589,271
def _is_kanji(char): """ Check if given character is a Kanji. """ return ord("\u4e00") < ord(char) < ord("\u9fff")
6c785eca322002b80ff20212d1a914e538d684ab
656,654
def greet(name):
    """
    Greets a person.

    :param name: a string input.
    :return: None if string empty or None; otherwise the greeting string.
    """
    if name is None or name == "":
        return None
    return "hello " + name + "!"
40d94316218fac1513ccdcebe5b0b738facfbc6f
393,030
def create_symbol(
    client, symbol_file, point, drawing=None, replace_values=None, sheet=None
):
    """Add a symbol instance to a drawing.

    Args:
        client (obj): creopyson Client
        symbol_file (str): Name of the symbol file.
        point (dict): Coordinates for the symbol in Drawing Units.
        drawing (str, optional): Drawing name. Defaults: current active
            drawing.
        replace_values (dict, optional): Replacement values for any variable
            text in the symbol. Defaults to None.
        sheet (int, optional): Sheet number (0 for all sheets). Defaults:
            the symbol is added to all sheets.

    Returns:
        None
    """
    payload = {"symbol_file": symbol_file, "point": point}
    # Only truthy optional arguments are forwarded (so sheet=0 means "all
    # sheets" and is omitted, as before).
    for key, argument in (
        ("drawing", drawing),
        ("replace_values", replace_values),
        ("sheet", sheet),
    ):
        if argument:
            payload[key] = argument
    return client._creoson_post("drawing", "create_symbol", payload)
82a90571de8f3a988659d6da8b20afe37044c8eb
644,272
def get_countries(market):
    """Return a comma-separated, alphabetically sorted list of the unique
    countries the Market serves.

    Fixes: the original sorted the list and then wrapped it in ``set()``
    again before joining, which discarded the sort order.
    """
    unique_names = {str(country) for country in market.countries_served.all()}
    return ", ".join(sorted(unique_names))
9ed65c7244a171fb288f834042f7c2da86f7b9d5
88,838
def get_structure_info(structure):
    """Get information from a directory structure definition.

    Fixes: removed a leftover debug ``print(level, label)``.

    Parameters
    ----------
    structure : dict
        Definition of the directory: maps level number -> {parent_label:
        [folder names]}.

    Returns
    -------
    names : list of str
        List of all the folder names (at any level).
    paths : list of str
        List of all the relative paths for all folders.
    """
    names, paths = [], []
    for level in sorted(structure):
        for label in structure[level]:
            for name in structure[level][label]:
                names.append(name)
                # Anchor the new folder under every already-known path whose
                # final component matches the parent label.
                parents = [val for val in paths if val.split('/')[-1] == label]
                paths.append('/'.join(parents + [name]))
    return names, paths
e1a4962dea615d528ce28bd63bfb943fc134c0c3
250,655
def values_by_key(dct, keys, fill_val=None):
    """Return a tuple of dct[key] for each key in keys, substituting
    fill_val for missing entries."""
    return tuple(dct.get(key, fill_val) for key in keys)
5fb9ff1fab4a54d11412e7b038545307ace8e363
216,947
def team_info(team, purpose):
    """
    Display (purpose == "console") or return (purpose == "email") a team's
    information.

    Fixes: replaced four ``!= None`` comparisons with ``is not None``.

    Params:
        team(dict): all information about the requested team (name, founded,
            venue, clubColors, activeCompetitions, address, phone, website,
            email).
        purpose(string): "console" prints a formatted summary and returns
            None; "email" returns [address, phone, website(, email)].

    Returns:
        None for "console"; list of contact strings for "email".
    """
    if purpose == "console":
        print("\n" + team["name"].upper() + "\n")
        print("\t" + "FOUNDED: " + str(team["founded"]))
        print("\t" + "VENUE: " + team["venue"])
        print("\t" + "CLUB COLORS: " + team["clubColors"] + "\n")
        print("\t" + "ACTIVE COMPETITIONS:")
        for competition in team["activeCompetitions"]:
            print("\t " + competition["name"])
        print("\n\t" + "ADDRESS: " + team["address"])
        if team["phone"] is not None:
            print("\t" + "PHONE: " + team["phone"])
        print("\t" + "WEBSITE: " + team["website"])
        if team["email"] is not None:
            print("\t" + "EMAIL: " + team["email"])
        print()
    elif purpose == "email":
        team_contacts = [team["address"], team["phone"], team["website"]]
        if team["email"] is not None:
            team_contacts.append(team["email"])
        return team_contacts
b94df8a22b7571e8f05d4547d734c95fa7939ea3
155,488
def is_translator(permission):
    """
    Based on settings.CMS_CONTEXT_PERMISSIONS: given a permission code
    (int), return a dict with translator permission info (empty dict for
    non-positive codes; allow_descendant is True for codes above 1).
    """
    if permission <= 0:
        return {}
    return {
        'only_created_by': False,
        'allow_descendant': permission > 1,
    }
8e732ad0dab89df4f63de0f8d9260604d61d7ae0
508,360
import torch


def construct_length_mask(seq_lengths):
    """
    Construct a sequence-length mask that rules out padding positions.

    :param seq_lengths: a list or 1D Tensor containing the lengths of the
        sequences in a batch
    :return: a 2D BoolTensor of shape (batch, max_len), True at every
        position before the sequence's length
    """
    batch_size = len(seq_lengths)
    max_len = max(seq_lengths)
    mask = torch.zeros(batch_size, max_len, dtype=torch.bool)
    for row, valid_len in enumerate(seq_lengths):
        mask[row, :valid_len] = True
    return mask
5b3215cd34e95e6e6ef7db564abf679751bcb9b1
583,702
def count_params(net, exclude=None):
    """
    Count parameters for net.

    Fixes: replaced the mutable default argument ``exclude=[]`` with None,
    and uses a set for O(1) exclusion lookups.

    :param net: mxnet.gluon.Block
        Net or block to be counted parameters for.
    :param exclude: list of mxnet.gluon.nn.Block, optional
        Blocks whose parameters are excluded from the count.
    :return: int
        The number of parameters of net.
    """
    if exclude is None:
        exclude = []
    excluded_names = set()
    for exc in exclude:
        excluded_names.update(exc.collect_params())
    params = net.collect_params()
    return sum(
        params[name].data().size
        for name in params
        if name not in excluded_names
    )
5aa12631d781e53fdce9b47ff7fad9cec9354779
564,800