Dataset columns: content (string, 35 to 416k chars), sha1 (string, 40 chars), id (int64, 0 to 710k)
import requests


def record_asset_submission_render(submission_id, render):
    """Save a record of a single render for a submission."""
    url = "https://www.3xr.com/a/submission/record_render"
    data = {
        'submissionId': submission_id,
        'render': render,
    }
    # Note: the original patched ssl._create_default_https_context to skip
    # certificate checks, which has no effect on requests; passing
    # verify=False achieves the stated intent.
    response = requests.post(url, data=data, verify=False)
    if response.ok:
        return response.json() == "success"
    return False
f2e7c053d16499ade3edacb07753fbac7fbef455
697,533
def loglines(logspath):
    """
    Read logging lines generated by a test case.

    Parameters
    ----------
    logspath : str
        Path to which test case wrote logs.

    Returns
    -------
    Iterable of str
        Logs from the test case.
    """
    with open(logspath, 'r') as logfile:
        return logfile.readlines()
a30680a863fba0b1098536404021b4d32b31abff
697,534
import os


def get_dir(fileref=__file__):
    """Return the absolute path of a file's directory, with a trailing slash."""
    pwd = os.path.realpath(os.path.dirname(fileref))
    return "%s/" % pwd
f90081b15d5ac38cdf18daab348e0ee49ce90ade
697,535
import re


def read_best_values(file_list):
    """Reads the best hypervolume values from files in file_list, where each
    is formatted as a C source file (reading starts on the line after the
    first encountered 'static').

    Returns a dictionary mapping problem names to their best known
    hypervolume values.

    :param file_list: list of file names
    """
    result = {}
    for file_name in file_list:
        read = False
        with open(file_name, 'r') as f:  # the explicit f.close() was redundant
            for line in f:
                if read:
                    if line[0:2] == '};':
                        break
                    split = re.split(',|"|\t| |\n', line)
                    entries = [item for item in split if item]
                    result.update({entries[0]: entries[1]})
                elif line[0:6] == 'static':
                    read = True
    return result
9f9088931f64dbeb463223b62c871c1462561ca6
697,537
def dataDirection_2(datas, x_min, x_max):
    """
    Positive normalization for interval-type indicators: the closer a value
    is to the midpoint of the interval, the better.
    :param datas: iterable of values
    :param x_min: minimum of all the data
    :param x_max: maximum of all the data
    """
    def normalization(data):
        if data <= x_min or data >= x_max:
            return 0
        elif data < (x_min + x_max) / 2:
            return 2 * (data - x_min) / (x_max - x_min)
        else:
            return 2 * (x_max - data) / (x_max - x_min)

    return list(map(normalization, datas))
7953111a43cf74c761e60186147e141261f09b6c
697,538
def GetGems(gem_counts):
    """Returns a list of GemTypes given a dict from GemType to counts."""
    gems_list = []
    for gem_type, count in gem_counts.items():  # iteritems() is Python 2 only
        if count < 0:
            raise ValueError("count cannot be negative")  # was NotImplementedError
        for _ in range(count):
            gems_list.append(gem_type)
    return gems_list
08b4f7e669cbf2a0fb621449a2d845500637c4e1
697,539
def _get_obj_name(obj):
    """Auxiliary function to retrieve the name of an object."""
    name = str(obj).replace("class ", "")
    return name.translate(str.maketrans({c: "" for c in "(<>),'"}))
92e3d774036a1f0d2bab9bdc60ae9e6582b8aa40
697,540
def uniform_heuristic(item):
    """Taking in an item, return 1."""
    return 1
bf1b25c96d25c56852446e36c6653c141a6f6460
697,541
def can_read_deleted(context):
    """Indicates if the context has access to deleted objects."""
    if not context:
        return False
    return context.read_deleted
58b3025f133ee6bb8a1cc17c410bc1b0ea43c3b5
697,542
import requests


def get_google_dot_com(use_ssl=True):
    """Return the contents of Google.com

    :param use_ssl: Use SSL? You should! Don't turn this off. You've been
        warned. Scamp!
    """
    protocol = "https" if use_ssl else "http"
    return requests.get(f"{protocol}://www.google.com/").text
246420a67f5706fab2bf3d13bb6bb9e1bb7d00d7
697,543
import torch


def get_gpu_or_cpu_number(device):
    """Returns the GPU number on which the tensors will be run.
    Returns -1 if the CPU is used."""
    if 'cuda' in device:
        if not torch.cuda.is_available():
            raise RuntimeError("Torch cuda check failed, your drivers might "
                               "not be correctly installed")
        gpu = device.split(":")
        if len(gpu) > 1:
            gpu_n = int(gpu[1])
        else:
            gpu_n = 0
    else:
        gpu_n = -1  # i.e., tensors are CPU-based
    return gpu_n
5a16538e8339747d459ac94467278017b8294f21
697,544
def get_first_key_that_exists(dictin, keylist):
    """Try the keys in keylist in order and return the value for the first
    key that exists."""
    found_key = False
    for key in keylist:
        if key in dictin:  # dict.has_key() was removed in Python 3
            found_key = True
            return dictin[key]
    # We should never reach this point if any key in keylist is found.
    valid_keys = dictin.keys()
    msg = "Did not find a match for any key in keylist: %s. \n Valid keys: %s" % \
          (keylist, valid_keys)
    assert found_key, msg
418e82c5705e2f0df36a99e48346fc01efc410e1
697,545
def map_calculated_scores_to_user(predictions, user):
    """
    Replaces the previous scores (only 0's in production) with the computed
    scores.
    :param predictions: the list of prediction-scores
    :param user: the user with its (original) scores
    :return: the user with its new scores
    """
    for row in predictions:
        user.scores[row[0]] = row[1]
    return user
23a1dcc077cab2f5f27750c660abbab09bf0ff4c
697,546
from pathlib import Path


def check_db_dir(parent: Path):
    """Checks whether the db directory exists. If not, create it.

    Args:
        parent (Path): the parent to both server_settings and db

    Returns:
        Path: the directory itself

    Raises:
        Exception: the db dir path exists, but it isn't a directory
    """
    db_dir = parent / 'db'
    if db_dir.exists() and not db_dir.is_dir():
        raise Exception(f"""{db_dir} exists, but it isn't a directory!""")
    elif not db_dir.exists():
        db_dir.mkdir()
    return db_dir
3f7c0197dc7d7b04c0864d479ac834518815aade
697,547
def stronglyConnectedComponents(graph):
    """
    `graph` here is a dict from keys (of some hashable class) to a list of
    keys.
    """
    indexCounter = [0]
    stack = []
    lowLinks = {}
    index = {}
    result = []

    def strongConnect(node):
        index[node] = indexCounter[0]
        lowLinks[node] = indexCounter[0]
        indexCounter[0] += 1
        stack.append(node)
        try:
            successors = graph[node]
        except KeyError:  # avoid a bare except: only a missing node is expected
            successors = []
        for successor in successors:
            if successor not in lowLinks:
                # Successor has not yet been visited; recurse on it
                strongConnect(successor)
                lowLinks[node] = min(lowLinks[node], lowLinks[successor])
            elif successor in stack:
                # the successor is in the stack and hence in the current SCC
                lowLinks[node] = min(lowLinks[node], index[successor])
        # If `node` is a root node, pop the stack and generate an SCC
        if lowLinks[node] == index[node]:
            connectedComponent = []
            while True:
                successor = stack.pop()
                connectedComponent.append(successor)
                if successor == node:
                    break
            component = tuple(connectedComponent)
            result.append(component)

    for node in graph:
        if node not in lowLinks:
            strongConnect(node)
    return result
7aadee9f28de3f946e4aca12cdb69b2a63e33a75
697,548
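A quick usage sketch for stronglyConnectedComponents above, on a hypothetical graph where 'a', 'b', 'c' form a cycle and 'd' is a sink:

    >>> graph = {'a': ['b'], 'b': ['c'], 'c': ['a', 'd'], 'd': []}
    >>> stronglyConnectedComponents(graph)
    [('d',), ('c', 'b', 'a')]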
def cloudfront_forward_type(forward):
    """
    Property: Cookies.Forward
    """
    valid_values = ["none", "all", "whitelist"]
    if forward not in valid_values:
        raise ValueError('Forward must be one of: "%s"' % (", ".join(valid_values)))
    return forward
cdc02102eb35331b02ca034b5e726df66e20d9af
697,549
from bs4 import BeautifulSoup


def read_XML_file(filename):
    """Reads the file with the specified filename and returns the parsed XML tree.

    :param filename: path to file with urls
    :return: parsed XML tree contained in the specified file
    """
    with open(filename) as xml:
        soup = BeautifulSoup(xml, "lxml-xml")
    return soup
025b45453ef812d6d91d8386dde33f8e59882ce1
697,550
def cond_prob_dike_failure(q_occ, q_critical):
    """
    :param q_occ: occurring overtopping discharges (array)
    :param q_critical: critical dike discharge
    :return: fraction of occurrences where the overtopping discharge
        exceeds the critical discharge
    """
    # to add probabilistic evaluation, make m_o and m_c stochastic
    m_o, m_c = 1, 1
    Z = sum(m_o * q_occ > m_c * q_critical) / len(q_occ)
    return Z
5e53886c49e13d79af1ffb06125d3f63aeb623b9
697,551
def iterkeys(obj, **kwargs):
    """Iterate over dict keys in Python 2 & 3."""
    return (obj.iterkeys(**kwargs) if hasattr(obj, 'iterkeys')
            else iter(obj.keys(**kwargs)))
03787c0e8bb493c721871990c4068144782370e2
697,552
def get_continuous_segments(array):
    """
    Get continuous segments for single frame.

    Args:
        array (array): ordered array with integers representing resids
            ~ single frame, e.g.: array = [5, 6, 7, 12, 13, 18, 19, 20]

    Returns:
        SEGMENTS (list): list of continuous segments

    Example:
        >> gdt.get_continuous_segments([1,2,3,22,23,50,51,52])
        [[1, 2, 3], [22, 23], [50, 51, 52]]
    """
    SEGMENTS = []
    temp = []
    for i in range(len(array)):
        temp.append(array[i])
        try:
            if array[i] + 1 != array[i + 1]:
                SEGMENTS.append(temp)
                temp = []
        except IndexError:
            SEGMENTS.append(temp)
    return SEGMENTS
365f3b20a7ec4ae016ccc48dff9fd5500cd48746
697,554
import base64
import struct


def _decode_real(blob):
    """Inverse of _encode_real."""
    bytes_ = base64.b64decode(blob)
    return struct.unpack('<d', bytes_)[0]
b570d5e78177e9f4b783478773665ddf80de4301
697,555
import numpy


def createCombos(n_combos: int, names: list) -> dict:
    """Utility for creating combos of integers."""
    n = len(names)
    range_ = range(1, n_combos + 1)
    mesh = numpy.meshgrid(*(n * [range_]))
    combos = numpy.array(mesh).reshape(n, -1)
    return dict(zip(names, combos))
942596bd8d1ec7a265c00f0727392eae52246a58
697,556
def retrieve_attrs(instancenorm):
    """
    Gather the required attributes for the GroupNorm plugin from the subgraph.

    Args:
        instancenorm: Instance Normalization node in the graph.
    """
    attrs = {}
    # The 2nd dimension of the Reshape shape is the number of groups
    attrs["num_groups"] = instancenorm.i().i(1).attrs["value"].values[1]
    attrs["eps"] = instancenorm.attrs["epsilon"]
    # 1 is the default plugin version the parser will search for, and therefore
    # can be omitted, but we include it here for illustrative purposes.
    attrs["plugin_version"] = "1"
    # "" is the default plugin namespace the parser will use, included here
    # for illustrative purposes
    attrs["plugin_namespace"] = ""
    return attrs
88d752a90c285952af8cbed6fd1183111fc99b96
697,557
def styleguide_command(styleguide, chdir, tmp_path):
    """Fixture which will run the styleguide with the passed subcommand.

    Both `base_args` and `command_args` must be iterables which will be
    transformed into strings and passed on the cmd line in the following
    order: `<cmd> <base_args> <command> <command_args>`.
    """
    def runner(*, base_args=[], command="", command_args=[]):
        return styleguide(*base_args, command, *command_args)

    chdir(str(tmp_path))
    yield runner
91e2f8581f2014920ffd2c9bbe1267e1ccf7ecf9
697,558
import argparse


def create_parser():
    """Creates the argument parser and parses the command-line arguments."""
    parser = argparse.ArgumentParser(description='Modifies weight of variants '
                                     'sequenced twice (once for each read of a pair).')
    # Required arguments
    requiredNamed = parser.add_argument_group('required named arguments')
    requiredNamed.add_argument('-i', '--input', dest='input_file',
                               metavar='input_file', type=str, required=True,
                               help="""The path to the input file containing
                               the list of fragments.""")
    requiredNamed.add_argument('-o', '--output', dest='output_file',
                               metavar='output_file', type=str, required=True,
                               help="""The path to the output file containing
                               the list of fragments with modified weights.""")
    requiredNamed.add_argument('-r', '--ref_name', dest='ref_name',
                               metavar='ref_name', type=str, required=True,
                               help="""Name of the genome used as a reference.""")
    requiredNamed.add_argument('-a', '--alt_name', dest='alt_name',
                               metavar='alt_name', type=str, required=True,
                               help="""Name of the genome used as an alternative.""")
    # Optional arguments
    parser.add_argument('-s', '--skip_header', dest='skip_header',
                        action='store_true',
                        help="""Whether or not the input file contains a header
                        that must be skipped. Default: False (No header).""")
    return parser.parse_args()
bda5318272087900063f6649a5f75fb0beb49ef3
697,559
import textwrap


def mk_block(decl, contents, indent=2):
    """Format a block like this:

        decl {
          contents
        }

    where `decl` is one line but contents can be multiple lines.
    """
    return decl + ' {\n' + textwrap.indent(contents, indent * ' ') + '\n}'
e57d3fa8f4c94b3a1d4e1145668f8dceccf02025
697,560
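A usage sketch for mk_block above, with hypothetical nginx-style contents:

    >>> print(mk_block('server', 'listen 80;\nroot /var/www;'))
    server {
      listen 80;
      root /var/www;
    }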
import pkg_resources


def get_config():
    """Returns a string containing the configuration information."""
    packages = pkg_resources.require(__name__)
    this = packages[0]
    deps = packages[1:]
    retval = "%s: %s (%s)\n" % (this.key, this.version, this.location)
    retval += "  - python dependencies:\n"
    for d in deps:
        retval += "    - %s: %s (%s)\n" % (d.key, d.version, d.location)
    return retval.strip()
29815a02b1892fd66c39456d9a90db9a5259288f
697,561
def _update_together_save_hook(instance, *args, **kwargs):
    """
    Sets ``update_fields`` on :meth:`~django.db.models.Model.save` to include
    any fields that have been marked as needing to be updated together with
    fields already in ``update_fields``.

    :return: (continue_saving, args, kwargs)
    :rtype: :class:`tuple`
    """
    if 'update_fields' in kwargs:
        new_update_fields = set(kwargs['update_fields'])
        for field in kwargs['update_fields']:
            new_update_fields.update(instance._meta.update_together.get(field, []))
        kwargs['update_fields'] = list(new_update_fields)
    return (True, args, kwargs)
c677d1b88469430f71c4a5f090a0d1fab5639d45
697,562
import torch


def affine_make_square(affine):
    """Transform a rectangular affine into a square affine.

    Parameters
    ----------
    affine : (..., ndim[+1], ndim+1) tensor

    Returns
    -------
    affine : (..., ndim+1, ndim+1) tensor
    """
    affine = torch.as_tensor(affine)
    device = affine.device
    dtype = affine.dtype
    ndims = affine.shape[-1] - 1
    if affine.shape[-2] not in (ndims, ndims + 1):
        raise ValueError('Input affine matrix should be of shape\n'
                         '(..., ndims+1, ndims+1) or (..., ndims, ndims+1).')
    if affine.shape[-1] != affine.shape[-2]:
        bottom_row = torch.cat((torch.zeros(ndims, device=device, dtype=dtype),
                                torch.ones(1, device=device, dtype=dtype)), dim=0)
        bottom_row = bottom_row.unsqueeze(0)
        bottom_row = bottom_row.expand(affine.shape[:-2] + bottom_row.shape)
        affine = torch.cat((affine, bottom_row), dim=-2)
    return affine
440fef0cf43eb501fe25555ac4e113865108d13d
697,563
import os


def construct_notebook_index(title, pthlst, pthidx):
    """
    Construct a string containing a markdown format index for the list of
    paths in `pthlst`. The title for the index is in `title`, and `pthidx`
    is a dict giving label text for each path.
    """
    # Insert title text
    txt = '"""\n## %s\n"""\n\n"""' % title
    # Insert entry for each item in pthlst
    for pth in pthlst:
        # If pth refers to a .py file, replace .py with .ipynb, otherwise
        # assume it's a directory name and append '/index.ipynb'
        if pth[-3:] == '.py':
            link = os.path.splitext(pth)[0] + '.ipynb'
        else:
            link = os.path.join(pth, 'index.ipynb')
        txt += '- [%s](%s)\n' % (pthidx[pth], link)
    txt += '"""'
    return txt
bbd3b22585ff049466148f702c048c9ad69c17e4
697,564
import random


def randlog10(a, b):
    """
    Generate values uniformly distributed in log space between 10^a and 10^b.
    :param a:
    :param b:
    :return:
    """
    exp = random.uniform(a, b)
    return 10.0 ** exp
cc3b5ed037bb2be495c56171563d4fa3cc497374
697,565
import re


def parse_show_ntp_trusted_keys(raw_result):
    """
    Parse the 'show ntp trusted-keys' command raw output.

    :param str raw_result: vtysh raw result string.
    :rtype: dict
    :return: The parsed result of the show ntp trusted-keys command
        in a dictionary of the form:

     ::

        {
            '11': { 'key_id': '11' },
            '12': { 'key_id': '12' }
        }
    """
    ntp_trusted_key_re = (
        r'(?P<key_id>\d+)'
    )

    result = {}
    for line in raw_result.splitlines():
        re_result = re.search(ntp_trusted_key_re, line)
        if re_result:
            partial = re_result.groupdict()
            result[partial['key_id']] = partial
    return result
37c573548359553b03237ddece068e52e9f51fb4
697,566
def candidate_priority(candidate_component, candidate_type, local_pref=65535):
    """
    See RFC 5245 - 4.1.2.1. Recommended Formula
    """
    if candidate_type == 'host':
        type_pref = 126
    elif candidate_type == 'prflx':
        type_pref = 110
    elif candidate_type == 'srflx':
        type_pref = 100
    else:
        type_pref = 0
    return (1 << 24) * type_pref + \
           (1 << 8) * local_pref + \
           (256 - candidate_component)
1655802955276dd6166a05817c9ad22ccbbca364
697,567
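A worked example for candidate_priority above: for component 1 of a host candidate with the default local preference, the RFC 5245 formula gives (2^24)*126 + (2^8)*65535 + (256 - 1) = 2130706431.

    >>> candidate_priority(1, 'host')
    2130706431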
import argparse


def get_arguments():
    """Input commands: <interface> <mac> <help> on the terminal."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--interface", dest="interface",
                        help="Interface to change its MAC address")
    parser.add_argument("-m", "--mac", dest="new_mac",
                        help="New MAC address")
    args = parser.parse_args()
    if not args.interface:
        parser.error("Please specify an interface, use --help for more info.")
    elif not args.new_mac:
        parser.error("Please specify a new MAC address, use --help for more info.")
    return args
d397077971f64401202388cedf704c5f7c8dfbbd
697,568
def create_user(p_dict):
    """Creates a user"""
    p_dict["active"] = True
    p_dict["site_admin"] = False if "site_admin" not in p_dict else (
        p_dict["site_admin"])
    p_dict["site_manager"] = False if "site_manager" not in p_dict else (
        p_dict["site_manager"])
    p_dict["site_spectator"] = False if "site_spectator" not in p_dict else (
        p_dict["site_spectator"])
    p_dict["created_at"] = "2015-05-23"
    p_dict["deleted_at"] = None
    del p_dict["password"]
    return p_dict
f6ff6c16c62178f0955f750d6d91e1d02b4d05e8
697,569
import torch


def lddt_ca_torch(true_coords, pred_coords, cloud_mask, r_0=15.):
    """Computes the lddt score for each C_alpha.
    https://academic.oup.com/bioinformatics/article/29/21/2722/195896

    Inputs:
    * true_coords: (b, l, c, d) in sidechainnet format.
    * pred_coords: (b, l, c, d) in sidechainnet format.
    * cloud_mask : (b, l, c) adapted for scn format.
    * r_0: float. maximum inclusion radius in reference struct.

    Outputs:
    * (b, l) lddt for c_alpha scores (ranging between 0 and 1)

    See wrapper below.
    """
    device, dtype = true_coords.device, true_coords.type()
    thresholds = torch.tensor([0.5, 1, 2, 4], device=device).type(dtype)
    # adapt masks
    cloud_mask = cloud_mask.bool().cpu()
    c_alpha_mask = torch.zeros(cloud_mask.shape[1:], device=device).bool()  # doesn't have batch dim
    c_alpha_mask[..., 1] = True
    # container for c_alpha scores (between 0,1)
    wrapper = torch.zeros(true_coords.shape[:2], device=device).type(dtype)

    for bi, seq in enumerate(true_coords):
        # select atoms for study
        c_alphas = cloud_mask[bi] * c_alpha_mask  # only pick c_alpha positions
        selected_pred = pred_coords[bi, c_alphas, :]
        selected_target = true_coords[bi, c_alphas, :]
        # get number under distance
        dist_mat_pred = torch.cdist(selected_pred, selected_pred, p=2)
        dist_mat_target = torch.cdist(selected_target, selected_target, p=2)
        under_r0_target = dist_mat_target < r_0
        compare_dists = torch.abs(dist_mat_pred - dist_mat_target)[under_r0_target]
        # measure diff below threshold
        score = torch.zeros_like(under_r0_target).float()
        max_score = torch.zeros_like(under_r0_target).float()
        max_score[under_r0_target] = 4.
        # measure under how many thresholds
        score[under_r0_target] = thresholds.shape[0] - \
            torch.bucketize(compare_dists, boundaries=thresholds).float()
        # dont include diagonal
        l_mask = c_alphas.float().sum(dim=-1).bool()
        wrapper[bi, l_mask] = (score.sum(dim=-1) - thresholds.shape[0]) / \
                              (max_score.sum(dim=-1) - thresholds.shape[0])

    return wrapper
857392ff74fd4323d21c0c3eeb57e156e2b37648
697,571
import configparser


def read_aranet_conf(file):
    """Reads the Aranet Cloud configuration file.

    Args:
        file (str or os.PathLike): A path-like object giving the pathname
            of the configuration file.

    Returns:
        [configparser.ConfigParser]: A ConfigParser object with the
            configuration.
    """
    aranet_conf = configparser.ConfigParser(
        defaults={
            "endpoint": "https://aranet.cloud/api"
        }
    )
    with open(file) as f:
        aranet_conf.read_file(f)
    return aranet_conf
36991e18bd4049145f91aa27aecad57647fc3230
697,572
def normalize_text(text):
    """Replace some special characters in text."""
    # NOTICE: don't change the text length.
    # Otherwise, the answer position is changed.
    text = text.replace("''", '" ').replace("``", '" ')
    return text
0dc15feb2cf9262567c845196403afb4e81c0e0f
697,573
def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made of the same letters arranged
    differently (ignoring the case).
    >>> check_anagrams('Silent', 'Listen')
    True
    >>> check_anagrams('This is a string', 'Is this a string')
    True
    >>> check_anagrams('There', 'Their')
    False
    """
    return (
        "".join(sorted(first_str.lower())).strip()
        == "".join(sorted(second_str.lower())).strip()
    )
345d83fdcde1a8e1a0d0d2b73e0a16f5b5f816d1
697,574
import torch


def compute_metric_shapes(marginal_entropies, cond_entropies):
    """
    z, v: Learned latent and true generative factors, respectively.
    Marginal entropies: H(z)
    Conditional entropies: H(z|v)
    --> I(z;v) = H(z) - H(z|v)
    """
    factor_entropies = [6, 40, 32, 32]
    mutual_infos = torch.unsqueeze(marginal_entropies, dim=0) - cond_entropies
    mutual_infos = torch.sort(mutual_infos, dim=1, descending=True)[0].clamp(min=0)
    mi_normed = mutual_infos / torch.Tensor(factor_entropies).log()[:, None]
    MIG = torch.mean(mi_normed[:, 0] - mi_normed[:, 1])
    return MIG, mutual_infos
e22e064ae2919d262005968065dbd4226ee828d0
697,575
import os


def load_folder_files(folder_path, recursive=True):
    """Load folder path, return all files in list format.

    @param folder_path: specified folder path to load
    @param recursive: if True, will load files recursively
    """
    if isinstance(folder_path, (list, set)):
        files = []
        for path in set(folder_path):
            files.extend(load_folder_files(path, recursive))
        return files

    if not os.path.exists(folder_path):
        return []

    file_list = []
    for dirpath, dirnames, filenames in os.walk(folder_path):
        filenames_list = []
        for filename in filenames:
            if not filename.endswith(('.yml', '.yaml', '.json')):
                continue
            filenames_list.append(filename)
        for filename in filenames_list:
            file_path = os.path.join(dirpath, filename)
            file_list.append(file_path)
        if not recursive:
            break
    return file_list
e2dfe9f62cfc55736f6371d707ecf2fa41139ca7
697,576
import copy


def ref_clone_module(module):
    """
    Note: This implementation does not work for RNNs.
    It requires calling learner.rnn._apply(lambda x: x) before each forward
    call. See this issue for more details:
    https://github.com/learnables/learn2learn/issues/139

    Note: This implementation also does not work for Modules that re-use
    parameters from another Module. See this issue for more details:
    https://github.com/learnables/learn2learn/issues/174
    """
    # First, create a copy of the module.
    clone = copy.deepcopy(module)

    # Second, re-write all parameters
    if hasattr(clone, '_parameters'):
        for param_key in module._parameters:
            if module._parameters[param_key] is not None:
                cloned = module._parameters[param_key].clone()
                clone._parameters[param_key] = cloned

    # Third, handle the buffers if necessary
    if hasattr(clone, '_buffers'):
        for buffer_key in module._buffers:
            if clone._buffers[buffer_key] is not None and \
                    clone._buffers[buffer_key].requires_grad:
                clone._buffers[buffer_key] = module._buffers[buffer_key].clone()

    # Then, recurse for each submodule
    if hasattr(clone, '_modules'):
        for module_key in clone._modules:
            clone._modules[module_key] = ref_clone_module(module._modules[module_key])
    return clone
1de5e27435b5a94c239ffb5b3722df8daed0c80f
697,577
def trimExonAndFlip(exStart, exEnd, exStrand, seqLen, seqStrand):
    """Put the exon into the current sequence window:
    - trim exon to the window (0, seqLen), return None if completely outside
      the view.
    - reverse the exon coordinates if seqStrand=="-"
    """
    if exStart < 0:
        if exEnd < 0:
            # the whole exon is outside the view on the left side
            return None, None, None
        else:
            # truncate the exon to start at 0
            exStart = 0
    if exEnd > seqLen:
        if exStart > seqLen:
            # the whole exon is outside the view on the right side
            return None, None, None
        else:
            # truncate the end
            exEnd = seqLen

    if seqStrand == "-":
        oldExEnd = exEnd
        exEnd = seqLen - exStart
        exStart = seqLen - oldExEnd
        # inputSeq forw and transcript forw -> exon is forw
        # inputSeq forw and transcript rev  -> exon is rev
        # inputSeq rev  and transcript forw -> exon is rev
        # inputSeq rev  and transcript rev  -> exon is forw
        if exStrand == "+":
            exStrand = "-"
        else:
            exStrand = "+"

    return exStart, exEnd, exStrand
b54a9d6e1ceb3b6cd900b87974ad005ac8e2a050
697,578
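A quick sketch of trimExonAndFlip above, with a hypothetical 100 bp window: an exon overhanging the left edge is trimmed, and on a reverse-strand sequence its coordinates and strand are flipped.

    >>> trimExonAndFlip(-10, 30, '+', 100, '+')
    (0, 30, '+')
    >>> trimExonAndFlip(-10, 30, '+', 100, '-')
    (70, 100, '-')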
def stringify_span(range):
    """Returns a nicely-formatted string representing a span of years.

    Arguments:
        range {range} -- A range object
    """
    if len(range) >= 2:
        timespan = f"{range[0]}-{range[-1]}"
    else:
        timespan = str(range[0])  # keep the return type consistent (was an int)
    return timespan
043cc8aae9cb2063c5af16dcb55223f05056b903
697,579
import os


def env_has_log_config():
    """True if there are qpid log configuration settings in the environment."""
    return "QPID_LOG_ENABLE" in os.environ or "QPID_TRACE" in os.environ
b0958c70af569e663cd544a9673f9be3c617eb22
697,580
def sent_beginning(word_list, i):
    """
    Returns the index of the first word of the sentence that contains the
    word at index i in the given list.
    """
    j = i - 1
    while (j > 0) and (word_list[j].tag != ".") and (word_list[j].tag != ""):
        j -= 1
    return j + 1
fb9d29008d96da968b631234a0775c674283e668
697,581
import csv


def parse_table(data, dialect='unix', headers=True, ordered=False):
    """Attempts to load a table file in any format. Returns a list of
    dictionaries (or a list of lists if no header is available). This does
    not handle comment lines."""
    if headers:
        rows = csv.DictReader(data.splitlines(), dialect=dialect)
        if not ordered:
            rows = [dict(r) for r in rows]
    else:
        rows = csv.reader(data.splitlines(), dialect=dialect)
    return list(rows)
bd1c7701c518c95abf479a9d480e42bba0663895
697,582
def update_dropdown(selected_data):
    """Update dropdown after neighborhood map selection.

    Update dropdown menu status after neighborhood map selection is made.
    If TypeError, returns 92 (University District).

    Parameters
    ----------
    selected_data : dict
        Selected data in neighborhood map.

    Returns
    -------
    neighborhood : int
        Index of selected neighborhood (0-102).
    """
    try:
        return selected_data['points'][0]['pointIndex']
    except TypeError:
        return 92
163e327d2919402b6ab1ef137d045b142ee76d62
697,583
import torch


def calculate_psnr(img1, img2):
    """Data range [0, 1]."""
    img1 = img1.clamp(0, 1)
    img2 = img2.clamp(0, 1)
    mse = torch.mean((img1 - img2) ** 2, [1, 2, 3])
    # if mse == 0:
    #     return 100
    PIXEL_MAX = 1
    return 20 * torch.mean(torch.log10(PIXEL_MAX / torch.sqrt(mse)))
0bcb6b0eb3fc1eaabca9447d02c91920cc16b211
697,584
from typing import List

import statistics


def udf_median(items: List[float]):
    """Median of elements in a list."""
    return statistics.median(items)
6ed0841251de91e2758489d742c071f303444dfe
697,585
def substrings(seq):
    """
    Returns a set of all the substrings of seq.
    Recall we can compute a substring using seq[i:j] where 0 <= i < j <= len(seq).

    Example:
    >>> substrings("abc")
    {"a", "ab", "abc", "b", "bc", "c"}
    """
    subs = set()
    for i in range(len(seq)):  # determine the starting index
        for j in range(i + 1, len(seq) + 1):  # determine the ending index
            subs.add(seq[i:j])
    return subs
7e27e1e8902410d3ca629edf13c2ce2f54107d08
697,586
def get_as_clause(columns_to_query_lst):
    """
    Build an AS clause naming all the given columns, plus a trailing
    "update" column.
    :param columns_to_query_lst: columns for the clause
    :return: the AS clause string
    """
    column_str = ""
    for col in columns_to_query_lst:
        column_str = column_str + col + ","
    column_str += "update"
    as_clause = "as c(" + column_str + ")"
    return as_clause
e1ba25416a984160d9151887d26afa94448bcfaa
697,587
def truncate(f, n):
    """The remainder from division by x**n."""
    result_dict = {}
    for e, c in f.coeffs.items():  # iteritems() is Python 2 only
        if e < n:
            result_dict[e] = c
    return f.__class__(result_dict)
68f3f882db0c4ef17e7cb3bebf4245ac74fa90e4
697,588
def merge(dict1, dict2):
    """dict1 takes precedence"""
    for key, value in dict1.items():
        if isinstance(value, dict):
            node = dict2.setdefault(key, {})
            merge(value, node)
        else:
            dict2[key] = value
    return dict2
b4dfc9385359cfdf9beb7fd93310194a8d4338f7
697,589
import pytz


def to_zulu_string(dt):
    """Returns a Zulu time string from a datetime.

    Assumes naive datetime objects are in UTC.
    Ensures the output always has a floating-point number of seconds.
    """
    # Assume non-tz-aware datetimes are in UTC.
    if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:
        dt = dt.replace(tzinfo=pytz.UTC)
    # Convert datetime into UTC.
    isodate = dt.astimezone(pytz.UTC).isoformat().split('+')[0]
    # Add fractional seconds if not present.
    if '.' not in isodate:
        isodate += '.0'
    return isodate + 'Z'
2d7cb31cd83f86e9f2be78bbb8c3d9d517e6d31f
697,590
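A usage sketch for to_zulu_string above, with a naive datetime (assumed UTC):

    >>> from datetime import datetime
    >>> to_zulu_string(datetime(2020, 1, 2, 3, 4, 5))
    '2020-01-02T03:04:05.0Z'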
def serialize_value(value):
    """Serialize a single value.

    This is used instead of a single-shot `.format()` call because some
    values need special treatment for being serialized in YAML; notably,
    booleans must be written as lowercase strings, and float exponents
    must not start with a 0.
    """
    if isinstance(value, bool):
        return repr(value).lower()
    elif isinstance(value, float):
        return "{0:.16}".format(value).replace("e+0", "e+").replace("e-0", "e-")
    else:
        return repr(value)
6c01f79cd745799402c5d1db2ed6acafa8dd1c2a
697,591
import requests


def upload_to_cloud(qa_li):
    """Upload data to cloud.

    :param qa_li:
    :return:
    """
    data = []
    for q, a in qa_li.items():
        data.append({
            "question": q,
            "answers": a
        })
    base_url = "https://bob.36deep.com/v1/assistant/question/"
    resp = requests.post(base_url, json=data, verify=False)
    if resp.status_code // 100 not in (2, 3):
        return False
    return True
bb6b3f71bd7f959eb10e19dbe34d6cdf7dbf1b57
697,592
def track_title_and_slug_from_penta(tracks, room_slug):
    """
    Return the track title (e.g. Community) based on the room slug (mcommunity).
    :param tracks:
    :param room_slug:
    :return:
    """
    if room_slug in tracks:
        return tracks[room_slug]['title'], tracks[room_slug]['slug']
    return None, None
4fb4965b8e81c82dd6f34b03166e2c2812653328
697,593
import sys
import argparse


def add_parser(subparsers):
    """Parser for learning script"""
    parser = subparsers.add_parser(
        'learning', help="""Analyze game using learning""",
        description="""Perform game analysis with learned model""")
    parser.add_argument(
        '--input', '-i', metavar='<input-file>', default=sys.stdin,
        type=argparse.FileType('r'),
        help="""Input file for script. (default: stdin)""")
    parser.add_argument(
        '--output', '-o', metavar='<output-file>', default=sys.stdout,
        type=argparse.FileType('w'),
        help="""Output file for script. (default: stdout)""")
    parser.add_argument(
        '--dist-thresh', metavar='<distance-threshold>', type=float,
        default=1e-3, help="""L2 norm threshold, inside of which, equilibria
        are considered identical. (default: %(default)g)""")
    parser.add_argument(
        '--regret-thresh', '-r', metavar='<regret-threshold>', type=float,
        default=1e-3, help="""Maximum regret to consider an equilibrium
        confirmed. (default: %(default)g)""")
    parser.add_argument(
        '--supp-thresh', '-t', metavar='<support-threshold>', type=float,
        default=1e-3, help="""Maximum probability to consider a strategy in
        support. (default: %(default)g)""")
    parser.add_argument(
        '--rand-restarts', metavar='<random-restarts>', type=int, default=0,
        help="""The number of random points to add to nash equilibrium
        finding. (default: %(default)d)""")
    parser.add_argument(
        '--max-iters', '-m', metavar='<maximum-iterations>', type=int,
        default=10000, help="""The maximum number of iterations to run
        through replicator dynamics. (default: %(default)d)""")
    parser.add_argument(
        '--converge-thresh', '-c', metavar='<convergence-threshold>',
        type=float, default=1e-8, help="""The convergence threshold for
        replicator dynamics. (default: %(default)g)""")
    parser.add_argument(
        '--processes', '-p', metavar='<num-procs>', type=int,
        help="""Number of processes to use to run nash finding. (default:
        number of cores)""")
    parser.add_argument(
        '--one', action='store_true', help="""If specified, run a potentially
        expensive algorithm to guarantee an approximate equilibrium, if none
        are found via other methods.""")
    return parser
58157e64e85cb8c89b66852071655a4f514fa76c
697,594
def list_format(lst):
    """Unpack a list of values and write them to a string."""
    return '\n'.join(lst)
754e8cb4710558f551357bf5ac7ed54cbf0730ca
697,595
def turn():
    """Called when the game requests the AI to return the id of the piece it
    wants to move and the id of the tile the target piece should be moved
    to."""
    return None
2ca6a241d00a728b55583e0166d557208d668ae4
697,596
import os


def calculateFileSize(filePath):
    """
    Calculates the size of the specified file.

    Args:
        filePath (str): The absolute path to the file.

    Returns:
        The size of the file in bytes.
    """
    try:
        return os.stat(filePath).st_size
    except OSError:  # avoid a bare except: only filesystem errors are expected
        return 0
e56a0ac7f349b48a1f0cb1d8c08a0e6b248a0c19
697,598
import statistics


def get_length_stats_from_seqs_dic(seqs_dic):
    """
    Get length stats from set of sequences stored in dictionary.
    Return dictionary with stats.
    """
    assert seqs_dic, "given seqs_dic empty"
    seq_len_list = []
    for seq_id in seqs_dic:
        seq_len_list.append(len(seqs_dic[seq_id]))
    seq_stats_dic = {}
    seq_stats_dic["mean"] = statistics.mean(seq_len_list)
    seq_stats_dic["median"] = int(statistics.median(seq_len_list))
    seq_stats_dic["max"] = int(max(seq_len_list))
    seq_stats_dic["min"] = int(min(seq_len_list))  # bug fix: was max()
    seq_stats_dic["stdev"] = statistics.stdev(seq_len_list)
    return seq_stats_dic
e49c0099bf9a50b879bd2d3ab222a045dd665e37
697,599
def STRING_BOUNDARY(e):
    """
    :return: expr that matches an entire line
    """
    return r"^{e}$".format(e=e)
aabe1fe79783e5393c0a9307bb2780cdbd558218
697,600
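A usage sketch for STRING_BOUNDARY above, anchoring a hypothetical sub-expression:

    >>> STRING_BOUNDARY(r"[0-9]+")
    '^[0-9]+$'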
def simplifyDataLine(l, n, callingRoutine, comment):
    """For internal use only."""
    ls = l.split(comment)[0]
    ls = ls.replace(",", " ")
    ls = ls.replace("\t", " ")
    if ls == "":
        return None
    v = ls.split()
    if (n is not None) and (len(v) != n):
        raise Exception("\nError in %s: line has %d values, expected %d\n%s"
                        % (callingRoutine, len(v), n, l))
    return v
7568749d612f6ada783abf6cf5795edb6a63acc8
697,601
def segmentize_geometry(geom, segment_size=1.):
    """
    Segmentizes the lines of a geometry (decreases the point spacing along
    the lines) according to a given `segment_size`.

    Parameters
    ----------
    geom : ogr.Geometry
        OGR geometry object.
    segment_size : float, optional
        For precision: distance of longest segment of the geometry polygon
        in units of the spatial reference system.

    Returns
    -------
    geom_fine : ogr.Geometry
        A congruent geometry realised by more vertices along its shape.
    """
    geom_fine = geom.Clone()
    geom_fine.Segmentize(segment_size)
    geom = None
    return geom_fine
324317a1f64ff0794b204c4b0f4b39ff0df9221d
697,604
def _tmpl_solo(self, keys=None, disam=None, bracket=None):
    """In testing right now. For now, just a dummy function that returns
    False. Based on beets' built-in aunique() functionality (tmpl_aunique).

    Determine if an album is uniquely disambiguated by the given "keys".

    Example use case:
        $if{solo, year, aunique{albumartist, year, year-mm, year-mm-dd}}

    In the example, we reliably return at least a "year" for unique artists,
    and a more specific release date for artists with multiple albums in the
    database.
    """
    return False
6afacff63b2dd8b020d46821b0667461601b515e
697,605
def _json_file_name(name):
    """Returns the name of the statistics file for `name`."""
    return name + '.json'
e554b5e40f9fceb6c98986611588c4853d6861da
697,606
def get_model_metadata_fixture() -> dict:
    """Test fixture for model metadata

    Returns:
        dict: Example metadata response structure
    """
    metadata = {
        "kind": "Model",
        "apiVersion": "v1alpha4",
        "metadata": {
            "displayName": "model display name",
            "name": "test-model",
            "summary": "Model summary",
            "description": "Model description",
            "type": "Simulation",
            "owner": "0a0a0a0a-0a00-0a00-a000-0a0a0000000a",
        },
        "spec": {
            "inputs": {
                "env": [
                    {
                        "name": "R_NUMBER",
                        "title": "R Number",
                        "desc": "The reproduction number",
                        "type": "number",
                        "default": 1.5,
                        "min": 0.1,
                        "max": 2.0,
                    },
                    {
                        "name": "setting",
                        "title": "Setting",
                        "desc": "Mode to run the model in",
                        "type": "string",
                        "default": "long_default_name",
                    },
                ],
                "dataslots": [
                    {
                        "default": [
                            {
                                "uid": "11111a1a-a111-11aa-a111-11aa11111aaa",
                                "versionUid": "21111a1a-a111-11aa-a111-11aa11111aaa",
                            }
                        ],
                        "path": "inputs/",
                        "required": True,
                        "name": "Inputs",
                    }
                ],
            },
            "outputs": {
                "datasets": [
                    {
                        "name": "dataset_1.xls",
                        "type": "xls",
                        "desc": "Datset 1 description",
                    },
                    {
                        "name": "dataset_2.xls",
                        "type": "xls",
                        "desc": "Datset 2 description",
                    },
                ]
            },
            "image": "dreg.platform.dafni.rl.ac.uk/nims-prod/test-model:0a0a0a0a-0a00-0a00-a000-0a0a0000000a",
        },
    }
    return metadata
c4ef1d1088bee664e350c32fe457657255f500da
697,607
def convert_time_to_hour_minute(hour, minute, convention):
    """Convert a 12-hour clock time to 24-hour hours and minutes."""
    if hour is None:
        hour = 0
    if minute is None:
        minute = 0
    if convention is None:
        convention = 'am'

    hour = int(hour)
    minute = int(minute)

    # Bug fix: 12 pm must stay 12 and 12 am must become 0; the original
    # unconditionally added 12 for 'pm'.
    if convention.lower() == 'pm' and hour != 12:
        hour += 12
    elif convention.lower() == 'am' and hour == 12:
        hour = 0

    return {'hours': hour, 'minutes': minute}
ec1c129b7c3e07c68622c599f673bd49b99c9872
697,608
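A quick usage sketch for convert_time_to_hour_minute above, covering the 12 o'clock edge cases:

    >>> convert_time_to_hour_minute(5, 30, 'pm')
    {'hours': 17, 'minutes': 30}
    >>> convert_time_to_hour_minute(12, 0, 'am')
    {'hours': 0, 'minutes': 0}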
def merge(initial: dict, overrides: dict) -> dict:
    """
    Merges overrides into initial.
    :param initial: <dict>
    :param overrides: <dict>
    :return: Merged <dict>
    """
    for key in overrides:
        if key in initial:
            if isinstance(initial[key], dict) and isinstance(overrides[key], dict):
                merge(initial[key], overrides[key])
            else:
                initial[key] = overrides[key]
        else:
            initial[key] = overrides[key]
    return initial
4427531fe461e91cdac909841845d959e6f736f5
697,609
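A usage sketch for merge above, showing a nested override with hypothetical dicts:

    >>> merge({'a': {'x': 1}, 'b': 2}, {'a': {'y': 9}, 'c': 3})
    {'a': {'x': 1, 'y': 9}, 'b': 2, 'c': 3}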
import argparse


def get_args():
    """Build the application argument parser."""
    # Note: the original named this variable `args`, but it is the parser
    # object that is returned, not parsed arguments.
    parser = argparse.ArgumentParser(
        description='Generate a pdf resume from a markdown file')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-c', '--css', type=str, metavar='path/to/file.css',
                       help='Path for the css style file.')
    group.add_argument('-simple', action='store_true',
                       help='Generate pdf with simple style.')
    group.add_argument('-bar', action='store_true',
                       help='Generate pdf with colored bar before headers.')
    group.add_argument('-divider', action='store_true',
                       help='Generate pdf with colored divider between headers.')
    parser.add_argument('-m', '--md', type=str, metavar='path/to/markdown/file.md',
                        help='Path to input markdown file.')
    group.add_argument('-l', '--list', action='store_true',
                       help='List all available styles.')
    group.add_argument('-r', '--remove', action='store_true',
                       help='Remove all pdf files from output.')
    group.add_argument('-v', '--version', action='store_true',
                       help='Print \'markdown2pdf\' version.')
    return parser
9eec511cc08bb2231c14b65fb22f0980d4e187fe
697,610
def increment_age(model_particles, e_event_ids):
    """Increment model particles' age, set event particles to age 0."""
    model_particles[:, 5] = model_particles[:, 5] + 1
    model_particles[e_event_ids, 5] = 0
    return model_particles
52efd3276e48d8c951cb75123104227075220650
697,611
from pathlib import Path


def main(file_path: Path) -> str:
    """
    Test function for showing file path.

    Args:
        file_path: file path

    Returns:
        str: HTML output
    """
    return f"Hello world mission2, filePath is {file_path}."
06c23ddf08dba563a061e4a503b5ba76f33b456a
697,612
def getSequenceEditDistance(SC, path):
    """Calculate sequence edit distance of a solution to the constraint."""
    IUPAC = {
        "A": "A", "C": "C", "G": "G", "U": "U",
        "R": "AG", "Y": "CU", "S": "GC", "W": "AU", "K": "GU", "M": "AC",
        "B": "CGU", "D": "AGU", "H": "ACU", "V": "ACG", "N": "ACGU",
    }
    edit = 0
    for i in range(len(SC)):
        if path[i] not in IUPAC[SC[i]]:
            edit += 1
    return edit / float(len(path))
a850f00e10eb041971ba6468ae75a7cfb614bdb5
697,613
from pathlib import Path


def check_projected_player_pts_pulled(year: int, week: int, savepath: Path) -> bool:
    """Helper function to check if projected points have been pulled."""
    if savepath.exists():
        print(f"\nProjected player data already exists at {savepath}")
        return True
    return False
426748b799ff561ac1bb75ad5d4d84fff57e40f5
697,614
def is_gscnn_arch(args):
    """Network is a GSCNN network."""
    return 'gscnn' in args.arch
ffe455ad11f62f090e90ba489969350b50e2a332
697,615
def GetNamesList(filepath):
    """Open a file with a given filepath containing place names and return a list."""
    with open(filepath) as f:  # use a context manager so the file is closed
        return f.read().splitlines()
1e32772d7483ec9cae7cec2e523a3c7b6ef472fe
697,616
import copy


def get_fisnar_segmented_extrusion_coords(fisnar_command_list):
    """get a segmented version of the commands, with each sub-list containing
    a series of coordinates which involve extrusion, as well as the output
    number. Connecting the coordinates gives the material path. The returned
    list is of the form:
    [
        [<output int>, [[<x float 1>, <y float 1>, <z float 1>], ... ,
                        [<x float n>, <y float n>, <z float n>]]],
        ...
    ]

    :param fisnar_command_list: 2D list - fisnar commands
    :type fisnar_command_list: list[list[str, int]]
    :return: new list object with all non-extruding movements removed.
    :rtype: list[list[str/int]]
    """
    ret_segment = []
    fisnar_commands_copy = copy.deepcopy(fisnar_command_list)

    i = 0  # iterator
    while i < len(fisnar_commands_copy):
        # search until 'output on' found
        if fisnar_commands_copy[i][0] == "Output" and fisnar_commands_copy[i][2] == 1:
            sub_segment = [fisnar_commands_copy[i][1], []]

            # finding the most recent travel coords and appending
            prev_coords_ind = i - 1
            while fisnar_commands_copy[prev_coords_ind][0] != "Dummy Point":
                prev_coords_ind -= 1
            sub_segment[1].append([fisnar_commands_copy[prev_coords_ind][1],
                                   fisnar_commands_copy[prev_coords_ind][2],
                                   fisnar_commands_copy[prev_coords_ind][3]])

            # appending all movements until output is turned off
            i += 1
            while not (fisnar_commands_copy[i][0] == "Output"
                       and fisnar_commands_copy[i][2] == 0):
                if fisnar_commands_copy[i][0] == "Dummy Point":
                    sub_segment[1].append([fisnar_commands_copy[i][1],
                                           fisnar_commands_copy[i][2],
                                           fisnar_commands_copy[i][3]])
                i += 1

            # appending segment to return segment list
            ret_segment.append(sub_segment)
        else:  # current command isn't output on, so keep searching
            i += 1
    return ret_segment
3b04239f2f9b73854bd3fe537d52916ab270d0bd
697,617
def splitOn(splitter):
    """Return a function that splits a string on the given splitter string.

    The function returned filters an empty string at the end of the result
    list.

    >>> splitOn('X')("aXbXcX")
    ['a', 'b', 'c']
    >>> splitOn('X')("aXbXc")
    ['a', 'b', 'c']
    >>> splitOn('X')("abc")
    ['abc']
    >>> splitOn('X')("abcX")
    ['abc']
    """
    def f(s):
        l = s.split(splitter)
        if l and not l[-1]:
            return l[:-1]
        else:
            return l
    return f
c316d0de407aa6ca93208ad7d7ce0ca8cf3c176a
697,618
def get_list_class(context, list):
    """
    Returns the class to use for the passed in list. We just build something
    up from the object type for the list.
    """
    css = "list_%s_%s" % (list.model._meta.app_label,
                          list.model._meta.module_name)
    return css
200677c9eb21b8b72e44e533f838009efd5d5cdb
697,619
import requests


def get_OIDC_Token(checkin_auth_url, client_id, client_secret, refresh_token):
    """Get an OIDC token from the EGI AAI Check-In service."""
    # Creating the payload for the request
    payload = {
        "client_id": client_id,
        "client_secret": client_secret,
        "grant_type": "refresh_token",
        "refresh_token": refresh_token,
        "scope": "openid email profile eduperson_entitlement",
    }
    curl = requests.post(
        url=checkin_auth_url, auth=(client_id, client_secret), data=payload
    )
    data = curl.json()
    # print(data)  # Server response
    return data["access_token"]
1b29f18aa7cf219fc2d161b0ea25d3b601fb39ff
697,620
def apply_awarded_flairs_overrides(all_awarded_flairs, current_selected_flair_list):
    """Checks to see if any awarded flairs are overridden; if so, updates the
    CURRENT_EMOJI_FLAIR_LIST with the overridden values.

    This fixes the 'Flair Preview' section when an override is made.
    """
    for awarded_flair in all_awarded_flairs:
        for currently_selected_flair in current_selected_flair_list:
            if awarded_flair.override:
                if awarded_flair.override_flair == currently_selected_flair:
                    # Fix up award_counts when overridden:
                    currently_selected_flair.awarded_count = awarded_flair.awarded_count
                    currently_selected_flair.reddit_flair_emoji = \
                        awarded_flair.override_flair.reddit_flair_emoji
                    currently_selected_flair.static_image = \
                        awarded_flair.override_flair.static_image
            if awarded_flair.flair_id == currently_selected_flair:
                # Fix up award_counts when not overridden:
                currently_selected_flair.awarded_count = awarded_flair.awarded_count
    return current_selected_flair_list
b7fcc5e014cedb478f8dbacf09ffe3a2d01c7980
697,621
def get_default(key, ctx):
    """
    Get the default argument using a user instance property.

    :param key: The name of the property to use
    :param ctx: The click context (which will be used to get the user)
    :return: The default value, or None
    """
    try:
        value = getattr(ctx.code_builder, key)
        if value == "":
            value = None
    except AttributeError:  # bug fix: getattr raises AttributeError, not KeyError
        value = None
    return value
c1c8db3bb996a2c018a5eb4ca45638601e15ce82
697,622
def call(f, *args, **kwargs):
    """When used in pipe, calls given function.

    Calls given function, passing pipe argument first, before args and
    kwargs. Hence function should be (re)designed to handle this. Resulting
    value is then used as pipe argument and passed along further.

    Note: kwargs will overwrite pipe argument ``if xarg in kwarg``.

    Args:
        f: function to call
        *args: other positional arguments to the function
        **kwargs: keyword arguments to the function

    Returns:
        function that performs the required action in pipe
    """
    def g(x):
        pargs = (x,) + args
        return f(*pargs, **kwargs)
    return g
4fce00354ee6b185a60d542ed173753967f0ca1f
697,623
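A usage sketch for call above, with a hypothetical two-argument function: the pipe argument is passed first, then the bound arguments.

    >>> add = lambda x, y: x + y
    >>> step = call(add, 10)
    >>> step(5)   # the pipe argument 5 comes first, then 10
    15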
def extract_masked_part_from_spectrogram(mask, spectrogram):
    """Extract the masked part of the spectrogram."""
    return spectrogram[:, mask]
76be4d93aca3684edfe374bcce0ce09b833ab687
697,624
def calc_fock_matrix(mol_, h_core_, er_ints_, Duv_):
    """
    calc_fock_matrix - Calculates the Fock Matrix of the molecule

    Arguments:
        mol_: the PySCF molecule data structure created from Input
        h_core_: the one electron hamiltonian matrix
        er_ints_: the 2e electron repulsion integrals
        Duv_: the density matrix

    Returns:
        Fuv: The fock matrix
    """
    Fuv = h_core_.copy()  # Takes care of the Huv part of the fock matrix
    num_aos = mol_.nao  # Number of atomic orbitals, dimension of the mats
    for i in range(num_aos):
        for j in range(num_aos):
            Fuv[i, j] += (Duv_ * er_ints_[i, j]).sum() - \
                0.5 * (Duv_ * er_ints_[i, :, j]).sum()
    return Fuv
506df9d5bcf87b64d30c4c787e70e1e6451820aa
697,625
def extract_cfda(field, type):
    """Helper function representing the cfda psql functions."""
    extracted_values = []
    if field:
        entries = [entry.strip() for entry in field.split(';')]
        if type == 'numbers':
            extracted_values = [entry[:entry.index(' ')] for entry in entries]
        else:
            extracted_values = [entry[entry.index(' ') + 1:] for entry in entries]
    return ', '.join(extracted_values)
3a088d7728f38f9d1747f3eab11a3973c6e7ab6f
697,626
import subprocess

import yaml


def get_juju_status() -> dict:
    """Get juju status converted into a dictionary."""
    cmd = ["juju", "status", "--format", "yaml"]
    result = subprocess.run(cmd, capture_output=True, check=True)
    return yaml.safe_load(result.stdout)
828835a00a989694b1092571fe8bd8a3cad871f1
697,628
def best_scoring_const_region_hits(frame):
    """Returns dataframe subset to the max scoring row for each SEQUENCE_ID."""
    return frame.loc[frame.groupby('SEQUENCE_ID').C_SCORE.idxmax()]
1ec94e9befd83ff9dbf48caff7ae65b217887484
697,629
def get_stack_output_value(stack, key: str) -> str:
    """
    Get a stack output value.

    :param stack: the boto3 stack resource
    :param key: the output key
    :return: str
    """
    results = [i for i in stack.outputs if i["OutputKey"] == key]
    if not results:
        raise ValueError(f"could not find output with key {key} in stack")
    return results[0]["OutputValue"]
0742a2b8558b7aa4ea1656a6645a9b3e5fbdb17a
697,630
import hashlib


def make_key(key):
    """
    Returns the SHA256 digest of the given key.

    :param key: bytes to hash
    :return: 32-byte digest
    """
    h = hashlib.sha256()
    h.update(key)
    return h.digest()
c3833de2e4e8c289004a7a72039a7c7a3161659d
697,631
def _skip_class_name(name):
    """Determine if the class name should be skipped."""
    return name == "Bundled" or name.startswith("_")
7cc942d327784da2ae1aefc99859e52b3696475b
697,632
def insert_row(df, data):
    """Insert data as new row at top of df.

    Keyword arguments:
    df -- a Pandas dataframe.
    data -- a list of data to insert.
    """
    # If data length is less than the number of columns, insert blanks.
    num_blanks = len(df.columns.values) - len(data)
    for i in range(num_blanks):
        data.extend([''])
    df.loc[-1] = data
    # Shift index
    df.index = df.index + 1
    # Sort by index
    df = df.sort_index()
    return df
7f32eeb5e5a07790adc48bc7dcdabbb9ba409972
697,633
def get_longest_parent(task):
    """
    Returns the parent of the given task which has the longest execution time.

    Args:
        task (Node)

    Returns:
        Node
    """
    longest_parent = task.get_parents()[0]
    for parent in task.get_parents():
        if longest_parent.get_exec_time() < parent.get_exec_time():
            longest_parent = parent
    return longest_parent
8e41984c1e287f8a2d457150d332f868660c4eef
697,634
def meta_model(row):
    """
    Implements the meta-model AND-operator condition.

    Returns the tamper value of the UL model only when both models agree in
    classifying as non-tampered; otherwise returns the SL classification.

    The UL classifier has a higher TPR but lower TNR, meaning it is less
    restrictive towards tampered assets. The SL classifier has a higher TNR
    but is too punitive, which is undesirable; plus it requires labeled data.
    """
    meta_condition = row['ul_pred_tamper'] == 1 and row['sl_pred_tamper'] == 1
    if meta_condition:
        return row['ul_pred_tamper']
    return row['sl_pred_tamper']
9c8804840fdf39075716d01808275fafafa7941d
697,635
def get_callnumber(record):
    """Follows CC's practice; you may have a different priority order for
    your call number."""
    callnumber = ''
    # First check to see if there is a sudoc number
    if record['086']:
        callnumber = record['086'].value()
    # Next check to see if there is a local call number
    elif record['099']:
        callnumber = record['099'].value()
    elif record['090']:
        callnumber = record['090'].value()
    # Finally check for a value in 050
    elif record['050']:
        callnumber = record['050'].value()
    return callnumber
bf5226c2d39c7913f1701f245f48e4ab5f2b1d15
697,636
def LinearTimeScaling(Tf, t):
    """
    Computes s(t) for a linear (first-order) time scaling.
    (The original docstring said "quintic", but t/Tf is linear.)
    """
    return t / Tf
305e09f20f391f1643e6cecaeb84ad7c7e66fea9
697,637
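Since the original docstring mentioned a quintic scaling, here is a minimal sketch of the standard quintic polynomial for contrast (zero velocity and acceleration at both endpoints); the helper name QuinticTimeScaling is hypothetical, not part of the source:

    def QuinticTimeScaling(Tf, t):
        # s(t) = 10(t/Tf)^3 - 15(t/Tf)^4 + 6(t/Tf)^5
        tau = t / Tf
        return 10 * tau**3 - 15 * tau**4 + 6 * tau**5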
def gen_mapping(args, service, weight=None, labels=None):
    """Generate a Mapping for a service/prefix and (optional) weight."""
    labels = labels or {}  # avoid a mutable default argument
    prefix = args.prefix
    mapping = {
        "apiVersion": "getambassador.io/v1",
        "kind": "Mapping",
        "metadata": {
            "name": f"mapping-for-{service}"
        },
        "spec": {
            "prefix": prefix,
            "service": service
        }
    }
    if args.namespace:
        mapping["metadata"]["namespace"] = args.namespace
    if len(labels) > 0:
        mapping["metadata"]["labels"] = labels
    if weight:
        mapping["spec"]["weight"] = weight
    return mapping
167d044cffed74d5d04498e4941a147395977602
697,638
from bs4 import BeautifulSoup
import re


def parse_feed_item(feed_item):
    """
    Takes a dict representing a feed item for today and generates a
    structured dict from it:
    {
        "text": The plain text of this entry,
        "links": List of all links in this entry, which do not link to the
                 year or an image file.
        "year_link": Link to the article of the year (can be None)
        "image": Link to an image file (can be None)
    }
    All links are fully qualified URLs
    """
    soup = BeautifulSoup(feed_item["summary"], features="html.parser")
    entries = []
    for li in soup.find_all("li"):
        entry = {
            "text": li.get_text(),
            "links": [],
            "year_link": None,
            "year": None,
            "image": None
        }

        # find integer value for year. Assume text always begins with year.
        m = re.match(r"^(\d+)", entry["text"])
        if m:
            # first check if year has a trailing "v. Chr."
            is_bc = re.match(r"^(\d+\xa0v\.\xa0Chr\.)", entry["text"])
            if is_bc:
                # add a minus sign to year, so later calculations yield a correct result
                entry["year"] = int("-" + m.group(1)) + 1
            else:
                entry["year"] = int(m.group(1))
        else:
            raise Exception("Text does not start with year??")

        first_regular_link_found = False
        # iterate over all links in the entry
        for a in li.find_all("a"):
            href = a.get("href")
            images = a.find_all("img")
            # If there are any images, we handle the first one and use it for a media post
            if len(images) > 0:
                image = images[0]
                # find the largest image file
                # "srcset" contains alternate image files in different sizes.
                # The format is a comma separated list of "<url> <size>x", e.g. 1.5x, 2x, etc.
                # We parse these and select the url with the max size
                candidates = [image.get("src") + " 1x"]
                if image.get("srcset"):
                    candidates += image.get("srcset").split(",")
                tmp = []
                for candidate in candidates:
                    candidate = candidate.strip()
                    url, size = candidate.split()
                    size = float(size.strip("x"))
                    tmp.append((size, url))
                url = sorted(tmp)[-1][1]
                entry["image"] = {
                    "url": url,
                    "alt_text": image.get("alt")
                }
                continue

            # Assume a link with only numbers is a year link
            possible_year_link = re.match(r".*\/wiki\/\d+$", href)
            # Only make a link the year_link if it appears before
            # any other regular link
            if not first_regular_link_found and possible_year_link:
                entry["year_link"] = href
            else:
                entry["links"].append(href)
                first_regular_link_found = True
        entries.append(entry)
    return entries
8e702b587f94874d0df9cc9320168fd78c345820
697,639