content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def _select_archive(files): """Selects a single archive from a list of files produced by a static cc_library. In some configurations, cc_library can produce multiple files, and the order isn't guaranteed, so we can't simply pick the first one. """ # list of file extensions in descending order or preference. exts = [".pic.lo", ".lo", ".a"] for ext in exts: for f in files: if f.basename.endswith(ext): return f
31dc3fa75dd9338ae89f6aad17b6da460900abdc
19,637
def regroup_interval(df, interval):
    """Resample an OHLCV DataFrame to a coarser time interval.

    Parameters:
        df (pandas.DataFrame): frame with a DatetimeIndex and the columns
            Open/High/Low/Close/Volume.
        interval (str): 'W' for weekly bars (week ending Friday),
            'M' for monthly bars; any other value returns ``df`` unchanged.

    Returns:
        pandas.DataFrame: the resampled frame (or ``df`` as-is).
    """
    # Per-column aggregation when collapsing several bars into one.
    ohlc_dict = {'Open': 'first', 'High': 'max', 'Low': 'min', 'Close': 'last', 'Volume': 'sum'}
    if interval == 'W':
        return df.resample('W-Fri').apply(ohlc_dict)  # 'Week ending Friday'
    elif interval == 'M':
        return df.resample('M').apply(ohlc_dict)  # Month ending on 30th or 31st
    else:
        return df
62a468df7211b418e30d67e0740c2d360103c0e2
19,638
def production_emissions(suisse):
    """Add an 'emissions_sans_transport' column: quantity times the median
    production-emission factor (kg CO2-eq/kg produce).

    Mutates and returns the input DataFrame.

    NOTE(review): the original docstring said "production values (kg)" but
    the code multiplies the 'consumption' column — confirm which quantity
    is intended.
    """
    suisse['emissions_sans_transport'] = suisse['consumption'] * suisse['median_emissions']
    return suisse
418aa86f02ffe0f2102664668f9286d8f4acb14c
19,639
import re

def tokenize_per_cluster_args(args, nclusters):
    """
    Separate per-cluster arguments so that parsing becomes easy.

    Tokens after a ``--cluster<i>`` marker (the marker itself included)
    belong to cluster i until a different ``--cluster<N>`` marker is hit;
    tokens seen outside any cluster context are common arguments.

    Params:
        args: Combined arguments
        nclusters(int): total number of clusters

    Returns:
        (list of lists, list): per-cluster conf lists plus the common args,
        ex: ([[cluster1_conf], [cluster2_conf]...], common)
    """
    per_cluster_argv = list()
    multi_cluster_argv = list()
    common_argv = list()
    # True while tokens belong to the cluster currently scanned for (i).
    cluster_ctx = False
    regexp = re.compile(r"--cluster[0-9]+")
    # index is shared across outer iterations: each pass resumes where the
    # previous one stopped (on the next cluster's marker).
    index = 0
    for i in range(1, nclusters + 1):
        while index < len(args):
            if args[index] == f"--cluster{i}":
                cluster_ctx = True
            elif regexp.search(args[index]):
                # A different cluster's marker: stop without consuming it so
                # the next outer iteration re-examines this token.
                cluster_ctx = False
                break
            if cluster_ctx:
                per_cluster_argv.append(args[index])
            else:
                common_argv.append(args[index])
            index = index + 1
        multi_cluster_argv.append(per_cluster_argv)
        per_cluster_argv = []
    return multi_cluster_argv, common_argv
be4c8d0ef01a2d2431f46434bd1ca88127b75cb6
19,640
import sys

def is_py2() -> bool:
    """Exists only to avoid mocking :data:`sys.version_info` in tests."""
    major = sys.version_info.major
    return major == 2
fe11f262b3ca446a9c1271e51a23c4698a5aee54
19,641
def _from_quoted_string(quoted): """Strip quotes""" return quoted.strip('"').replace('""', '"')
febde29bb30d54675b1ff5eebf5276d0ef9efdc2
19,642
def get_parent_build(build):
    """Returns the parent build for a triggered build."""
    props = build.properties_as_dict
    parent_builder = build.builders[props['parent_buildername']]
    return parent_builder.builds[props['parent_buildnumber']]
ef4b1041bfd54b7aa2ac1469e6b58bfcf6acd724
19,643
def format_app_name(appname):
    """
    Convert long reverse DNS name to short name.

    :param appname: Application name (ex. sys.pim.calendar -> "calendar")
    :type appname: str
    """
    # Only the last dotted component is needed, so split from the right.
    return appname.rsplit(".", 1)[-1]
aceb4f58506fa7fae0358f9fcf3dd0ea6fbab352
19,645
import os
import logging

def load_requirements():
    """Read requirements.txt, excluding platform-specific requirements.

    On Windows (``os.name == 'nt'``) psycopg2 and gunicorn are skipped,
    with a warning logged for each skipped line.

    Returns:
        list[str]: the stripped requirement lines, minus exclusions.

    Fixes: the file handle was leaked (no ``with``), and the project name
    was taken as everything before the first '=', so a specifier such as
    ``psycopg2>=2.8`` produced the name ``psycopg2>`` and was never
    excluded.
    """
    requirements = []
    with open("requirements.txt") as req_file:
        for line in req_file:
            line = line.strip()
            # Project name = text before any version/extras/marker specifier.
            name = line
            for sep in '<>=!~[;':
                name = name.split(sep, 1)[0]
            name = name.strip()
            if os.name == 'nt' and name in ('psycopg2', 'gunicorn'):
                logging.warning("Skipped requirement: %s", line)
                continue
            requirements.append(line)
    return requirements
052fe72cebe3751531c4b32d655d2f87a38978ea
19,646
import re

def parse_show_vlan_internal(raw_result):
    """
    Parse the 'show vlan internal' command raw output.

    :param str raw_result: vtysh raw result string.
    :rtype: dict
    :return: Mapping vlan_id -> {'vlan_id': ..., 'interface': ...}, or
        None when no internal vlan was found. Example::

            {
                '1024': {'interface': '1', 'vlan_id': '1024'},
                '1025': {'interface': '10', 'vlan_id': '1025'}
            }
    """
    vlan_re = re.compile(r'\s+(?P<vlan_id>\d+)\s+(?P<interface>\S+)')
    parsed = {}
    for row in raw_result.splitlines():
        match = vlan_re.search(row)
        if match:
            entry = match.groupdict()
            parsed[entry['vlan_id']] = entry
    # Empty result means no internal vlan: report None instead of {}.
    return parsed or None
f3f7bec8d4afc8d1d65e5eef0f60f772128e8530
19,649
def read_file_to_dict(file):
    """Extract information from file, and convert to dictionary"""
    keys = ['title', 'name', 'date', "feedback"]
    with open(file, "r") as f:
        lines = f.readlines()
    if len(lines) != len(keys):
        raise Exception('The number of lines in file:\n {} is {}, must be {}'.format(file, len(lines), len(keys)))
    # One line per key, stripped of surrounding whitespace.
    return {key: line.strip() for key, line in zip(keys, lines)}
a9eeada6388037aeb25f864fecb411f6d05f833d
19,650
from jinja2 import Environment, meta
import os

def find_referenced_templates(template, search_path):
    """
    Return the resolved paths of templates which are either
    {% imported %}, {% extended %} or {% included %} by a template.

    :param template: open file-like object with the template source.
    :param search_path: iterable of directories to resolve names against.
    :return: list of absolute paths for names that resolve to an existing
        file.

    Fix: the original applied its ``is not None`` filter to the raw
    template *names* rather than to ``realpath``'s result, so unresolvable
    names produced ``None`` entries in the returned list.
    """
    env = Environment()
    ast = env.parse(template.read())
    referenced_templates = meta.find_referenced_templates(ast)

    def realpath(tpl):
        # First directory on the search path containing the file wins.
        for path in search_path:
            candidate = os.path.realpath(os.path.join(path, tpl))
            if os.path.isfile(candidate):
                return candidate
        return None

    # meta.find_referenced_templates yields None for dynamic references;
    # skip those, then drop names that did not resolve to a file.
    resolved = (realpath(t) for t in referenced_templates if t is not None)
    return [p for p in resolved if p is not None]
59daa6a7a739bb08de5a1cdfe7bb37c644fd7492
19,651
def sqrt(number):
    """
    Calculate the floored square root of a number via binary search.

    Args:
       number(int): Non-negative number to find the floored square root of

    Returns:
       int: Floored Square Root

    Raises:
       ValueError: if ``number`` is negative (the original silently
       returned ``None`` in that case).
    """
    if number < 0:
        raise ValueError("sqrt() requires a non-negative number")
    low, high = 0, number
    while low <= high:
        mid = (low + high) // 2
        square = mid * mid
        # mid is the answer when mid^2 <= number < (mid+1)^2.
        if square == number or square < number < (mid + 1) * (mid + 1):
            return mid
        if square > number:
            high = mid - 1
        else:
            low = mid + 1
1dca451d1b96ec88a36753d9d07edd9ba2f34de8
19,652
def do_work(func, args, kwargs=None):
    """
    Wrap a function with arguments and return the result for
    multiprocessing routines.

    Parameters
    ----------
    func
        Function to be called in multiprocessing
    args : tuple
        Positional arguments to 'func'
    kwargs : dict
        Keyword arguments to 'func'

    Returns
    -------
    result
        Output of 'func(args, kwargs)'
    """
    # Needed for use in the Python parallelization functions
    # (e.g. apply_async). ¯\_(ツ)_/¯
    if not kwargs:
        return func(*args)
    return func(*args, **kwargs)
0f14dddccc40afdeeb18c3cd16322f85a2008112
19,653
def dash_transfer(word):
    """
    :param word: str, allow users to enter a word that is going to be
        disguised.
    :return: str, return a all-dash sequence with the same length.
    """
    # String repetition replaces the original char-by-char accumulation
    # loop (which built the string in quadratic time).
    return '-' * len(word)
ad3736295c45ab473b5a8e3a5c40b7294d85a74e
19,654
def calculate_result(white_balls, power_ball):
    """Entry point where the computation is launched.

    Currently a stub: always returns 0 regardless of the drawn numbers.
    """
    return 0
7d34e9a879a078c0b12b60b133bf9cb1b82d9ad0
19,655
import itertools

def make_call_summary_report(calls_list, users, for_graphviz=False):
    """
    Makes a graphical or textual report of who has been on a call with who.
    Only considers calls initiated by one of the tracked users.

    Parameters
    ----------
    calls_list: list
        Calls to look through (perhaps representing a section of history).
        Each call is a dict with 'user' (initiator id), 'duration_m' and
        'participants' (ids) keys.
    users: list
        List of users to consider; maps user id -> record with 'real_name'.
    for_graphviz: boolean
        A flag indicating this report should be for graphviz dot tool.
        If false, prepares a human-readable text report section.
    """
    lines = [
        "",
        "Section C - Calls Summary - Who talks to whom? (group-initiated calls only)",
        ""]
    # One (starter name, duration, participant names) triple per tracked
    # call; untracked participants are dropped from the name list.
    summary_of_each_call = [
        (users[call['user']]['real_name'],
         call['duration_m'],
         [users[participant]['real_name']
          for participant in call['participants']
          if participant in users])
        for call in calls_list if call['user'] in users]  # call['user'] is participant
    if for_graphviz:
        # list each user as node
        # list each call as a list of edges between its participants
        lines = ["graph CallNetwork {"]
        for uid in users:
            lines.append("%s;" % users[uid]['real_name'])
        for (_, _, participants) in summary_of_each_call:
            # Every unordered pair of participants becomes one edge.
            for (f, t) in itertools.combinations(participants, 2):
                lines.append("%s -- %s;" % (f, t))
        lines.append("}")
    else:
        for (starter, duration, participants) in summary_of_each_call:
            lines.append("Call of duration %dm started by %s, \n\twith participants %s"
                         % (duration, starter, ", ".join(participants)))
    return "\n".join(lines)
607c590e51194100e3fbc0f1f5aa09c4fe94a4ca
19,656
def seen(user, item):
    """Return whether *item* has been seen by *user*.

    Thin wrapper around ``item.seen_by(user)``. The stray debug
    ``print`` left in the original has been removed.
    """
    return item.seen_by(user)
d4b05238fb60d133041b4e2574a8493f0c0387f0
19,657
def get_port_locations(location):
    """
    Takes input port location given by the user and returns a dictionary
    with speed mapped to port location as per RG

    Ex: get_port_location_for_speed("10.36.87.215;1;57")
    output: {'speed_50_gbps': '10.36.87.215;1;57',
             'speed_100_gbps': '10.36.87.215;1;25',
             'speed_200_gbps': '10.36.87.215;1;9',
             'speed_400_gbps': '10.36.87.215;1;1'}
    """
    port_400g = ""
    port_200g = ""
    port_100g = ""
    port_50g = ""
    port_location = {}
    # Location format is "<ip>;<card>;<port>"; only the port number decides
    # which resource group (RG) the port belongs to.
    port_num = int(location.split(";")[2])
    rg_fanout200g = {}
    rg_fanout200g_starting_port = 9
    rg_fanout100g = {}
    rg_fanout100g_starting_port = 25
    rg_fanout50g = {}
    rg_fanout50_starting_port = 57
    # RG layout: ports 1-8 are the 400G ports (one per RG); each RG then
    # fans out to two 200G ports (9-24), four 100G ports (25-56) and two
    # 50G ports (57-72).
    for rg in range(1, 9):
        rg_fanout200g[rg] = list(
            range(rg_fanout200g_starting_port, rg_fanout200g_starting_port + 2)
        )
        rg_fanout200g_starting_port += 2
    for rg in range(1, 9):
        rg_fanout100g[rg] = list(
            range(rg_fanout100g_starting_port, rg_fanout100g_starting_port + 4)
        )
        rg_fanout100g_starting_port += 4
    for rg in range(1, 9):
        rg_fanout50g[rg] = list(
            range(rg_fanout50_starting_port, rg_fanout50_starting_port + 2)
        )
        rg_fanout50_starting_port += 2
    if port_num < 9:
        # Input is the 400G port itself: take the first fan-out port of
        # each lower speed in the same RG.
        port_400g = port_num
        port_200g = rg_fanout200g[port_num][0]
        port_100g = rg_fanout100g[port_num][0]
        port_50g = rg_fanout50g[port_num][0]
    elif port_num > 8 and port_num < 25:
        # Input is a 200G fan-out port: recover its owning RG first.
        port_200g = port_num
        port_400g = next(
            key for key in rg_fanout200g if port_num in rg_fanout200g[key]
        )
        port_100g = rg_fanout100g[port_400g][0]
        port_50g = rg_fanout50g[port_400g][0]
    elif port_num > 24 and port_num < 57:
        # Input is a 100G fan-out port.
        port_100g = port_num
        port_400g = next(
            key for key in rg_fanout100g if port_num in rg_fanout100g[key]
        )
        port_200g = rg_fanout200g[port_400g][0]
        port_50g = rg_fanout50g[port_400g][0]
    elif port_num > 56 and port_num < 73:
        # Input is a 50G fan-out port.
        port_50g = port_num
        port_400g = next(
            key for key in rg_fanout50g if port_num in rg_fanout50g[key]
        )
        port_200g = rg_fanout200g[port_400g][0]
        port_100g = rg_fanout100g[port_400g][0]
    # Re-attach the "<ip>;<card>;" prefix to every computed port number.
    port_400g = ";".join(location.split(";")[0:2]) + ";" + str(port_400g)
    port_200g = ";".join(location.split(";")[0:2]) + ";" + str(port_200g)
    port_100g = ";".join(location.split(";")[0:2]) + ";" + str(port_100g)
    port_50g = ";".join(location.split(";")[0:2]) + ";" + str(port_50g)
    port_location["speed_400_gbps"] = port_400g
    port_location["speed_200_gbps"] = port_200g
    port_location["speed_100_gbps"] = port_100g
    port_location["speed_50_gbps"] = port_50g
    return port_location
57ee33a78547a5fbe02386c58324d2b9c961805a
19,659
def _clean_up_loop_dict(loop_dict): """Clean up loop labels in data This is a private function, not meant for general use. Input: dataframe Output: dataframe """ # Remove the 'data_header' tag if it exists # since it is a list of dataframes # Then re-attach each of them one at a time if u'data_header' in loop_dict.keys(): header_df_list = loop_dict.pop(u'data_header') if isinstance(header_df_list, list): for df in enumerate(header_df_list): loop_dict[u'data_header_'+str(df[0]+1)] = df[1] else: loop_dict[u'data_header_1'] = header_df_list return loop_dict
140d6594fd11c9761e87ec83bc03f5f1325bd06c
19,660
def get_last_record(conn):
    """
    Return last record from table 'person' with connection 'conn'

    Falls back to ["", "0"] when the table is empty.
    """
    cursor = conn.cursor()
    cursor.execute(
        """
    SELECT * FROM person
    ORDER BY id DESC
    LIMIT 1
    """
    )
    rows = cursor.fetchall()
    # Empty table: return the placeholder record.
    return rows[0] if rows else ["", "0"]
4f1a4dd55779cf362d9514bc9512fe0f70ac6dd5
19,661
import platform
import sys

def default_name_prefix():
    """
    Get the default package name prefix for the Python version we're running.

    :returns: One of the strings ``python``, ``python3`` or ``pypy``.
    """
    if platform.python_implementation() == 'PyPy':
        prefix = 'pypy'
    else:
        prefix = 'python'
    # Append the major version suffix only for Python 3.
    return prefix + '3' if sys.version_info[0] == 3 else prefix
9b57904eb01d9904c1f7bbd28402512b1a55f4d8
19,662
def get_dataset(dataloaders: dict) -> dict:
    """
    From dictionary of dataloaders to dictionary of datasets.

    Fix: the original iterated the dict directly
    (``for split, dataloader in dataloaders``), which tries to unpack each
    *key* and raises unless the keys happen to be 2-tuples; iterate
    ``.items()`` instead.
    """
    return {split: loader.dataset for split, loader in dataloaders.items()}
b21b266f377a2edb910bde163ff656988512f964
19,664
from textwrap import dedent

def format_program(program, marker):
    """Preprocess the Python program run by the child process.

    Dedents *program* and substitutes its ``{MAIN}`` placeholder with a
    snippet that touches the *marker* path (so the parent can detect the
    child started) and then sleeps for a few seconds.
    """
    # marker is interpolated via !r so the generated code gets a valid,
    # properly-escaped string literal.
    main = f"""
    import time
    from pathlib import Path
    Path({str(marker)!r}).touch()
    time.sleep(3)
    """
    return dedent(program).format(MAIN=dedent(main))
8c4ff7162fd3018daf4c750eabb775e9ff5be6fd
19,666
def get_binary_arch(binary_file):
    """Parse a binary's ELF header for arch.

    :param binary_file: file-like object opened in binary mode.
    :return: one of "arm", "arm64", "x86", "x86_64", "mips", "mips64".
    :raises RuntimeError: read failure, big-endian input or unknown arch.

    Fix: on Python 3 indexing ``bytes`` yields ``int``, so the original's
    ``ord(binary[i])`` raised TypeError; normalize instead.
    """
    try:
        binary_file.seek(0)
        binary = binary_file.read(0x14)
    except IOError:
        raise RuntimeError("failed to read binary file")

    def _byte(buf, idx):
        # bytes indexing yields int on Py3 but a 1-char str on Py2.
        b = buf[idx]
        return b if isinstance(b, int) else ord(b)

    ei_class = _byte(binary, 0x4)  # 1 = 32-bit, 2 = 64-bit
    ei_data = _byte(binary, 0x5)  # Endianness
    assert ei_class == 1 or ei_class == 2
    if ei_data != 1:
        raise RuntimeError("binary isn't little-endian?")
    # e_machine is a little-endian 16-bit field at offset 0x12.
    e_machine = _byte(binary, 0x13) << 8 | _byte(binary, 0x12)
    if e_machine == 0x28:
        assert ei_class == 1
        return "arm"
    elif e_machine == 0xB7:
        assert ei_class == 2
        return "arm64"
    elif e_machine == 0x03:
        assert ei_class == 1
        return "x86"
    elif e_machine == 0x3E:
        assert ei_class == 2
        return "x86_64"
    elif e_machine == 0x08:
        if ei_class == 1:
            return "mips"
        else:
            return "mips64"
    else:
        raise RuntimeError("unknown architecture: 0x{:x}".format(e_machine))
f9a1fdd4c8321a0fc26b39cb139485aba5c15fd5
19,667
def try_fixing_indent(s, diff, align_to=None, first_lines=0):
    """Given a string, try to fix its internal indentation

    Shifts every line after the first by ``diff`` spaces (positive adds,
    negative removes). Lines at index >= ``first_lines`` additionally get
    an alignment shift so the last line (when its content starts with '<')
    lands at column ``align_to``.
    """
    # Nothing to do for a zero shift or a single-line string.
    if diff == 0 or '\n' not in s:
        return s
    lines = s.split('\n')
    if len(lines) < 2:
        return s
    internal_diff = 0
    # If we are making a change, and we have an align_to specified,
    # shift lines so that the last line is aligned with the first.
    if align_to is not None:
        wo_space = lines[-1].lstrip(" ")
        leading_spaces = len(lines[-1]) - len(wo_space)
        if wo_space and wo_space[0] == '<':
            internal_diff = align_to - leading_spaces
    fixed = [lines[0]]
    # Each is a (spacing-string, signed amount) pair: 'early' applies to the
    # first `first_lines` lines, 'late' (with the alignment shift folded in)
    # to the rest.
    early_spacing = " " * abs(diff), diff
    late_spacing = " " * abs(diff + internal_diff), diff + internal_diff
    for i, line in enumerate(lines[1:]):
        spacing, cdiff = late_spacing if i + 1 >= first_lines else early_spacing
        if cdiff > 0 and line:
            # Indent non-empty lines; empty lines stay empty.
            line = spacing + line
        elif cdiff < 0 and line.startswith(spacing):
            # Dedent only when the full spacing prefix is present.
            line = line[len(spacing):]
        fixed.append(line)
    return '\n'.join(fixed)
91188396e469de4e962eee5096fa398d4adfbe22
19,668
import cmath

def twiddle_factor(turns):
    """
    Calculates the FFT twiddle factor of an angle measured in turns
    (not radian).
    """
    # One turn = 2*pi radians; negative sign for the forward DFT.
    exponent = -2.0j * cmath.pi * turns
    return cmath.exp(exponent)
4521143dc42d2e70c2215a36977d69e7cc519418
19,670
def snap_to_widest(to_snap, widests):
    """
    Snaps the width of each column of the table to the width of the
    largest element in the respective column.

    Each cell is stringified and centered in its column's width.
    """
    return [
        [f'{str(row[col]):^{widests[col]}}' for col in range(len(widests))]
        for row in to_snap
    ]
4f6c7dc28dc041389730813ca38af2e322aa455a
19,671
def wrap_deprecated(func, name):
    """Return a check function for a deprecated assert method call.

    If the `assertive-deprecated` option has been enabled and the wrapped
    check function doesn't yield any errors of its own, this function will
    yield an A503 error that includes the new name of the deprecated
    method.

    Fix: the original used ``for ... else``; since the loop never
    ``break``s, the else-branch always ran and A503 was emitted even when
    the wrapped check reported errors. Track whether anything was yielded
    instead.
    """
    def wrapper(self, node):
        found_errors = False
        for error in func(self, node):
            found_errors = True
            yield error
        if not found_errors:
            yield self.error(node, 'A503', func=name, name=node.func.attr)
    return wrapper
a94e0308ad4271ec669f4c5392a54193754c6b3f
19,672
import os

def imagenet_size(train_path):
    """It calculates the number of examples in ImageNet training-set.

    Args:
        train_path: path to ILSVRC training set folder

    Returns:
        n: the number of training examples
    """
    # One subdirectory per class; count the files inside each of them.
    return sum(
        len(os.listdir(os.path.join(train_path, class_dir)))
        for class_dir in os.listdir(train_path)
    )
eda47402a3b6c3b93185b559810c1c4bac7bcb86
19,673
def _get_error_message(check, threshold, importance, series_name, value): """ Construct a check's error message, which will differ based on the number of consecutive failed points. For a single failed point: Format: <importance> <series_name>: <value> not <comparator> <threshold> Example: WARNING foo.service.errors: 100 not < 50 For N consecutive failed points: Format: <importance> <series_name>: <N> consecutive points not <comparator> <threshold> Example: CRITICAL foo.service.errors: 10 consecutive points not < 50 """ if check.consecutive_failures == 1: fmt = u'{} {}: {:0.1f} not {} {:0.1f}' return fmt.format(importance, series_name, value, check.check_type, threshold) else: fmt = u'{} {}: {} consecutive points not {} {:0.1f}' return fmt.format(importance, series_name, check.consecutive_failures, check.check_type, threshold)
a91a761fc5bfb59c8ac8bdc3ea71b0e457d40bc1
19,674
def BCEWithLogitsLossConfig(argument_parser):
    """
    Set CLI arguments

    :param argument_parser: argument parser
    :type argument_parser: ```ArgumentParser```

    :returns: argument_parser
    :rtype: ```ArgumentParser```
    """
    # Long-form description mirrors the torch.nn.BCEWithLogitsLoss docs.
    argument_parser.description = """This loss combines a `Sigmoid` layer and the `BCELoss` in one single class. This version is more numerically stable than using a plain `Sigmoid` followed by a `BCELoss` as, by combining the operations into one layer, we take advantage of the log-sum-exp trick for numerical stability.

The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:

.. math::
    \\ell(x, y) = L = \\{l_1,\\dots,l_N\\}^\\top, \\quad
    l_n = - w_n \\left[ y_n \\cdot \\log \\sigma(x_n)
    + (1 - y_n) \\cdot \\log (1 - \\sigma(x_n)) \\right],

where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
(default ``'mean'``), then

.. math::
    \\ell(x, y) = \\begin{cases}
        \\operatorname{mean}(L), & \\text{if reduction} = \\text{`mean';}\\\\
        \\operatorname{sum}(L),  & \\text{if reduction} = \\text{`sum'.}
    \\end{cases}

This is used for measuring the error of a reconstruction in for example
an auto-encoder. Note that the targets `t[i]` should be numbers
between 0 and 1.

It's possible to trade off recall and precision by adding weights to positive examples.
In the case of multi-label classification the loss can be described as:

.. math::
    \\ell_c(x, y) = L_c = \\{l_{1,c},\\dots,l_{N,c}\\}^\\top, \\quad
    l_{n,c} = - w_{n,c} \\left[ p_c y_{n,c} \\cdot \\log \\sigma(x_{n,c})
    + (1 - y_{n,c}) \\cdot \\log (1 - \\sigma(x_{n,c})) \\right],

where :math:`c` is the class number (:math:`c > 1` for multi-label binary classification,
:math:`c = 1` for single-label binary classification),
:math:`n` is the number of the sample in the batch and
:math:`p_c` is the weight of the positive answer for the class :math:`c`.

:math:`p_c > 1` increases the recall, :math:`p_c < 1` increases the precision.

For example, if a dataset contains 100 positive and 300 negative examples of a single class,
then `pos_weight` for the class should be equal to :math:`\\frac{300}{100}=3`.
The loss would act as if the dataset contains :math:`3\\times 100=300` positive examples.

Examples::

    >>> target = torch.ones([10, 64], dtype=torch.float32)  # 64 classes, batch size = 10
    >>> output = torch.full([10, 64], 1.5)  # A prediction (logit)
    >>> pos_weight = torch.ones([64])  # All weights are equal to 1
    >>> criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)
    >>> criterion(output, target)  # -log(sigmoid(1.5))
    tensor(0.2014)

Shape:
    - Input: :math:`(N, *)` where :math:`*` means, any number of additional dimensions
    - Target: :math:`(N, *)`, same shape as the input
    - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(N, *)`, same
      shape as input.

Examples::

    >>> loss = nn.BCEWithLogitsLoss()
    >>> input = torch.randn(3, requires_grad=True)
    >>> target = torch.empty(3).random_(2)
    >>> output = loss(input, target)
    >>> output.backward()"""
    # NOTE(review): --weight is marked required=True although the
    # underlying loss treats it as optional — confirm intent.
    argument_parser.add_argument(
        "--weight",
        help="""a manual rescaling weight given to the loss of each batch element. If given, has to be a Tensor of size `nbatch`.""",
        required=True,
    )
    argument_parser.add_argument(
        "--size_average",
        type=bool,
        help="""Deprecated (see :attr:`reduction`). By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple elements per sample. If the field :attr:`size_average` is set to ``False``, the losses are instead summed for each minibatch. Ignored when :attr:`reduce` is ``False``.""",
        default=True,
    )
    argument_parser.add_argument(
        "--reduce",
        type=bool,
        help="""Deprecated (see :attr:`reduction`). By default, the losses are averaged or summed over observations for each minibatch depending on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per batch element instead and ignores :attr:`size_average`.""",
        default=True,
    )
    # NOTE(review): --reduction is required=True yet also has a default of
    # "mean"; argparse will still demand it on the command line — confirm.
    argument_parser.add_argument(
        "--reduction",
        help="""Specifies the reduction to apply to the output: ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied, ``'mean'``: the sum of the output will be divided by the number of elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average` and :attr:`reduce` are in the process of being deprecated, and in the meantime, specifying either of those two args will override :attr:`reduction`.""",
        required=True,
        default="mean",
    )
    argument_parser.add_argument(
        "--pos_weight",
        help="a weight of positive examples. Must be a vector with length equal to the number of classes.",
    )
    return argument_parser
9d02d1b0cb78aac78146869ff5fc8f5635a57510
19,675
def make_subj(common_name, encrypted=False):
    """
    Make a subject string

    :param common_name: Common name used in certificate
    :param encrypted: Add the encrypted flag to the organisation
    :return: A subject string
    """
    mode = "encrypted" if encrypted else "plain"
    return ("/C=FR/ST=Auvergne-Rhone-Alpes/L=Grenoble/O=iPOPO Tests ({0})"
            "/CN={1}".format(mode, common_name))
def68ad50d44003c27b2ac0dfa4f67faa8bf9ed9
19,676
def best_fit(nelem):
    """ Obtain the expected fit for a given number of elements """
    def fit(t):
        # Quadratic in 1/t, scaled by the element count.
        return 4.2 * nelem * (1.e-3 / t) ** 2
    return fit
a06cb722107391cab9a9e0e1eb6553f2ad9587d2
19,677
def string_contains_surrogates(ustring):
    """
    Check if the unicode string contains surrogate code points
    on a CPython platform with wide (UCS-4) or narrow (UTF-16)
    Unicode, i.e. characters that would be spelled as two
    separate code units on a narrow platform.
    """
    # Astral code points (> U+FFFF) or code points inside the surrogate
    # range both qualify.
    return any(cp > 65535 or 0xD800 <= cp <= 0xDFFF
               for cp in map(ord, ustring))
f459cfd562cf40e8e5705fa58009fcde6c9b1a0c
19,678
def finalize(cur_aggregate):
    """Retrieve the (mean, sample variance) from a Welford aggregate.

    :param cur_aggregate: (count, mean, M2) triple.
    :return: (mean, variance) tuple, or ``float('nan')`` when fewer than
        two samples were aggregated.

    Fix: the original computed ``m_2 / (count - 1)`` *before* the
    ``count < 2`` guard, so a single-sample aggregate raised
    ZeroDivisionError instead of returning NaN.
    """
    count, mean, m_2 = cur_aggregate
    if count < 2:
        return float('nan')
    return mean, m_2 / (count - 1)
50190036de4eee6b3a5fac3ee0488fc9f21fb734
19,680
def can_be_index(obj):
    """Determine if an object can be used as the index of a sequence.

    :param any obj: The object to test
    :returns bool: Whether it can be an index or not

    Fix: the original fell through and returned ``None`` (falsy) when
    indexing raised no exception at all — e.g. ``slice`` objects, which
    are perfectly valid indices.
    """
    try:
        [][obj]
    except TypeError:
        return False
    except IndexError:
        return True
    # Indexing succeeded outright (e.g. a slice on an empty list).
    return True
899b36096a1aaf3fc559f3f0e6eb08251c36277c
19,681
def get_model(row):
    """
    The vehicle's model is a mandatory field and for each row only one
    model can be set to 1. The remainder are all set to 0 by default.

    Returns the lower-cased model when it is in the allowed set,
    otherwise None (implicitly).
    """
    allowed_models = frozenset([
        'other', 'crosstour', 'fit', 'civic', 'ridgeline', 'del sol',
        'accord', 'passport', 'odyssey', 'insight', 's2000', 'cr-z',
        'accord crosstour', 'hr-v', 'element', 'cr-v', 'prelude', 'pilot',
    ])
    model = row['model'].lower()
    if model in allowed_models:
        return model
ec44e4e7178db9de08437a1f6be4c94c977f67cd
19,682
def apply_xG_model_to_test(df_shots_test, models):
    """
    Applying the different logistic regression models to produce xG
    values, one new column per model.
    """
    log_basic, log_added, log_adv, log_syn, log_adv_on_syn = models
    print('Applying models...')
    # Keep the original column order: each predict sees the frame with the
    # previously added xG columns already present.
    named_models = (
        ('xG_basic', log_basic),
        ('xG_added', log_added),
        ('xG_adv', log_adv),
        ('xG_syn', log_syn),
        ('xG_adv_on_syn', log_adv_on_syn),
    )
    for column, model in named_models:
        df_shots_test[column] = model.predict(df_shots_test)
    print(f'Done applying {len(models)} models.')
    return df_shots_test
95ef33fec6d0db7a53875e5808464700739a7310
19,683
import math

def r2d(rval):
    r"""
    Convert the integer or floating point radian value to degrees.

    The radian value must be between :math:`-2*\pi <= rval <= 2*\pi`;
    values outside that range trigger an AssertionError.

    >>> r2d(math.pi)
    180.0
    >>> r2d(3 * math.pi / 4)
    135.0
    """
    assert abs(rval) <= (2 * math.pi)
    degrees_per_radian = 180 / math.pi
    return rval * degrees_per_radian
31a87c0548f9238340b6218dcb1ed3f2ed8ec28b
19,688
def _make_pixel( val ): """ Construct a pixel tuple of (R,G,B,A) from the unsigned integer val. Used to explicitly embed font metric data into the png """ return ( (val & 0x000000ff, (val & 0x0000ff00) >> 8, (val & 0x00ff0000) >> 16, (val & 0xff000000) >> 24) )
d7a993817ebbaf482dd83dcf52d5e0414e934132
19,689
def scale(coord_paths, scale_factor):
    """
    Take an array of paths, and scale them all away from (0,0) cartesian
    using a scalar factor; return the resulting paths.
    """
    return [
        [(p[0] * scale_factor, p[1] * scale_factor) for p in path]
        for path in coord_paths
    ]
e38dc71c0e2361628e428804e41b5314907641d5
19,692
def maybe_parse(val, parse_func):
    """Parse argument value with function if string.

    None -> [], str/bytes -> parse_func(val), dict -> list of items,
    list/tuple -> shallow list copy, anything else -> unchanged.
    """
    if val is None:
        normalized = []
    elif isinstance(val, (bytes, str)):
        normalized = parse_func(val)
    elif isinstance(val, dict):
        normalized = list(val.items())
    elif isinstance(val, (list, tuple)):
        normalized = list(val)
    else:
        normalized = val
    return normalized
8427f129ee0e50ce8d259c68122e81c2a53e4692
19,693
import collections

def calculate_postman_solution_stats(circuit, edge_weight_name='distance'):  # JC modified for directed graphs
    """
    Calculate summary stats on the route

    Args:
        circuit (list[tuple]): output from `cpp` or `rpp` solvers; each
            entry is (from_node, to_node, ..., attr_dict) with the edge
            attributes in position 3.
        edge_weight_name (str): parameter name for edge attribute with
            distance/weight

    Returns:
        print statements with relevant data
        summary table (OrderedDict)
    """
    summary_stats = collections.OrderedDict()
    # Collapse directed traversals onto undirected edges (frozenset of the
    # two endpoints) and count how often each was passed.
    undirected_edge_passes = {}
    for e in circuit:
        edge = frozenset([e[0], e[1]])
        if edge not in undirected_edge_passes:
            undirected_edge_passes[edge] = {'edge_distance': e[3][edge_weight_name],
                                            'number_of_passes': 1}
        else:
            undirected_edge_passes[edge]['number_of_passes'] += 1
    # Distinct directed edges, preserving first-seen order.
    directed_edges = []
    for e in circuit:
        edge = (e[0], e[1])
        if edge not in directed_edges:
            directed_edges += [edge]
    summary_stats['distance_traveled'] = sum([e[3][edge_weight_name] for e in circuit])
    summary_stats['distance_in_circuit'] = sum([undirected_edge_passes[edge]['edge_distance']
                                                for edge in undirected_edge_passes])
    summary_stats['distance_traveled_once'] = sum([undirected_edge_passes[edge]['edge_distance']
                                                   for edge in undirected_edge_passes
                                                   if undirected_edge_passes[edge]['number_of_passes'] == 1])
    summary_stats['distance_traveled_twice'] = sum([undirected_edge_passes[edge]['edge_distance']*2
                                                    for edge in undirected_edge_passes
                                                    if undirected_edge_passes[edge]['number_of_passes'] == 2])
    summary_stats['distance_traveled_thrice'] = sum([undirected_edge_passes[edge]['edge_distance']*3
                                                     for edge in undirected_edge_passes
                                                     if undirected_edge_passes[edge]['number_of_passes'] == 3])
    summary_stats['distance_traveled_more_than_thrice'] = sum([undirected_edge_passes[edge]['edge_distance']*undirected_edge_passes[edge]['number_of_passes']
                                                               for edge in undirected_edge_passes
                                                               if undirected_edge_passes[edge]['number_of_passes'] > 3])
    summary_stats['road_length_traveled_more_than_thrice'] = sum([undirected_edge_passes[edge]['edge_distance']
                                                                  for edge in undirected_edge_passes
                                                                  if undirected_edge_passes[edge]['number_of_passes'] > 3])
    # NOTE(review): optional-edge distance uses the literal key 'distance'
    # instead of edge_weight_name — confirm whether that is intentional.
    summary_stats['distance_traveled_optional'] = sum([e[3]['distance'] for e in circuit
                                                       if e[3].get('required') == 0])
    summary_stats['distance_traveled_required'] = summary_stats['distance_traveled'] - summary_stats['distance_traveled_optional']
    summary_stats['edges_traveled'] = len(circuit)
    summary_stats['edges_in_circuit'] = len(undirected_edge_passes)
    summary_stats['directed_edges'] = len(directed_edges)
    summary_stats['edges_traveled_once'] = len([edge for edge in undirected_edge_passes
                                                if undirected_edge_passes[edge]['number_of_passes'] == 1])
    summary_stats['edges_traveled_twice'] = len([edge for edge in undirected_edge_passes
                                                 if undirected_edge_passes[edge]['number_of_passes'] == 2])
    summary_stats['edges_traveled_thrice'] = len([edge for edge in undirected_edge_passes
                                                  if undirected_edge_passes[edge]['number_of_passes'] == 3])
    summary_stats['edges_traveled_more_than_thrice'] = len([edge for edge in undirected_edge_passes
                                                            if undirected_edge_passes[edge]['number_of_passes'] > 3])
    summary_stats['edges_traveled_optional'] = collections.Counter([e[3].get('required') for e in circuit])[0]
    summary_stats['edges_traveled_required'] = summary_stats['edges_traveled'] - summary_stats['edges_traveled_optional']
    # Distances below are converted from meters to miles (x 0.000621371).
    print('\nRoute Statistics\n')
    print('\tTotal Distance Traveled: {} miles'.format(round(summary_stats['distance_traveled']*0.000621371, 2)))
    print('\t\tRoad Length Covered by Circuit: {} miles'.format(round(summary_stats['distance_in_circuit']*0.000621371, 2)))
    print('\tDistance Traveled Once: {} miles'.format(round(summary_stats['distance_traveled_once']*0.000621371, 2)))
    print('\tDistance Traveled Twice: {} miles'.format(round(summary_stats['distance_traveled_twice']*0.000621371, 2)))
    print('\t\tRoad Length Traveled Twice: {} miles'.format(round(summary_stats['distance_traveled_twice']*0.000621371/2, 2)))
    print('\tDistance Traveled Thrice: {} miles'.format(round(summary_stats['distance_traveled_thrice']*0.000621371, 2)))
    print('\t\tRoad Length Traveled Thrice: {} miles'.format(round(summary_stats['distance_traveled_thrice']*0.000621371/3, 2)))
    print('\tDistance Traveled more than Thrice: {} miles'.format(round(summary_stats['distance_traveled_more_than_thrice']*0.000621371, 2)))
    print('\t\tRoad Length Traveled more than Thrice: {} miles'.format(round(summary_stats['road_length_traveled_more_than_thrice']*0.000621371, 2)))
    print('\tDistance of Passes over Required Edges: {} miles'.format(round(summary_stats['distance_traveled_required']*0.000621371, 2)))
    print('\tDistance of Passes over Optional Edges: {} miles\n'.format(round(summary_stats['distance_traveled_optional']*0.000621371, 2)))
    print('\tNumber of Edge Passes: {}'.format(summary_stats['edges_traveled']))
    print('\tNumber of Directed Edges in Circuit: {}'.format(summary_stats['directed_edges']))
    print('\tNumber of Undirected Edges in Circuit: {}'.format(summary_stats['edges_in_circuit']))
    print('\t\tNumber Traveled Once: {}'.format(summary_stats['edges_traveled_once']))
    print('\t\tNumber Traveled Twice: {}'.format(summary_stats['edges_traveled_twice']))
    print('\t\tNumber Traveled Thrice: {}'.format(summary_stats['edges_traveled_thrice']))
    print('\t\tNumber Traveled More than Thrice: {}'.format(summary_stats['edges_traveled_more_than_thrice']))
    print('\tNumber of Passes over Required Edges: {}'.format(summary_stats['edges_traveled_required']))
    print('\tNumber of Passes over Optional Edges: {}'.format(summary_stats['edges_traveled_optional']))
    return summary_stats
1ea0e585904c9388065438cfe990ed10c2a840b2
19,694
def get_documentation_str(node):
    """
    Retrieve the documentation information from a cwl formatted dictionary.
    If there is no doc tag return the id value.

    :param node: dict: cwl dictionary
    :return: str: documentation description or id
    """
    # Falsy doc (missing or empty) falls back to the id.
    return node.get("doc") or node.get("id")
6fce12b94d6000aee862c7dc8f105f4420357370
19,695
def make_folder_name(old_name):
    """Strip course-type words and suffixes from a title to get a short folder name.

    :param old_name: unformatted name
    :return: the name with filler tokens removed, easier to find in dst_folder
    """
    noise_tokens = ("Vorlesung", "Übung", "Tutorium", " - Dateien", ": ")
    cleaned = old_name
    for token in noise_tokens:
        cleaned = cleaned.replace(token, "")
    return cleaned
79312448ab73cd887661f21fe79fe4518fb978dd
19,696
def some_other_data():
    """Deliberately raise AssertionError from a fixture.

    The internal sanity check compares 43 against 42, so the return
    statement is never reached.
    """
    value = 43
    assert value == 42
    return value
4a261f7f0bbac52b1c0891e348fb531bbfeabfaa
19,697
import json


def generateKoldieQueryCampaignIDJSONpayload(koldie_query_campaign_id):
    """Build the JSON payload used to poll a Kolide query campaign.

    Input: Takes in Kolide query campaign ID
    Output: Returns JSON payload (string) for querying result(s) of query
    """
    payload = {
        "type": "select_campaign",
        "data": {"campaign_id": koldie_query_campaign_id},
    }
    return json.dumps(payload)
bd71b1d1f0d6eb57169e5fe93e6a8184b3149bb7
19,698
def get_shape_xyzct(shape_wh, n_channels):
    """Build an XYZCT-ordered shape tuple from a 2D size and a channel count.

    Parameters
    ----------
    shape_wh : tuple of int
        Width and heigth of image

    n_channels : int
        Number of channels in the image

    Returns
    -------
    xyzct : tuple of int
        XYZCT shape of the image

    """
    return tuple(shape_wh) + (1, n_channels, 1)
7b8ec67ccbfd33811904393dfe0905a169900513
19,699
import numpy


def calc_normalized_sym_elems_b(sym_elems_b):
    """Brings elements of sym_elems_b to a common denominator

    Parameters
    ----------
    sym_elems_b : [{"num_x", "num_y", "num_z", "denominator"}, n_symm_elems]
        Rows 0-2 hold numerators; row 3 holds the per-element denominator.

    Returns
    -------
    normalized_sym_elems_b : [{"num_x", "num_y", "num_z"}, n_symm_elems],
        common_denominator
    """
    denominators = sym_elems_b[3, :]
    common_denominator = numpy.lcm.reduce(denominators)
    # per-column factor that brings each element onto the common denominator
    scale = common_denominator // denominators
    normalized = sym_elems_b[:3, :] * scale
    return normalized, common_denominator
38facca31e7da2306c97440d0848f65072245357
19,700
def rescale(num, old_min, old_max, new_min, new_max):
    """Linearly map num from the interval [old_min, old_max] onto [new_min, new_max]."""
    span_old = old_max - old_min
    span_new = new_max - new_min
    return new_min + ((num - old_min) * span_new) / span_old
f823f46267d3b666ae0921957c2c3a3ca8c0a393
19,701
def find_fired_conditions(conditions, guard=None, *args, **kwargs):
    """
    For an iterable (e.g. list) of boolean functions, find a list of functions
    returning ``True``.

    If ``guard`` is given, it is applied to a function to get the predicate -
    a function ``() -> bool``. If this predicate is not ``None``, it is checked
    and the condition is then evaluated if and only if the predicate returns
    ``True``. If ``guard`` is not provided, or the predicate is ``None``,
    condition is tested without additional check.

    Normally the predicate should be a very short function allowing to test
    whether a complex condition need to be evaluated.

    Args:
        conditions: an iterable of boolean functions

        guard: a ``(condition) -> predicate`` function, where ``predicate``
            is ``() -> bool``.

        *args: positional arguments passed to each condition

        **kwargs: keyword arguments passed to each condition

    Returns:
        a list of conditions evaluated to ``True``
    """
    fired = []
    for condition in conditions:
        if guard is not None:
            predicate = guard(condition)
            # Per the documented contract a None predicate means "no
            # pre-check" and the condition is tested directly.  (The previous
            # implementation silently skipped conditions whose predicate was
            # None, contradicting the docstring.)
            if predicate is not None and not predicate():
                continue
        if condition(*args, **kwargs):
            fired.append(condition)
    return fired
0ae88df1df36667c7d771380c12d413437ebed11
19,702
def is_admin(context, user, dc=None):
    """Return True when ``user`` is a DC admin for the current request."""
    request = context['request']
    return user.is_admin(request, dc=dc)
0569dd4fc49e85da4ba29761bd1c425d6342e82b
19,703
def get_month(date):
    """Extract the month number from an ISO-style 'YYYY-MM-...' date string."""
    parts = date.split('-')
    return int(parts[1])
eabf8f51554f537bbf12148eb9a9151fabe1cfad
19,704
def lollipop_compare(old_epoch: int, new_epoch: int) -> int:
    """
    Compares two 8-bit lollipop sequences

    :returns: a value indicating if the given new_epoch is in fact newer than old_epoch
        1 if the new_epoch is newer than old_epoch
        0 if the new_epoch is newer and the sequence has been reset
        -1 if new_epoch is not newer than old_epoch
    """
    if old_epoch == new_epoch:
        return -1
    # Plain increase: 43 > 42 means strictly newer.
    if new_epoch > old_epoch:
        return 1
    # New value dropped into the negative range (-127 < 0 <= 10): a reset.
    if new_epoch < 0 <= old_epoch:
        return 0
    # Both negative (-126 < -120 < 0): either a reset or a delayed packet
    # from another neighbor; assume a reset.
    if new_epoch < old_epoch < 0:
        return 0
    # Wrap-around: a large backwards jump (gap > 32) means newer, e.g. 10 vs 128.
    if old_epoch - new_epoch > 32:
        return 1
    return -1
b925a333324c905ee89f368b218ffe584501fc9b
19,705
def height_to_imperial(height):
    """Convert a height in centimetres to (feet, inches).

    Feet is an int; inches keeps the fractional remainder.
    """
    total_inches = height / 2.54
    whole_feet = int(total_inches) // 12
    leftover_inches = total_inches % 12
    return whole_feet, leftover_inches
bdec165de4b1576e53f2c81e6d3fc9d60b82d8ee
19,707
import os


def load_dir(dir_path):
    """Return paths of all template files (.yaml/.yml/.sls) in a directory."""
    template_suffixes = ('.yaml', '.yml', '.sls')
    return [f'{dir_path}/{name}'
            for name in os.listdir(dir_path)
            if name.endswith(template_suffixes)]
9d4556d489caf1a96c9167c330a68e5b91e44228
19,709
def _clean_dag(dag): """Clean the DAG.""" for node in dag.nodes: dag.nodes[node].clear() return dag
85488a71bdc4214b20f50c21793df7c14f92b21b
19,710
def f(B, x):
    """Linear model through the origin for ODR fitting: y = B * x."""
    return B * x
ac23fd53c27d784ad826a2ac4330a51f71e13d96
19,711
import os


def probePreProcess(path):
    """Load a probe .txt file and attach metadata inferred from its file name.

    required inputs:
        path: path of input file (as .txt)
    outputs:
        list of dicts, one per probe line, carrying the probe text, the
        polarity ('pos_neg'), the clause type and the line index ('probe_n')
    """
    with open(path, 'r') as handle:
        lines = handle.readlines()

    basename = os.path.basename(path)

    # Polarity is encoded in the file name.  For relative clauses the correct
    # statement flips depending on subject vs object extraction.
    if "positive" in basename:
        polarity = 'positive'
    elif "negative" in basename:
        polarity = 'negative'
    elif "subneg" in basename:
        polarity = 'subneg_objpos'
    elif "subpos" in basename:
        polarity = 'subpos_objneg'
    else:
        polarity = None

    # Main vs relative clause, also encoded in the file name.
    if "MC" in basename:
        clause = 'main_clause'
    elif "RC" in basename:
        clause = 'relative_clause'
    else:
        clause = 'other'

    return [
        {
            'probe': line.replace('\n', ''),
            'pos_neg': polarity,
            'clause': clause,
            'probe_n': index,
        }
        for index, line in enumerate(lines)
    ]
149c1fa9981e07564313c0836a0fb65087277594
19,712
def whodunnit():
    """:return who done it -- the creator of Python"""
    culprit = 'Guido van Rossum'
    return culprit
911f4bf17a4ccf9b88f96a6c1572d1b10c4c4163
19,718
import struct


def write_varint(data: int) -> bytes:
    """Encode a non-negative integer as a little-endian base-128 varint.

    Each output byte carries 7 payload bits; the high bit is set on every
    byte except the last to signal continuation.

    Args:
        data (int): The non-negative integer to encode into a varint.

    Returns:
        bytes: The encoded varint.

    Raises:
        ValueError: If ``data`` is negative (the previous implementation
            looped forever on negative input).
    """
    if data < 0:
        raise ValueError("varints encode non-negative integers only")
    if data == 0:
        # Zero still occupies one byte; the original loop produced b"" here.
        return b"\x00"
    packed_packets = []
    while data:
        current_byte = data & 0x7F
        data >>= 7
        packed_packets.append(
            struct.pack("B", current_byte | (0x80 if data else 0)))
    return b"".join(packed_packets)
377235dcfb8737be3047d02912e44c806e1d61c4
19,721
def health():
    """Health-check endpoint body; always answers "ping"."""
    return "ping"
ab595d7e234b8713729677df5fa8b14e8a28880c
19,722
def load_ldap_settings(config):
    """ Load all the ldap configuration settings into a dict

    LDAP configuration settings contain an ldap_ prefix.

    Args:
        config (dict): the global config

    Returns:
        (dict) All the ldap_ settings
    """
    return {key: value
            for key, value in config.items()
            if key.lower().startswith("ldap_")}
658c54bd26240c85829e1ff96fa9a7d2b6ee045c
19,725
def schema_url(base_url):
    """URL of the swagger schema document of the running application."""
    return "{}/swagger.yaml".format(base_url)
96926681bcbfdebc8da3a142571452c41fdfb785
19,726
import itertools


def feature_level_to_stage_index(strides, offset=1):
    """ calculate the level of each stage feature map by stride

    The stem sits at level ``offset`` (mapped to stage index -1); each
    subsequent stage adds ``stride - 1`` to the previous level.
    """
    levels = itertools.accumulate(
        [offset, *strides], lambda level, stride: level + stride - 1)
    return {level: stage for stage, level in enumerate(levels, -1)}
0c0a11e1639d3c300239475149bd68d6db0f3070
19,727
import re


def filter_vowel_cons_ratio(word, ratio=0.5):
    """Return True if the ratio of vowels to consonants is > `ratio`.

    This can be used as an ad-hoc pronunciation filter.  'y' counts as a
    vowel; any other non-vowel character (including digits or punctuation)
    counts as a consonant.

    :param word (str): The word
    :param ratio (float, optional): The ratio
    :rtype: bool
    """
    lowered = word.lower()
    vowel_count = len(re.findall(r'[aeiouy]', lowered))
    consonant_count = len(re.findall(r'[^aeyiuo]', lowered))
    if consonant_count == 0:
        # All-vowel (or empty) input previously raised ZeroDivisionError.
        # Treat an all-vowel word as an infinite ratio; empty input fails.
        return vowel_count > 0
    return float(vowel_count) / consonant_count > ratio
0bb87d6b6d40f83c9826eb0988888a9c1105db3b
19,728
def get_central_values(flat_input, input_size, center_size, palette_size):
    """
    Takes a flat array which is assumed to represent input_size by
    input_size by palette_size data, and returns a flat array that
    represents the center_size by center_size central values of the
    original array.
    """
    start = input_size // 2 - center_size // 2
    stop = start + center_size
    grid = flat_input.reshape((input_size, input_size, palette_size))
    center = grid[start:stop, start:stop, :]
    return center.reshape([-1])
2b62988e4fd9dcee35fba949e29a3e8045b5f909
19,731
def normalizeCUAddr(addr):
    """
    Normalize a cuaddr string: lower-case it if it's a mailto:, strip the
    trailing slash if it's an http(s) URL or absolute path.

    @param addr: a cuaddr string to normalize
    @return: normalized string
    """
    lowered = addr.lower()
    if lowered.startswith("mailto:"):
        addr = lowered
    is_url = addr.startswith(("/", "http:", "https:"))
    return addr.rstrip("/") if is_url else addr
4a3c3a994fc07c17e3e0cbea1eb6be4dab67632f
19,733
def stringify_var_names(var_list, delimiter=""):
    """Concatenate variable names with ``delimiter`` and lower-case the result.

    Parameters
    ----------
    var_list : list[str]
        Each list element is the name of a variable.
    delimiter : str, optional
        Separator inserted between names (default: none).

    Returns
    -------
    result : str
        Concatenated, lower-cased variable names.  An empty list yields ""
        (the previous implementation raised IndexError).
    """
    return delimiter.join(var_list).lower()
159c5e2b6081afa33d835a8e784e6be029c0496b
19,735
def _map_key_to_format(key): """ given key, get FITS format code. Works for arbitrary number of apertures. see http://docs.astropy.org/en/stable/io/fits/usage/table.html for details. """ # example keys being mapped for TESS: # ['bge', 'bgv', 'fdv', 'fkv', 'fsv', # 'ife1', 'ife2', 'ife3', 'ifl1', 'ifl2', # 'ifl3', 'ire1', 'ire2', 'ire3', 'irm1', 'irm2', 'irm3', 'irq1', 'irq2', # 'irq3', 'tmid_utc', 'rstfc', 'xic', 'yic'] if key in ['bge','bgv','fdv','fkv','fsv','xic','yic']: return 'D' elif 'ife' in key: # flux err return 'D' elif 'ifl' in key: # flux return 'D' elif 'ire' in key: # raw mag err return 'D' elif 'irm' in key: # raw mag return 'D' elif 'irq' in key: # quality flag return '1A' elif 'tmid_utc' in key: # timestamp return 'D' elif 'rstfc' in key: # frame id return '48A'
b82451a78ae6a3bdbfc3d0d783078767645393fb
19,736
def community(community_service, community_owner_identity, community_creation_input_data):
    """Community fixture: create and return a community via the service."""
    created = community_service.create(
        community_owner_identity,
        community_creation_input_data,
    )
    return created
9dac6c31eb344137a8376250e9105f957c870ee1
19,739
import sys


def is_pyinstaller():
    """
    Returns:
        True if we are running inside a bundled pyinstaller package.
        False otherwise.
    """
    # PyInstaller's bootloader sets sys._MEIPASS in frozen bundles; hasattr
    # is the direct equivalent of the previous getattr-in-try existence test.
    return hasattr(sys, '_MEIPASS')
f85404a8aeb5ff1a3efb63c80d8c5d2599caff02
19,743
def summarize_results(df, level='sample_name'):
    """ Summarize quality control results

    Args:
        df (pd.DataFrame): loaded using :func:`madic.io.read_transition_report`
        level (str): Choices: 'sample_name' or 'rep'
            Whether to return summary on a sample or replicate basis

    Returns:
        pd.DataFrame: DataFrame containing final pass / fail quality control
            results by peptide and sample
    """
    pass_cols = [col for col in df.columns if col.startswith('pass_')]
    summary = (df.groupby([level, 'pep'])[pass_cols]
                 .agg('all')
                 .reset_index())
    if 'interference' in df.columns:
        corrected = (df.groupby(['sample_name', 'pep'])
                       .interference.agg('any')
                       .reset_index()
                       .rename(columns={'interference': 'interference_corrected'}))
        summary = summary.merge(corrected)
    return summary
caba6b77098d91cf0285559cf3e0965763112428
19,745
def return_number():
    """
    Used to test that a single value can be returned properly

    :return: A number, 777
    """
    lucky_number = 777
    return lucky_number
e70490184120e9e6a492bb81ee2cf2eb0abfe9cf
19,746
import os
import csv


def fetch_lesson_status(dirs):
    """ load the config lesson data with some error checking

    Reads <config>/lessons.csv (skipping the header) into a
    {lesson_name: int_status} dict, rejecting duplicates and lessons
    whose source directory entry is missing.
    """
    config_file = os.path.join(dirs['config'], "lessons.csv")
    statuses = {}
    with open(config_file) as handle:
        rows = csv.reader(handle)
        next(rows)  # skip the header row
        for row in rows:
            name = row[0]
            if name in statuses:
                raise Exception(
                    "A duplicate entry was identified in lesson.csv: '{}'".format(name))
            if name not in os.listdir(dirs['src']):
                raise Exception(
                    "The src for lesson '{}' cannot be found".format(name))
            statuses[name] = int(row[1])
    return statuses
3ec599184fa9229faebf3b1d906864b58d95e454
19,747
def create_warehouse():
    """Create the global dictionary

    Builds the dictionary the Patient GUI uses to stage all inputs and
    processed results before they are sent to the server / mongoDB.
    Keys: patient_name (patient's name); MRI (medical record number);
    medical_image / medical_title / medical_des (medical image, its title
    and description); heart_rate (bps, from ecg_analysis); ecg_image /
    ecg_title / ecg_des (generated ECG trace image, its title and
    description).

    Returns:
        dict: The created dictionary
    """
    return {
        "patient_name": "",
        "MRI": "0",
        "medical_image": "",
        "medical_title": "",
        "medical_des": "",
        "heart_rate": "0",
        "ecg_image": "",
        "ecg_title": "",
        "ecg_des": "",
    }
615b79ba83738f2c4410238a1735c9398276a2cf
19,749
import sys
import codecs


def getfilesystemencoding():
    """Returns the file system encoding, but substitutes UTF-8 for ASCII."""
    encoding = sys.getfilesystemencoding()
    try:
        canonical = codecs.lookup(encoding).name
    except LookupError:
        # Unknown codec name: fall back to UTF-8.
        return "utf-8"
    return "utf-8" if canonical.lower() == "ascii" else encoding
29d7e644dae97a51f77acb46f81d81a08f777410
19,750
def rotate(given_array):
    """
    Rotate an n x n 2D array 90 degrees clockwise and return the new grid.

    n = # rows = # columns in the given 2d array.
    Time:  O(rows * cols)
    Space: O(n * n) for the output grid
    """
    # Reversing the rows then transposing yields a clockwise rotation.
    return [list(column) for column in zip(*given_array[::-1])]
e79e8e97a338150f82484b9fdf650c0f7474b002
19,751
import re def _looks_like_url(txt): """ Return True if text looks like an URL (probably relative). >>> _looks_like_url("foo.bar") False >>> _looks_like_url("http://example.com") True >>> _looks_like_url("/page2") True >>> _looks_like_url("index.html") True >>> _looks_like_url("foo?page=1") True >>> _looks_like_url("x='what?'") False >>> _looks_like_url("visit this page?") False >>> _looks_like_url("?") False """ if " " in txt or "\n" in txt: return False if "/" in txt: return True if re.search(r'\?\w+=.+', txt): return True if re.match(r"\w+\.html", txt): return True return False
50fb8a40260b7d69bd535024f9ed2e348f88f5bf
19,752
def broj_prolaza(pocetni_promjer, zavrsni_promjer, dodatak_fino, dubina_rezanja):
    """Number of roughing passes for turning down a diameter on a lathe.

    Radial stock is half the diameter difference; the finishing allowance is
    subtracted before dividing by the depth of cut per pass.

    (Croatian: pocetni/zavrsni promjer = start/end diameter, dodatak_fino =
    finishing allowance, dubina_rezanja = depth of cut.)
    """
    radial_stock = (pocetni_promjer - zavrsni_promjer) / 2.
    return (radial_stock - dodatak_fino) / dubina_rezanja
6e2b75c09119cd961fafd79672a860c1fb27340a
19,754
def script_from_saved_model(saved_model_dir, output_file, input_arrays,
                            output_arrays):
    """Generates a script for saved model to convert from TF to TF Lite."""
    template = u"""# --- Python code ---
import tensorflow as tf

lite = tf.compat.v1.lite

saved_model_dir = '{saved_model_dir}'
output_file = '{output_file}'

converter = lite.TFLiteConverter.from_saved_model(
    saved_model_dir,
    input_arrays={input_arrays},
    output_arrays={output_arrays})

tflite_model = converter.convert()
with tf.io.gfile.GFile(output_file, 'wb') as f:
  f.write(tflite_model)
print('Write file: %s' % output_file)
"""
    return template.format(
        saved_model_dir=saved_model_dir,
        output_file=output_file,
        input_arrays=input_arrays,
        output_arrays=output_arrays)
b00592217f316cd8bad99127e4e92f14a0f5910c
19,755
def priority(x, y, goal):
    """Manhattan-distance priority of cell (x, y) relative to the goal cell."""
    dx = goal[0] - x
    dy = goal[1] - y
    return abs(dx) + abs(dy)
d9c4229bb6329b6c9dcbe2469f1f18f4dfbb1d36
19,756
from typing import Iterable
from typing import Any


def argsort_iterable(x: Iterable[Any]) -> Iterable[int]:
    """Lazily yield the indexes that would sort an iterable.

    Equivalent to numpy argsort but works on any sortable python iterable
    and returns a python iterable; ties keep the original order (stable).

    Args:
        x (Iterable[Any]): The array to be sorted; must be sortable

    Returns:
        Iterable[int]: The indexes that sort x
    """
    decorated = ((value, index) for index, value in enumerate(x))
    return (index for value, index in sorted(decorated))
60ebfeecbd71e613d13f6c4e26e1cbf72bf85c75
19,758
import re


def check_email_valid(submission):
    """ Check if submission is a valid email address """
    # Loose check: something@something.something, no bare '@' sections.
    return bool(re.match(r"[^@]+@[^@]+\.[^@]+", submission))
1528df90d59c4e0cedc8c030acec1cfcd6984e64
19,760
def _resolve_collections(collections): """ Split a list of raw collections into a list of database/collection tuples :param list[str] collections: :rtype: list[(str, str|None)] """ ret = [] for raw_collection in collections: attr_chain = raw_collection.split('.', 1) database = attr_chain[0] if len(attr_chain) == 2: ret.append((database, attr_chain[1])) else: ret.append((database, None)) return ret
dca56d8ea52317bf48ddcf0383b691eacbb34f95
19,761
def humidity(params, x, etc):
    """
    Model the change in relative humidity as a linear function of the
    mean-subtracted humidity signal.

    Parameters
    ----------
    params : sequence
        params[0] (rha): offset; params[1] (rhb): slope on (rh - mean).
    x : sequence
        x[2] holds the relative-humidity array.
    etc : unused

    Returns
    -------
    Array of modeled y values.

    Revisions
    ---------
    2015-01-27  Kevin Stevenson  kbs@uchicago.edu
    """
    offset, slope = params[0], params[1]
    rh = x[2]
    return offset + slope * (rh - rh.mean())
013c39176dfb2994ec943ee127619f98d8f83d5e
19,762
def removeVowels(word):
    """
    Remove all vowels in a word/sentence.

    Parameters:
        word (string): the word in which the vowels are to be removed.

    Returns:
        The string with all vowels removed, or None if an error occurred
        (errors are reported on stdout, preserving the original interface).

    Notes:
        The previous implementation recursed once per character and hit
        Python's recursion limit (~1000 frames) on long inputs; this
        version is iterative and handles inputs of any length.
    """
    try:
        if not isinstance(word, str):  # validate input type
            raise TypeError
        return ''.join(ch for ch in word if ch not in "AEIOUaeiou")
    except TypeError:  # provided input is not a string
        print("Error: Please provide a valid word/sentence of type string and try again.")
    except Exception:  # any other unexpected error
        print("Error in removing vowels. Please try again.")
3e4d679160b911937df0fb3e9f94d913178330bc
19,763
def make_cache_key(visitor_key):
    """ make the cache key for visitor """
    return 'visitor_{}'.format(visitor_key)
e304abc765634e32b4a6b904e519207f8b71c4d9
19,764
def _format_cmd_shorty(cmd): """Get short string representation from a cmd argument list""" cmd_shorty = (' '.join(cmd) if isinstance(cmd, list) else cmd) cmd_shorty = '{}{}'.format( cmd_shorty[:40], '...' if len(cmd_shorty) > 40 else '') return cmd_shorty
356f3323b2ccb76322cd7df0ac76b5e573544c16
19,765
import math


def rotate(x, y, angle):
    """Rotate the point (x, y) about the origin.

    angle is in radians not in degrees.
    """
    cos_a = math.cos(angle)
    sin_a = math.sin(angle)
    return x * cos_a - y * sin_a, x * sin_a + y * cos_a
7a9a9d25ac1197d272ad969be57c932a6adbe35d
19,766
def XYZ_to_xy(XYZ):
    """Convert XYZ tristimulus values to xy chromaticity coordinates.

    Args:
        XYZ ([float, float, float]): X, Y, Z input values

    Returns:
        [float, float]: the [x, y] chromaticity pair
    """
    X, Y, Z = XYZ
    total = X + Y + Z
    return [X / total, Y / total]
cc41bd7dda4339c813619171d2a885c420a276a5
19,770
def parse_chunks(arg):
    """Parse a chunked-file request string.

    File string format:
        file-<filename>.<framenum>.<chunk1>%<chunk2>%<chunk3>&user_or_cache

    Returns:
        (filename, framenum, chunks, user_or_cache) where ``chunks`` is a
        list of ints (the previous implementation returned a lazy ``map``
        object on Python 3), or None when the file string has no
        frame-number component.
    """
    pieces = arg.split('&')
    filestr = pieces[0]
    binarystr = pieces[1]
    if filestr.find('file-') != -1:
        # Drop everything up to and including the 'file-' marker.
        filestr = filestr.split('file-')[-1]
    parts = filestr.split('.')
    if len(parts) < 2:
        return None
    filename, framenum = parts[0], parts[1]
    # A missing or empty chunk field means "no chunks" (the previous code
    # raised IndexError when the field was absent entirely).
    if len(parts) < 3 or len(parts[2]) == 0:
        return (filename, framenum, [], int(binarystr))
    chunks = [int(chunk) for chunk in parts[2].split('%')]
    return (filename, framenum, chunks, int(binarystr))
4f39fb164884847aabbd274dcae2aead1649a430
19,772
def check_for_permissions(context, permissions_list):
    """Helper function for parsing workspace-level permissions.

    Returns True only when the context has an authenticated user, carries a
    workspace, and the user's permission level for that workspace is one of
    ``permissions_list``.
    """
    user = context.user
    if not (user and user.is_authenticated):
        return False
    workspace = getattr(context, 'workspace', None)
    if not workspace:
        return False
    return user.permissions_for(workspace.id) in permissions_list
e99637815138ae112f4d4842cb33a94c012592af
19,773
def filter_for_ascii(text, placeholder="_"):
    """Replace each non-ASCII character in *text* with *placeholder*.

    Some unicode characters are used in the eCalendar, and some command line
    consoles don't like this, so this script replaces such characters.

    Note: the previous implementation relied on ``str(letter)`` raising an
    exception, which never happens on Python 3, so it returned the text
    unchanged; this version actually performs the substitution.

    :param text: the string to sanitize
    :param placeholder: substitute for each non-ASCII char ('_' by default)
    :return: an ASCII-only string of the same length as *text*
    """
    return "".join(ch if ord(ch) < 128 else placeholder for ch in text)
3dd3b22eb54fc2d5fa30bcd090421d0605f01583
19,774
def config_path():
    """Return the path of the config file"""
    return "/etc/jdma/jdma_config.json"
51f6a1a9ba7385e2bb46d36f0f60cc9641cdfa52
19,775
def get_ints(min, max):
    """Return a range iterator from min (inclusive) to max (exclusive).

    NOTE(review): the parameter names shadow the builtins ``min``/``max``;
    kept as-is because callers may pass them by keyword.
    """
    interval = range(min, max)
    return interval
0791a4378b89cf96187b80255835c41ef32a41c5
19,776
import requests
from bs4 import BeautifulSoup


def _get_folder_list(url, search_filter):
    """
    Fetch *url* and collect the hrefs of anchors whose target contains
    *search_filter*, resolved against the page's base location (everything
    before 'contents.html' in *url*).
    """
    page = requests.get(url)
    soup = BeautifulSoup(page.content, "html.parser")
    base = url.split('contents.html')[0]
    links = []
    for anchor in soup.findAll('a'):
        href = anchor.attrs.get('href')
        if href is not None and search_filter in href:
            links.append(base + href)
    return links
4763f23ca9b90ff55f0b82e32c92665ffc62cdbb
19,777
def make_string(seq):
    """Build a printable string from a sequence of character codes.

    Don't throw an exception when given an out of range character: codes
    outside [32, 256) are skipped, and non-integer elements are ignored.
    If nothing printable remains, str() of the whole sequence is returned
    so the caller always gets something displayable.
    """
    string = ''
    for c in seq:
        # Screen out non-printing characters.
        try:
            # Bug fix: the original tested `32 >= c < 256`, which kept
            # control codes and dropped every printable character.
            if 32 <= c < 256:
                string += chr(c)
        except TypeError:
            pass  # non-integer element: skip it
    # If no printing chars
    if not string:
        return str(seq)
    return string
ab7264afecc481852a9de7c26cec4ba61fb8b413
19,778
def tag_key_value_list(tags_dict):
    """
    Builds list of tag structures to be passed as parameter to the tag APIs

    :param tags_dict: dictionary of tags (or None)
    :return: list of {"Key": ..., "Value": ...} dicts, excluding keys in the
        reserved "aws:" namespace; [] when tags_dict is None
    """
    if tags_dict is None:
        return []
    # NOTE(review): the original condition also tested startswith
    # "cloudformation:"/"rds:", but those prefixes can never match a key
    # that already starts with "aws:", so the effective rule is simply to
    # drop the reserved "aws:" namespace.  If "aws:cloudformation:" /
    # "aws:rds:" keys were meant to be kept, fix the prefixes upstream.
    return [{"Key": key, "Value": value}
            for key, value in tags_dict.items()
            if not key.startswith("aws:")]
34646687b042f1fcdc5dedaebb15f5897143d22f
19,779