content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def solution(arr1: list, arr2: list, arr3: list) -> int:
    """Return the first element common to three sorted arrays, or 0 if none.

    Walks one pointer per array, always advancing the pointer at the
    smallest current value.

    Fix: the original had no bounds checks inside the loop, so when no
    common element existed the pointers ran past the end (IndexError) or
    the loop never terminated.

    :param arr1: sorted list of comparable values
    :param arr2: sorted list of comparable values
    :param arr3: sorted list of comparable values
    :return: the first common element, or 0 when there is none
    """
    i = j = k = 0
    while i < len(arr1) and j < len(arr2) and k < len(arr3):
        a, b, c = arr1[i], arr2[j], arr3[k]
        if a == b == c:
            return a
        # Advance the pointer sitting on the smallest value.
        smallest = min(a, b, c)
        if a == smallest:
            i += 1
        elif b == smallest:
            j += 1
        else:
            k += 1
    # Exhausted at least one array without a three-way match.
    return 0
3d62d02b23c428356b5fa944aeb324dd10d095fd
30,003
import pathlib
import os
import sys


def get_site_packages(prefix):
    """
    Returns the path to the `site-packages/` directory where Python modules
    are installed to via Pip given that the specified *prefix* is the same
    that was passed during the Pip installation.
    """
    prefix_path = pathlib.Path(prefix) if isinstance(prefix, str) else prefix
    # Windows keeps site-packages under "Lib"; POSIX nests it under a
    # version-specific lib directory.
    lib_dir = 'Lib' if os.name == 'nt' else 'lib/python{}.{}'.format(*sys.version_info)
    return prefix_path.joinpath(lib_dir, 'site-packages')
71e5a89efd59e322ad28dc0fad09f8cdc8a81a12
30,004
def solution1(nums):
    """
    Solution by myself --- Runtime: 48ms ---
    :type nums: list[int]
    :rtype: list[int]
    """
    seen = set()
    dupes = set()
    for n in nums:
        # Route each number to "dupes" on its second (or later) sighting.
        (dupes if n in seen else seen).add(n)
    return list(seen - dupes)
d0da5fa501e387280b9cf0546474bd8032a81028
30,005
def pyx_is_cplus(path):
    """
    Inspect a Cython source file (.pyx) and look for comment line like:

    # distutils: language = c++

    Returns True if such a line is present in the file, else False.

    Fix: the file handle is now closed via a ``with`` block (the original
    leaked the open file).
    """
    with open(path, 'rt') as source:
        for line in source:
            if line.startswith('#') and '=' in line:
                parts = line.split('=')
                if len(parts) != 2:
                    continue
                lhs, rhs = parts
                if lhs.strip().split()[-1].lower() == 'language' and \
                        rhs.strip().split()[0].lower() == 'c++':
                    return True
    return False
d8ad5c7884453a5dc3cec6b340d8fa5ac3094cb3
30,006
from collections import Counter


def compute_frequencies(a):
    """Frequencies from 1D array.

    Fix: import Counter from ``collections`` — ``typing.Counter`` is a
    deprecated alias meant only for annotations, not instantiation.

    :param a: iterable of hashable values
    :return: list of occurrence counts, one per distinct value
    """
    return list(Counter(a).values())
08c8b19dacfbd0d9bf4efff28254bd7823fc3f13
30,007
from typing import Dict, List, Union
import json


def load_connections() -> Dict[str, Dict[str, Union[int, str, List[str]]]]:
    """Loads the static set of connections from data/connections.json."""
    with open('data/connections.json') as connections_file:
        return json.load(connections_file)
cee39b0cb7d34b6c71de9d625fe2202c1a8d3a6b
30,008
def is_float(num):
    """
    given a string variable, returns True if the string can be directly
    converted to a float, otherwise returns False

    :param num: string to check whether or not it can be converted to a float
    :type num: string
    :return: True: the string can be converted to a float (via the
        float(<string>) function). False: it cannot.
    :rtype: boolean

    Robustness fix: also catch TypeError so non-string, non-numeric inputs
    (e.g. None, lists) return False instead of raising.
    """
    try:
        float(num)
        return True
    except (TypeError, ValueError):
        return False
57de4de786f711f609499de337cfc33994ee0a3d
30,009
def get_descendant_ids(node):
    """
    This filter returns the ids of all the node's descendants.

    :param node: The requested node
    :type node: ~integreat_cms.cms.models.abstract_tree_node.AbstractTreeNode

    :return: The list of all the node's descendants' ids
    :rtype: list [ int ]
    """
    descendants = node.get_cached_descendants(include_self=True)
    return [descendant.id for descendant in descendants]
eef8b9c0e26c2dccf061bdd402b809b9eff51b83
30,010
def kinetic_energy(momentum, mass):
    """Compute the kinetic energy of moving particles

    Arguments:
        momentum (array-like of float): momentum of the particles
        mass (array-like of float): mass of the particles

    NOTE(review): the formula divides momentum by mass without squaring the
    momentum — confirm against the intended physics (p^2 / 2m).
    """
    # For batched momenta (ndim == 3), broadcast mass across the batch axis.
    per_particle_mass = mass[None, :] if momentum.ndim == 3 else mass
    return 0.5 * (momentum / per_particle_mass[..., None]).sum(axis=(-2, -1))
4361c1497d1107def2ae4e74f8cb7ec7f1312b8d
30,013
def partitionp(n, k=-1):
    """partitionp(n) is the number of distinct unordered partitions of the
    integer n.

    partitionp(n,k) is the number of distinct unordered partitions of the
    integer n whose largest component is k."""
    if k == -1:
        # Total partition count: sum over every possible largest part.
        return sum(partitionp(n, largest) for largest in range(1, n + 1))
    if n < k:
        return 0
    if n in (0, 1):
        return 1  # base cases, {0} and {1}
    if k == 1 or n == k:
        return 1  # base cases, {1+1+...} and {n}
    # Peel off one part of size k; remaining parts are at most min(k, n-k).
    return sum(partitionp(n - k, largest) for largest in range(1, min(k, n - k) + 1))
0e50477be95b0267453fe092a1dcd97f9c9378f5
30,014
def pollutants_from_summary(summary):
    """
    Get the list of unique pollutants from the summary.

    :param list[dict] summary: The E1a summary.

    :return dict: The available pollutants, with name ("pl") as key
        and pollutant number ("shortpl") as value.
    """
    pollutants = {}
    for entry in summary:
        pollutants[entry["pl"]] = entry["shortpl"]
    return pollutants
d880a1f693139786a3ee799594ed1bd664d4b93e
30,015
def _dest_path(path): """Returns the path, stripped of parent directories of .dSYM.""" components = path.split("/") res_components = [] found = False for c in components: # Find .dSYM directory and make it be at the root path if c.endswith(".dSYM"): found = True if found: res_components.append(c) return "/".join(res_components)
869efc4265e09c182a948f6580075846a6af7c9f
30,016
from collections import Counter


def calculate_total_word_occurrences(report):
    """Takes a report object and returns the combined word occurrence counts.

    Fix: import Counter from ``collections`` — ``typing.Counter`` is a
    deprecated annotation-only alias.

    Side effects: stores the combined counter on
    ``report.word_occurrences_count`` and calls ``report.save()``.

    :param report: object with a ``document_set`` queryset and a ``save()``
        method; each document exposes ``word_occurrences_count``.
    :return: collections.Counter of combined word counts
    """
    total_word_occurrences = Counter()
    for document in report.document_set.all():
        total_word_occurrences += Counter(document.word_occurrences_count)
    report.word_occurrences_count = total_word_occurrences
    report.save()
    return total_word_occurrences
3da8bfcafa3612cf61651d4f1a82d1bec7d39864
30,017
def get_thresholds(data):
    """Function that attempts to get the high and low thresholds. Not working very well"""
    mid = len(data) // 2
    # Look for a max in a slice starting at the middle of the data.
    peak = max(data[mid:mid + 10000000])
    low_thresh = peak * 1 / 4
    high_thresh = peak * 3 / 4  # High threshold set at 3/4th of the max
    return low_thresh, high_thresh
b3b9b3257d0068be5dc80c987af7d66d3bac0181
30,018
def get_max(num_list):
    """Recursively returns the largest number from the list"""
    if len(num_list) > 1:
        # Compare the head against the max of the tail.
        return max(num_list[0], get_max(num_list[1:]))
    return num_list[0]
a1ba81d12e1a9f7aa7cfa1c9846a790a5789485d
30,019
import json


def get_device(token):
    """
    Read the device configuration from device.json file.

    :param token: directory name under /tmp holding device.json
    :return: dict - the device configuration, or None when the file is
        missing, unreadable, or not valid JSON

    Fix: replaced the bare ``except: pass`` (which swallowed every error,
    including KeyboardInterrupt) with the specific exceptions this
    best-effort read can raise.
    """
    try:
        with open("/tmp/{}/device.json".format(token), "r") as f:
            return json.load(f)
    except (OSError, ValueError):
        # json.JSONDecodeError is a subclass of ValueError.
        return None
5675113a87ee2d4abf8ce630cd167113c25e3626
30,020
import os
import pickle


def get_correlation(col1, col2):
    """
    Return correlation between col1 and col2

    Looks col1/col2 up first in the pickled jaccard matrix, then in the
    pickled pearson matrix; prints a notice and returns None when the pair
    is in neither.

    @param col1:
    @param col2:
    @return:
    """
    results_dir = f'{os.environ["WORKING_DIRECTORY"]}/results'
    with open(f'{results_dir}/jaccard.obj', 'rb') as handle:
        jaccard = pickle.load(handle)
    with open(f'{results_dir}/pandas_correlation.obj', 'rb') as handle:
        pearson = pickle.load(handle)
    if col1 in set(jaccard.columns) and col2 in set(jaccard.columns):
        return jaccard[col1][col2]
    if col1 in set(pearson.columns) and col2 in set(pearson.columns):
        return pearson[col1][col2]
    print(
        f'Correlation calculation between numerical and string type columns is not supported. Given columns: {col1} and {col2}.')
    return
cc67f3b11cdadaef31cce57afb4ac00512ff1866
30,021
import os


def subdirs(path, name):
    """Return subdirs of `path. Filter case-insensitively against `name` if
    `name` is not None.
    """
    result = []
    for entry in os.listdir(path):
        # Skip hidden entries and names that fail the (optional) filter.
        if entry.startswith('.'):
            continue
        if name is not None and entry.lower() != name.lower():
            continue
        full_path = os.path.join(path, entry)
        if os.path.isdir(full_path):
            result.append(full_path)
    return result
5db6e93f335b3d6199bec4243c8191aa251ec504
30,022
def unique(valuelist):
    """Return all values found from a list, but each once only and sorted."""
    return sorted(set(valuelist))
1218bb7c353a898b2815c5cd95b8ef71e386b91f
30,023
from typing import List
from typing import Any


def get_list_elements_containing(l_elements: List[Any], s_search_string: str) -> List[str]:
    """get list elements of type str which contain the searchstring

    >>> get_list_elements_containing([], 'bc')
    []
    >>> get_list_elements_containing(['abcd', 'def', 1, None], 'bc')
    ['abcd']
    """
    # Empty input or empty search string: return the input unchanged.
    if not l_elements or not s_search_string:
        return l_elements
    return [element for element in l_elements
            if isinstance(element, str) and s_search_string in element]
7a9ea19fcf94ca0493487c3a343f49cd45e85d08
30,024
import os def _get_user_dir(): """Get user directory.""" if os.name.lower() == "nt": user_dir = os.getenv("USERPROFILE") else: user_dir = os.path.expanduser("~") return user_dir
560e9b34a4e0797a4e034786f7477fe50e6079de
30,025
def floatformatter(*args, sig: int = 6, **kwargs) -> str:
    """
    Returns a formatter, which is essentially a string template
    ready to be formatted.

    Fixes: corrected docstring typos ("essantially", "temapate") and
    replaced the unrelated Library example with one that demonstrates
    this function.

    Parameters
    ----------
    sig : int, Optional
        Number of significant digits. Default is 6.

    Returns
    -------
    str
        The string to be formatted.

    Examples
    --------
    >>> floatformatter(sig=4).format(3.14159)
    '3.142'
    """
    return f"{{0:.{sig}g}}"
aead01fd8a2b1f97d20091af4326a7f248ea43a7
30,026
def comp_active_surface(self):
    """Compute the active surface of the conductor

    Parameters
    ----------
    self : CondType11
        A CondType11 object

    Returns
    -------
    Sact: float
        Surface without insulation [m**2]
    """
    # Wire cross-section times the number of wires per coil (tangential
    # and radial directions).
    return self.Hwire * self.Wwire * self.Nwppc_tan * self.Nwppc_rad
302bb23c4ed94084603c02437c67ef4ea8046b4b
30,028
import hashlib


def get_checksum(address_bytes: bytes) -> bytes:
    """ Calculate double sha256 of address and gets first 4 bytes

    :param address_bytes: address before checksum
    :param address_bytes: bytes

    :return: checksum of the address
    :rtype: bytes
    """
    first_round = hashlib.sha256(address_bytes).digest()
    second_round = hashlib.sha256(first_round).digest()
    return second_round[:4]
40e515603223ec68ce2b07208de47f63905dafd2
30,029
import os


def read_matrix_from_file_into_hash(path_matrix_file):
    """Read tab or space delimited file into 2D-dictionary.

    The first line is a header: its first token is ignored and the rest are
    column ids. Each following line starts with a row id followed by one
    value per column. Values are kept as strings.

    Fix: the input file is now read inside a ``with`` block so the handle
    is always closed.

    :param path_matrix_file: path to the matrix file
    :return: dict of dicts, D[row_id][column_id] -> str value
    :raises AssertionError: on missing/empty file, empty header, or
        duplicate row/column ids (preserved from the original contract)
    """
    assert os.path.exists(path_matrix_file), (
        "ERROR. There does not exist file " + path_matrix_file
    )
    with open(path_matrix_file) as input_file:
        input_lines = input_file.readlines()
    assert len(input_lines) > 0, "ERROR. Input file " + path_matrix_file + " is empty."
    header_entries = input_lines[0].strip().split()
    column_ids = header_entries[1:]
    num_columns = len(column_ids)
    assert num_columns != 0, (
        "ERROR. Number of columns in " + path_matrix_file + " equals zero. Exiting!!!"
    )
    D = {}
    for line in input_lines[1:]:
        line_columns = line.strip().split()
        row_id = line_columns[0].strip()
        assert row_id not in D.keys(), "ERROR. " + row_id + " is already in keys."
        D[row_id] = {}
        for i in range(num_columns):
            # Duplicate column ids in the header would silently overwrite,
            # so keep the original duplicate check.
            assert column_ids[i] not in D[row_id].keys(), (
                "ERROR. " + column_ids[i] + " is already in keys."
            )
            D[row_id][column_ids[i]] = line_columns[i + 1]
    return D
b0972e5b068e8ce9200c1827fa96018580275158
30,031
import torch


def logit(x: torch.Tensor, eps=1e-5) -> torch.Tensor:
    """
    Compute inverse of sigmoid of the input.
    Note: This function has not been tested for numerical stability.
    :param x:
    :param eps:
    :return:
    """
    # Clamp away from 0 and 1 so the ratio below stays finite.
    clamped = x.clamp(eps, 1.0 - eps)
    return torch.log(clamped / (1.0 - clamped))
75031bc395455784504a4dbcc6d1d58d4fce43c9
30,034
import json


def build_pagination_data(pagination_obj):
    """Assemble the pagination data."""
    # Each item serialises itself to JSON; decode back to plain dicts.
    return [json.loads(entry.to_json()) for entry in pagination_obj.items]
4e55c3aa875ba3af7a69add069875ddf6ec79eb9
30,036
import pickle


def unserialize_analysis_scores(filename):
    """Function to unserialize_analysis_scores"""
    print(filename)
    with open(filename, "rb") as handle:
        return pickle.load(handle)
7a92dd6b91e1519320b3a1449136e4b44feeb717
30,039
def multiplexer(conditions):
    """Apply the rule that matches the condition, else None"""
    def multiplexer_rl(expr):
        # First predicate that accepts the expression wins.
        for predicate, rule in conditions.items():
            if predicate(expr):
                return rule(expr)
        return None
    return multiplexer_rl
86ead383036357e9964c388983a47aa5d6908911
30,041
def FindCatNode(category_name, current_node, srcloc):
    """ Search upwards (toward the ancester nodes), looking for a node
    containing a category matching category_name (first argument).
    Useful when the user specifies a category name, but neglects to
    specify which node it was defined in.
    Note: there is no gaurantee that the category node returned by this
    function contains an entry in it's "categories" list corresponding
    to this category name.  You must check for this condition
    and handle it."""
    node = current_node
    while category_name not in node.categories:
        if node.parent is None:
            # Reached the root with no match: fall back to the global node.
            return node
        node = node.parent
    return node
96836939159e295b3aaa13b481a9c3c726190faf
30,042
import json
import yaml


def _JsonToYaml(string):
    """Converts a JSON string to YAML."""
    parsed = json.loads(string)
    # Block style (default_flow_style=False) keeps the output readable.
    return yaml.safe_dump(parsed, default_flow_style=False)
129b7d0cb9259a8f91b5a143b62c5ac0adb20859
30,043
import re
from typing import List
import os


def find_all_files(root_dir: str, pattern: re.Pattern) -> List[str]:
    """Find all files under root_dir according to relative pattern."""
    matches = []
    for current_dir, _, filenames in os.walk(root_dir):
        for filename in filenames:
            full_path = os.path.join(current_dir, filename)
            # Note: the pattern is matched against the joined path.
            if re.match(pattern, full_path):
                matches.append(full_path)
    return matches
009fe3d720c200beb01b190919ca9285df000d1c
30,044
def isHeteroplasmy(variant, depth_min=40, depth_strand=0, depth_ratio_min=0.0, freq_min=0.01):
    """ determine whether a variant is a heteroplasmy according to the filers specified

    Attributes
    ----------
    variant: MTVariant
    depth_min: the minimum depth
    depth_strand: the minimum depth on either the forward and the reverse strand
    depth_ratio_min: the minimum ratio of reads passing QC
    freq_min: the minimum minor allele frequency

    Returns
    ----------
    True: is a heteroplasmy
    False: not a heteroplasmy
    None: does not pass QC
    """
    passes_qc = (variant.depth_qc >= depth_min
                 and variant.depth_fwd >= depth_strand
                 and variant.depth_rev >= depth_strand
                 and variant.depth_ratio >= depth_ratio_min)
    if not passes_qc:
        # QC failure is reported as None, distinct from a negative call.
        return None
    return variant.alt_freq >= freq_min
0f36082b2d545a3f4ceec9474f3dfd63feda36d0
30,045
import json
import random


def generatePO():
    """Create dummy Purchase Orders and store them in pos.json.

    Each PO is assigned one random vendor and department number, along with
    a random length list of items belonging to said department.

    Reads items.json from the current directory and writes pos.json there.

    Returns:
        True if items.json successfully opens, False otherwise.
    """
    try:
        with open('items.json', 'r') as f:
            items_dict = json.load(f)
    except FileNotFoundError:
        return False
    vendors = ['Dyson', 'Ingrammicro', 'LKG', 'Inland', 'Sandisk', 'Seagate', 'Hasbro', 'Mattel',
               'Gear Head', 'Logitech', 'NTE', 'Dell', 'Microsoft', 'Right Stuff', 'Alliance', 'Energizer']
    po_dict = {}
    # Generate up to 50 POs; collisions on the random PO number are skipped,
    # so fewer than 50 may be created.
    for i in range(50):
        po_num = 24000000 + random.randint(1, 999999)
        if po_num in po_dict:
            continue
        # Department is derived from the PO number (1..7).
        po_dict[po_num] = {'department': (po_num % 7) + 1, 'items': {}, 'vendor': random.choice(vendors)}
    # Assign every item to a randomly chosen PO of the matching department.
    for key in items_dict:
        match_found = False
        loops = 0
        while not match_found:
            loops += 1
            # Safety valve: give up on an item after 200 random draws.
            if loops > 200:
                print('\n\nToo many loops.\n\n')
                break
            po, department = random.choice(list(po_dict.items()))
            department = department['department']
            print('PO department: {}'.format(department))
            print('item plu: {} department: {}'.format(key, items_dict[key]['department']))
            if items_dict[key]['department'] == department:
                max_count = random.randint(1, 20)
                po_dict[po]['items'][key] = max_count
                match_found = True
    with open('pos.json', 'w') as f:
        json.dump(po_dict, f)
    return True
31cfc91d8fd56106ef89627b54186d0111f46dcb
30,046
from typing import Union
from typing import Tuple
from typing import List
from typing import Callable
import argparse


def in_sequence_strings(sequence: Union[Tuple[str], List[str]],
                        show_on_invalid: bool = False,
                        case_sensitive: bool = True,
                        ) -> Callable:
    """
    Accepts a value that is in the tuple or list and returns that value as
    a string (for use as an argparse ``type=`` callable).

    show_on_invalid = True will show a list of acceptable values
    case_sensitive = False will convert to lower case then check if value
    is in the sequence

    Fix: corrected "excepts"/"excepted" typos in the docstring and in the
    user-facing error message.

    :raises argparse.ArgumentTypeError: (from the returned callable) when
        the value is not in the sequence
    """
    seq_string = ", ".join(sequence)
    if not case_sensitive:
        sequence = [x.lower() for x in sequence]

    def _in_sequence_strings(value: str) -> str:
        if not case_sensitive:
            value = value.lower()
        msg = f"{value} is not in the accepted value list"
        # Optionally append the full list of acceptable values.
        error_msg = f"{msg}\n{seq_string}" if show_on_invalid else msg
        if value in sequence:
            return value
        raise argparse.ArgumentTypeError(error_msg)

    return _in_sequence_strings
b991bfb894b63e8695280d019e026dc8f26cea5b
30,047
def p_simplify(r, tolerance=2.5):
    """ Helper function to parallelise simplification of geometries """
    geometry = r.geometry
    return geometry.simplify(tolerance)
06e365d0aba0434fadc2cbea057abfd213104caf
30,048
def get_fmt_and_header(
    column_names,
    all_column_groups,
    all_data_types,
    delimiter="\t",
    precision=2,
    column_width=10,
):
    """
    Prepares an fmt string and a header string for saving in a nice,
    human-readable table of fixed column width using np.savetxt

    INPUT:
    column_names is a list of strings for the current table columns
    all_column_groups is a list of strings for all possible table columns
    all_data_types is a list of strings for each of the columns in
    all_column_groups ("d" indicating integer and "f" indicating float)
    delimiter is the delimiter to be used (default is tab)
    precision is the precision to be used for data with type "f" (default is 2)
    column_width is the width of each column (default is 10)

    OUTPUT:
    out_format is the fmt string required by np.savetxt
    out_header is the header required by np.savetxt
    """
    fmt_parts = []
    header_parts = []
    for name in column_names:
        # Find the first group containing this column and emit its format.
        for group, dtype in zip(all_column_groups, all_data_types):
            if name not in group:
                continue
            if dtype == "d":
                fmt_parts.append("%%%dd" % column_width)
            elif dtype == "f":
                fmt_parts.append("%%%d.%df" % (column_width, precision))
            break
        # The header entry is emitted whether or not a group matched.
        header_parts.append(("%%%ds" % column_width) % name)
    return fmt_parts, delimiter.join(header_parts)
9da2cc8129598f11f7b222f5deb3dd681bb3e291
30,049
import hashlib


def hash_ecfp(ecfp, size):
    """
    Returns an int < size representing given ECFP fragment.

    Input must be a string. This utility function is used for various
    ECFP based fingerprints.

    Parameters
    ----------
    ecfp: str
      String to hash. Usually an ECFP fragment.
    size: int, optional (default 1024)
      Hash to an int in range [0, size)
    """
    digest = hashlib.md5(ecfp.encode('utf-8')).hexdigest()
    # Reduce the 128-bit digest into the requested bucket range.
    return int(digest, 16) % size
4850ee88735ae172216a5ae5cbd08505d8a2f42e
30,050
import os


def get_config_job_dir(job_dir, config_file):
    """Read in the file given at specified config path

    Args:
        job_dir (str): File directory path to klio config file
        config_file (str): File name of config file, if not provided
            ``klio-job.yaml`` will be used
    Returns:
        job_dir - Absolute path to config file directory
        config_file - Path to config file
    """
    if job_dir and config_file:
        config_file = os.path.join(job_dir, config_file)
    # Fall back to the current directory, then normalise to an absolute path.
    job_dir = os.path.abspath(job_dir or os.getcwd())
    if not config_file:
        config_file = os.path.join(job_dir, "klio-job.yaml")
    return job_dir, config_file
bc2c471a234f5535b0f8583e412c4c2abbdedfa8
30,051
import os


def get_obj_texture_file_name(filename):
    """ Given an OBJ file name return a file name for the texture by
    removing .obj and adding .png """
    base_name = os.path.basename(filename)
    stem, _ext = os.path.splitext(base_name)
    return "{}.png".format(stem)
574f2ee74ef0b017c35c628463c79d1405e60058
30,052
import os
import hmac
import hashlib


def verify_hmac_hash(data, signature):
    """Verify a GitHub webhook payload.

    Recomputes the HMAC-SHA1 of *data* using the GITHUB_SECRET environment
    variable and compares it (constant-time) against *signature*, which is
    expected in GitHub's "sha1=<hexdigest>" form.
    """
    secret = os.environ['GITHUB_SECRET'].encode('UTF-8')
    digest = hmac.new(secret, msg=data, digestmod=hashlib.sha1).hexdigest()
    return hmac.compare_digest('sha1=' + digest, signature)
295e24d017f8d49a17e12cee7f98f472bc56a253
30,054
def dict_union(a, b):
    """
    Return the union of two dictionaries without editing either.
    If a key exists in both dictionaries, the second value is used.
    """
    if not a:
        return b or {}
    if not b:
        return a
    merged = dict(a)
    merged.update(b)
    return merged
5f228221a4a0fc5f322c29b8500add4ac4523e04
30,055
def convert_neg_indices(indices, ndim):
    """converts negative values in tuple/list indices"""
    # Negative axes count from the end, so shift them by ndim.
    return tuple(axis + ndim if axis < 0 else axis for axis in indices)
f209e057f3b4744e0df30dc9a214076d3e80dfec
30,057
def gef_pybytes(x: str) -> bytes:
    """Returns an immutable bytes list from the string given as input."""
    return str(x).encode("utf-8")
aa349940a72129329b850363e8cb807566d8d4b8
30,058
import importlib.util
import sys


def import_file(path):
    """Import a Python source file from *path* and return the module.

    Fix: the original depended on the ``imp`` module (removed in Python
    3.12) and the deprecated ``Loader.load_module``. This version uses the
    supported ``importlib.util`` API and still registers the module under
    the name "tmp" in ``sys.modules``, matching the old behaviour.

    :param path: filesystem path of the .py file to import
    :return: the imported module object
    """
    spec = importlib.util.spec_from_file_location("tmp", path)
    module = importlib.util.module_from_spec(spec)
    # load_module used to register the module; keep that side effect.
    sys.modules["tmp"] = module
    spec.loader.exec_module(module)
    return module
f0bc3b55bc14f9b1f932a902e53879b869276cbf
30,059
import numpy


def ndim(a):
    """
    Return the number of dimensions of an array.

    Parameters
    ----------
    a : array_like
        Input array. If it is not already an ndarray, a conversion is
        attempted.

    Returns
    -------
    number_of_dimensions : int
        The number of dimensions in `a`. Scalars are zero-dimensional.

    Examples
    --------
    >>> ndim([[1, 2, 3], [4, 5, 6]])
    2
    >>> ndim(1)
    0
    """
    # Array-likes expose .ndim directly; everything else is converted.
    if hasattr(a, "ndim"):
        return a.ndim
    return numpy.asarray(a).ndim
bcf0a23535ae78d22eed6b45c833b44db851cc66
30,060
def parse_STATS_header(header):
    """
    Extract the header from a binary STATS data file.

    This extracts the STATS binary file header information into variables
    with meaningful names.

    @param header : sequence of 12 header fields
    @return: tuple (spcid, vsrid, chanid, bps, srate, errflg, year, doy,
                    sec, freq, orate, nsubchan)
    """
    # Field order (12 entries):
    #  1) spcid    - station id - 10, 40, 60, 21
    #  2) vsrid    - vsr1a, vsr1b ...
    #  3) chanid   - subchannel id 0,1,2,3
    #  4) bps      - bits per sample - 1, 2, 4, 8, or 16
    #  5) srate    - samples per second
    #  6) errflg   - hardware error flag (0 ==> no errors)
    #  7) year     - time tag - year
    #  8) doy      - time tag - day of year
    #  9) sec      - time tag - second of day
    # 10) freq     - frequency in Hz
    # 11) orate    - statistics samples per second
    # 12) nsubchan - number of output sub chans
    (spcid, vsrid, chanid, bps, srate, errflg,
     year, doy, sec, freq, orate, nsubchan) = header
    return (spcid, vsrid, chanid, bps, srate, errflg,
            year, doy, sec, freq, orate, nsubchan)
87ee5bd8971f62304c7e48f7b573a1a89c604c65
30,061
def parse_params(params):
    """parse kedro_mlflow config from params"""
    if __package__ not in params:
        # No package-specific config: wrap everything under "params".
        return {"params": params}
    config = params.pop(__package__)
    assert isinstance(config, dict), "params:kedro_mlflow must be a dictionary or None"
    if "params" in config:
        # An explicit selection restricts which params are kept.
        selection = config["params"]
        assert isinstance(selection, list), "params:kedro_mlflow.params must be a list"
        config["params"] = {
            key: value for key, value in params.items() if key in selection
        }
    else:
        config["params"] = params
    return config
5cca5fef099ce88363de3b065abbe9d32e991525
30,063
import numpy


def dddspline(alpha, control_points):
    """Computes the third derivative of a Cubic Bezier curve wrt alpha.

    Args:
        alpha: scalar or list of spline parameters to calculate the curve at.
        control_points: n x 4 matrix of control points. n[:, 0] is the
            starting point, and n[:, 3] is the ending point.

    Returns:
        n x m matrix of third-derivative values. n is the dimension of the
        control points, and m is the number of points in 'alpha'.
    """
    if numpy.isscalar(alpha):
        alpha = [alpha]
    # The third derivative of a cubic is constant in alpha, so every row of
    # the coefficient matrix is identical: [-6, 18, -18, 6].
    row = [-6.0, 18.0, -18.0, 6.0]
    coefficient_rows = [row for _ in alpha]
    return control_points * numpy.matrix(coefficient_rows).T
b6f7554da52b57a6bbd84efce81e730670ce6958
30,064
import decimal


def recall(qtd_true_positives, list_of_true_positive_documents, ref=0):
    """
    TP / (TP + FN)

    TP - True Positive
    FN - False Negative - a document is positive but was classified as negative
    """
    # A false negative is a positive document predicted below the reference.
    false_negatives = sum(
        1 for doc in list_of_true_positive_documents if doc.predicted_polarity < ref
    )
    tp = decimal.Decimal(qtd_true_positives)
    fn = decimal.Decimal(false_negatives)
    return tp / (tp + fn)
7656a565a9ef42d06d8d7deeb3f14966e1fcf138
30,066
def build_resized_image_url(image_server_url, original_url, width, height, process_mode="fitfill"):
    """
    Convert an image url to the appropriate image server format to get a
    resized version of the image. Needs attributes : width, height and
    process_mode.

    NB: the original_url needs to be publicly accessible (as it will be
    downloaded by the image server).

    Fixes: raise ValueError instead of bare Exception (ValueError is a
    subclass of Exception, so existing ``except Exception`` callers still
    work) and corrected the "unkwnown" typo in the error message.

    :raises ValueError: if process_mode is not one of crop/fit/fitfill
    """
    SUPPORTED_MODES = ["crop", "fit", "fitfill"]
    if process_mode not in SUPPORTED_MODES:
        raise ValueError("unknown process mode '%s'. should be one of %s" % (process_mode, SUPPORTED_MODES))
    if not image_server_url:
        ## no image server, returning the origin url
        return original_url
    # Strip the protocol and any doubled slashes before embedding the url.
    cleaned_url = original_url.replace("http://", "").replace("https://", "").replace("//", "")
    return "%s/ext/%s/%sx%s/%s" % (image_server_url, process_mode, width, height, cleaned_url)
e5d9187aae6a160bf21976282bb13e15b0bbd76e
30,067
from typing import Callable from typing import Tuple from typing import Any def _run_tests_sequential(parameter_combinations: Callable[[], Tuple[Any, ...]], run_test: Callable[..., bool]) -> bool: """Tests all combinations sequentially.""" return all(run_test(*c) for c in parameter_combinations())
e11acf52095a7c1e6a6f4e0930d7bb6cd4034a96
30,068
def bool_converter(s):
    """Return the same as built-in function bool() except for arguments
    which are string representations of a boolean value.

    :param s: a variable
    :return: True or False
    """
    if not isinstance(s, str):
        return bool(s)
    if s in ('False', 'false', '0'):
        return False
    if s in ('True', 'true', '1'):
        return True
    # Any other string is rejected rather than falling back to bool(s).
    raise ValueError(
        'Expected one of {}, received : {}'.format(
            ('False', 'false', '0', 'True', 'true', '1'), s
        )
    )
72f869ed4b2f2dde075662a6decfd8db0852c514
30,072
from datetime import datetime


def fromisoformat(isoformat):
    """
    Return a datetime from a string in ISO 8601 date time format

    >>> fromisoformat("2019-12-31 23:59:59")
    datetime.datetime(2019, 12, 31, 23, 59, 59)
    """
    try:
        # datetime.fromisoformat exists on Python >= 3.7.
        return datetime.fromisoformat(isoformat)
    except AttributeError:
        # Older interpreters: fall back to an explicit strptime format.
        return datetime.strptime(isoformat, "%Y-%m-%d %H:%M:%S")
bcb7e277e907a5c05ca74fd3fbd7d6922c0d7a36
30,073
import argparse


def get_command_line_arguments():
    """
    Get the command line arguments

    :return:(Namespace) The parsed command line arguments

    Fix: a required positional argument silently ignores its ``default``;
    ``nargs='?'`` makes it optional so 'models/model.pt' is actually used
    when no path is supplied.
    """
    parser = argparse.ArgumentParser(description='Training script for CartPole-v0 RL model.')
    parser.add_argument('model_path', nargs='?',
                        help='Path to load model state from.',
                        type=str,
                        default='models/model.pt')
    args = parser.parse_args()
    return args
35b3294e8fe20b1a2a014c0b3ab8b603beb88df0
30,075
def _generator_to_list(subnets_generator): """Returns list of string representations of each yielded item from the generator. """ subnets = [] for subnet in subnets_generator: subnets.append(str(subnet)) return subnets
f72c1f735d692a820d20eb14cd13e88abe21b2bb
30,076
def calculateFreq(counted_char_dict):
    """ Calculate Probability (w/ replacement) based on Frequency of Characters

    Fix: the underscore check used ``probability_dict["_"]`` directly, which
    raised KeyError whenever "_" was absent from the counts; use .get().

    :param counted_char_dict: mapping of character -> occurrence count
    :return: mapping of character -> probability
    """
    probability_dict = {}
    # Counting the total number of characters.
    totalChars = sum(counted_char_dict.values())
    # For each Key-Value pair, calculate probability with replacement.
    for key, value in counted_char_dict.items():
        probability_dict[key] = value / totalChars
    # Cannot divide Log(1/0) in Shannon Entropy, set low value for underscore ("_")
    if probability_dict.get("_") == 0:
        probability_dict["_"] = 1e-100
    return probability_dict
0005e83aeb6ae52a924aa44bf2f6b381ac6e5f4c
30,077
def part1(data):
    """Compute gamma * epsilon from a list of equal-length bit strings.

    >>> part1([
    ...     '00100', '11110', '10110', '10111', '10101', '01111',
    ...     '00111', '11100', '10000', '11001', '00010', '01010'
    ... ])
    198
    """
    width = len(data[0])
    # Count how many strings have a '1' at each position.
    ones = [sum(1 for s in data if s[i] == '1') for i in range(width)]
    half = len(data) // 2
    gamma = epsilon = 0
    # Build both numbers MSB-first: gamma takes the majority bit,
    # epsilon the minority bit.
    for tally in ones:
        gamma <<= 1
        epsilon <<= 1
        if tally > half:
            gamma |= 1
        else:
            epsilon |= 1
    return gamma * epsilon
f05da26dc1bb4f9f4cbb14b523ebc92a9de3f936
30,079
def is_selected_branch(branch: str) -> bool:
    """Determine whether a branch is the current branch."""
    # `git branch` marks the checked-out branch with a leading "* ".
    marker = "* "
    return branch[:len(marker)] == marker
f7569964a3aee08a7995f66332a890ec03cee096
30,080
import glob
import os


def expand_filenames(filenames):
    """ expands the filenames, resolving environment variables, ~ and globs

    Fix: replaced the third-party ``sh.glob`` (which is itself a re-export
    of the stdlib glob) with ``glob.glob``, dropping an unnecessary
    dependency.

    :param filenames: iterable of path strings (may contain $VARS, ~ and
        glob wildcards)
    :return: list of expanded path strings
    """
    expanded = []
    for filename in filenames:
        filename = os.path.expandvars(os.path.expanduser(filename))
        # Only hit the filesystem when the name contains glob wildcards.
        if any((c in filename) for c in "?*["):
            expanded += glob.glob(filename)
        else:
            expanded += [filename]
    return expanded
d100b166356f7753c554cb81551d90e4183a08d9
30,081
def getFile(file):
    """ Read out a file and return a list of string representing each line """
    with open(file, "r") as handle:
        return handle.readlines()
b869a7252fc45fdc6877f22975ee5281650877b9
30,083
def format_chores(chores):
    """
    Formats the chores to properly utilize the Oxford comma

    Fix: the original rewrote ``chores[-1]`` in place, mutating the
    caller's list; this version works on a copy.

    :param list chores: list of chores
    :return: formatted string joining the chores
    """
    if len(chores) == 1:
        return f'{chores[0]}'
    if len(chores) == 2:
        return f'{chores[0]} and {chores[1]}'
    # Three or more: Oxford comma before the final "and".
    items = chores[:-1] + ['and ' + chores[-1]]
    return ", ".join(items)
38b7115520867e2545f7be7364e6147ca12dc8e1
30,085
import math


def get_line_size(l):
    """
    Return the size of the given line
    """
    dx = l[0] - l[2]
    dy = l[1] - l[3]
    return math.sqrt(math.pow(dx, 2) + math.pow(dy, 2))
07a1d92e19e2b6104bf8d63459e588449d45912e
30,086
def _cal_distance2(dx, dy): """欧式距离的平方(Euclidean distance squared)""" return dx ** 2 + dy ** 2
9fd9d6e7e755694b68a0cea64bb26655949eb5f6
30,087
def preprocess_input(x):
    """Zero-center by mean pixel (in place) and return the array."""
    # Channel-first layout: BGR mean pixel values.
    channel_means = (103.939, 116.779, 123.68)
    for channel, mean in enumerate(channel_means):
        x[channel, :, :] -= mean
    return x
242be2a92cc15f54fed94a431750e1fd142346a9
30,088
from typing import Optional
from pathlib import Path
import json


def get_parameters(path: Optional[Path]) -> dict:
    """
    Get hyper parameters to creating model
    """
    # Fall back to the config.json shipped next to this module.
    config_path = path if path is not None else Path(__file__).parent / "config.json"
    if not config_path.exists():
        raise IOError("The path to the config was not found.")
    with open(config_path, "r", encoding="utf8") as config_file:
        return json.load(config_file)
37a4af2878dcaa386231ebbce4cea1735d8027b8
30,089
def centercrop(pic, size):
    """ Center-crops a picture in a square shape of given size. """
    # Top-left corner of the centred square window.
    col = pic.shape[1] / 2 - size / 2
    row = pic.shape[0] / 2 - size / 2
    return pic[int(row):int(row + size), int(col):int(col + size)]
fa89253af6be414ef242ee0ba4b626ab676980a0
30,091
import time


def get_current_month(formatted='%Y-%m'):
    """Return the current month as a formatted string.

    :param formatted: strftime format, defaults to '%Y-%m' (e.g. 2018-02)
    """
    return time.strftime(formatted)
b9507eceaf0c35d4a159e118f012dfec4deb0bc8
30,093
def early_stopping(val_bleus, patience=3):
    """Return True when validation Bleu-4 scores have stopped improving for
    *patience* consecutive epochs."""
    # Cannot judge convergence before `patience` epochs have elapsed.
    if patience > len(val_bleus):
        return False
    recent = val_bleus[-patience:]
    # Plateau: the last `patience` scores are all identical.
    if len(set(recent)) == 1:
        return True
    best = max(val_bleus)
    if best in recent:
        # Converged only if that best score was already reached earlier;
        # a brand-new best means we are still improving.
        return best in val_bleus[:len(val_bleus) - patience]
    # The best score lies entirely in the past: no recent improvement.
    return True
dafc48f674673736e5a129aab8ce4c40fdbcbbeb
30,096
def indent_code(input_string):
    """Indent every line of *input_string* by four spaces.

    Surrounding whitespace of the whole string is stripped first; a trailing
    blank line is appended to the result.
    """
    lines = input_string.strip().split("\n")
    indented = "".join("    {}\n".format(line) for line in lines)
    # one last newline for luck
    return indented + "\n"
5b110ed9bb1a4bbf471c5c4126c7460b6eb6ba1c
30,097
import torch


def sigmoid2predictions(output: torch.Tensor) -> torch.Tensor:
    """Map sigmoid scores to hard 0/1 predictions (a score of exactly 0.5
    maps to 0.5, since sign(0) == 0)."""
    shifted_sign = torch.sign(output - 0.5)
    return (shifted_sign + 1) / 2
79c144f1b942aba81be33f2e84a1dface05358bf
30,099
import torch


def set_devices(model, loss, evaluator, device):
    """Place *model* and *loss* on *device* and wire up *evaluator*.

    :param model: torch.nn.Module to move to the device
    :param loss: torch.nn.Module training loss, moved to the same device
    :param evaluator: object exposing a ``loss`` module (kept on CPU) and
        receiving ``model_device`` / ``model`` attributes
    :param device: torch.device-compatible string, or tuple/list of devices.
        If a tuple/list, the model is wrapped in torch.nn.DataParallel and
        the first entry becomes the 'primary' device.
    :return: ``(model, evaluator)`` -- note the model may now be a
        DataParallel wrapper, so callers should use the returned value
    """
    print("Using device: ", device)
    if isinstance(device, (tuple, list)):
        model = torch.nn.DataParallel(model, device_ids=device)
        print("Using multi GPU compute on devices:", device)
        # The first listed device acts as the primary one.
        device = torch.device(device[0])
    device = torch.device(device)  # Will throw a more explicit error if the device specification is invalid
    model.to(device)
    loss.to(device)
    # Evaluation loss deliberately stays on CPU.
    evaluator.loss.cpu()
    evaluator.model_device = device
    evaluator.model = model
    return model, evaluator
d15de114b4e5099c9c33edf5e82d95d2babed3cd
30,100
def find_where_wires_cross(coords1, coords2):
    """Return coordinates present in both wires' coordinate collections."""
    unique1 = set(coords1)
    unique2 = set(coords2)
    return [coord for coord in unique1 if coord in unique2]
a98d1ce7ee0a3abfb72b7f46e7b078475807934c
30,101
import subprocess


def run(cmd):
    """Run a shell command and return its stdout as a list of lines
    (trailing empty line dropped).

    .. code-block:: python

        >>> from utils import sysx
        >>> print(sysx.run('echo "hello"'))
        ['hello']
    """
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
    output, _ = process.communicate()
    lines = output.decode().split('\n')
    return lines[:-1]
516798beca4852cf8ed6f910689c907125ff996d
30,102
def fix_hardmix_file(hardmix_str, tmpdir_factory):
    """Fixture helper for a hard profile file: write *hardmix_str* into a
    temporary ``hardmix.json`` and return its path as a string."""
    mix_file = str(tmpdir_factory.mktemp("profs").join("hardmix.json"))
    with open(mix_file, "w") as out:
        out.write(hardmix_str)
    return mix_file
3629d6529c26f5a9af82c52e80092030742ef16a
30,106
def btod(binary):
    """Convert a binary digit string (e.g. '1010') to a decimal integer."""
    return int(binary, base=2)
202ce460bf9fa5675e195ca2714d8f5f0576020c
30,107
import importlib


def builtin_packages():
    """Attempt to import every module listed in ``../builtin_list.txt``.

    :return: list of module names that failed to import

    Fixes over the original: the file handle is closed via ``with``, the
    useless initial ``builtin_packages = ""`` assignment is gone, and the
    bare ``except`` (which also swallowed KeyboardInterrupt/SystemExit) is
    narrowed to ``Exception`` while still catching non-ImportError failures
    raised during module import.
    """
    with open('../builtin_list.txt') as f:
        listed = f.readlines()
    failures = []
    for raw in listed:
        name = raw.strip(" ").strip("\n")
        try:
            importlib.import_module(name)
        except Exception:
            failures.append(name)
    return failures
b6bb083304c8fde619a9d2a902d91e444eaebfa6
30,108
import math


def circle_area(radius: float) -> float:
    """Return the area of a circle with the given *radius*.

    >>> circle_area(10)
    314.1592653589793
    >>> circle_area(0)
    0.0
    """
    # Kept as pi * r * r (not r ** 2) to preserve the exact float result.
    return math.pi * radius * radius
12117b7c491435375215625edaa62f4172594bfc
30,109
from typing import Optional
from typing import Dict
from typing import Any
import ssl
from typing import TYPE_CHECKING


def create_internal_client_ssl_context(
    ssl_settings: Optional[Dict[str, Any]],
) -> Optional[ssl.SSLContext]:
    """Create an SSL context based on the given SSL settings for connecting
    to internal Director servers (i.e. manager -> orchestrator, shell server
    -> manager).

    WARNING: Do not use this for anything other than internal connections!

    If ssl_settings is None, this function returns None. Otherwise,
    ssl_settings must be a dictionary in the following format::

        {
            "cafile": "<path to CA file used to verify server certificates>",
            "client_cert": {
                "certfile": "<path to client certificate file>",  # Required
                "keyfile": "<path to client private key file>",
                    # Taken from certfile if not passed
                "password": "<private key password>",
                    # Required if private key is encrypted
            },
        }

    (The "client_cert" parameter is optional.)

    Some additional parameters are set on the SSL context:

    - It is set up for use authenticating servers (creating client-side
      sockets)
    - A certificate is required and validated upon reception.
    - HOSTNAME CHECKING IS DISABLED. As a result, "cafile" MUST point to a
      trusted CA! It is recommended to use a self-signed certificate on the
      remote server and point "cafile" to this certificate.
    """
    if ssl_settings is None:
        return None
    context = ssl.create_default_context(
        purpose=ssl.Purpose.SERVER_AUTH,
        cafile=ssl_settings["cafile"],
    )
    # Require and verify the peer certificate, but skip hostname matching:
    # trust is anchored entirely in "cafile" (see docstring warning).
    context.verify_mode = ssl.CERT_REQUIRED
    context.check_hostname = False
    if hasattr(ssl, "TLSVersion"):  # Added in Python 3.7
        # Hidden from static type checkers; at runtime this enforces
        # TLS >= 1.2 for all internal connections.
        if not TYPE_CHECKING:
            context.minimum_version = (  # pylint: disable=no-member
                ssl.TLSVersion.TLSv1_2  # pylint: disable=no-member
            )
    client_certinfo = ssl_settings.get("client_cert", None)
    if client_certinfo is not None:
        # The password callable is only invoked when the key is encrypted.
        context.load_cert_chain(
            certfile=client_certinfo["certfile"],
            keyfile=client_certinfo.get("keyfile"),
            password=lambda: client_certinfo["password"],  # type: ignore
        )
    return context
0d165693e71bd56de8aa2338c68b7e06e6012d58
30,110
def set_progress_percentage(iteration, total):
    """Return the progress percentage for a 0-based *iteration*.

    :param iteration: Order number of browser or version
    :param total: Total number of browsers or versions
    :return: Percentage as a float
    """
    completed = iteration + 1
    return float(completed / total * 100)
1881cd84e2af051379dc293b0e71067fc7be92a0
30,111
def check_data_names(data, data_names):
    """
    Check *data_names* against *data*.

    A single string is wrapped into a one-element tuple; other sequence
    types (tuple, list) are passed through unchanged.

    Examples
    --------

    >>> import numpy as np
    >>> east, north, scalar = [np.array(10)]*3
    >>> check_data_names((scalar,), "dummy")
    ('dummy',)
    >>> check_data_names((scalar,), ("dummy",))
    ('dummy',)
    >>> check_data_names((scalar,), ["dummy"])
    ['dummy']
    >>> check_data_names((east, north), ("component_x", "component_y"))
    ('component_x', 'component_y')
    """
    # A lone string counts as one name.
    if isinstance(data_names, str):
        data_names = (data_names,)
    if data_names is None:
        raise ValueError("Invalid data_names equal to None.")
    # Each data component needs exactly one name.
    if len(data) != len(data_names):
        raise ValueError(
            "Data has {} components but only {} names provided: {}".format(
                len(data), len(data_names), str(data_names)
            )
        )
    return data_names
7f888fe28fd3a3f11d95713b0920902142af2a1c
30,112
import random


def broken_shuffle_7(values):
    """this is broken in a hard to find way...

    NOTE(review): deliberately left broken -- it drains the caller's list
    via pop() (the input ends up empty), and an empty input raises
    ValueError from randrange(0, 0).
    """
    shuffled = []
    while True:
        chosen = random.randrange(0, len(values))
        shuffled.append(values.pop(chosen))
        if len(values) == 0:
            break
    return shuffled
02bd16e15a4223506375bf2b0dfc953bc2b2fb45
30,113
import json


def read_json(filename):
    """FILENAME is the name of a json file containing option information.

    Loads and returns the parsed JSON object (the option dictionary to be
    edited).
    """
    # `with` closes the handle on both success and error paths; the original
    # leaked the file descriptor when json.load() raised.
    with open(filename, "r") as data_file:
        return json.load(data_file)
865db98a6f8f5faf729b72e63a44fb3ce45ee7a0
30,114
def camelsplit(camel_string):
    """Splits 'camel_string' at 'camel case aware' word boundaries and
    returns the result a list of strings.

    >>> camelsplit('CamelCase')
    ['Camel', 'Case']
    >>> camelsplit('HTTPRequest')
    ['HTTP', 'Request']
    >>> camelsplit('IEEE 802.11ac')
    ['IEEE', ' 802.11', 'ac']
    """
    # Single-pass state machine: `w` accumulates the current word; flags
    # track whether the previous char was uppercase / alphabetic.
    res = []
    w = ''
    last_upper = False
    last_alpha = True
    for c in camel_string:
        if c.isalpha():
            if last_alpha:
                if c.isupper():
                    # lower -> UPPER transition starts a new word (CamelCase).
                    if len(w) > 0 and not last_upper:
                        res.append(w)
                        w = ''
                    last_upper = True
                else:
                    # UPPER-run followed by lower: the final capital belongs
                    # to the new word ('HTTPRequest' -> 'HTTP' + 'Request').
                    if len(w) > 1 and last_upper:
                        res.append(w[0:-1])
                        w = w[-1]
                    last_upper = False
            else:
                # non-alpha -> alpha boundary: flush the pending word.
                if w != '':
                    res.append(w)
                    w = ''
                last_upper = c.isupper()
            last_alpha = True
        else:
            # alpha -> non-alpha boundary: flush the pending word.
            if last_alpha and w != '':
                res.append(w)
                w = ''
            last_alpha = False
        w += c
    # Flush whatever is left after the final character.
    if w != '':
        res.append(w)
    return res
78fafbdc5c19b34cc55c732837e6e5cb2182dc81
30,115
def round_probabilities(p):
    """One-hot each row: the element holding the row maximum becomes 1.0,
    all others 0.0. E.g. [[0.1,0.2,0.7],[0.5,0.2,0.3]] -> [[0,0,1],[1,0,0]].

    NOTE: behaviour for ties within a row is untested (multiple 1s appear).
    """
    row_max = p.max(axis=1)[:, None]
    return (p == row_max).astype(float)
98c5e3ca4b618db6f6097380ce982506e8c876a8
30,116
def has_loop(page_list):
    """Check whether a list of page hits contains an adjacent page loop,
    i.e. (A >> A >> B) == True.

    :param page_list: list of page hits derived from BQ user journey
    :return: True if any two consecutive hits are identical
    """
    for current, following in zip(page_list, page_list[1:]):
        if current == following:
            return True
    return False
461691e62a900e84f779ed96bdf480ce8a5367af
30,117
def linear(a, b, x, min_x, max_x):
    """Clamped linear interpolation: ramps from *a* to *b* as *x* goes from
    *min_x* to *max_x*, flat outside that range.

           b ___________
            /|         /
           / |        /
    a ____/  |       /
         |   |
       min_x max_x
    """
    t = (x - min_x) / (max_x - min_x)
    t = min(max(t, 0), 1)
    return a + t * (b - a)
d6bbac95a0a53663395cd4d25beeadbaa5f9f497
30,118
import platform


def process_include_filename(include_file):
    """Normalize a sudoers-style @include path.

    Backslash-escaped spaces are unescaped unless the whole path is wrapped
    in double quotes, a literal backslash is written as a double backslash,
    and the %h escape expands to the machine's short host name (so
    ``@include /etc/sudoers.%h`` on host "xerxes" includes
    ``/etc/sudoers.xerxes``).
    """
    # Collapse escaped backslashes first.
    include_file = include_file.replace('\\\\', '\\')
    if len(include_file) > 2:
        quoted = include_file[0] == '"' and include_file[-1] == '"'
        if quoted:
            include_file = include_file[1:-1]
        else:
            include_file = include_file.replace('\\ ', ' ')
    if '%h' in include_file:
        include_file = include_file.replace('%h', platform.uname()[1])
    return include_file
a6af24e366d3575f1a6b569572d4201d2a953e14
30,120
def splitAt(s, i, gap=0):
    """Split *s* into two strings at index *i*, dropping *gap* characters
    after the split point."""
    head = s[:i]
    tail = s[i + gap:]
    return head, tail
96f902ae6d0c26c4f7ab22325db067a7331333ec
30,121
def form_message(sublines, message_divider):
    """Form the message portion of a speech bubble.

    :param sublines: list of the chars/strings to go on each line
    :param message_divider: divider string placed above and below the body
    :return: formatted bubble string
    """
    body = "".join(sublines)
    return message_divider + body + message_divider
c3ca1c2a684d25b439a594cfc5ade187f272a2c1
30,123
from typing import BinaryIO
from typing import Optional
from io import StringIO


def decode_object(
    handle: BinaryIO,
    num_lines: Optional[float] = None,
) -> StringIO:
    """
    Decode (up to *num_lines* of) a binary file-like object into an
    in-memory text one.

    Files transferred over http arrive as binary objects; this converts the
    first lines so they can be inspected as text. Both the returned StringIO
    and *handle* are rewound to the start before returning.
    """
    limit = float("inf") if num_lines is None else num_lines
    decoded = [line.decode() for index, line in enumerate(handle) if index < limit]
    out = StringIO()
    out.writelines(decoded)
    out.seek(0)
    handle.seek(0)
    return out
5c08438727290ea797ee93261ccd6f03763b7f36
30,125
import torch


def get_grad_norm_from_optimizer(optimizer, norm_type=2):
    """
    Get the gradient norm for all parameters contained in an optimizer.

    Arguments:
        optimizer (torch.optim.Optimizer or None)
        norm_type (int): Type of norm. Default value is 2.

    Returns:
        norm (float): 0.0 when the optimizer is None or no parameter has a
            gradient.
    """
    total_norm = 0
    if optimizer is not None:
        for param_group in optimizer.param_groups:
            for p in param_group['params']:
                if p.grad is not None:
                    with torch.no_grad():
                        param_norm = p.grad.data.norm(norm_type)
                    total_norm += param_norm ** norm_type
    total_norm = total_norm ** (1. / norm_type)
    # Bug fix: when no gradient was accumulated, total_norm is a plain
    # Python number with no .item() -- the original raised AttributeError
    # for optimizer=None or gradient-free optimizers.
    if isinstance(total_norm, torch.Tensor):
        return total_norm.item()
    return float(total_norm)
c8987953f23d6023d3b4f3abf894b98d720662fb
30,128
import heapq


def find_closest(x, visited, y=None):
    """
    Returns leaf label for closest leaf to the node x through path not
    travelling through visited. If y is populated returns path from x to y
    not travelling through nodes in visited.

    Parameters
    ----------
    x : dendropy node object
    visited : set of dendropy node objects (mutated: nodes are added as they
        are expanded -- note the code calls ``visited.add``, so despite the
        original doc saying "list", a set is expected)
    y : dendropy node object, optional

    Returns
    -------
    If y == None : dendropy node object of closest leaf y to the node x
    through path not travelling through nodes in visited, list containing
    dendropy node objects on path to that leaf y from node x
    If y != None : dendropy node object y, list containing dendropy node
    objects on path from node x to leaf y not travelling through nodes in
    visited
    """
    # Dijkstra-style best-first search over the tree. Heap entries are
    # [path_length, tie_breaker, path_so_far, node]; `cnt` is a strictly
    # increasing tie-breaker so heapq never compares node objects directly.
    queue = []
    cnt = 1
    visited.add(x)
    # Seed the frontier with x's parent and children.
    if x.get_parent() and x.get_parent() not in visited:
        tmp = []
        tmp.append(x)
        heapq.heappush(queue, [x.get_edge_length(), cnt, tmp, x.get_parent()])
        cnt += 1
    for child in x.child_nodes():
        if child and child not in visited:
            tmp = []
            tmp.append(child)
            heapq.heappush(queue, [child.get_edge_length(), cnt, tmp, child])
            cnt += 1
    while len(queue) > 0:
        try:
            [length, _, path, node] = heapq.heappop(queue)
        except IndexError:
            break
        visited.add(node)
        if node.is_leaf():
            # Accept any leaf when y is unset, otherwise only the target y
            # (matched by label).
            if (not y) or node.get_label() == y.get_label():
                return node, path
            else:
                continue
        # Expand internal node: push parent and children with accumulated
        # path lengths.
        if node.get_parent() and node.get_parent() not in visited:
            tmp = path.copy()
            tmp.append(node)
            heapq.heappush(queue, [length + node.get_edge_length(), cnt, tmp,
                                   node.get_parent()])
            cnt += 1
        for child in node.child_nodes():
            if child and child not in visited:
                tmp = path.copy()
                tmp.append(child)
                heapq.heappush(queue, [length + child.get_edge_length(), cnt,
                                       tmp, child])
                cnt += 1
    # No reachable leaf (or target y) found: fall back to x itself.
    return x, [x]
af5f79910479e534e4dc5a531d352f881de8830a
30,129
def push_recent_data(recent_data, current_dict):
    """Append *current_dict* to a copy of *recent_data* that has its oldest
    (first) element dropped, and return the new list.

    :param recent_data: source list (left unmodified)
    :param current_dict: element to append
    :return: new list of the same length as *recent_data*
    """
    return recent_data[1:] + [current_dict]
8ec7888c560e119b8eb1f639a0146b77d7a75c42
30,131
def count_days_between(dt1, dt2):
    """Return the whole number of calendar days from *dt1* to *dt2*
    (time-of-day is ignored; negative if dt2 precedes dt1)."""
    start = dt1.replace(hour=0, minute=0, second=0, microsecond=0)
    end = dt2.replace(hour=0, minute=0, second=0, microsecond=0)
    return (end - start).days
94fb2cf51af9007ab0c47229d485524af1f18656
30,132
import statistics


def forecast_means(data):
    """
    Calculate mean temperature for full days in forecast.

    Full days are those that have all eight three-hour intervals present
    (0, 3, ..., 21).

    :param data: dictionary with key=date <datetime.datetime> and
        value=temperature <float>.
    :return: dictionary with key=date <datetime.date> and value=mean
        temperature <float> rounded to 2 decimals.
    """
    # Group temperatures by calendar day.
    by_day = {}
    for moment, temp in data.items():
        by_day.setdefault(moment.date(), []).append(temp)
    # Keep only full days, in chronological order.
    return {
        day: round(statistics.mean(temps), 2)
        for day, temps in sorted(by_day.items())
        if len(temps) == 8
    }
44fb98ea8c1bc0ec2233675c62ed5f578e4e1656
30,133
import string
import random


def generate_random_alphanumeric_string(str_length=12):
    """
    Generates a random string of length: str_length

    :param str_length: Character count of the output string
    :return: Randomly generated string
    :rtype: str
    """
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(str_length))
5f0cbd817e3d9dcb5a0b5e785a634bfa2e166968
30,135
def _buffer_list_equal(a, b): """Compare two lists of buffers for equality. Used to decide whether two sequences of buffers (memoryviews, bytearrays, or python 3 bytes) differ, such that a sync is needed. Returns True if equal, False if unequal """ if len(a) != len(b): return False if a == b: return True for ia, ib in zip(a, b): # Check byte equality, since bytes are what is actually synced # NOTE: Simple ia != ib does not always work as intended, as # e.g. memoryview(np.frombuffer(ia, dtype='float32')) != # memoryview(np.frombuffer(b)), since the format info differs. # Compare without copying. if memoryview(ia).cast('B') != memoryview(ib).cast('B'): return False return True
c88d9d87684d27007a73c196f5aaa12e963a8656
30,136
import os


def expand_env_vars(data):
    """Interpolate a fixed whitelist of environment variables ($HOME, $USER,
    $LOCAL, $SCRATCH, $PWD) into *data* and return the result."""
    for key in ('HOME', 'USER', 'LOCAL', 'SCRATCH', 'PWD'):
        placeholder = '$' + key
        if key in os.environ and placeholder in data:
            data = data.replace(placeholder, os.environ[key])
    return data
f9e5aeaf47c844b37f14f2d1bdd6baf7dd483dc2
30,138
def select_final_output(df):
    """Select a small subset of final output.

    :param df: pandas DataFrame with 'Irri' and 'Brelative' columns and a
        DatetimeIndex
    :return: dict with total irrigation, final relative yield, and the last
        timestamp as a datetime
    """
    return {
        'total_irrigation': df['Irri'].sum(),
        # .iloc[-1] replaces df['Brelative'][-1]: positional [] indexing on a
        # label-based Series is deprecated/removed in modern pandas.
        'final_yield': df['Brelative'].iloc[-1],
        'time': df.index[-1].to_pydatetime(),
    }
00b12953acf10b0cb02260fe5eddb0d25c6868d7
30,141