content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def cubeSum(num):
    """Return the sum of the cubes of the integers 1 to num.

    :param num: upper bound (inclusive), an int
    :return: int, 1**3 + 2**3 + ... + num**3 (0 when num < 1)
    """
    cube_sum = 0
    for i in range(1, num + 1):
        # Bug fix: was i**2, which summed squares and contradicted the docstring.
        cube_sum += i ** 3
    return cube_sum
2c7c814b758047103ea93d4733c8d2401952b083
49,933
from bs4 import BeautifulSoup

def html_remove_all_a(html) -> str:
    """Remove every <a> link from the HTML, keeping its text content.

    :param html: HTML document or fragment as a string
    :return: the HTML with each <a> tag replaced by its text
    """
    soup = BeautifulSoup(html, "html.parser")
    for a in soup.find_all('a'):
        # Bug fix: a.string is None when the tag contains nested markup
        # (e.g. <a><b>x</b></a>), and replace_with(None) raises.
        # get_text() always yields the concatenated text.
        a.replace_with(a.get_text())
    return str(soup)
358ba91eef7624a2d0bc207e886b24aa0d490253
49,934
def sortByChanValue(chan):
    """Sort key for ordering channel objects by value.

    Pass as ``key=sortByChanValue`` to ``.sort()`` / ``sorted()``.

    :param chan: channel object exposing a ``value`` attribute
    :return: the channel's value
    """
    return chan.value
0d1e2ac8a8d078aa985236d3c0edea14be49a16b
49,936
def alwayslist(value):
    """Normalize *value* to a sequence.

    None becomes []; lists and tuples pass through unchanged; any other
    value is wrapped in a single-element list.
    """
    if value is None:
        return []
    return value if isinstance(value, (list, tuple)) else [value]
b463836d6b647ea81803333e0fdbd7eefb981446
49,937
import os
import sys

def _build_align_cmdline(cmdline, pair, output_filename, kbyte=None, force_type=None, quiet=False):
    """Helper function to build a command line string (PRIVATE).

    Builds a wise2 (dnal/psw) shell command string with optional -kbyte and
    -quiet flags, input pair, and shell redirections.

    >>> os.environ["WISE_KBYTE"]="300000"
    >>> if os.isatty(sys.stderr.fileno()):
    ...     c = _build_align_cmdline(["dnal"], ("seq1.fna", "seq2.fna"),
    ...                              "/tmp/output", kbyte=100000)
    ...     assert c == 'dnal -kbyte 100000 seq1.fna seq2.fna > /tmp/output', c
    ...     c = _build_align_cmdline(["psw"], ("seq1.faa", "seq2.faa"),
    ...                              "/tmp/output_aa")
    ...     assert c == 'psw -kbyte 300000 seq1.faa seq2.faa > /tmp/output_aa', c
    ... else:
    ...     c = _build_align_cmdline(["dnal"], ("seq1.fna", "seq2.fna"),
    ...                              "/tmp/output", kbyte=100000)
    ...     assert c == 'dnal -kbyte 100000 -quiet seq1.fna seq2.fna > /tmp/output', c
    ...     c = _build_align_cmdline(["psw"], ("seq1.faa", "seq2.faa"),
    ...                              "/tmp/output_aa")
    ...     assert c == 'psw -kbyte 300000 -quiet seq1.faa seq2.faa > /tmp/output_aa', c
    """
    # Copy so the caller's list is not mutated by the extend/append calls below.
    cmdline = cmdline[:]
    ### XXX: force_type ignored
    if kbyte is None:
        # Fall back to the WISE_KBYTE environment variable when set.
        try:
            cmdline.extend(("-kbyte", os.environ["WISE_KBYTE"]))
        except KeyError:
            pass
    else:
        cmdline.extend(("-kbyte", str(kbyte)))
    # Non-interactive stderr (e.g. piped/captured) gets -quiet automatically.
    if not os.isatty(sys.stderr.fileno()):
        cmdline.append("-quiet")
    cmdline.extend(pair)
    # Shell redirection tokens: the result is meant to be run through a shell.
    cmdline.extend((">", output_filename))
    if quiet:
        cmdline.extend(("2>", "/dev/null"))
    cmdline_str = ' '.join(cmdline)
    return cmdline_str
3ce50726181bff4605c11e0e0297013fa1add2af
49,938
from typing import Union

def expo(num1: Union[int, float], num2: Union[int, float]) -> Union[int, float]:
    """Return *num1* raised to the power *num2*."""
    result: Union[int, float] = num1 ** num2
    return result
10478539f38f0569342ec4defc6c69ade5b25aff
49,940
def dim(v) -> int:
    """Return the dimension (number of components) of vector *v*."""
    return len(v)
3a02c01ce928aa5c95b236220ef26804ce92ac8e
49,941
def _spark_calc_op_on_chunks(bucket, data, operators_list): """ Calculate operators on chunk of data return None if no data provided :param bucket: bucket number :type bucket: int :param data: timeseries data :type data: 2-d array :param operators_list: list of operators calculated on data :type operators_list: list :return:tuple of (bucket number, result dict of calculated operators on chunk - Keys are operators.) :rtype: tuple (int, dict) """ result = {} # number of points processed nb_points = len(data) result['NB_POINTS'] = nb_points # keep only values values = data[:, 1] if values.size: for operator in operators_list: if operator == 'MIN': result['MIN'] = min(values) if operator == 'MAX': result['MAX'] = max(values) if operator == 'AVG' or operator == 'STD': result['SUM'] = sum(values) if operator == 'STD': result['SQR_SUM'] = sum([x ** 2 for x in values]) else: return None return bucket, result
61ebabc41a4869321d60da89cc1eb8df70045bb9
49,942
import time
import os

def make_timestamp_dir(dest):
    """Creates time-stamped directory named YYYY.MM.DD under *dest*.

    If a directory for today already exists, creates one suffixed with an
    underscore integer (_1 .. _49).

    Parameters:
        dest : string
            Path to where the time-stamp directory should be created.

    Returns:
        path_to_time_dir : string
            Path to and including the time-stamped directory (with a
            trailing '/'). NOTE(review): returns None if all 49 suffixed
            directories already exist — confirm whether that is acceptable
            to callers.

    Outputs:
        Directory at 'dest' with a time-stamped name.
    """
    time_tuple = time.localtime()
    year = str(time_tuple[0])
    month = str(time_tuple[1])
    day = str(time_tuple[2])
    # Zero-pad month and day to two digits.
    if len(month) == 1:
        month = '0' + month
    if len(day) == 1:
        day = '0' + day
    time_dir = year + '.' + month + '.' + day
    path_to_time_dir = os.path.join(dest, time_dir)
    # If one does not exist for today, create the time-stamp dir.
    if not os.path.isdir(path_to_time_dir + '/'):
        os.mkdir(path_to_time_dir + '/')
        return path_to_time_dir + '/'
    # If one already exists for today, create it with underscore index.
    # Note: range(1, 50) yields suffixes 1-49, not 1-50.
    else:
        for num in range(1, 50):
            path_to_time_dir_num = path_to_time_dir + '_' + str(num) + '/'
            if not os.path.isdir(path_to_time_dir_num):
                os.mkdir(path_to_time_dir_num)
                return path_to_time_dir_num
25f7c72b41a85e444994c358c349ec9c8c363ccb
49,943
def get_steps(length, input_dimension, output_dimension):
    """Calculates each step along a dimension of the tile.

    *length* is the total length of the dimension, *input_dimension* is the
    detector's expected input length, and *output_dimension* is the length
    of the prediction produced for each tile; the difference between the
    two determines the padding.

    Returns a pair of lists: the (possibly negative) input-tile start
    offsets, and the corresponding output start offsets.
    """
    steps = []
    # Symmetric padding lost on each side of a tile; remainder absorbs an
    # odd input/output difference.
    padding = (input_dimension - output_dimension)//2
    remainder = (input_dimension - output_dimension)%2
    step_size = output_dimension - remainder
    # First tile starts shifted left by the padding (may be negative).
    current_step = 0-padding
    steps.append(current_step)
    current_step += step_size
    #Iterate until final prediction "falls off" edge of input image
    while (steps[-1]+(2*step_size+padding)) < length:
        steps.append(current_step)
        current_step += step_size
    #Situation with no overlap on final tile or small length;
    if current_step+step_size+padding == length or length <= output_dimension:
        return steps, [step+padding-remainder for step in steps]
    else:
        # Otherwise anchor a final, overlapping tile flush with the edge.
        final_step = length - step_size - padding - remainder
        steps.append(final_step)
        return steps, [step+padding for step in steps]
0abc0a1c59f4a03c84d0038d0ed207ce47905062
49,944
def get_genotype(gt):
    """Map a VCF genotype string to an allele-count code.

    '0/0' -> "0", '0/1' -> "1", anything else -> "2".
    Note: non-biallelic genotypes (e.g. '1/2') also fall through to "2".

    :param gt: genotype string such as '0/0', '0/1', '1/1'
    :return: "0", "1" or "2" as a string
    """
    # Bug fix: the original tested the truthiness of the string literal
    # itself (`if ('0/0'):`), which is always True, so every input
    # returned "0". Compare against gt instead.
    if gt == '0/0':
        return "0"
    elif gt == '0/1':
        return "1"
    else:
        return "2"
12fa523199a983b862cf7978c988c5580dd3f05c
49,945
import argparse

def parse_opt():
    """Parses the input arguments.

    Builds the training/evaluation argument parser (dataset, model,
    optimizer and loader settings) and parses sys.argv.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    # '-f' absorbs the kernel-connection file argument Jupyter passes in.
    parser.add_argument('-f', type=str, default='')
    parser.add_argument('--comment', type=str, default='test_notebook')
    # Dataset selection and location.
    parser.add_argument('--dataset', type=str, default='css3d')
    parser.add_argument(
        '--dataset_path', type=str, default='../imgcomsearch/CSSDataset/output')
    # Model and embedding configuration.
    parser.add_argument('--model', type=str, default='tirg')
    parser.add_argument('--embed_dim', type=int, default=512)
    # Optimization hyperparameters.
    parser.add_argument('--learning_rate', type=float, default=1e-2)
    # Default effectively disables LR decay.
    parser.add_argument(
        '--learning_rate_decay_frequency', type=int, default=9999999)
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--weight_decay', type=float, default=1e-6)
    parser.add_argument('--num_iters', type=int, default=210000)
    parser.add_argument('--loss', type=str, default='soft_triplet')
    parser.add_argument('--loader_num_workers', type=int, default=4)
    args = parser.parse_args()
    return args
e1b272ba787a4f34a64398766306530881acf273
49,946
def create_key2list(keys):
    """Create a dict mapping each key in *keys* to its own fresh empty list.

    Args:
        keys: iterable of values to use as dictionary keys.

    Returns:
        dict whose keys are *keys* and whose values are independent empty lists.
    """
    return {key: list() for key in keys}
298c7efe9b8b2ebbc5c06067fcc46dfb34c5ee22
49,947
def usage(err=''):
    """Return the program's usage message, prefixed with *err*."""
    parts = [
        '%s\n' % err,
        ' Default usage is to rebuild the python base code from a wsdl.\n',
        ' ',
        ' genBase <wsdl path> \n',
        ' or\n',
        ' genBase -b <base name> -p <output_path> <wsdl path> \n',
    ]
    return ''.join(parts)
ccd9966944e902643bd49fc9563caf1b5a20ff4c
49,948
import argparse

def get_args():
    """! Command line parser

    Builds and parses the arguments for augmented online mixing/loading:
    dataset paths and priors, audio parameters, batching and worker
    settings. Note several arguments are required, so calling this without
    them exits with an argparse error.
    """
    parser = argparse.ArgumentParser(
        description='Augmented online mixing and loading')
    parser.add_argument("-i", "--input_dataset_p", type=str, nargs='+',
                        help="Hierarchical Dataset paths you want to load from",
                        default=None, required=True)
    parser.add_argument("-priors", "--datasets_priors", type=float, nargs='+',
                        help="The prior probability of finding a sample from "
                             "each given dataset. The length of this list "
                             "must be equal to the number of dataset paths "
                             "given above. The sum of this list must add up "
                             "to 1.",
                        default=None, required=True)
    parser.add_argument("-fs", type=float,
                        help="""Sampling rate of the audio.""", default=8000.)
    parser.add_argument("--selected_timelength", type=float,
                        help="""The timelength of the sources that you want
                            to load in seconds.""",
                        default=4.)
    parser.add_argument("--max_abs_snr", type=float,
                        help="""The maximum absolute value of the SNR of
                            the mixtures.""", default=2.5)
    parser.add_argument("-bs", "--batch_size", type=int,
                        help="""The number of samples in each batch.
                            Warning: Cannot be less than the number of
                            the validation samples""", default=3)
    parser.add_argument("--n_sources", type=int,
                        help="""The number of sources inside each mixture
                            which is generated""", default=2)
    parser.add_argument("--n_jobs", type=int,
                        help="""The number of cpu workers for loading the
                            data, etc.""", default=4)
    parser.add_argument("--fixed_seed", type=int,
                        help="""Whether the dataset is going to be fixed (
                            e.g. test partitions should be always fixed)
                            give the random seed. If seed is zero then it
                            means that the dataset is not going to be
                            fixed.""",
                        default=0)
    parser.add_argument("--n_samples", type=int,
                        help="""Define the number of this dataset
                            samples.""", required=True)
    parser.add_argument("-ri", "--return_items", type=str, nargs='+',
                        help="""A list of elements that this
                            dataloader should return. See available choices
                            which are based on the saved data names which
                            are available. 
There is no type checking in this return argument.""",
                        default=['wav'],
                        choices=['wav', 'wav_norm'])
    return parser.parse_args()
a85c0c5b96dd37e39fdd995bffce20507483e286
49,949
import copy def override_repo_refs(repos, override_ref=None, overrides=None): """ Apply ref overrides to the `repos` dictionary. Arguments: repos (dict): A dict mapping Repository objects to openedx.yaml data. override_ref (str): a ref to use in all repos. overrides (dict mapping repo names to refs): refs to use in specific repos. Returns: A new dict mapping Repository objects to openedx.yaml data, with refs overridden. """ repos = {r: copy.deepcopy(data) for r, data in repos.items()} overrides = overrides or {} if override_ref or overrides: for repo, repo_data in repos.items(): local_override = overrides.get(repo.full_name, override_ref) if local_override: repo_data["openedx-release"]["ref"] = local_override return repos
26d264b16dd8ff826362387d08868f99e3c0ab5d
49,950
def glissando_rate(times, start_freq, freq_rate):
    """Frequency array for a glissando: start_freq + times * freq_rate.

    Works elementwise when *times* is an array-like supporting arithmetic.
    """
    return start_freq + times * freq_rate
0ce89936aaebd6145005f3195d40a6de004b9b44
49,951
import random

def mutations(children, mutate_odds, mutate_min, mutate_max):
    """Randomly mutate elements of *children* in place and return the list.

    Each child is mutated with probability *mutate_odds*; a mutated child
    is scaled by a uniform factor in [mutate_min, mutate_max] and rounded.
    """
    for idx, child in enumerate(children):
        if mutate_odds >= random.random():
            factor = random.uniform(mutate_min, mutate_max)
            children[idx] = round(child * factor)
    return children
bd755a34cd0ec890dd8b1c90c4e319d9d02d97e3
49,952
def dataset_get_projection_wkt(gdal_ds):
    """Return a GDAL dataset's projection as well-known text.

    Falls back to the GCP projection when the dataset has no projection ref.
    """
    wkt = gdal_ds.GetProjectionRef()
    return wkt if wkt != '' else gdal_ds.GetGCPProjection()
12febdeafc819c0987c7dece168aac1fb89b8275
49,954
from typing import Optional
from typing import Tuple
from typing import Any

def get_columns_from_str(columns: Optional[str], separator: str = ',') -> Tuple[Any, ...]:
    """Convert a separator-delimited string of column names into a tuple.

    Args:
        columns: column names as one string, or None.
        separator: character separating the names.

    Returns:
        Tuple of column names; empty tuple for None or an empty string.
    """
    return tuple(columns.split(separator)) if columns else ()
482bd797b426062fbc36bcabb4af7ca3cc72cfb8
49,955
def IsLatencyField(field):
    """Check if the field is a latency field.

    Args:
        field: string. The name of the field.

    Returns:
        True when the name contains the substring 'latency'.
    """
    return field.find('latency') != -1
bd5d330460c000fa3fe1045db5ff26cfe40abf3c
49,957
def make_order_by(mode: str):
    """Build the SQL ORDER BY clause string for the given display mode.

    Modes '1', 'a', 'artists' sort by artist name first; everything else
    sorts by release date first.
    """
    if mode in ('1', 'a', 'artists'):
        return "order by a.name collate nocase, p.release_date desc "
    return "order by p.release_date desc, a.name collate nocase "
5e58f1407722b5904f5f7a3bc065c66d641a8363
49,958
def moving_average(data, beta):
    """Bias-corrected exponential moving average of *data*.

    Each output i is the EMA with decay *beta*, divided by (1 - beta**(i+1))
    to correct the zero-initialization bias.
    """
    averages = []
    running = 0
    for idx, sample in enumerate(data):
        running = beta * running + (1 - beta) * sample
        averages.append(running / (1 - beta ** (idx + 1)))
    return averages
c3fad67a88bb0f93a1d156efafe38aa9325395b6
49,959
def card_str(c):
    """Return the two-character representation of card *c*.

    High nibble encodes the rank (2-9, T, J, Q, K, A), low nibble the
    suit (S, D, H, C).
    """
    rank = '__23456789TJQKA'[c >> 4]
    suit = 'SDHC'[c & 15]
    return rank + suit
6e452325b323c9c053b19c30f47b7252f06f1ecd
49,960
def list_matching_metrics():
    """Return the abbreviations of the implemented correlation metrics.

    The entries are:
        * 'peak_abs'   : the absolute score
        * 'peak_ratio' : the primary peak ratio w.r.t. the second peak
        * 'peak_rms'   : the peak ratio w.r.t. the root mean square error
        * 'peak_ener'  : the peaks' energy
        * 'peak_nois'  : the peak score w.r.t. the noise level
        * 'peak_conf'  : the peak confidence
        * 'peak_entr'  : the peaks' entropy
        (plus 'peak_marg', 'peak_win', 'peak_num')

    See Also
    --------
    get_correlation_metric, entropy_corr, peak_confidence, peak_to_noise,
    peak_corr_energy, peak_rms_ratio, num_of_peaks, peak_winner_margin,
    primary_peak_margin, primary_peak_ratio
    """
    return [
        'peak_ratio', 'peak_rms', 'peak_ener', 'peak_nois', 'peak_conf',
        'peak_entr', 'peak_abs', 'peak_marg', 'peak_win', 'peak_num',
    ]
a46915ba4213480aad0b2ab898d389a2145b90f9
49,963
import os
import hashlib

def sha1_file(path):
    """Return the hex SHA-1 digest of the file at *path*.

    The file is read in 1 MiB chunks so large files do not load into
    memory at once.
    """
    assert os.path.isfile(path)
    digest = hashlib.sha1()
    chunk_size = 1024 * 1024  # 1M
    with open(path, "rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
ce041b13e6e5abc336153ec5ff420d9e52896a49
49,964
def ConvertWPtoSurrogateParams(P, **kwargs):
    """Return the mass-ratio parameter usually accepted by gwsurrogate.

    Computes 1/q where q = P.m2 / P.m1, i.e. returns P.m1 / P.m2.
    Extra keyword arguments are accepted and ignored.
    """
    mass_ratio = P.m2 / P.m1
    # return {"q": 1./mass_ratio}
    return 1. / mass_ratio
bb9a34a87c364c3f5683a12f953420d5ae795a67
49,965
import argparse

def argument_parser():
    """Build the command-line parser for the mount/unmount tool.

    Returns the configured ``argparse.ArgumentParser`` (not the parsed
    arguments).
    """
    parser = argparse.ArgumentParser()
    # (flags, options) pairs: actions first, then extra options.
    arg_specs = [
        (("-u", "--umount"),
         dict(type=str, help="unmount location")),
        (("-i", "--mount-from-index"),
         dict(type=str, help="mount from an index file [yaml, json]")),
        (("-d", "--mount-from-dir"),
         dict(type=str, help="mount from a directory")),
        (("--unsafe",),
         dict(action='store_true',
              help="practice unsafe unmounting techniques (default: false)")),
        (("-k", "--index-key"),
         dict(type=str,
              help="path to sub-item when loading index object (delimited by dots ex: key1.item3)")),
        (("-t", "--tmp-prefix"),
         dict(type=str,
              help="Use this location as a prefix for creating mount point")),
        (("-s", "--keep-structure"),
         dict(type=str,
              help="Keep directory structure when mounting from dir")),
        (("-p", "--pattern"),
         dict(action="append",
              help="Pattern to match when mounting from dir (list)")),
    ]
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
    return parser
72f00c8135d66d529b0ad49bb3662dafd90322c6
49,966
def _delete_some_attributes(attrs): """ delete some attributes from the orginal data set that lose meaning after data processing :param attrs: the attribute list """ to_delete_attrs = ['actual_range', 'valid_range'] for del_attrs in to_delete_attrs: if del_attrs in attrs: del attrs[del_attrs] return attrs
7e6f5aa704b5b7ab65bdb95fe051d9195ba226cd
49,967
def generate_delays(delay, max_delay, multiplier=2):
    """Return an infinite generator of backoff delay values.

    Starting from *delay*, each value is multiplied by *multiplier*, and
    every yielded value is clamped into [0, max_delay]. Validation happens
    eagerly, before the first value is requested.
    """
    if max_delay < 0:
        raise ValueError("Provided delay (max) must be greater"
                         " than or equal to zero")
    if delay < 0:
        raise ValueError("Provided delay must start off greater"
                         " than or equal to zero")
    if multiplier < 1.0:
        raise ValueError("Provided multiplier must be greater than"
                         " or equal to 1.0")

    def _delays():
        # Deferred so the ValueErrors above fire on call, not on iteration.
        current = delay
        while True:
            current = max(0, min(max_delay, current))
            yield current
            current = current * multiplier

    return _delays()
b576362f6a0613e29c52ad10fc35e8b95635acc3
49,969
def checkGuid(guid):
    """Check whether *guid* is formatted like a GUID.

    A GUID is a 36-character string split by '-' into 5 parts of lengths
    8-4-4-4-12. Only the lengths are checked, not the characters.

    INPUT: guid - string to be checked.
    OUTPUT: True if the string matches the GUID shape, False otherwise.
    """
    parts = guid.split('-')
    # Bug fix: the original indexed parts[1..4] unconditionally, raising
    # IndexError for strings with fewer than four dashes. Validate the
    # part count (and total length) before checking each part's length.
    if len(guid) != 36 or len(parts) != 5:
        return False
    return [len(p) for p in parts] == [8, 4, 4, 4, 12]
7a0923721e825f40ad69e816bdbfe9504d7038bb
49,970
def _split_multi_objective_into_single_objective(X, Y): """ Splits k-objective observations into k sets of single objective observations. """ num_obj = len(Y[0]) for y in Y: assert len(y) == num_obj data_pairs = [] for i in range(num_obj): pair = (X, [y[i] for y in Y]) data_pairs.append(pair) return data_pairs
0d52debc678dbb97946dbc22054e77f94c755848
49,972
def is_password_good(password):
    """Check that a password meets the basic safety rules.

    Requires at least 8 characters, with at least one ASCII uppercase
    letter, one lowercase letter and one digit.
    """
    if len(password) < 8:
        return False
    has_upper = has_lower = has_digit = False
    for ch in password:
        if "A" <= ch <= "Z":
            has_upper = True
        elif "a" <= ch <= "z":
            has_lower = True
        elif "0" <= ch <= "9":
            has_digit = True
        if has_upper and has_lower and has_digit:
            return True
    return False
4feb13bb3a44dcd2a17f17a777d4cce2f0adde46
49,973
from typing import Any
import re

def regular_expression(check_value: Any, item: Any) -> bool:
    """Run a regular expression search of *check_value* against *item*.

    :param check_value: Regular expression pattern.
    :param item: Value to search; converted with str() first.
    :return: True when the pattern matches anywhere in str(item).
    """
    match = re.search(check_value, str(item))
    return match is not None
e9ed664a0f4fcf3166a5ff9b854a33b0e7472ee5
49,974
import time
import os

def generate_timestamp_filename(dirname: str, basename: str, file_format: str) -> str:
    """Generate a timestamped path '<dirname>/<YYYYmmdd-HHMMSS>.<basename><file_format>'.

    Note that *file_format* is appended as-is, so include the leading dot
    if one is wanted.
    """
    stamp = time.strftime("%Y%m%d-%H%M%S")
    filename = f"{stamp}.{basename}{file_format}"
    return os.path.join(dirname, filename)
9d1de1ad137a118c67fbe21761811e3cb42dade5
49,975
import re

def mkPrefixCss(css, prefix=""):
    """Add the prefix (css selector) to all rules in the 'css'
    (used to scope style in context).

    @media blocks are extracted first and their inner CSS is prefixed
    recursively; any ':scope' token in a selector is removed when a
    prefix is applied.
    """
    medias = []
    # Pull each @media { ... } block out of the css, recursing on its body.
    while "@media" in css:
        p1 = css.find("@media", 0)
        p2 = css.find("{", p1) + 1
        # Walk forward to the brace that closes the @media block.
        lv = 1
        while lv > 0:
            lv += 1 if css[p2] == "{" else -1 if css[p2] == "}" else 0
            p2 += 1
        block = css[p1:p2]
        mediadef = block[: block.find("{")].strip()
        mediacss = block[block.find("{") + 1 : block.rfind("}")].strip()
        css = css.replace(block, "")
        medias.append((mediadef, mkPrefixCss(mediacss, prefix)))
    lines = []
    # Strip comments, then collapse all whitespace to single spaces.
    css = re.sub(re.compile("/\*.*?\*/", re.DOTALL), "", css)
    css = re.sub(re.compile("[ \t\n]+", re.DOTALL), " ", css)
    for rule in re.findall(r"[^}]+{[^}]+}", css):
        sels, decs = rule.split("{", 1)
        if prefix:
            # Prefix every comma-separated selector; ':scope' is dropped.
            l = [
                (prefix + " " + i.replace(":scope", "").strip()).strip()
                for i in sels.split(",")
            ]
        else:
            l = [(i.strip()) for i in sels.split(",")]
        lines.append(", ".join(l) + " {" + decs.strip())
    # Re-append the (already recursively prefixed) @media blocks at the end.
    lines.extend(["%s {%s}" % (d, c) for d, c in medias])
    return "\n".join(lines).strip("\n ")
c7cd971f143e257280e59f85d5bc8207056c737f
49,976
def find(x, labels):
    """
    Better version of find - Much faster

    Union-find "find" with path compression: follow parent links in
    ``labels`` until reaching a root (an index whose label is itself),
    then relabel every node on the original path directly to that root.

    NOTE(review): x and labels[...] are .copy()-ed, which suggests the
    elements are numpy scalars/arrays rather than plain ints — confirm
    against the callers.
    """
    # First pass: walk up to the root of x's tree.
    y = x.copy()
    z = 0
    while (labels[y] != y):
        y = labels[y]
    #second part collapses labels
    # Second pass: point every node on the path from x straight at the root.
    while (labels[x] !=x):
        z = labels[x].copy()
        labels[x] = y
        x = z
    return y
5a02647ce2e7d987cd6f5019837fe54a64c16aeb
49,977
def num_to_alpha(integer):
    """Transform an integer index to a letter [a-z] then [A-Z].

    Parameters
    ----------
    integer : int
        Index to transform (0-51).

    Returns
    -------
    str
        Single-letter representation of the integer.

    Raises
    ------
    ValueError
        When the index is 52 or larger (too many einsum subscripts).
    """
    letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if integer >= 52:
        raise ValueError('Too large index for einsum')
    return letters[integer]
9ec27d1f7c28bb4bded81aa2a7171eca80d3efdc
49,981
import inspect

def infer_parameter_type(parameter: inspect.Parameter):
    """Infer a callable parameter's type from its annotation or default.

    The explicit annotation wins; otherwise the type of a non-None default
    is used; failing both, ``inspect.Parameter.empty`` is returned.
    """
    inferred = parameter.annotation
    if inferred != inspect.Parameter.empty:
        return inferred
    default = parameter.default
    if default is not None and not default == inspect.Parameter.empty:
        return type(default)
    return inferred
4ee9c70721ba6457bad58b3c1b049a062546287b
49,984
def _make_linear_ramp(white): """ generate a palette in a format acceptable for `putpalette`, which expects [r,g,b,r,g,b,...] """ ramp = [] r, g, b = white for i in range(255): ramp.extend((r*i/255, g*i/255, b*i/255)) return ramp
5f75464c3773ca1ebb70331d2e943930e7d2152f
49,986
def sql_render(hpo_id, cdm_schema, results_schema, vocab_schema, sql_text):
    """Replace template parameters in an Achilles SQL command.

    :param hpo_id: source name used in the Achilles report
    :param cdm_schema: schema of the cdm (None drops the qualifier)
    :param results_schema: schema of the results tables (None drops it)
    :param vocab_schema: schema of the vocabulary tables (None drops it)
    :param sql_text: SQL command text to render
    :return: command text with template parameters replaced
    """
    rendered = sql_text
    schema_map = {
        '@cdm_database_schema': cdm_schema,
        '@results_database_schema': results_schema,
        '@vocab_database_schema': vocab_schema,
    }
    for placeholder, schema in schema_map.items():
        if schema is None:
            # Remove the placeholder together with its trailing dot.
            rendered = rendered.replace(placeholder + '.', '')
        else:
            rendered = rendered.replace(placeholder, schema)
    return rendered.replace('@source_name', hpo_id)
02848fed8426cb8fc20dcfaaf845e6c6f7de5708
49,987
import yaml

def dump_yaml(obj, stream):
    """Attempt to dump `obj` to a YAML file.

    Uses block style (default_flow_style=False) and writes to the given
    open stream; returns whatever yaml.dump returns (None when a stream
    is provided).
    """
    return yaml.dump(obj, stream=stream, default_flow_style=False)
7869d9124021b913d9baac7124b7c8aea5925b55
49,988
def check_event_attrs(attrs):
    """ Verify the event has the expected attributes for Flog Apache logs
    and custom app logs.

    The event type is sniffed from marker fields ('app', 'dataset', 'tag');
    unknown event shapes are reported and rejected. Returns True only when
    every expected attribute for the detected type is present.
    """
    if 'app' in attrs and attrs['app'] == 'customApp':
        # custom app
        expected_attrs = ['activity_type', 'categories', 'dest_country', 'dest_ip', 'dest_port',
                          'device_type', 'domain', 'forwarder', 'id', 'is_phishing_domain',
                          'is_ransomware_dest_ip', 'is_ransomware_src_ip', 'is_threat_dest_ip',
                          'is_threat_src_ip', 'outcome', 'source_component', 'src_country',
                          'src_ip', 'src_port', 'subcategory', 'username', 'version']
    elif 'dataset' in attrs and attrs['dataset'] == 'accesslog':
        # filebeats flog apache log
        expected_attrs = ['agent', 'authUser', 'bytes', 'ip', 'protocol', 'referrer', 'status',
                          'uriPath', 'user']
    elif 'tag' in attrs and attrs['tag'] == 'fluentd-apache':
        # fluentd flog apache log
        expected_attrs = ['agent', 'code', 'host', 'method', 'path', 'size', 'container_id',
                          'container_name']
    elif 'tag' in attrs and attrs['tag'] == 'fluentbit-cpu':
        # fluentbit cpu
        expected_attrs = ['cpu_p', 'system_p', 'user_p', 'cpu0.p_cpu', 'cpu0.p_system',
                          'cpu0.p_user']
    else:
        # No marker matched: not one of the known event shapes.
        print("Unexpected event. Event did not match expected event types. {0}".format(attrs))
        return False
    # All expected keys must be present (extra keys are tolerated).
    has_expected_attrs = all (k in attrs for k in expected_attrs)
    if not has_expected_attrs:
        print("Did not get expected attributes {0}. Query returned attributes {1}".format(expected_attrs, attrs))
    return has_expected_attrs
52071fc756194fec1338bc9a69c53aa70fa25cb0
49,989
def _as_range_str(iterable): """ Return a string representing the range as a string span. """ l = list(iterable) if len(l) > 1: return '{0}-{1}'.format(l[0], l[-1]) return '{0}'.format(l[0])
cab3c70297fc8f75ab6b5d7350462de9d1501a4d
49,990
import time
import logging

def write_mutations(batch, throttler, rpc_stats_callback, throttle_delay=1):
    """A helper function to write a batch of mutations to Cloud Datastore.

    NOTE(review): the original docstring claimed the commit "will be
    retried up to 5 times", but no retry loop is visible here — retries,
    if any, must come from the client library configuration
    (``google.cloud.datastore_v1.gapic.datastore_client_config``). Confirm
    before relying on retry behavior.

    Args:
      batch: (:class:`~google.cloud.datastore.batch.Batch`) an in-progress
        batch to commit.
      throttler: adaptive throttler used to decide whether this request
        should be delayed client-side.
      rpc_stats_callback: called with keyword args `successes`, `errors`
        and `throttled_secs` to record RPC outcomes and throttling time.
      throttle_delay: (:class:`float`) seconds to sleep when throttled.

    Returns:
      (int) The latency of the successful RPC in milliseconds.

    Raises:
      Whatever the commit raises, after recording an error via the callback.
    """
    # Client-side throttling.
    while throttler.throttle_request(time.time() * 1000):
        logging.info("Delaying request for %ds due to previous failures",
                     throttle_delay)
        time.sleep(throttle_delay)
        rpc_stats_callback(throttled_secs=throttle_delay)
    try:
        start_time = time.time()
        batch.commit()
        end_time = time.time()
        # Record the success both for stats and for the adaptive throttler.
        rpc_stats_callback(successes=1)
        throttler.successful_request(start_time * 1000)
        commit_time_ms = int((end_time-start_time) * 1000)
        return commit_time_ms
    except Exception:
        # Count the failure, then let the caller see the original exception.
        rpc_stats_callback(errors=1)
        raise
93c097ec667eb1e1c90dec53c32203b493a0b841
49,992
def trapez(f, a, b, n):
    """Approximate the integral of *f* over [a, b] by the trapezoidal rule.

    f -- function of one variable
    a -- lower boundary
    b -- upper boundary
    n -- number of sections

    Returns the trapezoidal approximation of the integral.
    """
    h = (b - a) / float(n)
    interior_sum = 0.0
    for k in range(1, n):
        interior_sum += f(a + k * h)
    return h * (f(a) + f(b) + 2.0 * interior_sum) / 2.0
79035891f41f01ec7244e218125cf372c9de50c0
49,993
import math

def periodicize(num_pair_list, period):
    """Split *num_pair_list* into consecutive chunks of length *period*.

    Any leftover items that do not fill a whole period are appended as a
    final, shorter chunk.
    """
    total = len(num_pair_list)
    full_chunks = math.floor(total / period)
    chunks = [num_pair_list[period * k: period * (k + 1)]
              for k in range(full_chunks)]
    # If there's extra stuff on the end, keep it as a trailing partial chunk.
    if full_chunks != total / period:
        chunks.append(num_pair_list[period * full_chunks:])
    return chunks
05f1f0cce670d0ab0164c21d02b3acba2a63f88c
49,994
def branch_define_udp_rate(rate_list, title='%g', titlelabel='UDP Rate [Mb/s]'):
    """Build a branch generator that varies the UDP rate.

    This does not actually change the setup; it only sets
    ``testdef.udp_rate`` so the value can be used when running the
    actual test, yielding one tag/title dict per rate.
    """
    def branch(testdef):
        for udp_rate in rate_list:
            testdef.udp_rate = udp_rate
            yield {
                'tag': 'udp-rate-%s' % udp_rate,
                'title': title % udp_rate,
                'titlelabel': titlelabel,
            }
    return branch
6e1c6927d62e27a0e3140b628a18d691d6ae9a24
49,995
import math

def image_entropy(im):
    """Calculate the Shannon entropy (in bits) of an image's histogram.

    Used for "smart cropping".
    """
    histogram = im.histogram()
    total = float(sum(histogram))
    probabilities = [count / total for count in histogram]
    return -sum(p * math.log(p, 2) for p in probabilities if p != 0)
356cd2b1eeb671c5e3e82ddd8a6e2e8c73f38758
49,996
def pd_stat_jupyter_profile(df, savefile="report.html", title="Pandas Profile"):
    """ Describe the tables with Pandas-Profiling 2.0.0 (df.profile_report()).

    Writes an HTML profiling report for *df* to *savefile* and returns the
    columns rejected at a 0.98 correlation threshold.

    # NOTE(review): requires the pandas-profiling extension that patches
    # DataFrame with .profile_report() — confirm it is installed.
    """
    print("start profiling")
    profile = df.profile_report(title=title)
    profile.to_file(output_file=savefile)
    # Columns too highly correlated (>= 0.98) to be worth keeping.
    colexclude = profile.get_rejected_variables(threshold=0.98)
    return colexclude
64fc1bf75b92f7d40bedaeee50d709f77b202419
49,997
def hrrr_file_url(date, CC, FF):
    """Build the NCEP download URL for an HRRR 2D surface data file.

    File names are hrrr.tCCz.wrfsubhfFF.grib2 where CC is the model cycle
    runtime (i.e. 00, 01, 02, 03) and FF is the forecast hour
    (i.e. 00, 03, 06, 12, 15).

    Returns (url, filename).
    """
    date_str = str(date.year) + str(date.month).zfill(2) + str(date.day).zfill(2)
    filename = "hrrr.t{0}z.wrfsubhf{1}.grib2".format(
        str(CC).zfill(2), str(FF).zfill(2))
    url = ("https://ftp.ncep.noaa.gov/data/nccf/com/hrrr/prod/hrrr."
           + date_str + "/conus/" + filename)
    return url, filename
433aa408986b197a6b40fa61c090a933740a372b
49,998
def set_overlap(source_set, target_set):
    """Compute the overlap score between a source and a target set.

    The score is |source ∩ target| / |target|, always in [0, 1].
    """
    shared = target_set.intersection(source_set)
    score = len(shared) / float(len(target_set))
    assert 0. <= score <= 1.
    return score
1feb0f25d711f23c62594d18316f7c96750b42c0
49,999
def after_request(response):
    """
    After-request hook intended to apply CORS for all requests.

    :param response: the outgoing response object
    :return: the response, unchanged

    # NOTE(review): despite the stated intent, no CORS headers are set
    # here — the response passes through untouched. Confirm whether
    # headers such as Access-Control-Allow-Origin should be added.
    """
    return response
3463c0b1bdba345f52588f67a96de5ac11f8fa7b
50,000
def get_ranx0(rng):
    """Uniformly sample a starting point from the feasible space.

    Parameters
    ----------
    rng : prng.MRG32k3a object
        Random generator exposing a ``choice`` method.

    Returns
    -------
    x0 : tuple of int
        A randomly chosen point of 9 components, each drawn from range(100).
    """
    tau = 100
    q = 9
    support = range(tau)
    return tuple(rng.choice(support) for _ in range(q))
578c0964877f4f1560af9cea3136b04f43b9fed4
50,001
def sec_to_time(sec):
    """Format a duration in whole seconds as H:MM:SS."""
    hrs, remainder = divmod(sec, 3600)
    mins, secs = divmod(remainder, 60)
    return f"{hrs:d}:{mins:02d}:{secs:02d}"
cffe3060a1a98f3dbba53da4666175994c606a31
50,002
def _faster_comp(candidates, pairs): """ helper function for run(), evaluates winner of pairs, but faster (by about two orders of magnitude) than _graph() """ # This tentatively works, it failed a test once (and only once), and I # reran two orders of magnitudes more random tests and it passed them all # This works by tracking all nodes that have an edge pointing into them # and not checking any edge coming out of that node (as it wouldn't be a # source on the graph in that case). edges = set() children = set() for (i, j) in pairs: if i in candidates and j in candidates and \ i not in children and (j, i) not in edges: children.add(j) edges.add((i, j)) winners = set() for c in candidates: if c not in children: winners.add(c) return winners
6be8a5effa49fd1a0f648b83bca16f3235981656
50,004
def override(BaseClass):
    """Decorator documenting that the decorated function overrides the
    function of the same name in *BaseClass*.

    Purely declarative: the decorated function is returned untouched.
    """
    def decorator(func):
        return func
    return decorator
5c9d7ada6534a6d77a56cc985d707e615460caa1
50,005
def get_sanitized_bot_name(dict, name):
    """Cut a bot name off at 31 characters and disambiguate duplicates.

    :param dict: tracks how many times each name has been seen
    :param name: the name being sanitized
    :return: a sanitized, possibly numbered version of the name
    """
    if name in dict:
        count = dict[name] + 1
        dict[name] = count
        # Truncate at 27 because up to '(10)' may be appended, keeping 31.
        return name[:27] + "(" + str(count) + ")"
    dict[name] = 1
    # Make sure the name does not exceed 31 characters.
    return name[:31]
d433ae8309a6cded6dc9177a8590848e04eb769d
50,007
import os
import subprocess
import time


def executeInstances(PESTO_client, number_of_users,workingdir, resourcesdir, resultsdir, startingport, numberoftheVM, shareddrive, password, loglevel):
    """
    Launch one Instance.py process per user via psexec and wait for all
    of them to terminate.

    Each instance i runs as Windows account "MyUser<i>" with its own
    working directory and a unique port derived from startingport,
    number_of_users and numberoftheVM.

    Original notes: "-h" doesn't work from a remote VM; the connection
    is given a 60 sec timeout ("-n 60").

    Returns 0 when every instance exited with code 0, otherwise 1 (and
    returns 1 immediately if spawning an instance fails).
    """
    Process = []
    for i in range(number_of_users):
        userworkingdir = workingdir + '\\MyUser'+str(i)
        # Unique port per (VM, user) pair.
        port = startingport + (number_of_users*numberoftheVM) + i
        moreINSTANCEs = os.path.join(PESTO_client, 'PESTO-client\\Instance\\Instance.py')
        print('Executing instance as MyUser' + str(i), flush=True)
        try:
            # NOTE(review): the psexec login password is hard-coded to
            # 'redhat' while the separate `password` argument is only
            # forwarded to Instance.py -- confirm this is intentional.
            P = subprocess.Popen(['psexec.exe','-n','60','-h','/accepteula','-u', "MyUser"+str(i) , '-p', 'redhat', 'python', moreINSTANCEs, workingdir, userworkingdir, resultsdir, resourcesdir, str(port),shareddrive,password, PESTO_client, loglevel], stdout=subprocess.PIPE, stderr= subprocess.PIPE)
            # Stagger start-up of the instances by one second each.
            time.sleep(1)
        except Exception as e:
            print('Error while executing instance. /returned/',flush=True)
            print(e,flush=True)
            return 1
        Process.append(P)
    # wait processes to terminate
    for p in Process:
        p.wait()
    flag = 0
    for p in Process:
        print('Process on MyUser' + str(Process.index(p)) +' returned: ', p.returncode, flush=True)
        if p.returncode != 0:
            flag = 1
    print('All terminated\n',flush=True)
    return flag
88c1b48a991fae7e6a15216342cd4d92de8042fd
50,008
def parse_dec_or_hex(string):
    """Parse *string* as a decimal or hexadecimal integer.

    A (possibly sign-prefixed) '0x' / '0X' marker selects base 16;
    anything else is parsed as base 10.  The previous version only
    recognised a leading lowercase '0x', so '0X10' and '-0x10' raised
    ValueError.

    :param string: text such as '42', '0x2a', '-0X2A'
    :return: the parsed int
    :raises ValueError: if the text is not a valid number in the
        selected base
    """
    # Look past an optional sign and accept either prefix case.
    base = 16 if string.lstrip('+-').lower().startswith('0x') else 10
    return int(string, base)
90c8ee433245d0b5c2cc5d41c0007a5dda8039a6
50,009
def format_option_name(name):
    """Return a formatted string for the option path."""
    if name is None:
        return "None"
    try:
        section, name = name.split('/')
    except ValueError:
        # Bare name with no section, e.g. 'log_dir'.
        return "[DEFAULT] %s" % name
    try:
        filename, section = section.split('|')
    except ValueError:
        # Section but no filename, e.g. 'database/connection'.
        return "[%s] %s" % (section, name)
    # Swift-style option, e.g. 'proxy-server|filter:tempurl/use'.
    return "%s.conf: [%s] %s" % (filename, section, name)
c3d794b67eb2c42669c5513878c7704a6d516626
50,011
def isduplicate(pattern, lst_winners, lst_losers=None):
    """
    Check whether a GP already appears among the winner or loser GPs.

    A match counts when the candidate's pattern - or its inverse -
    covers the same set of items as an existing pattern.

    :param pattern: a GP
    :param lst_winners: list of GPs
    :param lst_losers: list of GPs (optional)
    :return: True if pattern is in either list, False otherwise
    """
    forward = set(pattern.get_pattern())
    inverse = set(pattern.inv_pattern())
    # Losers (when given) are scanned before winners, as before.
    pools = [lst_winners] if lst_losers is None else [lst_losers, lst_winners]
    for pool in pools:
        for existing in pool:
            known = set(existing.get_pattern())
            if forward == known or inverse == known:
                return True
    return False
b854932a0a00585db86e610cacda52611dacfdde
50,012
def make_arguments(action: str) -> dict:
    """
    Build the default argument dictionary for *action*.

    Parameters
    ----------
    action : string
        The action to execute
    """
    keys = ("action", "file", "folder", "device", "all", "move_path", "from_device")
    values = (action, None, None, None, False, None, None)
    # zip keeps the historical key order of the returned dict.
    return dict(zip(keys, values))
2661ec75813af047ccf4f565153d03d0d1f53ab8
50,014
import pickle


def loadPRRp(ifile='input.p'):
    """
    Load a pradreader (PRR) pickled object with no CLI input from the user.

    Quick-and-dirty helper only: pickle is not a recommended way to
    share data between users or computers (and unpickling untrusted
    files is unsafe).

    :param ifile: path of the pickle file to read
    :return: the unpickled object
    """
    # 'with' guarantees the handle is closed; the previous version
    # leaked the open file object.
    with open(ifile, 'rb') as fh:
        return pickle.load(fh)
2b60258892206c059f04f9f03fac10adf3ae9f92
50,016
def gen_subsets(L):
    """Return the power set of ``L`` as a list of lists.

    Exponential complexity: a set of size k has pow(2, k) subsets.

    >>> gen_subsets([1,2])
    [[], [1], [2], [1, 2]]
    >>> gen_subsets([1,3])
    [[], [1], [3], [1, 3]]
    >>> gen_subsets([1,2,3])
    [[], [1], [2], [1, 2], [3], [1, 3], [2, 3], [1, 2, 3]]
    """
    subsets = [[]]
    for elem in L:
        # Every existing subset spawns a copy extended with `elem`;
        # appending the copies after the originals reproduces the
        # recursive ordering (subsets without the element come first).
        subsets = subsets + [prefix + [elem] for prefix in subsets]
    return subsets
db4d91ea270e93b44a40f3403ec205c4f23c05cf
50,017
def show_type(something):
    """Home-grown counterpart of ``type`` for these dict-based "classes".

    (Docstring translated from Russian.)  Differs slightly from the
    real ``type``: since classes here are implemented as dicts,
    returning the class itself would print its entire contents, which
    is inconvenient, so a short descriptive string is returned instead.
    """
    cls_name = something.get('__my_name__')
    if cls_name is not None:
        return 'class ' + cls_name
    # No own name: this is an instance; report its class's name.
    return 'inst of ' + something['__my_type__']['__my_name__']
2756f6a75cdd307a89a4a66d7743a9318c4b0b62
50,018
def to_float(MIC):
    """
    Coerce *MIC* to float, stripping the inequality symbols '=', '<'
    and '>' that may appear in string inputs such as '<=0.25'.
    """
    # str.translate deletes all three symbols in a single pass.
    return float(str(MIC).translate(str.maketrans('', '', '=<>')))
78b216584c20bd76731ab400a501480ce6cd9f94
50,019
def calc_company_check_digit(number):
    """Calculate the check digit for the 10-digit ИНН for organisations.

    The first nine digits are combined with fixed weights; the weighted
    sum modulo 11, then modulo 10, is the check digit.
    """
    weights = (2, 4, 10, 3, 5, 9, 4, 6, 8)
    # zip truncates to the first nine digits automatically.
    total = sum(w * int(d) for w, d in zip(weights, number))
    return str(total % 11 % 10)
33c1ca3209fbe819dcbbd80a7b33df1aa6051216
50,020
def is_string(in_str):
    # type: (...) -> bool
    """Return True iff *in_str* is a Unicode (text) string.

    Byte-strings deliberately do not count as strings.

    Examples:
        >>> is_string('abc')
        True
        >>> is_string(u'北京')
        True
        >>> is_string(123)
        False
        >>> is_string(None)
        False
        >>> is_string(['a', 'b'])
        False
    """
    # u''.__class__ keeps this working on Python 2 (where it is
    # `unicode`) as well as Python 3 (where it is simply `str`).
    text_types = (str, u''.__class__)
    return isinstance(in_str, text_types)
2b9bba08733119ecc4255c9a8f053d588e97e19d
50,021
def restoreMember(memberType, name, extra, params, body):
    """Re-creates an XBL member element from parts created by iterateMembers.

    memberType selects which XBL tag is emitted; name, extra, params and
    body supply its attributes, parameter list and CDATA content.
    Returns None (implicitly) for an unrecognised memberType.
    """
    if memberType == "method":
        paramText = ""
        for param in params:
            # One <parameter> child per formal parameter name.
            paramText += """ <parameter name="%s"/>\n""" % param
        return """<method name="%s">\n%s <body><![CDATA[%s]]>""" \
            % (name, paramText, body)
    elif memberType == "handler":
        # `extra` carries the handler's original attribute string.
        return """<handler %s>\n <![CDATA[%s]]>""" % (extra, body)
    elif memberType == "constructor":
        return """<constructor>\n <![CDATA[%s]]>""" % (body)
    elif memberType == "destructor":
        return """<destructor>\n <![CDATA[%s]]>""" % (body)
    elif memberType == "property":
        # Properties carry no body here; getter/setter parts are emitted
        # as separate members.
        return """<property name="%s">""" % (name)
    elif memberType == "getter":
        return """<getter>\n <![CDATA[%s]]>""" % (body)
    elif memberType == "setter":
        return """<setter>\n <![CDATA[%s]]>""" % (body)
c9a5d816fcd11fd7a8a41723bfb51fd0143174b4
50,024
def get_text(root) -> str:
    """Return the text of an xml.etree.ElementTree.Element, if present.

    (Docstring translated from Russian.)  Checks whether the given
    element carries a ``text`` attribute and returns it; if the
    attribute is missing, returns the placeholder string 'н/д' ("n/a").
    Note that an element whose text is unset stores ``None``, which is
    returned as-is.
    """
    try:
        text = root.text
    # Only the absence of the attribute is an expected failure; the old
    # bare `except` would also have swallowed unrelated bugs.
    except AttributeError:
        return 'н/д'
    else:
        return text
6102115ad7795f93ba9bd329df3648e8783310f9
50,025
def _enable_custom_widget_manager(package_name: str) -> str:
    """Return additional finally block for packages that require a custom widget manager.

    If *package_name* mentions one of the known packages (currently
    only "itkwidgets"), returns a Colab code snippet (prefixed with a
    newline) that enables the custom widget manager; otherwise returns
    the empty string.
    """
    if any([requires_custom_widget_manager in package_name for requires_custom_widget_manager in (
            "itkwidgets",
    )]):
        # NOTE(review): the snippet below is emitted as code to run in
        # Colab, so its exact indentation matters; confirm the layout
        # against the original file before reformatting.
        return "\n" + """finally:
    import google.colab
    google.colab.output.enable_custom_widget_manager()"""
    else:
        return ""
51bc09baf516cc6fd010d8ca3ea4f2a030dbe111
50,027
from pathlib import Path


def change_file_extensions_to_tif(each_file_dict: dict, file_extensions_to_protect_from_changing_to_tif: list) -> dict:
    """Change each recorded file extension to .tif, except protected ones.

    Rewrites the 'id', 'filePath', 'description' and 'title' entries of
    *each_file_dict* in place (and also returns the dict).  Extensions
    listed in *file_extensions_to_protect_from_changing_to_tif*
    (lower-case, with leading dot) are left untouched.
    """
    for node_name in ('id', 'filePath', 'description', 'title'):
        if node_name not in each_file_dict:
            continue
        node_value = each_file_dict[node_name]
        # Path.suffix is '' or starts with '.', so a truthy suffix always
        # contains a dot; the old explicit "'.' in suffix" check was redundant.
        file_extension = Path(node_value).suffix
        if file_extension and file_extension.lower() not in file_extensions_to_protect_from_changing_to_tif:
            # Replace only the trailing suffix.  The previous
            # str.replace() swapped the FIRST occurrence anywhere in the
            # string, corrupting values like 'a.jpg/b.jpg'.
            each_file_dict[node_name] = node_value[:-len(file_extension)] + '.tif'
    return each_file_dict
97fec7c642dfca6eff60f31efff5410bc41a4ee6
50,028
def make_change(amount):
    """
    Greedy coin change.  (Docstring translated from Portuguese.)

    Assumes *amount* is an integer > 0, in cents, and returns the
    smallest number of coins whose values add up to it.  Possible coins
    are 100, 50, 25, 10, 5 and 1 cents; with these denominations a
    greedy algorithm is optimal.
    """
    counts = []
    for coin in (100, 50, 25, 10, 5, 1):
        if amount < coin:
            continue
        n = amount // coin
        r = amount - coin * n
        print(f"valor ={amount} | notas ={n} | nota ={coin} | resto ={r}")
        amount = r
        counts.append(n)
    print(counts)
    return sum(counts)
789460ac42fc3ad005fc67e67325a07d4e539122
50,029
def health():
    """Health-check endpoint: always responds with HTTP 200.

    :return: the ("Works", 200) body/status pair
    """
    return ("Works", 200)
e7c530774f275a91f1764210c22648a15cd3eecc
50,030
def list_protection(to_append, final_result):
    """Adjust the return data to be a valid flat list.

    Lists are concatenated (producing a new list); any other value is
    appended to *final_result* in place and the same list is returned.
    """
    if not isinstance(to_append, list):
        final_result.append(to_append)
        return final_result
    return final_result + to_append
db75041fb6b2b3f1c1d12f79b0be2fcedf476f6a
50,031
def day_of_month(d):
    """Return the day of *d* with its English ordinal suffix, e.g. '21st'.

    d: Datetime object
    """
    day = d.day
    if 10 < day < 14:
        # 11th, 12th and 13th are irregular.
        ordinal = "th"
    else:
        ordinal = {1: "st", 2: "nd", 3: "rd"}.get(day % 10, "th")
    return str(day) + ordinal
f6439f01a94efd9135f272ec1e9d027d518d08ad
50,032
import os
import json


def read_json_file_to_dict(file_path):
    """
    Read a JSON file into a dict.

    :param file_path: Json file path
    :return: Parsed content of the JSON file, or {} if the file does
        not exist
    """
    if not os.path.exists(file_path):
        return {}
    # json.load parses straight from the handle; no need to slurp the
    # whole text into memory first.
    with open(file_path, "r") as json_file:
        return json.load(json_file)
32d705a2233db99d2829b7956b56a8777b7f3a11
50,033
import hashlib


def md5_hash(file_name):
    """Return the hex MD5 digest of the file at *file_name*.

    Intended for change detection, not for security.  The previous
    version leaked the file handle; 'with' closes it deterministically.
    """
    with open(file_name, 'rb') as fh:
        return hashlib.md5(fh.read()).hexdigest()
678ee1663128ff175b997ef144edd11718a82037
50,037
def bayesian_targeting_policy(tau_pred, contact_cost, offer_accept_prob, offer_cost, value=None):
    """ Applied the Bayesian optimal decision framework to make a targeting decision.

    The decision to target is made when the expected profit increase from targeting is
    strictly larger than the expected cost of targeting.

    tau_pred : array-like
      Estimated treatment effect for each observation.  Typically the effect
      on the expected profit.  If tau_pred is the treatment effect on
      conversion, 'value' needs to be specified.
    contact_cost : float or array-like
      Static cost that realizes independent of outcome
    offer_accept_prob : float or array-like
      Probability that a targeted individual accepts the offer
    offer_cost : float or array-like
      Cost that realizes when the offer is accepted
    value : float or array-like, default: None
      Value of the observations in cases where tau_pred is the change in
      acceptance probability (binary outcome ITE)
    """
    # 'is not None' rather than truthiness: value=0 is a legitimate
    # scaling factor, and an array-valued `value` has no truth value
    # (the old `if value:` raised / mis-handled both cases).
    if value is not None:
        tau_pred = tau_pred * value
    return (tau_pred > (offer_accept_prob * offer_cost - contact_cost)).astype('int')
eb22fafd73acd9bcb75114dd18391f865a32f112
50,038
def _to_encode(s, encoding='utf-8'):
    """Convert *s* to a Unicode text string.

    Byte-strings are decoded with *encoding*; text strings are returned
    unchanged.  (The previous implementation called ``.decode`` on any
    input, which fails for ``str`` on Python 3.)

    >>> _to_encode(b'abc') == u'abc'
    True
    """
    if isinstance(s, bytes):
        return s.decode(encoding)
    return s
ff61046c36a07d3bc880a4d80fd54d74d96b7817
50,039
def parse_authors(auth_str):
    """Extract author last names from an EndNoteX9-style BibTex author
    string of the form ``last, first and last, first and ...``.

    :param auth_str: full author string
    :return: list of last names, in order of appearance
    :rtype: list of str
    """
    # The last name is everything before the first comma of each entry.
    return [author.split(',')[0] for author in auth_str.split(' and ')]
a56236c05b72930430e865e9846674b975e5ce29
50,040
import zipfile


def extract_from_zip_by_name(filename, open_name):
    """
    Search a zip archive for file entries whose name contains *open_name*.

    Walks the archive's directory tree (any depth) and returns a tuple
    of matching archive paths; prints a notice (and returns an empty
    tuple) when nothing matches.

    :raises RuntimeError: if *filename* is not a zip file
    """
    if not zipfile.is_zipfile(filename):
        raise RuntimeError(filename + ' is not a zip file!')
    matches = []
    with zipfile.ZipFile(filename) as archive:
        # Iterative pre-order walk; reversing children before pushing
        # keeps the original recursion's visiting order.
        stack = [zipfile.Path(archive)]
        while stack:
            node = stack.pop()
            if node.is_dir():
                stack.extend(reversed(list(node.iterdir())))
            elif node.is_file() and open_name in node.name:
                matches.append(node.at)
    if not matches:
        print(f'No files found matching "{open_name}"!')
    return tuple(matches)
3a0ffef807c3fdc4daccfbfd107cb6e2e850c607
50,041
def ed(fn, iterable, *args, **kwargs):
    """Consume the lazy result of ``fn(iterable, ...)`` into a sequence.

    If *iterable* has a finite length (tuples, lists, ...), the result
    is rebuilt with the same type; otherwise (e.g. generators) a tuple
    is produced.

    Why `ed`? Because it is the past tense -- take the lazy generator
    and consume it by creating a tuple or list out of it.
    """
    produced = fn(iterable, *args, **kwargs)
    container = type(iterable) if hasattr(iterable, '__len__') else tuple
    return container(produced)
a60defb77d409c388239d71727f1dab3926b08e6
50,042
def p(mode, code=None):
    """Wrap ASS drawing *code* in drawing tags at scale *mode*.

    (Docstring translated from Spanish: "Draws the specified figure.")
    Called with a single argument, that argument is taken as the drawing
    code and the scale defaults to 1.
    """
    if not code:
        # Single-argument form: the first positional is the code.
        code, mode = mode, 1
    return '{\\p%d}%s{\\p0}' % (mode, code)
05a152e19d36659aa86844834ce64dc3a99f7e72
50,044
def _extract_dir_data(path):
    """
    Parse a run directory named like ``tmp_<run>_with_<images in run>``.

    Args:
        path: Path of the directory.

    Returns:
        Tuple of (run index, image count per run), both ints.
    """
    parts = path.stem.split("_")
    # parts[1] is the run index, parts[3] the per-run image count.
    return int(parts[1]), int(parts[3])
748a6b8facd360cb7491d0d6d66c8621cbd93f71
50,045
import os
import subprocess


def run_asci2gdf(outfile, asci2gdf_bin, verbose=False):
    """
    Convert an ASCII file to GDF in place using the asci2gdf binary.

    The existing *outfile* is renamed to a temporary ``.txt`` file,
    converted back into *outfile* by asci2gdf, and the temporary file
    is removed afterwards.

    :param outfile: path of the ASCII file; overwritten with GDF output
    :param asci2gdf_bin: path of the asci2gdf executable (may contain
        environment variables)
    :param verbose: print the command line and the result path
    :return: outfile
    :raises FileNotFoundError: if the binary does not exist
    :raises subprocess.CalledProcessError: if the conversion fails
    """
    tempfile = outfile + '.txt'
    os.rename(outfile, tempfile)

    asci2gdf_bin = os.path.expandvars(asci2gdf_bin)
    # Raise instead of assert: asserts vanish under `python -O`.
    if not os.path.exists(asci2gdf_bin):
        raise FileNotFoundError(f'{asci2gdf_bin} does not exist')

    cmd = [asci2gdf_bin, '-o', outfile, tempfile]
    if verbose:
        print(' '.join(cmd))

    try:
        # check=True: previously a failed conversion went unnoticed and
        # a broken/missing output file was reported as success.
        subprocess.run(cmd, check=True)
    finally:
        # Cleanup the renamed temporary file in every case.
        if os.path.exists(tempfile):
            os.remove(tempfile)

    if verbose:
        print('Written GDF file:', outfile)
    return outfile
40497990b841ce58cda0b20fbed89fd6cfc2f8e8
50,046
def handle_ret_json(ctx):
    """Handler that returns a plain dict, to be serialized as JSON."""
    payload = {'this': 'will', "return": "JSON"}
    return payload
369b13243bafed18e2ef0b6443112b2aa659d9e8
50,047
import math


def norm(x, Mu=0, Sigma=1):
    """
    Probability density function of the normal distribution.

    (Docstring translated from Chinese.)  With the default parameters
    this is the standard normal density.

    Args:
        x (float): sample point at which to evaluate the density
        Mu (float): population mean (μ), default 0
        Sigma (float): population standard deviation (σ), default 1

    Returns:
        float: probability density at x
    """
    coefficient = 1 / ((math.sqrt(2 * math.pi)) * Sigma)
    exponent = -((x - Mu) ** 2) / (2 * Sigma ** 2)
    return coefficient * math.exp(exponent)
13d7b2e7e1d138dcc75d96f9cd706f1d8060360d
50,048
import sys


def convert_to_unicode(value):
    """Convert *value* to a Unicode string (Python 2 compatibility shim).

    On Python 3 every ``str`` is already Unicode, so the value is
    returned untouched; the Python-2-only names (``is_str``,
    ``unicode``) are never evaluated there.
    """
    if sys.version_info >= (3,):
        return value
    # Python 2 path: decode plain byte-strings to unicode.
    if is_str(value) and not isinstance(value, unicode):
        return value.decode('utf-8')
    return value
24d47a259bca4f41808820c2acdc4a9b01df2f60
50,049
import argparse


def parse_args(argv=None):
    """
    Create an argument parser and return parsed arguments.

    :param argv: optional list of argument strings; defaults to
        ``sys.argv[1:]``, keeping the old zero-argument call working
        while making the function testable/composable.
    """
    parser = argparse.ArgumentParser(description="Transfer weights of various sub-models")
    parser.add_argument('base', type=str, help='Path of the base model')
    parser.add_argument('submodel', type=str, help='Path of the sub-model')
    parser.add_argument('savepth', type=str, help='Checkpoint path where to save the model')
    # Optional flags selecting which weight groups to transfer.
    parser.add_argument('--task_head', default=False, action='store_true', help='Transfer Task Head (GLCU) weights')
    parser.add_argument('--cls_head', default=False, action='store_true', help='Transfer Cls Head weights')
    parser.add_argument('--mid_glcu', default=False, action='store_true', help='Transfer middle-GLCU weights')
    parser.add_argument('--reverse', default=False, action='store_true', help='Reverse mode - transfer weights from base to submodel')
    args = parser.parse_args(argv)
    return args
db3f71e88aafba23b6da5d2ab91deed3c89a2e3a
50,050
def normalize(X, lb, ub):
    """
    Rescale data into the unit interval [0, 1].

    # Arguments:
        X: Input data (scalar/vector/matrix)
        lb: Lower boundary (scalar/vector)
        ub: Upper boundary (scalar/vector)

    # Return:
        X normalized (scalar/vector/matrix)
    """
    span = ub - lb
    return (X - lb) / span
5baf9d1fdb0824418bc52ebec872c95befa5a72d
50,052
def get_description():
    """ Return a dict describing how to call this plotter """
    desc = dict()
    # This plot exposes a downloadable data table.
    desc['data'] = True
    desc['description'] = """This chart presents the rank a station's yearly summary value has against an unweighted population of available observations in the state. The green line is a simple average of the plot."""
    # Single user-selectable argument: which climate station to plot.
    desc['arguments'] = [
        dict(type='station', name='station', default='IA0000',
             label='Select Station:', network='IACLIMATE'),
    ]
    return desc
a014d516b3dd00b464005c82b14b96aeb2a8f025
50,053
def is_increasing(lst):
    """Return True if *lst* is strictly increasing.

    Empty lists are considered not increasing; single elements are.
    Iterative rewrite: the previous recursive version copied a slice at
    every step (O(n^2)) and could hit the recursion limit on long input.

    >>> is_increasing([1, 2, 3, 4])
    True
    >>> is_increasing([1, 3, 2, 4])
    False
    """
    if not lst:
        return False
    # Every adjacent pair must be strictly ascending.
    return all(a < b for a, b in zip(lst, lst[1:]))
ccf3fcb532206dd8db5792145dc554b0d07a3d98
50,054
import random


def __random_four_number():
    """Return a random 4-digit numeric string, zero-padded '0000'-'9999'.

    :return: 4-character digit string

    Examples:
    | ${num1} | Random Four Number |
    => | ${num1} = 0042 |
    """
    # zfill replaces the old hand-rolled if/elif padding ladder; the
    # value distribution (randint over 0..9999) is unchanged.
    return str(random.randint(0, 9999)).zfill(4)
bad066dc98f7336c3a73c89e772b01755fa8331d
50,056
def search_matrix(matrix, target):
    """
    Staircase search in a matrix whose rows and columns are both sorted
    in ascending order.

    :type matrix: List[List[int]]
    :type target: int
    :rtype: bool
    """
    if not matrix or not matrix[0]:
        return False
    # Quick reject when target falls outside the matrix's value range.
    if target < matrix[0][0] or target > matrix[-1][-1]:
        return False
    # Start at the bottom-left corner: moving right increases the value,
    # moving up decreases it.
    row, col = len(matrix) - 1, 0
    width = len(matrix[0])
    while row >= 0 and col < width:
        current = matrix[row][col]
        if current == target:
            return True
        if current < target:
            col += 1
        else:
            row -= 1
    return False
d96eee03c708288ac44bf7afb6de30fd4ed83b95
50,057
def all_byte_labels_are_defined(byte_labels):
    """ Check whether all labels have already been defined. """
    # Equivalent to the original `False not in [...]` membership test:
    # list membership compares with ==, so any value equal to False
    # (False, 0, 0.0) counts as undefined, while e.g. None does not.
    return not any(label["definition"] == False for label in byte_labels.values())
3ed2f9ec575dd0b22fc26b14fd12b2b8e9654adc
50,059
from datetime import datetime


def dateparse_notnull(datestring):
    """Parse a YYYYMMDD string, returning None for empty input.

    (dateparse-style helpers return "today" when handed an empty
    string; this one deliberately does not.)  Fixes a bug where the
    parsed datetime was assigned to a local but never returned, so the
    function always produced None.

    :param datestring: date text in '%Y%m%d' form, or '' / None
    :return: datetime, or None for falsy input
    :raises ValueError: if a non-empty string does not match '%Y%m%d'
    """
    if not datestring:
        return None
    return datetime.strptime(datestring, '%Y%m%d')
5da6bbf8cb7f59cde51bbc205df2c99aed296b5f
50,060
def median(data):
    """Returns the median of data.

    data -- iterable of mutually comparable values

    >>> median((0,2,3,10))
    2.5
    >>> median([13,42,666])
    42
    >>> median(['a string', 'string', 'a string'])
    'a string'
    >>> median(range(10))
    4.5
    """
    data = sorted(data)  # sorted() already copies; the old list() wrap was redundant
    n = len(data)
    if n == 0:
        # Explicit check replaces the old IndexError catch; same message.
        raise TypeError("needs at least one argument")
    if n % 2 == 0:
        # Even count: mean of the two middle values.
        return (data[(n // 2) - 1] + data[n // 2]) / 2
    return data[n // 2]
c09b764007757fe412af687f344ea0f4744a2014
50,061
def excel_string_width(str):
    """
    Estimate the width of *str* in Excel character units.

    This is only an example and won't give accurate results (each
    character is simply counted as 1.1 units); it will need to be
    replaced by something more rigorous.
    """
    length = len(str)
    # Empty strings are reported as exactly 0 (an int), not 0.0.
    return length * 1.1 if length else 0
7359c6442a699a1bcba22cdd718e9c0152dd2c7b
50,062