content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def _to_code(d): """7位->6位代码""" d['code'] = d['code'][1:] return d
9abf31e26b8b0bd19e82f2b1d8376787f3de3e2f
37,981
def pip_split_version(package):
    """Split a pip install string into (name, version specifier).

    For strings like ``"requests>=2.0"`` returns ``("requests", ">=2.0")``.
    If no comparison operator is present, returns ``(package, "")``.

    Bug fix: the original used ``False`` as the "not found" sentinel and a
    truthiness check, so an operator at index 0 (which is also falsy) was
    silently ignored. ``None`` is now used as the sentinel.
    """
    split_at_index = None
    for c in ("<", "=", ">"):
        index = package.find(c)
        # `is None` (not truthiness) so an operator at index 0 is honored.
        if index != -1 and (split_at_index is None or index < split_at_index):
            split_at_index = index
    if split_at_index is not None:
        return package[:split_at_index], package[split_at_index:]
    return package, ""
8255562c2779b4d4560037f7e6a1add83f3f44ab
37,982
def all_subclasses(cls):
    """Recursively collect every subclass of *cls*.

    :param object cls: Base class to start from.
    :return: Set of all direct and indirect subclasses.
    :rtype: set
    """
    result = set()
    for sub in cls.__subclasses__():
        result.add(sub)
        result |= all_subclasses(sub)
    return result
7461f4a00826f8929f6a88822ca14690a279a578
37,984
def validate_sequence_length(sequence):
    """Return True if *sequence* is at least 100 nucleotides long.

    Bug fix: the original abused ``assert`` for control flow; asserts are
    stripped under ``python -O``, which would have made this always return
    True. A plain comparison is equivalent and safe.
    """
    return len(sequence) >= 100
3f9aecbd050e52a6d264a4753acc4215e6227c9e
37,985
def config_section_data():
    """Produce the default configuration section for app.config,
    when called by `resilient-circuits config [-c|-u]`
    """
    # NOTE: this template is written verbatim into app.config; the <...>
    # placeholders must be filled in by the operator before the integration
    # will work.
    config_data = u"""[fn_microsoft_security_graph]
# Graph URL with version number
microsoft_graph_url=https://graph.microsoft.com/v1.0/
tenant_id=<Tenant directory id>
client_id=<App client id>
client_secret=<App client secret>

## Polling options
# How often polling should happen.  Value is in seconds. To disable polling, set this to zero.
msg_polling_interval=0
#incident_template=<location_of_template_file> # If not set uses default template.

# String query to apply to the alert polling component. This will be added to the end of the url
# when searching for alerts. The example shown below would make the whole search url equal to
# https://graph.microsoft.com/v1.0/security/alerts/?$filter=assignedTo eq 'analyst@m365x594651.onmicrosoft.com' and severity eq 'high'
# This query string is full OData so alert query can start with 'top=', 'skip=', 'filter=', etc. Do not add a '$' at the start
# of the value as that character is reserved for environment variables
#alert_query=filter=assignedTo eq 'analyst@m365x594651.onmicrosoft.com' and severity eq 'high'

# Alert Time range sec - Optional value in seconds to set the start dateTime values for the createdDateTime field when filtering alerts.
# This is calculated by adding to the filter 'createdDateTime ge (current_dateTime - alert_time_range_sec)
#alert_time_range_sec=3600
"""
    return config_data
b341b419614136c6177a30107e3dcc9f6fb40de2
37,986
import os


def perc_weights():
    """Return the path to a Percolator weights file used by the tests."""
    parts = ("tests", "data", "weights.txt")
    return os.path.join(*parts)
485f874d683cf984ea493484822360d50f74a0de
37,987
import sys


def data_progress_measure(data):
    """Checks and filters data for indicator for which progress is being calculate.

    If the Year column in data contains more than 4 characters (standard year
    format), takes the first 4 characters. If data contains disaggregation
    columns, take only the total line data. Removes any NA values. Checks
    that there is enough data to calculate progress.

    Args:
        data: DataFrame. Indicator data for which progress is being calculate.

    Returns:
        DataFrame: Data in allowable format for calculating progress.
    """
    # check if the year value contains more than 4 digits (indicating a range of years)
    if (data['Year'].astype(str).str.len() > 4).any():
        data['Year'] = data['Year'].astype(str).str.slice(0, 4).astype(int)  # take the first year in the range
    # get just the aggregate values from data
    cols = data.columns.values
    if len(cols) > 2:
        # NOTE(review): assumes the first column is Year and the last is
        # Value, with disaggregation columns in between — rows are kept only
        # when every disaggregation column is NA (the "total" line). Confirm
        # this column layout against the callers.
        cols = cols[1:-1]
        data = data[data[cols].isna().all('columns')]
        data = data.iloc[:, [0, -1]]
    data = data[data["Value"].notna()]  # remove any NA values from data
    # Not enough data left to calculate progress: abort the whole process.
    # NOTE(review): sys.exit() from a library function is drastic — confirm
    # callers rely on this behavior rather than an exception/None return.
    if data.shape[0] < 1:
        sys.exit()
    return data
9b66c5c3f66a631bb1c447e7f546ffc4c1de4168
37,988
import json


def load_data(filename):
    """Parse a JSON data file and return its contents.

    Returns an empty dict when the file cannot be read or parsed; a status
    message is printed either way.
    """
    try:
        with open(filename) as data_file:
            parsed = json.load(data_file)
    except Exception as error:
        print("parser json data file error: %s" % error)
        return {}
    print("parser json data file success!")
    return parsed
1f5b12a1cb96b20003071fbdefea9eb2885579db
37,989
import collections


def convert_pyradiomics_featurevector(featureVector):
    """Convert a PyRadiomics feature vector to WORC compatible features.

    - Splits the center-of-mass diagnostics into per-dimension orientation
      ('of_') features.
    - Drops all diagnostics/Image/Mask entries.
    - Renames the remaining keys to the PREDICT/WORC convention and prefixes
      them with 'PyRadiomics_'.

    Fix: parse the center-of-mass strings with ``ast.literal_eval`` instead
    of ``eval`` — the values are plain tuple/list literals, and ``eval`` on
    data strings is an injection hazard.
    """
    from ast import literal_eval  # safe replacement for eval() on data strings

    # Split center of mass features into the three dimensions and save them
    # as orientation features.
    com_sources = (
        ('diagnostics_Mask-original_CenterOfMassIndex', 'of_original_COM_Index'),
        ('diagnostics_Mask-original_CenterOfMass', 'of_original_COM'),
    )
    for source_key, prefix in com_sources:
        if source_key in featureVector:
            com = literal_eval(featureVector[source_key])
            featureVector[prefix + '_x'] = com[0]
            featureVector[prefix + '_y'] = com[1]
            if len(com) == 3:
                featureVector[prefix + '_z'] = com[2]

    # Delete all diagnostics features.
    omitted = ('Image', 'Mask', 'diagnostics')
    for k in list(featureVector.keys()):
        if k.startswith(omitted):
            del featureVector[k]

    # Rename keys to be similar to PREDICT: texture (tf_), shape (sf_),
    # histogram (hf_) and orientation (of_) features.
    new_featureVector = collections.OrderedDict()
    for k in featureVector.keys():
        if '_glcm' in k:
            kn = 'tf_' + k.replace('_glcm_', '_GLCM_')
        elif '_gldm' in k:
            kn = 'tf_' + k.replace('_gldm_', '_GLDM_')
        elif '_glrlm' in k:
            kn = 'tf_' + k.replace('_glrlm_', '_GLRLM_')
        elif '_glszm' in k:
            kn = 'tf_' + k.replace('_glszm_', '_GLSZM_')
        elif '_ngtdm' in k:
            kn = 'tf_' + k.replace('_ngtdm_', '_NGTDM_')
        elif '_shape' in k:
            kn = 'sf_' + k
        elif '_firstorder' in k:
            kn = 'hf_' + k
        elif 'of_' in k:
            # Center-of-mass keys created above: keep as-is.
            kn = k
        else:
            message = ('Key {} is unknown!').format(k)
            raise ValueError(message)

        # Add PyRadiomics to the key.
        new_featureVector['PyRadiomics_' + kn] = featureVector[k]

    return new_featureVector
6a1ba83c881394b40a561bfdc0713a8274314c03
37,990
def get_lighter_color(color):
    """Generate a lighter version of the provided color.

    Keyword arguments:
    color -- 3-tuple of channel values (RGB or BGR order is irrelevant)

    Return:
    Tuple with each channel increased by up to 30, capped so the largest
    channel never exceeds 255.
    """
    boost = min(255 - max(color), 30)
    return tuple(channel + boost for channel in color)
7613b298fd470f549fd426ffd8ea61ddefbd9b5b
37,992
import numpy


def rastrigin(x):
    """Evaluate the Rastrigin function.

    Parameters
    ----------
    x : array_like
        1-D array of points at which the Rastrigin function is to be
        computed.

    Returns
    -------
    float
        The value of the Rastrigin function.
    """
    arr = numpy.asarray(x)
    terms = numpy.square(arr) - 10.0 * numpy.cos(2.0 * numpy.pi * arr)
    return 10.0 * arr.size + terms.sum()
f197ec580e63d538cee4031da15c42a23ee2b20f
37,993
def mycroft_responses(context):
    """Collect and format mycroft responses from context.

    Args:
        context: behave context to extract messages from.

    Returns:
        (str) Mycroft responses including skill and dialog file
    """
    messages = context.bus.get_messages('speak')
    if not messages:
        return ''
    parts = ['Mycroft responded with:\n']
    for msg in messages:
        parts.append('Mycroft: ')
        meta = msg.data.get('meta', {})
        if 'dialog' in meta:
            parts.append('{}.dialog'.format(meta['dialog']))
            parts.append('({})\n'.format(meta.get('skill')))
        parts.append('"{}"\n'.format(msg.data['utterance']))
    return ''.join(parts)
368d85e75742a4afb1c0d127ab05cc2a148bd8e8
37,995
def obtain_valid_painter(painter_class, **kwargs):
    """Instantiate *painter_class*, keeping only keyword arguments it accepts.

    Any keyword may be passed; if direct construction fails with TypeError,
    a default painter is built and only keywords matching its existing
    attributes are retried.
    """
    try:
        return painter_class(**kwargs)
    except TypeError:
        probe = painter_class()
        accepted = {name: value for name, value in kwargs.items()
                    if hasattr(probe, name)}
        return painter_class(**accepted)
259f36dc3f75de608dfb53c3db23cf018df07adb
37,997
def attribute_loaded(model, attribute):
    """Returns true if the attribute of the model was already loaded.

    XXX: this works but relies on a SQLAlchemy implementation detail —
    loaded attributes live in the instance ``__dict__``. Figure out if SA
    provides a public way to query that information.
    """
    loaded = vars(model)
    return attribute in loaded
a4ea95a9b91a12563d116ba7846779be2b4849fc
37,998
def percent_diff(value1: float, value2: float, frac: bool = False) -> float:
    """Return the percentage difference between two values.

    The denominator is the average of value1 and value2.

    value1: float, first value
    value2: float, second value
    frac: bool, Default is False. Set to True returns a fraction instead of
        percentages; useful for performing calculations on spreadsheets.

    return: float, absolute value of the difference in percentages

    Usage
    -----
    >>> percent_diff(5, 7)
    33.33333333333333
    >>> percent_diff(5, 7, frac=True)
    0.3333333333333333
    """
    assert value1 >= 0 and value2 >= 0, 'Values must not be negative'
    mean = (value1 + value2) / 2.0
    ratio = abs(value1 - value2) / mean
    # Identity check kept deliberately: only the exact value False selects
    # percentage output, matching the original contract.
    if ratio is not None and frac is False:
        ratio *= 100
    return ratio
cffe298fb4218adc60bf75ff659090c41ae86922
37,999
def moving_vehicles(trans, treshold=5):
    """Count number of moving vehicles.

    A vehicle counts as moving when either component of its displacement
    vector exceeds *treshold* in absolute value.

    Returns a list of booleans, one per entry in *trans*.
    """
    flags = []
    for entry in trans:
        vector = entry['vector']
        delta_y = vector[0][0] - vector[1][0]
        delta_x = vector[0][1] - vector[1][1]
        flags.append(abs(delta_y) > treshold or abs(delta_x) > treshold)
    return flags
f88d360a2627c8f728a63f012549a0525b354e98
38,001
def scheduler(epoch):
    """Learning rate scheduler.

    Returns 1e-4 up to epoch 25, 1e-5 from epoch 26 to 60, and 1e-6 after
    epoch 60.

    Bug fix: the original checked ``epoch > 25`` first, which made the
    ``epoch > 60`` branch unreachable; the conditions are now ordered from
    most to least specific.
    """
    lr = 0.0001
    if epoch > 60:
        lr = 0.000001
    elif epoch > 25:
        lr = 0.00001
    print('Using learning rate', lr)
    return lr
c65e57cc31926c4eb911c312e709f3209102e92a
38,002
def occurs_once_in_sets(set_sequence):
    """Return, as a set, the elements that occur in exactly one set of
    *set_sequence*."""
    seen_once = set()
    seen_more = set()
    for current in set_sequence:
        for element in current:
            if element in seen_once:
                # Second sighting: demote to the "more than once" bucket.
                seen_once.discard(element)
                seen_more.add(element)
            elif element not in seen_more:
                seen_once.add(element)
    return seen_once
21344df038bf3af83584a9a1ef2dda1d6642af72
38,003
def kwargs_to_filter(kw):
    """Convert date-part key/values (year=yyyy, ...) into queryset filter
    kwargs.

    Values that cannot be converted to int are silently skipped; unknown
    keys are ignored. Pretty inefficient and with no checks on data.
    """
    date_parts = ('year', 'month', 'day', 'hour', 'minute', 'second')
    filters = {}
    for part in date_parts:
        if part not in kw:
            continue
        try:
            filters['date__' + part] = int(kw[part])
        except (TypeError, ValueError):
            pass
    return filters
c55cc804d9ad36add56913dcf269a4e9ba90146c
38,004
def getInfoOfOSMSearch(feature):
    """Restructure a GeoJSON feature list for easier analysis.

    Parameters
    ----------
    feature : dict
        GeoJSON object containing a "features" list.

    Returns
    -------
    list
        One dict per feature with keys "geometry" (lat/lon order), "type"
        and "properties". Point geometries become a single (lat, lon)
        tuple; other geometries become a list of (lat, lon) tuples.
    """
    results = []
    for geo in feature["features"]:
        geometry = geo["geometry"]
        coords = geometry["coordinates"]
        entry = {}
        if geometry["type"].lower() == "point":
            # GeoJSON stores (lon, lat); swap to (lat, lon).
            entry["geometry"] = (coords[1], coords[0])
        else:
            entry["geometry"] = [(c[1], c[0]) for c in coords]
        entry["type"] = geometry["type"]
        entry["properties"] = geo["properties"]
        results.append(entry)
    return results
d341ab30f7958b835dbe65e739f50d995ff80b7c
38,005
def label_conditions(df, plate_columns_map, plate_rows_map):
    """Assign the conditions used in each well, according to the plate maps.

    Adds 'strain' (from the column map), 'moi' (from the row map) and
    'condition' (strain + moi) columns to *df* and returns it.

    Bug fix: ``DataFrame.ix`` was removed in pandas 1.0; the moi==0
    assignment now uses ``.loc``.
    """
    # add strain and moi info
    df['strain'] = df['column'].apply(lambda value: plate_columns_map[value])
    df['moi'] = df['row'].apply(lambda value: plate_rows_map[value])
    # change the strain of all moi=0 wells to null (so that they are grouped together)
    df.loc[df.moi == 0, 'strain'] = 'null'
    # condition is defined as the strain + moi
    df['condition'] = df.strain.str.cat(df.moi.astype(str))
    return df
49e10273dd0ca84a746b3588f30965572f1b8aea
38,006
def path_distance(g, s, p, base):
    """Length of the shortest *p*-path from *s* to *base* in graph *g*.

    Returns 0 when *s* equals *base*, the number of edges on the shortest
    path when one exists, and None when *base* is unreachable.
    """
    if s == base:
        return 0

    def shortest_from(node, depth=1):
        # Depth-first search; returns the smallest depth at which *base*
        # is reached from *node*, or None.
        best = None
        for successor in g.objects(node, p):
            if successor == base:
                return depth
            candidate = shortest_from(successor, depth + 1)
            if candidate is not None and (best is None or candidate < best):
                best = candidate
        return best

    return shortest_from(s)
490c9cc4743fce70bd71e8ef45326f735b86c058
38,007
def merge(jsons):
    """Merge CMS lumi json data.

    Args:
        jsons (dict): Maps a name/filename to CMS lumi json data
            (dict of run -> list of lumisections).

    Returns:
        dict: Union of all inputs with duplicate lumisections removed.

    Fixes: the docstring claimed *jsons* was a list while the code indexes
    it as a dict; the aliased ``+=`` mutated the caller's input lists (a
    fresh list is now built instead); typo "form" -> "from" in the log
    message.
    """
    outjson = {}
    for name in jsons:
        print("Adding json from " + name + " to merge json")
        injson = jsons[name]
        for run, lumis in injson.items():
            if run in outjson:
                # Concatenate into a new list, then deduplicate; never
                # mutate the input's list in place.
                outjson[run] = list(set(outjson[run] + lumis))
            else:
                outjson[run] = lumis
    return outjson
cd0bcf2415cb6f48ba32d69793dcdc6fb77eadf6
38,008
def allow_guess(number):
    """Prompt the user for a guess and compare it against *number*.

    Prints whether the guess was too low or too high.
    Returns True when the guess is correct, False otherwise.
    """
    print('Guess a number')
    guess = int(input())
    if guess == number:
        return True
    if guess < number:
        print('Your guess is too low.')
    else:
        print('Your guess is too high.')
    return False
be3f97b8118c6a80bf0cc907fcc946bf89f0e9a0
38,009
def check_def_file(universe, res_name, atoms_name):
    """Check that all atoms from the definition file exist in *universe*.

    Parameters
    ----------
    universe : MDAnalysis universe instance
    res_name : str
        lipid residue name
    atoms_name : list of str
        list of atom names

    Returns
    -------
    Bool
        True if all atoms are found in the structure; False otherwise
        (an error message is printed).
    """
    present = set(universe.select_atoms(f"resname {res_name}").names)
    missing = set(atoms_name) - present
    if missing:
        miss_atoms = ",".join(missing)
        print(f"Some atoms ({miss_atoms}) of residue {res_name} from definition "
              "file are not found in your system.")
        return False
    return True
f2cff1286aca9a3be7e71b1d2f2d5cd810a93838
38,010
import re
import logging


def GetChromeosVersion(str_obj):
    """Parse CHROMEOS_VERSION_STRING out of command output.

    Args:
        str_obj: a string which may contain Chrome OS version info, or None.

    Returns:
        The value of CHROMEOS_VERSION_STRING set by chromeos_version.sh,
        or None if not found.
    """
    if str_obj is None:
        logging.info('CHROMEOS_VERSION_STRING NOT found')
        return None
    match = re.search(r'CHROMEOS_VERSION_STRING=([0-9_.]+)', str_obj)
    if match and match.group(1):
        logging.info('CHROMEOS_VERSION_STRING = %s' % match.group(1))
        return match.group(1)
    logging.info('CHROMEOS_VERSION_STRING NOT found')
    return None
d0dc48eb6c5f9c501024f155535e6e9adb1061c0
38,011
def JavaFileForUnitTest(test):
    """Returns the Java file name for a unit test."""
    return f'UT_{test}.java'
6a524204c50084188b5144ba10434b586f2bc735
38,012
def get_descendant(node, desc_id):
    """Depth-first search *node*'s subtree for the node with id *desc_id*.

    Parameters
    ----------
    node : scipy.cluster.hierarchy.ClusterNode
        The ancestor node to search from.
    desc_id : int
        The ID of the node to search for.

    Returns
    -------
    scipy.cluster.hierarchy.ClusterNode or None
        The matching node, or None when it is not in the subtree.
    """
    if node.id == desc_id:
        return node
    if node.is_leaf():
        return None
    # Check the immediate children before recursing.
    for child in (node.left, node.right):
        if child.id == desc_id:
            return child
    # Recurse left first, then right.
    for child in (node.left, node.right):
        found = get_descendant(child, desc_id)
        if found is not None:
            return found
    return None
99e081b2ee8dce513aad8fccbadb6cd94a017365
38,014
def length(x):
    """Calculate the length of a vector.

    This function is equivalent to the `length` function in GLSL.

    Args:
        x (:class:`~taichi.Matrix`): The vector of which to calculate the
            length.

    Returns:
        The Euclidean norm of the vector.

    Example::

        >>> x = ti.Vector([1, 1, 1])
        >>> length(x)
        1.732051
    """
    result = x.norm()
    return result
bab7dfde88c3cb7d9dc4a2f697dfe2443e9acabd
38,017
import hashlib


def hash_csv(csv_path):
    """Calculate a SHA-256 hash of the CSV file for data integrity checking.

    Args:
        csv_path (path-like): Path of the CSV file to hash.

    Returns:
        str: the hexdigest of the hash, with a 'sha256:' prefix.
    """
    chunk_size = 65536  # read in 64 KiB chunks to bound memory use
    digest = hashlib.sha256()
    with open(csv_path, 'rb') as stream:
        for chunk in iter(lambda: stream.read(chunk_size), b''):
            digest.update(chunk)
    return f"sha256:{digest.hexdigest()}"
690c7c281d6c2f74c37195462f89dc75bf227fc1
38,018
import requests


def retrieve_info(url, apidata):
    """Return a dictionary from the HTSworkflow API.

    Performs a GET request against *url* with *apidata* as query
    parameters. If the response body wraps its payload in a "result" key,
    the wrapped value is returned instead of the whole body.

    Raises:
        requests.HTTPError: when the response status is not 200.
    """
    web = requests.get(url, params=apidata)
    if web.status_code != 200:
        raise requests.HTTPError(
            "Failed to access {} error {}".format(url, web.status_code))
    result = web.json()
    # Unwrap the API envelope when present.
    if "result" in result:
        result = result["result"]
    return result
c30cdc0bc8b556062195da0dc43d836446652a6b
38,020
def pvct(pv: float, compr_total: float):
    """Pore Volume times Total Compressibility.

    Parameters
    ---
    pv : float
        pore volume
    compr_total : float
        total compressibility

    Return
    ---
    pvct : float
        pore volume total compressibility
    """
    product = pv * compr_total
    return product
31c84e4dc94cb2f1c78c9e26ba02cec4c81f0800
38,022
def cubocta_layer(f: int) -> int:
    """Number of contact points between equal spheres arranged in layer f of
    a cuboctahedron with f intervals between balls along each edge.

    Evaluates 8x^3 + 36x^2 + 52x + 24 (with x = f - 1) in Horner form.
    """
    x = f - 1
    return ((8 * x + 36) * x + 52) * x + 24
1719c075dcf02cdec79387ecc5b0577be67de6b5
38,023
def format_datestr(v):
    """Format a datetime or date object into the string format shared by
    xml and notation serializations (ISO 8601 with a trailing 'Z')."""
    if hasattr(v, 'microsecond'):
        # datetime objects: isoformat keeps microseconds when present.
        return '{}Z'.format(v.isoformat())
    return v.strftime('%Y-%m-%dT%H:%M:%SZ')
3f149e3babf7703281583d5b31b56e2b1d261fcb
38,024
def betterFib(n):
    """Return the nth Fibonacci number (F(0)=0, F(1)=1).

    Bug fix: the original allocated a fresh memo list on every call, so the
    memoization never took effect and the recursion stayed exponential.
    This iterative version is O(n) time and O(1) space.

    :param n: The nth term
    :return: The nth fibonnaci number
    """
    if n < 2:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
9374446b2f63943862b5b07c24d087c0083b319f
38,025
def top_level(symbol):
    """A rule that matches top-level symbols.

    Returns True for a non-empty symbol containing no dot; falsy results
    are normalized to None.
    """
    if symbol and '.' not in symbol:
        return True
    return None
71a80314e80e2242d7b505a19939e26eb060dded
38,026
def convert_string_key_to_int_key(orig_dict):
    """Return a copy of *orig_dict* with every key converted to int."""
    return {int(key): val for key, val in orig_dict.items()}
9d08902d57b4df9be7a82fb6478357bae3c0af22
38,027
def _seq_id_filter(id: str) -> str: """ Replaces underscores and semicolons with dashes in the sequence IDs. This is needed to have nice output filename templates with underscore as a delimiter for parameters """ result = id.replace("_", "-") return result.replace(";", "-")
e6355b1f94e76d255a1052072706619299ea4b51
38,028
import os


def save_separately_filenames(self, path):
    """Compute paths for separately saved attributes.

    Parameters
    ----------
    path : str
        Path to which the main checkpoint file is being saved.

    Returns
    -------
    paths : dict
        Maps each attribute name in ``self.save_separately`` to a derived
        path based on *path*.
    """
    stem, extension = os.path.splitext(path)
    return {name: "{}_{}{}".format(stem, name, extension)
            for name in self.save_separately}
eb8cfd96143c4cb9001a400e2d0c223bf3edaa32
38,029
import hashlib def _md5_hash_as_long(input_value): """Return the hash of the input value converted to a long.""" hex_hash = hashlib.md5(str(input_value).encode('utf-8')).hexdigest() return int(hex_hash, 16)
748cf6d783a17f07c3ea25280b9faae3d76211be
38,032
def _get_name(f): """Gets the name of underlying objects.""" if hasattr(f, '__name__'): return f.__name__ # Next clause handles functools.partial objects. if hasattr(f, 'func') and hasattr(f.func, '__name__'): return f.func.__name__ return repr(f)
c6f5c35b004afea321d981b5db9dd7be52b5efa6
38,036
def combineSameLocationChanges(changes):
    """Destructively modifies changes.

    Walks the list pairwise and folds adjacent '+'/'-' change pairs that
    describe the same location into a single '-+' (replace) change,
    removing the second entry of the pair.
    """
    j = 1
    while j < len(changes):
        # if possible, combine - and + changes at the same location into -+ changes
        if (changes[j-1].totalCol == changes[j].totalCol) and ((changes[j-1].editType + changes[j].editType) == "+-"):
            # Insert followed by delete at the same column: fold into a
            # replace. The inserted text becomes newText, the deleted text
            # becomes text.
            changes[j-1].editType = "-+"
            changes[j-1].newText = changes[j-1].text
            changes[j-1].text = changes[j].text
            changes[j:] = changes[j+1:]
        elif (changes[j-1].totalCol == changes[j].totalCol - len(changes[j-1].text)) and \
                (changes[j-1].editType + changes[j].editType) == "-+":
            # Delete followed by an insert positioned directly after the
            # deleted span: fold into a replace keeping the deleted text.
            changes[j-1].editType = "-+"
            changes[j-1].newText = changes[j].text
            changes[j:] = changes[j+1:]
        # NOTE(review): j advances even after a merge, so a pair that becomes
        # adjacent at this position is not re-examined — confirm intended.
        j += 1
    return changes
421f2f51eb1fbcdf504d7a986329c6633e0900ec
38,039
def g_factory(epoch, gamma_param, f):
    """Generate g inplace function.

    Returns a function interpolating between f[epoch + 1] and f[epoch]
    with weights (1 - gamma_param) and gamma_param respectively.
    """
    def g(vec):
        return (1 - gamma_param) * f[epoch + 1](vec) + gamma_param * f[epoch](vec)
    return g
bbf267272784f01ee7b12efe7e9e215a634aa2b8
38,040
def get_sampler_state(sampler):
    """Return the sampler's attributes as a dict so its properties can be
    saved; the unpicklable 'pool' entry is removed."""
    state = dict(vars(sampler))
    del state['pool']
    return state
ad64f1cadb7b29baf09900db023000b5c6841ca8
38,041
def if_none(obj, default):
    """Returns `obj`, unless it's `None`, in which case returns `default`.

    >>> if_none(42, "Hello!")
    42
    >>> if_none(None, "Hello!")
    'Hello!'
    """
    if obj is None:
        return default
    return obj
fd851c9eb1eaa0048e3a0ac2d45b15fb208080a3
38,043
def start(parser, argv, scope, callback_before=None):
    """Utility function to execute a subcommand.

    The function will look up in the ``scope`` if there is a function called
    ``run_<parser.args.command>`` and will run it using ``parser.args`` as
    first positional argument.

    Args:
        parser: an ArgumentParser instance.
        argv: the list of command line arguments without the script name.
        scope (dict): map containing (eventually) the functions to be called.
        callback_before: optional callable invoked with the parsed args just
            before dispatching to the command function.

    Raises:
        NotImplementedError: if ``scope`` doesn't contain a function called
            ``run_<parser.args.command>``.
        SystemExit: when no command was given (help is printed first).
    """
    args = parser.parse_args(argv)
    if not args.command:
        parser.print_help()
        raise SystemExit()
    # look up in the current scope for a function called 'run_<command>'
    # replacing all the dashes '-' with the lowercase character '_'
    func = scope.get('run_' + args.command.replace('-', '_'))
    # if no command has been found, raise a `NotImplementedError`
    if not func:
        raise NotImplementedError('Command `{}` not yet implemented'.
                                  format(args.command))
    # Default peer: local endpoints (BigchainDB HTTP API + MongoDB).
    if args.peer is None:
        args.peer = ['http://localhost:9984,localhost:27017']
    # Split an 'app_id:app_key' auth string into a dict; empty dict = no auth.
    if args.auth:
        app_id, app_key = args.auth.split(':')
        args.auth = {'app_id': app_id, 'app_key': app_key}
    else:
        args.auth = {}
    if callback_before:
        callback_before(args)
    return func(args)
73a10dab72d6f6ead14d4519e47c67fbfa8a6fbc
38,044
def _swiftmodule_for_cpu(swiftmodule_files, cpu): """Select the cpu specific swiftmodule.""" # The paths will be of the following format: # ABC.framework/Modules/ABC.swiftmodule/<arch>.swiftmodule # Where <arch> will be a common arch like x86_64, arm64, etc. named_files = {f.basename: f for f in swiftmodule_files} module = named_files.get("{}.swiftmodule".format(cpu)) if not module and cpu == "armv7": module = named_files.get("arm.swiftmodule") return module
59e978f22f4b1959ef32b0f2d68b0d92ec7fabe0
38,046
import os
import json


def generate_db_json(source_file, command):
    """Generate a single-entry compilation database JSON string for
    *source_file* compiled with *command*."""
    entry = {
        "arguments": command.split(),
        "directory": os.getcwd(),
        "file": source_file,
    }
    return json.dumps([entry], indent=4)
87d11cbac043bc7acf26970c17f939eabe15fdec
38,047
def get_output_subfolder(only_filename, limit_examples, limit_classes):
    """Subfolder name for records.

    Returns:
        A name reflecting whichever restriction is set, or None when no
        restriction is given.
    """
    if only_filename is not None:
        return 'only-{}'.format(only_filename)
    if limit_examples is not None and limit_classes is not None:
        return 'limit-{}-classes-{}'.format(limit_examples, limit_classes)
    if limit_examples is not None:
        return 'limit-{}'.format(limit_examples)
    if limit_classes is not None:
        return 'classes-{}'.format(limit_classes)
    return None
725a213c428fab526aa6e3627d75f295b15decb4
38,051
from typing import Dict


def get_bags_inside(color: str, dependency_dict: Dict[str, Dict[str, int]]) -> int:
    """Recursively count the bags stored within *color*, including itself."""
    total = 1
    for inner_color, multiplicity in dependency_dict[color].items():
        total += multiplicity * get_bags_inside(inner_color, dependency_dict)
    return total
b97da4a194d3aba89f7eec9ac684812e139d116b
38,052
def decay_function(key: int):
    """Return a decay function x -> (x - 1) ** (-key), used to create a
    weighted sequence graph."""
    def decay(x):
        return (x - 1) ** (-key)
    return decay
bafd0b5cf57d96465c3c42a111226301c2f69444
38,053
import os


def filesize(path, stat=os.stat):
    """Return the file size in bytes.

    *stat* is injectable for testing; index 6 of the stat result tuple is
    st_size.
    """
    info = stat(path)
    return info[6]
b8fc0199198f439865bbd1bac9cdb59735cd031e
38,054
def remove_unknown_locations(locations, logi):
    """Remove unknown locations by selecting *locations* with the
    index/boolean mask *logi*."""
    known = locations[logi]
    return known
983ffd42dab56e363fe9a508c875d3fe6a783bc9
38,056
def default_feature_minscore():
    """Relation between word length (keys) and minimum score for named
    entity recognition of features (values)."""
    return {1: 100, 3: 100, 4: 90, 5: 85, 6: 80, 8: 75}
1eff7bc495d8befee569fab570187f7b9dd9bdc0
38,057
def pick(seq, func, maxobj=None):
    """Picks the object obj where func(obj) has the highest value.

    When *seq* is empty, *maxobj* is returned unchanged. Ties keep the
    first maximal element.
    """
    best_score = None
    best = maxobj
    for candidate in seq:
        score = func(candidate)
        if best_score is None or score > best_score:
            best_score, best = score, candidate
    return best
707f9534fdec3b66bd311238689e2fe8e3456fbd
38,060
from typing import BinaryIO from typing import List def _create_test_file(tfile: BinaryIO, nlines=10) -> List[str]: """Helper function for populating a testing temp file with numbered example lines for comparison""" lines = [f"This is an example line {i}\n".encode('utf-8') for i in range(1, nlines+1)] tfile.writelines(lines) tfile.flush() return [l.decode().strip("\n") for l in lines]
4eac8c5e351c415ddc5734fa93e7e2aed0e61e2e
38,061
import ntpath
import base64


def send_file(path, filename=None, mime_type=None):
    """Convert a file into the format expected by the Download component.

    :param path: path to the file to be sent
    :param filename: name of the file; defaults to the basename of *path*
    :param mime_type: mime type of the file (optional, passed to Blob in
        the javascript layer)
    :return: dict of file content (base64 encoded) and meta data used by
        the Download component
    """
    if filename is None:
        # Fall back to the original file name.
        filename = ntpath.basename(path)
    with open(path, 'rb') as handle:
        content = base64.b64encode(handle.read()).decode()
    return dict(content=content, filename=filename, mime_type=mime_type,
                base64=True)
efd32f249f292ec5e15924b6e30f5b107029e90b
38,062
import re


def extract_bibtex_items(latex_source):
    """Extract all bibtex items in a LaTeX file which are not commented out.

    The pattern is compiled in VERBOSE mode, so layout whitespace inside it
    is insignificant.
    """
    pattern = re.compile(r"""(?<!%) # Lookbehind to check that the bibtex item is not commented out.
        (\\bibitem{.*?}.+?) # Match the entire bibtex item.
        (?=\\bibitem{|\\end{thebibliography}|$) # Match only until the next bibtex item, end of bibliography or end of line.
        """, re.DOTALL | re.VERBOSE)
    return pattern.findall(latex_source)
995a9d9559a6da564af010254fd466a8b729beb2
38,065
import base64


def get_content_b64(filepath):
    """Given a filepath, return the base64 encoded representation of the
    file's content."""
    with open(filepath, 'rb') as handle:
        raw = handle.read()
    return base64.b64encode(raw).decode('utf-8')
b95ad844796425a9b5e112b5d1801dd2c8ee28c3
38,066
import ast


def isConstant(x):
    """Determine whether the provided AST node is a constant literal.

    Bug fix: the original checked ``type(x) in [ast.Num, ast.Str, ast.Bytes,
    ast.NameConstant]``. Since Python 3.8 the parser emits ``ast.Constant``
    nodes (the old classes are deprecated aliases), so that exact-type check
    matched nothing on modern Python. ``isinstance`` with ``ast.Constant``
    covers all constant literals.
    """
    return isinstance(x, ast.Constant)
232946a37e8bce7c093d6f8a446e11019cfc798c
38,069
def get_instances_not_in_output(possible_instances, stdout_output):
    """Return the instances whose id does not appear in *stdout_output*."""
    missing = []
    for instance in possible_instances:
        if instance['id'] not in stdout_output:
            missing.append(instance)
    return missing
540663c38511c42f95466da06cb05153ce15a872
38,070
def get_layer_names(module, display=True, names=None, upper_name='', _title=True):
    """Recursively show a network's named layers

    Parameters:
        module (nn.Module): The pytorch module to display the submodule names.
        display (bool): print out a table of the names with attempts at sizes.
        names (list, optional): accumulator used by the recursion; leave None.
        upper_name (str): slash-separated prefix of the parent module path.
        _title (bool): internal flag so the table header prints only once.

    Returns (List): List of submodule names.
    """
    fstr = "{:^40} | {:^18} | {:^10} | {:^10}"
    if names is None:
        names = []
    if _title:
        print(fstr.format("Layer", "Class Name", "Input Size", "Output Size"))
        print(f"{'-'*40} | {'-'*18} | {'-'*10} | {'-'*10}")
    # NOTE(review): iterates the private `_modules` dict of nn.Module —
    # confirm against the torch version in use; `named_children()` is the
    # public equivalent.
    for name, m in module._modules.items():
        if m is not None:
            if display:
                # Sizes are best-effort: modules with a 2D+ weight report
                # weight.size(1)/(0); anything without a weight prints '-'.
                print(fstr.format(
                    upper_name+name,
                    m.__class__.__name__,
                    m.weight.size(1) if hasattr(m, 'weight') and len(m.weight.shape)>1 else '-',
                    m.weight.size(0) if hasattr(m, 'weight') else '-'))
            names.append(upper_name+name)
            # Recurse with the shared accumulator; suppress the header.
            get_layer_names(m, display, names, upper_name+name+"/", False)
    return names
e918eb6e7a1241d2153f0a1e77989d5a8344a033
38,071
from typing import List def _tf_format(x_data: List[List[int]], max_len: int, zero_index: int): """ Pad with elements until it has max_len or shorten it until it has max_len. When padding insert the zero index so it doesn't contribute anything :param x_data: :param max_len: :return: """ for i in range(len(x_data)): row = x_data[i] while len(row) < max_len: row.append(zero_index) x_data[i] = x_data[i][:max_len] return x_data
ad5d5fac86c8bc66150ef91a14f51ab5d3476bb6
38,076
import numpy


def mse(estimation, target):
    """Mean square error between a set of outputs and target values.

    Examples are organized as rows; features of the estimated values or
    targets are organized as columns. The mean is taken over the example
    axis (axis 0).

    Parameters:
        estimation (array): an N-dimensional array with the values estimated
            by your procedure
        target (array): an N-dimensional array with the expected values

    Returns:
        float: The average of the squared error between the estimated value
        and the target
    """
    squared_error = numpy.square(estimation - target)
    return squared_error.mean(axis=0)
ecb8a51e7fcced135a3efe7b6fec42e6003a7c33
38,077
import re


def is_url(url: str):
    """Uses RegEx to check whether a string is a HTTP(s) link"""
    # https://stackoverflow.com/a/17773849/8314159
    pattern = (r"(https?://(?:www\.|(?!www))[a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|www\.[a-zA-Z0-9]"
               r"[a-zA-Z0-9-]+[a-zA-Z0-9]\.[^\s]{2,}|https?://(?:www\.|(?!www))[a-zA-Z0-9]+\.[^\s]{2,}"
               r"|www\.[a-zA-Z0-9]+\.[^\s]{2,})")
    return re.search(pattern, url)
c58be1bc1775024ef412ff15529de7a780bde5ea
38,078
def format_usage(usage):
    """Pass-through formatter that omits the "Usage:" prefix."""
    return usage
623872d7c88d01b04fb4204bb3136b1ae692e95b
38,079
from random import randint, choice


def random_chinese_character(n: int = 3, surname: bool = True) -> str:
    """Return a random string of *n* Chinese (CJK) characters.

    :param n: number of characters to generate
    :param surname: when True, the first character is drawn from the 100
        most common Chinese surnames so the result resembles a personal name
    :return: a string of *n* characters in the CJK range U+4E00..U+9FA5
    """
    """
    01李 02王 03张 04刘 05陈 06杨 07赵 08黄 09周 10吴
    11徐 12孙 13胡 14朱 15高 16林 17何 18郭 19马 20罗
    21梁 22宋 23郑 24谢 25韩 26唐 27冯 28于 29董 30萧
    31程 32曹 33袁 34邓 35许 36傅 37沈 38曾 39彭 40吕
    41苏 42卢 43蒋 44蔡 45贾 46丁 47魏 48薛 49叶 50阎
    51余 52潘 53杜 54戴 55夏 56钟 57汪 58田 59任 60姜
    61范 62方 63石 64姚 65谭 66廖 67邹 68熊 69金 70陆
    71郝 72孔 73白 74崔 75康 76毛 77邱 78秦 79江 80史
    81顾 82侯 83邵 84孟 85龙 86万 87段 88漕 89钱 90汤
    91尹 92黎 93易 94常 95武 96乔 97贺 98赖 99龚 100文
    """
    # NOTE: the original table listed 0x51AF (冯) twice and omitted
    # 0x4E8E (于, rank 28); entry 28 is corrected below.
    surnames = [0x674E, 0x738B, 0x5F20, 0x5218, 0x9648, 0x6768, 0x8D75, 0x9EC4, 0x5468, 0x5434,
                0x5F90, 0x5B59, 0x80E1, 0x6731, 0x9AD8, 0x6797, 0x4F55, 0x90ED, 0x9A6C, 0x7F57,
                0x6881, 0x5B8B, 0x90D1, 0x8C22, 0x97E9, 0x5510, 0x51AF, 0x4E8E, 0x8463, 0x8427,
                0x7A0B, 0x66F9, 0x8881, 0x9093, 0x8BB8, 0x5085, 0x6C88, 0x66FE, 0x5F6D, 0x5415,
                0x82CF, 0x5362, 0x848B, 0x8521, 0x8D3E, 0x4E01, 0x9B4F, 0x859B, 0x53F6, 0x960E,
                0x4F59, 0x6F58, 0x675C, 0x6234, 0x590F, 0x949F, 0x6C6A, 0x7530, 0x4EFB, 0x59DC,
                0x8303, 0x65B9, 0x77F3, 0x59DA, 0x8C2D, 0x5ED6, 0x90B9, 0x718A, 0x91D1, 0x9646,
                0x90DD, 0x5B54, 0x767D, 0x5D14, 0x5EB7, 0x6BDB, 0x90B1, 0x79E6, 0x6C5F, 0x53F2,
                0x987E, 0x4FAF, 0x90B5, 0x5B5F, 0x9F99, 0x4E07, 0x6BB5, 0x6F15, 0x94B1, 0x6C64,
                0x5C39, 0x9ECE, 0x6613, 0x5E38, 0x6B66, 0x4E54, 0x8D3A, 0x8D56, 0x9F9A, 0x6587,
                ]
    unicode_min = 0x4E00
    unicode_max = 0x9FA5
    result = [chr(randint(unicode_min, unicode_max)) for _ in range(n)]
    # Guarding on `result` avoids the IndexError the original raised for
    # n == 0; the original also silently truncated the output to 3 chars
    # (result[:3]), contradicting the `n` parameter — removed.
    if surname and result:
        result[0] = chr(choice(surnames))
    return ''.join(result)
447d0b4c8250d440bef245afaa6726a890f56292
38,081
def post_order_traversal(tree):
    """Iteratively visit left subtree, then right subtree, then the node.

    :param tree: root node exposing ``left``, ``right`` and ``value``
    :return: list of node values in post-order (left -> right -> curr)
    """
    to_visit = [tree]
    visited = set()
    path = []
    while to_visit:
        node = to_visit[-1]
        # Descend into the left child first, then the right one; only
        # schedule children that have not been emitted yet.
        if node.left and node.left not in visited:
            to_visit.append(node.left)
            continue
        if node.right and node.right not in visited:
            to_visit.append(node.right)
            continue
        # Both subtrees are done: emit the node and mark it visited so its
        # parent does not re-descend into it. The original never added to
        # `visited`, causing an infinite loop on any tree with children.
        to_visit.pop()
        visited.add(node)
        path.append(node.value)
    return path
37fbdf2b815121a836c2cb4b7c84e74b8f784f6e
38,083
import os


def to_filelist(dir_path):
    """Write the directory's visible entry names to "0.filelist" and return them.

    Entries starting with '.' or '0' are skipped. The listing starts with an
    "itemnum: <count>" header followed by one name per line.
    """
    flist = []
    for fname in os.listdir(dir_path):
        if fname.startswith('.') or fname.startswith('0'):
            continue
        flist.append(fname)
    listing_path = os.path.join(dir_path, '0.filelist')
    with open(listing_path, 'w') as f:
        f.write('itemnum: ' + str(len(flist)) + '\n')
        f.write('\n'.join(flist) + '\n')
    return flist
5caa7f67cbe17ba848f506afdfdb70fe0a5ef13f
38,084
def _make_dataset(
        df, label_column_name, left_value_column_name,
        right_value_column_name, standing_out_label_name_list):
    """Build the plot dataset from the data frame.

    Parameters
    ----------
    df : pandas.DataFrame
        The target data frame (not modified; a copy is taken).
    label_column_name : str
        Column name of the label.
    left_value_column_name : str
        Column name of the value on the left.
    right_value_column_name : str
        Column name of the value on the right.
    standing_out_label_name_list : list of str
        Labels that should stand out; their rows are appended last.

    Returns
    -------
    dataset : list of dicts
        Each dict has the keys 'label', 'left', 'right' and
        'isStandingOutData' (0 or 1).
    """
    df = df.copy()
    df.rename(columns={
        label_column_name: 'label',
        left_value_column_name: 'left',
        right_value_column_name: 'right',
    }, inplace=True)

    def _row_dict(sr, standing_out_flag):
        # One output record per data-frame row.
        return {
            'label': sr['label'],
            'left': sr['left'],
            'right': sr['right'],
            'isStandingOutData': standing_out_flag,
        }

    dataset = []
    # Ordinary rows first, stand-out rows afterwards so they end up on top.
    for _, sr in df.iterrows():
        if sr['label'] not in standing_out_label_name_list:
            dataset.append(_row_dict(sr, 0))
    for _, sr in df.iterrows():
        if sr['label'] in standing_out_label_name_list:
            dataset.append(_row_dict(sr, 1))
    return dataset
3eb220101354ea919369dfeb1317310f18a4df97
38,085
import sys
import os


def get_script_path(build_dir):
    """Return the platform-specific path of the pychecker script in *build_dir*."""
    script = "pychecker.bat" if sys.platform == "win32" else "pychecker"
    return os.path.join(build_dir, script)
49d5bfb02a12b4ab3d9791d4fc86179de118968c
38,086
from pathlib import Path
import logging


def evaluate_implementation(callback_file: Path, function_name: str) -> bool:
    """Return True when *function_name* occurs in the text of *callback_file*."""
    source = callback_file.read_text()
    if function_name not in source:
        logging.error(f"Did not find '{function_name}' in '{callback_file}'.")
        return False
    logging.info(f"Found '{function_name}' in '{callback_file}'.")
    return True
3ad087dc2db09aeb78874c9b934a553f7457e511
38,088
def _get_user_id(data):
    """Extract the ID of the user who generated this event.

    Event payload schema depends on the event type, so both the 'user' and
    'user_id' keys are tried in that order.

    :param data: event payload
    :type data: dict
    :rtype: non-empty string or None
    """
    return next(
        (data.get(key) for key in ('user', 'user_id') if data.get(key)),
        None,
    )
4f4b418da99bdd6ae99e628a30fc0c02c2cfe4ab
38,089
def validate_number(num, low, high):
    """Return True when *num* parses as an integer within [low, high]."""
    try:
        value = int(num)
    except ValueError:
        return False
    return low <= value <= high
ffe695934b09c8c6e34a3ea1880f473efb3a36c9
38,090
def merge_data(dfs):
    """Merge prepared DataFrames into one global and one local frame.

    :param dfs: list of DataFrames, each carrying a 'date' column plus
        either a 'country' or a 'canton' column
    :return df_global: DataFrame with Covid data per country (or None)
    :return df_local: DataFrame with Covid data per Swiss canton (or None)
    """
    def _accumulate(acc, frame, keys):
        # The first frame seeds the accumulator; later ones are left-joined.
        if acc is None:
            return frame
        return acc.merge(right=frame, on=keys, how='left')

    df_global, df_local = None, None
    for frame in dfs:
        if 'country' in frame.columns:
            df_global = _accumulate(df_global, frame, ['country', 'date'])
        elif 'canton' in frame.columns:
            df_local = _accumulate(df_local, frame, ['canton', 'date'])
    return df_global, df_local
e2de5369e6d4974b204f720c4007726742147923
38,094
def has_path(coll, path):
    """Return True when every step of *path* can be followed through the
    nested collection *coll* (dicts by key, sequences by index)."""
    node = coll
    for step in path:
        try:
            node = node[step]
        except (KeyError, IndexError):
            return False
    return True
9904656c367c466dc6fcf352a828e7601a5ce6d2
38,097
def headers(questions):
    """Generate the CSV headers from an array of questions.

    The order matters: four fixed Digital Marketplace columns come first,
    then one column per question, formatted "<position>:<question id>"
    where position is the question's 1-based index in the array
    (e.g. "43:SQ3-1d" is old question id SQ3-1d at position 43).

    :param questions:
    :return array of strings representing headers:
    """
    csv_headers = [
        'Digital Marketplace ID',
        'Digital Marketplace Name',
        'Digital Marketplace Duns number',
        'State of Declaration',
    ]
    csv_headers.extend(
        "{}:{}".format(position, question)
        for position, question in enumerate(questions, start=1)
    )
    return csv_headers
81f7da32157ea4bbc2467ccf112f97c7f68fe688
38,100
from typing import Optional
from typing import Union


def _post_init_start_end(  # noqa: F811
    value: Optional[Union[int, str]]
) -> Optional[Union[int, str]]:
    """Normalise a start/end value.

    Ints and None pass through unchanged; any string containing '#'
    collapses to "#"; every other string raises ValueError.
    """
    if value is None or not isinstance(value, str):
        return value
    if "#" not in value:
        raise ValueError("start/end must be int or '#'")
    return "#"
998efb7f184b8d55ec71417e10bcb68849a21c16
38,101
import logging


def scope_logger(cls):
    """Class decorator that attaches a module-qualified logger as ``cls.log``.

    Example:
        >>> @scope_logger
        >>> class Test:
        >>>     def __init__(self):
        >>>         self.log.info("class instantiated")
        >>> t = Test()
    """
    cls.log = logging.getLogger('{0}.{1}'.format(cls.__module__, cls.__name__))
    return cls
84e00f8f668accd362d4fc29016fd2ec95f0bcef
38,102
def cvtInt(intstr):
    """Convert an integer string into an integer.

    Parameters:
      intstr    - integer string, format: "[0-9]+"

    Return Value:
      The converted integer on success, None on any parse error.
    """
    try:
        return int(intstr)
    except (SyntaxError, NameError, TypeError, ValueError):
        return None
907a73c358d6de3231caffc610afaf3f032613ab
38,104
def circle_fitness(individual):
    """Score how closely the individual's dataframe resembles the unit circle.

    Higher is better: angular spread (variance of the angle column) minus
    the worst radial deviation from radius 1.
    """
    column_names = [meta.name for meta in individual.metadata]
    frame = individual.dataframe
    angle_col = frame[column_names.index("AngleUniform")]
    radius_col = frame[column_names.index("RadiusUniform")]
    return angle_col.var() - (radius_col - 1).abs().max()
7fc957c0476968c798b29c1d6676e38329e2f510
38,105
def add_variable_to_dataset(dataset, var_name):
    """Return the Variable object for *var_name*, creating it only if absent.

    Lookup is case sensitive: "radius" and "Radius" are two different
    variable names.
    """
    if var_name in dataset.variable_names:
        return dataset.variable(var_name)
    return dataset.add_variable(var_name)
109359e3f986ea08c1166fd72a76d878238f6a13
38,106
import unicodedata


def filter_non_printable(string_to_filter):
    """Remove Unicode format characters (category "Cf", e.g. zero-width
    spaces and BiDi marks) from *string_to_filter*.

    :param string_to_filter: input text
    :return: the text without "Cf" characters
    """
    # NOTE: the original tested ``category(c) in set('Cf')``, which compares
    # the two-letter category string against {'C', 'f'} and therefore never
    # filtered anything; compare against the full category name instead.
    return ''.join(
        c for c in string_to_filter
        if unicodedata.category(c) != 'Cf'
    )
dc9a3513d29ea8891a952b879b31bcb0752f4bb0
38,107
def _type_to_template(qname, subject_type, predicate, object_type):
    """
    >>> _type_to_template(lambda x: "q:"+x, "subject", "pred", "object")
    'rdf/q:subject/q:pred/q:object.html'
    >>> _type_to_template(lambda x: "q:"+x, "subject", "pred", None)
    'rdf/q:subject/q:pred/rdf:Resource.html'
    >>> _type_to_template(lambda x: "q:"+x, "subject", None, "object")
    'rdf/q:subject/q:object.html'
    >>> _type_to_template(lambda x: "q:"+x, "subject", None, None)
    'rdf/q:subject.html'
    >>> _type_to_template(lambda x: "q:"+x, None, None, None)
    'rdf/rdf:Resource.html'
    """
    # Missing types fall back to the generic rdf:Resource qname.
    subject_qname = qname(subject_type) if subject_type else "rdf:Resource"
    object_qname = qname(object_type) if object_type else "rdf:Resource"
    if predicate:
        parts = [subject_qname, qname(predicate), object_qname]
    elif object_type:
        parts = [subject_qname, object_qname]
    else:
        parts = [subject_qname]
    return "rdf/{}.html".format("/".join(parts))
1d7243071cca7226947bfd763d00b7be50620c15
38,108
import os
import codecs


def read(*paths: str) -> str:
    """Read a file relative to this module's directory via codecs.open().

    :param paths: path components joined onto the module's directory
    :return: file content as a string
    """
    target = os.path.join(os.path.dirname(__file__), *paths)
    with codecs.open(target) as handle:
        return handle.read()
9dfcf0fc5f8501399aebb7f4632509913afee051
38,111
def __normalize_str(name):
    """Lower-case *name* and strip every non-alphanumeric character."""
    kept = [ch for ch in name if ch.isalnum()]
    return ''.join(kept).lower()
bf174967de9d8aa812fd78ad9a18e6f076bc137f
38,113
def c1c3c2_2_c1c2c3(m):
    """Reorder a hex string from C1||C3||C2 layout to C1||C2||C3 layout.

    C1 is the first 64 bytes, C3 the following 32 bytes, C2 the remainder.
    Prints the reordered hex string and returns it.

    m is hex string.
    """
    raw = bytes.fromhex(m)
    c1, c3, c2 = raw[:64], raw[64:96], raw[96:]
    reordered = (c1 + c2 + c3).hex()
    print(reordered)
    return reordered
735498b3f41af04c14cde03d552f804b091acb21
38,114
def without_ends(string: str) -> str:
    """Drop the first and last characters of *string*.

    >>> without_ends('abc')
    'b'
    """
    chars = list(string)
    return ''.join(chars[1:-1])
eedc605702d67a22341a10a1df3a719b78b6174d
38,115
def home(): """List of all returnable API routes.""" return( f"Welcome to the Hawaii weather API!<br/>" f"Available Routes: <br/>" f"/api/v1.0/precipitation<br/>" f"Returns a JSON representation of dates and precipitation.<br/><br/>" f"/api/v1.0/stations<br/>" f"Returns a JSON list of weather stations in Hawaii.<br/><br/>" f"/api/v1.0/tobs<br/>" f"Returns dates and temperature observations for the last year of data.<br/><br/>" f"/api.v1.0/<start><br/>" f"Returns a JSON list of the minumum, average and max temperatures after a given date.<br/><br/>" f"/api.v1.0/<start>/<end><br/>" f"Returns a JSON list of the minimum, average and max temperatures between a specified date range.<br/><br/>" )
bd9d9748b57f6cb781c042b06c5fd0db9a8614ab
38,116
async def healthcheck():
    """Liveness probe: reply with a constant payload confirming the API is up."""
    payload = {"ping": "pong"}
    return payload
cad4018cda809dcc3713b5a104bc25acf4830936
38,117
def mark_comment(comment, claim_lis=None, premise_lis=None):
    """Wrap claim spans in <claim> tags and premise spans in <premise> tags.

    :param comment: the text to annotate
    :param claim_lis: optional list of claim substrings to tag
    :param premise_lis: optional list of premise substrings to tag
    :return: the annotated comment
    """
    def _tag_spans(text, spans, tag, label):
        # Replace every occurrence of each span with its tagged form.
        if spans is None:
            return text
        for span in spans:
            span = ' '.join(span.split(' ')).strip(' ')
            print("Replacing {} : ".format(label), span)
            text = text.replace(span, '<{0}>{1}</{0}>'.format(tag, span))
        return text

    # Pad with sentinel spaces so whole-word spans at the edges still match.
    comment = ' ' + ' '.join(comment.split(' ')) + ' '
    comment = _tag_spans(comment, claim_lis, 'claim', 'CLAIM')
    comment = _tag_spans(comment, premise_lis, 'premise', 'PREMISE')
    return comment[1:-1]
cc6d4e3ae72b82edaca1f35cfd4915cd7df0eb09
38,118
def vbar(vmodel, event_depth, station_elevation):
    """
    Calculates the average velocity between source and receiver for a given
    velocity model. Only need the difference in the vertical axis as sines of
    angles will cancel.

    Parameters
    ----------
    vmodel : pandas DataFrame
        Contains the velocity model, ordered by deepest layer first.
        NOTE(review): assumes column 0 holds the layer depth and column 2
        the layer velocity — TODO confirm against the model file format.
    event_depth : float
        Depth of event. Units should be consistent with station_elevation.
    station_elevation : float
        Elevation of event. Units should be consistent with depth of event.

    Returns
    -------
    vbar : float
        Average velocity within the model.
    """
    average = 0.0
    for i, layer in vmodel.iterrows():
        # Top of this layer is the depth stored in the next (shallower) row.
        # NOTE(review): assumes iterrows() yields a positional integer index
        # and that the sentinel row below stops the loop before i+1 runs off
        # the end of the frame — confirm.
        layer_top = vmodel.iloc[i+1][0]
        layer_bottom = layer[0]
        # Clip the layer top to the station elevation when the station sits
        # inside this layer.
        if station_elevation < layer_bottom and station_elevation > layer_top:
            layer_top = station_elevation
        # -100.0 appears to act as an end-of-model sentinel depth — confirm.
        if layer_top == -100.0:
            break
        if event_depth <= layer_top:
            # Layer lies entirely above the event; contributes nothing.
            continue
        elif event_depth > layer_top and event_depth <= layer_bottom:
            # Handle interpolated distance
            dist_in_layer = abs(event_depth - layer_top)
        else:
            # Handle full layer
            dist_in_layer = abs(layer_bottom - layer_top)
        # Accumulate distance-weighted velocity (column 2).
        average += dist_in_layer * layer[2]
    return average / (event_depth - station_elevation)
ce10c0148d9b292b9a825a49bc983b7c81d975b2
38,119
def _h13_s(s):
    """Define the boundary between Region 1 and 3, h=f(s)

    >>> "%.6f" % _h13_s(3.7)
    '1632.525047'
    >>> "%.6f" % _h13_s(3.5)
    '1566.104611'
    """
    sigma = s/3.8
    # (I exponent, J exponent, coefficient n) triples of the correlation.
    coefficients = [
        (0, 0, 0.913965547600543),
        (1, -2, -0.430944856041991e-4),
        (1, 2, 0.603235694765419e2),
        (3, -12, 0.117518273082168e-17),
        (5, -4, 0.220000904781292),
        (6, -3, -0.690815545851641e2),
    ]
    total = 0
    for i_exp, j_exp, coeff in coefficients:
        total += coeff*(sigma-0.884)**i_exp*(sigma-0.864)**j_exp
    return 1700*total
cd08377072d0ae84efc00214abaa767e8001424e
38,120
def first_player_point(ended_state):
    """Score the finished game from the first player's perspective.

    Returns 1 for a first-player win, 0 for a first-player loss,
    0.5 for a draw.
    """
    if not ended_state.is_lose():
        return 0.5
    # The side to move has lost, so the point goes to the other side.
    return 0 if ended_state.is_first_player() else 1
fd5ee6db94082d00fa8d51fbad84a8edeccadd23
38,121
def is_weekend(data):
    """
    Adds a binary is_weekend column to a pandas DataFrame

    Args:
        data - a pandas DataFrame containing a 'created_time' column of
               datetime objects

    Returns:
        The DataFrame with an additional 'is_weekend' column:
        1 when created_time falls on a Friday, Saturday or Sunday, else 0.
        The input frame is modified in place and also returned.
    """
    weekend_days = {4, 5, 6}  # Mon=0 ... Fri=4, Sat=5, Sun=6
    # Column-wise assignment replaces the original per-row chained indexing
    # (data['is_weekend'][idx] = 1), which raises SettingWithCopyWarning and
    # can silently fail to write on modern pandas.
    data['is_weekend'] = data['created_time'].apply(
        lambda t: 1 if t.weekday() in weekend_days else 0
    )
    return data
1dd00dda7e41031bc7bac462618124ca8fd133bd
38,122
def route(url, endpoint=None, methods=None):
    """
    A decorator that applies a route to the view or action.

    :param str url: The url rule (must be non-empty).
    :param str endpoint: The endpoint.
    :param list methods: A list of http methods.
    :return: A function.
    :raises ValueError: if *url* is empty.
    """
    if not url:
        raise ValueError('url cannot be empty.')

    def decorator(func):
        # Lazily create the routes list on first decoration of this function.
        if not getattr(func, 'routes', None):
            func.routes = []
        func.routes.append((url, endpoint, methods))
        return func

    return decorator
18b5c3fb287e8f21e8a8eeeac8643f123df75b7f
38,123
def get_lix_score(n_sents, words):
    """Return the LIX readability score: average sentence length plus the
    percentage of long words (more than 6 characters)."""
    n_words = len(words)
    n_long = sum(1 for w in words if len(w) > 6)
    return (n_words/n_sents) + ((n_long*100)/n_words)
1abb5e51d7bc271df29c8be2cd5ee0a311888857
38,124
import string
import secrets


def rand_token(length: int = 25,
               chars: str = string.ascii_uppercase + string.digits) -> str:
    """
    Generate a cryptographically random token.

    Does not check for duplicates; 25 characters over 36 symbols gives
    roughly 8.082812775E38 possible keys.

    length:
        - length of token to generate
    chars:
        - characters used in seeding of token
    """
    picks = [secrets.choice(chars) for _ in range(length)]
    return "".join(picks)
1cecef09eca30dee6bb607d7e5ce359d977e8beb
38,127
import logging


def parse_reddit_url(url):
    """Return dict or None

    Try to parse a reddit-style url of the form
    /r/<sub-reddit>/comments/<id>/<safe_title>/ into its parts.
    Logs an error and returns None for any other shape.
    """
    segments = url.split("/")
    # Value comparison (!=) replaces the original ``is not 7``: identity
    # checks against int literals rely on interpreter small-int caching and
    # emit a SyntaxWarning on Python 3.8+.
    if len(segments) != 7:
        logging.error("Invalid sub-reddit url: {}".format(url))
        return None
    return {
        "id": segments[4],
        "sub-reddit": segments[2],
        "safe_title": segments[5]
    }
767650972fdd462b1ddc089c89224434fbdb5a23
38,128
def parser_data(args):
    """
    Build the post metadata dict from the parsed CLI arguments.

    :param args: namespace exposing ``date`` (a date/datetime), ``title``
        (list), ``category`` (list) and ``tags`` attributes
    :return: dict with 'date', 'title', 'category' and 'tags' keys
    """
    return {
        'date': args.date.strftime("%Y-%m-%d"),
        'title': args.title[0],
        'category': args.category[0],
        'tags': args.tags,
    }
660a794b943818b715e10268f75792b111696deb
38,130