Columns: content (string, 39 to 9.28k chars); sha1 (string, 40 chars); id (int64, 8 to 710k)
import math


def is_pentagonal(p: int) -> bool:
    """
    P = n * (3n - 1) / 2
    If P is pentagonal, the above equation will have a positive integer
    solution for n. We use the quadratic formula to check if either
    solution for n is a positive integer.
    """
    root = math.sqrt(24 * p + 1)
    return root.is_integer() and ((1 + root) / 6).is_integer()
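# Usage sketch (added for illustration, not part of the original snippet):
# the first few pentagonal numbers are 1, 5, 12, 22, 35, so the check should
# accept them and reject their neighbours.
assert all(is_pentagonal(p) for p in (1, 5, 12, 22, 35))
assert not any(is_pentagonal(p) for p in (2, 6, 13, 23, 36))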
0561f7453eeda33c780917c708eaa8fa88ac0df4
685,199
def increment_filename(filename):
    """
    Increment a counter on the filename, so that duplicate filenames can be
    avoided. We do this by adding a counter as a path component at the start
    of the filename, so that the original name is not changed.
    """
    if '/' not in filename:
        counter = 0
        rest = filename
    else:
        counter, rest = filename.split('/', 1)
        try:
            counter = int(counter)
        except ValueError:
            # eg. foo/bar.pdf -> 1/foo/bar.pdf
            counter = 0
            rest = filename
    return '%d/%s' % (counter + 1, rest)
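# Hedged usage sketch (filenames invented): repeated calls keep bumping the
# leading counter while leaving the original name intact.
assert increment_filename('report.pdf') == '1/report.pdf'
assert increment_filename('1/report.pdf') == '2/report.pdf'
assert increment_filename('foo/bar.pdf') == '1/foo/bar.pdf'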
9ebace8952fbb585bfc3c1a2236bbed59288095b
421,271
def calculate_statistics(delta_pred, delta_real, delta_pred_bin, delta_real_bin):
    """
    Computes the accuracy statistics for the predicted and real changes in
    price. Provides validation for the model if the accuracy of the binary
    predictions is within a target threshold. Also computes the number of
    predictions that are within a threshold deviation of the average.

    Args:
        delta_pred (tensor) - the predicted deltas
        delta_real (tensor) - the real deltas
        delta_pred_bin (tensor) - the binary predictions
        delta_real_bin (tensor) - the binary real values

    Returns:
        avg_p (float) - the avg percent difference between the real and
            predicted data
        avg_pos_correct (float) - the proportion of correct positive ids
        avg_neg_correct (float) - the proportion of correct negative ids
    """
    # true_pos, false_pos, true_neg, false_neg
    res = [0] * 4
    P = 0
    N = delta_real_bin.shape[0]
    for i in range(N):
        P += abs(delta_pred[i][0] - delta_real[i][0]) / delta_real[i][0]
        real, pred = delta_real_bin[i][0], delta_pred_bin[i][0]
        if real == 1 and pred == 1:
            res[0] += 1
        elif real == 0 and pred == 1:
            res[1] += 1
        elif real == 0 and pred == 0:
            res[2] += 1
        elif real == 1 and pred == 0:
            res[3] += 1
    avg_p = P / N
    avg_pos_correct = res[0] / (res[0] + res[1])
    avg_neg_correct = res[2] / (res[2] + res[3])
    return avg_p, avg_pos_correct, avg_neg_correct
045a658d92a04c2a0f19b0bc3893f584ff3a8d56
507,412
def groupify(data):
    """
    Takes a dict with groups identified by ./ and turns it into a nested dict
    """
    new = {}
    for key in data.keys():
        if "./" in key:
            group, field = key.split("./")
            if group not in new:
                new[group] = {}
            new[group][field] = data[key]
        else:
            new[key] = data[key]
    return new
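# Illustrative sketch (keys are hypothetical): flat "group./field" keys
# become one nested dict per group, other keys pass through unchanged.
assert groupify({'meta./id': 1, 'meta./name': 'a', 'flag': True}) == \
    {'meta': {'id': 1, 'name': 'a'}, 'flag': True}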
59295a90203d8eb7a9af901ab3f62df3132fea27
637,278
def greatest_common_divisor(a: int, b: int) -> int:
    """
    Euclid's Lemma : d divides a and b, if and only if d divides a-b and b
    Euclid's Algorithm

    >>> greatest_common_divisor(7,5)
    1

    Note : In number theory, two integers a and b are said to be relatively
    prime, mutually prime, or co-prime if the only positive integer (factor)
    that divides both of them is 1 i.e., gcd(a,b) = 1.

    >>> greatest_common_divisor(121, 11)
    11
    """
    if a < b:
        a, b = b, a
    while a % b != 0:
        a, b = b, a % b
    return b
f2e707a614e2c0b5fa73ce83fe79b1451fbdb910
684,469
def mosek_test_tags(mosek_required = True):
    """Returns the test tags necessary for properly running MOSEK tests.

    By default, sets mosek_required=True, which will require that the
    supplied tag filters include "mosek".

    MOSEK checks a license file outside the workspace, so tests that use
    MOSEK must have the tag "no-sandbox".
    """
    nominal_tags = [
        "no-sandbox",
    ]
    if mosek_required:
        return nominal_tags + ["mosek"]
    else:
        return nominal_tags
c477bb6def894d21c6f46a5be445ee8f5fe6ff63
508,421
def serialize(self, request, contents, serializer = None, lazy = True):
    """
    Serializes the provided contents (either map or list) using the
    infra-structure (serializer) that is currently defined for the request.

    An optional serializer attribute may be used to "force" the serializer
    that is going to be used in the contents serialization. This value may
    be either the concrete serializer instance or a string representing the
    type of serializer to be used (runtime resolution). The definition of
    the serializer is not part of the method and such behavior should be
    defined by the upper layers.

    The resulting data from the serialization is set as the contents for
    the current request and the proper mime type defined.

    :type request: Request
    :param request: The request object that is going to be used as reference
    for the retrieval of the serializer object and for the setting of the
    serialization result contents.
    :type contents: List/Dictionary
    :param contents: The contents that are going to be serialized using the
    context defined in the request.
    :type serializer: Object
    :param serializer: The serializer (protocol) compliant object that is
    going to be used for the "forced" serialization process.
    :type lazy: bool
    :param lazy: If a lazy based serialization process should be used in
    case there's such support from the serializer.
    :rtype: String/Generator
    :return: The final serialized value that may be used for reference,
    please note that this value should not be set again in the request as
    that may create some unnecessary performance issues.
    """

    # in case the provided serializer value is string based it must
    # be "resolved" using the default string based plugin resolution
    # as the serializer value must always be object compliant
    if type(serializer) == str:
        serializer = getattr(self.plugin, serializer + "_plugin")

    # verifies if the serializer attribute is defined in the provided
    # request if that's the case and there's no provided serializer
    # such value is going to be used as the serializer for the contents
    is_defined = hasattr(request, "serializer")
    if is_defined: serializer = serializer or request.serializer
    if not serializer: return

    # retrieves the output (character) encoding value from the request
    # so that it may be used for the lazy version of the dumps operation
    # for the generation of the data, as required by specification
    encoding = request.get_encoding()

    # verifies if lazy dumping support exists for the current serializer
    # and if that's the case and lazy serialization is requested the lazy
    # mode is enabled so that a generator based strategy is used
    has_lazy = hasattr(serializer, "dumps_lazy")
    lazy = lazy and has_lazy

    # runs the serialization process on the contents (dumps call) and
    # then retrieves the mime type for together with the data string
    # value set the contents in the current request
    if lazy: data = self.dumps_lazy(serializer, contents, encoding = encoding)
    else: data = serializer.dumps(contents)
    mime_type = serializer.get_mime_type()
    self.set_contents(request, data, content_type = mime_type)

    # returns the final serialized data to the caller method so that it
    # may be inspected and analyzed to check for any issue, it may also
    # be re-used for a different context that the request one
    return data
9a6041b9782aebd9b175dd7bed6ed65b67db9edb
144,095
def _generateParams(param_dict):
    """
    Will take a dictionary, where *potentially* some values are lists.
    Will return a list of dictionaries with no lists, where each possible
    list member combination is included.
    """
    keys = param_dict.keys()
    list_key = None
    for key in keys:
        if isinstance(param_dict[key], list):
            list_key = key
            break
    if list_key is None:
        return [param_dict]
    new_dics = []
    for list_val in param_dict[list_key]:
        new_dict = {}
        for key in keys:
            if key is not list_key:
                new_dict[key] = param_dict[key]
            else:
                new_dict[key] = list_val
        new_dics.append(new_dict)
    ret = []
    for dic in new_dics:
        ret += _generateParams(dic)
    return ret
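# Hedged illustration (parameter names invented): list values are expanded
# into the cross product of all list members.
assert _generateParams({'lr': [0.1, 0.01], 'epochs': 5}) == \
    [{'lr': 0.1, 'epochs': 5}, {'lr': 0.01, 'epochs': 5}]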
380ce32accaaded36d0096a5698835e8d1e2fd75
134,678
import uuid


def is_uuid(v):
    """
    Check whether value is a valid UUID

    :param v: the value to check
    :return: True if v parses as a UUID, False otherwise
    """
    try:
        uuid.UUID(v)
        return True
    except ValueError:
        return False
1a21f75202b4c27904b39084e60b3d4ed260264f
377,647
def parse_version_tag(version_tag):
    """
    :param str version_tag: the version tag.
    :return: Given a version tag, return it as a list of ints
    :rtype: list[int]
    """
    return [int(x) for x in version_tag.lower().replace('v', '').split('.')]
a4f0c3473e5bae57c689c7a62e206314e2dacfea
593,409
def create_pixmap(conn, wid, width, height):
    """Creates a pixmap of the given dimensions and returns the XID"""
    pixmap = conn.generate_id()
    default_screen = conn.setup.roots[conn.pref_screen]
    conn.core.CreatePixmap(
        default_screen.root_depth,  # depth
        pixmap,                     # pixmap id
        wid,                        # drawable id (window)
        width,
        height
    )
    return pixmap
df8c47f01f2c66e730d63702f5a31cbb68ef6622
405,223
def parse_conf(raw_conf_list):
    """Parse configuration items for spark-defaults."""
    conf_dict = {}
    for raw_conf in raw_conf_list:
        if "=" in raw_conf:
            key, value = raw_conf.split('=', 1)
            conf_dict[key] = value
    return conf_dict
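# Behaviour sketch (config lines made up): entries without an '=' are
# silently ignored.
assert parse_conf(['spark.executor.memory=4g', 'bogus']) == \
    {'spark.executor.memory': '4g'}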
e41e92061adae96ac02c654ea38eb8ba603898af
526,868
def add_data_to_json(json_obj, query_data, candidate_data, joined_data=None):
    """Adds query and candidate datasets to json object.
    """
    json_obj['query_data'] = query_data
    json_obj['candidate_data'] = candidate_data
    if joined_data:
        json_obj['joined_data'] = joined_data
    return json_obj
681ec141690c2018cb6c9f6b3257d1fa2c0412ba
337,820
def update_residue(rating_matrix, rate_bar):
    """update residue matrix for each iteration in Gradient descent method

    :param rating_matrix: users' rating matrix
    :param rate_bar: rating matrix generated by approximation in each GD step
    :return residue: residue matrix, rating_matrix - rate_bar
    :rtype: ndarray
    """
    residue = rating_matrix - rate_bar
    index = (rating_matrix == 0)
    residue[index] = 0
    return residue
cbd79fbb2f4cbd2382d0d89e395481ab5f4592c4
337,758
import re


def getStrainID(inStr):
    """
    get strain ID
    should start with LL or AG then 3-4 digits
    """
    result = ''
    p = re.compile(r'((AG|LL)\d{3,4})')
    if type(inStr) is str:
        regexResult = p.findall(inStr)
        if len(regexResult) > 0:
            result = regexResult[0][0]
    return result
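# Sample strings below are hypothetical: the first AG/LL id with 3-4 digits
# wins, and anything else yields the empty string.
assert getStrainID('sample_AG1234_rep2') == 'AG1234'
assert getStrainID('no id here') == ''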
e756c490e02d9f4839797e7446483d357cd49b60
240,510
def non_valid_devs(host):
    """Inventory filter for Nornir for all non-valid devices.

    Return True if a device is not valid, False otherwise.

    Args:
        host (Host): Nornir Host

    Returns:
        bool: True if the device does not have a config, False otherwise.
    """
    if host.data["has_config"]:
        return False
    return True
020b7184d138357e169f0ac19b7e0d0f7ac56f73
643,001
def validate_same_rank(cards):
    """
    validate 3 of a kind or 4 of a kind

    :param cards: list of Card objects
    :return: Boolean
    """
    if len(cards) not in (3, 4):
        return False
    return all(card.rank == cards[0].rank for card in cards)
6624b06ca7baad475fd820bd870d1b36397f8091
283,696
def is_source(f):
    """Returns true iff the input file name is considered a source file."""
    return not f.endswith('_test.go') and (
        f.endswith('.go') or f.endswith('.s'))
8beea19b8f2918088d02970f571fb87a47bbf9a7
642,567
def get_sequence_ends(cxn):
    """Get a list of all seq_ends in the database."""
    return cxn.execute('SELECT DISTINCT seq_end FROM sequences')
6e71d86a9ff6fdb7f286b7a8348417f99ffe0714
151,381
def fsearch(f, s):
    """
    Search for a line in a file.

    Parameters
    ----------
    f : file_like
        File_like object to search in.
    s : string
        String to search for.

    Returns
    -------
    line : string or None
        The first line that contains the string. None if not found.
    p : integer or None
        The position where `s` begins in `line`. None if not found.

    Notes
    -----
    The search begins at the current file position.
    """
    for line in f:
        p = line.find(s)
        if p > -1:
            return line, p
    return None, None
1ab86a83dfc2b99b2d1622c01f77647faca87f0e
532,887
def clean_dict(dictionary, to_del='_'):
    """
    Delete dictionary items with keys starting with specified string

    Works recursively

    Args:
        :dictionary: Dictionary object
        :to_del: Starts-with identifier
    """
    to_delete = []
    for k, v in dictionary.items():
        if isinstance(v, dict):
            clean_dict(v, to_del)
        if k.startswith(to_del):
            to_delete.append(k)
    for k in to_delete:
        del dictionary[k]
    return dictionary
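# Quick sanity sketch (payload is hypothetical): underscore-prefixed keys
# are pruned at every nesting level.
assert clean_dict({'_tmp': 1, 'keep': {'_x': 2, 'y': 3}}) == \
    {'keep': {'y': 3}}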
04f1275186c03b77e80c4536dab62af93439d6e4
327,390
def rreplace(string, old, new, occurrence = 1):
    """replace <occurrence> number of old by new in string starting from the right"""
    split = string.rsplit(old, occurrence)
    return new.join(split)
4c96959d18e46e1c5bc82b429efe365fdb698223
116,548
def trim_float(f):
    """
    returns a trimmed string from a float: 4.20000000000000000 -> 4.2
    """
    return str(f).rstrip('0').rstrip('.')
66026be578b77189239988a5cc428e353b306765
175,145
import struct


def read_plain_double(file_obj, count):
    """Read `count` 64-bit float (double) using the plain encoding."""
    return struct.unpack(
        "<{}d".format(count).encode("utf-8"),
        file_obj.read(8 * count)
    )
8e00848f37c43949e46edcbf3ffa37bea916b913
562,812
def executeCommand(cur, command, vals, returnValue):
    """ Function for executing a command with values and handling exceptions

    :param Psycopg.Cursor cur: cursor object from psycopg2 library
    :param str command: the sql command string
    :param list vals: list of values for the command
    :param bool returnValue: boolean value for if the command returns a value
    :returns: Values returned by the query or None if returnValue is False
    """
    cur.execute(command, vals)
    if returnValue:
        return cur.fetchall()
    else:
        return None
b5a92a19e5bda9bacb8c622636a827ccc94ed81d
609,830
import torch


def predict_cmap_interaction(model, n0, n1, tensors, use_cuda):
    """
    Predict whether a list of protein pairs will interact, as well as their
    contact map.

    :param model: Model to be trained
    :type model: dscript.models.interaction.ModelInteraction
    :param n0: First protein names
    :type n0: list[str]
    :param n1: Second protein names
    :type n1: list[str]
    :param tensors: Dictionary of protein names to embeddings
    :type tensors: dict[str, torch.Tensor]
    :param use_cuda: Whether to use GPU
    :type use_cuda: bool
    """
    b = len(n0)
    p_hat = []
    c_map_mag = []
    for i in range(b):
        z_a = tensors[n0[i]]
        z_b = tensors[n1[i]]
        if use_cuda:
            z_a = z_a.cuda()
            z_b = z_b.cuda()
        cm, ph = model.map_predict(z_a, z_b)
        p_hat.append(ph)
        c_map_mag.append(torch.mean(cm))
    p_hat = torch.stack(p_hat, 0)
    c_map_mag = torch.stack(c_map_mag, 0)
    return c_map_mag, p_hat
a90efa0026a50ed5fa6d59e9d0dd42eefcfc2394
661,479
def summary_stat_line(start_t, end_t, moments):
    """
    Moments should be a tuple of number, mean, variance, skewness, kurtosis
    """
    line_template = "{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}"
    number, mean, variance, skewness, kurtosis = moments
    energy = number * (mean*mean + variance)
    line = line_template.format(start_t.isoformat(), end_t.isoformat(),
                                number, mean, variance, skewness, kurtosis,
                                energy, energy/number)
    return line, energy, energy/number
f07cd00066fb1bb2f0faee9a41aff5da72aecf73
186,408
def Handler(self, func, *args, **kwargs):
    """
    Returns a function that takes obj as its first parameter, then takes
    received arguments, and then takes extra passed (here as args and kwargs)
    """
    def decf(*locargs, **lockwargs):
        lockwargs.update(kwargs)
        return func(self, *(locargs + args), **lockwargs)
    return decf
226530df4e04519f3a488eed325faa8f8d832fd3
275,031
from datetime import datetime


def to_ISO8601(dt: datetime) -> str:
    """Convert datetime object to ISO 8601 standard UTC string"""
    return dt.strftime("%Y-%m-%dT%H:%M:%SZ")
a3a0550871fa904d3fd61d2591034cdbf103b66c
642,793
import re


def safe_filename(name: str, file_ending: str = ".json") -> str:
    """Return a safe version of name + file_type."""
    filename = re.sub(r"\s+", "_", name)
    filename = re.sub(r"\W+", "-", filename)
    return filename.lower().strip() + file_ending
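# Behaviour sketch (the name is made up): whitespace becomes '_', remaining
# non-word runs become '-', and the result is lower-cased.
assert safe_filename('My Report (v2)') == 'my_report_-v2-.json'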
98a887788046124354676a60b1cf7d990dbbc02f
5,211
def get_ids(xml):
    """
    input xml full name, return series id and file id
    """
    series_id, xml_name = xml.split('-')
    file_id, _ = xml_name.split('_')
    return series_id, file_id
081f4e6c0434a0bbb99d115d9e67684d3e34212f
116,758
def model_type(model):
    """Returns the Model Type from the model name
    """
    model = model.upper()
    if model in ['CO-A4', 'SO2-A4', 'NO-A4', 'NOX-A4', 'OX-A4']:
        ans = 'toxic'
    elif model in ['HIH6130', 'PT1000']:
        ans = 'met'
    elif model in ['OPC-N2']:
        ans = 'particle'
    elif model in ['PID-AH', 'PID-A1']:
        ans = 'pid'
    elif model in ['MIT', 'X1', 'X2']:
        ans = 'mit'
    elif model in ['E-BAM', 'EBAM']:
        ans = 'ebam'
    elif model in ['MITV2', 'MIT_V2']:
        ans = 'mit_v2'
    elif model.lower() in ['trex', 'trex2017']:
        ans = 'trex'
    else:
        ans = 'other'
    return ans
15b3a0d6731f01639b1fdc2afece424e1a0f0d83
527,880
def add_ordinal(num):
    """Returns a number with ordinal suffix, e.g., 1st, 2nd, 3rd.

    Args:
        num (int): a number

    Returns:
        (str): a number with the ordinal suffix

    Examples:
        >>> add_ordinal(11) == '11th'
        True
        >>> add_ordinal(132) == '132nd'
        True
    """
    switch = {1: "st", 2: "nd", 3: "rd"}
    end = "th" if (num % 100 in {11, 12, 13}) else switch.get(num % 10, "th")
    return "%i%s" % (num, end)
8a6fc3b4d8fe3dc76dcd6c35ed29fde29b82e0a2
110,564
def _convert_to_dataset_class(df, dataset_class, expectation_suite=None, profiler=None):
    """
    Convert a (pandas) dataframe to a great_expectations dataset, with
    (optional) expectation_suite

    Args:
        df: the DataFrame object to convert
        dataset_class: the class to which to convert the existing DataFrame
        expectation_suite: the expectation suite that should be attached to
            the resulting dataset
        profiler: the profiler to use to generate baseline expectations,
            if any

    Returns:
        A new Dataset object
    """
    if expectation_suite is not None:
        # Create a dataset of the new class type, and manually initialize
        # expectations according to the provided expectation suite
        new_df = dataset_class.from_dataset(df)
        new_df._initialize_expectations(expectation_suite)
    else:
        # Instantiate the new Dataset with default expectations
        new_df = dataset_class.from_dataset(df)
        if profiler is not None:
            new_df.profile(profiler)
    return new_df
acf1414ffbd8de9ddb4111266ea3279446522711
464,932
def get_event(app_type):
    """Helper function to get a dict that is reflective of a valid input event for an App"""
    return {
        'app_type': app_type,
        'schedule_expression': 'rate(10 minutes)',
        'destination_function_name':
            'unit_test_prefix_unit_test_cluster_streamalert_classifier'
    }
778f397e5b938dad007c2fb308d6744d0bb781b3
53,514
def str_get(series, index):
    """Implements series.str[index]"""
    return series.str[index]
baff462bf6ae61039ac7f537ff4e712309c90280
220,766
def grade_distribution(parsed_list, max_grade, bin_number=10):
    """
    This funtion calculates the distribution of the given grades by
    splitting them into 'n' equal bins (intervals) and finding the number
    of grades corresponding to each bin. The bins are left-closed,
    right-open: [a, b) = x | a <= x < b, except from the last one that is
    closed: [c, d] = x | c <= x <= d.

    :param parsed_list: the parsed list of the grades
    :param max_grade: the maximum grade that you can score
    :param bin_number: the number of bins that is calculated in the
        distribution, default is 10
    :return: a list of the number of grades in each bin
    """
    bin_length = max_grade / bin_number
    grade_distribution_list = [0] * bin_number
    for item in parsed_list:
        index = int(item / bin_length)
        if index == bin_number:
            grade_distribution_list[index-1] = (
                grade_distribution_list[index-1] + 1
            )
        else:
            grade_distribution_list[index] = grade_distribution_list[index] + 1
    return grade_distribution_list
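# Worked example (grades invented): with max_grade 100 and 10 bins of width
# 10, a score of exactly 100 falls into the last, closed bin.
assert grade_distribution([5, 15, 95, 100], 100) == \
    [1, 1, 0, 0, 0, 0, 0, 0, 0, 2]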
73fbd33cd1c7f9fb043f62d949749a39c2db33d1
44,637
def history_parser(arg):
    """
    @param: arg is a string that contains the words separated by spaces
    @return: Returns two strings. The first word removed from arg and
             everything after the space
    """
    v = -1
    try:
        v = arg.index(' ')
    except ValueError:
        return None, None
    first_word = arg[0:v]
    remain = arg[v + 1: len(arg)]
    return first_word, remain
267f0cd8ddfc0bfa9106d18341421d5d4d48ed1f
15,888
def has_dict_changed(new_dict, old_dict):
    """
    Check if new_dict has differences compared to old_dict while
    ignoring keys in old_dict which are None in new_dict.
    """
    if new_dict is None:
        return False
    if not new_dict and old_dict:
        return True
    if not old_dict and new_dict:
        return True
    defined_options = dict(
        (option, value) for option, value in new_dict.items()
        if value is not None
    )
    for option, value in defined_options.items():
        old_value = old_dict.get(option)
        if not value and not old_value:
            continue
        if value != old_value:
            return True
    return False
32d9e34e35906e6eefd5157cadb78953a96f43b0
597,030
def harmonic_in_band(fvco, bands):
    """Get description of band containing fvco frequency

    :param fvco: frequency
    :type fvco: ``int`` or ``float``
    :param bands: Tuple of tuples for each band:
        ((lo_freq, hi_freq, description), ()...)
    :type bands: ((``float``, ``float``, ``str``), ()...)
    :return: Description of the band containing fvco, otherwise None
    :rtype: ``str``
    """
    for lower, upper, desc in bands:
        if int(lower/fvco) != int(upper/fvco):
            return desc
    return None
4b608a61c17d3d9b84687946fdb21f9df088a8d7
518,362
def convert_yaml_to_tuple(yaml_dictionary):
    """Converts a yaml dictionary with two keys: `key` and `value` into
    a two argument tuple of those values."""
    return (yaml_dictionary["key"], yaml_dictionary["value"])
c1479601564ac8791bd6c5750b6bd1eb73e12a72
302,278
def get_sec_names(cfg):
    """Get section names as list from top to bottom ['sec0','sec1',...]"""
    if cfg.depth == 0:
        return []
    secnames = [cfg.name]
    for i in range(cfg.depth - 1):
        cfg = cfg.parent
        secnames.append(cfg.name)
    return secnames[::-1]
0a885e46989a795e4fdc0b00c54db4d0651b9569
318,273
def get_type(type_name):
    """Return the type of the given node type string (without *? modifier)."""
    if type_name[-1] == '*' or type_name[-1] == '?':
        return type_name[:-1]
    return type_name
b6fc297d16d704a67d094b1f7e40c0bec51ce049
661,793
def _is_folder_type(item_json):
    """Determine if a OneDrive file type can be represented as a CFolder"""
    return ('folder' in item_json or 'album' in item_json)
34c477538d054290f5a6a686c0d61d1bd2852d7a
431,451
def as_tuple(x, N):
    """
    Coerce a value to a tuple of length N.

    Parameters:
    -----------
    x : value or iterable
    N : integer
        length of the desired tuple

    Returns:
    --------
    tuple
        ``tuple(x)`` if `x` is iterable, ``(x,) * N`` otherwise.
    """
    try:
        X = tuple(x)
    except TypeError:
        X = (x,) * N
    if len(X) != N:
        raise ValueError("input must be a single value "
                         "or an iterable with length {0}".format(N))
    return X
58e84013074ee4a483f8334ec1fdc345688d2fe7
210,800
def _f2s(number, dec=4):
    """
    Return string representation of ``number``.

    Returned string is:

    * without trailing decimal zeros,
    * with at most ``dec`` decimal places.
    """
    if not isinstance(number, (int, float)):
        return number
    return '{{:.{:d}f}}'.format(dec).format(number).rstrip('0').rstrip('.')
20cbf5e5bf26e35b075b2785a7da15260bb92974
45,118
def merge_overlapping_intervals_v2(intervals):
    """
    Given a collection of intervals, merge all overlapping intervals.
    Return them in a sorted order
    """
    if len(intervals) == 0:
        return []
    # we need sorted array
    intervals = sorted(intervals)  # O(nlog(n))
    # to hold the merged intervals
    merged_stack = [intervals[0]]
    j = 0  # counter for merged stack
    for i in range(1, len(intervals)):
        # check if the interval in the merged stack and intervals are
        # overlapping
        if merged_stack[j][0] < intervals[i][1] and merged_stack[j][1] >= intervals[i][0]:
            curr_item = merged_stack.pop(j)
            # keep the larger of the two ends so a contained interval
            # cannot shrink the merged one
            merged_stack.append([curr_item[0],
                                 max(curr_item[1], intervals[i][1])])
        # otherwise, add the current interval from intervals to merged stack
        else:
            merged_stack.append(intervals[i])
            j += 1
    return merged_stack
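# Regression-style sketch (intervals invented): an interval fully contained
# in an earlier one must not shrink the merged end.
assert merge_overlapping_intervals_v2([[1, 10], [2, 3], [12, 14]]) == \
    [[1, 10], [12, 14]]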
81422832361474a3207cef1557706b1d80a32a9b
329,429
def cambio_a_futuro(tc, tiL, tiE, n):
    """Estimate the forward exchange rate

    :param tc: Current exchange rate
    :param tiL: Local interest rate
    :param tiE: Foreign interest rate
    :param n: Number of days in the future
    """
    tiL = tiL / 100
    tiE = tiE / 100
    return round(tc * ((1 + tiL)/(1 + tiE))**(n/365), 4)
4e8f9f9c76734e90451b7bf6c7a5c3b58b293984
235,450
def is_event(message):
    """Check if a message contains event data"""
    if len(message.embeds) > 0:
        embed = message.embeds[0]
        return (message.channel.name == 'upcoming-events'
                and embed.fields
                and embed.fields[0]
                and embed.fields[1]
                and embed.fields[2]
                and embed.fields[0].name == "Time"
                and embed.fields[1].name.startswith("Accepted")
                and embed.fields[2].name.startswith("Declined"))
    return False
6378600fb3c60204b9bb9719ffbc6c82aeab351b
182,375
def query_to_json(query, name):
    """This query is useful to fetch a complex join with some aggregations
    as a single blob, and later, just hydrate it without having to iterate
    over the resultset

    .. Example:

        SELECT
            u.id::varchar,
            to_jsonb(array_agg(scopes)) as scopes
        FROM auth.auth_user u
        LEFT JOIN LATERAL (
            SELECT id, scope
            FROM auth.auth_user_scope
            WHERE user_id = u.id
        ) scopes ON true
        WHERE user_id = ANY($1)
        GROUP BY u.user_id;

    This query will fetch a list of users, and aggregate its scopes as an
    array of dicts
    """
    return f"""
        select array_to_json(array_agg(row_to_json(t))) as {name}
        from (
            {query}
        ) as t
    """
07bee3c4c7f541cafccd1b31a3134bd8378f9de4
650,372
def fitness(individual, Environment):
    """
    In this case getting fitness from our result is trivial
    """
    fitness = individual.result
    return fitness
5069a3567480f9928d048108ae8010c2940c4372
501,323
def _answer(result, line):
    """Answer the question as best as we can"""
    msgs = [u"Q: {}".format(result["Input"][1])]
    del result["Input"]
    for pod_type in ["Result", "Solution", "Derivative"]:
        if pod_type in result:
            msgs.append(u"A: {}".format(result[pod_type][1]))
            return '\n'.join(msgs)
    # We didn't find a specific answer - go into more detail
    for title, val in result.values():
        msgs.append(u"{}: {}".format(title, val))
    return '\n'.join(msgs)
a41cc92b5e9734dc4f5f116a8e8b4a06b03641a5
639,992
def has_duplicates(lst: list) -> bool:
    """Checks if a list has duplicate values"""
    return len(lst) != len(set(lst))
6f6ac90894a96900af92177898d7e1a0151b2d03
667,037
def get_scale(x):
    """Finds the lowest scale where x <= scale."""
    scales = [20, 50, 100, 200, 400, 600, 800, 1000]
    for scale in scales:
        if x <= scale:
            return scale
    return x
b04c16605bd2cde85205ceae19a471e304053e21
223,881
def parse_frame_message(msg: str):
    """
    Parses a CAN message sent from the PCAN module over the serial bus

    Example:
        't1234DEADBEEF' - standard (11-bit) identifier message frame
        'R123456784'    - extended (29-bit) identifier request frame

    Returns a tuple with type, ID, size, and message
    Example:
        ('t', '00000123', '4', 'DEADBEEF')
        ('R', '12345678', '4', '')
    """
    _type = msg[0:1]  # type is the first character of the message
    # Determine if the message is an extended (29-bit) identifier frame
    _ext = _type == 'T' or _type == 'R'
    # Determine if the message is a request frame
    _rtr = _type.lower() == 'r'
    # Grab the ID depending on length of it (type-dependent)
    _id = msg[1:4] if not _ext else msg[1:9]
    _id = _id.zfill(8)
    # Grab the data size
    _size = msg[4:5] if not _ext else msg[9:10]
    if not _rtr:
        # Get the message data bytes depending on the size indicated by _size
        # (each data byte is two hex characters)
        _data = msg[5:5 + int(_size)*2] if not _ext else msg[10:10 + int(_size)*2]
    else:
        _data = ""
    return (_type, _id, _size, _data)
70ba228738a681f1f2d1e8fc9a5d35a5a5a93dd6
516,437
def unique(ngb):
    """Return unique values from vector."""
    uni = list()
    for n in ngb:
        if n not in uni:
            uni.append(n)
    return uni
58a830d24027cf6362fee298bd3524aa5ccc0356
300,389
def parse(message):
    """Parse a message."""
    message_parts = message.split(";")
    message_type = message_parts[0]
    content = message_parts[1].strip()
    return message_type, content
9795714f2986c351272d1fabe292be06e215749f
327,795
import itertools


def all_combinations(worlds):
    """
    For a list of worlds (worlds = a possible situation), calculate the
    combinations of them (each element from one world with the elements
    of the other worlds). Additionally, statements are converted to sets
    of tuples.

    Parameters
    ----------
    worlds : list(list(list(str)))
        list of worlds

    Returns
    -------
    list(set(tuple(str)))
        List containing all combinations of the initial world list.
    """
    tupled_worlds = []
    for world in worlds:
        tupled_worlds.append([tuple(x) for x in world])
    combinations = [set(c) for c in itertools.product(*tupled_worlds)]
    return combinations
51e6dbf36a0c3b1b4dcf5092e23b22da1ba0113d
216,912
def contains_sep(name):
    """ Test if name contains a mode name, e.g. TEM, ITG, ETG"""
    return any(sub in name for sub in ["TEM", "ITG", "ETG"])
414497454191394783dec87e772fd5f7f0e3b717
47,280
import hashlib


def get_sha_digest(s, strip=True):
    """Generate digest for s.

    Convert to byte string.
    Produce digest.
    """
    if strip:
        s = s.rstrip()
    s = s.encode("utf-8")
    return hashlib.sha256(s).hexdigest()
41a1b8d1088d86d283b12e382da4c3848608fd8c
678,876
def pattern_match(query, patterns=None):
    """
    Simple helper function to see if a query matches a list of strings,
    even if partially.

    Parameters
    ----------
    query : string
        String to check for matches
    patterns : list of string or None (optional)
        Patterns to reference, return False if unspecified

    Returns
    ----------
    match : bool
        Whether the query matches some pattern, fully or partially
    """
    # Default the returned value
    match = False
    # Check if there are any patterns to analyze
    if patterns is not None:
        # Compare the query to each pattern
        match = any([p in query for p in patterns])
    return match
9179314de92552273aa4a6c2533fa3f86fd02067
503,404
def path_exists(g, start, end):
    """
    Given a networkx graph g, do a dfs and determine if there exists a path
    between start and end.

    :return: True if there is a path from start to end, False otherwise.
    """
    st = []
    visited = set()
    st.append(start)
    while st:
        current = st.pop()
        visited |= {current}
        if current == end:
            return True
        for c in g.neighbors(current):
            if c not in visited:
                st.append(c)
    return end in visited
dde0712ae91dddf25354d8fecfefa420c6f8dec1
552,290
def sum_ascii_values(text: str) -> int:
    """Sum the ASCII values of the given text `text`."""
    return sum(ord(character) for character in text)
7a45396e528c6e2d6c54b611f18d0cf648e418c8
690,015
import math


def locate_number(num):
    """For a given number, locate its x, y co-ordinate in an infinite spiral

    Example:
        5 4 3
        6 1 2
        7 8 9

    >>> locate_number(2)
    (1, 0)
    >>> locate_number(9)
    (1, 1)

    NOTE: Based on answers found:
    - https://math.stackexchange.com/questions/1263541/
    - https://stackoverflow.com/questions/11550153/
    """
    if num == 1:
        return (0, 0)

    cycle = lambda num: math.floor((math.sqrt(num - 1) + 1) / 2)
    first = lambda c: (2*c - 1)**2 + 1
    length = lambda c: 8 * c
    sector = lambda num: math.floor(4 * (num - first(cycle(num))) / length(cycle(num)))

    def position(index):
        c = cycle(index)
        s = sector(index)
        offset = index - first(c) - s * length(c) // 4
        if s == 1:  # north
            return c - offset - 1, -c
        if s == 0:  # east
            return c, c - offset - 1
        if s == 3:  # south
            return -c + offset + 1, c
        # else, west
        return -c, -c + offset + 1

    return position(num)
080abcc55e0ebf3099874cedfb99a7fd9f2b01ab
533,122
def normalize_features(y, method):
    """
    Normalize intensities or other features.

    Parameters
    ----------
    y : array(nsamples, nfeatures)
        Data matrix, e.g. raw spectra or decomposition components.
    method : str
        One of 'none', 'mean1', 'mean0', 'mean0var1'

    Returns
    -------
    Normalized data.
    """
    if method == 'none':
        return y
    elif method == 'mean1':
        means = y.mean(0)
        means[means == 0] = 1
        return y / means
    elif method == 'mean0':
        return y - y.mean(0)
    elif method == 'mean0var1':
        y = y - y.mean(0)
        sds = y.std(0)
        sds[sds == 0] = 1
        return y / sds
    raise ValueError('undefined normalization method ' + method)
b22bd8560b479a35ca71513042004ad8c9ebef67
185,509
def snake_to_camel(s, separator='_'):
    """
    Converts snake to camel case

    >>> snake_to_camel('')
    ''
    >>> snake_to_camel('_x____yz')
    'XYz'
    >>> snake_to_camel('camel_case')
    'CamelCase'
    >>> snake_to_camel('r2_d2')
    'R2D2'
    >>> snake_to_camel('m1.small', '.')
    'M1Small'
    """
    return ''.join([w.capitalize() for w in s.split(separator)])
6bc0f6386e7e5d39a4d9f4f40b846b7c39fec85d
423,798
import zipfile


def get_parts(fname):
    """Returns a list of the parts in an OPC package.
    """
    with zipfile.ZipFile(fname) as zip_archive:
        parts = [name for name in zip_archive.namelist()]
    return sorted(parts)
458bd85f94df5ab78804eb1df791e6ad54c266c5
694,145
import math


def getHeading(lat_diff, long_diff):
    """Return directional heading (0=North) given lat and long diffs

    Args:
        lat_diff: (float) difference in latitude
        long_diff: (float) difference in longitude

    Returns:
        The heading value
    """
    angleEast = int(math.atan2(lat_diff, long_diff)*180/math.pi)
    heading = 90 - angleEast
    if heading < 0:
        heading += 360
    return heading
1f0207c8172c6fd06a92f0460a87ec664b76b7bc
108,535
def coverage_section(*coverItems):
    """Combine multiple coverage items into a single decorator.

    Args:
        *coverItems ((multiple) :class:`CoverItem`): coverage primitives to
            be combined.

    Example:
        >>> my_coverage = coverage.coverage_section(
        ...     coverage.CoverPoint("x", ...),
        ...     coverage.CoverPoint("y", ...),
        ...     coverage.CoverCross("z", ...),
        ...     ...
        ... )
        >>>
        >>> @my_coverage
        >>> def decorated_fun(self, arg):
        ...     ...
    """
    def _nested(*decorators):
        def _decorator(f):
            for dec in reversed(*decorators):
                f = dec(f)
            return f
        return _decorator
    return _nested(coverItems)
8ffa571c3a2370a4a5861b68dd63f890def776b8
378,954
def has_deployments_ended(deployments):
    """Check if all deployments have ended"""
    for deployment in deployments:
        if not deployment.has_ended:
            return False
    return True
bf79303165f2847bb1678b5b333c039a884e5ad4
601,278
def init_filter(df, report=False):
    """Keep relevant subset of topics"""
    keep_topics = ['economy', 'europe', 'health', 'crime', 'education',
                   'immigration', 'law']
    groups = df.groupby('topic')
    sub_df = df[df['topic'].isin(keep_topics)]
    sub_groups = sub_df.groupby('topic')
    if report:
        print("Found {} topics in data:".format(len(groups)))
        for g in groups:
            print("  {:20s} {:3d}".format(g[0], len(g[1])))
        print("Keeping {} topics".format(len(sub_groups)))
    return sub_df
40f1b09f3e1bda21b2a28d31303250c19a437b71
642,129
def merge_dicts(left, right):
    """
    Merges two dictionaries, keeping left and right as passed.
    If there are any common keys between left and right, the value from
    right is used.

    Returns the merger of the left and right dictionaries
    """
    result = left.copy()
    result.update(right)
    return result
d64049e4d21ad0031878427fb238e57f8abe9915
305,034
def union_keys(*pos_args):
    """Return a union of all the keys in multiple dicts.

    Args:
        pos_args: [list of dict]
    """
    return set().union(*pos_args)
961cd442bce2a3963d98ad12cb99453f01d41f4b
540,460
def _core_dist(point, neighbors, dist, n_features):
    """
    Computes the core distance of a point.
    Core distance is the inverse density of an object.

    Args:
        point (int): number of the point in the dataset
        neighbors (np.ndarray): array of dimensions (n_neighbors, 1):
            array of all other points indexes in object class
        dist (np.ndarray): array of dimensions (n, n):
            precalculated distances between all points

    Returns: core_dist (float)
        inverse density of point
    """
    n_neighbors = len(neighbors)
    distance_vector = dist[point][neighbors]
    distance_vector = distance_vector[distance_vector != 0]
    numerator = ((1/distance_vector)**n_features).sum()
    core_dist = (numerator / (n_neighbors - 1)) ** (-1/n_features)
    return core_dist
ec3801709b5fbce4107f94cd19f3ccd59c971887
100,198
def expand_as(x, y):
    """Add dimensions to x so it's dimensions are not less than y's dimensions.

    Dimensions are added to the end of x. If x has more dimensions than y
    from the beginning, nothing is done.

    Args:
        x (tensor): tensor to unsqueeze
        y (tensor): tensor whose dimensions are to be matched.

    Returns:
        a new view of x.
    """
    num_add_dims = max(0, len(y.shape) - len(x.shape))
    x = x.reshape(x.shape + (1,) * num_add_dims)
    return x
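# Shape-only sketch (tensors here are placeholders); assumes a torch-like
# tensor with .shape and .reshape, which the original does not import.
import torch
x = torch.zeros(2, 3)
y = torch.zeros(2, 3, 4, 5)
assert expand_as(x, y).shape == (2, 3, 1, 1)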
ec31ab223b894caac45518831a964b3de8be8722
439,471
def _csa_to_dict(csa_info):
    """Convert a csa header to a dictionary

    Arguments:
        csa_info (dict): the CSA record from `csareader.get_csa_header`

    Returns:
        json_dict: a dictionary with non-empty field value pairs
    """
    csa_dict = csa_info.get('tags')
    json_dict = dict()
    for k in csa_dict.keys():
        if csa_dict[k]['n_items'] != 0:
            item = csa_dict[k]['items']
            if len(item) > 1:
                json_dict[k] = item
            else:
                json_dict[k] = item[0]
    return json_dict
ed945fdcbd653ec9d9fa669d3ee3daeee837c0b2
492,341
def _models_get_all_function_name(model):
    """Returns the name of the function to retrieve all models from the database"""
    return '{}_get_all'.format(model.get_table_name())
4380181feb5a9049454ff7e06e8b9f25a48d8cb9
326,124
def identify(rod, x, y):
    """Adds chip ids (chip_x, chip_y) to the key for a rod

    Args:
        rod: dict of (x, y): [values]
        x: x coordinate that identifies the source chip
        y: y coordinate that identifies the source chip

    Returns:
        dict: {(chip_x, chip_y, x, y): [values]}
    """
    return {(x, y, k[0], k[1]): v for k, v in rod.items()}
1fd1b94c87d2bcd4f685d3d8738deab026ad002c
627,689
def is_versioned(obj):
    """
    Returns whether or not given object is versioned.

    :param obj: SQLAlchemy declarative model object.
    """
    return (
        hasattr(obj, '__versioned__') and
        (
            (
                'versioning' in obj.__versioned__ and
                obj.__versioned__['versioning']
            ) or
            'versioning' not in obj.__versioned__
        )
    )
f08e6db40283b46d6882429c045afe8af725c73e
88,976
def square_matrix_dim(M):
    """Check if the input is a square matrix and return its first dimension"""
    # check the rank first so a 1-d input fails the assert instead of
    # raising an IndexError on M.shape[1]
    assert len(M.shape) == 2 and M.shape[0] == M.shape[1]
    return M.shape[0]
aeeed3033fd28c6c7c042f1841d8de4b49302fdc
140,494
import torch


def log_domain_matmul(log_A, log_B, use_max=False):
    """
    log_A : m x n
    log_B : n x p
    output : m x p matrix

    Normally, a matrix multiplication computes
        out_{i,j} = sum_k A_{i,k} x B_{k,j}

    A log domain matrix multiplication computes
        out_{i,j} = logsumexp_k log_A_{i,k} + log_B_{k,j}

    This is needed for numerical stability when A and B are probability
    matrices.
    """
    m = log_A.shape[0]
    n = log_A.shape[1]
    p = log_B.shape[1]
    log_A_expanded = log_A.repeat(p, 1, 1).permute(1, 2, 0)
    log_B_expanded = log_B.repeat(m, 1, 1)
    elementwise_sum = log_A_expanded + log_B_expanded
    out = torch.logsumexp(elementwise_sum, dim=1)
    return out
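# Numerical sanity sketch (matrices are random): exponentiating the
# log-domain product should match the ordinary matrix product.
A = torch.rand(3, 4)
B = torch.rand(4, 2)
out = log_domain_matmul(torch.log(A), torch.log(B))
assert torch.allclose(torch.exp(out), A @ B)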
856b72fc5f53e8ebd25d3d07bb8999c6b60eb13e
124,590
from typing import List


def escape_quote_split(line: str) -> List[str]:
    """Split quote values on a line, handling \\" correctly."""
    out_strings = []
    was_backslash = False  # Last character was a backslash
    cur_part = []  # The current chunk of text
    for char in line:
        if char == '\\':
            was_backslash = True
            cur_part.append('\\')
            continue
        if char == '"':
            if was_backslash:
                cur_part.pop()  # Consume the backslash, then drop to append it.
            else:
                out_strings.append(''.join(cur_part))
                cur_part.clear()
                continue
        # Backslash only applies for one character..
        was_backslash = False
        cur_part.append(char)
    # Part after the last quotation
    out_strings.append(''.join(cur_part))
    return out_strings
b29464c41f545848ce52cb5229c631b16d3825fa
547,314
def perovskite_order_param(Rs_order_param=None):
    """
    Define an order parameter for perovskite.
    """
    if Rs_order_param is None:
        Rs_order_param = [(1, 0, 1), (1, 0, -1),
                          (1, 1, 1), (1, 1, -1),
                          (1, 2, 1), (1, 2, -1)]
    Rs_in = [(3, 0, 1)] + Rs_order_param
    return Rs_in
0525349d0e0bc3cb6068d38c3e1e71732d3c8a44
44,792
def ODEStep(u, um, t, dt, F):
    """2nd order explicit scheme for u''=F(u,t)."""
    up = 2*u - um + dt*dt*F(u, t)
    return up
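# Toy check (problem invented): one step of this central-difference scheme
# for u'' = -u, starting from u(0)=1 with u(-dt) taken from cos(-dt),
# should stay very close to the exact solution cos(t).
import math
dt = 0.01
u1 = ODEStep(1.0, math.cos(-dt), 0.0, dt, lambda u, t: -u)
assert abs(u1 - math.cos(dt)) < 1e-6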
c0f9b946dac7eafdf4fef4bb21fed09cc3d41c0b
542,666
import string


def MakeValidTableName(name):
    """Turn name string into a valid bigquery table name.

    Table names (or tableId) can be any letter or number or _ but cannot
    start with a number.

    Args:
        name: the table name you wish to sanitize.

    Returns:
        a sanitized version of name with only letters, digits or _
    """
    allowed_characters = string.ascii_letters + string.digits + '_'
    name = ''.join(x for x in name if x in allowed_characters)
    if not name or name[0] in string.digits:
        name = '_' + name
    return name
cb1dc0e76a00bf18ba1cba9548ae765fb4d55af6
258,803
def get_ss(ss_string):
    """
    Converts secondary structure string into a list of indices of where
    secondary structure changes (plus start and end coordinates of the
    sequence)
    """
    output = [0]
    for i in range(1, len(ss_string)):
        if ss_string[i] != ss_string[i-1]:
            output.append(i)
    if output[-1] != len(ss_string) - 1:
        output.append(len(ss_string) - 1)
    return output
387f6173a2464402cd2b20b6ec781ecf6ca855d3
499,874
def noSameEdge(G, sol):
    """
    The car should never drive along the same directed edge more than once.

    Input:
        G: A NetworkX graph that represents the input problem
        sol: List of edges in the solution path

    Returns:
        Boolean value indicating if the solution enforces this lemma
    """
    seen = {}
    for edge in sol:
        s, e = edge[0], edge[1]
        if e in seen.get(s, []):
            return False
        else:
            seen[s] = seen.get(s, []) + [e]
    return True
3f5eaf4cb946e82efba246e3c75fdace2b8b41db
246,334
def is_subdir(subdir_path, dir_path):
    """Returns True if the first path parameter is a subdirectory of the
    second path parameter."""
    return (subdir_path.rstrip('/') + '/').startswith(dir_path.rstrip('/') + '/')
d76e43dc19c005d87e9c9263af080c77b7d0fc77
130,067
import re


def regex_match(pattern):
    """A curried regex match. Gets a pattern and returns a function that
    expects a text to match the pattern with.

    >>> regex_match(r"phone:(\d*)")("phone:1234567").group(1)  # noqa: W605
    '1234567'
    """
    def regex_match_inner(text):
        return re.match(pattern, text)
    return regex_match_inner
ae466d108291fecc1cca985c0caa9d98c75e3d0a
673,180
from typing import Iterable
from typing import Optional


def argmax(iterable: Iterable) -> Optional[int]:
    """
    Find the first index of the biggest value in the iterable.

    Parameters
    ----------
    iterable : Iterable

    Returns
    -------
    argmax : Optional[int]

    Examples
    --------
    >>> argmax([0, 0, 0])
    0
    >>> argmax([1, 0, 0])
    0
    >>> argmax([0, 1, 0])
    1
    >>> argmax([])
    """
    max_value = None
    max_index = None
    for index, value in enumerate(iterable):
        if (max_value is None) or max_value < value:
            max_value = value
            max_index = index
    return max_index
2ab12ddad93b1e96058f2c3de834b206f99d1bcc
607,898
def fsign(n, imply_pos=True):
    """
    Format SIGN prefix

    '-' if n < 0 else '' (or '+' if imply_pos is false)
    """
    return "-" if n < 0 else "" if imply_pos else "+"
1140100e3a606fc386488e06511636061274d4b3
51,886
def es_base(caracter):
    """
    Str -> Bool

    Takes a character and determines whether it is a valid DNA base.

    >>> es_base('A')
    True
    >>> es_base('T')
    True
    >>> es_base('C')
    True
    >>> es_base('G')
    True
    >>> es_base('AT')
    Traceback (most recent call last):
    ...
    ValueError: AT is not a base
    >>> es_base('BB')
    Traceback (most recent call last):
    ...
    ValueError: BB is not a base
    >>> es_base('1')
    Traceback (most recent call last):
    ...
    ValueError: 1 is not a base
    >>> es_base('')
    Traceback (most recent call last):
    ...
    ValueError: Enter a character

    :param caracter: An uppercase letter
    :return: Whether it is valid or not
    """
    if not caracter:
        raise ValueError('Enter a character')
    letra = caracter.upper()
    if len(letra) == 1 and letra in 'ATCG':
        return True
    else:
        raise ValueError(caracter + ' is not a base')
b4b706cc87e37ae1fa5b343a384610a4aed6d130
602,682
def millis_to_srt(millis):
    """Convert milliseconds time to the SRT subtitles format time string."""
    result = ''
    # milliseconds
    milliseconds = millis % 1000
    result = ('%03d' % milliseconds) + result
    millis = (millis - milliseconds) // 1000
    # seconds
    seconds = millis % 60
    result = ('%02d,' % seconds) + result
    millis = (millis - seconds) // 60
    # minutes
    minutes = millis % 60
    result = ('%02d:' % minutes) + result
    millis = (millis - minutes) // 60
    # hours
    result = ('%02d:' % millis) + result
    # ready
    return result
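# Spot check (value chosen arbitrarily): 1 h 1 min 1 s and 7 ms.
assert millis_to_srt(3661007) == '01:01:01,007'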
cfcf2376ee9a0f12150e0f6cd960cdf8b1c497a0
208,494
import math

import torch


def gaussian_probability(sigma, mu, data):
    """Returns the probability of `data` given MoG parameters `sigma` and `mu`.

    Arguments:
        sigma (BxGxO): The standard deviation of the Gaussians. B is the
            batch size, G is the number of Gaussians, and O is the number
            of dimensions per Gaussian.
        mu (BxGxO): The means of the Gaussians. B is the batch size, G is
            the number of Gaussians, and O is the number of dimensions per
            Gaussian.
        data (BxI): A batch of data. B is the batch size and I is the
            number of input dimensions.

    Returns:
        probabilities (BxG): The probability of each point in the
            probability of the distribution in the corresponding sigma/mu
            index.
    """
    data = data.unsqueeze(1).expand_as(sigma)
    ret = (1.0 / math.sqrt(2 * math.pi)
           * torch.exp(-0.5 * ((data - mu) / sigma) ** 2) / sigma)
    return torch.prod(ret, 2)
5757e051af16b692fba9e483990df1d5c4fd3870
25,247
import math


def df2idf(docfreq, totaldocs, log_base=2.0, add=0.0):
    """
    Compute default inverse-document-frequency for a term with document
    frequency `docfreq`::

        idf = add + log(totaldocs / docfreq)
    """
    return add + math.log(1.0 * totaldocs / docfreq, log_base)
983b31e2b15471091f482445f624c2d412b20df7
45,131
def check_project(name, projects_list):
    """
    Checks if entry in tv list is a project instance.
    If entry name is identical to a project name, returns the entry name.
    If some part of entry name matches a project name or vice versa,
    returns that project to mark partial matching.
    If entry name does not match any project name, returns '2' to mark
    error.
    """
    if name in projects_list:
        return name
    else:
        for project in projects_list:
            if name in project or project in name:
                return project
        return '2'
03687f111dbcfdbf38c9f040aae0163656b2a470
208,382
def IntConv(value):
    """Returns the integer value of a string, if possible."""
    try:
        int_val = int(value)
        return int_val
    except ValueError:
        return value
f836e4896ba61cbe78f70213e01c22cfae37f585
495,057
import hashlib


def ComputeFileHash(filepath):
    """Generate a sha1 hash for the file at the given path."""
    sha1 = hashlib.sha1()
    with open(filepath, 'rb') as fp:
        sha1.update(fp.read())
    return sha1.hexdigest()
2660f1e03bf6f42e18af5ed9324d514b412526e4
428,034
def proc_communicate(subproc):
    """get the stdout output of a subprocess and return it
    """
    output = subproc.communicate()[0]
    return output
8ae4a449474b29a4390be5555e8d607c74135f48
211,153
from pathlib import Path


def index(directory: str) -> list:
    """
    Index all files from directory (including all subdirectories).

    :param directory: directory to index.
    :return: list containing all files found into given directory.
    """
    path = Path(directory)
    # rglob('*') walks subdirectories recursively, as the docstring promises
    return [p for p in path.expanduser().rglob('*') if p.is_file()]
646d28a6c942996e87f7f8dc9610f681b91ab7bb
562,432