content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def _dir(obj): """Verb: print dir(obj)""" print(dir(obj)) return obj
257fd71e4ba66a13f7ceed2f5fd429adda23ffb2
22,498
def build_tdbank_config_param(
    group_id,
    task_name,
    rt_id,
    topic,
    tasks,
    master,
    bid,
    table_name,
    kv_splitter,
    field_splliter,
):
    """
    Build a tdbank task configuration dict.

    :param group_id: cluster name
    :param task_name: task name (currently unused in the config payload)
    :param rt_id: rt_id
    :param topic: source kafka topic
    :param tasks: consumer concurrency
    :param master: tdbank data receiver address
    :param bid: tdbank bid
    :param table_name: tdbank interface/table name
    :param kv_splitter: key-value pair separator (e.g. '&' in a GET request)
    :param field_splliter: field separator (e.g. '=' in a GET request)
    :return: tdbank task configuration dict
    """
    config = {
        "group.id": group_id,
        "rt.id": rt_id,
        "connector.class": "com.tencent.bk.base.datahub.databus.connect.tdbank.sink.TdbankSinkConnector",
        "tasks.max": "%s" % tasks,
    }
    config["tdbank.td_manage.ip"] = master
    config["tdbank.bid"] = bid
    config["table.name"] = table_name
    config["topics"] = topic
    config["msg.field.splitter"] = field_splliter
    config["msg.kv.splitter"] = kv_splitter
    return config
1861ca8ba5423ebdfd32a1cf50bb40aafbeb6a29
22,502
def getListItem(inputtext='', inputlist=None):
    """Prompt the user for a value until it matches one of the allowed options.

    Values are accepted if they can be found in a predefined list; any other
    response re-prompts the user.

    :param inputtext: text shown before the option list
    :param inputlist: acceptable responses (list of strings); defaults to an
        empty list.  NOTE: the original used a mutable default argument
        (``inputlist=[]``), which is shared across calls — fixed here.
    :return: the accepted response string
    """
    options = [] if inputlist is None else inputlist
    # Build the prompt once; the option list does not change between retries.
    prompt = (inputtext + '\n\t' + '\n\t'.join(options)
              + '\nPlease Select From The Listed Options (case sensitive):\n')
    while True:
        outputtext = input(prompt)
        if outputtext in options:
            return outputtext
        print('INVALID SELECTION')
a2b567c78fdd5adba5b137f3509cf4115ebae15d
22,503
def to_applescript(num):
    """Convert a Python number to a format that can be passed to Applescript.

    A number doesn't strictly need coercion to print to stdout, but being
    explicit keeps the intent clear.
    """
    return "{}".format(num)
7d4fe31e276267668078cedc0b5fa6c5f97dc035
22,504
def button(url, title, style, _method="get", **context):
    """
    Describe a single button like [EDIT].

    :param url: Action this button goes to.
    :param title: Title of the button.
    :param style: Style of the button: primary|danger|info|warning|success
    :param _method: (optional) HTTP-ish method; "get" means the controller's
        get is used, otherwise the named method is called with ``context``.
    :param context: Context of the button. See 'link'.
    :return: dict describing the button
    """
    spec = {"class": "button"}
    spec["title"] = title
    spec["style"] = style
    spec["url"] = url
    spec["method"] = _method
    spec["context"] = context
    return spec
5b48e1b76d8df98f55ba198002e6ad4cf3f00313
22,505
import random


def sample(population, k, seed=42):
    """Return a list of k elements sampled from population.

    Seeds the random module deterministically from the population size, k and
    the given seed; returns the population unchanged when k is None or larger
    than the population.
    """
    size = len(population)
    if k is None or k > size:
        return population
    random.seed(size * k * seed)
    return random.sample(population, k)
0f087c675e87fef721426229f570e95ae84b10bc
22,507
def increment(value, list_):
    """Advance the search position based on the trailing factor counts.

    We seek 4 consecutive numbers each having exactly 4 prime factors, so the
    step size depends on how long the trailing run of 4s in ``list_`` is:
    no trailing 4 lets us skip 4 numbers, a run of one lets us skip 3, etc.
    """
    last = list_[-1]
    if last != 4:
        return value + 4
    if list_[-2:] != [4, 4]:
        return value + 3
    if list_[-3:] != [4, 4, 4]:
        return value + 2
    return value + 1
cae8e71c052aa0121a677304c45d4d6261d757d5
22,509
from pathlib import Path def _get_all_entries(entry_list, keep_top_dir): """ Returns a list of all entries (files, directories) that should be copied. The main purpose of this function is to evaluate 'keep_top_dir' and in case it should not be kept use all the entries below the top-level directories. """ all_files = [] entry_list = [Path(entry) for entry in entry_list] if keep_top_dir: all_files = entry_list else: for entry in entry_list: if entry.is_dir(): all_files.extend(list(entry.glob('*'))) else: all_files.append(entry) return all_files
71add58859cd4880fb46f4657ee6e0e9938a4d5f
22,510
import argparse


def parse_args(argv=None):
    """
    Parse command line arguments.

    :return: args
    :rtype: Namespace
    """
    parser = argparse.ArgumentParser(description="Plot PK models using 2 or 3 compartment models")
    parser.add_argument('-d', '--data_root', type=str, required=False, default='./',
                        help="Path to location of csv file (default = './'")
    parser.add_argument('-f', '--file_name', type=str, required=False, default='example.csv',
                        help="Filename for csv file containing model parameters")
    parser.add_argument('-n', '--no_graph', action="store_true",
                        help="Do not show the graph")
    return parser.parse_args(argv)
26e5cd995b0f09019c99109407832a4ebb5df0d8
22,511
def convertBinaryToDecimal(num: str) -> int:
    """Convert a binary string to its decimal value.

    Processes digits left to right, doubling the accumulator at each step;
    an empty string yields 0.
    """
    total = 0
    for digit in num:
        total = total * 2 + int(digit)
    return total
6ae6ee26f8f66347284c4db4a6a0e76dd88f749f
22,512
def dataInit():
    """
    Initialize the watermelon dataset and its attribute metadata.

    Returns:
        tuple: (Data, Attribute, IsDiscrete)
            - Data: list of samples; each row holds 6 discrete feature values,
              one continuous density value (float), and a class label
              ('好瓜' = good melon, '坏瓜' = bad melon).
            - Attribute: the 7 feature names (color, root, knock sound,
              texture, navel, touch, density).
            - IsDiscrete: per-attribute flags; only density (index 6) is
              continuous.
    """
    Data = [['青绿', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', 0.697, '好瓜'],
            ['乌黑', '蜷缩', '沉闷', '清晰', '凹陷', '硬滑', 0.774, '好瓜'],
            ['乌黑', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', 0.634, '好瓜'],
            ['青绿', '蜷缩', '沉闷', '清晰', '凹陷', '硬滑', 0.608, '好瓜'],
            ['浅白', '蜷缩', '浊响', '清晰', '凹陷', '硬滑', 0.556, '好瓜'],
            ['青绿', '稍蜷', '浊响', '清晰', '稍凹', '软粘', 0.403, '好瓜'],
            ['乌黑', '稍蜷', '浊响', '稍糊', '稍凹', '软粘', 0.481, '好瓜'],
            ['乌黑', '稍蜷', '浊响', '清晰', '稍凹', '硬滑', 0.437, '好瓜'],
            ['乌黑', '稍蜷', '沉闷', '稍糊', '稍凹', '硬滑', 0.666, '坏瓜'],
            ['青绿', '硬挺', '清脆', '清晰', '平坦', '软粘', 0.243, '坏瓜'],
            ['浅白', '硬挺', '清脆', '模糊', '平坦', '硬滑', 0.245, '坏瓜'],
            ['浅白', '蜷缩', '浊响', '模糊', '平坦', '软粘', 0.343, '坏瓜'],
            ['青绿', '稍蜷', '浊响', '稍糊', '凹陷', '硬滑', 0.639, '坏瓜'],
            ['浅白', '稍蜷', '沉闷', '稍糊', '凹陷', '硬滑', 0.657, '坏瓜'],
            ['乌黑', '稍蜷', '浊响', '清晰', '稍凹', '软粘', 0.360, '坏瓜'],
            ['浅白', '蜷缩', '浊响', '模糊', '平坦', '硬滑', 0.593, '坏瓜']
            # excluded sample kept for reference:
            # ['青绿', '蜷缩', '沉闷', '稍糊', '稍凹', '硬滑', 0.719, '坏瓜']
            ]
    Attribute = ['色泽', '根蒂', '敲声', '纹理', '脐部', '触感', '密度']
    IsDiscrete = [True, True, True, True, True, True, False]
    return Data, Attribute, IsDiscrete
3adbc5b70440660918c363d1d451dcd4da87d73c
22,513
import mimetypes


def get_content_type(filename):
    """
    Take a shot at guessing the provided filename's mimetype via the
    mimetypes module.

    :param str filename: name of the file, should include extension
    :return: the guessed value or `application/octet-stream` by default
    :rtype: str
    """
    guessed, _encoding = mimetypes.guess_type(filename)
    return guessed if guessed else 'application/octet-stream'
7bc7c33763157ba3104de854dde9106d93db0205
22,514
def change_soup(adventuredoc, section):
    """Build the HTML for one section of an adventure document.

    Mutates and wraps ``section.soup``: a <progress> element showing the
    section's position is inserted at the top, a "Next Section" anchor is
    appended when a following section exists, and the whole soup is moved
    into a new <section> wrapper whose id is the section's name.

    NOTE(review): "Should maybe have the callback...." was in the original
    docstring — the intended callback hook is unconfirmed.

    Returns:
        BeautifulSoup: the new <section> tag wrapping the section's soup.
    """
    soup = section.soup
    total_number_of_sections = len(adventuredoc.sections)
    # Section Progress/Position: 1-based value out of the total section count.
    progress = section.soup.new_tag("progress")
    progress['value'] = section.index + 1
    progress['max'] = total_number_of_sections
    soup.insert(0, progress)
    # If there's a next section add the "next section" link!
    # EAFP: indexing one past the last section raises IndexError, which
    # simply means this is the final section and no link is added.
    try:
        next_section_name = adventuredoc.sections[section.index + 1].name
        link = soup.new_tag("a", href="#" + next_section_name)
        link["class"] = "next"
        link.string = "Next Section"
        soup.append(link)
    except IndexError:
        pass
    # Wrap everything in a <section id="..."> element; append() reparents the
    # existing soup into the wrapper.
    section_wrapper = soup.new_tag("section")
    section_wrapper["id"] = section.name
    section_wrapper.append(soup)
    return section_wrapper
b0af5ee299d641778edd8b7766d841b72e3c59c8
22,515
import pytz
from datetime import datetime


def get_caption():
    """Return a human readable string based on the current time (US Eastern).

    NOTE(review): '%l' (space-padded 12-hour) is a glibc extension, not part
    of the portable strftime set — behavior on non-glibc platforms should be
    confirmed.
    """
    eastern = pytz.timezone('America/New_York')
    now = datetime.now(eastern)
    return now.strftime('%A, %b %d %Y @ %l:%M:%S %p')
05fde430464063341198796262f6e4d1faed130a
22,516
def dict_list_eq(l1, l2):
    """Compare two lists of dictionaries for equality, ignoring list order."""
    def canonical(dicts):
        # Each dict becomes a sorted item list, then the lists are sorted,
        # giving an order-independent canonical form.
        return sorted(sorted(d.items()) for d in dicts)

    return canonical(l1) == canonical(l2)
3f873598ba7a6f60eca4fa7524ca87992ce63528
22,518
import os
import pickle


def load_mnist(data_dir, use='train'):
    """
    Load a preprocessed MNIST pickle and normalize images to [-1, 1].

    parameter
    - data_dir: directory path where the mnist pickles exist
    - use: which split to load, 'train' or 'test'

    return: normalized images and labels
    """
    print('loading mnist image dataset..')
    pickle_name = 'train.pkl' if use == 'train' else 'test.pkl'
    pickle_path = os.path.join(data_dir, pickle_name)
    with open(pickle_path, 'rb') as f:
        mnist = pickle.load(f)
    # Pixel values 0..255 are mapped linearly onto [-1, 1].
    images = mnist['X'] / 127.5 - 1
    labels = mnist['y']
    print('finished loading mnist image dataset..!')
    return images, labels
bb4765ac65f67c2bbfefa9e00812bbc38517632c
22,519
import math


def acceptance_probability(previousConfigCost, newConfigurationCost, NumberOfSteps):
    """
    Calculate the simulated-annealing acceptance probability P(e, e', T).

    e = previous config cost, e' = new config cost, T = NumberOfSteps.
    Per Kirkpatrick (1983):
        P(e, e', T) = 1                        if e' < e
                      exp(-(e' - e) / T)       otherwise

    :param previousConfigCost: cost e of the current configuration
    :param newConfigurationCost: cost e' of the candidate configuration
    :param NumberOfSteps: temperature T (iteration count in this setting)
    :return: acceptance probability in (0, 1]
    """
    if newConfigurationCost < previousConfigCost:
        return 1
    # Use the true exponential; the original used pow(2.7, x), a crude
    # approximation of e that skews the acceptance probability.
    return math.exp(-(newConfigurationCost - previousConfigCost) / NumberOfSteps)
ea56f38830f00115e567fc426961e6eafe42695e
22,522
def adcm_credentials():
    """
    Provide the default ADCM username and password.

    Examples:
        login(**adcm_credentials)
    """
    return dict(username='admin', password='admin')
c8a9822f047aaeb9853b4e26e67fba5bd1cd910b
22,525
def multiple_split(source_string, separators, split_by = '\n'):
    """
    Split a string on any of several separator characters.

    Note: This version is faster than using the (s)re.split method
    (measured with timeit).

    Parameters:
    * source_string: string to be split
    * separators: string containing the characters used to split the
      source string (each character is an individual separator)
    * split_by: every occurrence of any separator is first replaced by
      this character, then the split is done on it. Defaults to '\\n'
      (newline).
    """
    # Map every separator character onto split_by in a single C-level pass,
    # then split once on split_by.
    translate_to = split_by * len(separators)
    translation = str.maketrans(separators, translate_to)
    return source_string.translate(translation).split(split_by)
0310d60a225fe156f86d6c0b4ce02781773750de
22,526
def base36decode(base36_string):
    """Converts base36 string into integer."""
    BASE = 36
    return int(base36_string, BASE)
66da9d391705cd0748e0e7c0ea5c69be2366ed4e
22,527
from typing import Any


def xml2dict(xml: Any) -> Any:
    """Convert an xml element tree to a nested dict keyed by tag name."""
    result = {}
    for node in list(xml):
        has_children = len(list(node)) > 0
        # Leaves map to their text (empty string when text is None);
        # inner nodes recurse.
        result[node.tag] = xml2dict(node) if has_children else (node.text or "")
    return result
9cba1d08813b24581a173b77e4a92d46a31ae5c9
22,528
def get_topic_data(topics_with_summaries):
    """
    Take a list of ranked topic objects and return a dictionary of the data.

    Keys are topic objects and values are lists of (sent_index, sent_noun_set)
    tuples, one per sentence in the topic's summary:

        {doc_obj: [(sent_index, sent_noun_set)]}
    """
    topic_dict = dict()
    for topic in topics_with_summaries:
        # enumerate gives each sentence's true position; the original used
        # sentences.index(sent_obj), which is O(n) per sentence and returns
        # the first *equal* sentence, mis-indexing duplicates.  The unused
        # all_training_vectors accumulator was also removed.
        topic_dict[topic] = [
            (position, sent_obj.nouns)
            for position, sent_obj in enumerate(topic.summary)
        ]
    return topic_dict
b0abf8ba28bc4359c2453050cb1a24340ac21158
22,529
def decode(symbol_list, bit_count):
    """
    Decode the value encoded on the end of a list of symbols.

    Each symbol is a bit in the binary representation of the value, with more
    significant bits at the end of the list.

    - `symbol_list` - the list of symbols to decode from.
    - `bit_count` - the number of bits from the end of the symbol list to decode.
    """
    assert bit_count > 0, "The given number of bits (%d) is invalid." % bit_count
    assert bit_count <= len(symbol_list), "The given number of bits (%d) is greater than the length of the symbol list. (%d)" % (bit_count, len(symbol_list))
    # Walk the trailing bits from most significant (end of list) to least,
    # accumulating the value with shift-and-or instead of string conversion.
    value = 0
    for bit in reversed(symbol_list[-bit_count:]):
        value = (value << 1) | int(bit)
    return value
f7cbfe783b32db099713d357fc6a20a7deff7e9f
22,530
import requests
import json


def make_post_request(url, payload, headers=None):
    """Wrapper for requests.post"""
    body = json.dumps(payload)
    return requests.post(url, data=body, headers=headers)
3d02fe19bfd8c3c80d0f181679b563d4d2429a6a
22,533
def newman_conway(num):
    """
    Return the Newman-Conway sequence P(1..num) as a space-separated string.

    Time Complexity: O(n)
    Space Complexity: O(n)

    :param num: how many terms to produce (must be >= 1)
    :raises ValueError: if num == 0
    """
    if num == 0:
        # The original did `raise(ValueError)` with no message.
        raise ValueError("num must be a positive integer")
    if num == 1:
        return "1"
    if num == 2:
        return "1 1"
    # answer[0] is a placeholder so indices match sequence positions.
    answer = [None, 1, 1]
    # A plain for-loop replaces `while i in range(...)`, which performed an
    # O(n) membership test on every iteration.
    for i in range(3, num + 1):
        answer.append(answer[answer[i - 1]] + answer[i - answer[i - 1]])
    return " ".join(str(n) for n in answer[1:])
57d8f5465884e34d4db0e977e53518086eb605d7
22,535
import json


def format_entry_as_json(entry):
    """
    Return a blog entry in JSON format.

    :param entry: blog entry with .subject, .content and .created attributes,
        or a falsy value
    :return: JSON string for the entry; empty fields when entry is falsy
    """
    if not entry:
        return json.dumps({"subject": "", "content": "", "date": ""}, indent=4)
    payload = {
        "subject": entry.subject,
        "content": entry.content,
        "date": entry.created.strftime('%b %d, %Y - %H:%M'),
    }
    return json.dumps(payload, indent=4, separators=(',', ':'))
c057c317db40322ecea24098d231c6591ae99453
22,536
def make_scoped_name(*args):
    """
    Join a series of strings with dots, following Python's scope
    convention, i.e. pkg.subpkg.module.

    Args:
        *args: strings, joined in the order given.

    Returns:
        str: dotted string-scope representation.
    """
    separator = '.'
    return separator.join(args)
39895772eb6d4cb8b0c63b54f8f2cfd4c83964c9
22,537
def AND(p: bool, q: bool) -> bool:
    """Conjunction operator used in propositional logic"""
    if p:
        return bool(q)
    return False
5e166eff3b1b998490fd5ed1c9e6e034de1efea0
22,538
import copy import os def _compute_paths_to_names(env): """ Single-environment version of conversion of filepath(s) to name(s). This is similarly motivated by allowing tests' assertions about equality between Mappings to be independent of Project instance's effort to ensure that filepaths are absolute. :param Mapping env: environment datum by name :return Mapping: same as the input, but with conversion(s) performed """ reduced = copy.deepcopy(env) for pathvar in ["submission_template"]: _, reduced[pathvar] = os.path.split(reduced[pathvar]) return reduced
613920c946a9379199ff6ce3f700f90eb0ebed08
22,540
def compare_tree_to_dict(actual, expected, keys):
    """Compare parts of lxml.etree objects to dicts.

    True when, for every paired (element, dict) and every key, the element's
    attribute equals the dict's value.
    """
    return all(
        elem.get(key) == data.get(key)
        for elem, data in zip(actual, expected)
        for key in keys
    )
45d9d2784ed559cc0bba8f69ad12e1662379eb09
22,542
from typing import Sequence
import difflib


def did_you_mean(message: str, user_input: str, choices: Sequence[str]) -> str:
    """Given a list of choices and an invalid user input, append the closest
    matching choice to the message.

    :param message: base message to return/extend
    :param user_input: the invalid input to match against
    :param choices: candidate strings; when empty, message is returned as-is
    :return: message, possibly extended with a "Did you mean: ...?" hint
    """
    if not choices:
        return message
    # max() over the choices directly; the original built a dict keyed by
    # similarity ratio, which silently dropped choices with equal ratios.
    best = max(
        choices,
        key=lambda choice: difflib.SequenceMatcher(a=user_input, b=choice).ratio(),
    )
    return message + "\nDid you mean: %s?" % best
f3400d27dc31d3d56f90511afacf290d24c60c7e
22,544
import torch


def calculate_assignment_probabilites(assignments, num_clusters):
    """
    Estimate an empirical pdf by counting how often each cluster index
    occurs in ``assignments``.
    """
    counts = torch.bincount(assignments, minlength=num_clusters).float()
    return counts / counts.sum()
1b04d9978b91f230eca9487e8bd31cdc16b33e8d
22,545
def array_to_string(array, delimiter=" ", format="{}", precision=None):
    """
    Convert a numeric array into the string format used in mujoco.

    Examples:
        [0, 1, 2] => "0 1 2"

    Rounding via ``precision`` only applies with the default "{}" format;
    with a custom format string the values are passed through unchanged.
    """
    use_rounding = precision is not None and format == "{}"
    if use_rounding:
        pieces = [format.format(round(x, precision)) for x in array]
    else:
        pieces = [format.format(x, precision) for x in array]
    return delimiter.join(pieces)
8b308b41d5b6f82d58a8b9a4afd529fc7cbf7408
22,546
import argparse
import os


def parse_args(args):
    """Parse list/tuple of arguments with argparse module - **Parameters** and **returns**::

    :param list args: arguments to be parsed
    :returns namespace: parsed arguments
    """
    root_parser = argparse.ArgumentParser(description='jumpdir')
    commands = root_parser.add_subparsers(help='sub-command help', dest='commands')

    # jumpdir search ...
    search_cmd = commands.add_parser('search',
                                     help='search home directory for a directory matching given search term')
    search_cmd.add_argument('search_term',
                            help='directory name to search for (case insensitive).',)
    search_cmd.set_defaults(which='search')

    # jumpdir add ...
    add_cmd = commands.add_parser('add', help='add bookmark')
    add_cmd.add_argument('name', help='name of bookmark to add')
    add_cmd.add_argument('-p', '--path', default=os.getcwd(),
                         help="define path that bookmark points to")

    # jumpdir delete ...
    delete_cmd = commands.add_parser('delete', help='delete bookmark')
    delete_cmd.add_argument('name', help='name of bookmark to remove')

    # jumpdir list ...
    commands.add_parser('list', help='list saved bookmarks')

    return root_parser.parse_args(args)
ca5c977fc6568b545fa8e5add087f8742af531f5
22,547
import json


def json_roundtrip(data: dict) -> dict:
    """Input `data` is returned after JSON dump/load round trip."""
    serialized = json.dumps(data)
    return json.loads(serialized)
2e15814d1e975f5f3e845196365de5b521e60cd8
22,548
def is_apple_os(os_):
    """returns True if OS is Apple one (Macos, iOS, watchOS or tvOS"""
    apple_names = {'Macos', 'iOS', 'watchOS', 'tvOS'}
    return str(os_) in apple_names
77b85f8e4fec837c5009fff3d8e8446c9f1d0d58
22,550
def getTZLookup(tzfname='cities15000.txt'):
    """Returns a mapping from gps locations to time-zone names.

    The `tzfname` file is read to map gps locations to timezone names.
    This is from: http://download.geonames.org/export/dump/cities15000.zip

    Returns a list of `((lat, lon), timezone)` pairs.
    """
    # Use a context manager so the file handle is closed; the original
    # called open() inline and leaked the handle.
    with open(tzfname) as f:
        rows = [line.rstrip('\n').split('\t') for line in f if line.strip()]
    # Columns 4/5 are latitude/longitude, column 17 is the timezone name
    # (geonames cities15000 layout).
    return [((float(row[4]), float(row[5])), row[17]) for row in rows]
3dcb3b297be72eb55c2d75ffc0bf269e27775232
22,553
def pages_to_article(article, pages):
    """Return all text regions belonging to a given article.

    Collects, page by page, the regions whose 'pOf' matches the article id,
    and records whether all pages agree on coordinate conversion.  Any
    failure marks the article with has_problem=True instead of raising.
    """
    try:
        art_id = article['m']['id']
        print("Extracting text regions for article {}".format(art_id))
        regions_by_page = [
            [region for region in page["r"] if region["pOf"] == art_id]
            for page in pages
        ]
        convert_coords = [page['cc'] for page in pages]
        # True only when every page's 'cc' averages to exactly 1.0
        article['m']['cc'] = sum(convert_coords) / len(convert_coords) == 1.0
        article['has_problem'] = False
        article['pprr'] = regions_by_page
        return article
    except Exception:
        article['has_problem'] = True
        return article
2ae0105eaf60f57482e2ab5646318408eba8862a
22,556
def format_with_default_value(handle_missing_key, s, d):
    """Formats a string with handling of missing keys from the dict.

    Calls s.format(**d) while handling missing keys by calling
    handle_missing_key to get the appropriate values for the missing keys.

    Args:
        handle_missing_key: A function that takes a missing key as the
            argument and returns a value for that missing key.
        s: A format string.
        d: A dict providing values to format s.

    Returns:
        s.format(**d) with missing keys filled in via handle_missing_key.
    """
    # Work on a copy so the caller's dict is never mutated.
    copy = dict(**d)
    # Retry until format() succeeds; each KeyError names one missing key,
    # which is filled in before retrying, so the loop terminates once every
    # placeholder in s has a value.
    while True:
        try:
            return s.format(**copy)
        except KeyError as ex:
            key = ex.args[0]
            copy[key] = handle_missing_key(key)
957b851dcacfc98c5bcdb5f1c76850014f8262f5
22,559
from operator import mul


def dot(A, B):
    """
    Compute the product of 1-D list ``A`` with each row of 2-D list ``B``,
    returning one dot product per row.
    """
    return [sum(map(mul, A, row)) for row in B]
18ed7e4333190421d8abe25748e0dcfa244b7701
22,562
def remove_backslash(path):
    """Replace escaped-space sequences in a path with plain spaces.

    Only the two-character sequence backslash+space ("\\ ", as produced by
    shell escaping) is replaced with a single space; lone backslashes
    elsewhere in the path are left untouched.

    Returns the resulting path string (the input is returned unchanged when
    it contains no escaped spaces).

    Parameters
    ----------
    path: a path (str)
        a string that represents a path that leads to a valid dir
    """
    if '\\ ' in path:
        path = path.replace('\\ ', ' ')
    return path
ab978a77e0c8e8980d1220eb96898ed2b1fd648a
22,563
def check_duplicate_codewords(C):
    """Check if there are two equal codewords in C.

    Compares every unordered pair of codewords (numpy arrays), prints each
    duplicate found plus a summary count, and returns the list of index
    pairs [w, w2] with w < w2 that are duplicates.
    """
    duplicate_list = []
    n = 0
    # Start the inner range at w + 1: the original iterated the full N x N
    # grid and filtered with `if w < w2`, plus a redundant `w is not w2`
    # identity check on ints (always true once w < w2 holds).
    for w in range(len(C)):
        for w2 in range(w + 1, len(C)):
            if (C[w] == C[w2]).all():
                print('Duplicates detected! C[{0}] = C[{1}]'.format(w, w2))
                print('C[{0}] = {1}'.format(w, C[w]))
                print('C[{0}] = {1}'.format(w2, C[w2]))
                duplicate_list.append([w, w2])
                n = n + 1
    print('{} duplicates found'.format(n))
    return duplicate_list
bb4027cfe235338e6e04ce5c01ff079e1d7dd7fb
22,564
def sum_combinations(numbers):
    """Add all combinations of the given numbers, of at least one number"""
    # Seed with 0 (the empty combination); for each number, extend the list
    # with that number added to every sum seen so far.
    sums = [0]
    for n in numbers:
        sums = sums + [n + existing for existing in sums]
    sums.remove(0)  # drop the empty-combination seed
    return sums
193f3c7285f70f13435e971844288cb6faeb1d98
22,565
import json


def load_json(filepath):
    """
    Load a json file.

    Inputs
        filepath: string, path to file
    Outputs
        data: dictionary, json key, value pairs

    Example
        path = "~/git/msc-data/unity/roboRacingLeague/log/logs_Sat_Nov_14_12_36_16_2020/record_11640.json"
        js = load_json(path)
    """
    with open(filepath, "rt") as fp:
        return json.load(fp)
bca35e8e10da33ac599a8895e8495fb5eec829e0
22,566
def parse_cfg_args(arg_list):
    """Parse command-line style config settings to a dictionary.

    Given a list in format [section.key=value, ...] return a dictionary in
    form {(section, key): value, ...}. For example:

    .. code-block:: python

       ['corpus.load=english-mz',
        'corpus.data_in=/home/user/corpora/ontonotes/data/']

    becomes:

    .. code-block:: python

       {('corpus', 'load'): 'english-mz',
        ('corpus', 'data_in'): '/home/user/corpora/ontonotes/data/'}

    Values may themselves contain '=' characters (only the first '=' splits
    key from value); the original rejected such arguments.

    See also :func:`load_config` and :func:`load_options`
    """
    if not arg_list:
        return {}
    config_append = {}
    for arg in arg_list:
        # partition splits on the FIRST '=', allowing '=' inside the value
        key, sep, value = arg.partition("=")
        section_key = key.split(".")
        if not sep or len(section_key) != 2:
            raise Exception("Invalid argument; not in form section.key=value : " + arg)
        config_append[tuple(section_key)] = value
    return config_append
96e4a0ce5af3c6e9085e8674ce77a3119d3e0dc1
22,567
from math import log


def idf(term, corpus):
    """
    Compute inverse document frequency.

    IDF is the logarithm of the total number of documents in the corpus over
    the number of documents containing the search term:
    log(all documents / documents containing the search term).
    If *no* document contains the term, 1 is added to the denominator to
    avoid division by zero.

    Parameters:
        term: a string containing the search term
        corpus: a list of lists; the outer list is the corpus, the inner
            lists are document texts split into tokens

    Return Value:
        a float representing the idf value
    """
    documents_with_term = 0
    for document in corpus:
        # `in` performs the same token == term scan the original inner loop did
        if term in document:
            documents_with_term += 1
    if documents_with_term == 0:
        # Explicit zero test replaces the original try/except ZeroDivisionError.
        # The original fallback `log(len(corpus) / 1 + documents_with_term)`
        # had misleading precedence and was only correct because
        # documents_with_term is 0 on this path; the parenthesized form below
        # computes the documented log(N / (1 + 0)) with the same value.
        return log(len(corpus) / (1 + documents_with_term))
    return log(len(corpus) / documents_with_term)
15ca03e272a0e535500f38e3bb73bab342e42390
22,569
def special_cases():
    """Returns the special cases which are not handled by other methods"""
    return {
        # Cyrillic
        (0x0401, "\u0415"),  # CYRILLIC CAPITAL LETTER IO
        (0x0451, "\u0435"),  # CYRILLIC SMALL LETTER IO
        # Symbols of "Letterlike Symbols" Unicode Block (U+2100 to U+214F)
        (0x2103, "\xb0C"),   # DEGREE CELSIUS
        (0x2109, "\xb0F"),   # DEGREE FAHRENHEIT
        (0x2117, "(P)"),     # SOUND RECORDING COPYRIGHT
    }
0679077141d573a4f841edf76d0b48afa0fc371f
22,570
def get_points_dict(encounters, applied=None):
    """Given a queryset of encounter objects, return a dictionary mapping
    tagids to their associated lat-longs, in the order the encounters appear
    (applied tags first, then recaptures):

        tagid: [[lat1, lon1], [lat2, lon2], ...]

    Arguments:
    - `encounters`: objects with .tagid, .dd_lat, .dd_lon attributes
    - `applied`: optional extra objects with the same attributes, prepended
    """
    tags = []
    if applied:
        tags = [[x.tagid, x.dd_lat, x.dd_lon] for x in applied]
    # recapture events follow any applied tags
    tags.extend([x.tagid, x.dd_lat, x.dd_lon] for x in encounters)
    tag_dict = {}
    for tagid, lat, lon in tags:
        # setdefault keys on presence, not truthiness; the original
        # `if tag_dict.get(tag[0])` overwrote the list whenever the tagid
        # was falsy (e.g. 0 or ""), losing earlier points.
        tag_dict.setdefault(tagid, []).append([lat, lon])
    return tag_dict
48bae54fa4f2956afedd6910977bab286a6ef092
22,573
def hot_fix_label_issue(measurements: list) -> list:
    """
    Align measurement labels with upper and lower case letters.

    :param measurements: measurements whose labels may contain upper case
        letters; each object's ``label`` is lower-cased in place.
    :return: the same list, with all labels lower-cased.
    """
    for measurement in measurements:
        measurement.label = measurement.label.lower()
    return measurements
7635838dbc4cae00d3803bb3d9ddcccf7872037f
22,574
def two_oldest_ages(ages):
    """Return two distinct oldest ages as tuple (second-oldest, oldest).

        >>> two_oldest_ages([1, 2, 10, 8])
        (8, 10)

        >>> two_oldest_ages([6, 1, 9, 10, 4])
        (9, 10)

    Even if more than one person has the same oldest age, this should return
    two *distinct* oldest ages:

        >>> two_oldest_ages([1, 5, 5, 2])
        (2, 5)
    """
    # Deduplicate, sort ascending, keep the two largest: O(n log n).
    distinct_ages = sorted(set(ages))
    return tuple(distinct_ages[-2:])
38944082fdf1ca44ff1813b9570dcb0377f40960
22,578
def replace_pad(l, new_symbol='-'):
    """<pad> refers to epsilon in CTC replace with another symbol for readability"""
    return [new_symbol if token == "<pad>" else token for token in l]
94671a5d035a4ce2fae1f26e65c52ad55e4dba6c
22,579
import random
import string


def get_random_string(digit=8):
    """for Cache Control Hash"""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(digit))
df504710188be247ebad0c144ce8b8f6954c3423
22,580
def getLemma(line):
    """
    Retrieve the second tab-separated field in a COHA corpus line, or the
    empty string when given an empty line.
    """
    if line == "":
        return ""
    fields = line.split("\t")
    return fields[1]
34c51925d6a9d3908bf8b3114256e50ae3712467
22,581
from typing import Optional
import os


def get_template(
    path: Optional[str] = None,
    platform: Optional[str] = None,
    command: Optional[str] = None,
    yang: Optional[str] = None,
    misc: Optional[str] = None,
):
    """
    Locate a template file and return its content.

    **Attributes**

    * path (str) - OS path to template to load
    * platform (str) - name of the platform to load template for
    * command (str) - command to load template for
    * yang (str) - name of YANG module to load template for
    * misc (str) - OS path to template within repository misc folder

    Valid combinations (``path`` always wins):

    * ``path="./misc/foo/bar.txt"``
    * ``platform="cisco_ios", command="show version"``
    * ``yang="ietf-interfaces", platform="cisco_ios"``
    * ``misc="foo_folder/bar_template.txt"``
    """
    if path:
        # strip an optional "ttp://" scheme prefix
        stripped = path.strip()
        if stripped.startswith("ttp://"):
            path = stripped[6:]
    elif platform and command:
        normalized_platform = platform.lower().replace(" ", "_")
        normalized_command = command.lower().replace("|", "pipe").replace(" ", "_")
        path = "platform/{}_{}.txt".format(normalized_platform, normalized_command)
    elif platform and yang:
        normalized_platform = platform.lower().replace(" ", "_")
        normalized_yang = yang.lower().replace(" ", "_")
        path = "yang/{}_{}.txt".format(normalized_yang, normalized_platform)
    elif misc:
        path = "misc/{}".format(misc)
    else:
        return None
    # templates live relative to this module
    template_filename = os.path.join(os.path.dirname(__file__), path)
    with open(template_filename, "r") as f:
        return f.read()
35336707920339021e7443fba3a4d6a733cef498
22,582
import re def _regex_search(pattern: str, string: str, group: int): """Shortcut method to search a string for a given pattern. :param str pattern: A regular expression pattern. :param str string: A target string to search. :param int group: Index of group to return. :returns: Substring pattern matches. """ regex = re.compile(pattern) results = regex.search(string) if not results: return False return results.group(group)
c703ac3eed3cbb981586b5a950f071c8535f32a5
22,583
import json


def jsonString(obj, pretty=False):
    """Create a JSON string; with pretty formatting when requested.

    Args as data:
        obj: object that needs to be converted to a json string
        pretty: truthy to sort keys, indent by 4 and append a newline

    Returns:
        JSON string corresponding to the input object
    """
    # `if pretty:` replaces the `pretty == True` identity-style comparison;
    # any truthy value now selects pretty output (bool callers unchanged).
    if pretty:
        return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': ')) + '\n'
    return json.dumps(obj)
7fb621029ee509240dfd46bc641dcde34c87170c
22,584
def calculateConsensus(u, d, t, U_sum, D_sum):
    """
    Calculate consensus score - a heuristic for the percentage of the
    community who finds a term useful.

    Since not every user votes on a given term, user reputation is used to
    estimate consensus: as the number of voters approaches the number of
    users, the votes become more equitable. (See doc/Scoring.pdf.)

    :param u: Number of up voters.
    :param d: Number of down voters.
    :param t: Number of total users.
    :param U_sum: Sum of up-voter reputation.
    :param D_sum: Sum of down-voter reputation.
    """
    voters = u + d
    if not voters:
        return 0
    total_reputation = U_sum + D_sum
    if total_reputation > 0:
        up_ratio = float(U_sum) / total_reputation
    else:
        up_ratio = 0.0
    # Non-voters (t - voters) are credited at the up-voter reputation share.
    return (u + up_ratio * (t - voters)) / t
fddf0708ba8ed2bba7c6351f8b2b1451c56caee9
22,585
def newStats():
    """Allocate and initialize a new stats object.

    Returns: Status array for tabulation."""
    # numeric counters start at zero
    stats = dict.fromkeys(
        ['recipients', 'opened', 'clicked', 'converted',
         'unsubscribed', 'numberOfLinksClicked'], 0)
    stats['emailID'] = None
    stats['type'] = None
    # each breakdown gets its own fresh dict
    for key in ('conversion', 'domain', 'splitName', 'status',
                'blast_status', 'blast_splitName'):
        stats[key] = {}
    return stats
70dbcbf91caf6b3978155c4d896253d8fb7b60d3
22,586
import struct


def read_plain_int96(fo):
    """Reads a 96-bit int using the plain encoding"""
    # little-endian int64 followed by little-endian int32, combined as
    # (64-bit part << 32) | 32-bit part, matching the original layout
    wide, narrow = struct.unpack("<qi", fo.read(12))
    return wide << 32 | narrow
39d924fe211a17192b4b3158340d40b3f28948d1
22,587
import typing def _simple(item: typing.Any) -> bool: """Check for nested iterations: True, if not. :param item: item to check for repr() way :type item: typing.Any :return: use repr() iver item by default :rtype: bool """ return not isinstance(item, (list, set, tuple, dict, frozenset))
c02443c0bf75de321f5af68b1e4c45a1348113c0
22,588
def get_common_date_pathrow(red_band_name_list, green_band_name_list, blue_band_name_list):
    """Return the sorted date_pathrow keys shared by all three band lists.

    :param red_band_name_list: red-band file names ('date_pathrow_...' style)
    :param green_band_name_list: green-band file names
    :param blue_band_name_list: blue-band file names
    :return: sorted list of 'date_pathrow' strings present in every list
    """
    def key_set(names):
        # A band's date_pathrow key is its first two '_'-separated fields.
        return {'_'.join(name.split('_')[:2]) for name in names}

    common = (key_set(red_band_name_list)
              & key_set(green_band_name_list)
              & key_set(blue_band_name_list))
    # sorted() gives a deterministic order (set iteration order is arbitrary).
    return sorted(common)
a5ede92c0d8d8ca95d173e23179e4c3ec0d4c1c5
22,589
def computeAirProperties(T, p, pInhPa=False):
    """Return (density [kg/m3], kinematic viscosity [m2/s]) of air.

    :param T: temperature in deg C
    :param p: pressure in mmHg (or hPa when ``pInhPa`` is True)
    :param pInhPa: interpret ``p`` as hPa and convert it to mmHg first
    :return: tuple ``(rho, nu)`` of density and kinematic viscosity
    """
    mmHgTohPa = 1.3332239
    if pInhPa:
        p = p/mmHgTohPa

    # Density from the engineering-toolbox correlation (imperial form,
    # converted: mmHg -> inHg via /25.4, degC -> degR via *9/5 on the
    # absolute temperature, lb/ft3 -> kg/m3 via 16.0185):
    # http://www.engineeringtoolbox.com/air-temperature-pressure-density-d_771.html
    rho = 1.325 * p/25.4 / ((T+273.15)*9/5) * 16.0185

    # Dynamic viscosity from the Sutherland equation:
    # http://www-mdp.eng.cam.ac.uk/web/library/enginfo/aerothermal_dvd_only/aero/fprops/propsoffluids/node5.html
    mu = 1.458e-6 * (T+273.15)**(3./2.) / ((T+273.15) + 110.4)

    # (The original carried an unused triple-quoted block of alternative
    # density formulas that was evaluated as a no-op string at every call;
    # it has been removed.)
    return rho, mu/rho
645db891cc775bbd1702a8ee437a2fa1fe9b62a3
22,590
import re


def tags_to_dict(doc):
    """Process the tags from a single document and return a dict.

    Assumes a tag schema with the fields in `fields`. Boolean values are
    represented as '0' and '1'. Also retrieves the `country` and `language`
    fields as tags if they are present in the document.
    """
    # Initialise dict with all fields ('education' is tracked separately
    # as a boolean flag; the schema fields below default to '0').
    d = {'education': '0'}
    fields = ['affiliation', 'demographic', 'emphasis', 'funding', 'identity', 'institution', 'media', 'perspective', 'politics', 'reach', 'region', 'religion']
    for prop in fields:
        d[prop] = '0'
    # Add country and language if available
    if 'country' in doc:
        d['country'] = doc['country']
    if 'language' in doc:
        d['language'] = doc['language']
    # If the doc contains tags...
    if 'tags' in doc:
        # Iterate through the doc tags
        for tag in doc['tags']:
            # Set education to True if the education tag is detected, then
            # strip the 'education/' prefix so the remainder can match below.
            if re.search('^education', tag):
                d['education'] = '1'
                tag = re.sub('^education/', '', tag)
            # Find all subtags and get the penult as the key.
            # NOTE(review): this alternation omits '^affiliation' and
            # '^perspective' even though both are initialised in `fields`
            # and handled in the head-rewrites below — confirm intended.
            subtag_properties = '^demographic|^emphasis|^funding|^identity|^institution|^media|^politics|^reach|^region|^religion'
            subtags = re.findall(subtag_properties, tag)
            if len(subtags) > 0:
                # Split the tag path into its leading segment (head) and
                # the rest (tail), then normalise a few special cases.
                tail = tag.split('/')
                head = tail.pop(0)
                tail = '/'.join(tail)
                # 'X/demographic/religion/...' is re-homed under 'religion'.
                if tail.startswith('demographic/religion/'):
                    head = 'religion'
                    tail = tail.replace('demographic/religion/', '')
                if head.startswith('demographic'):
                    head = 'demographic'
                    tail = tail.replace('demographic/', '')
                if head.startswith('affiliation'):
                    head = 'affiliation'
                    tail = tail.replace('affiliation/', '')
                if head.startswith('institution'):
                    head = 'institution'
                    tail = tail.replace('institution/', '')
                if head.startswith('funding'):
                    head = 'funding'
                    tail = tail.replace('funding/', '')
                if head.startswith('emphasis'):
                    head = 'emphasis'
                    tail = tail.replace('emphasis/', '')
                if tail.startswith('religion'):
                    head = 'religion'
                    tail = tail.replace('religion/', '')
                # Combine UK and US with the rest of the tag
                tail = re.sub('^UK/', 'UK-', tail)
                tail = re.sub('^US/', 'US-', tail)
                # Set the new dict key and value (last matching tag wins).
                d[head] = tail
    # Return the dict
    return d
a7e40a669b3f59a9226cef97e79b47adb69e7474
22,591
def check_default_attack_params(args_):
    """
    Fill in default adversarial-attack parameters on *args_* when none were
    provided. A full description of the default parameters can be found in
    the supplementary of our paper.

    When ``args_.replicate`` is set, model settings (weight files, norms,
    prototype/tangent counts) are forced to the paper's values; otherwise
    only unset values (``-1``) receive defaults.

    :param args_: parsed argument namespace; mutated in place.
    :return: the same namespace with defaults applied.
    :raises ValueError: if ``eval_norm`` is empty and replication was not
        requested (the norm is needed to pick the attack defaults).
    """
    # --- Paper-replication presets: model weights, norms, prototypes ---
    if args_.replicate:
        if args_.model == "glvq":
            if args_.dataset == "mnist":
                if args_.model_norm == "inf":
                    args_.eval_norm = "inf"
                    args_.weights_path = "weight_files/GLVQ/mnist/" \
                                         "linf_trained/glvq_loss.h5"
                    args_.prototypes = 128
                elif args_.model_norm == "2":
                    args_.eval_norm = "2"
                    args_.weights_path = \
                        "weight_files/GLVQ/mnist/l2_trained/trained_model.h5"
                    args_.prototypes = 256
            elif args_.dataset == "cifar10":
                if args_.model_norm == "inf":
                    args_.eval_norm = "inf"
                    args_.weights_path = "weight_files/GLVQ/cifar10/" \
                                         "linf_trained/trained_model.h5"
                    args_.prototypes = 64
                elif args_.model_norm == "2":
                    args_.eval_norm = "2"
                    args_.weights_path = "weight_files/GLVQ/cifar10/" \
                                         "l2_trained/trained_model.h5"
                    args_.prototypes = 128
        if args_.model == "rslvq":
            # RSLVQ was only trained/evaluated with the l-inf norm.
            args_.model_norm = "inf"
            args_.eval_norm = "inf"
            if args_.dataset == "mnist":
                args_.weights_path = "weight_files/RSLVQ/mnist/" \
                                     "trained_model.h5"
                args_.prototypes = 128
            elif args_.dataset == "cifar10":
                args_.weights_path = "weight_files/RSLVQ/cifar10/" \
                                     "trained_model.h5"
                args_.prototypes = 128
        if args_.model == "gtlvq":
            # GTLVQ was only trained/evaluated with the l2 norm.
            args_.model_norm = "2"
            args_.eval_norm = "2"
            if args_.dataset == "mnist":
                args_.weights_path = "weight_files/GTLVQ/mnist/" \
                                     "trained_model.h5"
                args_.prototypes = 10
                args_.tangents = 12
            elif args_.dataset == "cifar10":
                args_.weights_path = "weight_files/GTLVQ/cifar10/" \
                                     "trained_model.h5"
                args_.prototypes = 1
                args_.tangents = 100

    # --- Default attack params (restarts / epsilon / steps per norm) ---
    if args_.eval_norm == "":
        # Bug fix: the original constructed this Exception without raising
        # it, silently continuing with an unset norm.
        raise ValueError("--eval_norm has to be set when not replicating "
                         "the paper results")
    if args_.eval_norm == 'inf':
        if args_.restarts == -1 or args_.replicate:
            args_.restarts = 3
        if args_.epsilon == -1 or args_.replicate:
            if args_.dataset == "mnist":
                args_.epsilon = 0.3
            else:
                args_.epsilon = 8 / 255
        if args_.steps == -1 or args_.replicate:
            args_.steps = 200
    if args_.eval_norm == '2':
        if args_.restarts == -1 or args_.replicate:
            args_.restarts = 10
        if args_.epsilon == -1 or args_.replicate:
            if args_.dataset == "mnist":
                args_.epsilon = 1.58
            else:
                args_.epsilon = 36 / 255
        if args_.steps == -1 or args_.replicate:
            # RSLVQ needs more iterations to converge under the l2 attack.
            if args_.model == "rslvq":
                args_.steps = 3000
            else:
                args_.steps = 1000
    return args_
3ca98d2713cf1ff18d6c596bd71d307e046796e2
22,592
def load_card(data, schema):
    """Validate that the card is correctly formed and normalize it.

    Parameters
    ----------
    data : `dict`
        raw card data (``description`` is a list of lines)
    schema : `dict`
        card schema dictionary with a ``rarity`` weight table

    Returns
    -------
    `dict`
        card dictionary with joined description and computed draw chance

    Raises
    ------
    KeyError
        if the card's rarity is absent from the schema
    """
    card = dict(data)
    # Collapse the per-line description into a single text block.
    card['description'] = '\n'.join(data['description'])
    # Draw chance is the reciprocal of the rarity weight.
    card['draw_chance'] = 1 / schema['rarity'][data['rarity']]
    return card
236e3aca87b14fa0bb8792a11a94b7852d0c5678
22,593
def getSpkrs(comboSplitSegs, name_type):
    """Return the de-duplicated list of speakers found in the segments.

    Inputs:
        - comboSplitSegs: segments whose ``seg['name'][name_type]`` entry is
          a list of speaker labels
        - name_type: which name field to collect (e.g. ground truth)
    Outputs:
        - speaker labels sorted by their last two characters,
          form: "['FEE029', 'FEE030', 'MEE031', 'FEE032']"
    """
    unique = set()
    for seg in comboSplitSegs:
        unique.update(seg['name'][name_type])
    # Sort on the trailing two characters (the numeric speaker suffix).
    return sorted(unique, key=lambda spkr: spkr[-2:])
0279a30bbd12b41672a608d0a68d35e013b185e5
22,594
def readonly(label, value):
    """Build the template context for a readonly, form-input-style display."""
    return dict(label=label, value=value)
c153ae42b074dea68101ca2e3abce03d1ed6c552
22,596
def course_str(course):
    """Format a course mapping as "[pk] name semester year"."""
    return "[{pk}] {name} {semester} {year}".format(**course)
a9b1d6663ab18da220eceedc3c2319f9da80a08c
22,599
import functools


def loss_function_for_data(loss_function, X):
    """Bind a fixed dataset into a loss function.

    Parameters
    ----------
    loss_function : function
        loss whose ``X`` keyword receives the data
    X : coo_matrix
        data to pin as the ``X`` argument

    Returns
    -------
    function
        callable taking the remaining parameters of ``loss_function``,
        with ``X`` already supplied
    """
    bound_loss = functools.partial(loss_function, X=X)
    return bound_loss
782d050ed313146d9cdae38b7e63ed9dd287e3cc
22,600
import numpy


def cov_flat2polidx(cvc, parity_ord=True):
    """Convert a flat covariance cube to a polarization-indexed one.

    Parameters
    ----------
    cvc : array_like
        Covariance cube of shape (..., 2*N, 2*N), N dual-pol elements.
    parity_ord : Boolean
        If True (default) the polarization component is the index parity
        (even -> 0, odd -> 1); if False the first/second halves of the
        baseline axis map to components 0/1.

    Returns
    -------
    cvpol : array_like
        Polarization-indexed cube of shape (2, 2, ..., N, N).

    Notes
    -----
    Agnostic to linear vs circular polarization bases; component order is
    inherited from the flat ordering.
    """
    if parity_ord:
        pp = cvc[..., ::2, ::2]
        qq = cvc[..., 1::2, 1::2]
        pq = cvc[..., ::2, 1::2]
        qp = cvc[..., 1::2, ::2]
    else:
        # First-half, second-half order. Bug fixes vs the original:
        # integer (not float) division so the slice bounds are valid, and
        # qp takes second-half rows vs first-half columns (it previously
        # duplicated the pq slice).
        n = cvc.shape[-1] // 2
        pp = cvc[..., :n, :n]
        qq = cvc[..., n:, n:]
        pq = cvc[..., :n, n:]
        qp = cvc[..., n:, :n]
    cvpol = numpy.array([[pp, pq], [qp, qq]])
    return cvpol
8cbac455141693bb0b8261c4a52ad9e83f509583
22,602
def get_app_specific_information(json_list_of_transactions):
    """Extract the unique app records from a list of Venmo transactions.

    Each transaction carries an ``app`` dict describing the client used
    (iPhone app, desktop, etc.). Only a handful of distinct apps exist, so
    records are de-duplicated by their ``id`` field.

    :param json_list_of_transactions: iterable of transaction dicts, each
        with an ``app`` sub-dict containing at least an ``id`` key.
    :return: list of copied app dicts, one per distinct app id, in first-seen
        order.
    """
    apps = []
    seen_app_ids = set()
    for transaction in json_list_of_transactions:
        app_details = transaction['app']
        app_id = app_details['id']
        # Checking the id first keeps the copy work proportional to the
        # small number of distinct apps, not the number of transactions.
        if app_id in seen_app_ids:
            continue
        seen_app_ids.add(app_id)
        # Shallow copy (dict(...)) replaces the original's manual key loop
        # plus .copy(); the unused app_subkeys list was removed.
        apps.append(dict(app_details))
    return apps
96610d947d11965b12935a29f86c56dd0b57b740
22,603
import typing


def encoded(string: typing.Union[str, bytes], encoding='utf-8') -> bytes:
    """Cast string to bytes in a specific encoding - with some guessing
    about the encoding.

    :param encoding: encoding which the object is forced to
    """
    assert isinstance(string, (str, bytes))
    if isinstance(string, str):
        return string.encode(encoding)
    try:
        # Make sure the bytes already decode in the requested encoding ...
        string.decode(encoding)
    except UnicodeDecodeError:
        # ... otherwise use latin1 as the best guess to decode before
        # re-encoding as requested.
        return string.decode('latin1').encode(encoding)
    return string
d00a1b857dd80533d1f269a5ef3961cb98e68095
22,606
def bin2dec(x):
    """Return the decimal value of a bit sequence.

    Parameters
    ----------
    x : iterable of int
        Bits in least-significant-first order (index 0 is the 2**0 bit).
    """
    return sum(bit << position for position, bit in enumerate(x))
624ac298487a89bc2822f7fc1442a4d89af4f6b3
22,607
def get_nested_h5_field(h, fields, resolve_value=True, is_matrix=False):
    """Walk nested keys of an HDF5 group and optionally resolve the leaf.

    Attributes
    ----------
    h : HDF5 group
    fields : list or string
        key path to follow (a bare string is treated as a one-element path)
    resolve_value : bool
        when False, return the node itself instead of its ``.value``
    is_matrix : bool
        when True, return the resolved 2-D data as-is instead of
        squeezing it to 1-D
    """
    if not isinstance(fields, list):
        fields = [fields]  # single key -> one-element path
    node = h
    for key in fields:
        node = node[key]
    if not resolve_value:
        return node
    data = node.value
    if is_matrix:
        return data
    # A "vector" may be stored as (n, 1) or (1, n): squeeze along the
    # shorter axis so a 1-D array comes back either way.
    if data.shape[0] > data.shape[1]:
        return data[:, 0]
    return data[0, :]
b7656734bcd67e15b4ef9fcc68d267a613a3fdc1
22,608
import os
import subprocess


def _download(target_path, name, url):
    """
    Downloads the playlist `name` found at `url` as WAV audio.

    Returns the path to the directory that the playlist was downloaded to.

    Note that this function makes no guarantees at all that it actually
    managed to do this: youtube-dl's exit status is deliberately not
    checked (best-effort download).
    """
    path = target_path + "/" + name
    os.makedirs(path, exist_ok=True)

    # Download the playlist to that directory.
    print(" |-> Executing youtube-dl on the playlist...")
    # Argv is built as a list rather than split from a string, so a URL
    # containing spaces can no longer be broken into multiple arguments.
    dl_args = [
        "youtube-dl", "--extract-audio", "--audio-format", "wav",
        "--yes-playlist", "--ignore-errors", "--max-filesize", "3G",
        url, "-o", path + "/%(title)s-%(id)s.%(ext)s",
    ]
    subprocess.run(dl_args)  # Don't check result, who knows what youtube-dl returns

    return path
f04f30b55500b1df198538d9f999960dad0ea27d
22,609
def time_duration_formatter(x):
    """Format a duration given in seconds as e.g. '1d 2h 3m 4s'.

    Leading zero-valued units are omitted; seconds are always shown.
    """
    minutes, seconds = divmod(x, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    parts = []
    if days:
        parts.append('%dd' % days)
    # Once a larger unit is present, all smaller units are shown too.
    if parts or hours:
        parts.append('%dh' % hours)
    if parts or minutes:
        parts.append('%dm' % minutes)
    parts.append('%ds' % seconds)
    return ' '.join(parts)
a0b28b2dd6cd81cb297b2b1dbbc184ff1be896b8
22,611
def get_keys_from_post(request, *args):
    """Fetch the named keys from ``request.POST`` as a tuple.

    Raises KeyError when any requested key is missing.
    """
    post_data = request.POST
    return tuple(post_data[name] for name in args)
f504a784849470405dbb6022bc0b7ce877caceda
22,612
async def some_async_function(value):
    """A demo coroutine that simply yields back *value* (no actual I/O)."""
    result = value
    return result
9324669acd9955095e12c28acc09cf0b7d68d767
22,613
import json
import hashlib


def compute_caching_hash(d):
    """Generate a stable hash of a dictionary for use as a cache file name.

    Keys are sorted before serialization so logically-equal dicts map to
    the same digest. Intended for experiments cache files.
    """
    canonical = json.dumps(d, sort_keys=True, ensure_ascii=True)
    digest = hashlib.sha1()
    digest.update(canonical.encode('utf8'))
    return digest.hexdigest()
140a68c2f349fb5f3db6d48a24c16cc22a215bb0
22,614
def slopeFromVector(v):
    """Return the slope (y/x) of a QVector2D-like object.

    For a vertical vector (x == 0) the y component itself is returned,
    mirroring the original behaviour.
    """
    run = v.x()
    rise = v.y()
    if run == 0.0:
        return rise
    return rise / run
26d33063f84b889ee80868758eb7d15d9abf7dae
22,615
def abstract_class_property_A(**kwargs):
    """
    Decorator function to decorate objects with abstract class properties.

    Each keyword argument names a required attribute: instances of the
    decorated class must have that attribute set by construction time,
    or __init__ raises AttributeError.

    Leaves behind another decorator that takes a class as its input.
    """
    def abstractor(WrappedClass):
        class PropertyWrapper(WrappedClass):
            """
            A Wrapper class to decorate objects with abstract class properties.
            The class has a default dummy annotation, just so that we can
            append the items of the input dictionary d to the class definition.
            The dummy property is deleted at the end.
            """
            # Placeholder so __annotations__ exists on this class; removed below.
            _dummy_: None

            def __init__(self, *args, **kwargs):
                WrappedClass.__init__(self)
                # Collect required-attribute names from both the wrapper's
                # annotations (seeded by the decorator) and the concrete
                # subclass's own annotations.
                d_ = dict()
                d_props = PropertyWrapper.__dict__.get('__annotations__', {})
                for key, value in d_props.items():
                    d_[key] = value
                d_self = self.__class__.__dict__.get('__annotations__', {})
                for key, value in d_self.items():
                    d_[key] = value
                # Every collected name must exist as an instance attribute.
                for key in d_.keys():
                    if not hasattr(self, key):
                        raise AttributeError(f'required attribute {key} not present '
                                             f'in {self.__class__}')
                return

        res = PropertyWrapper
        # Seed the wrapper's annotations with the decorator kwargs plus any
        # annotations declared on the wrapped class itself.
        d_ = res.__dict__['__annotations__']
        for key, value in kwargs.items():
            d_[key] = value
        for key, value in WrappedClass.__dict__.get('__annotations__', {}).items():
            d_[key] = value
        # Drop the placeholder so it is never treated as a requirement.
        del d_['_dummy_']
        return res
    return abstractor
3c548e81b706e4cc3c78f1a49f513e5e206da0e2
22,616
import re


def uncamelcase(s,separator=" "):
    """
    Lower-case a CamelCase string, inserting *separator* before every
    capital letter except a leading one.

    EXAMPLES::

        sage: sage.categories.category_with_axiom.uncamelcase("FiniteDimensionalAlgebras")
        'finite dimensional algebras'
        sage: sage.categories.category_with_axiom.uncamelcase("JTrivialMonoids")
        'j trivial monoids'
        sage: sage.categories.category_with_axiom.uncamelcase("FiniteDimensionalAlgebras", "_")
        'finite_dimensional_algebras'
    """
    def prefix_separator(match):
        return separator + match.group()[0]

    # (?!^) skips the first character so no separator is prepended.
    return re.sub("(?!^)[A-Z]", prefix_separator, s).lower()
d550489954cbced9970b5973fcdef1c9c060a623
22,617
import random


def rnd_start(seq,murate,inrate,delrate):
    """Random mutation/indel start points for Poisson and Guassian processes

    For each event type (mutation, insertion, deletion) a uniformly random
    start offset in [0, 1/rate] is drawn when the sequence is longer than
    the mean event spacing 1/rate; shorter sequences start at 0, and a
    zero rate yields NaN (event type disabled).

    :param seq: the sequence the processes will run over (only its length is used)
    :param murate: mutation rate; presumably events per position -- TODO confirm
    :param inrate: insertion rate
    :param delrate: deletion rate
    :return: [mutation_start, insertion_start, deletion_start]
    """
    _l=len(seq)
    # Mutation start offset.
    if murate>0:
        if _l>(1/murate):
            _mustart=max(0, round((1/murate)*random.random()))
        else:
            _mustart=0
    else:
        # Rate of zero: mutation process disabled, flagged with NaN.
        _mustart=float('Nan')
    # Insertion start offset (same scheme as above).
    if inrate>0:
        if _l>(1/inrate):
            _instart=max(0, round((1/inrate)*random.random()))
        else:
            _instart=0
    else:
        _instart=float('Nan')
    # Deletion start offset (same scheme as above).
    if delrate>0:
        if _l>(1/delrate):
            _delstart=max(0, round((1/delrate)*random.random()))
        else:
            _delstart=0
    else:
        _delstart=float('NaN')
    return([_mustart, _instart, _delstart])
e768a5f146510f157fd1e0dc367370c1c45a8303
22,618
import os


def ignorable_name(fn):
    """Return True when *fn*'s basename is a recognized pseudo-file name
    (directory dots, FAT tables, orphan-file buckets); False for None."""
    if fn is None:
        return False
    pseudo_names = {".", "..", "$FAT1", "$FAT2", "$OrphanFiles"}
    return os.path.basename(fn) in pseudo_names
d8bb710490d1d31eda5a0050aeb0a7789aa6f6bf
22,619
def validation(model, val_fn, datagen, mb_size=16):
    """ Validation routine for speech-models
    Params:
        model (keras.model): Constructed keras model
        val_fn (theano.function): A theano function that calculates the cost
            over a validation set
        datagen (DataGenerator)
        mb_size (int): Size of each minibatch
    Returns:
        val_cost (float): Average validation cost over the whole validation
            set (0.0 when the generator yields no batches)
    """
    avg_cost = 0.0
    i = 0  # number of minibatches seen
    for batch in datagen.iterate_validation(mb_size):
        inputs = batch['x']
        labels = batch['y']
        input_lengths = batch['input_lengths']
        label_lengths = batch['label_lengths']
        # Due to convolution, the number of timesteps of the output
        # is different from the input length. Calculate the resulting
        # timesteps
        output_lengths = [model.conv_output_length(l) for l in input_lengths]
        # Final True flag presumably selects evaluation mode -- TODO confirm
        # against val_fn's signature.
        _, ctc_cost = val_fn([inputs, output_lengths, labels, label_lengths,
                              True])
        avg_cost += ctc_cost
        i += 1
    # Guard against an empty validation set (avoids division by zero).
    if i == 0:
        return 0.0
    return avg_cost / i
14b1465ec9934f95b96f2f019cc83a42e8c25cc7
22,620
def sumevenfib(N):
    """Sum every even Fibonacci number that does not exceed N.

    Convention: F_0 = 0, F_1 = 1. Every third Fibonacci number (F_3k) is
    even, so the even terms are generated directly, three steps at a time,
    without any parity tests.

    :param N: positive upper bound (inclusive)
    :return: sum of even Fibonacci numbers <= N
    """
    even_term, companion = 0, 1   # F_0 and F_1
    total = 0
    while even_term <= N:
        total += even_term
        # Jump three Fibonacci steps: F_{k+3} = 2*F_{k+1} + F_k,
        # and the companion advances to F_{k+4} = F_{k+3} + F_{k+1} + F_k.
        next_even = 2 * companion + even_term
        companion = next_even + companion + even_term
        even_term = next_even
    return total
7b1949fa72e4595265bad5d5b2354f25c8a83bbc
22,622
import collections def _append_cell_contents(notebook): """Appends prior cell contents to a later cell dependent on labels This function will iterate through a notebook and grab all cells that have a label and add them to any cell that references that label (i.e., has the label in its ref_labels list). Each cell's content will be displayed according to the order of its appearance in the notebook. """ Cell = collections.namedtuple('Cell', ['label', 'contents']) cells = [] for cell in notebook['cells']: label = cell.get('metadata', {}).get('label', None) ref_labels = cell.get('metadata', {}).get('ref_labels', []) if label is not None: cells.append(Cell(label, cell['source'])) elif ref_labels: cell['source'] = '\n\n'.join(cell.contents for cell in cells if cell.label in ref_labels).strip() return notebook
fc9d15d9c351e55a36201aae889e08b044c0ca9b
22,624
import socket


def get_local_addrs():
    """Return every IP address that resolves for this machine's hostname."""
    hostname = socket.gethostname()
    return [info[4][0] for info in socket.getaddrinfo(hostname, None)]
870114f53fdbbc631f992c731a9e75c2755d5326
22,625
def change_op(instructions, i, new_op):
    """
    Return a copy of `instructions` where the operation at index `i` is
    changed to `new_op`; the argument at that index is preserved and the
    input list is left untouched.
    """
    # Unpack to keep the original (op, arg) pair contract.
    _, arg = instructions[i]
    patched = list(instructions)
    patched[i] = (new_op, arg)
    return patched
3a2d6f690a341b2941a9feb20e8a060bb04cec04
22,626
def _intersection(A,B): """ A simple function to find an intersection between two arrays. @type A: List @param A: First List @type B: List @param B: Second List @rtype: List @return: List of Intersections """ intersection = [] for i in A: if i in B: intersection.append(i) return intersection
ad0be8b29900d7238df93309f5b9ad143e60ba0f
22,629
def scan_ioat_copy_engine(client, pci_whitelist):
    """Scan and enable IOAT copy engine.

    Args:
        pci_whitelist: Python list of PCI addresses in
            domain:bus:device.function format or
            domain.bus.device.function format
    """
    # Only forward the whitelist when one was actually supplied.
    params = {'pci_whitelist': pci_whitelist} if pci_whitelist else {}
    return client.call('scan_ioat_copy_engine', params)
0723727cc9fe94e707aa323da498d90d99c15104
22,631
import os


def get_mysql_config():
    """Return the parsed connection settings as a dict.

    Values come from the MYSQL_* environment variables (port defaults to
    3306). The result can be used in MySQLConnectionPool as:

        mysql_config = dblib.get_mysql_config('DATA_QUALITY')
        mysql_pool = MySQLConnectionPool(pool_size=1, **mysql_config)
    """
    env = os.environ.get
    return {
        "host": env("MYSQL_HOST"),
        "port": env("MYSQL_PORT", 3306),
        "user": env("MYSQL_USER"),
        "password": env("MYSQL_PASSWORD"),
        "database": env("MYSQL_DATABASE"),
    }
33641d6b10945b4436f3fcc14c7d88fca1883b5b
22,632
from typing import List
import os
import subprocess


def bamfilter(bam:str, regions:List[str],outputdir:str):
    """
    Extract the reads aligned to the given regions (ref:start-end) from a
    BAM file into a SAM file.

    :param bam: path of the BAM file to filter; must be coordinate-sorted
        (checked only by the filename containing 'sort') and indexed
    :param regions: regions to extract, e.g. ['edge1:0-10000', 'edge2']
    :param outputdir: directory receiving the region list and SAM output
    :return: path of the filtered SAM file (``<outputdir>/tmp.sam``)
    """
    outfile=os.path.join(outputdir,'tmp.sam')
    # NOTE(review): both preconditions are enforced with `assert`, which is
    # stripped under `python -O`; the first is only a filename heuristic.
    assert 'sort' in bam,'bam file must be sort'
    assert os.path.isfile(bam+'.bai'),'index file must be exist'
    # Persist the region list alongside the output (for later inspection;
    # samtools itself receives the regions on the command line below).
    with open(os.path.join(outputdir,'regions'),mode='w') as r:
        for i in regions:
            r.write(i+'\n')
    # -F 2048 drops supplementary alignments; -@ 8 uses 8 threads.
    # NOTE(review): samtools binary path is hardcoded to a cluster install.
    cmd=['/share/app/samtools/1.11/bin/samtools','view','-F','2048','-@','8','-o',outfile,bam]
    cmd=cmd+regions
    p=subprocess.Popen(cmd)
    p.wait()
    return outfile
95c494b4f371125032e0b0eb643568a5bf304fd7
22,633
def compound_score(text, sia):
    """Return VADER's compound sentiment score for some text.

    Arguments:
        text : (string)
        sia: nltk.SentimentIntensityAnalyzer() object

    Returns:
        float between -1 and 1
    """
    scores = sia.polarity_scores(text)
    return scores['compound']
5cdf5b2f5cc87ef28c80dbea1f53eafda6286acf
22,635
def even(n):
    """Builds a set of cycles for a complete graph with an even number of
    vertices, growing two vertices at a time from the K_4 base case."""
    assert n % 2 == 0
    # Base case for complete graph such that V = {1, 2, 3, 4}.
    cycles = [[1, 2, 3], [2, 3, 4], [3, 4, 1], [4, 1, 2]]
    for hi in range(6, n + 1, 2):
        new, prev = hi, hi - 1
        # Consume the edges to vertices 1 and 2 plus (new, prev) twice each.
        cycles.extend([[new, 1, prev], [new, 2, prev], [new, 1, prev, 2]])
        # The remaining 2n - 2 edges connect to V' = {3 .. hi - 2}; |V'| is
        # even, so pair those vertices off two at a time.
        for lo in range(3, hi - 1, 2):
            cycles.extend([[new, lo, prev, lo + 1]] * 2)
    return cycles
38792c058c8f8d045eeac2bb08d7d78abda12868
22,637
def _replaceRenamedPairMembers(kerning, leftRename, rightRename): """ Populate the renamed pair members into the kerning. """ renamedKerning = {} for (left, right), value in kerning.items(): left = leftRename.get(left, left) right = rightRename.get(right, right) renamedKerning[left, right] = value return renamedKerning
cdfa789f1f903276cacb798ee039181847b4922e
22,638
def api_url(query_type):
    """
    Return the API endpoint path for the given query type.

    Raises KeyError for an unrecognized query type.
    """
    endpoints = {'Email Sender': 'bademail',
                 'Email Recipient': 'bademail',
                 'Domain': 'baddomain',
                 'IP Address': 'v2.0/ip'}
    return endpoints[query_type]
d9fd0057f9ba3dfa1526ddc6c15a7aafdaeab496
22,639
def recursively_split_version_string(input_version: str, output_version: list = None):
    """
    Splits a version/tag string into a list with integers and strings
        i.e. "8.0.0.RC10" --> [8, '.', 0, '.', 0, '.RC', 10]

    Input:
        input_version (str): a version or tag i.e. "8.0.0.RC10"
        output_version (list): recursion accumulator; callers should omit
            it. (A None default replaces the original mutable list default,
            a classic Python pitfall.)

    Returns:
        list: the version/tag string in a list with integers and strings
            i.e. [8, '.', 0, '.', 0, '.RC', 10]

    Raises:
        TypeError: if input_version is not a str.
    """
    if not isinstance(input_version, str):
        raise TypeError(
            "The provided version should be a str data type but is of type {}.".format(
                type(input_version)
            )
        )
    if output_version is None:
        output_version = []

    # Base case: the remainder is all digits, or contains no digits at all.
    if (
        input_version.isdigit()
        or not any(char.isdigit() for char in input_version)
    ):
        version = output_version + [input_version]
        return [int(segment) if segment.isdigit() else segment for segment in version]

    # Otherwise advance while consecutive characters agree on digit-ness,
    # so a multi-digit number stays one segment.
    pos = 0
    while (
        input_version[pos].isdigit() == input_version[pos + 1].isdigit()
        and pos != len(input_version) - 2
    ):
        pos += 1
    return recursively_split_version_string(
        input_version[pos + 1 :], output_version + [input_version[: pos + 1]]
    )
b5e8be1d88d5113591e8199bbb39c89f803a20ea
22,640
def get_dict(size, key_prefix, value_preix):
    """
    Build a dictionary with ``size`` entries.

    :type size: int
    :type key_prefix: str
    :type value_preix: str
    :rtype: dict[str, str]

    :param size: The amount of keys the dictionary must contain.
    :param key_prefix: A value to be added before each key.
    :param value_preix: A value to be added before each value
        (parameter name kept as-is for caller compatibility).
    :return: A dictionary mapping '<key_prefix>_<n>' to '<value_preix>_<n>'.
    """
    return {
        "%s_%s" % (key_prefix, n): "%s_%s" % (value_preix, n)
        for n in range(size)
    }
ead26cf740f9e4f0f9a17cd748bf13acb6277183
22,642
import types
import importlib


def import_pickle() -> types.ModuleType:
    """
    Return the cPickle module when importable (Python 2), otherwise the
    standard pickle module.
    """
    try:
        return importlib.import_module('cPickle')
    except ImportError:
        return importlib.import_module('pickle')
5d876a41f875e8b57c526a271c94b92a36d991ca
22,643