content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def remove_dump_skills(skills_df):
    """Filter out noisy/generic skill names produced by the Dobie extractor.

    :param skills_df: DataFrame with a 'skill' column.
    :return: DataFrame without the blacklisted generic skills.
    """
    blacklist = ['Assurance', 'LeSS', 'Computer Science', 'Development',
                 'Programming', 'BDD', 'TDD', 'Developer',
                 'Software Engineer', 'Design', 'Testing']
    keep_mask = ~skills_df['skill'].isin(blacklist)
    return skills_df.loc[keep_mask]
2718e9a9f75787f353a37eccfaab4c9579b6ac98
51,155
import re


def toIsoDateTime(value):
    """Render a datetime as an ISO8601 formatted dateTime string.

    Naive datetimes get a 'Z' suffix; an explicit +00:00/-00:00 offset is
    normalised to 'Z' as well.

    :param value: the dateTime to convert
    :returns: an ISO8601 formatted string version of the dateTime
    """
    iso = value.isoformat()
    if value.tzinfo is None:
        return iso + 'Z'
    # Normalise a zero UTC offset to the canonical 'Z' suffix.
    return re.sub('[+-]00:00$', 'Z', iso)
1e029fb6b6ad60cb20bb6cc134c448d4a4f89a15
51,156
import re


def find_valid_dates(text):
    """Search a string for valid dd/mm/yyyy-style dates.

    Accepts '/', '-' or '.' as separators and validates day/month ranges,
    month lengths and leap years. (The previous version's validation was
    dead code: `del` on the loop variable never removed anything, the
    month list was a single string, and the str/int comparisons would
    have raised had they ever been reached.)

    Args:
        text: The string to be searched.

    Returns:
        A list of 5-tuples (day, sep, month, sep, year) for every valid
        date found within the string.
    """
    # Character class replaces the original (/|-|.) whose unescaped '.'
    # matched any character.
    valid_date = re.compile(r"([0-3]\d)([/.-])([0-1]\d)([/.-])([1-2]\d\d\d)")

    def _is_valid(groups):
        """True when the captured day/month/year form a real calendar date."""
        day, month, year = int(groups[0]), int(groups[2]), int(groups[4])
        if day == 0 or month == 0 or month > 12 or day > 31:
            return False
        if month in (4, 6, 9, 11):
            return day <= 30
        if month == 2:
            leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
            return day <= (29 if leap else 28)
        return True

    return [m for m in valid_date.findall(text) if _is_valid(m)]
6a496dff4c73c124c723f28b1b7ce3bbe28960f8
51,157
def read_sequence(filepath, labels=True):
    """Return the list of protein sequences and cleavage sites from datapath.

    The data file must strictly follow the following pattern:
    - first line is a description of the sequence
    - second line is the sequence itself
    - third line is a list of chars S, C, M

    Example:
       52 AGP_ECOLI      22 GLUCOSE-1-PHOSPHATASE PRECURSOR (EC 3.1.3.10) (G1PASE).
    MNKTLIAAAVAGIVLLASNAQAQTVPEGYQLQQVLMMSRHNLRAPLANNGSV
    SSSSSSSSSSSSSSSSSSSSSSCMMMMMMMMMMMMMMMMMMMMMMMMMMMMM

    :param filepath: path to the data file
    :param labels: when True, also collect the cleavage-site line
    :return: (protein_sequence, cleavage_site) lists
    """
    protein_sequence = []
    cleavage_site = []
    with open(filepath, 'r') as f:
        # The loop condition consumes (and discards) each description line.
        # `!=` replaces the fragile `is not ''` identity test, which relied
        # on string interning and triggers a SyntaxWarning on modern Python.
        while f.readline() != '':
            # Slicing with :-1 to discard the trailing "\n" character.
            protein_sequence.append(f.readline()[:-1])
            if labels:
                cleavage_site.append(f.readline()[:-1])
    return protein_sequence, cleavage_site
7df9a6ed25dc9b3f6d9005d84ddc375f30a55da4
51,158
def clean_for_viaf(text):
    """Normalise an author-name string for VIAF lookup.

    Assumes a cleaned author form (see supporting_functions.clean_authors).
    Lower-cases the input, strips periods and double quotes, and drops
    single-character tokens (abbreviations).

    :param text: input string
    :return: cleaned string
    """
    stripped = text.strip().lower().replace(".", "").replace('"', "")
    kept_tokens = [token for token in stripped.split() if len(token) > 1]
    return " ".join(kept_tokens)
c18abadeae4ebc015030793f64a9babd910a3f1e
51,159
def _min(integers):
    """Returns the minimum of given elements.

    Args:
        integers: A list of integers. Must not be empty.

    Returns:
        The minimum integer in the given list.
    """
    # The previous implementation ignored its input and returned the
    # constant 42, contradicting the documented contract.
    return min(integers)
4345686783d910ed57c22b863b2f10f53ea07335
51,160
def penalty_label(dev_measure, dev_fun, value_discount):
    """Build the short penalty label encoding the design choices."""
    measure_to_label = {
        'none': 'None',
        'rel_reach': 'RR',
        'att_util': 'AU',
        'reach': 'UR',
    }
    label = measure_to_label[dev_measure]
    if dev_measure == 'none':
        return label
    # 'u' = undiscounted, 'd' = discounted values.
    discount_tag = 'u' if value_discount == 1.0 else 'd'
    deviation_tag = ''
    if dev_measure in ('rel_reach', 'att_util'):
        # 't' = truncation, 'a' = any other deviation function.
        deviation_tag = 't' if dev_fun == 'truncation' else 'a'
    return '{}({}{})'.format(label, discount_tag, deviation_tag)
a2c9bb0c3e5d96ce84a80f500603116cfa0cfc8f
51,162
import sys
import time


def robust_request(twitter, resource, params, max_tries=5):
    """Issue a Twitter API request, sleeping 15 minutes after each failure.

    Gives up (returning None) after max_tries attempts.

    Args:
      twitter .... A TwitterAPI object.
      resource ... A resource string to request; e.g., "friends/ids"
      params ..... A parameter dict for the request, e.g., to specify
                   parameters like screen_name or count.
      max_tries .. The maximum number of tries to attempt.

    Returns:
      A TwitterResponse object, or None if failed.
    """
    for _ in range(max_tries):
        response = twitter.request(resource, params)
        if response.status_code == 200:
            return response
        print('Got error %s \nsleeping for 15 minutes.' % response.text)
        sys.stderr.flush()
        time.sleep(61 * 15)
a034d9a46ce996b4f3f1f432f45ea7b6d9a1c658
51,164
def build_risk_assessment_counter(years):
    """Create a zeroed counter for tracking bankruptcy events.

    Args:
        years (int): No. of years for analysis

    Returns:
        list: years + 1 zeros, one slot per year of the analysis.
    """
    return [0] * (years + 1)
677d548471f7dfa825f92ac948d91e54c4a0d21c
51,165
import requests
import json


def check_api_obj_id_from_name(
    jamf_url, object_type, object_name, enc_creds, verbosity
):
    """Check if a Classic API object with the same name exists on the server.

    Queries the Jamf Classic API listing endpoint for ``object_type`` and
    scans it for a case-insensitive name match.

    :param jamf_url: base URL of the Jamf server
    :param object_type: key into the mappings below (e.g. "policy")
    :param object_name: name to look for (compared case-insensitively)
    :param enc_creds: base64-encoded credentials for Basic auth
    :param verbosity: values > 2 enable debug printing of raw responses
    :return: the matching object's id, or 0 if no match was found
    """
    # define the relationship between the object types and their URL
    # we could make this shorter with some regex but I think this way is clearer
    object_types = {
        "package": "packages",
        "computer_group": "computergroups",
        "policy": "policies",
        "extension_attribute": "computerextensionattributes",
    }
    # The JSON list key uses snake_case and differs from the URL path
    # component for some types.
    object_list_types = {
        "package": "packages",
        "computer_group": "computer_groups",
        "policy": "policies",
        "extension_attribute": "computer_extension_attributes",
    }
    headers = {
        "authorization": "Basic {}".format(enc_creds),
        "accept": "application/json",
    }
    url = "{}/JSSResource/{}".format(jamf_url, object_types[object_type])
    r = requests.get(url, headers=headers)
    if r.status_code == 200:
        object_list = json.loads(r.text)
        if verbosity > 2:
            print(object_list)
        obj_id = 0
        for obj in object_list[object_list_types[object_type]]:
            if verbosity > 2:
                print(obj)
            # we need to check for a case-insensitive match
            if obj["name"].lower() == object_name.lower():
                obj_id = obj["id"]
    # NOTE(review): if the request does not return 200, obj_id is never
    # assigned and this raises NameError — confirm whether non-200 should
    # return 0 or None instead.
    return obj_id
3477afeafee04871b201422917101a9301308e9b
51,166
def _retrieve_start_nodes(net, mc):
    """Find entry points for main path analysis.

    A start node has no incoming citations (in-degree 0) and strictly more
    than ``mc`` outgoing citations.

    args:
        net: a CitationNetwork object
        mc: minimum citations

    returns:
        list of start nodes in net
    """
    start_nodes = []
    for node in net.nodes:
        if net.in_degree(node) == 0 and net.out_degree(node) > mc:
            start_nodes.append(node)
    return start_nodes
ff31c50a16a24efa37fbe53f0ba4b7bd9195cb0d
51,167
from typing import List


def get_corpus(documents: List[List[str]]) -> List[str]:
    """Flatten the documents into a single token list (duplicates kept).

    Args:
        documents (List[List[str]]): List where each element is a list of
            tokens in a given document.

    Returns:
        List[str]: Every token of every document, in order.
    """
    corpus: List[str] = []
    for document in documents:
        corpus.extend(document)
    return corpus
322fca0944b74bfcebf38eff3efe8aa469882e54
51,168
import json


def format_file_data_into_json(data_file):
    """Build a datasets-API JSON request body from a file of gene ids.

    input: data_file (sequence ids separated by \n)
    output: json request structure of gene ids to pass to datasets api
    """
    with open(data_file, "r") as handle:
        raw = handle.read()
    gene_ids = [int(line) for line in raw.strip().split("\n")]
    payload = {
        'gene_ids': gene_ids,
        'include_annotation_type': ['FASTA_PROTEIN'],
    }
    return json.dumps(payload)
7f646c511a6433f88f59909d281b9ae044e45c82
51,169
def bottom_up(num_steps: int) -> int:
    """Count the ways to climb ``num_steps`` stairs taking 1, 2 or 3 steps.

    Iterative tribonacci-style accumulation: avoids the recursion depth
    limit and keeps only the last three values (O(1) extra objects; the
    ints themselves grow with n).

    Args:
        num_steps: number of total steps

    Returns:
        The number of possible ways to climb the stairs
    """
    # Base cases: 1 -> 1 way, 2 -> 2 ways, 3 -> 4 ways.
    if num_steps <= 2:
        return num_steps
    if num_steps == 3:
        return 4
    prev3, prev2, prev1 = 1, 2, 4
    for _ in range(num_steps - 3):
        prev3, prev2, prev1 = prev2, prev1, prev3 + prev2 + prev1
    return prev1
8645158380859b5fe8a84ee8b5ac5059437ecdb5
51,171
from typing import Dict


def get_line_from_file(input_filename: str) -> Dict[int, int]:
    """Read one line of comma-separated integers into an index->value dict."""
    with open(input_filename) as handle:
        first_line = handle.readline()
    values = [int(token) for token in first_line.split(",")]
    return dict(enumerate(values))
3274eeb5a36f9ad4b460a927396e5f02a91c5c88
51,172
def destination_index(circle):
    """Locate the destination cup: the next lower value present in the circle.

    Counts down from circle[0] - 1, wrapping to the maximum value when the
    candidate drops below zero.

    >>> destination_index([3, 2, 5, 4, 6, 7])
    (2, 1)
    >>> destination_index([2, 5, 4, 6, 7, 3])
    (7, 4)
    >>> destination_index([5, 8, 9, 1, 3, 2])
    (3, 4)
    """
    candidate = circle[0] - 1
    while candidate not in circle:
        candidate -= 1
        if candidate < 0:
            candidate = max(circle)
    return candidate, circle.index(candidate)
2f35c15e4d44f43f9476a1d4b55095cec134dc17
51,173
def populate_store(store, gc_roots, profiles, paths, requisites=True):
    """Load derivations from the nix store per the command-line invocation.

    Note: ``requisites`` is accepted for interface compatibility but is
    not used in this function.
    """
    if gc_roots:
        store.add_gc_roots()
    for prof in profiles:
        store.add_profile(prof)
    for store_path in paths:
        store.add_path(store_path)
    return store
bce67f5020db5d83cfa132bc9de79b425f76ccdd
51,174
def sql(request_type, *args):
    """Composes queries based on type of request.

    Returns the parameterised SQL string for ``request_type`` (placeholders
    are ``%s`` — values are bound by the DB driver, not interpolated here),
    or '' for an unknown request type.

    NOTE(review): ``*args`` is accepted but never used — confirm whether it
    can be dropped from the signature.
    """
    query = ''
    ######################
    # USER RELATED QUERIES
    ######################
    if request_type == 'GET_ALL_USERS':
        query = """ SELECT * FROM Users_Public"""  # <--- THIS IS A VIEW
    elif request_type == 'GET_USER_BY_ID':
        query = """SELECT * FROM users WHERE id = %s"""
    elif request_type == 'GET_USER_BY_NAME':
        query = """SELECT * FROM users WHERE name like %s"""
    elif request_type == 'GET_USER_BY_TYPE':
        query = """ SELECT name, type FROM users INNER JOIN userlevel ON users.fk_userlevel_id = userlevel.id WHERE type = %s """
    elif request_type == 'POST_REGISTER_USER':
        query = """INSERT INTO `users` (name, hash, fk_userlevel_id) VALUES(%s, %s, %s)"""
    elif request_type == 'POST_UPDATE_TOKEN':
        query = """UPDATE `users` SET token = (%s) WHERE id = (%s)"""
    elif request_type == 'POST_UPDATE_USER':
        query = """ UPDATE `users` SET hash = (%s), address = (%s), phone_number = (%s), fk_community_ids = (%s) WHERE id = (%s) """
    elif request_type == 'DELETE_USER':
        query = """ DELETE FROM users WHERE users.id = %s """
    ###########################
    # COMMUNITY RELATED QUERIES
    ###########################
    elif request_type == 'GET_COMMUNITY_BY_NAME':
        query = """SELECT * FROM community WHERE name like %s"""
    elif request_type == 'GET_COMMUNITY_BY_AREA':
        query = """SELECT * FROM community WHERE area like %s"""
    elif request_type == 'GET_ALL_COMMUNITIES':
        query = """SELECT * FROM community"""
    ###########################
    # PRODUCT RELATED QUERIES
    ###########################
    elif request_type == 'GET_ALL_PRODUCTS':
        query = """ SELECT p.id, p.product, p.description, p.count, p.available, c.name FROM products p INNER JOIN community c on fk_community_id = c.id """
    elif request_type == 'GET_PRODUCTS_BY_COMMUNITY':
        query = """ SELECT p.id, p.product, p.description, p.count, p.available, c.name FROM products p INNER JOIN community c on fk_community_id = c.id WHERE c.name like %s """
    ###########################
    # COURSES
    ###########################
    elif request_type == 'GET_ALL_COURSES':
        query = """SELECT name, points, completed FROM courses"""
    elif request_type == 'GET_COMPLETED_COURSES':
        query = """ SELECT name, points, completed FROM courses WHERE completed = %s """
    elif request_type == 'GET_SUM_COURSES':
        query = """ SELECT sum(points) as total_points FROM courses WHERE completed=%s """
    ########################################
    # PUBLISHABLE & PROJECT RELATED QUERIES
    #######################################
    elif request_type == 'GET_PROJECT_AUTHOR':
        query = """ SELECT name, address, phone_number FROM projects INNER JOIN publishable on projects.fk_parent_id = publishable.id INNER JOIN users on users.id = publishable.fk_alias_id WHERE projects.fk_parent_id = %s """
    elif request_type == 'GET_PUBLISHABLE_AUTHOR':
        query = """ SELECT name, address, phone_number FROM publishable INNER JOIN users on users.id = publishable.fk_alias_id WHERE publishable.id = %s """
    elif request_type == 'GET_TOP_POSTERS':
        query = """ SELECT alias, name, address, phone_number, count(users.id) as nr_posts from publishable INNER JOIN users on users.id = publishable.fk_alias_id GROUP BY users.id ORDER BY nr_posts DESC """
    elif request_type == 'GET_ALL_POSTS':
        query = """ SELECT id, alias, created, dateString, title, body, imageURL, hidden, fk_alias_id from publishable LEFT JOIN projects ON publishable.id = projects.fk_parent_id WHERE projects.fk_parent_id IS NULL """
    elif request_type == 'GET_ALL_PROJECTS':
        query = """ SELECT * FROM projects INNER JOIN publishable on publishable.id = projects.fk_parent_id """
    elif request_type == 'GET_POSTS_HIDDEN':
        query = """ SELECT * FROM publishable WHERE hidden = %s """
    elif request_type == 'GET_POSTS_BY_DATE':
        query = """ SELECT *, from_unixtime(created) as timestamp from publishable where unix_timestamp(%s) < created AND unix_timestamp(%s) >= created """
    elif request_type == 'GET_ALL_PUBLISHABLE_PROJECTS':
        # NOTE(review): this string contains an embedded newline mid-query;
        # harmless to SQL but worth confirming it is intentional.
        query = """ SELECT * FROM publishable LEFT JOIN projects on 
publishable.id = projects.fk_parent_id ORDER BY publishable.id desc LIMIT %s """
    elif request_type == 'GET_POSTS_OVER_X_CHARS':
        query = """ SELECT * FROM publishable WHERE publishable.id IN (SELECT publishable.id FROM publishable WHERE LENGTH (body) > %s) """
    return query
01604cf6576e9f61823d2777f2cadf90f9727b9e
51,175
def dbfilename(db):
    """Compose filename from database record.

    Delegates to the record's own ``filename()`` accessor.
    """
    return db.filename()
381dd07143eb3cf09c7a0e0310f3b1e887f68135
51,177
def calc_offset(actpre, actcount):
    """Derive the ADC-count offset from a known pressure reading.

    Linear sensor model: y = m*x + b, where y is pressure (kPa), x is the
    ADC count, m the slope and b the intercept. The datasheet sensitivity
    is 54 mV/kPa at a 5.1 V supply, rescaled here to the actual 5 V
    supply; the slope then converts mV/kPa into kPa per 8-bit ADC count
    (5000 mV across 255 counts).

    :param actpre: measured barometric pressure in kPa
    :param actcount: corresponding ADC count
    :return: offset expressed in ADC counts (b / m)
    """
    sensitivity = 54 * 5 / 5.1             # mV/kPa, rescaled for 5 V supply
    slope = 5000 / 255 / sensitivity       # kPa per ADC count
    intercept = actpre - slope * actcount  # kPa (b = y - m*x)
    return intercept / slope               # back into ADC counts
3f92d044b15df5dea3cddedfa952e81e396e92ec
51,178
def invalid_expression_kwargs(request):
    """Fixture yielding kwarg mappings that are invalid for constructing a
    :class:`~crython.expression.CronExpression` instance.

    Simply unwraps the parametrised value supplied by pytest.
    """
    return request.param
b15ee90beb61a7fb75cbfbeac9976928cc182f08
51,179
import os


def files_exist(root, files):
    """Return True iff every named file exists relative to ``root``."""
    return all(os.path.isfile(os.path.join(root, name)) for name in files)
0f62f8c5a5416b4d0f9c0f1c622369df80d3cf1b
51,181
from typing import List


def rglob_invert(patterns: List[str]) -> List[str]:
    """Invert rglob conditions by toggling the leading "!" on each pattern."""
    inverted = []
    for pattern in patterns:
        if pattern.startswith("!"):
            inverted.append(pattern[1:])
        else:
            # A non-negated pattern must not contain "!" anywhere.
            assert "!" not in pattern
            inverted.append("!" + pattern)
    return inverted
fb6af4460a13fcc2650e255afedf97e26ee068e6
51,182
import torch


def default_device() -> str:
    """Pick the default torch device string for the current setup.

    Returns "cpu" without CUDA, "cuda" for a single GPU, and
    "cuda:0,1,..." listing every index when multiple GPUs are present.
    """
    if not torch.cuda.is_available():
        return "cpu"
    gpu_count = torch.cuda.device_count()
    if gpu_count < 2:
        return "cuda"
    return "cuda:" + ",".join(str(idx) for idx in range(gpu_count))
7ef23c2c39609d1188fb9f0f2197d0cb44821051
51,183
import time


def timestamp_from_local(item, pattern):
    """Convert a single local date/time string to a UNIX-epoch timestamp.

    :param item: date/time string interpreted in local time
    :param pattern: strptime format describing ``item``
    """
    parsed = time.strptime(item, pattern)
    return time.mktime(parsed)
83b42e69a88d4ecbb0fcd8b2604a7d2b5c45a2f8
51,184
import argparse


def get_args():
    """Build the command-line interface for the character remapper.

    Note: returns the configured :class:`argparse.ArgumentParser` itself —
    callers must still invoke ``parse_args()`` — not parsed arguments.
    """
    p = argparse.ArgumentParser(description="Character remapper.")
    # Positional input: the string to remap (or a file name when -f is set).
    p.add_argument("input", type=str, help="string to remap")
    p.add_argument("-v", "--verbose", action="store_true", dest="verbose",
                   help="produce verbose output")
    # Pool options: build up the set of candidate mapping characters.
    p.add_argument("-p", "--pool", type=str, dest="pool", default="",
                   help="pool of random characters that can be mapped")
    p.add_argument("-pA", "--pool-alph", action="store_true", dest="alph",
                   help="include all alphabetical characters")
    p.add_argument("-pN", "--pool-num", action="store_true", dest="num",
                   help="include all numerical characters")
    p.add_argument("-pL", "--pool-lower", action="store_true", dest="alphlow",
                   help="include all lower alphabetical characters")
    p.add_argument("-pU", "--pool-upper", action="store_true", dest="alphup",
                   help="include all upper alphabetical characters")
    p.add_argument("-pP", "--pool-punc", action="store_true", dest="punc",
                   help="include all punctuation characters")
    # Ignore options: characters excluded from remapping.
    p.add_argument("-i", "--ignore", type=str, dest="ignore", default="",
                   help="pool of characters to ignore")
    p.add_argument("-iP", "--ignore-punc", action="store_true", dest="igp",
                   help="ignore all punctuations")
    p.add_argument("-iN", "--ignore-num", action="store_true", dest="ign",
                   help="ignore all numerical characters")
    p.add_argument("-e", "--error", type=str, dest="errchar", default="",
                   help="error character style")
    p.add_argument("-l", "--hints", type=str, nargs="+", dest="hints",
                   help="list of hints in the form of x=y", default=None)
    p.add_argument("-o", "--out-file", type=str, dest="outf", default="",
                   help="output file to write to")
    p.add_argument("-f", "--from-file", action="store_true", dest="fromf",
                   help="indicates that the input is a file name")
    return p
5ace1538d7741f09473f935b4dceff2ea0a088ec
51,187
import os


def make_directory(dirname):
    """Create a single directory.

    Args:
        dirname: path of the directory to create.

    Returns:
        True on success, False if creation failed (e.g. the directory
        already exists or the parent path is missing).
    """
    try:
        os.mkdir(dirname)
    except OSError:
        # Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt / SystemExit.
        return False
    return True
ab24ad6475bc72145bdb0ef86c539101aa1e29a5
51,188
import re


def _shell_quote(s):
    """Quote the given string so it is safe as a single bash argument."""
    if not s:
        return "''"
    # Strings made only of safe characters need no quoting at all.
    if re.search(r'[^\w@%+=:,./-]', s) is None:
        return s
    # Close the quote, emit an escaped single quote, then reopen it.
    escaped = s.replace("'", "'\"'\"'")
    return "'" + escaped + "'"
9cb72e668f2839cbb992012a1973c21156213ae1
51,189
def dec_to_bin_slow(n):
    """Manually build the binary representation of a decimal number.

    Parameters
    ----------
    n: int
        Non-negative number in base 10.

    Raises
    ------
    ValueError
        If ``n`` is negative.
    """
    if n < 0:
        raise ValueError
    if n == 0:
        return '0'
    digits = []
    while n > 0:
        digits.append(str(n & 1))  # low bit first
        n >>= 1
    return ''.join(reversed(digits))
c6387f89781ec9d64dd3a3eaec840654ac6ac2e0
51,190
def get_image_representation(img_x, embedding_net):
    """Map image features to their semantic representation.

    :param img_x: batch of image features
    :param embedding_net: callable embedding network
    :return: the embedding of ``img_x``
    """
    representation = embedding_net(img_x)
    return representation
e8bda52c7edd60f77951513a5e078f97de93f623
51,192
def bbox_size(bbox):
    """Calculate a bounding box's width and height.

    :type bbox: list
    :param bbox: bounding box as (x0, y0, x1, y1)
    :rtype: tuple
    :return: (width, height), always non-negative
    """
    x_min, y_min, x_max, y_max = bbox
    width = abs(x_max - x_min)
    height = abs(y_max - y_min)
    return width, height
1032ab37e5b05e38f67121e974354ea2fcdf9385
51,193
def getFeedRate(rpm, chipLoad, numTeeth):
    """Calculate the feedrate in inches per minute.

    args:
        rpm = spindle speed
        chipLoad = chip load (inches per tooth)
        numTeeth = number of teeth on tool
    """
    return rpm * chipLoad * numTeeth
e603bdb066a53ec6efd956871a12a08a40eaf971
51,194
def parse_sheets_for_get_response(sheets: list, include_grid_data: bool) -> list:
    """Extract the relevant sheet data from a Google Sheets API response.

    Args:
        sheets (list): the sheets list from the Google API response
        include_grid_data (bool): whether cell values should be extracted

    Returns:
        list: one dict per sheet with its properties and (optionally) the
        formatted cell values under 'rowData'.
    """
    sheet_lst = []
    for sheet in sheets:
        properties = sheet.get('properties', {})
        output_sheet = {
            'title': properties.get('title'),
            'sheetId': properties.get('sheetId'),
            'index': properties.get('index'),
            'gridProperties': properties.get('gridProperties'),
        }
        if not include_grid_data:
            output_sheet['rowData'] = []
            sheet_lst.append(output_sheet)
            continue
        # Guard against a missing or empty 'data' entry: the previous code
        # indexed the dict default with [0] (`sheet.get('data', {})[0]`)
        # and crashed.
        data = sheet.get('data') or [{}]
        response_rows_data = data[0].get('rowData')
        row_data: list = []
        if not response_rows_data:
            row_data.append({'values': []})
        else:
            for response_values in response_rows_data:
                if not response_values:
                    row_data.append({'values': []})
                    continue
                values = []
                # `values` may be absent for an all-empty row: treat as [].
                for cell in response_values.get('values') or []:
                    # Empty cells arrive as falsy entries -> blank string.
                    values.append(cell.get('formattedValue') if cell else '')
                row_data.append({'values': values})
        output_sheet['rowData'] = row_data
        sheet_lst.append(output_sheet)
    return sheet_lst
d7c2a34d6f2be19e310e6d9e2343acf5e21c43ae
51,195
def birch(V, E0, B0, B1, V0):
    """Birch equation of state (case where n=0).

    From Intermetallic compounds: Principles and Practice, Vol. I:
    Principles, Chapter 9 pages 195-210, by M. Mehl, B. Klein,
    D. Papaconstantopoulos (paper downloaded from Web).

    :param V: volume
    :param E0: equilibrium energy
    :param B0: bulk modulus
    :param B1: pressure derivative of the bulk modulus
    :param V0: equilibrium volume
    :return: energy at volume V
    """
    strain = (V0 / V) ** (2.0 / 3.0) - 1.0
    return (E0
            + 9.0 / 8.0 * B0 * V0 * strain ** 2
            + 9.0 / 16.0 * B0 * V0 * (B1 - 4.) * strain ** 3)
fea738a86f6ed3cfd0be421583c96a63ace548cd
51,196
def stringToWordList(s):
    """Return the list of 'words' (runs of letters) in s.

    Lower-cases the string, replaces every non-letter (ascii a-z only)
    with a blank, then splits on whitespace.

    PreC: s is a string.
    """
    letters = 'abcdefghijklmnopqrstuvwxyz'
    cleaned = ''.join(ch if ch in letters else ' ' for ch in s.lower())
    return cleaned.split()
166d192a6e3701431fc00617a9d667f0cf39a63a
51,198
def apply_zero_correction(nodes):
    """Normalise, in place, edge weights touching the master node (rank 0).

    Subtracts the minimum outbound weight from every edge leaving rank 0
    and the minimum inbound weight from every edge arriving at rank 0.
    That baseline is assumed to be synchronisation comms.
    """
    def _zero_minima(graph):
        # Minimum weight among edges arriving at / leaving rank 0.
        inbound = 1e8
        outbound = 1e8
        for name in graph:
            for edge in graph[name]["msg_count"]:
                (src, dst), weight = edge[0], edge[1]
                if src == 0 and weight < outbound:
                    outbound = weight
                if dst == 0 and weight < inbound:
                    inbound = weight
        return inbound, outbound

    w_to, w_from = _zero_minima(nodes)
    # Subtract the baselines from every matching edge.
    for name in nodes:
        for edge in nodes[name]["msg_count"]:
            if edge[0][0] == 0:
                edge[1] -= w_from
            if edge[0][1] == 0:
                edge[1] -= w_to
c8703ed7f52ae9ae083562e234f071d4c5e1c2ac
51,199
from typing import Any
import types
import sys


def _is_instrumentable(obj: Any) -> bool:
    """Return True when ``obj`` is a pure-Python callable we can instrument."""
    try:
        # Must be callable at all.
        if not hasattr(obj, "__call__"):
            return False
        # Must expose real bytecode (a CodeType __code__ member).
        if not hasattr(obj, "__code__") or not isinstance(obj.__code__, types.CodeType):
            return False
        # Must live in a real, currently-loaded module.
        if not hasattr(obj, "__module__") or obj.__module__ not in sys.modules:
            return False
        # Bound methods are skipped — the underlying function is the target.
        if hasattr(obj, "__self__"):
            return False
        # Only Python functions/methods qualify; native callables do not.
        return isinstance(obj, (types.FunctionType, types.MethodType))
    except Exception:  # pylint: disable=broad-except
        # If probing those attributes raised, the object probably can't be
        # instrumented anyway.
        return False
04c78f3924744c25721954947b1c1f224df9dad7
51,200
def get_genomicsWorkflowbatches(db, query):
    """Return documents from the 'genomicsWorkflowbatches' collection
    based on query.

    NOTE(review): despite the stated purpose, this currently returns the
    ``(db, query)`` tuple unchanged — it never queries the collection.
    Looks like a stub/placeholder; confirm the intended behaviour (e.g.
    ``db.genomicsWorkflowbatches.find(query)``) before relying on it.

    :param db: database handle
    :param query: query filter/mapping
    """
    return db, query
f43754e9000acfa8e340f8d4cacb7c0f30155641
51,202
from typing import List
import statistics


def compute_average_memory_usage(memory_data_list: List[float]) -> float:
    """Return the arithmetic mean of the given memory usage samples."""
    average = statistics.mean(memory_data_list)
    return average
0a13139d3dbe6e6a1ea23c353b680169857dd6ff
51,203
def load_instructions(file):
    """Parse direction instructions from an open file.

    Each line is one direction character followed by an integer
    magnitude, e.g. ``R10``.

    :param file: open file-like object
    :return: list of (direction, magnitude) tuples
    """
    instructions = []
    for line in file.readlines():
        instructions.append((line[0], int(line[1:])))
    return instructions
ff20dde4f02d10e2afc9c862aaba10828bfe88ef
51,204
def reflect_mpo_2site(mpo_2site):
    """Spatial reflection of a 2-site MPO.

    Returns the site tensors in reversed order, as a tuple.
    """
    return tuple(mpo_2site[::-1])
8158a5c5c58f0e96716c3cc1dd26663c08fd6893
51,205
import os


def file_count(path: str):
    """Count the files in a path and all of its subpaths.

    Args:
        path: a string containing the path in which to count the files.

    Returns:
        The number of files in the path.
    """
    total = 0
    for _root, _dirs, files in os.walk(path):
        total += len(files)
    return total
b2119f36c5afe3a0e1f9d1cc2b2bce96344eb97e
51,207
def _split_domain_name(domain_name):
    """Split an ACME-challenge FQDN into (zone, host) for ClouDNS.

    ClouDNS requires the domain name and host to be split: the zone is
    the last two labels and everything in front (including the
    '_acme-challenge' prefix) becomes the host.
    """
    labels = "_acme-challenge.{}".format(domain_name).split(".")
    zone = ".".join(labels[-2:])
    host = ".".join(labels[:-2])
    return zone, host
3512c881f81c2cafc5d56564b306e334e63258e0
51,208
import json


def read2json(file_name):
    """Read a JSON file and return the decoded dict/list.

    Args:
        file_name: path to a UTF-8 encoded JSON file.

    Returns:
        The deserialised object (dict or list).
    """
    # Fixes: no longer shadows the builtin `dict`, and the leftover debug
    # `print` of the whole payload has been removed.
    with open(file_name, "r", encoding="utf-8") as fp:
        return json.load(fp)
4f9535a9b7244890313c9bc3432df76d243d5484
51,211
import math


def manning_equation(hydraulic_radius, manning_coefficient, slope):
    """Manning formula estimating the average velocity of a liquid driven
    by gravity.

    :param hydraulic_radius: hydraulic radius of pipe or channel [m]
    :param manning_coefficient: Gauckler-Manning coefficient
    :param slope: slope of the hydraulic grade line [-]
    """
    radius_term = math.pow(hydraulic_radius, 2 / 3)
    slope_term = math.pow(slope, 0.5)
    return radius_term * slope_term / manning_coefficient
313db8e85c74d2f346b24617ea6228627f589e57
51,213
def type_with_number(message):
    """Translate a message into phone-keypad digits, one per character.

    >>> type_with_number('Welcome to Beijing!')
    '9352663086023454641'
    >>> type_with_number('I miss my laptop.')
    '40647706905278671'
    >>> type_with_number('The quick brown fox jumps over the lazy dog.')
    '84307842502769603690586770683708430529903641'
    >>> type_with_number('wtrang')
    '987264'
    >>> type_with_number('a')
    '2'
    """
    num_dict = {
        ',': 1, '.': 1, '?': 1, '!': 1,
        'a': 2, 'b': 2, 'c': 2,
        'd': 3, 'e': 3, 'f': 3,
        'g': 4, 'h': 4, 'i': 4,
        'j': 5, 'k': 5, 'l': 5,
        'm': 6, 'n': 6, 'o': 6,
        'p': 7, 'q': 7, 'r': 7, 's': 7,
        't': 8, 'u': 8, 'v': 8,
        'w': 9, 'x': 9, 'y': 9, 'z': 9,
        ' ': 0
    }
    # Iterative join fixes two defects of the recursive original: a
    # single-character message returned an int instead of a str, and very
    # long messages could exceed the recursion limit.
    return ''.join(str(num_dict[ch]) for ch in message.lower())
46a2c004160b14268fcf72c08693e60918cb3f91
51,214
def monpow(a, b):
    """Raise ``a`` to the power ``b``.

    For an integer exponent, :math:`a^b = a \\cdot a \\cdots a` (b times).

    :param a: the base value
    :param b: the exponent
    :type a: int, float, ...
    :type b: int, float, ...
    :returns: a**b
    :rtype: int, float

    :Examples:

    >>> monpow(2, 3)
    8
    >>> monpow(2., 2)
    4.0

    .. seealso:: pow
    .. warning:: a and b must be numbers
    """
    return a ** b
30f7b5c0ff08082594e2e2cf62509bc1c0742627
51,215
def weighted_recall(P0, P, Y):
    """Score each predictor against the ground-truth set without the
    complex weighting strategy.

    For every predictor ``P[i]``, sums the prior weight ``P0[x]`` of each
    ground-truth item ``x`` that the predictor scores positively,
    normalised by the total prior mass of ``Y``.
    """
    normaliser = sum(P0[item] for item, _ in Y)
    recalls = []
    for predictor in P:
        mass = 0.
        for item, label in Y:
            # Ground truth must be all-positive.
            assert label == 1.
            indicator = 1.0 if item in predictor and predictor[item] > 0 else 0.
            mass += P0[item] * indicator
        recalls.append(mass / normaliser)
    return recalls
406f026ca2d98a69470dd7f7bfbde313244c1b89
51,216
def is_int(val):
    """Check if val represents an integer value.

    Parameters
    ----------
    val: value to check type

    Returns
    -------
    True for ints and for floats with no fractional part, otherwise False.
    """
    if isinstance(val, int):
        return True
    # Previously any non-numeric input (e.g. a str) raised AttributeError
    # on the is_integer() call; now it simply reports False.
    return isinstance(val, float) and val.is_integer()
0c33396973ff601deae19e1f47352c45ca3269a6
51,217
def plural(n):
    """Return "s" when ``n`` != 1, otherwise an empty string."""
    return "" if n == 1 else "s"
5841d42b534cb21d4451b4ec20b92d16c0df12bc
51,218
def readable(filename: str) -> bool:
    """Conditional method to check if the given file (via filename) can be
    opened in this thread.

    :param filename: name of the file
    :type filename: str
    :return: true if file can be read, otherwise false
    :rtype: bool
    """
    try:
        # `with` guarantees the handle is closed even on error; the
        # original leaked the handle if an exception fired mid-check, and
        # its `handler.closed` dance always reduced to returning True.
        with open(filename, 'r'):
            return True
    except OSError:
        # File probably doesn't exist or is not readable.
        return False
997dbd3f42ed432109169fb98b3d66f3f53341ba
51,220
def get_child_object(obj, child_name):
    """Return the named child of ``obj``, or None when it has none.

    Tries attribute access first (``__getattr__`` then
    ``__getattribute__``), falling back to item access (``__getitem__``).

    Arguments:
        obj {object} -- parent object
        child_name {str} -- child name

    Returns:
        object -- child object, or None if not found
    """
    if hasattr(obj, '__getattr__'):
        try:
            return obj.__getattr__(child_name)
        except AttributeError:
            pass
    if hasattr(obj, '__getattribute__'):
        try:
            return obj.__getattribute__(child_name)
        except AttributeError:
            pass
    if hasattr(obj, '__getitem__'):
        try:
            return obj.__getitem__(child_name)
        except (AttributeError, KeyError, IndexError, TypeError):
            # Item lookups signal "missing" with KeyError/IndexError (and
            # TypeError for unsupported key types); the original caught
            # only AttributeError and let these escape.
            pass
    return None
d9404e09cdeaaf755c75675e6c0dc42f5fc7adf2
51,221
def main(name=None):
    """Report whether the caller is the executing script.

    Example use:
    >>> if gv.parallel.main(__name__):
    ...     res = gv.parallel.imap_unordered(f, params)
    """
    is_entry_point = (name == '__main__')
    return is_entry_point
5c92e83087ab190c6cca8c958fbb21fa464f07d5
51,222
import re


def clean(text):
    """Clean Coptic text.

    (1) removes obviously non-Coptic spans; (2) collapses runs of
    newlines; (3) collapses runs of spaces; (4) spaces out ., ·, and :

    :param text: A string of Coptic text
    :return: Cleaned Coptic text
    """
    # Pad punctuation first so each mark survives as a separate token.
    text = text.replace(".", " .").replace("·", " ·").replace(":", " : ")
    non_coptic_parts = [
        r'\[F[^]]+\]',    # Square brackets if they start with F
        r'\|F[^|]+\|',    # Pipe delimiters if they start with F
        r'\([^\)]+\)',    # Anything in round brackets
        r'[A-Za-z0-9|]',  # Latin or numbers, pipe
    ]
    non_coptic = "(" + "|".join(non_coptic_parts) + ")"
    text = re.sub(non_coptic, '', text)
    text = re.sub(r"\n+", r"\n", text)
    return re.sub(r" +", r" ", text)
1088dc0fccb66790f622a05d58027ca3635c8134
51,223
import subprocess


def perform_whois(target_ip):
    """Method that relies on subprocess to perform a WHOIS lookup on an IP
    address.

    Runs the system ``whois`` binary with a 180-second timeout. Lookup
    failures ("No match for") are only logged; on timeout a sentinel
    string is returned instead of raw output.

    :param target_ip: IP address to query
    :return: raw WHOIS text, or "whois_data: TIME OUT FAILURE" on timeout
    """
    whois_results = ""
    # Set time value for whois call in seconds
    time_limit = 180
    ## Text result of the whois is stored in whois_result...
    # Note: encoding='iso-8859-1' due to French language and its cast of characters...
    cmd = ["whois", "n", "+", target_ip]
    try:
        whois_results = subprocess.check_output(
            cmd, stderr=subprocess.STDOUT, encoding="ISO-8859-1", timeout=time_limit
        )
        if "No match for" in whois_results:
            print("\n[!] Processing whois failure on IP: '{}'".format(target_ip))
            print("\t[-] WHOIS Results:\n{}".format(whois_results))
    except subprocess.TimeoutExpired:
        print("\n[!] Processing whois failure on IP: '{}'".format(target_ip))
        print("[-] Timeout triggered after '{}' seconds.\n".format(time_limit))
        whois_results = "whois_data: TIME OUT FAILURE"
    return whois_results
a04026f29500b349c64a18a22469170f36ae2d4b
51,224
def ping_time_to_distance(time, calibration=None, distance_units='cm'):
    """ Calculates the distance (in cm) given the time of a ping echo.
    By default it uses the speed of sound (at sea level = 340.29 m/s) to
    calculate the distance, but a list of calibrated points can be used to
    calculate the distance using linear interpolation.
    :arg calibration: A sorted list of (time, distance) tuples to calculate
    the distance using linear interpolation between the two closest points.
    Example (for a HC-SR04 ultrasonic ranging sensor):
    [(680.0, 10.0), (1460.0, 20.0), (2210.0, 30.0)]
    """
    # NOTE(review): distance_units is currently unused — the result is
    # always in cm.  Confirm whether unit conversion was intended.
    # A zero/None echo time means no echo was received: report zero.
    if not time:
        return 0
    if not calibration:
        # Standard calculation using speed of sound.
        # 1 (second) / 340.29 (speed of sound in m/s) = 0.00293866995 metres
        # distance = duration (microseconds) / 29.38 / 2 (go and back)
        distance = time / 29.3866995 / 2
    else:
        # Linear interpolation between two calibration points.
        # `a` tracks the closest calibration point below `time`; `b` is
        # the first point above it (defaults to the last point).
        a = (0, 0)
        b = calibration[-1]
        for c in calibration:
            if c[0] < time: a = c
            if c[0] > time: b = c; break
        # `time` is past the last calibration point: extrapolate from the
        # last two points instead of dividing by a zero-width interval.
        if a == b: a = calibration[-2]
        distance = a[1] + (b[1] - a[1]) * ((time - a[0]) / (b[0] - a[0]))
    return distance
db8cba30c8d50d301b6a0e15073c527e285d4aa6
51,225
def from_dict(cls, config):
    """Instantiate *cls* using the entries of *config* as keyword arguments."""
    instance = cls(**config)
    return instance
cb50159046061f6b2a7ec15302f6df7bd1ed6cb9
51,226
import os
def extract_directory_name(filename):
    """Extract a directory name from a HTAR `filename` that may contain various
    prefixes.

    Parameters
    ----------
    filename : :class:`str`
        Name of HTAR file, including directory path.

    Returns
    -------
    :class:`str`
        Name of a directory.
    """
    d = os.path.dirname(filename)
    basefile = os.path.basename(filename).rsplit('.', 1)[0]  # remove .tar
    # No directory component: the bare basename is the best answer.
    if not d:
        return basefile
    # The directory path, with '/' flattened to '_', is expected to appear
    # as a prefix inside the basename (HTAR naming convention).
    prefix = d.replace('/', '_') + '_'
    try:
        i = basefile.index(prefix)
    except ValueError:
        # Full prefix not found: retry without the first path component,
        # which may not be embedded in the basename.
        try:
            prefix = '_'.join(prefix.split('_')[1:])
            i = basefile.index(prefix)
        except ValueError:
            # Neither form matches; fall back to the whole basename.
            return basefile
    # Strip everything up to and including the matched prefix.
    return basefile[(i + len(prefix)):]
19b13a09f5490ceef09cecb35f97a1c06b92ebfd
51,227
def get_distinct_value(df, cols=None):
    """Map each requested column of *df* to its array of distinct values.

    Parameters
    ----------
    df : dataframe
    cols : iterable of column names, optional
        Defaults to every column of *df*.

    Returns
    -------
    dict mapping column name -> unique values of that column
    """
    selected = cols if cols else df.columns
    return {name: df[name].unique() for name in selected}
5b34136f986bdef3a1c25fcc7d5eaa42ec407a80
51,228
import subprocess


def check_node() -> bool:
    """Report whether the ``node`` executable is available on this system.

    :return: True when ``node -v`` runs successfully, False otherwise.
    """
    try:
        subprocess.check_output("node -v", shell=True)
    except subprocess.CalledProcessError:
        return False
    return True
828b8607a13a6085a47849136c56cf6cca890025
51,229
def pack_str(string):
    """Encode *string* into its (UTF-8 default) byte sequence."""
    encoded = string.encode()
    return encoded
5ef0e1f41db1a242c8a67a90d32397f270e2ce4e
51,230
import token
def _get_definition_tokens(tokens):
    """ Given the tokens, extracts the definition tokens.

    Parameters
    ----------
    tokens : iterator
        An iterator producing tokens.

    Returns
    -------
    A list of tokens for the definition.

    """
    # Retrieve the trait definition.
    definition_tokens = []
    first_line = None
    for type, name, start, stop, line_text in tokens:
        # Remember the row the definition starts on so positions can be
        # rebased relative to it.
        if first_line is None:
            first_line = start[0]
        # A NEWLINE token ends the logical line, i.e. the definition.
        if type == token.NEWLINE:
            break
        # Rebase row numbers so the definition starts at row 1; columns
        # are kept unchanged.
        item = (
            type,
            name,
            (start[0] - first_line + 1, start[1]),
            (stop[0] - first_line + 1, stop[1]),
            line_text,
        )
        definition_tokens.append(item)
    return definition_tokens
f5ffe1b5757828742777d8678fdbd4738d227aa8
51,231
def _parse_daily_time(xml_root): """Convert contents of <DailyTime> to decimal mask""" mask = [[0 for h in range(24)] for d in range(7)] for instance in xml_root.findall("DailyTime/time_instance"): day = int(instance.find('daily').text) -1 s, e = instance.find('time').text.split('-') s, e = int(s), int(e)+1 mask[day][s:e] = [1] * (e-s) output = [] for daybin in mask: daydec = 0 for bit in daybin: daydec = (daydec << 1) | bit output.append(str(daydec)) if (output is not None and len(output)): return ','.join(output) else: return None
477c6e01a0be20545793a05f6b5aabac78d57f32
51,233
import os


def traverse_mxd(root_dir, filter=lambda x: x.endswith("mxd")):
    """Recursively collect files under *root_dir* accepted by *filter*.

    By default matches files whose name ends with "mxd".

    Returns the list of full paths to the matching files.
    """
    matches = []
    for dirpath, _dirnames, filenames in os.walk(root_dir):
        matches.extend(
            os.path.join(dirpath, name) for name in filenames if filter(name)
        )
    return matches
4973c67e84bed0f6f9dfc493b6fb777b80ae77ea
51,235
def _get_ancestors(cube): """Extract ancestors from ``filename`` attribute of cube.""" ancestors = cube.attributes['filename'].split('|') return ancestors
35e5e70ea3b72e055895a9e66b4eff262ec24163
51,236
def flatten_test_results(trie, prefix=None):
    """Flattens a trie structure of test results into a single-level map.

    Recursion stops at any nonempty node that has an 'actual' or
    'expected' key; such nodes are treated as leaves.  Nested keys are
    joined with '/', e.g. ``{'foo': {'bar': leaf}}`` becomes
    ``{'foo/bar': leaf}``.

    :param trie: nested dict of test results.
    :param prefix: path accumulated so far (used by the recursion).
    :return: flat dict mapping 'a/b/c' paths to leaf dicts.
    """
    # Cloned from webkitpy.layout_tests.layout_package.json_results_generator
    # so that this code can stand alone.
    result = {}
    # Bug fix: `.iteritems()` is Python 2 only and raises AttributeError
    # on Python 3; `.items()` works on both the dicts this consumes.
    for name, data in trie.items():
        if prefix:
            name = prefix + '/' + name
        if len(data) and 'actual' not in data and 'expected' not in data:
            result.update(flatten_test_results(data, name))
        else:
            result[name] = data
    return result
8204b5a2ccec23cb8323b6709979114fef12633e
51,237
import io
import csv


def csv_loader(text, metadata):
    """Parse CSV *text* into a list of rows (lists of strings).

    The dialect (delimiter among ';', ',', space, tab) is sniffed from
    the first 2048 characters.  *metadata* is accepted for loader
    interface compatibility but is not used.
    """
    with io.StringIO(text, newline='') as buffer:
        sniffed = csv.Sniffer().sniff(buffer.read(2048), delimiters=";, \t")
        buffer.seek(0)
        rows = [row for row in csv.reader(buffer, sniffed)]
    return rows
23afaaae7b12f06fd62641286c5856fc11556274
51,238
def calculate_reward(state, v):
    """Score the current *state* from player *v*'s perspective.

    Returns (reward, status): -1.0 while the game is ongoing (status 0),
    +2.0 when player *v* has won, -2.0 when the opponent has won.
    """
    status = state.get_status()
    if status == 0:
        reward = -1.0
    elif status == v:
        reward = 2.0
    else:
        reward = -2.0
    return (reward, status)
5d575afbb34b56f8097766f2027596e2b6759238
51,239
def _sort_func_of_double(point): """ double outの時の得点の評価 ダブルアウトの場合ブルは最終手段なのでsepa, fat関係ない Parameters ----- point : int 評価する得点 Returns ----- score : int 得点の評価 """ #Bullは上がれるならいいが、最終手段 if point == 50: return 0.1 #ダブルの場合good if point%2 == 0 and point <= 40: return 1 else : return 0
9bf7de44573013ef1096edf3d43783b1e1b81779
51,240
def continue_to_get_objects(vim, token):
    """Continues to get the list of objects of the type specified.

    :param vim: vSphere API session/proxy object.
    :param token: pagination token returned by a previous
        RetrievePropertiesEx / ContinueRetrievePropertiesEx call.
    :return: the next page of retrieved objects.
    """
    return vim.ContinueRetrievePropertiesEx(
        vim.service_content.propertyCollector,
        token=token)
6f5632a9f7f1ea2da5be76088c5565d101f32114
51,241
def expandGrid(coord_tuple, base):
    """Widen a degenerate grid in place.

    When every coordinate in *coord_tuple* is identical, the pair is
    expanded symmetrically by *base* (so the grid becomes 2 * base wide);
    otherwise it is returned untouched.  The input list is mutated.
    """
    is_degenerate = len(set(coord_tuple)) == 1
    if is_degenerate:
        coord_tuple[0] = coord_tuple[0] - base
        coord_tuple[1] = coord_tuple[1] + base
    return coord_tuple
59cc66cf063bcf952466df2badf6049915c4dfd9
51,245
def RC(annotation, prediction):
    """Kendall rank correlation coefficient.

    *annotation* maps each item to a sequence whose first element is its
    reference rank; *prediction* is a list of items in predicted order.
    Counts concordant vs. discordant pairs over all item pairs.
    """
    concordant = 0.0
    discordant = 0.0
    n = len(prediction)
    for first in range(n):
        for second in range(first + 1, n):
            rank_a = annotation[prediction[first]][0]
            rank_b = annotation[prediction[second]][0]
            if rank_a < rank_b:
                concordant += 1
            else:
                discordant += 1
    return (concordant - discordant) / n / (n - 1) * 2
d2ab2af4333bc8cfb43b69e5291f7e0002296717
51,246
def human_readable(solutions):
    """Render letter-draw solutions as display text.

    Parameters
    ----------
    solutions : pandas.DataFrame
        Letter draw best solutions, with "nb_letters" and "display"
        columns, ordered by number of letters.

    Returns
    -------
    str
        One section per word length, listing the words of that length.
    """
    sections = []
    for length, group in solutions.groupby("nb_letters"):
        sections.append(str(length) + " letters:" + "\n")
        sections.append(" ".join(group["display"]) + "\n\n")
    return "".join(sections)
becca40e87db069eaf256e6514f6d47b05e7e1f0
51,247
from typing import Counter
def label_sizes(pages):
    """
    pages: list(Page)
    returns: dict[str: int]

    For all labels with landings in the page list, assign each label a
    "size", from 1 to 5, based on the total number of pages with that
    label.  (A larger size means the label contains more pages.)
    """
    # Note: this is basically calculating a 5-bin histogram, but I didn't
    # want to add another dependency like numpy just to do this.
    # (The previous version also built an unused list of landing pages;
    # that dead code has been removed.)
    label_counts = Counter()
    for page in pages:
        if "labels" in page:
            label_counts.update(page["labels"])
    if not label_counts:
        return {}

    total_labels = len(label_counts)
    label_sizemap = {}
    size = 5
    # Walk labels from most to least common; each time the remaining
    # fraction of labels drops below the current bin boundary, step down
    # one size (at most once per label, as before).
    for i, (label, _count) in enumerate(label_counts.most_common()):
        if 1 - (i / total_labels) < (size - 1) / 5:
            size -= 1
        label_sizemap[label] = size
    return label_sizemap
785ac402645eee2ac91a66d538f39e8e64f56fa3
51,248
import os
import errno
def _open_ipipes(wds, fifos, input_pipes):
    """
    This will attempt to open the named pipes in the set of ``fifos`` for
    writing, which will only succeed if the subprocess has opened them for
    reading already.  This modifies and returns the list of write
    descriptors, the list of waiting fifo names, and the mapping back to
    input adapters.
    """
    # Iterate over a copy so entries can be popped from `fifos` in place.
    for fifo in fifos.copy():
        try:
            # Non-blocking open fails with ENXIO when no reader exists yet.
            fd = os.open(fifo, os.O_WRONLY | os.O_NONBLOCK)
            input_pipes[fd] = fifos.pop(fifo)
            wds.append(fd)
        except OSError as e:
            # ENXIO just means the reader isn't there yet; leave the fifo
            # in the waiting set and try again on a later call.
            if e.errno != errno.ENXIO:
                raise e
    return wds, fifos, input_pipes
3239c55a73a551fa061c7a9361190defba3198dd
51,249
import sys


def detect_parity_bit_error(data, parityBit):
    """
    Checks (validated) parity bit at the receiver's end.
    Returns True if sender has sent correct parity bit, else returns False.
    """
    ones = 0
    for bit in data:
        if bit == '1':
            ones += 1
        elif bit != '0':
            # Anything other than '0'/'1' is unrecoverable input.
            print('Data is not in binary!!!')
            sys.exit()
    # Even parity: expected bit is the ones count modulo 2.
    expected_parity = ones % 2
    return expected_parity == parityBit
2f268ba2ce0ec2d404803a6367b95aeb9517fff0
51,250
def verify_lc_action(action):
    """Normalize and validate a lifecycle action parameter.

    :type: str
    :param action: The action parameter to check (any case).
    :rtype: str
    :return: The lower-cased action.
    :raises: ValueError: when the action is neither "archive" nor "expire".
    """
    normalized = str(action).lower()
    if normalized in ('archive', 'expire'):
        return normalized
    raise ValueError('"{}" is not a valid action parameter. '
                     'Allowed values are: "archive" or "expire"'
                     .format(normalized))
ab27d9c52baa2fa24149649e7825e58585c03a20
51,251
from typing import Iterable
def validation_failed_dict(
        items: Iterable,
        dict_code: int = 1001,
        dict_message: str = 'Validation Failed'
):
    """Build a DRF-friendly-errors response dict for failed validation.

    Attributes:
        items: Error entries, each in the form (code, field, message).

    Returns:
        dict: In the format of DRF friendly errors.
    """
    errors = []
    for entry in items:
        errors.append({
            'code': entry[0],
            'field': entry[1],
            'message': entry[2],
        })
    return {
        'code': dict_code,
        'message': dict_message,
        'errors': errors,
    }
01de1c3bc47a4a44005b3e7cd36cf9f5444b7d62
51,253
def _get_textx_rule_name(parent_rule): """ Iterate parent instances until `TextxRule` instance. """ while not type(parent_rule).__name__ == "TextxRule": parent_rule = parent_rule.parent return parent_rule.name
9073a82592edadeb43d7fcda91d718203052c1f7
51,256
def _train_and_score(clf, X, y, train, test): """ Fit a classifier clf and train set and return the accuracy score on test set""" clf.fit(X[train], y[train]) return clf.score(X[test], y[test])
886397f99ff3b21cc62100269c9b2c8d5bca8669
51,257
import sys
import stat


def compare_file_modes(mode1, mode2):
    """Return true if the two modes can be considered as equals for this
    platform."""
    first = stat.S_IMODE(mode1)
    second = stat.S_IMODE(mode2)
    if 'fcntl' in sys.modules:
        # Linux specific: compare the full permission bits.
        return oct(first) == oct(second)
    # Windows specific: most mode bits are ignored on Windows, so only
    # the user read/write rights are compared.
    same_read = (first & stat.S_IREAD) == (second & stat.S_IREAD)
    same_write = (first & stat.S_IWRITE) == (second & stat.S_IWRITE)
    return same_read and same_write
45fd5e8f17c69b20b379a214c1c7d91075e779af
51,258
def skip_dti_tests():
    """XXX These tests are skipped until we clean up some of this code.

    Always returns True for now.
    """
    return True
c00c6f4a67f68a39d982be9de0e2596bce266612
51,261
import re


def camelize(s):
    """
    Convert underscores to camelcase; e.g. foo_bar => FooBar

    Bug fix: the empty string is returned unchanged instead of raising
    IndexError on ``s[0]``.
    """
    if not s:
        return s
    return s[0].upper() + re.sub(r'_([a-z])', lambda m: m.group(1).upper(), s[1:])
ff1a6d87f1d10171276be4ae8529850a23a93df9
51,263
def knothash_reverse(string, start, length):
    """Reverse a circular sub-span of `string` (a list) in place.

    Implements the knot-hash "pinch and twist" step: the `length`
    elements beginning at index `start`, wrapping past the end of the
    list, are reversed.  The list is mutated and also returned.

    reverse([0, 1, 2, 3, 4], 0, 3) = [2, 1, 0, 3, 4]
    reverse([2, 1, 0, 3, 4], 3, 4) = [4, 3, 0, 1, 2]
    reverse([4, 3, 0, 1, 2], 3, 1) = [4, 3, 0, 1, 2]
    reverse([4, 3, 0, 1, 2], 1, 5) = [3, 4, 2, 1, 0]
    """
    # Index of the last element of the (wrapping) span.
    end = (start + length - 1) % len(string)
    # Only half the span needs swapping.
    length = length // 2
    while length > 0:
        try:
            string[start], string[end] = string[end], string[start]
            # Advance the front pointer with wrap-around.
            start = (start + 1) % len(string)
            end -= 1
            # Wrap the back pointer around the left edge.
            if end < 0:
                end = len(string) - 1
            length -= 1
        except IndexError:
            # NOTE(review): debugging leftover — dumps state, re-raises.
            print(start, end, length)
            raise
    return string
a458859286c520cc142bdc3809c229e576e03392
51,264
def _compute_pragmatic_meaning_building(building_geometry, building_land_use, buildings_gdf, buildings_gdf_sindex, radius):
    """
    Compute pragmatic for a single building. It supports the function "pragmatic_meaning"

    Parameters
    ----------
    buildings_geometry: Polygon
        footprint of the building being scored
    building_land_use: String
        land-use category of the building being scored
    buildings_gdf: Polygon GeoDataFrame
        buildings GeoDataFrame - case study area
    buildings_gdf_sindex: Rtree Spatial index
        spatial index built over buildings_gdf
    radius: float
        search radius around the building (same units as the CRS)

    Returns
    -------
    float
    """
    buffer = building_geometry.buffer(radius)
    # Coarse candidate pass via the spatial index bounding boxes,
    # followed by an exact geometric intersection test.
    possible_matches_index = list(buildings_gdf_sindex.intersection(buffer.bounds))
    possible_matches = buildings_gdf.iloc[possible_matches_index]
    pm = possible_matches [possible_matches.intersects(buffer)]
    # Total 'nr' per land-use category among neighbours in the buffer.
    neigh = pm.groupby(["land_use"], as_index = True)["nr"].sum()
    Nj = neigh.loc[building_land_use] # nr of neighbours with same land_use
    # Pj = Nj/N
    Pj = 1-(Nj/pm["nr"].sum()) # inverting the value
    return Pj
4e0077d610aa576934e9eb87f3c352d5303fb15a
51,265
def get_whole_table(table): """ Receives a results table Returns all results in a list of lists with whitespace removed """ return [ entry.strip().split() for line in table.strip().split("\n") for entry in line.split("|") if entry.replace("-", "") != "" and entry.strip() != "" ]
6f23a851d75540bff0c4643bc27a18ce7494d7f6
51,266
def format_step(action_id, step, index, notes):
    """Reformat a step (dictionary) into the common response format.

    :param action_id: id of the owning action (used to build the URL).
    :param step: raw step dict with 'task_id' and 'state' keys.
    :param index: position of the step within the action.
    :param notes: notes to attach to the formatted step.
    :return: dict with url/state/id/index/notes keys.
    """
    task_id = step.get('task_id')
    return {
        'url': '/actions/{}/steps/{}'.format(action_id, task_id),
        'state': step.get('state'),
        'id': task_id,
        'index': index,
        'notes': notes,
    }
58dec56de2f554d1736a639b5b217ebccca63265
51,267
from functools import reduce
import operator


def trim_ranks(dims, ranks):
    """Return the TT-ranks to which a tensor train can be exactly reduced.

    A TT-rank can never exceed the product of the dimensions to the left
    or right of that edge.  Furthermore, no internal edge can have a rank
    higher than the product of a neighbouring edge's rank and the
    connecting dimension.  Ranks are iteratively reduced for each edge
    until both requirements hold.  (Commented-out numpy dead code from a
    previous version has been removed.)

    :param dims: mode sizes of the tensor train.
    :param ranks: requested TT-ranks, one per internal edge.
    :return: tuple of feasible TT-ranks.
    """
    ranks = list(ranks)
    # First requirement: cap each rank by the left/right dimension products.
    for i, r in enumerate(ranks):
        dim_left = reduce(operator.mul, dims[: i + 1], 1)
        dim_right = reduce(operator.mul, dims[i + 1 :], 1)
        ranks[i] = min(r, dim_left, dim_right)
    # Second requirement: propagate neighbour constraints until stable.
    # Pad with the boundary ranks of 1 to simplify the sweep.
    changed = True
    ranks = [1] + ranks + [1]
    while changed:
        changed = False
        for i, d in enumerate(dims):
            if ranks[i + 1] > ranks[i] * d:
                changed = True
                ranks[i + 1] = ranks[i] * d
            if ranks[i] > d * ranks[i + 1]:
                changed = True
                ranks[i] = d * ranks[i + 1]
    return tuple(ranks[1:-1])
49d59691aba2b67b7e6686d887961fcf1939c071
51,268
import requests
def publish(url, assets):
    """ API publishing function.

    POSTs *assets* as a JSON body to *url* and returns the decoded JSON
    response.

    :param url: endpoint accepting a JSON POST body.
    :param assets: JSON-serializable payload.
    :return: decoded JSON response body.
    """
    x = requests.post(url, json=assets)
    return x.json()
8ffe294e9d97d09f71ba082a243c1927c2a54743
51,269
def manage_groups(dset):
    """
    manage_groups
    description:
        Prompts the user with options to manage group assignments on the
        Dataset instance:
            - Assign indices to a group
            - View assigned group indices
            - Get data by group
        Returns a boolean indicating whether the user is finished with
        assigning groups. (this function gets called again if False is
        returned)
    parameters:
        dset (lipydomics.data.Dataset) -- lipidomics dataset instance
    returns:
        (bool) -- finished managing groups
    """
    print('Managing groups... What would you like to do?')
    print("\t1. Assign group")
    print("\t2. View assigned groups")
    print("\t3. Get data by group(s)")
    print('\t"back" to go back')
    option = input('> ')
    if option == "1":
        # Expect "name start end" on one line; indices are inclusive.
        print("Please provide a name for a group and its indices in order of name > starting index > ending index."
              "\n\t* group name should not contain spaces\n\t* indices start at 0\n\t* example: 'A 1 3'")
        group = input('> ')
        group = group.split()
        name = group[0]
        # Build the inclusive index range [start, end].
        indices = [_ for _ in range(int(group[1]), int(group[2]) + 1)]
        try:
            dset.assign_groups({name: indices})
            print('! INFO: Assigned indices: {} to group: "{}"'.format(dset.group_indices[name], name))
        except ValueError as ve:
            print('! ERROR:', ve)
            print("! ERROR: Failed to assign group, please check your formatting and try again")
    elif option == "2":
        # List every group and the indices assigned to it.
        for group in dset.group_indices:
            print('\t"{}": {}'.format(group, dset.group_indices[group]))
        return False
    elif option == "3":
        print("Which group would you like to view?")
        name = input('> ')
        print(dset.get_data_bygroup(name))
        return False
    elif option == 'back':
        # User is done managing groups.
        return True
    else:
        print('! ERROR: unrecognized option: "{}"'.format(option))
    # NOTE(review): option "1" also falls through to here, returning False
    # so the menu is shown again.
    return False
92b96bcfb387018c4148329cea577fe19c5c9290
51,270
def findIdx(list1, list2):
    """Return the indices of the elements of list1 that appear in list2."""
    indices = []
    for position, value in enumerate(list1):
        if value in list2:
            indices.append(position)
    return indices
2fb6c27cdc65185675bc4cd45a61d986f4792e07
51,271
def GenCount(data):
    """Append a total count column to an OPC dataframe.

    For each row, sums the values of every column whose name contains
    "b" and stores the totals in a new "BinCount" column.

    :param data: pandas DataFrame of OPC readings.
    :return: the same DataFrame with a "BinCount" column appended.
    """
    # Columns containing "b" are treated as size-bin columns —
    # presumably named b0, b1, ...; verify against the data source.
    bin_columns = [col for col in data.columns if "b" in col]
    totals = []
    for _, row in data.iterrows():
        totals.append(sum(row[col] for col in bin_columns))
    data.insert(len(data.columns), "BinCount", totals, True)
    return data
0cf0cb677b3f87cb434f60501213b62ec86f6d0c
51,273
from datetime import datetime


def reformat_subway_dates(date):
    """Convert an MTA subway date to the weather-underground format.

    MTA dates look like "05-01-11" (month-day-year); weather underground
    dates look like "2011-05-01" (year-month-day).  This re-parses the
    former and re-emits the latter so the two data sets can be joined on
    date.
    """
    parsed = datetime.strptime(date, "%m-%d-%y")
    return parsed.strftime("%Y-%m-%d")
a40aff606bc790e41b75b4588dbb9b4442510805
51,274
import re


def find_edf_artifacts(asc_str, kind='EBLINK|ESACC'):
    """Find artifacts in an edf asci representation.

    Parameters
    ----------
    asc_str : str
        String with edf asci represenation as returned by read_edf.
    kind : 'EBLINK|ESACC' | 'EBLINK' | 'ESACC'
        Kind of artifact to search.
    """
    # Validate every requested artifact kind before building the pattern.
    for part in kind.split('|'):
        if part not in ('EBLINK', 'ESACC'):
            raise ValueError("invalid kind parameter: %r" % kind)
    pattern = re.compile(r'\b(%s)\t[LR]\t(\d+)\t(\d+)' % kind)
    return pattern.findall(asc_str)
d58173ee8c4d66be7a5ffb3ff080a151a3e49e17
51,275
import json


def serialize_sso_records(user_social_auths):
    """Serialize user social auth model objects into plain dicts.

    :param user_social_auths: iterable of UserSocialAuth-like objects.
    :return: list of dicts with provider/uid/created/modified/extraData keys.
    """
    return [
        {
            'provider': record.provider,
            'uid': record.uid,
            'created': record.created,
            'modified': record.modified,
            'extraData': json.dumps(record.extra_data),
        }
        for record in user_social_auths
    ]
de84d7ba0d4f0c6f7c7074ad0d363c3ff031881b
51,276
import requests
import logging
import json
def update_intersight_role(AUTH, ORG_MOID, ROLE_MOID, CLAIM_CONFIG):
    """ Add priviledges to an Intersight Role.

    :param AUTH: requests-compatible auth object for the Intersight API.
    :param ORG_MOID: Moid of the organization the roles apply to.
    :param ROLE_MOID: Moid of the iam.Permission to attach the roles to.
    :param CLAIM_CONFIG: dict with 'intersight_base_url' and the list of
        role names under 'intersight_roles'.
    :return: Moid of the created iam.ResourceRoles object.
    """
    # Look up the Moids of the configured role names in one filtered GET.
    response = requests.get(
        CLAIM_CONFIG['intersight_base_url']
        + "iam/Roles?$select=Name,Moid&$filter=Name%20in%20%28"
        + ",".join(CLAIM_CONFIG['intersight_roles'])
        + "%29",
        auth=AUTH
    )
    logging.info("ROLE MOIDS :" + response.text)
    response_json = response.json()
    # Build the Roles relationship list for the ResourceRoles request.
    request_roles = []
    for role_moid_dict in response_json["Results"]:
        logging.info(role_moid_dict["Moid"])
        request_roles.append({
            "ObjectType":"iam.Role",
            "Moid":role_moid_dict["Moid"]
        })
    logging.info(request_roles)
    # Tie the permission, the organization, and the roles together.
    request_body = {
        "Permission":{
            "ObjectType":"iam.Permission",
            "Moid": ROLE_MOID
        },
        "Resource":{
            "ObjectType":"organization.Organization",
            "Moid": ORG_MOID
        },"Roles":request_roles
    }
    logging.info(request_body)
    response = requests.post(
        CLAIM_CONFIG['intersight_base_url'] + 'iam/ResourceRoles',
        data=json.dumps(request_body),
        auth=AUTH
    )
    logging.info(response.text)
    response_json = response.json()
    return response_json["Moid"]
d968cc71e994ff11863d007cc175a433d851e395
51,278
import os
import logging
import json


def get_database_uri():
    """Resolve the database connection URI.

    This method will work in the following conditions:
      1) With DATABASE_URI as an environment variable
      2) In Bluemix with DB2 bound through VCAP_SERVICES
      3) With PostgreSQL running on the local server as with Travis CI
    """
    if 'DATABASE_URI' in os.environ:
        # Get the credentials from DATABASE_URI
        logging.info("Using DATABASE_URI...")
        return os.environ['DATABASE_URI']
    if 'VCAP_SERVICES' in os.environ:
        # Get the credentials from the Bluemix environment
        services = json.loads(os.environ['VCAP_SERVICES'])
        return services['dashDB For Transactions'][0]['credentials']["uri"]
    # Local fallback (e.g. Travis CI).
    return "postgres://postgres:postgres@localhost:5432/postgres"
17a34194ac88c28434e31d199020fee470f1043d
51,284
def load_txt(path):
    """Load a text file and return the first token of each line.

    Reading stops at the first blank line (or end of file).

    :param path: the path we saved the data at.
    :return: list of leading tokens, one per non-blank line.
    """
    tokens = []
    with open(path, 'r') as handle:
        for line in handle:
            fields = line.split()
            # A blank line (or EOF) terminates the data section.
            if not fields:
                break
            tokens.append(fields[0])
    return tokens
902fdba2f6d3a9d2dbe1aef22fdcecfba1ed2455
51,285
from typing import Tuple
from typing import Counter


def check_string(instr: str) -> Tuple[bool, bool]:
    """Report whether any character of *instr* occurs exactly twice,
    and whether any occurs exactly three times."""
    frequencies = Counter(instr).values()
    has_pair = 2 in frequencies
    has_triple = 3 in frequencies
    return (has_pair, has_triple)
9c3ce9f01c857495528bd557ff903847f6841e77
51,287
def get_time_string(hour):
    """Return a display string for an integer hour 8-22 (8am to 10pm).

    Hours of 23 or more yield None.
    """
    if hour < 12:
        suffix, display = "am", hour
    elif hour == 12:
        suffix, display = "pm", hour
    elif hour < 23:
        suffix, display = "pm", hour - 12
    else:
        return None
    return "{}:00{}".format(display, suffix)
bc2d7f84f51ef2198d6cce7fa537f4e048331fcb
51,288