content
stringlengths
35
416k
sha1
stringlengths
40
40
id
int64
0
710k
def parse_create_or_delete(message):
    """Parse a create/delete event message into a flat summary dict.

    Parameters
    ----------
    message : dict
        Event payload with keys ``type``, ``action`` and ``data``;
        ``data`` holds ``owner.name`` plus either ``subject`` or ``name``.

    Returns
    -------
    dict
        ``{'type', 'event', 'values': {'user', 'subject'}}``.
    """
    data = message["data"]
    return {
        'type': message["type"],
        'event': message["action"],
        'values': {
            'user': data["owner"]["name"],
            # Fall back to the object's name when no explicit subject exists.
            'subject': data["subject"] if "subject" in data else data["name"],
        },
    }
922ec574053569c0156d7babdf81056e562b9878
19,780
def strip_prefix(name, strip_text):
    """Strip the first section of an underscore-separated ``name``.

    The section is removed only when it matches ``strip_text`` exactly,
    i.e. ``strip_text`` is followed by an underscore (or is the whole
    name). This fixes the original behaviour, which also mangled names
    that merely *started* with ``strip_text`` (e.g. ``tstx_motor``).

    Parameters
    ----------
    name: ``str``
        underscore_separated_name to strip from
    strip_text: ``str``
        Text to strip from the name, if it matches the first segment

    Returns
    -------
    stripped: ``str``
        The ``name``, modified or unmodified.
    """
    if name == strip_text:
        # Whole name is the prefix: stripping leaves nothing.
        return ''
    if name.startswith(strip_text + '_'):
        return name[len(strip_text) + 1:]
    return name
0efe643933a0006617bc0ed9416a6ef931de34e1
19,781
import os
def _win32_config_dir():
    """Return the resource directory under %APPDATA%.

    https://technet.microsoft.com/en-us/library/cc749104(v=ws.10).aspx

    Deliberately indexes the environment directly so that a missing
    APPDATA variable raises KeyError -- there is no sensible default.
    """
    appdata = os.environ['APPDATA']
    return appdata
d87eadfa41ed6dc171589345d3f4b7721ee3f145
19,783
def readSerialLine(ser):
    """Read one line from a serial connection (e.g. Arduino).

    Args:
        ser: object with a ``readline()`` method returning bytes.

    Returns:
        str: the line decoded as UTF-8 with surrounding whitespace
        (including the line terminator) stripped.
    """
    # The original routed the value through two redundant temporaries;
    # the pipeline is read -> decode -> strip.
    return ser.readline().decode("utf-8").strip()
45a4eb8b2abf16460a59d4d66405d2a6546422e9
19,784
def scriptExit(*args, **kwargs):
    """Exit Nuke (stub).

    Accepts and ignores any positional/keyword arguments; always
    returns ``None``.
    """
    return None
956efc2b3c2c6585974043d0ad8d36f80612a24c
19,785
import numpy
def xavier_weight(nin, nout=None, rng=None, dtype="float32"):
    """Xavier (Glorot) uniform initialisation.

    Draws an (nin, nout) matrix uniformly from [-r, r] where
    r = sqrt(6) / sqrt(nin + nout). ``rng`` seeds a fresh RandomState;
    a square matrix is produced when ``nout`` is omitted.
    """
    state = numpy.random.RandomState(rng)
    if nout is None:
        nout = nin
    span = numpy.sqrt(6.) / numpy.sqrt(nin + nout)
    weights = state.rand(nin, nout) * 2 * span - span
    return numpy.array(weights, dtype=dtype)
e86e778e653aa5ce02c3a2cd53e1b079e8d6f836
19,788
import argparse
import pathlib
def _parse_args() -> argparse.Namespace:
    """Build the command-line parser and parse ``sys.argv``.

    One subcommand, ``project``, which takes a manifest path plus
    optional repeatable ``--include``/``--exclude`` filters and a
    ``--prefix`` stored as ``path_prefix``.
    """
    parser = argparse.ArgumentParser()
    commands = parser.add_subparsers(dest='command', metavar='<command>', required=True)
    project = commands.add_parser(
        'project', help='output components of an MCUXpresso project')
    project.add_argument('manifest_filename', type=pathlib.Path)
    project.add_argument('--include', type=str, action='append')
    project.add_argument('--exclude', type=str, action='append')
    project.add_argument('--prefix', dest='path_prefix', type=str)
    return parser.parse_args()
c9487848c36678fe389d528c8ee8d72d830e7bab
19,789
def strip_unnamed(string: str) -> str:
    """Filter out pandas' placeholder labels for NA header cells.

    When fusing headers coming from Excel, cells with NA values are
    labelled ``Unnamed:...``; those are replaced by an empty string,
    anything else is returned stripped of surrounding whitespace.
    """
    # Strip once instead of twice (the original called .strip() in both
    # the test and the return).
    stripped = string.strip()
    return "" if stripped.startswith("Unnamed:") else stripped
cf4167b23a96c5248491a13d149a0e10b6a7714a
19,790
import functools
import operator
def evaluate(packet):
    """Evaluate a Buoyancy Interchange Transmission System (BITS)
    ``packet`` and return its numeric value.

    op 0-3 aggregate sub-packets (sum/product/min/max), op 4 is a
    literal, ops 5-7 compare the first two sub-packets (>, <, ==).
    Unknown ops evaluate to 0.
    """
    op = packet.op
    if op == 4:
        return packet.value
    if op in (5, 6, 7):
        left = evaluate(packet.value[0])
        right = evaluate(packet.value[1])
        if op == 5:
            return int(left > right)
        if op == 6:
            return int(left < right)
        return int(left == right)
    children = (evaluate(p) for p in packet.value)
    if op == 0:
        return sum(children)
    if op == 1:
        return functools.reduce(operator.mul, children, 1)
    if op == 2:
        return min(children)
    if op == 3:
        return max(children)
    return 0
bd6be3bf0e5040e989a90409905edf89413f5ae0
19,791
def temp_h5_file(tmpdir_factory):
    """Pytest fixture helper: path of a fresh ``test.h5`` in a temporary
    ``data`` directory, so automated test runs do not clutter the test
    directory with output files."""
    data_dir = tmpdir_factory.mktemp('data')
    return str(data_dir.join('test.h5'))
d5a01682747fcdc27e917da1a7456380729239e5
19,792
def peak_handler(payload):
    """Split measurement readings from *payload* into "L" and "S" buckets.

    Payload is the output of the download function: a list of items whose
    ``Data.Measurements`` is a nested list of ``[value, kind]`` pairs.
    Items whose measurements equal ``[[]]`` are skipped.

    Returns a tuple ``(L_values, S_values)``.
    """
    assert payload != [], "No data to process."
    longs = []
    shorts = []
    for item in payload:
        rows = item["Data"]["Measurements"]
        if rows == [[]]:
            continue
        for row in rows:
            for reading in row:
                if reading[1] == "L":
                    longs.append(reading[0])
                if reading[1] == "S":
                    shorts.append(reading[0])
    return longs, shorts
f6e4794eed4217fde46dcb7b69c6a05223d3272e
19,794
def lxml_text_content(element):
    """Return all text contained in *element*'s node tree, with the
    individual text chunks joined by single spaces."""
    return " ".join(element.itertext())
84dd39c5dec068bef91960f38980e124531502a0
19,797
def get_keywords_from_stupid_txt_file(file_name):
    """Read keywords from a simple text file.

    Each line contributes the text before its *last* space character
    (everything after the last space is discarded).
    """
    keywords = []
    with open(file_name) as handle:
        for raw_line in handle:
            cut = raw_line.rfind(' ')
            keywords.append(raw_line[:cut])
    return keywords
55b27800772fef1f549a54cfd5fdd7c1adf52bbf
19,798
import logging
import pathlib
def _entry_file_is_valid(key, entry):
    """Check the validity of a `file` field of an entry.

    Ensures that 1. the entry has a `file` field, 2. the `file` field is
    nonempty, 3. the file pointed to exists, and 4. the file pointed to
    is a file, not a directory.

    Returns
    -------
    bool:
        True if the file is valid by the above definitions. False otherwise.
    """
    # logging.warn() is deprecated; use logging.warning().
    if 'file' not in entry:
        logging.warning(f'No file in entry with key `{key}`. Skipping.')
        return False
    if entry['file'] == '':
        logging.warning(f'File field in entry with key `{key}` is '
                        'empty. Skipping.')
        return False
    # Build the Path once instead of twice.
    path = pathlib.Path(entry['file'])
    if not path.exists():
        logging.warning(f"File `{entry['file']}` in entry with key "
                        f"`{key}` does not exist. Skipping.")
        return False
    if not path.is_file():
        logging.warning(f"File `{entry['file']}` in entry with key "
                        f"`{key}` is not a file. Skipping.")
        return False
    return True
815ef3d2ce55028634a2bad0d3b1c9444ce2b752
19,799
def abbrev(term_list):
    """Given a list of terms, return the '|'-joined string of their
    abbreviations (Point->Pt, ProperInterval->PInt, Interval->Int,
    Region->Reg). Unknown terms raise KeyError."""
    short = {"Point": "Pt", "ProperInterval": "PInt",
             "Interval": "Int", "Region": "Reg"}
    parts = [short[term] for term in term_list]
    return '|'.join(parts)
4a24282d324b2a2194007bc6edb373865321c672
19,801
def squareMean3(array, centerPixel):
    """Kernel neighborhood function for focal map algebra: mean of the
    3x3 square centred on ``centerPixel``.

    @param array - array from which to retrieve the neighborhood kernel
    @param centerPixel - (i, j) coordinates of the kernel centre
    @return - mean of the 3x3 neighborhood around centerPixel

    NOTE(review): assumes centerPixel is at least one pixel away from
    every border; for edge pixels the negative start index silently
    wraps -- confirm callers only pass interior pixels.
    """
    top = centerPixel[0] - 1
    left = centerPixel[1] - 1
    window = array[top:top + 3, left:left + 3]
    return window.mean()
e017f38483fdc72163adcd65ef9ef4900fa8b350
19,802
import random
def generate_int_list(list=None, length=10, min_value=0, max_value=10):
    """Generate and return a list of random integers.

    The random values will be between min_value and max_value - 1.
    If ``list`` is shorter than ``length`` it is completed in place with
    random values until its length equals ``length``.

    Fixes the classic mutable-default-argument bug: the original default
    ``list=[]`` was shared across calls, so repeated calls kept growing
    the same list.
    """
    if list is None:
        list = []
    while len(list) < length:
        list.append(random.randrange(min_value, max_value))
    return list
f80ce3edd5cd1ca4189d1cda9c3d151ff4943d50
19,803
def __fix_context(context):
    """Return the context dict with shared mutable members defended.

    Copies list-valued members such as script and css files (via slicing,
    in place) so later mutation cannot pollute a shared context. The
    same dict object is returned.
    """
    for attr in ('script_files', 'css_files'):
        if attr in context:
            context[attr] = context[attr][:]
    return context
ebaaa2b7afccf47a2d4e9eb40b481be46565b95e
19,804
import os
import sys
def get_destdir(destdir, name):
    """Return the destination directory for the current snapshot's results.

    The directory name is ``destdir/<name without extension>``. Exits the
    program with status 1 if the directory already exists, to avoid
    overwriting previous results.
    """
    stem = os.path.splitext(name)[0]
    target = os.path.join(destdir, stem)
    if os.path.exists(target):
        print("ERROR: destination directory already exists:", target)
        sys.exit(1)
    return target
17fdfd1472f94a1ad66d1d9540a8d33fadbdc79a
19,805
import torch
def residuals(target, predictions: list, total=True):
    """Mean prediction error (target minus first prediction).

    Parameters
    ----------
    target : torch.Tensor
        True values of the target variable.
    predictions : list of torch.Tensor
        Predicted expected values; only ``predictions[0]`` is used.
    total : bool, default True
        True -> overall scalar mean; False -> mean along dim 0
        (i.e. per step of the horizon).

    Returns
    -------
    torch.Tensor
        Scalar or 1-d mean error. Lower is better.

    Raises
    ------
    ValueError
        If prediction and target shapes differ.
    """
    if predictions[0].shape != target.shape:
        raise ValueError('dimensions of predictions and targets need to be compatible')
    error = target - predictions[0]
    if not total:
        return torch.mean(error, dim=0)
    return torch.mean(error)
98898cced03d3c0d6c1ea53af25b8987ade01ea7
19,808
import codecs
def _reg_file_to_str(file_path):
    """Read the UTF-16-LE encoded file at *file_path* and return its text."""
    with codecs.open(file_path, 'r', 'utf-16-le') as handle:
        return handle.read()
657c50e5f6b410eb13c6e651054c98ac9db6da45
19,811
def divisao(): """ Imprime uma linha divisória :return: não há retorno. Somente a impressão na saída padrão """ return '-' * 60 + '\n'
acf136809d00746ece26f3b92cc3cd28ff7d7949
19,816
import os
import yaml
def dummy_conda_env(tmpdir, file_name: str = "conda-env.yaml") -> str:
    """Write a dummy conda-environment YAML file into *tmpdir* and
    return the path to the written file."""
    environment = {
        "name": "test-env",
        "channels": ["conda-forge"],
        "dependencies": [
            "forcebalance=1.7.5=py37h48f8a5e_0",
            "nonbonded=0.0.1a4=pyh87d46a9_0",
            "openff-evaluator=0.3.1=pyhf40f5cb_0",
            "openff-recharge=0.0.1a6=pyhf40f5cb_0",
            "pint=0.14=py_0",
            "openmm=7.4.2=py37_cuda101_rc_1",
            "yank=0.25.2=py37_1",
            "pymbar=3.0.5=py37hc1659b7_0",
            "openforcefield=0.8.0=pyh39e3cac_0",
            "openeye-toolkits=2020.1.0=py37_0",
            "rdkit=2020.09.2=py37h713bca6_0",
            "xorg-xextproto=7.3.0=h14c3975_1002",
            "openmmtools=0.20.0=py37_0",
            {"pip": ["ambertools==20.9", "amberlite==16.0"]},
        ],
    }
    # Build the target path once instead of twice.
    target = os.path.join(tmpdir, file_name)
    with open(target, "w") as handle:
        yaml.safe_dump(environment, handle)
    return target
b94d0b9a221b5f83273e3cd9460b8a44f4d7228b
19,818
def fields_with_prefix(prefix, fields):
    """Build a SQL select-list fragment aliasing each field.

    Produces ``prefix.field AS prefix_field`` for every field, joined by
    commas, e.g. ``fields_with_prefix('p', ['a', 'b'])`` returns
    ``'p.a AS p_a, p.b AS p_b'``.

    Replaces the original's self-described "damn python magic"
    (``' AS {}_'.format(prefix).join([x] * 2)``) with a readable
    f-string.

    :param prefix: table alias/prefix
    :param fields: list of string column names
    :return: comma-separated select-list string
    """
    return ', '.join(f'{prefix}.{name} AS {prefix}_{name}' for name in fields)
5d7a2e2d633bb9773661d0be2c41219fb7b76f7d
19,819
from pathlib import Path
def guard_name_project_plus_name(filename: str, project_name: str):
    """Create a C++ header-guard name from a project name and filename.

    Parameters
    ----------
    filename : str
        The name of the file containing the header guard.
    project_name : str
        The C++ project name to prepend; may be empty.

    Returns
    -------
    str
        The upper-cased guard, ``PROJECT_STEM_H`` or ``STEM_H``.
    """
    stem = Path(filename).stem
    if not project_name:
        return f"{stem}_H".upper()
    return f"{project_name}_{stem}_H".upper()
53d310c8858934bd2453a62cf83aa1d4439be9c1
19,820
import torch
def compute_i_th_moment(input, i):
    """Compute the i-th moment of *input* channel-wise.

    :param input: tensor of size (n, c, h, w)
    :param i: which moment to compute (1=mean, 2=std, otherwise the
        i-th standardized moment)
    :return: (n, c, 1, 1) tensor with the i-th moment of every channel
    """
    n, c = input.size(0), input.size(1)
    flat = input.view(n, c, -1)
    mean = torch.mean(flat, dim=2, keepdim=True).view(n, c, 1, 1)
    # eps keeps the std strictly positive for constant channels.
    variance = torch.var(flat, dim=2, keepdim=True) + 1e-5
    std = torch.sqrt(variance).view(n, c, 1, 1)
    if i == 1:
        return mean
    if i == 2:
        return std
    standardized = (input - mean) / std
    return torch.mean(standardized.pow(i).view(n, c, -1),
                      dim=2, keepdim=True).view(n, c, 1, 1)
1c23d885836863d35bac21c5cf5ea1fc46aec228
19,821
def get_word_list(cursor, ids=False):
    """Return all words as a dict mapping word -> tuple of the remaining
    selected columns (soundex, optionally preceded by word_id when
    ``ids`` is exactly True)."""
    if ids is True:
        query = "select word, word_id, soundex from words"
    else:
        query = "select word, soundex from words"
    rows = cursor.execute(query).fetchall()
    return {row[0]: row[1:] for row in rows}
2738e6b599911892808b33d3c07cfc9fcd91a063
19,822
import re
def _is_valid_cluster_name(name):
    """Validate a cluster name.

    Valid names are 1-25 characters, start with a letter, and contain
    only letters, digits and hyphens. ``None`` and empty strings are
    invalid (the original raised TypeError on ``None`` because the value
    was passed straight to ``re.match``).
    """
    if not name or len(name) > 25:
        return False
    return re.match("^[a-zA-Z][A-Za-z0-9-]*$", name) is not None
af59a56ecc3ad9b57e20578625cae988c57023ca
19,823
def options(occ=None, debyewaller=None, scattering_factor=None,
            moment=None, incident_polarisation_vector=(1, 0, 0), magnetic_formfactor=None,
            energy_kev=8, polarisation='sp', azi_ref_q=(1, 0, 0), psi=0, f0=0, f1=1, f2=0):
    """
    Create an input dict that will work with all structure factor (sf_) functions
    :param occ: [m,1] array of atomic occupancies
    :param debyewaller: [n,m] array of thermal factors for each atom and reflection
    :param scattering_factor: array [n,m] or [n]: radiation dependent scattering factor/ form factor,/ scattering length
    :param moment: [m,3] array of magnetic moment direction in orthogonal basis
    :param incident_polarisation_vector: [1,3] direction of incident polarisation
    :param magnetic_formfactor: [n,m] array of magnetic form factors for each atom and relection
    :param energy_kev: float value of incident x-ray energy in keV
    :param azi_ref_q: [1,3] azimuthal refence, in cartesian basis (Q)
    :param psi: float value of the azimthal angle - the rotation out of the scattering plane.
    :param polarisation: str definition of the polarisation can be: ['ss','sp','ps','pp'] with 's'=sigma, 'p'=pi
    :param f0: float Flm value 0 (charge)
    :param f1: float Flm value 1
    :param f2: float Flm value 2
    :return: dict mapping each parameter name above to its value
    """
    # locals() called as the first (and only) statement contains exactly
    # the keyword parameters above, so this returns {name: value} for
    # every option. Do not add local variables before this line.
    return locals()
0890c8a7680130d93e8de3093b51ac9bea5d7b52
19,824
import codecs
def punycode(text):
    """Encode *text* with the punycode codec and return the result as str."""
    encoded = codecs.encode(text, "punycode")
    return encoded.decode('utf-8')
f717f3845b55d79c576dd4c889888bad7f071190
19,825
def find_chromosome(l):
    """Given a name line, return the chromosome: the second
    space-separated field after stripping surrounding whitespace."""
    fields = l.strip().split(" ")
    return fields[1]
2a7dd92808699edb30a4fc1400bd887161cd14e5
19,827
def marking_on_coordinate_axes(arm_list):
    """Prepare systolic and diastolic value lists for plotting.

    ``arm_list`` layout: index 0 = systolic readings, index 1 =
    diastolic readings, index 3 = arm name ('Right' or 'Left').

    Raises ValueError for any other arm name.
    """
    arm = arm_list[3]
    if arm not in ('Right', 'Left'):
        raise ValueError("Incorrect arm name")
    systolic = [int(v) for v in arm_list[0]]
    diastolic = [int(v) for v in arm_list[1]]
    return arm, systolic, diastolic
f0216c83a5d99484bd3523ead3bd925fc3c9ef6b
19,828
from typing import List
def _get_offset_63360(bounds: List[float]) -> List[float]:
    """Custom offset cases for scale==63360, keyed on the northern
    latitude (``bounds[3]``): Lower 48, then Alaska latitude bands,
    then the northernmost maps (each .6 wide)."""
    maxy = bounds[3]
    bands = (
        (49.25, [.25, .25]),    # Lower 48
        (59.25, [1 / 3, .25]),  # Sections of Alaska
        (62.25, [.375, .25]),
        (68.25, [.5, .25]),
    )
    for limit, offset in bands:
        if maxy < limit:
            return offset
    # Each remaining map has a width of .6
    return [.2, .25]
668de2c734f951883329d0c8aa7a84551e94beab
19,829
import os
def batch_id():
    """Return the test batch id from the GENCOVE_BATCH_ID_TEST
    environment variable (None when unset)."""
    return os.environ.get("GENCOVE_BATCH_ID_TEST")
e7abc28e7fc0b1d1c9be6b8e806c3c620dcda3ec
19,830
import binascii
import os
def get_uid() -> str:
    """Return a random identifier, unique with high probability, sourced
    from the operating system's randomness (32 lowercase hex chars)."""
    token = os.urandom(16)
    return binascii.hexlify(token).decode('utf-8')
c7024b7bf3669ddae6fe4b297df0bd8398f0b5f7
19,831
def mol2lbmol(x): """mol -> lbmol""" return x/453.592
f8e706e0ea3720b00f1d916a30ce1b5e964980b7
19,834
def camel_case(string, delimiters=(" ",)):
    """Convert a string into CamelCase.

    Parameters
    ----------
    string: str
        The string to convert.
    delimiters: iterable of str, default (" ",)
        Characters that denote separate words; applied in order.

    Fixes two defects: the mutable list default argument is now an
    immutable tuple, and empty segments (e.g. from consecutive
    delimiters) no longer raise IndexError -- they are simply dropped.
    """
    for delimiter in delimiters:
        words = [w[0].upper() + w[1:] if w else w
                 for w in string.split(delimiter)]
        string = "".join(words)
    return string
45d6ae68f19c634093934209d6599d6e6b0d6347
19,835
def _maybe_name(_id):
    """Return a hint message when *_id* is not the expected 32 characters
    long (suggesting a name was passed instead of an id); empty string
    otherwise."""
    return "" if len(_id) == 32 else "Maybe you used name rather than id?"
aea00f9602a09816386964debf480593838809c5
19,836
def get_metric(line, name, split):
    """Extract a metric value from a console output line.

    :param line: console output line
    :param name: name of the metric (the value starts one character
        after the name, skipping the separator)
    :param split: character that terminates the value
    :return: the metric value substring
    """
    begin = line.find(name) + len(name) + 1
    finish = line.find(split, begin)
    return line[begin:finish]
0a17f69983ed125b67ac513e3b7f658d5d1c35f5
19,838
def vis14(n):  # DONE
    """Draw a single 'O' followed by *n* rows of 'OOOOOO'.

        O
    OOOOOO   (x n)

    Total Os: 1 + 6*n (7, 13, 19 for n = 1, 2, 3).
    """
    rows = ['O'] + ['OOOOOO'] * n
    return '\n'.join(rows) + '\n'
b898b7a82885a644d2c42c88aba141153d0d23b6
19,841
import copy
def get_actions_for_states_direct(states):
    """Build the possible actions for a direct state change.

    For each state, pairs it with every other state plus a snapshot of
    the (flow_id, new_path) changes accumulated so far, and finally with
    a ('NoTrans', []) action.

    Note: the change list deliberately accumulates across candidate
    states for a given source state, matching the original behaviour.

    @param states: list of {flow_id: path} dicts
    @return: list of (state, (next_state_or_'NoTrans', changes)) actions
    """
    actions = []
    for current in states:
        diffs = []
        candidates = copy.deepcopy(states)
        # Drop the source state itself from the candidates.
        candidates.remove(current)
        for candidate in candidates:
            for flow_id in current:
                if current[flow_id] != candidate[flow_id]:
                    diffs.append((flow_id, candidate[flow_id]))
            actions.append((current, (candidate, copy.deepcopy(diffs))))
        actions.append((current, ('NoTrans', [])))
    return actions
823db2882788eca4c9857dab84be607c0140a0e8
19,843
def IsSwitch(op): """Return true if `op` is a Switch.""" return op.type == "Switch" or op.type == "RefSwitch"
03d8c4f458ee1316f3f1fe25f956cf1bf8b71af3
19,845
async def get_join_position(ctx, user_id):
    """Return the 0-based join position of *user_id* in the guild.

    Members are ordered by join date (YYYY-MM-DD granularity) and then
    numerically by id, mirroring the original string-based sort.
    """
    # One "YYYY-MM-DD-id" stamp per member.
    stamped = ["{}-{}".format(m.joined_at.strftime("%Y-%m-%d"), m.id)
               for m in ctx.message.guild.members]
    # Sorting the integer tuples orders by (year, month, day, id).
    ordered = sorted(stamped, key=lambda entry: tuple(map(int, entry.split("-"))))
    ids_in_order = [entry.split("-")[-1] for entry in ordered]
    return ids_in_order.index(str(user_id))
7f50d65319f880b64b24564e5e68ab0fe53e938c
19,846
import argparse
def configuration():
    """Return configuration parameters for the matrix-query solver.

    Adds explicit ``type=`` converters so values supplied on the command
    line are parsed to the same numeric types as the defaults (the
    original left CLI-supplied values as strings).

    Returns
    -------
    args : argparse.Namespace
        Parsed parameters.
    """
    parser = argparse.ArgumentParser(description='Matrix Query')
    parser.add_argument('--maxiter', default=1000000, type=int,
                        help='total iteration')
    parser.add_argument('--maxitercg', default=5, type=int,
                        help='maximum iteration for conjugate gradient method')
    parser.add_argument('--maxiterls', default=50, type=int,
                        help='maximum iteration for finding a step size')
    parser.add_argument('--theta', default=1e-10, type=float,
                        help='determine when to stop conjugate gradient method'
                             ' smaller theta makes the step direction more accurate'
                             ' but it takes more time')
    parser.add_argument('--beta', default=0.5, type=float,
                        help='step size decrement')
    parser.add_argument('--sigma', default=1e-2, type=float,
                        help='determine how much decrement in '
                             'objective function is sufficient')
    parser.add_argument('--NTTOL', default=1e-3, type=float,
                        help='determine when to update self.param_t, '
                             'smaller NTTOL makes the result more accurate '
                             'and the convergence slower')
    parser.add_argument('--TOL', default=1e-3, type=float,
                        help='determine when to stop the whole program')
    parser.add_argument('--MU', default=2, type=int,
                        help='increment for '
                             'barrier approximation parameter self.param_t')
    parser.add_argument('--init_mat', default='id_index', type=str,
                        help='id_index method is sufficient as an initialization.')
    parser.add_argument('--basis', default='work', type=str,
                        help='id: id mat; work: work mat')
    return parser.parse_args()
5d13bdf50d3665d627c2f5fbed91b6b5976ec35c
19,852
def isComposite(x): """Returns whether or not the given number x is Composite. A composite number is a natural number greater than 1 and which has more than two factors For example: - Calling isComposite(11) will return False - Calling isPrime(71) will return True - Calling isPrime(12) will return False - Calling isPrime(76) will return False """ factors = [] for i in range(1, x + 1): if (x % i == 0): factors.append(i) length = len(factors) # finding out length of factors using len function if (length > 2): # checking for number of factors as Composite number have more than two factors return True else: return False
5cc9e2cb32ff73bf83acb6fa08376b0e5bf45334
19,853
import os
def c_path(tail):
    """Build a path rooted in the user's ``~/.carl`` directory."""
    home = os.path.expanduser('~')
    return os.path.join(home, '.carl/', tail)
9cae3933be27b0108cf76b0033d03da0c2fc5a71
19,854
def _get_partner(pdb_filename):
    """Return the binding-partner code encoded in a pdb filename:
    'r' for receptor markers (``_r``/``_2``), 'l' for ligand markers
    (``_l``/``_1``), 'x' when neither is present.

    e.g. ``11as_r_u.pdb`` gives ``'r'``.
    """
    if "_r" in pdb_filename or "_2" in pdb_filename:
        return "r"
    if "_l" in pdb_filename or "_1" in pdb_filename:
        return "l"
    return "x"
f1ab5b4f4f77703bbe315fd2cb44495e4b414615
19,856
def frequency_of_meetings():
    """Frequency of Meetings (model constant).

    How many maintenance meetings the agent has with each client per
    month: one meeting per year.

    Units: Meetings/Month/Person. Original Eqn: 1/12.
    """
    return 1 / 12
2c54e3e284698dd8d45848657cfb86227e411cbf
19,857
import sys
import os
import unittest
def additional_tests():
    """Collect `test*.py` modules next to this file into a unittest
    suite. Used directly by setuptools to run unittests.

    Temporarily prepends this file's directory to sys.path so the test
    modules are importable, then restores it.
    """
    here = os.path.dirname(__file__)
    sys.path.insert(0, here)
    suite = unittest.TestSuite()
    module_names = [entry[:-3] for entry in os.listdir(here)
                    if entry.startswith('test') and entry.endswith('.py')]
    for name in module_names:
        module = __import__(name)
        if hasattr(module, 'suite'):
            suite.addTest(module.suite())
    sys.path.pop(0)
    return suite
1f56a7d39d4fd9007a56a6c976cf8fc2dc4361de
19,858
from typing import List
def get_keys(format_string: str) -> List[str]:
    """Return the dictionary keys embedded in a format string.

    Unmarked keys keep their order and multiplicity. Keys containing
    '#' are treated as marked (reusable) tags: they are de-duplicated
    and appended after the unmarked keys.
    """
    found = []
    position = 0
    for _ in range(format_string.count('{')):
        start = format_string.find('{', position) + 1  # skip the '{'
        position = format_string.find('}', start)
        found.append(format_string[start:position])
    plain = [key for key in found if '#' not in key]
    marked = list({key for key in found if '#' in key})
    return plain + marked
c90e5201d56875dbd3044d7f46823921b1c7736d
19,859
import sys
def config(args):
    """Handle config management.

    Requires ``args.cmnd`` to hold a subcommand; otherwise prints a
    usage hint and returns 1. With a subcommand, echoes the args.
    """
    if args.cmnd:
        print(args)
        return None
    sys.stdout.write('Subcommands required, for help run fyrd conf -h\n')
    return 1
4279880664ca1ff9d3ee36b10b8b66c94df921fc
19,860
def path_cost(path): """The total cost of a path (which is stored in a tuple with the final action.""" # path = (state, (action, total_cost), state, ... ) if len(path) < 3: return 0 else: action, total_cost = path[-2] return total_cost
7f0fa8a3a35809977bc2aa75164b78e354126812
19,861
def prefix_to_infix(prefix: str) -> str:
    """Convert a prefix expression over single-letter operands into a
    fully parenthesised infix expression.

    Scans the input right-to-left: operands are pushed; each operator
    pops two operands and pushes "(left op right)".
    """
    operands: list = []
    for symbol in prefix[::-1]:
        if symbol.isalpha():
            operands.append(symbol)
        else:
            left = operands.pop()
            right = operands.pop()
            operands.append(f"({left}{symbol}{right})")
    return operands[0]
a7455fed412f4dd9a0129c864e5346d054c71bc4
19,862
def total_edge_pixels_from_mask(mask):
    """Compute the total number of border-pixels in a mask.

    A border pixel is an unmasked pixel (``mask[y, x]`` is False) that
    has at least one masked (True) pixel among its 8 neighbours.

    Parameters
    ----------
    mask : 2-d boolean array (shape (rows, cols))
        True marks masked-out pixels.

    Returns
    -------
    int
        Count of unmasked pixels adjacent to a masked pixel.

    NOTE(review): for pixels on the last row/column, ``y + 1`` / ``x + 1``
    raise IndexError, and ``y - 1`` / ``x - 1`` wrap to the opposite
    edge on the first row/column. This code therefore assumes the
    outermost ring of the mask is entirely True -- confirm with callers.
    """
    border_pixel_total = 0
    # Scan every pixel; only unmasked pixels can be border pixels.
    for y in range(mask.shape[0]):
        for x in range(mask.shape[1]):
            if not mask[y, x]:
                # Any of the 8 neighbours being masked makes this a border pixel.
                if mask[y + 1, x] or mask[y - 1, x] or mask[y, x + 1] or mask[y, x - 1] or \
                        mask[y + 1, x + 1] or mask[y + 1, x - 1] or mask[y - 1, x + 1] or mask[y - 1, x - 1]:
                    border_pixel_total += 1
    return border_pixel_total
b88717f7e8a92d35c3db7a8dbaba1879b0707637
19,864
def ci(series, beta=0.95):
    """Return the central interval of *series* that covers proportion
    *beta* of the distribution, as (lower, upper) quantiles."""
    tail = (1 - beta) / 2
    lower = series.quantile(tail)
    upper = series.quantile(1 - tail)
    return (lower, upper)
d1f4554bd1f1d801798690db2634cfcbd12cee41
19,865
def grid_challenge(grid):
    """Hackerrank: https://www.hackerrank.com/challenges/grid-challenge/problem

    Sort each row of the square character grid alphabetically (in
    place, as the original did), then report whether every column is in
    non-decreasing order top to bottom.

    Args:
        grid (array): Array of strings.

    Returns:
        str: "YES" if all columns are ascending after row sort, else "NO".
    """
    # Sort rows in place (callers observe the mutation, as before).
    for idx, row in enumerate(grid):
        grid[idx] = "".join(sorted(row))
    # Check each column for non-decreasing order.
    for col in range(len(grid[0])):
        column = [row[col] for row in grid]
        if any(upper > lower for upper, lower in zip(column, column[1:])):
            return "NO"
    return "YES"
1373a4a00b67bebd01ccb69803868430ebf22dfb
19,866
def cur_to_dict(self, cur):
    """Convert rows from a SQLite cursor into a list of user dicts.

    Each row must expose the columns ``user_id``, ``username``,
    ``email`` and ``pass`` by name (e.g. via ``sqlite3.Row``).

    Removes the leftover debug ``print(rows)`` from the original.

    :param cur: SQLite cursor positioned on a user query.
    :return: list of dicts with keys user_id/username/email/pass.
    """
    users = []
    for row in cur.fetchall():
        users.append({
            'user_id': row['user_id'],
            'username': row['username'],
            'email': row['email'],
            'pass': row['pass'],
        })
    return users
3f7f07a7be73f02498c173b503b62ef580de0327
19,867
def hasCycle(self, head):  # Floyd cycle detection (tortoise and hare)
    """
    :type head: ListNode
    :rtype: bool
    """
    tortoise = head
    try:
        hare = head.next
        while tortoise is not hare:
            tortoise = tortoise.next
            hare = hare.next.next
        return True
    except AttributeError:
        # A `.next` access hit None -- the list ends, so no cycle.
        return False
e15a628af424b7584db115ed74599f2eb4a18a79
19,868
import re
def count_items_matches(text, item, regexp):
    """Count non-overlapping occurrences of *item* in *text* that are
    bounded on both sides by *regexp* (or by string start/end)."""
    boundary = r'(^|%s)%s($|%s)' % (regexp, re.escape(item), regexp)
    pattern = re.compile(boundary, flags=re.U)
    return len(pattern.findall(text))
37428503a3f10f6a6e2f5b6d89b0ba4183b33bb0
19,869
import json
def kernel_ready_event(event_str: str) -> bool:
    """Return True when *event_str* is an event signalling the kernel is
    ready to run a cell.

    Seems like `comm_info_reply` is a reasonable target for
    `jupyter notebook` whereas `kernel_info_reply` works for
    `jupyter server`.
    """
    try:
        event = json.loads(event_str)
        ready = (event["msg_type"] == "kernel_info_reply"
                 and event["content"]["status"] == "ok")
    except (TypeError, KeyError):
        # Non-string input or missing fields -> not the event we want.
        return False
    return ready
d12bd2944bfb6d49211dc9dfbc5f844b02e60e05
19,870
from os import sep
def slashpath_to_localpath(path):
    """Convert a '/'-separated path to the local OS separator."""
    return sep.join(path.split('/'))
cd43ef9f49b723c8f144773268de17f77c261c01
19,871
import ipaddress
def is_competitor(contest, user, request=None):
    """Whether a user is a competitor of a contest.

    Decision order: anonymous users never compete; admins always do;
    invisible contests exclude everyone else; otherwise group membership
    and allowed-IP-range restrictions (if configured) must both pass.

    :param contest: contest model with ``visible``, ``groups`` (ORM
        queryset) and ``allowed_ip_ranges`` (iterable of CIDR strings).
    :param user: user model exposing ``is_anonymous``, ``is_admin_role()``
        and ``userprofile.group``.
    :param request: request-like object with an ``ip`` attribute; only
        needed when the contest restricts IP ranges.
    :return: bool.
    """
    # Anonymous user is always not a competitor.
    if user.is_anonymous:
        return False
    # Admin always is competitor.
    elif user.is_admin_role():
        return True
    # Regular user can't retrieve invisible contest.
    if not contest.visible:
        return False
    # True if no group and no IP limitation.
    if not contest.groups.exists() and not contest.allowed_ip_ranges:
        return True
    else:
        # Check if user in the contest groups list.
        if contest.groups.exists() \
                and not contest.groups \
                .filter(pk=user.userprofile.group.pk) \
                .exists():
            return False
        # Check if user request IP in allowed IP ranges.
        if contest.allowed_ip_ranges:
            # request is required to get the IP.
            # NOTE(review): failure is reported via print() rather than
            # logging -- consider a logger here.
            if not request:
                print('request is required')
                return False
            else:
                request_ip = ipaddress.ip_address(request.ip)
                # strict=False accepts host bits set in the CIDR.
                if not any(
                    request_ip in ipaddress.ip_network(cidr, strict=False)
                    for cidr in contest.allowed_ip_ranges
                ):
                    return False
    # True if pass all check.
    return True
a04892fab3e2c03b5333cae32233fcb7136be67b
19,872
def isBalanced(s): """ Check if the structure s is of a balanced nature """ balance = 1 for bracket in ["()", "[]", "{}", "<>"]: counter = 0 for a in range(len(s)): if s[a] in bracket[0]: counter += 1 elif s[a] in bracket[1]: counter -= 1 if counter != 0: balance = 0 return balance
2716a26086d90210e7de0cef8f9cadf2cf4b63be
19,873
def get_all_refs(schema):
    """Get all ref links in a schema.

    Traverses an OAS schema (nested dicts/lists) and collects every
    string value stored under a "$ref" key.

    Parameters:
        schema: The schema to traverse. Only exact ``dict`` and ``list``
            instances are descended into (subclasses are ignored, as in
            the original).

    Returns:
        set: All of the ref links found during traversal.
    """
    refs = set()
    if type(schema) is dict:
        for key, value in schema.items():
            if key == "$ref" and type(value) is str:
                refs.add(value)
            refs.update(get_all_refs(value))
    elif type(schema) is list:
        for element in schema:
            refs.update(get_all_refs(element))
    return refs
6b0c57d080f44f823cfaf8614da5b51abb9c86a4
19,875
from datetime import datetime
def get_days_since_date(date):
    """Return whole days elapsed since *date* plus one, so a cutoff
    earlier the same day still counts as at least 1 day, without
    altering *date* itself."""
    elapsed = datetime.now() - date
    whole_days = int(elapsed.total_seconds() / 86400)
    return whole_days + 1
ac02cbf43c76f6b7e8bde82ebc2123cb2520e384
19,876
import subprocess
def get_ip_address():
    """Return the robot's primary IP address, or "N/A" on failure.

    Runs ``hostname -I`` and keeps the first address. The original's
    bare ``except:`` also swallowed SystemExit/KeyboardInterrupt; this
    narrows it to ``Exception``.
    """
    # shell=True is acceptable for this fixed command string; never
    # interpolate external input into it.
    cmd = "hostname -I | cut -d' ' -f1"
    try:
        ip = subprocess.check_output(cmd, shell=True).decode("utf-8")
    except Exception:
        ip = "N/A"
    return ip
873b775aa8faeb3bd4dde336df16e86ed6a8916a
19,877
def instance_property(func):
    """Decorator like ``property``, but the underlying function is only
    called once per instance; the result is cached on the instance in a
    '_'-prefixed attribute.
    """
    cache_name = '_{}'.format(func.__name__)
    def getter(self):
        # object.__getattribute__ is used instead of hasattr/getattr to
        # avoid problems when the class also overrides __getattr__.
        try:
            return object.__getattribute__(self, cache_name)
        except AttributeError:
            setattr(self, cache_name, func(self))
            return object.__getattribute__(self, cache_name)
    getter.__doc__ = func.__doc__
    return property(getter)
dab147e464dc27fb79d272e55fede935db6edfb7
19,878
def on_duplicate(insert, compiler, **kw):
    """Custom MySQL insert ON DUPLICATE KEY UPDATE support.

    Compiles the INSERT normally, then appends the update clause from
    the ``mysql_on_duplicate`` dialect kwarg when present.
    """
    statement = compiler.visit_insert(insert, **kw)
    clause = insert.dialect_kwargs.get('mysql_on_duplicate', None)
    if not clause:
        return statement
    return statement + ' ON DUPLICATE KEY UPDATE %s' % clause
7cc417016a5a904bdb5c94767b3435dc393c06c0
19,880
def is_sorted(l): """ test whether a list is sorted """ return l == sorted(l)
1cd90853067708c7e133e5d619917d172b21e041
19,881
def gen_all_q_colnam(all_q_names):
    """
    Generate a dict containing all the column names of the solution
    DataFrames.

    This is hardcoded for manifest uniformity, and assumes the standard
    setting of 10 queries.

    Parameters
    ----------
    all_q_names

    Returns
    -------
    """
    # Hard-coded column layouts for the 10 standard queries, in order.
    column_sets = [
        ['tname', 'year', 'HomeRun'],
        ['nameFirst', 'nameLast', 'birthYear', 'birthMonth', 'birthDay'],
        ['nameFirst', 'nameLast', 'tname'],
        ['tname', 'rank', 'W', 'L', 'nameFirst', 'nameLast'],
        ['tname'],
        ['tname', 'yearID', 'rank', 'W', 'L'],
        ['nameLast', 'nameFirst'],
        ['birthState', 'avg_weight', 'avg_height', 'avg_HomeRun', 'avg_Saves'],
        ['yearID', 'tname', 'HomeRun'],
        ['yearID', 'tname', 'rank', 'Games'],
    ]

    all_q_colnam = {all_q_names[i]: cols for i, cols in enumerate(column_sets)}

    assert len(set(all_q_colnam)) == len(all_q_names)
    return all_q_colnam
ea40ebd96ed585e6b56189d385e82f564b401100
19,882
def env_to_bool(input):
    """
    Convert a string from an environment variable into a boolean.

    Non-string values are passed through unchanged; any string other than
    "False"/"false" is treated as True.
    """
    if not isinstance(input, str):
        return input
    return input not in ("False", "false")
25f51e92c2daab935c17c1a85ae7b7510a25207b
19,883
def get_index(substring, kmer_list):
    """Map each string in the list to the index of the first occurrence of
    *substring* within it (-1 when absent, per str.find)."""
    positions = {}
    for kmer in kmer_list:
        positions[kmer] = kmer.find(substring)
    return positions
a1f31e3e01dde95c7e6b826fb9ca4893aab64ccf
19,884
import requests


def get_vegref_info(list_of_vegrefs):
    """Look up each road reference (vegref) against the NVDB API.

    :param list_of_vegrefs: A list containing the vegrefs
    :return: A list of JSON objects as dicts, one per successful lookup.
        Failed lookups are reported to stdout and skipped.
    """
    base_url = "https://www.vegvesen.no/nvdb/api/v2/veg?vegreferanse="
    gathered_veg = []
    for vegref in list_of_vegrefs:
        response = requests.get(base_url + vegref)
        if response.status_code != requests.codes.ok:
            print("Could not perform API call due to status code: ",
                  response.status_code, "on veggref: ", vegref)
            continue
        gathered_veg.append(response.json())
    return gathered_veg
2afa459e201e356660d2222557542bd223644b23
19,885
def cli(ctx, role_id):
    """Display information on a single role

    Output:

        Details of the given role.
        For example::

            {"description": "Private Role for Foo",
             "id": "f2db41e1fa331b3e",
             "model_class": "Role",
             "name": "Foo",
             "type": "private",
             "url": "/api/roles/f2db41e1fa331b3e"}
    """
    roles_client = ctx.gi.roles
    return roles_client.show_role(role_id)
72e0b8dc4d06d736e67bf1c4b8f70bd030c160b3
19,886
import os

def find_genome_directory(dirname):
    """Handle external non-docker installed biodata located relative to config directory.

    Parses ``<dirname>/tool-data/sam_fa_indices.loc`` (if present) for
    "index" lines mapping genome names to index paths, then builds docker
    ``host:container`` mount strings for every distinct *relative* genome
    directory found. Returns a (possibly empty) list of "path:path" strings.
    """
    mounts = []
    sam_loc = os.path.join(dirname, "tool-data", "sam_fa_indices.loc")
    # genome name -> index path, taken from lines of the form:
    #   index <name> ... <path>
    genome_dirs = {}
    if os.path.exists(sam_loc):
        with open(sam_loc) as in_handle:
            for line in in_handle:
                if line.startswith("index"):
                    parts = line.split()
                    genome_dirs[parts[1].strip()] = parts[-1].strip()
    # Deduplicate paths and sort for deterministic output order.
    for genome_dir in sorted(list(set(genome_dirs.values()))):
        # Special case used in testing -- relative paths
        if genome_dir and not os.path.isabs(genome_dir):
            # Strip the last three path components, then resolve relative to
            # the .loc file's directory to get the biodata root to mount.
            rel_genome_dir = os.path.dirname(os.path.dirname(os.path.dirname(genome_dir)))
            full_genome_dir = os.path.normpath(os.path.join(os.path.dirname(sam_loc), rel_genome_dir))
            mounts.append("%s:%s" % (full_genome_dir, full_genome_dir))
    return mounts
ac26b6feb13ba8f8f0f59565f33ec766ae60ff7f
19,887
def text_cleaner(l_values):
    """Tidy up scraped table text to make it more readable.

    (Translated from the original Portuguese docstring: "Funcao que arruma
    o texto, deixando mais visivel".)

    :param l_values: sequence of parsed HTML elements; the second element is
        expected to expose BeautifulSoup-style ``findAll``/``get_text``
        methods -- NOTE(review): confirm against the caller.
    :return: list of stripped ``<td>`` cell strings; the first entry is
        truncated at the character before its first ``-``. When no ``-`` is
        present, ``find`` returns -1 so the slice ``[:-2]`` drops the last
        two characters instead.
    """
    texto_clean = [value.get_text().strip() for value in l_values[1].findAll('td')]
    # Keep only the part of the first cell up to one char before the dash.
    texto_clean[0] = texto_clean[0][:texto_clean[0].find('-') - 1]
    return texto_clean
2c8806bebef52b9f8f1e9e715202f63e0d8fe9cb
19,888
def parse_time_hms(s):
    """Convert HH:MM:SS (or MM:SS, or plain seconds) to seconds."""
    # Horner-style fold: each colon-separated field shifts the running
    # total up by one base-60 place.
    total = 0.0
    for field in s.split(":"):
        total = total * 60 + float(field)
    return total
3424545387e4a4c20726e001dab4fd9e39686e6d
19,889
from collections.abc import Mapping

def combine_celltypes(df, cols_to_combine=None):
    """
    Function to sum related cell types into a single column

    Inputs:
        - df: pandas dataframe. Output of td.tumor_deconvolve()
        - cols_to_combine: dictionary. Keys are the desired names of any new
            cell type columns, values are an arary of current column names to
            combine under the key name
            - Default = dictionary for combining common cell types from LM22

    Outputs:
        - Pandas dataframe with columns combined as specified by the dictionary.
            All unmentioned column names are left as they are
    """
    # Validate the user-supplied mapping; fall back to the LM22 grouping
    # when none is given.
    if cols_to_combine is not None:
        if isinstance(cols_to_combine, Mapping):
            pass
        else:
            raise TypeError("cols_to_combine must be a dictionary")
    else:
        # Use LM22 as default
        print("WARNING: No dictionary defined for combining columns... Attempting to use default dict for LM22 signatures")
        cols_to_combine = {'B cells':['B cells naive', 'B cells memory'],
                        'CD4 T cells':['T cells CD4 naive', 'T cells CD4 memory resting', 'T cells CD4 memory activated','T cells follicular helper', 'T cells regulatory (Tregs)'],
                        'CD8 T cells':['T cells CD8'],
                        'NK cells':['NK cells resting', 'NK cells activated'],
                        'Macrophages':['Macrophages M0', 'Macrophages M1', 'Macrophages M2'],
                        'Mast cells':['Mast cells resting','Mast cells activated'],
                        'Dendritic cells':['Dendritic cells resting', 'Dendritic cells activated']
                        }

    # Work on a copy so the caller's DataFrame is never mutated.
    df2 = df.copy()
    for cell_type in cols_to_combine.keys():
        try:
            # Sum the member columns into the new column, then drop them.
            df2[cell_type] = df2[cols_to_combine[cell_type]].sum(axis=1)
            df2.drop(cols_to_combine[cell_type],axis=1,inplace=True)
        except KeyError as e:
            # Best-effort: a missing member column skips this group with a
            # warning rather than aborting the whole combine.
            print("WARNING: Failed to combine some columns: ")
            print("KeyError: "+str(e))
            pass

    return df2
1215e49ad263fbae33a9eb2d9971b50c92cb4438
19,890
from typing import Tuple
from typing import Optional


def split_domain_port(netloc: str) -> Tuple[str, Optional[str]]:
    """
    Splits the netloc into domain and port.

    >>> split_domain_port("example.com")
    ('example.com', None)
    >>> split_domain_port("example.com:80")
    ('example.com', '80')
    """
    # Split on the LAST colon so IPv6-style netlocs keep their colons in
    # the domain part.
    domain, sep, port = netloc.rpartition(":")
    if sep:
        return domain, port
    return netloc, None
ce6724b0b05d1b409976b67527ccd5c50fbcbe01
19,891
import math


def change_speed(input: float) -> float:
    """Map a linear input in [0.0, 1.0] to a cosine-eased output in
    [0.0, 1.0] (slow at both ends, fast in the middle)."""
    angle = (input + 1) * math.pi
    return math.cos(angle) / 2.0 + 0.5
b12ebb5480a57a0bcf27951f27dd2e15c809b681
19,892
def available_outputs(self):
    """get output vars that could be used to set :attr:`outputs`. The
    order may be unstable

    :type: tuple of :class:`.SymbolVar`"""
    saver_map = self._var2output_saver
    return tuple(saver_map.keys())
76fbe9e6b9dbc2eda8935095c64345d3f3841915
19,893
import os


def remove_path(filepath):
    """Returns the name of the file from a filepath"""
    _, filename = os.path.split(filepath)
    return filename
5a390c8c318a242a3a51828106368f3f1dbee09c
19,894
import re

# Compiled once at module load; the original compiled the pattern and
# discarded the result on every call.
_ID_CARD_RE = re.compile(r'[0-9]{17}[0-9xX]')


def check_format(id_card_no):
    """
    Check the format of an ID card number: exactly 17 digits followed by a
    digit or 'x'/'X'.

    :param id_card_no: candidate ID card number string
    :return: True if the whole string matches, else False

    Note: uses fullmatch, so a trailing newline no longer slips through
    (the original ``^...$`` pattern with re.match accepted "...x\\n").
    """
    return _ID_CARD_RE.fullmatch(id_card_no) is not None
30b2dabaa89890c3308cb467dbbad04608c9d676
19,895
import random
import secrets


def gen_otp_secret():
    """Generate a random 16-character base32 OTP secret.

    Uses the ``secrets`` module (CSPRNG) instead of ``random``: OTP secrets
    are security-sensitive and ``random`` is not cryptographically secure.
    """
    alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ234567"
    return "".join(secrets.choice(alphabet) for _ in range(16))
683e35c914baadb5907c9cf8dc67680f8deae57e
19,896
def to_unicode(string, encoding='utf-8'):
    """Convert byte string to unicode; non-bytes values pass through."""
    if isinstance(string, bytes):
        return str(string, encoding)
    return string
1372ddc1882ddfee28f4efb52695577404d827b2
19,897
def parse_value(key, value):
    """
    Parses .ini value and returns tuple of (key, value, type).

    - "{k1:v1,k2:v2}"  -> (key, dict, 'dictionary')
    - "a,b,c" or "k[]" -> (key-without-[], list-or-scalar, 'list')
    - anything else    -> (key, value, 'var')
    """
    # Parse dictionary. startswith also guards the empty string, which
    # previously raised IndexError on value[0].
    if value.startswith('{'):
        inner = value[1:-1]
        parsed = {}
        for pair in inner.split(','):
            parts = pair.split(':')
            # Tolerate a pair without ':' (previously an IndexError);
            # its value becomes the empty string.
            parsed[parts[0]] = parts[1] if len(parts) > 1 else ''
        return key, parsed, 'dictionary'

    # Parse single variable or list. A trailing "[]" forces list type.
    typ = 'var'
    if key.endswith('[]'):
        typ = 'list'
        key = key[:-2]
    items = value.split(',')
    if len(items) == 1:
        return key, value, typ
    return key, items, 'list'
193d0b19685231f726e5458e79717273b53b13b5
19,898
import random
import time


def get_uuid():
    """Generate unique work order number.

    Concatenates the current epoch time in milliseconds with a random
    6-digit suffix and returns the result as an int.
    """
    millis = int(time.time() * 1000)
    suffix = random.randrange(100000, 999999)
    return int("{}{}".format(millis, suffix))
bc15af565cf975ea934aeb3bc47aa10028aeb5e6
19,900
def img_denormalize(img, img_mean, img_std, img_mode='rgb'):
    """
    De-normalize the image array by multiplying by `img_std` and adding
    `img_mean`; the mean and std should correspond to `img_mode`.

    Args:
        img (ndarray): Normalized image array.
        img_mean (tuple[float]): mean value for each channel of image.
        img_std (tuple[float]): std value for each channel of image.
        img_mode (str): `rgb` or `bgr`, to specify the img mode.

    Returns:
        ndarray: de-normalized image array.
    """
    assert img_mode in ['rgb', 'bgr'], "image mode must be 'rgb' or 'bgr'."
    scaled = img * img_std
    return scaled + img_mean
1aa59c7956051e41b0c8621012b7e135e48165a2
19,901
def valid_string_int(text, minimum=None, maximum=None):
    """Validate a string representation of a non-negative integer.

    Returns True if *text* is all digits and the parsed value lies within
    the optional [minimum, maximum] bounds (each checked independently).

    Bug fixed: the original min-only and max-only branches fell through to
    a final ``return True`` when the bound check FAILED, so e.g. a value
    below ``minimum`` was accepted.
    """
    if not text.isdigit():
        return False
    value = int(text)
    if minimum is not None and value < minimum:
        return False
    if maximum is not None and value > maximum:
        return False
    return True
72906fb85a55786504baecd059499fb46bb1b261
19,902
def get_stats(cm):
    """Compute standard binary-classification metrics from a confusion matrix.

    TP/TN: pixels correctly classified as foreground/background.
    FP/FN: pixels wrongly classified as foreground/background.

    Metrics returned (each rounded to 4 decimals):
      - Recall:      TP / (TP + FN) -- fraction of true foreground detected
      - Precision:   TP / (TP + FP) -- fraction of detections that are correct
      - Specificity: TN / (TN + FP) -- fraction of true background detected
      - FPR:         FP / (FP + TN) -- background wrongly marked foreground
      - FNR:         FN / (TP + FN) -- foreground wrongly marked background
      - PWC:         percentage of wrongly classified pixels
      - FMeasure:    harmonic mean of Recall and Precision

    Higher is better for Recall/Precision/Specificity/FMeasure; lower is
    better for FPR/FNR/PWC.

    :param cm: iterable unpacking to (TP, FN, FP, TN)
    :return: dict mapping metric names to rounded values
    """
    TP, FN, FP, TN = cm

    actual_positive = TP + FN
    actual_negative = TN + FP
    predicted_positive = TP + FP
    total = TP + FP + FN + TN

    recall = TP / actual_positive
    precision = TP / predicted_positive
    specificity = TN / actual_negative
    fpr = FP / actual_negative
    fnr = FN / actual_positive
    pwc = 100.0 * (FN + FP) / total
    fmeasure = 2.0 * (recall * precision) / (recall + precision)

    return {
        'Recall': round(recall, 4),
        'Precision': round(precision, 4),
        'Specificity': round(specificity, 4),
        'FPR': round(fpr, 4),
        'FNR': round(fnr, 4),
        'PWC': round(pwc, 4),
        'FMeasure': round(fmeasure, 4),
    }
ecc647fdc72c4d74e93fccf4f2cf873e8c957328
19,903
def whitespace(string):
    """Count the leading space characters of *string*.

    Returns float("inf") for strings that are empty or all whitespace.
    Only leading ' ' characters are counted (not tabs) -- the original
    docstring claimed "front or behind", but the code only ever counted
    the front; the docstring is corrected here.
    """
    if string.strip() == "":
        return float("inf")
    # Leading-space count == length lost when stripping spaces on the left.
    return len(string) - len(string.lstrip(" "))
fe461426342cde6eeabc11916ed2a11fe097c1c1
19,904
def win_safe_name(cityname):
    """Edit the string so that it can be used as a valid file name in
    Windows: strip surrounding whitespace, turn a trailing '.' into '_',
    and replace every reserved character with '_'.
    """
    name = cityname.strip()
    if name.endswith("."):
        name = name[:-1] + "_"
    reserved = ("<", ">", ":", "/", "\\", "|", "?", "*", "\"")
    return "".join("_" if ch in reserved else ch for ch in name)
02e9f230b7e94a2ccd059ed6c0e10e1ab2c743e4
19,905
def bigend_2_int(p_bytes):
    """ Convert a big-endian byte sequence to an int """
    result = 0
    # Fold left-to-right: each byte shifts the accumulator up one base-256
    # place. Works for bytes, bytearray, or any iterable of int-like values.
    for byte in p_bytes:
        result = result * 256 + int(byte)
    return result
5c51a3752eec30804ab45185fb51c70be240a5b6
19,906
import torch


def loss_KL(p, q):
    """
    Generalized Kullback-Leibler divergence between two torch tensors.

    :param p: 1st tensor
    :param q: 2nd tensor
    :return: sum over elements of p*log(p/q) - p + q
    """
    pointwise = p * torch.log(p / q) - p + q
    return torch.sum(pointwise)
0e3d8ff6fa84f569c9c3f4532e33a179645e2252
19,907
import platform def _get_host_name(): """Gets the host name of this machine.""" return platform.node()
68c90eb282c22866e74545d8fb306d8057428e3e
19,909
def clean_url(string):
    """
    Given a url that doesn't conform to http://something.here make it
    look like that. "NULL", a bare "http://", and the empty string all
    become empty; anything already starting with "http" passes through.
    """
    if string in ("NULL", "http://"):
        return ""
    if not string or string.startswith("http"):
        return string
    return "http://" + string
b64ae25f4931988e93493003315a697c9591d21e
19,911
def check_any_str(list_to_check, input_string):
    """Check if any item in a list contains a given string.

    Parameters
    ----------
    list_to_check : list[str]
        A list of strings.
    input_string : str
        A string to look for inside the list items.

    Returns
    -------
    Boolean
        True if input_string occurs in at least one list item.
    """
    for candidate in list_to_check:
        if input_string in candidate:
            return True
    return False
d4c470281de2e2060ecdc575ed6935ef92d8886b
19,912
def parseMoClassName(className):
    """ Given a class name (aaaUserEp) returns tuple aaa,UserEp"""
    # Split at the first uppercase letter: everything before is the
    # package, everything from it onward is the class.
    for idx, ch in enumerate(className):
        if ch.isupper():
            return className[:idx], className[idx:]
    # No uppercase letter at all: whole name is the package.
    return className, ""
c611f9e19b64674fcbd62944ca7e1d9a8cfd62c3
19,913
import json


def dump_json(filename, data, **kwargs):
    """Serialize data as a JSON file.

    Parameters
    ----------
    filename : str
    data : list, dict, etc.
        Data to save. Check json.dump for more details; extra keyword
        arguments are forwarded to it.
    """
    with open(filename, 'w') as out_file:
        json.dump(data, out_file, **kwargs)
    return None
5a2f94021e640378e7924d3476238c3db67ed13b
19,915
import os


def add_directory(path, newdir="Results"):
    """
    Add a directory to the path, creating it if it does not exist.
    add_directory(".../XX.tif", "NewDir") returns ".../NewDir/XX.tif"

    :param path: file path whose directory component gets the new subdir
    :param newdir: name of the subdirectory to insert (default "Results")
    :return: the original filename relocated into the new subdirectory
    """
    fname = os.path.basename(path)
    dname = os.path.dirname(path)
    new_dname = os.path.join(dname, newdir)
    # exist_ok=True replaces the original check-then-create sequence
    # (os.path.exists + makedirs(exist_ok=False)), which could raise
    # FileExistsError if the directory appeared between the two calls.
    os.makedirs(new_dname, exist_ok=True)
    return os.path.join(new_dname, fname)
d0d86c3cecf6e3091ccac36153549be04a7f55ce
19,916