content: string (lengths 35 to 762k) | sha1: string (length 40) | id: int64 (0 to 3.66M)
def tokenize(lines, token='word'):
    """Split text lines into word or character tokens."""
    if token == 'word':
        return [line.split() for line in lines]
    elif token == 'char':
        return [list(line) for line in lines]
    else:
        print('ERROR: unknown token type: ' + token)
c30c8b3f1ea5d5752e17bc9fd514acaf097cba18
3,643,000
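A minimal usage sketch for the `tokenize` row above; the sample lines are hypothetical, not taken from the dataset:

lines = ['the time machine', 'by h g wells']
assert tokenize(lines) == [['the', 'time', 'machine'], ['by', 'h', 'g', 'wells']]
assert tokenize(lines, token='char')[0][:3] == ['t', 'h', 'e']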
import os

def get_versioned_persist(service):
    """Get a L{Persist} database with upgrade rules applied.

    Load a L{Persist} database for the given C{service} and upgrade or
    mark as current, as necessary.
    """
    persist = Persist(filename=service.persist_filename)
    upgrade_manager = UPGRADE_MANAGERS[service.service_name]
    if os.path.exists(service.persist_filename):
        upgrade_manager.apply(persist)
    else:
        upgrade_manager.initialize(persist)
    persist.save(service.persist_filename)
    return persist
5d636ff552cd693a701506377643f410f3384b29
3,643,001
def gravatar(environ):
    """
    Generate a gravatar link.
    """
    email = environ.get('tank.user_info', {}).get('email', '')
    return GRAVATAR % md5(email.lower()).hexdigest()
0464d409f4e0c1fef251927930618236146ac3f1
3,643,002
def keyrep(kspec, enc="utf-8"):
    """
    Instantiate a Key given a set of key/word arguments

    :param kspec: Key specification, arguments to the Key initialization
    :param enc: The encoding of the strings. If it's JSON which is the default
        the encoding is utf-8.
    :return: Key instance
    """
    if enc:
        _kwargs = {}
        for key, val in kspec.items():
            if isinstance(val, str):
                _kwargs[key] = val.encode(enc)
            else:
                _kwargs[key] = val
    else:
        _kwargs = kspec

    if kspec["kty"] == "RSA":
        item = RSAKey(**_kwargs)
    elif kspec["kty"] == "oct":
        item = SYMKey(**_kwargs)
    elif kspec["kty"] == "EC":
        item = ECKey(**_kwargs)
    else:
        item = Key(**_kwargs)
    return item
25524953376a83562859b33a91ba10ae85c2c25d
3,643,003
import math
import numpy as np
from numpy.random import rand

def aa2matrix(axis, angle, radians=True, random=False):
    """
    Given an axis and an angle, return a 3x3 rotation matrix.
    Based on:
    https://en.wikipedia.org/wiki/Rotation_matrix#Axis_and_angle

    Args:
        axis: a vector about which to perform a rotation
        angle: the angle of rotation
        radians: whether the supplied angle is in radians (True) or in degrees (False)
        random: whether or not to choose a random rotation matrix. If True, the
            axis and angle are ignored, and a random orientation is generated

    Returns:
        a 3x3 numpy array representing a rotation matrix
    """
    # Convert to radians if necessary
    if radians is not True:
        angle = math.radians(angle)
    # Allow for generation of random rotations
    if random is True:
        axis = [rand(), rand(), rand()]
        angle = rand() * math.pi * 2
    # Ensure axis is a unit vector
    axis = axis / np.linalg.norm(axis)
    # Define quantities which are reused
    x = np.real(axis[0])
    y = np.real(axis[1])
    z = np.real(axis[2])
    c = math.cos(angle)
    s = math.sin(angle)
    C = 1 - c
    # Define the rotation matrix
    Q = np.zeros([3, 3])
    Q[0][0] = x*x*C + c
    Q[0][1] = x*y*C - z*s
    Q[0][2] = x*z*C + y*s
    Q[1][0] = y*x*C + z*s
    Q[1][1] = y*y*C + c
    Q[1][2] = y*z*C - x*s
    Q[2][0] = z*x*C - y*s
    Q[2][1] = z*y*C + x*s
    Q[2][2] = z*z*C + c
    return Q
d41460663edd36e5da1255636514468180e20511
3,643,004
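A quick sanity check, not part of the dataset row above, for `aa2matrix`: a 90-degree rotation about the z-axis should map the x-axis onto the y-axis.

import numpy as np
R = aa2matrix([0, 0, 1], 90, radians=False)
assert np.allclose(R @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])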
def expand_value_range(value_range_expression):
    """Expand the value range expression.

    Args:
        value_range_expression: Value range or expression to expand.

    Return:
        iterable.
    """
    if type(value_range_expression) is str:
        # Grid search
        if value_range_expression.startswith('np.arange'):
            value_range_expression = arange(value_range_expression)
        # Random search
        elif value_range_expression.startswith('np.random'):
            raise NotImplementedError('Random search space '
                                      'not implemented yet')
    # If not an iterable, make it an iterable
    try:
        iter(value_range_expression)
    except TypeError:
        value_range_expression = [value_range_expression]
    return value_range_expression
bddfc2fd4ed65101ecb3d8ca2bc5d11de58374bd
3,643,005
import pandas as pd

def date_range(df):
    """Takes the dataframe and returns its date range.

    Example here:
    http://pandas.pydata.org/pandas-docs/stable/timeseries.html
    Returns as Days
    """
    start_date = df.tail(1)['date']
    start = pd.Timestamp.date(list(start_date.to_dict().values())[0])
    end_date = df.head(1)['date']
    end = pd.Timestamp.date(list(end_date.to_dict().values())[0])
    rng = pd.date_range(start, end)
    return rng
1b577b29ccc7ed6751e8162f11b076042178c590
3,643,006
import datetime

def last_hit_timestamp(hit_count_rules, month):
    """
    Get list of last hit timestamps for rules.
    :param hit_count_rules: dictionary which contains the json response with all hit count rules
    :param month: number of months elapsed since the rule was last triggered
    :return: list of rules older than the value in param month (contains rule name, id, type
        and access policy name)
    """
    rule = []
    for i in hit_count_rules:
        last_refresh = datetime.datetime.strptime(i["lastFetchTimeStamp"], '%Y-%m-%dT%H:%M:%SZ')
        limit = last_refresh - datetime.timedelta(month * 365 / 12)
        if i["lastHitTimeStamp"] != " ":
            last_hit = datetime.datetime.strptime(i["lastHitTimeStamp"], '%Y-%m-%dT%H:%M:%SZ')
            if last_hit < limit:
                rule.append(i["rule"])
    return rule
048d63e9ad77bf19974b4506aedb66d98fb84403
3,643,007
def url(should_be=None):
    """Like the default ``url()``, but can be called without arguments,
    in which case it returns the current url.
    """
    if should_be is None:
        return get_browser().get_url()
    else:
        return twill.commands.url(should_be)
a9faa937ffe994136d16e5c86082f22600368431
3,643,008
import numpy as np
from sklearn.neighbors import LocalOutlierFactor

def remove_outliers(X_train, y_train):
    """
    This function deletes outliers from the given numpy arrays, and returns
    clean versions of them.

    Parameters
    ----------
    X_train: dataset with k features to remove outliers from
    y_train: dataset with k features to remove outliers from
    """
    clf = LocalOutlierFactor(n_neighbors=2)
    out1 = clf.fit_predict(X_train)
    out2 = clf.fit_predict(y_train)
    # fit_predict returns 1 for inliers and -1 for outliers; keep rows that
    # are inliers in both arrays (sum == 2)
    indexes = np.argwhere(out1 + out2 != 2)
    X_train = np.delete(X_train, indexes, axis=0)
    y_train = np.delete(y_train, indexes, axis=0)
    return X_train, y_train
35c86ba50ca6398ec70e95c07091bb1ffc6811d2
3,643,009
from tqdm import tqdm
from shapely.geometry import shape, mapping

def query_data(regions, filepath_nl, filepath_lc, filepath_pop):
    """
    Query raster layer for each shape in regions.
    """
    shapes = []
    csv_data = []
    for region in tqdm(regions):
        geom = shape(region['geometry'])
        population = get_population(geom, filepath_pop)
        pop_density_km2, area_km2 = get_density(geom, population, 'epsg:4326', 'epsg:3857')
        shapes.append({
            'type': region['type'],
            'geometry': mapping(geom),
            # 'id': region['id'],
            'properties': {
                'population': population,
                'pop_density_km2': pop_density_km2,
                'area_km2': area_km2,
                'geotype': define_geotype(pop_density_km2),
                'GID_2': region['properties']['GID_2'],
                'GID_3': region['properties']['GID_3'],
            }
        })
        csv_data.append({
            'population': population,
            'pop_density_km2': pop_density_km2,
            'area_km2': area_km2,
            'geotype': define_geotype(pop_density_km2),
            'GID_2': region['properties']['GID_2'],
            'GID_3': region['properties']['GID_3'],
        })
    return shapes, csv_data
a4ecd234d04fc1cf677d276afe11c716a1c3b854
3,643,010
from bs4 import BeautifulSoup
import re

def scrape_urls(html_text, pattern):
    """Extract URLs from raw html based on regex pattern"""
    soup = BeautifulSoup(html_text, "html.parser")
    anchors = soup.find_all("a")
    urls = [a.get("href") for a in anchors]
    # guard against anchors without an href (a.get returns None)
    return [url for url in urls if url is not None and re.match(pattern, url) is not None]
dfba40df7894db91575b51a82d89fef0f824d362
3,643,011
from typing import List
import torch.nn as nn

def get_num_weight_from_name(model: nn.Module, names: List[str]) -> List[int]:
    """Get list of number of weights from list of names of modules."""
    numels = []
    for n in names:
        module = multi_getattr(model, n)
        num_weights = module.weight.numel()
        numels.append(num_weights)
    return numels
ae6c3bfb5abe3522ff6d276cde052a5270e5741e
3,643,012
from collections import OrderedDict

def _categories_level(keys):
    """use the Ordered dict to implement a simple ordered set
    return each level of each category
    [[key_1_level_1, key_2_level_1], [key_1_level_2, key_2_level_2]]
    """
    res = []
    for i in zip(*(keys)):
        tuplefied = _tuplify(i)
        res.append(list(OrderedDict([(j, None) for j in tuplefied])))
    return res
35f62244c3d3b893008d7ba7b8a9f651528c198e
3,643,013
def to_usd(my_price):
    """
    Converts a numeric value to a usd-formatted string, for printing and display purposes.

    Param: my_price (int or float) like 4000.444444

    Example: to_usd(4000.444444)

    Returns: $4,000.44
    """
    return f"${my_price:,.2f}"
a8959cdca7f011a435e35b4a4a5d2d43911a55da
3,643,014
def computeNodeDerivativeHermiteLagrange(cache, coordinates, node1, derivative1, scale1, node2, scale2):
    """
    Computes the derivative at node2 from quadratic Hermite-Lagrange interpolation of
    node1 value and derivative1 to node2 value.
    :param cache: Field cache to evaluate in.
    :param coordinates: Coordinates field.
    :param node1, node2: Start and end nodes.
    :param derivative1: Node value label for derivative at node1.
    :param scale1, scale2: Scaling to apply to derivatives at nodes, e.g. -1.0 to reverse.
    :return: dx_dxi at node2
    """
    cache.setNode(node1)
    result, v1 = coordinates.getNodeParameters(cache, -1, Node.VALUE_LABEL_VALUE, 1, 3)
    result, d1 = coordinates.getNodeParameters(cache, -1, derivative1, 1, 3)
    d1 = [d*scale1 for d in d1]
    cache.setNode(node2)
    result, v2 = coordinates.getNodeParameters(cache, -1, Node.VALUE_LABEL_VALUE, 1, 3)
    d2 = interpolateHermiteLagrangeDerivative(v1, d1, v2, 1.0)
    d2 = [d*scale2 for d in d2]
    return d2
7eb98502341e94e277b4d7b98b68293ff28f395b
3,643,015
import numpy as np

def _step5(state):
    """
    Construct a series of alternating primed and starred zeros as follows.
    Let Z0 represent the uncovered primed zero found in Step 4. Let Z1 denote
    the starred zero in the column of Z0 (if any). Let Z2 denote the primed
    zero in the row of Z1 (there will always be one). Continue until the
    series terminates at a primed zero that has no starred zero in its
    column. Unstar each starred zero of the series, star each primed zero
    of the series, erase all primes and uncover every line in the matrix.
    Return to Step 3
    """
    count = 0
    path = state.path
    path[count, 0] = state.Z0_r
    path[count, 1] = state.Z0_c

    while True:
        # Find the first starred element in the col defined by the path.
        row = np.argmax(state.marked[:, path[count, 1]] == 1)
        if not state.marked[row, path[count, 1]] == 1:
            # Could not find one
            break
        else:
            count += 1
            path[count, 0] = row
            path[count, 1] = path[count - 1, 1]

        # Find the first prime element in the row defined by the
        # first path step
        col = np.argmax(state.marked[path[count, 0]] == 2)
        if state.marked[row, col] != 2:
            col = -1
        count += 1
        path[count, 0] = path[count - 1, 0]
        path[count, 1] = col

    # Convert paths
    for i in range(count + 1):
        if state.marked[path[i, 0], path[i, 1]] == 1:
            state.marked[path[i, 0], path[i, 1]] = 0
        else:
            state.marked[path[i, 0], path[i, 1]] = 1

    state._clear_covers()
    # Erase all prime markings
    state.marked[state.marked == 2] = 0
    return _step3
4d6e50164724b6fdaa42fa41423677dd80500a3e
3,643,016
import numpy as np

def encode_ascii_xml_array(data):
    """Encode an array-like container of strings as fixed-length 7-bit ASCII
    with XML-encoding for characters outside of 7-bit ASCII.
    """
    if isinstance(data, np.ndarray) and \
            data.dtype.char == STR_DTYPE_CHAR and \
            data.dtype.itemsize > 0:
        return data
    convert = lambda s: encode_ascii_xml(s) if s is not None else ''
    # materialize as a list so the data can be iterated more than once
    # (python3's map is a lazy iterator)
    ascii_data = list(map(convert, data))
    fixed_len = max(len(s) for s in ascii_data)
    fixed_len = max(1, fixed_len)
    dtype = '%s%d' % (STR_DTYPE_CHAR, fixed_len)
    return np.array(ascii_data, dtype=dtype)
a9baf0ca562b78ce36c49e1d64c8f8a9015df097
3,643,017
from pathlib import Path
from typing import List, Optional, Union
import queue

def download_dataset(
    period: str,
    output_dir: Union[Path, str],
    fewer_threads: bool,
    datasets_path: Optional[Union[Path, str]] = None,
) -> List[Path]:
    """Download files from the given dataset with the provided selections.

    Args:
        period: Name of the period to be downloaded.
        output_dir: Path to where the data should be stored.
        fewer_threads: If True, reduce the number of threads by half.
        datasets_path: Path to the dataset configuration file. Default: None, in
            which case the files will be taken from those defined in the package.
    Returns:
        List of the files stored for this period.
    """
    # Validation
    output_dir = Path(output_dir)
    if datasets_path:
        datasets_path = Path(datasets_path)
    # Setup the dataset
    dataset = _extract_dataset_from_yaml(period=period, datasets_path=datasets_path)
    # Setup
    q: FilePairQueue = queue.Queue()
    queue_filler = DatasetDownloadFiller(
        dataset=dataset, output_dir=output_dir, q=q,
    )
    download(queue_filler=queue_filler, q=q, fewer_threads=fewer_threads)

    # Return the files that are stored corresponding to this period.
    period_specific_dir = output_dir / dataset.data_type / str(dataset.year) / dataset.period
    period_files = sorted(Path(period_specific_dir).glob(f"**/{dataset.filename}"))
    logger.info(f"period_specific_dir: {period_specific_dir}, number of files: {len(period_files)}")

    # Write out the file list
    filelist = Path(output_dir) / "filelists" / f"{dataset.period}{dataset.file_type}.txt"
    filelist.parent.mkdir(exist_ok=True, parents=True)
    # Add the suffix to access the ROOT file if it's contained in a zip archive.
    suffix = ""
    if ".zip" in dataset.filename:
        suffix = "#AliAOD.root" if dataset.file_type == "AOD" else "#AliESDs.root"
    with open(filelist, "w") as f:
        # One file per line.
        f.write("\n".join([f"{p}{suffix}" for p in period_files]))

    return period_files
0c98956bb54f6f948a31097e47ba6008e91ebefc
3,643,018
def monospaced(fields, context):
    """
    Make text monospaced.
    In HTML: use tags
    In Markdown: use backticks
    In Text: use Unicode characters
    """
    content = fields[0]
    target = context['target']
    if target == 'md':
        return wrapper('`')([content], context)
    if target == 'html':
        multiline = False
        for chunk in content:
            if type(chunk) is str and '\n' in chunk:
                multiline = True
                break
        if multiline:
            tag = 'pre'
        else:
            tag = 'code'
        return taggifier(tag)([content], context)
    if target == 'txt':
        return keymapper('monospaced')([content], context)
eed91b414ce8cb486b115d0d203db4e7ed81e5d5
3,643,019
def indexview(request):
    """ initial page shows all the domains in columns """
    domdb = Domain.objects
    if not request.user.has_perm('editapp.see_all'):
        # only see mine
        domdb = domdb.filter(owner__username=request.user.username)
    domains = [d.domain for d in domdb.order_by('domain')]
    # show in four columns, so slice into four arrays
    dslice = int((len(domains)+3)/4)
    c1, c2, c3, c4 = [[d for d in domains[n*dslice:(n+1)*dslice]] for n in range(4)]
    return render(request, 'editapp/index.html', {
        'c1': c1, 'c2': c2, 'c3': c3, 'c4': c4,
        'bpnav': bpnav(request, 'index')
    })
b99e8b3499f7d7a282b5a93606dddf7527a5e93b
3,643,020
import numpy as np

def MC_swap(alloy, N, E, T):
    """
    Randomly selects an atom and one of its neighbours in a matrix and
    calculates the change in energy if the two atoms were swapped.
    The following assignment is used to represent the neighbouring directions:
    1 = up
    2 = right
    3 = down
    4 = left
    """
    kT = 8.617332*10**-5*T
    random_atom = np.random.randint(0, N, 2)
    atom1 = alloy[random_atom[0], random_atom[1]]
    random_neighbour = np.random.randint(1, 5, 1)
    # Select appropriate neighbour
    if random_neighbour == 1:
        row2 = (random_atom[0]-2) % N
        column2 = random_atom[1]
    elif random_neighbour == 2:
        row2 = random_atom[0]
        column2 = (random_atom[1]) % N
    elif random_neighbour == 3:
        row2 = (random_atom[0]) % N
        column2 = random_atom[1]
    else:
        row2 = random_atom[0]
        column2 = (random_atom[1]-2) % N  # column index, matching the other branches
    atom2 = alloy[row2, column2]
    if atom1 == atom2:
        e = 0
    else:
        # Need to calculate the energy before and after atoms one and two swap
        # Atom 1
        up1 = (random_atom[0]-2) % N
        down1 = (random_atom[0] % N)
        left1 = (random_atom[1]-2) % N
        right1 = (random_atom[1] % N)
        # Atom 2
        up2 = (row2-2) % N
        down2 = (row2 % N)
        left2 = (column2-2) % N
        right2 = (column2 % N)
        # Change in energy
        Bonds1 = alloy[down1, random_atom[1]] + alloy[up1, random_atom[1]] + alloy[random_atom[0], right1] + alloy[random_atom[0], left1]
        Bonds2 = alloy[down2, column2] + alloy[up2, column2] + alloy[row2, right2] + alloy[row2, left2]
        # Count number of A-B bonds for atoms 1 and 2
        if atom1 == 0:
            Initial1 = Bonds1
            End1 = 4 - Bonds1
            Initial2 = 4 - Bonds2
            End2 = Bonds2
        else:
            Initial1 = 4 - Bonds1
            End1 = Bonds1
            Initial2 = Bonds2
            End2 = 4 - Bonds2
        e = E*(End1+End2-Initial1-Initial2)  # Energy difference for swapping atoms
    # Swapping atoms if there is enough energy to do so
    if e < 0:
        alloy[random_atom[0], random_atom[1]] = atom2
        alloy[row2, column2] = atom1
    elif np.exp(-e/kT) > np.random.uniform(0, 1):
        alloy[random_atom[0], random_atom[1]] = atom2
        alloy[row2, column2] = atom1
    return alloy
aea84cd605389e480d89e78fcca9806bc68e0c83
3,643,021
def _try_type(value, dtype):
    """
    Examples
    --------
    >>> _try_type("1", int)
    1
    >>> _try_type(1.0, int)
    1
    >>> _try_type("ab", float)
    'ab'
    """
    try:
        return dtype(value)
    except ValueError:
        return value
4a188e57dfafca96e6cd8a815dbbb162c74df01b
3,643,022
from datetime import datetime, timedelta

def get_all_codes(date=None):
    """
    Get the list of all stock codes for a given trading day. If no date is
    specified, search backwards from the current date until a day with data
    is found, and return the stock code list for that trading day.
    :param date: date
    :return: list of stock codes
    """
    datetime_obj = datetime.now()
    if date is None:
        date = datetime_obj.strftime('%Y-%m-%d')
    codes = []
    while len(codes) == 0:
        code_cursor = DB_CONN.basic.find(
            {'date': date},
            projection={'code': True, '_id': False})
        codes = [x['code'] for x in code_cursor]
        datetime_obj = datetime_obj - timedelta(days=1)
        date = datetime_obj.strftime('%Y-%m-%d')
    return codes
b5d861f1991763e8196f1f336faffefc00b58df4
3,643,023
def cluster_config(request_data, op_ctx: ctx.OperationContext):
    """Request handler for cluster config operation.

    Required data: cluster_name
    Optional data and default values: org_name=None, ovdc_name=None

    (data validation handled in broker)

    :return: Dict
    """
    _raise_error_if_pks_not_enabled()
    cluster, broker = _get_cluster_info(request_data, op_ctx, telemetry=False)  # noqa: E501
    telemetry_handler.record_user_action_details(
        cse_operation=CseOperation.PKS_CLUSTER_CONFIG,
        cse_params=_get_telemetry_data(request_data, cluster))
    return broker.get_cluster_config(data=request_data)
985e9633d54c0b7ccfc235f6c34bb4d4c5086ebf
3,643,024
import os
import numpy as np

def compute_pad_value(input_dir, list_IDs):
    """
    Computes the minimum pixel intensity of the entire dataset for the pad value
    (if it's not 0)
    Args:
        input_dir: directory to input images
        list_IDs: list of filenames
    """
    print("Computing min/pad value...")
    # iterating through entire dataset
    min_list = []
    for id in list_IDs:
        x_train = load_data(os.path.join(input_dir, id))
        min_list.append(x_train.min())
    return np.asarray(min_list).min()
68d93bf50c8653c42d22dac2382047f18a23c88e
3,643,025
from logging import getLogger
from .pyazureutils_errors import PyazureutilsError

def iotcentral_cli_handler(args):
    """
    CLI entry point for command: iotcentral
    """
    logger = getLogger(__name__)
    try:
        if args.action == "register-device":
            status = _action_register_device(args)
    except PyazureutilsError as exc:
        logger.error("Operation failed with %s: %s", type(exc).__name__, exc)
        return STATUS_FAILURE
e5c78f24c459ff45ab8a88198697eae0a9bb7abe
3,643,026
def kelly_kapowski(s, g, w, its=45, r=0.025, m=1.5, **kwargs):
    """
    Compute cortical thickness using the DiReCT algorithm.

    Diffeomorphic registration-based cortical thickness based on probabilistic
    segmentation of an image. This is an optimization algorithm.

    Arguments
    ---------
    s : ANTsimage
        segmentation image

    g : ANTsImage
        gray matter probability image

    w : ANTsImage
        white matter probability image

    its : integer
        convergence params - controls iterations

    r : scalar
        gradient descent update parameter

    m : scalar
        gradient field smoothing parameter

    kwargs : keyword arguments
        anything else, see KellyKapowski help in ANTs

    Returns
    -------
    ANTsImage

    Example
    -------
    >>> import ants
    >>> img = ants.image_read( ants.get_ants_data('r16') ,2)
    >>> img = ants.resample_image(img, (64,64),1,0)
    >>> mask = ants.get_mask( img )
    >>> segs = ants.kmeans_segmentation( img, k=3, kmask = mask)
    >>> thick = ants.kelly_kapowski(s=segs['segmentation'], g=segs['probabilityimages'][1],
    ...                             w=segs['probabilityimages'][2], its=45, r=0.5, m=1)
    """
    if isinstance(s, iio.ANTsImage):
        s = s.clone('unsigned int')

    d = s.dimension
    outimg = g.clone()
    kellargs = {'d': d,
                's': s,
                'g': g,
                'w': w,
                'c': its,
                'r': r,
                'm': m,
                'o': outimg}
    for k, v in kwargs.items():
        kellargs[k] = v

    processed_kellargs = utils._int_antsProcessArguments(kellargs)
    libfn = utils.get_lib_fn('KellyKapowski')
    libfn(processed_kellargs)
    return outimg
809d119d5691e64504671a4915525a109d0ae375
3,643,027
import pandas as pd

def get_word_count(frame, pattern_list, group_by_name):
    """
    Compute word count and return a dataframe
    :param frame:
    :param pattern_list:
    :param group_by_name:
    :return: frame with count or None if pattern_list is empty
    """
    if not pattern_list or len(pattern_list) == 0:
        return None
    else:
        return pd.DataFrame(frame[frame.words.isin(pattern_list)]
                            .groupby(group_by_name).words.value_counts()
                            .to_frame())
06a1c82b387bfc2194e3d4ee0a4526e5b4e3f800
3,643,028
def parse(text, from_timezone=None):
    """
    :rtype: TimeeDT
    """
    timee_dt = None
    if from_timezone:
        timee_dt = parse_with_maya(text, timezone=from_timezone)
        return timee_dt
    else:
        for parse_method in parsing_methods():
            result = parse_method(text)
            if result is not None:
                timee_dt = result
                break
        return timee_dt
c7a8b7819031ee7f97c54c9903f19d4b24112c4a
3,643,029
import collections

def _command_line_objc_copts(objc_fragment):
    """Returns copts that should be passed to `clang` from the `objc` fragment.

    Args:
        objc_fragment: The `objc` configuration fragment.

    Returns:
        A list of `clang` copts, each of which is preceded by `-Xcc` so that
        they can be passed through `swiftc` to its underlying ClangImporter
        instance.
    """
    # In general, every compilation mode flag from native `objc_*` rules should
    # be passed, but `-g` seems to break Clang module compilation. Since this
    # flag does not make much sense for module compilation and only touches
    # headers, it's ok to omit.
    clang_copts = objc_fragment.copts + objc_fragment.copts_for_current_compilation_mode
    return collections.before_each("-Xcc", [copt for copt in clang_copts if copt != "-g"])
8c55d1297b0aa116b9f6dc859cad1dfda1901f00
3,643,030
import logging
import urllib

def handle_incoming_mail(addr=None):
    """Handle an incoming email by making a task to examine it.

    This code checks some basic properties of the incoming message to make
    sure that it is worth examining. Then it puts all the relevant fields
    into a dict and makes a new Cloud Task which is further processed in
    python 3 code.
    """
    logging.info('Request Headers: %r', flask.request.headers)
    logging.info('\n\n\nPOST for InboundEmail and addr is %r', addr)
    if addr != settings.INBOUND_EMAIL_ADDR:
        logging.info('Message not sent directly to our address')
        return {'message': 'Wrong address'}
    if flask.request.content_length > MAX_BODY_SIZE:
        logging.info('Message too big, ignoring')
        return {'message': 'Too big'}

    msg = get_incoming_message()

    precedence = msg.get('precedence', '')
    if precedence.lower() in ['bulk', 'junk']:
        logging.info('Precedence: %r indicates an autoresponder', precedence)
        return {'message': 'Wrong precedence'}

    from_addrs = (_extract_addrs(msg.get('x-original-from', '')) or
                  _extract_addrs(msg.get('from', '')))
    if from_addrs:
        from_addr = from_addrs[0]
    else:
        logging.info('could not parse from addr')
        return {'message': 'Missing From'}

    in_reply_to = msg.get('in-reply-to', '')

    body = u''
    for part in msg.walk():
        # We only process plain text emails.
        if part.get_content_type() == 'text/plain':
            body = part.get_payload(decode=True)
            if not isinstance(body, unicode):
                body = body.decode('utf-8')
            break  # Only consider the first text part.

    to_addr = urllib.unquote(addr)
    subject = msg.get('subject', '')
    task_dict = {
        'to_addr': to_addr,
        'from_addr': from_addr,
        'subject': subject,
        'in_reply_to': in_reply_to,
        'body': body,
    }
    logging.info('task_dict is %r', task_dict)
    response = call_py3_task_handler('/tasks/detect-intent', task_dict)

    if response.status_code and response.status_code != 200:
        logging.warning('Handoff to py3 failed.')
        flask.abort(400)

    return {'message': 'Done'}
9bef81cf818d433cc833e64b5291b5c371605424
3,643,031
def split(df, partition, column):
    """
    :param df: The dataframe to split
    :param partition: The partition to split
    :param column: The column along which to split
    :returns: A tuple containing a split of the original partition
    """
    dfp = df[column][partition]
    if column in categorical:
        values = dfp.unique()
        lv = set(values[:len(values)//2])
        rv = set(values[len(values)//2:])
        return dfp.index[dfp.isin(lv)], dfp.index[dfp.isin(rv)]
    else:
        median = dfp.median()
        dfl = dfp.index[dfp < median]
        dfr = dfp.index[dfp >= median]
        return (dfl, dfr)
8d87d025695a0a2dde681e1abbbf0f5acccdc914
3,643,032
import numpy as np

def get_gaussian_kernel(l=5, sig=1.):
    """
    creates gaussian kernel with side length l and a sigma of sig
    """
    ax = np.linspace(-(l - 1) / 2., (l - 1) / 2., l)
    xx, yy = np.meshgrid(ax, ax)
    kernel = np.exp(-0.5 * (np.square(xx) + np.square(yy)) / np.square(sig))
    return kernel / np.sum(kernel)
71982928ee89d3ac98ae8d74dcc079dd2c4ca0d8
3,643,033
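A quick check, not part of the dataset row above, for `get_gaussian_kernel`: the kernel is normalized, so its entries sum to one and the peak sits at the centre.

k = get_gaussian_kernel(l=5, sig=1.)
assert abs(k.sum() - 1.0) < 1e-9
assert k[2, 2] == k.max()  # centre of a 5x5 kernel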
def get_data_tbl(path, tblname):
    """Wrapper function around @merge_json"""
    files = get_annon_db_file(path, tblname)
    log.info("files: {}".format(files))
    K, V = common.merge_json(files)
    return K, V
55381098b1a702a5497a965169b0588192e0a439
3,643,034
import numpy as np

def costes_coloc(im_1, im_2, psf_width=3, n_scramble=1000, thresh_r=0.0,
                 roi=None, roi_method='all', do_manders=True):
    """
    Perform Costes colocalization analysis on a pair of images.

    Parameters
    ----------
    im_1: array_like
        Intensity image for colocalization. Must be the same shape as `im_2`.
    im_2: array_like
        Intensity image for colocalization. Must be the same shape as `im_1`.
    psf_width: int, default 3
        Width, in pixels of the point spread function.
    n_scramble: int, default 1000
        Number of scrambled image comparisons to do to get statistics.
    thresh_r: float, default 0.0
        Threshold Pearson r value to be considered colocalized.
    roi: array_like, dtype bool, default None
        Boolean image the same shape as `im_1` and `im_2` that is True
        for pixels within the ROI.
    roi_method: str, default 'all'
        If 'all', all pixels of a given subimage must be within the ROI
        for the subimage itself to be considered part of the ROI. If
        'any', if any one pixel is within the ROI, the subimage is
        considered part of the ROI.
    do_manders: bool, default True
        If True, compute the Manders coefficients.

    Returns
    -------
    output: A CostesColocalization instance.
        The CostesColocalization instance has the following attributes.
            im_1, im_2, psf_width, n_scramble, thresh_r, roi,
                roi_method: As in the input parameters.
            a: slope of the regression line I_2 = a * I_1 + b
            b: intercept of regression line I_2 = a * I_1 + b
            M_1: Manders coefficient for image 1
            M_2: Manders coefficient for image 2
            pearson_r: Pearson correlation coefficient of the pixels
                in the two images.
            p_coloc: The probability of colocalization being present
                in the two images.
    """
    # Make float mirrored boundaries in preparation for scrambling
    im_1_mirror = mirror_edges(im_1, psf_width).astype(float)
    im_2_mirror = mirror_edges(im_2, psf_width).astype(float)

    # Set up ROI
    if roi is None:
        roi = np.ones_like(im_1, dtype='bool')

    # Rename images to be sliced ROI and convert to float
    im_1 = im_1[roi].astype(float)
    im_2 = im_2[roi].astype(float)

    # Mirror ROI at edges
    roi_mirror = mirror_edges(roi, psf_width)

    # Compute the blocks that we'll scramble
    blocks_1 = im_to_blocks(im_1_mirror, psf_width, roi_mirror, roi_method)
    blocks_2 = im_to_blocks(im_2_mirror, psf_width, roi_mirror, roi_method)

    # Compute the Pearson coefficient
    pearson_r = _pearson_r(blocks_1.ravel(), blocks_2.ravel())

    # Do image scrambling and r calculations
    r_scr = scrambled_r(blocks_1, blocks_2, n=n_scramble)

    # Compute percent chance of coloc
    p_coloc = (r_scr < pearson_r).sum() / n_scramble

    # Now do work to compute adjusted Manders's coefficients
    if do_manders:
        # Get the linear relationship between im_2 and im_1
        a, b = _odr_linear(im_1.ravel(), im_2.ravel())

        # Perform threshold calculation
        thresh_1 = _find_thresh(im_1, im_2, a, b, thresh_r=thresh_r)
        thresh_2 = a * thresh_1 + b

        # Compute Costes's update to the Manders's coefficients
        inds = (im_1 > thresh_1) & (im_2 > thresh_2)
        M_1 = im_1[inds].sum() / im_1.sum()
        M_2 = im_2[inds].sum() / im_2.sum()

        # Toss results into class for returning
        return _CostesColocalization(
            im_1=im_1, im_2=im_2, roi=roi, roi_method=roi_method,
            psf_width=psf_width, n_scramble=n_scramble, thresh_r=thresh_r,
            thresh_1=thresh_1, thresh_2=thresh_2, a=a, b=b, M_1=M_1,
            M_2=M_2, r_scr=r_scr, pearson_r=pearson_r, p_coloc=p_coloc)
    else:
        return _CostesColocalization(
            im_1=im_1, im_2=im_2, roi=roi, roi_method=roi_method,
            psf_width=psf_width, n_scramble=n_scramble, thresh_r=None,
            thresh_1=None, thresh_2=None, a=None, b=None, M_1=None,
            M_2=None, r_scr=r_scr, pearson_r=pearson_r, p_coloc=p_coloc)
34776cc4e8845e696f61750718736feae6105dee
3,643,035
def get_induced_dipole_count(efpobj):
    """Gets the number of polarization induced dipoles in `efpobj` computation.

    Returns
    -------
    int
        Total number of polarization induced dipoles.
    """
    (res, ndip) = efpobj._efp_get_induced_dipole_count()
    _result_to_error(res)
    return ndip
1c7bbd25c17e0a1326e48319c00ad8298174a4b7
3,643,036
def _gen_parabola(phase: float, start: float, mid: float, end: float) -> float:
    """Gets a point on a parabola y = a x^2 + b x + c.

    The Parabola is determined by three points (0, start), (0.5, mid), (1, end)
    in the plane.

    Args:
        phase: Normalized to [0, 1]. A point on the x-axis of the parabola.
        start: The y value at x == 0.
        mid: The y value at x == 0.5.
        end: The y value at x == 1.

    Returns:
        The y value at x == phase.
    """
    mid_phase = 0.5
    delta_1 = mid - start
    delta_2 = end - start
    delta_3 = mid_phase ** 2 - mid_phase
    coef_a = (delta_1 - delta_2 * mid_phase) / delta_3
    coef_b = (delta_2 * mid_phase ** 2 - delta_1) / delta_3
    coef_c = start

    return coef_a * phase ** 2 + coef_b * phase + coef_c
bdd808339e808a26dd1a4bf22552a1d32244bb02
3,643,037
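A quick check, not part of the dataset row above, for `_gen_parabola`: the fitted parabola should pass through all three defining points.

for phase, expected in [(0.0, 1.0), (0.5, 2.0), (1.0, 0.0)]:
    assert abs(_gen_parabola(phase, start=1.0, mid=2.0, end=0.0) - expected) < 1e-12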
import uuid

def grant_perms(obj: element, mast: element, read_only: bool, meta):
    """
    Grants another user permissions to access a Jaseci object
    Param 1 - target element
    Param 2 - master to be granted permission
    Param 3 - Boolean read_only flag

    Return - success status
    """
    mast = meta['h'].get_obj(meta['m_id'], uuid.UUID(meta['m_id']))
    return mast.object_perms_grant(
        obj=obj, mast=mast, read_only=read_only)['success']
3a73baf583214d95c31011e8dfc427ea364edb4a
3,643,038
import os
import logging
import boto3

def get_instance_ids_compute_hostnames_conversion_dict(instance_ids, id_to_hostname, region=None):
    """Return instanceIDs to hostnames dict if id_to_hostname=True, else return hostname to instanceID dict."""
    try:
        if not region:
            region = os.environ.get("AWS_DEFAULT_REGION")
        conversion_dict = {}
        ec2_client = boto3.client("ec2", region_name=region)
        response = ec2_client.describe_instances(InstanceIds=instance_ids).get("Reservations")
        for reservation in response:
            for instance in reservation.get("Instances"):
                instance_hostname = instance.get("PrivateDnsName").split(".")[0]
                instance_id = instance.get("InstanceId")
                if id_to_hostname:
                    conversion_dict[instance_id] = instance_hostname
                else:
                    conversion_dict[instance_hostname] = instance_id
        return conversion_dict
    except Exception as e:
        logging.error("Failed retrieving hostnames for instances {} with exception: {}".format(instance_ids, e))
a0ba0710c2a79861d2b4ac818644cbe460afad59
3,643,039
def pkgdir(tmpdir, monkeypatch):
    """
    temp directory fixture containing a readable/writable ./debian/changelog.
    """
    cfile = tmpdir.mkdir('debian').join('changelog')
    text = """
testpkg (1.1.0-1) stable; urgency=medium

  * update to 1.1.0
  * other rad packaging updates
  * even more cool packaging updates that take a lot of text to describe so
    the change wraps on multiple lines

 -- Ken Dreyer <kdreyer@redhat.com>  Tue, 06 Jun 2017 14:46:37 -0600

testpkg (1.0.0-2redhat1) stable; urgency=medium

  * update to 1.0.0 (rhbz#123)

 -- Ken Dreyer <kdreyer@redhat.com>  Mon, 05 Jun 2017 13:45:36 -0600
""".lstrip("\n")
    cfile.write(text)
    monkeypatch.chdir(tmpdir)
    return tmpdir
0717aba1d5181e48eb11fa1e91b72933cda1af14
3,643,040
import configparser

def read_plot_config(filename):
    """Read in plotting config file.

    Args:
        filename (str): Full path and name of config file.

    Returns:
        dict: Contents of config file.
    """
    config = configparser.ConfigParser()
    config.read(filename)
    out = {}
    for section in config.sections():
        out[section] = _get_section(config, section)
    return out
876a84b2976807d2ef02c79806c9c2d14874997a
3,643,041
def parse(file_path, prec=15):
    """
    Simple helper
    - file_path: Path to the OpenQASM file
    - prec: Precision for the returned string
    """
    qasm = Qasm(file_path)
    return qasm.parse().qasm(prec)
5303753da86780854f1b2b9abff18ad9531e1ea8
3,643,042
import numpy as np

def sinusoid(amplitude=1.0, frequency=1.0, phase=0.0, duration=60.0, samplerate=100.0):
    """Generate a sinusoid"""
    t = np.arange(0, duration, 1.0/samplerate)
    # apply the documented amplitude and phase parameters, which the
    # original body left unused
    d = amplitude * np.sin(2.0 * np.pi * frequency * t + phase)
    return t, d
ea55aec9519321221946e74504732209771b0b23
3,643,043
import tensorflow as tf

def get_model():
    """
    Returns a compiled convolutional neural network model. Assume that the
    `input_shape` of the first layer is `(IMG_WIDTH, IMG_HEIGHT, 3)`.
    The output layer should have `NUM_CATEGORIES` units, one for each category.
    """
    model = tf.keras.models.Sequential()
    model.add(
        tf.keras.layers.Conv2D(
            32, (3, 3), input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)))
    model.add(tf.keras.layers.Activation('relu'))
    model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
    model.add(tf.keras.layers.Conv2D(64, (3, 3)))
    model.add(tf.keras.layers.Activation('relu'))
    model.add(tf.keras.layers.Conv2D(64, (4, 4)))
    model.add(tf.keras.layers.Activation('relu'))
    model.add(tf.keras.layers.Conv2D(128, (4, 4)))
    model.add(tf.keras.layers.Activation('relu'))
    model.add(tf.keras.layers.MaxPooling2D(pool_size=(3, 3)))
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dense(128))
    model.add(tf.keras.layers.Activation('relu'))
    model.add(tf.keras.layers.Dropout(0.2))
    model.add(tf.keras.layers.Dense(NUM_CATEGORIES))
    model.add(tf.keras.layers.Activation('sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    return model
d6d5ad41ec6ba61ebcf7d2dfb962f18a24b7a8a1
3,643,044
def check_datetime_str(datetime_str):
    """
    Tries to parse the datetime string to a datetime object.
    If it fails, it will return False

    :param str datetime_str:
    :return: returns True or False depending on the validity of the datetime string
    :rtype: bool
    """
    try:
        parse_datetime_str(datetime_str)
        return True
    except ValueError:
        return False
8129a3ef87d377bc488bfbd151012241f673e07d
3,643,045
import pandas as pd

def _df_pitch(df: pd.DataFrame, xcol: str = 'x', ycol: str = 'y', zcol: str = 'z'):
    """Find angular pitch for each row in an accelerometer dataframe.

    Args:
        df (pd.DataFrame): accelerometer dataframe
        xcol, ycol, zcol (str): column names for x, y, and z acceleration

    Returns:
        pd.Series: pitch
    """
    out = pd.Series(pitch(df[xcol].values, df[ycol].values, df[zcol].values),
                    name='pitch')
    return out
50c6e40e535b5cd7acead652edf1a9420125fee8
3,643,046
import numpy as np

def gan_masked_generate_face(generator_fun, face_img: np.array):
    """
    Generates a face from the seed one, using a generator_fun which should
    output an alpha mask and bgr results
    :param generator_fun: takes an image and returns alpha mask concatenated with bgr results
    :param face_img: img to feed to the generator
    :return:
    """
    gen_res = generator_fun(face_img)
    gen_mask = gen_res[:, :, 0]
    gen_bgr = gen_res[:, :, 1:]
    gen_mask = np.clip(gen_mask * 255, 0, 255).astype(np.uint8)
    # stack mask such that we have three channels
    gen_mask = np.stack([gen_mask, gen_mask, gen_mask], axis=2)
    return gen_bgr, gen_mask
9b6ce882f509851b0a9c52364bb602909db45cb6
3,643,047
import os

def get_namespace_from_path(path):
    """get namespace from file path

    Args:
        path (unicode): file path

    Returns:
        unicode: namespace
    """
    return os.path.splitext(os.path.basename(path))[0]
5ca9bdde1dbe3e845a7d8e64ca0813e215014efd
3,643,048
import numpy as np

def feat_row_sum_inv_normalize(x):
    """
    :param x: np.ndarray, raw features.
    :return: np.ndarray, normalized features
    """
    x_feat = x.astype(dtype=np.float64)
    inv_x_rowsum = np.power(x_feat.sum(axis=1), -1).flatten()
    inv_x_rowsum[np.isinf(inv_x_rowsum)] = 0.
    x_diag_mat = np.diag(inv_x_rowsum)
    normalized_x = x_diag_mat.dot(x_feat)
    return normalized_x
ea55c7826054ca13f810852a24cf315f268dfd6a
3,643,049
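A quick check, not part of the dataset row above, for `feat_row_sum_inv_normalize`: each non-zero row of the result should sum to one, and all-zero rows stay zero.

import numpy as np
x = np.array([[1.0, 3.0], [2.0, 2.0], [0.0, 0.0]])
out = feat_row_sum_inv_normalize(x)
assert np.allclose(out[:2].sum(axis=1), 1.0)
assert np.allclose(out[2], 0.0)  # all-zero rows stay zero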
def cross3(v1, v2):
    """ cross3 """
    return (v1[1] * v2[2] - v1[2] * v2[1],
            v1[2] * v2[0] - v1[0] * v2[2],
            v1[0] * v2[1] - v1[1] * v2[0])
f3bb2b82acf54d929ffc14177fde120970617886
3,643,050
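A quick check, not part of the dataset row above, comparing `cross3` against numpy's cross product:

import numpy as np
assert cross3((1, 0, 0), (0, 1, 0)) == (0, 0, 1)
assert np.allclose(cross3((1, 2, 3), (4, 5, 6)), np.cross([1, 2, 3], [4, 5, 6]))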
from copy import copy
import rest_framework.request
from django.http import QueryDict

def api_request(request, viewset, method, url_kwargs={}, get_params={}):
    """
    Call an API route on behalf of the user request.

    Examples:
        data = api_request(request, CaseDocumentViewSet, 'list', get_params={'q': 'foo'}).data
        data = api_request(request, CaseDocumentViewSet, 'retrieve', url_kwargs={'id': '123'}).data
    """
    # copy selected fields due to infinite recursion for some request copies
    if isinstance(request, rest_framework.request.Request):
        request = request._request
    api_request = copy(request)
    api_request.method = 'GET'
    api_request.GET = QueryDict(mutable=True)
    api_request.GET.update(get_params)
    return viewset.as_view({'get': method})(api_request, **url_kwargs)
c3d118d1a9857e9522f3e518a77da6e51e443ef7
3,643,051
def ca_restart(slot):
    """
    :param slot:
    """
    LOG.info("CA_Restart: attempting to restart")
    ret = CA_Restart(CK_ULONG(slot))
    LOG.info("CA_Restart: Ret Value: %s", ret)
    return ret
1192f371c14bdf8f773b1402f77e66d24d3aee94
3,643,052
import os
from osgeo import ogr

def ogr_wkts(src_ds):
    """return the wkt(s) of the ogr dataset"""
    these_regions = []
    src_s = src_ds.split(':')
    if os.path.exists(src_s[0]):
        poly = ogr.Open(src_s[0])
        if poly is not None:
            p_layer = poly.GetLayer(0)
            for pf in p_layer:
                pgeom = pf.GetGeometryRef()
                pwkt = pgeom.ExportToWkt()
                r = Region().from_string(pwkt)
                if len(src_s) > 1:
                    src_r = src_s[1].split('/')
                    if len(src_r) > 0:
                        r.zmin = utils.float_or(src_r[0])
                    if len(src_r) > 1:
                        r.zmax = utils.float_or(src_r[1])
                    if len(src_r) > 2:
                        r.wmin = utils.float_or(src_r[2])
                    if len(src_r) > 3:
                        r.wmax = utils.float_or(src_r[3])
                these_regions.append(r)
        poly = None
    return(these_regions)
af7a2ae2f33d7616cee224ae51f25d4b77937f9d
3,643,053
import os
import numpy as np
from gensim.models import KeyedVectors

def load_word2vec_matrix(embedding_size):
    """
    Return the word2vec model matrix.

    Args:
        embedding_size: The embedding size
    Returns:
        The word2vec model matrix
    Raises:
        IOError: If word2vec model file doesn't exist
    """
    word2vec_file = '../data/word2vec_' + str(embedding_size) + '.txt'
    if not os.path.isfile(word2vec_file):
        raise IOError("✘ The word2vec file doesn't exist. ")
    model = KeyedVectors.load_word2vec_format(open(word2vec_file, 'r'),
                                              binary=False, unicode_errors='replace')
    vocab_size = len(model.wv.vocab.items())
    vocab = dict([(k, v.index) for k, v in model.wv.vocab.items()])
    vector = np.zeros([vocab_size, embedding_size])
    for key, value in vocab.items():
        if key is not None:
            vector[value] = model[key]
    return vocab_size, vector
adb44f40ab12ddac737c4c467c6a6fee971cf83b
3,643,054
from starlette.responses import JSONResponse  # assuming a Starlette/FastAPI app

async def app_exception_handler(request, exc):
    """
    Error handler for AppException errors.

    Logs the AppException error detected and returns the
    appropriate message and details of the error.
    """
    logger.debug(exc)
    return JSONResponse(
        Response(success=False, error_code=422, message=str(exc)).dict()
    )
049322caa0e0541e8d4348e685b533292484e2c5
3,643,055
def db_query_map(db_or_el, query, func_match, func_not) -> tuple:
    """
    Helper function to find elems from query and transform them,
    to generate 2 lists of matching/not-matching elements.
    """
    expr = parse_query_expr(query)
    elems1, elems2 = [], []
    for el in _db_or_elems(db_or_el):
        m = el_to_meta(el)
        ok = [func(m.get(prop), val) for prop, func, val in expr]
        if ok and all(ok):
            r = func_match(el)
            if r is not None:
                elems1.append(r)
        else:
            r = func_not(el)
            if r is not None:
                elems2.append(r)
    return elems1, elems2
d7b56e8d62d0c80c4bfdb026879d5a848b7d3b8f
3,643,056
import inspect

def route(pattern, method=HTTP_METHOD.GET):
    """
    Decorator to declare the routing rule of handler methods.
    """
    def decorator(func):
        frm = inspect.stack()[1]
        class_name = frm[3]
        module_name = frm[0].f_back.f_globals["__name__"]
        full_class_name = module_name + '.' + class_name
        real_pattern = '^' + pattern + '$'
        add_handler(method, real_pattern, full_class_name, func)
        return asynchronous(func)
    return decorator
28d107abbce1d36611fa5313b0d52491000a1f73
3,643,057
from gmpy2 import invert  # modular inverse; operator.invert is unary and was a mis-import

def div_q(a: ElementModPOrQorInt, b: ElementModPOrQorInt) -> ElementModQ:
    """Compute a/b mod q."""
    b = _get_mpz(b)
    inverse = invert(b, _get_mpz(get_small_prime()))
    return mult_q(a, inverse)
285a8aa161748d8c7aaa38bd04f81fe7c22e5e43
3,643,058
import numpy as np

def donoho_gavish_threshold(shape, sigma):
    """
    (Gavish and Donoho, 2014)

    Parameters
    ----------
    shape: tuple (n_samples, n_features)
        Shape of the data matrix.

    sigma: float
        Estimate of the noise standard deviation.

    Output
    ------
    singular_value_threshold: float
    """
    n_samples, n_features = shape
    beta = n_features / n_samples
    mult = n_samples

    # TODO: is this what we want to do?
    if beta > 1:
        beta = 1 / beta
        mult = n_features

    if n_samples == n_features:
        lambd = 4 / np.sqrt(3)
    else:
        lambd = dg_threshold(beta)

    return lambd * np.sqrt(mult) * sigma
ba6731ff2a3b27543ed2a738a5e09abf3ec5cc78
3,643,059
import pyarrow

def pyarrow_to_r_schema(
        obj: 'pyarrow.lib.Schema'
):
    """Create an R `arrow::Schema` object from a pyarrow Schema.

    This is sharing the C/C++ object between the two languages.
    The returned object depends on the active conversion rule in
    rpy2. By default it will be an `rpy2.robjects.Environment`.
    """
    schema_ptr = rarrow.allocate_arrow_schema()[0]
    try:
        obj._export_to_c(int(schema_ptr))
        r_schema = rarrow.ImportSchema(schema_ptr)
    finally:
        rarrow.delete_arrow_schema(schema_ptr)
    return r_schema
0eb461451ea805b3ac888084b4f46ca9cbbd7c00
3,643,060
import os
import logging
import pandas
import matplotlib.pyplot as plt

log = logging.getLogger(__name__)

def plot_ovlp_stats(jobdir, nproc):
    """Plot 5' and 3' Overlap distributions"""
    log.info("Generating overlap plots")
    overlaps = get_overlaps(jobdir, nproc)
    ovlp_dict = {}
    for ovlp in overlaps:
        rid, length, fiveprime, threeprime = ovlp.split()
        ovlp_dict[rid] = fiveprime, threeprime
    fiveprime_ovlps = [int(v[0]) for k, v in ovlp_dict.items()]
    threeprime_ovlps = [int(v[1]) for k, v in ovlp_dict.items()]
    fig, axs = plt.subplots(2, 1)
    dataframe = pandas.DataFrame({'five_prime_ovlps': fiveprime_ovlps,
                                  'three_prime_ovlps': threeprime_ovlps})
    binsfive = dataframe['five_prime_ovlps'].max() + 1
    binsthree = dataframe['three_prime_ovlps'].max() + 1
    dataframe['five_prime_ovlps'].plot.hist(
        bins=binsfive, ax=axs[0], figsize=(5, 10))
    axs[0].set_title('5\' overlaps')
    axs[0].set_xlim(0, 100)
    dataframe['three_prime_ovlps'].plot.hist(
        bins=binsthree, ax=axs[1], figsize=(5, 10))
    axs[1].set_title('3\' overlaps')
    axs[1].set_xlim(0, 100)
    outfig = os.path.join('outfigs', 'overlap_distribution.png')
    plt.savefig(outfig)
    return dataframe
d96b9fbec409e112f4eb8562b1a74f0504346728
3,643,061
import calendar

def validate_days(year, month, day):
    """validate no of days in given month and year

    >>> validate_days(2012, 8, 31)
    31
    >>> validate_days(2012, 8, 32)
    31
    """
    total_days = calendar.monthrange(year, month)
    return (total_days[1] if (day > total_days[1]) else day)
7499dc9654ec9ffd7f534cf27444a3236dd82e81
3,643,062
import json
import boto3

def save_to_s3(bucket_name, file_name, data):
    """
    Saves data to a file in the bucket

    bucket_name -- The name of the bucket you're saving to
    file_name -- The name of the file
    data -- data to be saved
    """
    s3 = boto3.resource('s3')
    obj = s3.Object(bucket_name, file_name)
    resp = obj.put(Body=json.dumps(data))
    return resp
520599136418d635cfbaf67c7bffbb2da105985c
3,643,063
def linsearch_fun_BiCM_exp(xx, args):
    """Linsearch function for BiCM newton and quasinewton methods.
    This is the linesearch function in the exponential mode.
    The function returns the step's size, alpha.
    Alpha determines how much to move on the descending direction
    found by the algorithm.

    :param xx: Tuple of arguments to find alpha:
        solution, solution step, tuning parameter beta,
        initial alpha, function f
    :type xx: (numpy.ndarray, numpy.ndarray, float, float, func)
    :param args: Tuple, step function and arguments.
    :type args: (func, tuple)
    :return: Working alpha.
    :rtype: float
    """
    x = xx[0]
    dx = xx[1]
    beta = xx[2]
    alfa = xx[3]
    f = xx[4]
    step_fun = args[0]
    arg_step_fun = args[1]

    i = 0
    s_old = -step_fun(x, arg_step_fun)
    while (
        sof.sufficient_decrease_condition(
            s_old, -step_fun(x + alfa * dx, arg_step_fun), alfa, f, dx
        )
        is False
        and i < 50
    ):
        alfa *= beta
        i += 1
    return alfa
c88f974c76ceec84a12a67ef9c6f71ae357f472b
3,643,064
def getCountryName(countryID):
    """
    Pull out the country name from a country id.
    If there's no "name" property in the object, returns None.
    """
    try:
        countryObj = getCountry(countryID)
        return countryObj['name']
    except Exception:
        pass
72b90de7e49911983fe60e18b00cc577f423785d
3,643,065
import os
import base64

def _pic_download(url, type):
    """
    Download (decode and save) an image.
    :param url: base64-encoded image data
    :param type: name used for the saved file
    :return: path of the saved image
    """
    save_path = os.path.abspath('...') + '\\' + 'images'
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    img_path = save_path + '\\' + '{}.jpg'.format(type)
    img_data = base64.b64decode(url)
    with open(img_path, 'wb') as f:
        f.write(img_data)
    return img_path
05541a9991b4b3042c84d4e1b5188def365cc4fc
3,643,066
import tensorflow as tf

def get_placeholder(default_tensor=None, shape=None, name=None):
    """Return a placeholder_with_default if default_tensor is given;
    otherwise a new placeholder is created and returned"""
    if default_tensor is not None:
        return default_tensor
    else:
        if shape is None:
            raise ValueError('One of default_tensor and shape must be given')
        return tf.placeholder(tf.float32, shape=shape, name=name)
e62fe4ca8244ae45ac853a0398754375454626dc
3,643,067
def get_targets(args):
    """
    Gets the list of targets for cmake and kernel/build.sh

    :param args: The args variable generated by parse_parameters
    :return: A string of targets suitable for cmake or kernel/build.sh
    """
    if args.targets:
        targets = args.targets
    elif args.full_toolchain:
        targets = "all"
    else:
        targets = "AArch64;ARM;BPF;Hexagon;Mips;PowerPC;RISCV;SystemZ;X86"
    return targets
81eb31fe416303bc7e881ec2c10cfeeea4fdab05
3,643,068
def _format_warning(message, category, filename, lineno, line=None):  # noqa: U100, E501
    """
    Simple format for warnings issued by ProPlot. See the
    `internal warning call signature \
    <https://docs.python.org/3/library/warnings.html#warnings.showwarning>`__
    and the `default warning source code \
    <https://github.com/python/cpython/blob/master/Lib/warnings.py>`__.
    """
    return f'{filename}:{lineno}: ProPlotWarning: {message}\n'
f5709df0a84d9479d6b895dccb3eae8292791f74
3,643,069
def piocheCarte(liste_pioche, x):
    """
    This function returns x cards from the draw pile.

    Args:
        x (int): Number of cards to draw.

    Returns:
        list: The x cards drawn.
    """
    liste_carte = []
    for i in range(x):
        # always take the top card; indexing with i while deleting
        # element 0 would skip every other card
        liste_carte.append(liste_pioche[0])
        del liste_pioche[0]
    return liste_carte
ed31c47d699447870207a4066a3da9c35333ada8
3,643,070
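A quick check, not part of the dataset row above, of the fixed draw behaviour in `piocheCarte`:

pile = [1, 2, 3, 4, 5]
assert piocheCarte(pile, 3) == [1, 2, 3]
assert pile == [4, 5]  # drawn cards are removed from the pile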
import os

def is_process_running(pid):
    """Returns true if a process with pid is running, false otherwise."""
    # from
    # http://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid
    try:
        os.kill(pid, 0)
    except OSError:
        return False
    else:
        return True
a05cbeb84b5d6f3d6d7a06ab3d14702742b2a289
3,643,071
import numpy as np

def cost_logistic(p, x, y):
    """
    Sum of absolute deviations of obs and logistic function
    :math:`L/(1+exp(-k(x-x0)))`

    Parameters
    ----------
    p : iterable of floats
        parameters (`len(p)=3`)
        - `p[0]` = L  = Maximum of logistic function
        - `p[1]` = k  = Steepness of logistic function
        - `p[2]` = x0 = Inflection point of logistic function
    x : float or array_like of floats
        independent variable
    y : float or array_like of floats
        dependent variable, observations

    Returns
    -------
    float
        sum of absolute deviations
    """
    return np.sum(np.abs(y - logistic_p(x, p)))
4985d19ff792bf2df8fe5692330cb9c32d329cab
3,643,072
import requests

def get_price(token: str, sellAmount=1000000000000000000):
    """
    get_price uses the 0x api to get the most accurate eth price for the token

    :param token: token ticker or token address
    :param buyToken: token to denominate price in, default is WETH
    :param sellAmount: token amount to sell in base unit, default is 1e18
    :return: eth/bnb price per token for the specified amount to sell
    """
    if curr_network == "bsc" or curr_network == "bsc-fork":
        endpoint = "https://bsc.api.0x.org/"
        buyToken = "WBNB"
    elif curr_network == "eth":
        endpoint = "https://api.0x.org/"
        buyToken = "WETH"
    else:
        raise ValueError("Unrecognized network")
    params = (
        "swap/v1/quote?buyToken="
        + buyToken
        + "&sellToken="
        + token
        + "&sellAmount="
        + str(sellAmount)
    )
    r = requests.get(endpoint + params)
    data = r.json()
    if not data.get("guaranteedPrice"):
        console.log(data)
        raise ValueError("Price could not be fetched")
    return data["guaranteedPrice"]
b1dae25571eccb28433b9bfe7c3be6f006f05184
3,643,073
from datetime import datetime
import logging

def inv_exportlog():
    """Exports a csv file formatted for Profitek's inventory task list."""
    date = datetime.today().strftime('%Y%m%d')
    scanner_terminal = escape(session["scanner_terminal"])
    allscanners = escape(request.form.get('allscanners', 'yes'))
    if 'yes' in allscanners.lower():
        # get absolutely everything
        invdict = __countAllBarcodes_inv()
    else:
        invdict = redis_client.hgetall(f'inventory_{scanner_terminal}')
    logging.debug(invdict)
    with open(f'/var/ldbinvoice/{date}_{scanner_terminal}_inventory_scan_log.txt', 'w') as f:
        for k, v in invdict.items():
            line = f"{k},{v}"
            logging.info(line)
            f.write(f'{line}\n')
    return {'success': True}
96d8d341ebc0d8d0e2cb79669a779acf5e4a5ddb
3,643,074
def _get_all_errors_if_unrecognized_properties(model: dict, props: list) -> iter:
    """Get error messages if the model has unrecognized properties."""
    def get_error_if_property_is_unrecognized(key):
        if key not in props:
            return f"unrecognized field named '{key}' found in model '{model}'"

    return map(get_error_if_property_is_unrecognized, model.keys())
e7c380b750606adc466f335a2411619eab11312f
3,643,075
import os

def no_holders(disk):
    """Return true if the disk has no holders."""
    holders = os.listdir('/sys/class/block/' + disk + '/holders/')
    return len(holders) == 0
3ef1b7754cde64f248ca9da747adb398aaefd878
3,643,076
import inspect
import sys

def main(fn):
    """Call fn with command line arguments. Used as a decorator.

    The main decorator marks the function that starts a program. For example,

    @main
    def my_run_function():
        # function body

    Use this instead of the typical __name__ == "__main__" predicate.
    """
    if inspect.stack()[1][0].f_locals['__name__'] == '__main__':
        args = sys.argv[1:]  # Discard the script name from command line
        fn(*args)  # Call the main function
    return fn
6c87ac44dc8422b8d4f02685a764f0cccffbc215
3,643,077
def get_get_single_endpoint_schema(class_name, id_field_where_type, response_schema):
    """
    :param class_name:
    :param id_field_where_type:
    :param response_schema:
    """
    return {
        "tags": [class_name],
        "description": f"Get a {class_name} model representation",
        "parameters": [
            {
                "name": "id",
                "description": f"{class_name} identifier",
                "in": "path",
                "schema": {"type": "integer" if id_field_where_type == "int:" else "string"},
            }
        ],
        "responses": {
            "200": {"description": f"{class_name} response model",
                    "content": {"application/json": {"schema": response_schema}}},
            "404": {"description": "Not found response model",
                    "content": {"application/json": {"schema": not_found_swagger_schema}}},
            "500": {"description": "Operation fail",
                    "content": {"application/json": {"schema": error_swagger_schema}}},
        },
    }
98daaaa20e5e52c2480ce6aa1805ee3da6b163d7
3,643,078
from typing import Optional

def overlapping_template_matching(
    sequence,
    template_size: Optional[int] = None,
    blocksize: Optional[int] = None,
    matches_ceil: Optional[int] = None,
):
    """Overlapping matches to template per block is compared to expected result

    The sequence is split into blocks, where the number of overlapping patterns
    matches to the template in each block is found. This is referenced to the
    expected mean and variance in matches of a hypothetically truly random
    sequence.

    Parameters
    ----------
    sequence : array-like with two distinct values
        Sequence containing 2 distinct elements
    template_size : ``int``
        Size of the template to be generated
    blocksize : ``int``
        Size of the blocks that partition the given sequence
    matches_ceil : ``int``
        Group matches of this value and higher as one single tally

    Returns
    -------
    result : ``OverlappingTemplateMatchingTestResult``
        Dataclass that contains the test's statistic and p-value as well as
        other relevant information gathered.
    """
    return _randtests.overlapping_template_matching(
        sequence,
        template_size=template_size,
        blocksize=blocksize,
        matches_ceil=matches_ceil,
    )
035ce0c333c69bdf437f1e6f93071c9342154e92
3,643,079
def _extract_options(config, options, *args):
    """Extract options values from a configparser, optparse pair.

    Options given on command line take precedence over options
    read in the configuration file.

    Args:
        config (dict): option values read from a config file through
            configparser
        options (optparse.Options): optparse 'options' object containing
            options values from the command line
        *args (str tuple): name of the options to extract
    """
    extract = {}
    for key in args:
        if key not in config:
            # the original guard tested `key not in args`, which can never
            # be true inside this loop; skipping keys absent from the config
            # is the evident intent
            continue
        extract[key] = config[key]
        option = getattr(options, key, None)
        if option is not None:
            extract[key] = option
    return extract
3d74857b3dcdd242950a35b84d3bcaae557a390b
3,643,080
import numpy as np

def _calc_fans(shape):
    """
    :param shape: tuple with the shape (4D - for example: filters, depth, width, height)
    :return: (fan_in, fan_out)
    """
    if len(shape) == 2:  # Fully connected layer (units, input)
        fan_in = shape[1]
        fan_out = shape[0]
    elif len(shape) in {3, 4, 5}:  # Convolutional kernels
        k_size = np.prod(shape[2:])
        fan_in = k_size * shape[1]
        fan_out = k_size * shape[0]
    else:
        raise ValueError("Incompatible shape")
    return fan_in, fan_out
70535fd002f08bbaadf1a0af4ec980851e52ad92
3,643,081
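A quick usage sketch, not part of the dataset row above, for `_calc_fans` with a 3x3 conv kernel, 64 output and 32 input channels:

fan_in, fan_out = _calc_fans((64, 32, 3, 3))
assert (fan_in, fan_out) == (32 * 9, 64 * 9)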
from typing import List, Tuple
import logging
import torch
import copy
from pathlib import Path

def train_collision(net: nn.Module, full_props: List[c.CollisionProp], args: Namespace) -> Tuple[int, float, int, float]:
    """ The almost completed skeleton of training Collision Avoidance/Detection networks using ART.
    :return: trained_epochs, train_time, certified, final accuracies
    """
    logging.info(net)
    if args.reset_params:
        try:
            net.reset_params()
        except AttributeError:
            ''' This is possible when creating FFNN on the fly which doesn't have reset_params().
                It's fine since such FFNN is using newly initialized weights.
            '''
            pass

    props_dict = c.cluster_props(full_props)
    large_props = [ps[0] for ps in props_dict.values()]  # pick the largest one for each safety margin base point
    large_props = AndProp(large_props[:args.n_props])
    logging.info(f'Using {len(large_props.props)} largest properties.')
    v = Bisecter(args.dom, large_props)

    def run_abs(batch_abs_lb: Tensor, batch_abs_ub: Tensor, batch_abs_bitmap: Tensor) -> Tensor:
        """ Return the safety distances over abstract domain. """
        batch_abs_ins = args.dom.Ele.by_intvl(batch_abs_lb, batch_abs_ub)
        batch_abs_outs = net(batch_abs_ins)
        return large_props.safe_dist(batch_abs_outs, batch_abs_bitmap)

    in_lb, in_ub = large_props.lbub(device)
    in_bitmap = large_props.bitmap(device)  # already moved to GPU if necessary

    trainset = c.CollisionData.load(device)
    testset = trainset  # there is only training set, following that in Ehlers 2017

    start = timer()

    if args.no_abs or args.no_refine:
        curr_abs_lb, curr_abs_ub, curr_abs_bitmap = in_lb, in_ub, in_bitmap
    else:
        # refine it at the very beginning to save some steps in later epochs
        curr_abs_lb, curr_abs_ub, curr_abs_bitmap = v.split(in_lb, in_ub, in_bitmap, net, args.refine_top_k,
                                                            # tiny_width=args.tiny_width,
                                                            stop_on_k_all=args.start_abs_cnt)

    opti = Adam(net.parameters(), lr=args.lr)
    scheduler = args.scheduler_fn(opti)  # could be None

    accuracies = []  # epoch 0: ratio
    best_metric = 1e9 if args.accu_bar else -1.
    best_params = None
    certified = False
    epoch = 0
    while True:
        # first, evaluate current model
        logging.info(f'[{utils.time_since(start)}] After epoch {epoch}:')
        if not args.no_pts:
            logging.info(f'Loaded {trainset.real_len()} points for training.')
        if not args.no_abs:
            logging.info(f'Loaded {len(curr_abs_lb)} abstractions for training.')
            with torch.no_grad():
                full_dists = run_abs(curr_abs_lb, curr_abs_ub, curr_abs_bitmap)
            worst_loss = full_dists.max()
            logging.info(f'min loss {full_dists.min()}, max loss {worst_loss}.')
            if worst_loss <= 0.:
                certified = True
                logging.info(f'All {len(curr_abs_lb)} abstractions certified.')
            else:
                _, worst_idx = full_dists.max(dim=0)
                logging.info(f'Max loss at LB: {curr_abs_lb[worst_idx]}, UB: {curr_abs_ub[worst_idx]}.')
                worst_props = large_props.props_of(curr_abs_bitmap[worst_idx])
                logging.info(f'Max loss labels: {[p.larger_category for p in worst_props]}')

        accu = eval_test(net, testset)
        accuracies.append(accu)
        logging.info(f'Test set accuracy {accu}.')

        if args.accu_bar is None or args.no_abs:
            # pick the best accuracy model
            if accu > best_metric:
                best_metric = accu
                best_params = copy.deepcopy(net.state_dict())
        else:
            if accu > args.accu_bar and worst_loss < best_metric:
                best_metric = worst_loss
                best_params = copy.deepcopy(net.state_dict())

        # check termination
        if certified and epoch >= args.min_epochs:
            # all safe and sufficiently trained
            break

        if epoch >= args.max_epochs:
            break

        epoch += 1
        certified = False

        # writing like this because ReduceLROnPlateau does not have get_lr()
        _param_lrs = [group['lr'] for group in opti.param_groups]
        curr_lr = sum(_param_lrs) / len(_param_lrs)
        logging.info(f'\n[{utils.time_since(start)}] Starting epoch {epoch} with lr = {curr_lr}:')

        absset = exp.AbsIns(curr_abs_lb, curr_abs_ub, curr_abs_bitmap)

        # dataset may have expanded, need to update claimed length to date
        if not args.no_pts:
            trainset.reset_claimed_len()
        if not args.no_abs:
            absset.reset_claimed_len()
        if (not args.no_pts) and (not args.no_abs):
            ''' Might simplify this to just using the amount of abstractions,
                is it unnecessarily complicated?
            '''
            # need to enumerate both
            max_claimed_len = min(trainset.claimed_len, absset.claimed_len)
            # max_claimed_len = trainset.claimed_len
            trainset.claimed_len = max_claimed_len
            absset.claimed_len = max_claimed_len

        if not args.no_pts:
            # using drop_last may increase accuracy a bit, but decrease safety a bit?
            conc_loader = data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, drop_last=True)
            nbatches = len(conc_loader)
            conc_loader = iter(conc_loader)
        if not args.no_abs:
            # using drop_last may increase accuracy a bit, but decrease safety a bit?
            abs_loader = data.DataLoader(absset, batch_size=args.batch_size, shuffle=True, drop_last=True)
            nbatches = len(abs_loader)  # doesn't matter rewriting len(conc_loader), they are the same
            abs_loader = iter(abs_loader)

        accu_total_loss = 0.
        safe_total_loss = 0.
        for i in range(nbatches):
            opti.zero_grad()
            batch_loss = 0.
            if not args.no_pts:
                batch_inputs, batch_labels = next(conc_loader)
                batch_outputs = net(batch_inputs)
                batch_loss += args.accuracy_loss(batch_outputs, batch_labels)
                accu_total_loss += batch_loss.item()
            if not args.no_abs:
                batch_abs_lb, batch_abs_ub, batch_abs_bitmap = next(abs_loader)
                batch_dists = run_abs(batch_abs_lb, batch_abs_ub, batch_abs_bitmap)
                safe_loss = batch_dists.mean()  # L1, need to upgrade to batch_worsts to unlock loss other than L1
                safe_loss *= args.safe_lambda
                safe_total_loss += safe_loss.item()
                batch_loss += safe_loss
            logging.debug(f'Epoch {epoch}: {i / nbatches * 100 :.2f}%. Batch loss {batch_loss.item()}')
            batch_loss.backward()
            nn.utils.clip_grad_norm_(net.parameters(), args.grad_clip)  # doesn't seem to make a difference here..
            opti.step()

        # inspect the trained weights after another epoch
        # meta.inspect_params(net.state_dict())

        accu_total_loss /= nbatches
        safe_total_loss /= nbatches
        if scheduler is not None:
            scheduler.step(accu_total_loss + safe_total_loss)
        logging.info(f'[{utils.time_since(start)}] At epoch {epoch}: avg accuracy training loss {accu_total_loss}, ' +
                     f'safe training loss {safe_total_loss}.')

        # Refine abstractions, note that restart from scratch may output much fewer abstractions thus imprecise.
        if (not args.no_refine) and len(curr_abs_lb) < args.max_abs_cnt:
            curr_abs_lb, curr_abs_ub, curr_abs_bitmap = v.split(curr_abs_lb, curr_abs_ub, curr_abs_bitmap, net,
                                                                args.refine_top_k,
                                                                # tiny_width=args.tiny_width,
                                                                stop_on_k_new=args.refine_top_k)
        pass

    # summarize
    train_time = timer() - start

    if certified and args.n_props == 100:
        # the latest one is certified, use that
        final_accu = accuracies[-1]
        tot_certified = 500
    else:
        # not yet having a certified model, thus pick the one with best accuracy so far
        # and try certify it on all props
        if best_params is not None:
            logging.info(f'Post certify using best metric {best_metric}')
            net.load_state_dict(best_params)

        final_accu = eval_test(net, testset)
        tot_certified = 0
        for i, (k, ps) in enumerate(props_dict.items()):
            assert len(ps) == 5
            for j, p in enumerate(ps):
                tmp_v = Bisecter(args.dom, p)
                in_lb, in_ub = p.lbub(device)
                if tmp_v.try_certify(in_lb, in_ub, None, net, args.batch_size, timeout_sec=args.certify_timeout):
                    tot_certified += (5 - j)
                    logging.info(f'Certified prop based at {k} using {j}th eps, now {tot_certified}/{5*(i+1)}.')
                    break
        pass

    serial_net = nn.Sequential(*[layer.export() for layer in net])  # save exported network in serialization
    torch.save(serial_net.cpu(), Path(RES_DIR, f'trained-{tot_certified}-{final_accu:.4f}-model.pt'))

    accuracies = [f'{v:.4f}' for v in accuracies]
    logging.info(f'Accuracy at every epoch: {accuracies}')
    logging.info(f'After {epoch} epochs / {utils.pp_time(train_time)}, ' +
                 f'eventually the trained network got certified at {tot_certified} / 500 props, ' +
                 f'with {final_accu:.4f} accuracy on test set.')
    return epoch, train_time, tot_certified, final_accu
4fa7a37435fe316ebe63cfc4979d957a64fce8dc
3,643,082
def statRobustness(compromised, status):
    """Produce per-robustness-level counts of compromised accounts, grouped by weakness reason."""
    rob = {0: {"empty": 0, "login based": 0, "top 10 common": 0, "company name": 0},
           1: {"top 1000 common": 0, "login extrapolation": 0, "company context related": 0, "4 char or less": 0},
           2: {"top 1M common": 0, "6 char or less": 0, "2 charsets or less": 0},
           3: {"present in attack wordlist": 0, "present in locale attack wordlist": 0, "leaked": 0, "undetermined": 0}}
    for acc in compromised:
        if status == 'all' or 'account_disabled' not in compromised[acc]["status"]:
            rob[compromised[acc]["robustness"]][compromised[acc]["reason"]] += 1
    return rob
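# Usage sketch with hypothetical audit data; the dict layout mirrors the fields
# read in the loop above ("status", "robustness", "reason"):
compromised = {
    'alice': {'status': ['enabled'], 'robustness': 0, 'reason': 'empty'},
    'bob': {'status': ['account_disabled'], 'robustness': 2, 'reason': '6 char or less'},
}
print(statRobustness(compromised, 'all')[0]['empty'])              # 1
print(statRobustness(compromised, 'active')[2]['6 char or less'])  # 0: bob's account is disabled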
46920b466b96fa37a94888e788104c1d901a9227
3,643,083
def ns_diff(newstr, oldstr):
    """ Return new - old (clamped at zero) as a comma-formatted string;
    propagates STATUS_NA when the new value is unavailable. """
    if newstr == STATUS_NA:
        return STATUS_NA
    # if new is valid but old is not, treat old as zero so new is returned
    if oldstr == STATUS_NA:
        oldstr = '0'
    new, old = int(newstr), int(oldstr)
    return '{:,}'.format(max(0, new - old))
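# Usage sketch; STATUS_NA is defined elsewhere in the original module and is
# assumed here to be the string 'N/A':
STATUS_NA = 'N/A'
print(ns_diff('1500', '200'))   # '1,300'
print(ns_diff('100', '500'))    # '0' (negative diffs are clamped)
print(ns_diff(STATUS_NA, '5'))  # 'N/A'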
bbf58a649ef71524e7413fb47501d0054b828919
3,643,084
def get_crab(registry):
    """
    Get the Crab Gateway

    :rtype: :class:`crabpy.gateway.crab.CrabGateway`
    """
    # argument might be a config or a request
    regis = getattr(registry, 'registry', None)
    if regis is None:
        regis = registry
    return regis.queryUtility(ICrab)
6f8f02ac4bf7e82c8f4828fb4fdab4b78451ae49
3,643,085
def create_meal():
    """Create a new meal.
    ---
    tags:
      - meals
    parameters:
      - in: body
        name: body
        schema:
          id: Meal
          properties:
            name:
              type: string
              description: the name of the meal
            description:
              type: string
              description: the description of the meal
            price:
              type: number
              format: float
              description: the cost of the meal
            scheduled_for:
              type: string
              format: date-time
              description: the date time that the meal is scheduled for
    responses:
      201:
        description: Meal was successfully created
        schema:
          id: Meal
      401:
        description: The user is not authenticated
      422:
        description: The data failed validation
      428:
        description: The current user has not added their address
    """
    if current_user.location is None:
        raise PreconditionRequired(Errors.LOCATION_NOT_CREATED_YET)
    meal_data = MEAL_SCHEMA.load(request.json).data
    # pop tags before create, then assign them afterwards
    has_tags = 'tags' in meal_data
    tags = meal_data.pop('tags') if has_tags else None
    meal = Meal.create(location_id=current_user.location.id, **meal_data)
    if has_tags:
        meal.tags = tags
    return jsonify(data=MEAL_SCHEMA.dump(meal).data, message=Success.MEAL_CREATED), 201
ee1c235410d7d6ca9f3661ea9f7a1f9fb434a730
3,643,086
def buscaBinariaIterativa(alvo, array):
    """ Return the index of the array at which the target element is stored.

    Considering the collection received as a parameter, identify and return
    the index at which the specified element is stored. If the element is
    not present in the collection, return -1. Uses an iterative approach.

    Parameters
    ----------
    alvo : ?
        Element whose index is being searched for
    array : list
        The list in which the element's index should be identified

    Return
    ------
    index : int
        The index at which the target element is stored
    """
    low = 0
    high = len(array) - 1
    while low <= high:
        mid = (low + high) // 2
        if array[mid] == alvo:
            return mid
        elif array[mid] < alvo:
            low = mid + 1
        else:
            high = mid - 1
    return -1
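# Usage sketch: binary search over a sorted list.
valores = [2, 3, 5, 7, 11, 13]
print(buscaBinariaIterativa(7, valores))  # 3
print(buscaBinariaIterativa(4, valores))  # -1 (not present)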
e74fed0781b3c1bed7f5f57713a06c58bcbde107
3,643,087
import numpy as np


def empiricalcdf(data, method='Hazen'):
    """Return the empirical cdf.

    Methods available:
        Hazen:       (i-0.5)/N
        Weibull:     i/(N+1)
        Chegodayev:  (i-.3)/(N+.4)
        Cunnane:     (i-.4)/(N+.2)
        Gringorten:  (i-.44)/(N+.12)
        California:  (i-1)/N

    Where i goes from 1 to N.
    """
    i = np.argsort(np.argsort(data)) + 1.
    N = len(data)
    method = method.lower()
    if method == 'hazen':
        cdf = (i-0.5)/N
    elif method == 'weibull':
        cdf = i/(N+1.)
    elif method == 'california':
        cdf = (i-1.)/N
    elif method == 'chegodayev':
        cdf = (i-.3)/(N+.4)
    elif method == 'cunnane':
        cdf = (i-.4)/(N+.2)
    elif method == 'gringorten':
        cdf = (i-.44)/(N+.12)
    else:
        raise ValueError('Unknown method. Choose among Weibull, Hazen, '
                         'Chegodayev, Cunnane, Gringorten and California.')
    return cdf
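# Usage sketch: Weibull plotting positions i/(N+1) for a small sample.
data = np.array([3.0, 1.0, 4.0, 1.5])
print(empiricalcdf(data, method='weibull'))  # [0.6 0.2 0.8 0.4]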
6150361002d3f008185e5deafabfdc74b3189bd8
3,643,088
def CCT_to_xy_Kang2002(CCT): """ Returns the *CIE XYZ* tristimulus values *CIE xy* chromaticity coordinates from given correlated colour temperature :math:`T_{cp}` using *Kang et al. (2002)* method. Parameters ---------- CCT : numeric or array_like Correlated colour temperature :math:`T_{cp}`. Returns ------- ndarray *CIE xy* chromaticity coordinates. Raises ------ ValueError If the correlated colour temperature is not in appropriate domain. References ---------- :cite:`Kang2002a` Examples -------- >>> CCT_to_xy_Kang2002(6504.38938305) # doctest: +ELLIPSIS array([ 0.313426 ..., 0.3235959...]) """ CCT = as_float_array(CCT) if np.any(CCT[np.asarray(np.logical_or(CCT < 1667, CCT > 25000))]): usage_warning(('Correlated colour temperature must be in domain ' '[1667, 25000], unpredictable results may occur!')) x = np.where( CCT <= 4000, -0.2661239 * 10 ** 9 / CCT ** 3 - 0.2343589 * 10 ** 6 / CCT ** 2 + 0.8776956 * 10 ** 3 / CCT + 0.179910, -3.0258469 * 10 ** 9 / CCT ** 3 + 2.1070379 * 10 ** 6 / CCT ** 2 + 0.2226347 * 10 ** 3 / CCT + 0.24039, ) cnd_l = [CCT <= 2222, np.logical_and(CCT > 2222, CCT <= 4000), CCT > 4000] i = -1.1063814 * x ** 3 - 1.34811020 * x ** 2 + 2.18555832 * x - 0.20219683 j = -0.9549476 * x ** 3 - 1.37418593 * x ** 2 + 2.09137015 * x - 0.16748867 k = 3.0817580 * x ** 3 - 5.8733867 * x ** 2 + 3.75112997 * x - 0.37001483 y = np.select(cnd_l, [i, j, k]) xy = tstack([x, y]) return xy
cb9462e2b38bf5c55e7d7984632923ba9029e1fb
3,643,089
def tel_information(tel_number):
    """
    Check a phone number and return a dictionary holding its validation
    result and its operator. If the number is not valid, it returns
    validation = 'False' and operator = 'None'.
    """
    validation = is_valid(tel_number)
    operator = tel_operator(tel_number)
    info_dict = {'validation': validation, 'operator': operator}
    return info_dict
b68fe615a3adf5e8a7ac8528f4b89ba2d85b4067
3,643,090
import pathlib
from typing import Dict
from typing import Any
import yaml
import json
def load_file(file_name: pathlib.Path) -> Dict[str, Any]:
    """
    Load JSON or YAML file content into a dict.

    This is not intended to be the default load mechanism. It should only be
    used when the OSCAL object type is unknown in the context the user is in.
    """
    # to_content_type raises for unsupported suffixes, so YAML/JSON is exhaustive here
    content_type = FileContentType.to_content_type(file_name.suffix)
    with file_name.open('r', encoding=const.FILE_ENCODING) as f:
        if content_type == FileContentType.YAML:
            return yaml.load(f, yaml.FullLoader)
        elif content_type == FileContentType.JSON:
            return json.load(f)
c042d9e94953c2130971fe6ebf4774cd31556256
3,643,091
from tests.test_plugins.documentations_plugin import DocumentPlugin as _DocumentPlugin


def DocumentPlugin():
    """
    :return: document plugin class
    """
    # the import is aliased so this factory function doesn't shadow the class it returns
    return _DocumentPlugin
8cbcc4eb3ee58236f9fbf861a6e33a696db2ddff
3,643,092
import os

import numpy as np


def read_in(file_index, normalized):
    """
    Reads in a file and can toggle between normalized and original files
    :param file_index: patient number as string
    :param normalized: flag that selects the normalized file when 1, the original file otherwise
    :return: returns npy array of patient data across 4 leads
    """
    if normalized == 1:
        data = np.load(os.path.join("Working_Data", "Normalized_Fixed_Dim_HBs_Idx" + file_index + ".npy"))
    else:
        data = np.load(os.path.join("Working_Data", "Fixed_Dim_HBs_Idx" + file_index + ".npy"))
    return data
14690a57e6d1287ebc400a6dab3d89438febf694
3,643,093
import re


def remove_extended(text):
    """
    Remove Latin-1 Supplement characters.
    https://en.wikipedia.org/wiki/Latin-1_Supplement_(Unicode_block)

    Note: the Chinese punctuation removal below is currently disabled.
    """
    # latin supplement: \u00A0-\u00FF
    # notice: nbsp is removed here
    lsp_pattern = re.compile(r'[\x80-\xFF]')
    text = lsp_pattern.sub('', text)
    # chinese special characters (disabled)
    # chc_pattern = re.compile(r'[\r\t\n\.\!\/_,$%^*(+\"\')]|[+——()?【】“”!,。?、~@#¥%……&*()]')
    # text = chc_pattern.sub('', text)
    return text
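# Usage sketch: strips Latin-1 Supplement characters such as 'é' and nbsp.
print(remove_extended('caf\xe9 au lait\xa0'))  # 'caf au lait'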
52d0f5082b519d06f7dd20ba3d755790b1f3166d
3,643,094
def appointment_letter(request, tid): """Display the appointment letter.""" paf = get_object_or_404(Operation, pk=tid) return render( request, 'transaction/appointment_letter.html', {'paf': paf}, )
765115cb98e4b99cdff1ad2ad010d635eabf4103
3,643,095
import itertools
import random
from typing import Iterable


def balance_targets(sentences: Iterable[Sentence], method: str = "downsample_o_cat", shuffle=True) \
        -> Iterable[Sentence]:
    """
    Oversamples and/or undersamples training sentences by target label. This is useful for shallow linear
    classifiers, which are prone to simply overfitting the most frequent category.
    See the source code for documentation of each resampling method's logic.

    :param shuffle: whether to shuffle the output
    :param sentences: sentences to resample
    :param method: resample method, one of {downsample_o_cat, downsample_o_pzk_cats, all_upsampled, remove_o_cat}
    :return: resampled, possibly shuffled input sentences
    """
    # take the second-top count from categories apart from "Other"
    targets = [s.label for s in sentences]
    second_top_count = sorted([sum([target == cat for target in targets]) for cat in set(targets) - {"O"}])[-2]
    if method == "downsample_o_cat":
        # downsample "other" category to the second-most-occurring category count
        out_sentences = list((random.sample([s for s in sentences if s.label == "O"], second_top_count) +
                              [s for s in sentences if s.label != "O"]))
    elif method == "downsample_o_pzk_cats":
        # downsample "other" + "P_ZK" (experience description) categories to the same count
        out_sentences = list((random.sample([s for s in sentences if s.label == "O"], second_top_count) +
                              [s for s in sentences if s.label != "O"]))
        out_sentences = list((random.sample([s for s in out_sentences if s.label == "P_ZK"], second_top_count) +
                              [s for s in out_sentences if s.label != "P_ZK"]))
    elif method == "all_upsampled":
        # resample every category to the same count (second_top_count computed above)
        out_sentences = list(itertools.chain(*[random.choices([s for s in sentences if s.label == cat],
                                                              k=second_top_count)
                                               for cat in set(targets)]))
    elif method == "remove_o_cat":
        # completely remove sentences of "other" category
        out_sentences = [s for s in sentences if s.label != "O"]
    else:
        # materialize as a list so shuffling below works on arbitrary iterables
        out_sentences = list(sentences)

    if shuffle:
        # random shuffle output sentences
        random.shuffle(out_sentences)

    return out_sentences
2d0b5736bcfeb6e7b2566791dba4d74ac3c84456
3,643,096
import tensorflow as tf

# NOTE (assumption): `cross_entropy` is defined elsewhere in the original module;
# it must return per-example losses (e.g. a BinaryCrossentropy with reduction=NONE),
# since tf.nn.compute_average_loss expects unreduced losses.


def discriminator_loss(real_output, fake_output, batch_size):
    """
    Computes the discriminator loss after training with HR & fake images.

    :param real_output: Discriminator output on the real dataset (HR images).
    :param fake_output: Discriminator output on the fake dataset (SR images).
    :param batch_size: Batch size.
    :return: Discriminator loss.
    """
    real_loss = tf.nn.compute_average_loss(cross_entropy(tf.ones_like(real_output), real_output),
                                           global_batch_size=batch_size)
    fake_loss = tf.nn.compute_average_loss(cross_entropy(tf.zeros_like(fake_output), fake_output),
                                           global_batch_size=batch_size)
    total_loss = real_loss + fake_loss
    return total_loss
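# Usage sketch under the assumption stated above: a no-reduction binary
# cross-entropy stands in for the module-level `cross_entropy`, and random
# tensors stand in for discriminator outputs.
cross_entropy = tf.keras.losses.BinaryCrossentropy(
    reduction=tf.keras.losses.Reduction.NONE)
real_output = tf.random.uniform((8, 1))
fake_output = tf.random.uniform((8, 1))
print(discriminator_loss(real_output, fake_output, batch_size=8))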
ade81d34b80226a8905d64249187f35c73d496ee
3,643,097
import numpy as np


def NNx(time, IBI, ibimultiplier=1000, x=50):
    """ computes Heart Rate Variability metrics NNx and pNNx

    Args:
        time (pandas.DataFrame column or pandas series): time column (unused, kept for API symmetry)
        IBI (pandas.DataFrame column or pandas series): column with inter-beat intervals
        ibimultiplier (IntegerType): default = 1000; transforms IBI to milliseconds. If data is already in ms, set as 1
        x (IntegerType): default = 50; the threshold in ms that successive heartbeat intervals must exceed

    Returns:
        NNx (FloatType): the number of times successive heartbeat intervals exceed x ms
        pNNx (FloatType): the proportion of NNx divided by the total number of NN (R-R) intervals
    """
    ibi = IBI * ibimultiplier
    differences = abs(np.diff(ibi))
    n = np.sum(differences > x)
    p = (n / len(differences)) * 100
    return (round(n * 10) / 10), (round(p * 10) / 10)
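# Usage sketch with synthetic inter-beat intervals in seconds
# (ibimultiplier=1000 converts them to ms):
t = np.arange(5)
ibi = np.array([0.80, 0.86, 0.84, 0.95, 0.90])  # seconds
nn50, pnn50 = NNx(t, ibi, ibimultiplier=1000, x=50)
print(nn50, pnn50)  # 2.0 50.0 -- two of four successive differences exceed 50 ms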
94f7f7ec732532cddfe5c29a273af479733e4ced
3,643,098
import datetime

# TZ_GMT is defined elsewhere in the original module; UTC is assumed here,
# matching the note in the docstring below.
TZ_GMT = datetime.timezone.utc


def datetimeobj_YmdHMS(value):
    """Convert timestamp string to a datetime object.

    Timestamp strings like '20130618120000' are able to be converted by
    this function.

    Args:
        value: A timestamp string in the format '%Y%m%d%H%M%S'.

    Returns:
        A datetime object.

    Raises:
        ValueError: If timestamp is invalid.

    Note:
        The timezone is assumed to be UTC/GMT.
    """
    i = int(value)
    S = i
    M = S // 100
    H = M // 100
    d = H // 100
    m = d // 100
    Y = m // 100
    return datetime.datetime(
        Y % 10000,
        m % 100,
        d % 100,
        H % 100,
        M % 100,
        S % 100,
        tzinfo=TZ_GMT
    )
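# Usage sketch: parse a compact UTC timestamp string.
print(datetimeobj_YmdHMS('20130618120000'))
# 2013-06-18 12:00:00+00:00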
d1f63b1e50f278bd4dcea78feea8942bc1112c6f
3,643,099