content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def gen_api_url(endpoint):
    """Return the absolute Wger API URL for the given endpoint path."""
    # type: (str) -> str
    api_base = WGER["host_name"] + WGER["api_ext"]
    return api_base + endpoint
70623f9130b4dbde277a8c15b3f43a5168e4e487
24,600
from typing import Literal


def create_inputs(
    data: np.ndarray,
    input_type_name: Literal[
        "data",
        "data_one_column",
        "one_in_one_out_constant",
        "one_in_one_out",
        "one_in_batch_out",
        "sequentions",
    ],
    input_type_params: dict,
    mode: Literal["validate", "in_sample"] = "validate",
    predicts: int = 7,
    repeatit: int = 10,
    predicted_column_index: int = 0,
) -> Inputs:
    """Define configured inputs for various models.

    For some models use `make_sequences` function => check it's documentation how
    it works. For `data` input type name, just return data, if data_one_column,
    other columns are deleted, if something else, it create inputs called X and y
    - same convention as in sklearn plus x_input - input for predicted values.
    If constant in used name, it will insert bias 1 to every sample input.

    Args:
        data (np.ndarray): Time series data.
        input_type_name (str): Name of input. Choices are ['data',
            'data_one_column', 'one_in_one_out_constant', 'one_in_one_out',
            'one_in_batch_out', 'sequentions']. If 'sequentions', than input
            type params define produces inputs.
        input_type_params (dict): Dict of params used in make_sequences. E.g.
            {'n_steps_in': cls.default_n_steps_in, 'n_steps_out': cls.predicts,
            'default_other_columns_length': cls.default_other_columns_length,
            'constant': 0}. Used only if `input_type_params` is 'sequentions'.
        mode (Literal["validate", "in_sample"], optional): 'validate' or
            'in_sample'. All data are used but if 'in_sample', 'repeatit' number
            of in-sample inputs are used for test validation. If 'validate',
            just one last input (same like predict input is used). Test output
            is generated before this function in test / train split.
            Defaults to 'validate'.
        predicts (int, optional): Number of predicted values. Defaults to 7.
        repeatit (int, optional): Number of generated sequentions for testing.
            Defaults to 10.
        predicted_column_index (int, optional): Predicted column index.
            Defaults to 0.

    Returns:
        Inputs: model_train_input, model_predict_input, model_test_inputs.

    Example:
        >>> data = np.array(
        ...     [
        ...         [1, 2, 3, 4, 5, 6, 7, 8],
        ...         [9, 10, 11, 12, 13, 14, 15, 16],
        ...         [17, 18, 19, 20, 21, 22, 23, 24],
        ...     ]
        ... ).T
        ...
        >>> inputs = create_inputs(
        ...     data,
        ...     "sequentions",
        ...     {
        ...         "n_steps_in": 3,
        ...         "n_steps_out": 1,
        ...         "constant": 1,
        ...     },
        ... )
        >>> inputs[0][1]
        array([[4],
               [5],
               [6],
               [7],
               [8]])
        >>> inputs[1]
        array([[ 1.,  6.,  7.,  8., 14., 15., 16., 22., 23., 24.]])
    """
    # Take one input type, make all derived inputs (save memory, because only
    # slices) and create dictionary of inputs for one iteration
    used_sequences = {}

    if input_type_name == "data":
        used_sequences = data
    elif input_type_name == "data_one_column":
        used_sequences = data[:, predicted_column_index]
    else:
        # Single-column variants only look at the predicted column (kept 2-D
        # via the one-element slice); everything else gets the full matrix.
        if input_type_name in [
            "one_in_one_out_constant",
            "one_in_one_out",
            "one_in_batch_out",
        ]:
            used_sequences = data[:, predicted_column_index : predicted_column_index + 1]
        else:
            used_sequences = data

        used_sequences = make_sequences(
            used_sequences, predicts=predicts, repeatit=repeatit, **input_type_params
        )

    if isinstance(used_sequences, tuple):
        # make_sequences returned (X, y, x_input[, test_inputs]).
        model_train_input = (used_sequences[0], used_sequences[1])
        model_predict_input = used_sequences[2]
        if mode == "validate":
            model_test_inputs = [model_predict_input]
        else:
            model_test_inputs = used_sequences[3]
    else:
        # Raw array pass-through: train and predict inputs are the same object.
        model_train_input = model_predict_input = used_sequences
        if mode == "validate":
            model_test_inputs = [model_predict_input]
        else:
            # Build `repeatit` expanding in-sample windows, each leaving room
            # for the final `predicts` horizon.
            model_test_inputs = []
            if used_sequences.ndim == 1:
                for i in range(repeatit):
                    model_test_inputs.append(used_sequences[: -predicts - repeatit + i + 1])
            else:
                for i in range(repeatit):
                    model_test_inputs.append(used_sequences[:, : -predicts - repeatit + i + 1])

    return Inputs(model_train_input, model_predict_input, model_test_inputs)
bdb916915d561aa87435c2af5ea2f8b892f3c4b1
24,601
def CDLKICKINGBYLENGTH(df):
    """Kicking - bull/bear determined by the longer marubozu.

    Two-day candlestick pattern, similar to the Kicking pattern; the longer
    marubozu (candle without shadows) determines whether the signal is
    bullish or bearish.

    TA-Lib API: integer = CDLKICKINGBYLENGTH(open, high, low, close)

    :param df: DataFrame with 'open', 'high', 'low', 'close' columns.
    :return: integer series produced by talib.CDLKICKINGBYLENGTH.
    """
    # Renamed locals so the builtin `open` is not shadowed.
    open_ = df['open']
    high = df['high']
    low = df['low']
    close = df['close']
    return talib.CDLKICKINGBYLENGTH(open_, high, low, close)
e3a0c62627e8b4866580f232b5199570768c5197
24,602
def report_count_table_sort(s1, s2):
    """Comparator for report rows: orders by class, scientific name, size
    class and trophy (columns 0, 2, 3, 6), pushing empty values last."""
    for col in (0, 2, 3, 6):  # Class, species, size class and trophy.
        a, b = s1[col], s2[col]
        # Empty strings sort after non-empty ones.
        if a and not b:
            return -1
        if b and not a:
            return 1
        if a < b:
            return -1
        if a > b:
            return 1
    return 0
cf207e4e8f524e48f99422017b17e643b66a9e78
24,603
import urllib.parse


def serch_handler(msg):
    """Handle a music search request.

    Builds the Ximalaya track-search URL for the given keyword and fetches it.

    :param msg: search keyword entered by the user
    :return: response from get_url_response for the constructed search URL
    """
    url = 'https://www.ximalaya.com/revision/search?kw={0}&page=1&spellchecker=false&condition=relation&rows=50&device=iPhone&core=track&fq=category_id%3A2&paidFilter=false'
    # `import urllib` alone does not guarantee the `parse` submodule is
    # loaded in Python 3, hence the explicit `import urllib.parse` above.
    request_url = url.format(urllib.parse.quote(msg))  # URL-encode the keyword
    return get_url_response(request_url)
e91620dce4d4b6e7d79ab0e8cbf612322f0248b3
24,604
def random(start: int, end: int) -> int:
    """Return a random integer N with ``start <= N <= end``.

    Thin alias for ``random.randint(start, end)``.
    """
    return randint(start, end)
473f27e528d13cdb649b6e6d6e5ba32498a96cc1
24,605
def zero_check(grid):
    """Return the number of entries equal to 0 in a 2-d grid."""
    return sum(1 for row in grid for cell in row if cell == 0)
0d69a948eef96937f8a5033256c3c4d9f22ce14d
24,606
from typing import List


def get_channel_clips(channel: Channel) -> List[Clip]:
    """
    Uses a (blocking) HTTP request to retrieve Clip info for a specific channel.

    Pages through the GQL clips connection 100 at a time until the API stops
    returning edges or stops returning a pagination cursor.

    :param channel: A Channel object.
    :returns: A list of Clip objects.
    """
    clips = []
    pagination = ""
    while True:
        query = gql.GET_CHANNEL_CLIPS_QUERY.format(
            channel_id=channel.login, after=pagination, first=100
        )
        resp = gql.gql_query(query=query).json()
        resp = resp["data"]["user"]["clips"]
        if not resp or not resp["edges"]:
            break
        pagination = resp["edges"][-1]["cursor"]
        for clip in resp["edges"]:
            c = clip["node"]
            b = c["broadcaster"]
            w = c["curator"]
            g = c["game"]
            v = c["video"]
            # The API returns null nodes for deleted videos/curators/games;
            # fall back to sane defaults in each case.
            v_id = v["id"] if v is not None else "unknown"
            if w is not None:
                w_id, w_login, w_name = w["id"], w["login"], w["displayName"]
            else:
                # No curator recorded: credit the broadcaster instead.
                w_id, w_login, w_name = b["id"], b["login"], b["displayName"]
            g_id = g["id"] if g is not None else ""
            g_name = g["name"] if g is not None else ""
            clips.append(
                Clip(
                    id=c["id"],
                    slug=c["slug"],
                    created_at=c["createdAt"],
                    user_id=b["id"],
                    user_login=b["login"],
                    user_name=b["displayName"],
                    clipper_id=w_id,
                    clipper_login=w_login,
                    clipper_name=w_name,
                    game_id=g_id,
                    game_name=g_name,
                    title=c["title"],
                    view_count=c["viewCount"],
                    length=c["durationSeconds"],
                    offset=c["videoOffsetSeconds"] or 0,
                    video_id=v_id
                )
            )
        # Idiomatic falsy check (was `pagination == "" or pagination == None`);
        # an empty/missing cursor means the last page was reached.
        if not pagination:
            break
    return clips
7b5d5c19b0ac5f7ec665e8e453a529c5556cbadd
24,607
def tensorflow2xp(tf_tensor: "tf.Tensor") -> ArrayXd:  # pragma: no cover
    """Convert a Tensorflow tensor to numpy or cupy tensor."""
    assert_tensorflow_installed()
    # Device strings look like ".../device:GPU:0"; the last two ":"-separated
    # fields give the device type and ordinal.
    if tf_tensor.device is not None:
        _, device_type, device_num = tf_tensor.device.rsplit(":", 2)
    else:
        device_type = "CPU"
    if device_type == "CPU" or not has_cupy:
        # Host tensor (or cupy unavailable): materialize as a numpy array.
        return tf_tensor.numpy()
    else:
        # GPU tensor: hand the buffer to cupy via DLPack without copying.
        # NOTE(review): cupy.fromDlpack is the legacy spelling; newer cupy
        # prefers cupy.from_dlpack -- confirm the pinned cupy version.
        dlpack_tensor = tensorflow.experimental.dlpack.to_dlpack(tf_tensor)
        return cupy.fromDlpack(dlpack_tensor)
81bb8bea01e2e108c21022699005cb17cab12f0e
24,608
def __str__(self, indent=0, func_role="obj"):
    """
    our own __str__

    Renders the parsed docstring back to text: signature, index and summary
    first, then each section in a fixed order, finally indenting every line
    by `indent`.
    """
    out = []
    out += self._str_signature()
    out += self._str_index() + ['']
    out += self._str_summary()
    out += self._str_extended_summary()
    out += self._str_param_list('Parameters')
    out += self._str_options('Options')
    out += self._str_returns()
    # These sections all share the parameter-list layout.
    for param_list in ('Other Parameters', 'Raises', 'Warns'):
        out += self._str_param_list(param_list)
    out += self._str_warnings()
    out += self._str_see_also(func_role)  # func_role qualifies cross-references
    out += self._str_section('Notes')
    out += self._str_references()
    out += self._str_examples()
    # Member-list layout for class-level sections.
    for param_list in ('Attributes', 'Methods'):
        out += self._str_member_list(param_list)
    out = self._str_indent(out, indent)
    return '\n'.join(out)
3e55fccb76f8e200ef7e57366c2ccd9609975959
24,609
import os


def cmd():
    """Return the Juju command.

    There are times when multiple versions of Juju may be installed or
    unpacked requiring testing. This function leverages two environment
    variables to select the correct Juju binary, allowing easy switching
    from one version to another.

    JUJU_BINARY: The full path location to a Juju binary that may not be in
    $PATH.
    Example: /home/ubuntu/Downloads/juju/usr/lib/juju-2.1-rc1/bin/juju

    JUJU_VERSION: The full binary name of Juju that is in $PATH.
    Example: juju-2.1-rc1   The result of $(which juju-2.1-rc1)

    If neither of these environment variables is set, the default Juju
    binary in $PATH is selected.
    Example: juju   The result of $(which juju)

    @returns string Juju command
    """
    binary = os.environ.get('JUJU_BINARY')
    if binary:
        return binary
    version = os.environ.get('JUJU_VERSION')
    if version:
        # "2.1-rc1" -> "2.1": strip the pre-release tag, keep major.minor.
        major_minor = ".".join(version.split('-')[0].split('.')[:2])
        return 'juju-{}'.format(major_minor)
    return 'juju'
88e34da7be521dd62c438033ce54cd9736f4f7b5
24,610
def worker_years_6_download(request):
    """Textile-category communication: run the conditional query, then download
    the matching result file.

    Thin view wrapper that delegates entirely to the download module.

    :param request: HTTP request carrying the query conditions
    :return: response produced by download.worker_years_6_download
    """
    return download.worker_years_6_download(request)
2250828132dff114665c4d6b3c4c6eb2a21840ce
24,611
def set_autoscaler_location(autoscaler, is_regional, location):
    """Set the location-dependent fields of the autoscaler resource and
    return the corresponding location output spec."""
    prop = 'region' if is_regional else 'zone'
    autoscaler['type'] = REGIONAL_LOCAL_AUTOSCALER_TYPES[is_regional]
    autoscaler['properties'][prop] = location
    return {
        'name': prop,
        'value': '$(ref.{}.{})'.format(autoscaler['name'], prop)
    }
2e663856d4b4d9a3a477de9ce330cc5fe42502a1
24,612
from typing import Dict
from typing import Iterable


def _define_deformation_axes() -> Dict[str, Iterable[str]]:
    """Defines object sets for each axis of deformation.

    Ids are "<value><axis>"; only ids present in the full object set are kept
    (some ids are deliberately excluded from RGB_OBJECTS_FULL_SET).
    """
    return {
        axis: [
            f'{value}{axis}'
            for value in _DEFORMATION_VALUES
            if f'{value}{axis}' in RGB_OBJECTS_FULL_SET
        ]
        for axis in DEFORMATION_AXES
    }
d50384ca1261a312f48f9f6540252fa5f265cf80
24,613
def ldns_rr2buffer_wire_canonical(*args):
    """LDNS buffer.

    Thin SWIG-style wrapper: delegates directly to the C binding, which
    (per its name) serializes a resource record into a buffer in canonical
    wire format. All arguments are passed through unchanged.
    """
    return _ldns.ldns_rr2buffer_wire_canonical(*args)
5012bf22889ab0cd8375750bab8f54ca2ecb0da0
24,614
def get_stretch_factor(folder_name, indices, **kwargs):
    """
    Computes the stretch factor using the (16-50-84) percentile estimates
    of x0 - x1 for each restframe wavelength assuming orthogonality

    Parameters:
        folder_name: folder containing the individual likelihoods and their
            percentile estimates
        indices: which restframe wavelengths to use

    Returns:
        stretch_x0, stretch_x1: the stretch factors along x0 and x1
    """
    x0_cen = np.zeros(len(indices))
    x0_err = np.zeros(len(indices))
    x1_cen = np.zeros(len(indices))
    x1_err = np.zeros(len(indices))
    for i, index in enumerate(indices):
        # Each file holds, per parameter, the central value ([0]) and the
        # lower/upper percentile offsets ([1], [2]).
        _, est_x0, est_x1 = np.loadtxt(folder_name + \
            'xx_percentile_est_%d.dat' % index)
        x0_cen[i] = est_x0[0]
        x0_err[i] = (est_x0[1] + est_x0[2]) / 2.  # symmetrized error
        x1_cen[i] = est_x1[0]
        x1_err[i] = (est_x1[1] + est_x1[2]) / 2.
    res0 = get_corrfunc(x0_cen, x0_err, model=True, est=True,
                        sfx=folder_name + "x0_corr")
    res1 = get_corrfunc(x1_cen, x1_err, model=True, est=True,
                        sfx=folder_name + "x1_corr")
    # NOTE(review): the res[3]/res[1] ratio assumes a specific layout of
    # get_corrfunc's return tuple -- confirm against its definition.
    stretch_x0 = res0[3] / res0[1]
    stretch_x1 = res1[3] / res1[1]
    return stretch_x0, stretch_x1
17d61ba5205aada23b8f6b6a57c1920770a43408
24,615
def zonal_length(lat, nlon):
    """Length of a zonal 1/nlon segment at latitude `lat` (degrees),
    in the same units as R_earth."""
    segment_fraction = 2 * np.pi / nlon
    return R_earth * segment_fraction * np.cos(lat * np.pi / 180)
cf539ec73cae803a187d913c84ef7cb739cf8952
24,616
def map_request_attrs(name_request, **kwargs):
    """
    Used internally by map_request_data.

    Maps identity (id, NR number, source) and the entity / action / request
    type codes onto the given name_request model instance.

    :param name_request:
    :key nr_id: int
    :key nr_num: str
    :key request_entity: str
    :key request_action: str
    :key request_type: str
    :return: the updated name_request
    :raises MapRequestAttributesError: wraps any error raised while mapping
    """
    try:
        # Use class property values for the ID, NR Number and Source!
        # Do not map those values from the request if supplied, as they
        # should not be changed outside of the context of this application!
        nr_id = kwargs.get('nr_id')
        nr_num = kwargs.get('nr_num')
        name_request.id = nr_id
        name_request.nrNum = nr_num
        name_request._source = NAME_REQUEST_SOURCE

        # Default to whatever entity, action, or type already exists when mapping
        request_entity = kwargs.get('request_entity', name_request.entity_type_cd)
        request_action = kwargs.get('request_action', name_request.request_action_cd)
        request_type = kwargs.get('request_type', name_request.requestTypeCd)

        # Set action and entity
        if request_entity:
            name_request.entity_type_cd = request_entity
        if request_action:
            name_request.request_action_cd = request_action

        # TODO: Throw exceptions for invalid combos?
        if not request_type and request_entity and request_action:
            # If request_type is None (eg. no 'requestTypeCd' was provided in the payload)
            # but a request_entity (entity_type_cd) and a request_action (request_action_cd)
            # are supplied, use get_mapped_request_type to map the requestTypeCd in the model
            # using the action and entity type
            request_type = get_mapped_request_type(request_entity, request_action)
            name_request.requestTypeCd = request_type[0]
        elif request_type is not None:
            # If request_type is NOT None, (eg. 'requestTypeCd' was provided in the payload)
            # then use the provided value
            name_request.requestTypeCd = request_type
    except Exception as err:
        # Wrap everything in the module's mapping error so callers only need
        # to catch one exception type.
        raise MapRequestAttributesError(err)

    return name_request
16b4fbf2023b097956f3fb4a3dd0e3c1a7e1c800
24,617
def create_gw_response(app, wsgi_env):
    """Create an api gw response from a wsgi app and environ.

    Invokes the WSGI app with a capturing start_response, collects the body
    chunks, and returns a dict with 'body', 'statusCode' and 'headers'
    (defaulting Content-Length and Content-Type when the app omitted them).
    """
    captured = []   # [status, header_list] recorded by start_response
    chunks = []     # body pieces: write() calls plus the iterated body

    def start_response(status, headers, exc_info=None):
        captured[:] = [status, headers]
        return chunks.append  # the WSGI write() callable

    body_iter = app(wsgi_env, start_response)
    closer = getattr(body_iter, 'close', None)
    try:
        chunks.extend(list(body_iter))
    finally:
        if closer:
            closer()

    body = ''.join(chunks)
    headers = {name: value for name, value in captured[1]}
    headers.setdefault('Content-Length', str(len(body)))
    headers.setdefault('Content-Type', 'text/plain')

    return {
        'body': body,
        'statusCode': captured[0].split(' ', 1)[0],
        'headers': headers,
    }
73dd8459cbf9b79655137536ff42195ba62c1372
24,618
def Squeeze(parent, axis=-1, name=""):
    """\
    Dimension of size one is removed at the specified position (batch
    dimension is ignored).

    Thin wrapper that delegates directly to the pyeddl C++ binding.

    :param parent: parent layer
    :param axis: squeeze only along this dimension (default: -1, squeeze
        along all dimensions)
    :param name: name of the output layer
    :return: Squeeze layer
    """
    return _eddl.Squeeze(parent, axis, name)
3c8e6d6292e29d857412db1c74352b02aab99654
24,619
import attrs


def parse_namespace(tt):
    """
    <!ELEMENT NAMESPACE EMPTY>
    <!ATTLIST NAMESPACE %CIMName;>

    Validate that the tupletree node is a NAMESPACE element with a required
    NAME attribute (and nothing else), then return the NAME value.
    """
    check_node(tt, 'NAMESPACE', ['NAME'], [], [])
    # NOTE(review): `attrs(tt)` is called like a function although `attrs`
    # is imported as a module above -- presumably a local helper shadows it
    # elsewhere in the file; confirm.
    return attrs(tt)['NAME']
70a2f4e0ad0f8f98e38fde1892e9d40a34653af1
24,620
def clean(val, floor, ceiling):
    """Return `val` as a float when it is a sane RH value, else None.

    Values outside [floor, ceiling] or NaN are rejected; pint-style
    Quantity values are unwrapped to their magnitude.
    """
    if pd.isna(val) or not (floor <= val <= ceiling):
        return None
    if isinstance(val, munits.Quantity):
        return float(val.magnitude)
    return float(val)
6bf2822dc47a0b50cd88f05e2c127d97c5d71c0f
24,621
from typing import Union
from typing import List
from typing import Tuple


def count_swaps_in_row_order(row_order: Union[List[int], Tuple[int]]) -> int:
    """
    Counts the number of swaps in a row order.

    Args:
        row_order (Union[List[int], Tuple[int]]): A list or tuple of ints
            representing the order of rows (a permutation of range(len(...))).

    Returns:
        int: The minimum number of swaps it takes for a
            range(len(row_order)) to reach row_order.
    """
    # Work on a copy so the caller's sequence is never mutated (the previous
    # version sorted a list argument in place as a side effect).
    order = list(row_order)
    count = 0
    for i in range(len(order)):
        # Resolve the whole cycle at position i. A single `if`-guarded swap
        # (as before) undercounts cycles longer than 2 and could leave the
        # array unsorted, e.g. [1, 2, 3, 0] reported 2 instead of 3.
        while order[i] != i:
            j = order[i]
            order[i], order[j] = order[j], order[i]
            count += 1
    return count
c96fcb26fac03d252918f7cfa1dd3048eaf22320
24,622
def evaluate(ast, env):
    """Evaluate an Abstract Syntax Tree in the specified environment.

    Atoms (booleans, integers, strings) evaluate to themselves, symbols are
    looked up in `env`, and lists are dispatched on their head: special forms
    first, then arithmetic, then closure application.
    """
    print(ast)  # NOTE(review): debug print left in -- confirm it is wanted.
    # Self-evaluating atoms.
    if is_boolean(ast):
        return ast
    if is_integer(ast):
        return ast
    if is_string(ast):
        return ast
    # Symbols resolve through the environment.
    if is_symbol(ast):
        return env.lookup(ast)
    if is_list(ast):
        if len(ast) == 0:
            raise DiyLangError("Empty list")
        # --- special forms ---
        if ast[0] == "quote":
            if len(ast[1:]) != 1:
                raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
            return ast[1]  # unevaluated
        if ast[0] == "atom":
            if len(ast[1:]) != 1:
                raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
            result = evaluate(ast[1], env)
            return is_atom(result)
        if ast[0] == "eq":
            if len(ast[1:]) != 2:
                raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
            left = evaluate(ast[1], env)
            right = evaluate(ast[2], env)
            # Non-atoms are never eq, by definition.
            if not is_atom(left) or not is_atom(right):
                return False
            return left == right
        # --- binary arithmetic / comparison on integers ---
        if ast[0] in ["+", "-", "/", "*", "mod", ">"]:
            if len(ast[1:]) != 2:
                raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
            left = evaluate(ast[1], env)
            right = evaluate(ast[2], env)
            if not is_integer(left) or not is_integer(right):
                raise DiyLangError(f"{left} or {right} is not a number")
            if ast[0] == "+":
                return left + right
            if ast[0] == "-":
                return left - right
            if ast[0] == "/":
                return left // right  # integer division
            if ast[0] == "*":
                return left * right
            if ast[0] == "mod":
                return left % right
            if ast[0] == ">":
                return left > right
        if ast[0] == "if":
            if len(ast[1:]) != 3:
                raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
            predicate = evaluate(ast[1], env)
            # Only the chosen branch is evaluated.
            if predicate:
                return evaluate(ast[2], env)
            else:
                return evaluate(ast[3], env)
        if ast[0] == "define":
            if len(ast[1:]) != 2:
                raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
            left = ast[1]
            if not is_symbol(left):
                raise DiyLangError(f"{left} is not a symbol")
            right = evaluate(ast[2], env)
            env.set(left, right)
            return  # define has no value
        # --- list/string primitives ---
        if ast[0] == "cons":
            head = evaluate(ast[1], env)
            tail = evaluate(ast[2], env)
            if is_list(tail):
                return [head] + tail
            if is_string(tail):
                return String(head.val + tail.val)
            raise DiyLangError("Can't use cons on a non list/string")
        if ast[0] == "head":
            list_ = evaluate(ast[1], env)
            if is_list(list_):
                if len(list_) == 0:
                    raise DiyLangError("Can't use head on empty list")
                return list_[0]
            if is_string(list_):
                if len(list_.val) == 0:
                    raise DiyLangError("Can't use head on empty string")
                return String(list_.val[0])
            raise DiyLangError("Can't use head on a non list/string")
        if ast[0] == "tail":
            list_ = evaluate(ast[1], env)
            if is_list(list_):
                if len(list_) == 0:
                    raise DiyLangError("Can't use tail on empty list")
                return list_[1:]
            if is_string(list_):
                if len(list_.val) == 0:
                    raise DiyLangError("Can't use tail on empty string")
                return String(list_.val[1:])
            raise DiyLangError("Can't use tail on a non list/string")
        if ast[0] == "empty":
            list_ = evaluate(ast[1], env)
            if is_list(list_):
                return len(list_) == 0
            if is_string(list_):
                return len(list_.val) == 0
            raise DiyLangError("Can't use empty on a non list/string")
        if ast[0] == "cond":
            # First truthy condition wins; no match yields False.
            cases = ast[1]
            for (condition, value) in cases:
                if evaluate(condition, env):
                    return evaluate(value, env)
            return False
        if ast[0] == "let":
            # Sequential bindings: each value sees the previous bindings.
            new_env = env
            for (key, value) in ast[1]:
                evaluated_value = evaluate(value, new_env)
                new_env = new_env.extend({
                    key: evaluated_value
                })
            return evaluate(ast[2], new_env)
        if ast[0] == "defn":
            # Sugar for (define name (lambda params body)).
            return evaluate(["define", ast[1], ["lambda", ast[2], ast[3]]], env)
        if ast[0] == "lambda":
            if len(ast[1:]) != 2:
                raise DiyLangError(f"Wrong number of arguments in {ast[0]}")
            params = ast[1]
            if not is_list(params):
                raise DiyLangError(f"{params} is not a list")
            for param in params:
                if not is_symbol(param):
                    raise DiyLangError(f"{param} is not a symbol")
            body = ast[2]
            return Closure(env, params, body)
        # --- application ---
        # Direct closure in head position.
        if is_closure(ast[0]):
            closure = ast[0]
            args = ast[1:]
            return evaluate_closure(closure, args, env)
        # Head is itself an expression: evaluate it, then apply.
        if is_list(ast[0]):
            closure = evaluate(ast[0], env)
            args = ast[1:]
            return evaluate_closure(closure, args, env)
        # Otherwise the head must be a symbol bound to a closure.
        function_name = ast[0]
        if not is_symbol(function_name):
            raise DiyLangError(f"{function_name} is not a function")
        closure = env.lookup(function_name)
        if not is_closure(closure):
            raise DiyLangError(f"{closure} is not a function")
        args = ast[1:]
        return evaluate_closure(closure, args, env)
c9e5da8c9b073f72a2b27bfbb84a7939fbcec134
24,623
from datetime import datetime
import json


def load_data(
    assets: tp.Union[None, tp.List[tp.Union[str,dict]]] = None,
    min_date: tp.Union[str, datetime.date, None] = None,
    max_date: tp.Union[str, datetime.date, None] = None,
    dims: tp.Tuple[str, str] = (ds.TIME, ds.ASSET),
    forward_order: bool = True,
    tail: tp.Union[datetime.timedelta, int, float] = DEFAULT_TAIL,
) -> tp.Union[None, xr.DataArray]:
    """
    Loads index time series.

    :param assets: asset ids, or dicts with an 'id' key; None loads the full
        list for the date range
    :param min_date: inclusive start; defaults to max_date - tail
    :param max_date: inclusive end
    :param dims: dimension order of the returned array
    :param forward_order: if True, time axis is returned ascending
    :param tail: lookback used when min_date is not given
    :return: xr.DataArray of index values named "indexes"
    """
    track_event("DATA_INDEX_SERIES")
    max_date = parse_date(max_date)
    if min_date is not None:
        min_date = parse_date(min_date)
    else:
        # No explicit start: look back `tail` from the end date.
        min_date = max_date - parse_tail(tail)
    if assets is not None:
        # Normalize dict entries down to their ids.
        assets = [a['id'] if type(a) == dict else a for a in assets]
    if assets is None:
        assets_array = load_list(min_date, max_date)
        assets_arg = [i['id'] for i in assets_array]
    else:
        assets_arg = assets
    params = {"ids": assets_arg, "min_date": min_date.isoformat(), "max_date": max_date.isoformat()}
    params = json.dumps(params)
    params = params.encode()
    raw = request_with_retry("idx/data", params)
    if raw is None or len(raw) < 1:
        # Empty server response: build a 0x0 array with the right dims by
        # slicing away the single placeholder row/column.
        arr = xr.DataArray(
            [[np.nan]],
            dims=[ds.TIME, ds.ASSET],
            coords={
                ds.TIME: pd.DatetimeIndex([max_date]),
                ds.ASSET: ['ignore']
            }
        )[1:,1:]
    else:
        arr = xr.open_dataarray(raw, cache=True, decode_times=True)
        arr = arr.compute()
    if forward_order:
        # Server order is reversed; flip the time axis to ascending.
        arr = arr.sel(**{ds.TIME: slice(None, None, -1)})
    if assets is not None:
        # Deduplicate, sort, and align the array to exactly these assets.
        assets = list(set(assets))
        assets = sorted(assets)
        assets = xr.DataArray(assets, dims=[ds.ASSET], coords={ds.ASSET:assets})
        arr = arr.broadcast_like(assets).sel(asset=assets)
    arr = arr.dropna(ds.TIME, 'all')
    arr.name = "indexes"
    return arr.transpose(*dims)
83bd4221b92e2697bc79a416d0d5108fdb79ef27
24,624
def create_model(values):
    """Create the model based on the calculated values.

    Args:
        values (dict): values from the get_values_from_path function

    Raises:
        ValueError: if the loss function doesn't exist

    Returns:
        torch.nn.Module: model the network originally was trained with.
    """
    pretrain = get_model(
        values["model"],
        num_classes=values["embs"],
        in_channels=values["num_chan"])
    # Replace the backbone's output head with batch-norm over the embedding.
    pretrain.output = nn.BatchNorm1d(512)
    # Pick the classifier head matching the loss the model was trained with.
    if values["loss"] == "softmax":
        classifier = nn.Linear(values["embs"], values["num_cl"], bias=False)
        model = Model(pretrain, classifier)
    elif values["loss"] == "arcface":
        classifier = AAML(values["embs"], values["num_cl"])
        model = ModelArc(pretrain, classifier)
    elif values["loss"] == "circle":
        classifier = CircleLoss(values["embs"], values["num_cl"])
        model = ModelArc(pretrain, classifier)
    elif values["loss"] == "rbf":
        classifier = RBFClassifier(
            values["embs"], values["num_cl"], scale=3, gamma=1)
        model = Model(pretrain, classifier)
    else:
        raise ValueError("That loss function doesn't exist!")
    return model
9750fa13042572c8ca7a5f81ef73c87134a08431
24,625
def loss(logits, labels):
    """Calculates the loss from the logits and the labels.

    Args:
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size].

    Returns:
      loss: Loss tensor of type float.
    """
    # NOTE(review): positional (logits, labels) matches the legacy TF1 API;
    # current TensorFlow requires keyword arguments (labels=..., logits=...)
    # -- confirm the pinned TF version before upgrading.
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits, labels),
        name='xentropy_mean')
    return loss
5d86168b8cffba3feb822120fd112835593a8d36
24,626
import glob
import os


def old_get_obs_session(project=None, dss=None, date=None, path='proj'):
    """
    Provides project, station, year and DOY, asking as needed.

    It follows one of several possible paths to get to the session::

      proj - path through /usr/local/projects/<project>
      hdf5 - path through /usr/local/RA_data/HDF5
      fits - path through /usr/local/RA_data/FITS
      wvsr - path through /data

    @param project : optional name as defined in /usr/local/projects
    @type  project : str

    @param dss : optional station number
    @type  dss : int

    @param date : optional YYYY/DDD
    @type  date : str

    @return: project, DSS, year, DOY.
    """
    def get_directory(path):
        """List subdirectories of `path` and prompt the user to pick one.

        Returns the user's input string, or [] when `path` has no entries.
        """
        # only one trailing /
        path = path.rstrip('/')+"/*"
        logger.debug("get_obs_session:get_directory: from %s", path)
        names = glob.glob(path)
        if names:
            dirs = []
            for name in names:
                if os.path.isdir(name):
                    dirs.append(os.path.basename(name))
            dirs.sort()
            for name in dirs:
                print((name), end=' ')
            return input('\n>')
        else:
            return []

    def from_wvsr_dir():
        """ this needs to be completed and tested on crab14 or an auto host """
        session = get_directory(local_dirs.wvsr_dir)
        return session

    cwd = os.getcwd()  # restore the caller's working directory at the end
    # get the project
    if project:
        pass
    else:
        os.chdir(local_dirs.projects_dir)
        project = get_directory(local_dirs.projects_dir)
    logger.debug("from_wvsr_dir: project is %s", project)
    projectpath = local_dirs.projects_dir+project
    # get the station
    if path[:4].lower() == 'wvsr':
        # special call
        # NOTE(review): this only *prints* the call; from_wvsr_dir() is never
        # actually invoked -- looks unfinished, matching its docstring.
        print("from_wvsr_dir()")
    if path[:4].lower() == 'proj':
        os.chdir(projectpath+"/Observations/")
    elif path[:4].lower() == 'hdf5':
        os.chdir(local_dirs.hdf5_dir)
    elif path[:4].lower() == 'fits':
        os.chdir(local_dirs.fits_dir)
    # get the station
    if dss:
        pass
    else:
        # This seems odd but get_directory() needs '/' and int does not
        station = get_directory(os.getcwd()+"/").rstrip('/')
        dss = int(station[-2:])
    stationpath = os.getcwd()+"/dss"+str(dss)
    # get the date
    if date:
        items = date.split('/')
        year = int(items[0])
        DOY = int(items[1])
    else:
        # Interactively descend: pick the year, then the DOY inside it.
        year = int(get_directory(stationpath))
        yearpath = stationpath+"/"+str(year)
        DOY = int(get_directory(yearpath))
    os.chdir(cwd)
    return project, dss, year, DOY
2a2116b56ba2fa1dadca0564510be202246fc46e
24,627
def create_pattern_neighbors_ca2d(width, height, n_states=2):
    """
    Returns a list with the weights for 'neighbors' and 'center_idx'
    parameters of
    evodynamic.connection.cellular_automata.create_conn_matrix_ca1d(...).
    The weights are responsible to calculate an unique number for each
    different neighborhood pattern.

    Parameters
    ----------
    width : int
        Neighborhood width.
    height : int
        Neighborhood height.
    n_states : int
        Number of discrete state in a cell.

    Returns
    -------
    out1 : list
        List of weights of the neighbors.
    out2 : int
        Index of the center of the neighborhood.
    """
    # Powers of n_states give each cell in the neighborhood a unique place
    # value, so every distinct pattern maps to a distinct integer.
    weights = np.array([n_states ** p for p in range(width * height)])
    weights = weights.reshape(width, height)
    center_idx = [width // 2, height // 2]
    return weights, center_idx
53ce7cd0afb9f9b590754d299ba2d621489bc4e6
24,628
def by_colname_like(colname, colname_val):
    """
    Query to handle the cases in which somebody has the correct words within
    their query, but in the incorrect order (likely to be especially relevant
    for professors).

    NOTE(review): the SQL is built by string interpolation from user-supplied
    text -- this is SQL-injection prone; switch to parameterized queries if
    the DB driver allows it.
    """
    def like_clause_constructor(colname, colname_val):
        """ Helper function for constructing like clause. """
        # One case-insensitive LIKE per word, ANDed together, so the order of
        # the words in the user's query does not matter.
        like_list = colname_val.split(' ')
        like_unit = "lower({colname}) like lower('%{word}%') and "
        like_clause = ""
        for word in like_list:
            like_clause += like_unit.format(colname=colname, word=word)
        return like_clause

    return """
        select title, section, instructor, time, building, hours, interesting, recommend
        from {table_name}
        where {where_clause} recommend > 0
        limit 3
        """.format(where_clause=like_clause_constructor(colname=colname, colname_val=colname_val),
                   table_name=TABLE_NAME)
256f5e952fdff02f8de44c45b3283013547d0287
24,629
import json


def decode_classnames_json(preds, top=5):
    """
    Returns class code, class name and probability for each class amongst
    top=5 for each prediction in preds

    e.g.
    [[('n01871265', 'tusker', 0.69987053), ('n02504458', 'African_elephant', 0.18252705), ... ]]
    """
    if len(preds.shape) != 2 or preds.shape[1] != 1000:
        raise ValueError('`decode_classnames_json` expects '
                         'a batch of predictions '
                         '(i.e. a 2D array of shape (samples, 1000)). '
                         'Found array with shape: ' + str(preds.shape))
    with open('imagenet_class_index.json') as data_file:
        class_index = json.load(data_file)
    # Per row: indices of the `top` largest scores, highest first, each
    # mapped to (code, name, probability).
    return [
        [tuple(class_index[str(idx)]) + (pred[idx],)
         for idx in pred.argsort()[-top:][::-1]]
        for pred in preds
    ]
807bed051300801a5e6a92bbc96324a66050f6c0
24,630
import os
import json


def _GetSupplementalColumns(build_dir, supplemental_colummns_file_name):
    """Reads supplemental columns data from a file.

    Args:
      build_dir: Build dir name.
      supplemental_colummns_file_name: Name of a file which contains the
          supplemental columns data (in JSON format).

    Returns:
      A dict of supplemental data to send to the dashboard; empty if the
      file does not exist.
    """
    supplemental_columns = {}
    supplemental_columns_file = os.path.join(build_dir,
                                             results_dashboard.CACHE_DIR,
                                             supplemental_colummns_file_name)
    if os.path.exists(supplemental_columns_file):
        # `file()` was removed in Python 3 -- use open() via a context
        # manager so the handle is always closed.
        with open(supplemental_columns_file, 'r') as f:
            supplemental_columns = json.load(f)
    return supplemental_columns
f19c12a554ced4107a804badefe117165ef792d5
24,631
import math


def prime_decomposition(number):
    """Returns a dictionary with the prime decomposition of n.

    Keys are prime factors, values are their multiplicities.
    Numbers below 2 yield an empty dict.
    """
    decomposition = {}
    number = int(number)
    if number < 2:
        return decomposition
    gen = primes_gen()
    break_condition = int(math.sqrt(number))
    while number > 1:
        current_prime = next(gen)
        if current_prime > break_condition:
            # No prime factor <= sqrt(original) remains: the rest is prime.
            decomposition[number] = 1
            return decomposition
        while number % current_prime == 0:
            if current_prime in decomposition:
                decomposition[current_prime] += 1
            else:
                decomposition[current_prime] = 1
            # Floor division keeps `number` an int; the previous `/=` made it
            # a float (float dict keys, precision loss for large inputs).
            number //= current_prime
    return decomposition
67062c15676e02747385e64e2dc177ea95d48de1
24,632
from typing import Iterable
import os
import glob
import logging


def load_10x(
    celltype: str = "CD8_healthy", exclude_singles: bool = True
) -> pd.DataFrame:
    """
    Load 10x data. Columns of interest are TRA_aa and TRB_aa

    Reads every clonotype CSV for the given cell type under
    LOCAL_DATA_DIR/10x/<celltype>, splits the combined cdr3 strings into
    separate TRA/TRB columns (amino acid and nucleotide), optionally drops
    rows missing either chain, and concatenates everything into one frame.
    """
    def split_to_tra_trb(s: Iterable[str]):
        """Split into two lists of TRA and TRB"""
        # TODO this does NOT correctly handle cases where there are say
        # multiple TRA sequences in a single row
        tra_seqs, trb_seqs = [], []
        for entry in s:
            # Entries look like "TRA:<seq>;TRB:<seq>"; build a dict per row,
            # defaulting a missing chain to the empty string.
            sdict = dict([part.split(":") for part in entry.split(";")])
            tra = sdict["TRA"] if "TRA" in sdict else ""
            trb = sdict["TRB"] if "TRB" in sdict else ""
            tra_seqs.append(tra)
            trb_seqs.append(trb)
        return tra_seqs, trb_seqs

    dirname = os.path.join(LOCAL_DATA_DIR, "10x", celltype)
    assert os.path.isdir(dirname), f"Unrecognized celltype: {celltype}"
    if celltype == "CD8_healthy":
        fnames = glob.glob(
            os.path.join(dirname, "vdj_v1_hs_aggregated_donor*_clonotypes.csv")
        )
    else:
        fnames = glob.glob(os.path.join(dirname, "*_t_clonotypes.csv"))
    assert fnames
    fnames = sorted(fnames)  # deterministic concatenation order
    dfs = []
    for fname in fnames:
        df = pd.read_csv(fname)
        tra_seqs, trb_seqs = split_to_tra_trb(df["cdr3s_aa"])
        df["TRA_aa"] = tra_seqs
        df["TRB_aa"] = trb_seqs
        tra_nt, trb_nt = split_to_tra_trb(df["cdr3s_nt"])
        df["TRA_nt"] = tra_nt
        df["TRB_nt"] = trb_nt
        if exclude_singles:
            # "Singles" are rows where either chain is missing; drop them.
            is_single_idx = np.where(
                np.logical_or(df["TRA_aa"] == "", df["TRB_aa"] == "")
            )
            logging.info(
                f"Dropping {len(is_single_idx[0])} entries for unmatched TRA/TRB"
            )
            df.drop(index=is_single_idx[0], inplace=True)
        dfs.append(df)
    retval = pd.concat(dfs, axis=0)
    return retval
e40ee234d6d84ac5a61f13d8de3edbb12d932c6c
24,633
def levenshtein(s1, s2):
    """
    Levenstein distance, or edit distance, taken from Wikibooks:
    http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#Python

    Returns the minimum number of single-character insertions, deletions and
    substitutions needed to turn s1 into s2.
    """
    if len(s1) < len(s2):
        # Ensure s1 is the longer string so the row has length len(s2)+1.
        return levenshtein(s2, s1)
    if not s1:
        # Both strings are empty here (s2 is no longer than s1).
        return len(s2)

    # `xrange` was removed in Python 3; `range` is the lazy sequence now.
    previous_row = range(len(s2) + 1)
    for i, c1 in enumerate(s1):
        current_row = [i + 1]
        for j, c2 in enumerate(s2):
            insertions = previous_row[j + 1] + 1  # j+1 instead of j since previous_row and current_row are one character longer
            deletions = current_row[j] + 1        # than s2
            substitutions = previous_row[j] + (c1 != c2)
            current_row.append(min(insertions, deletions, substitutions))
        previous_row = current_row
    return previous_row[-1]
58ef88e60e454fda4b1850cc800f75a1d711a9af
24,634
from typing import Tuple
from typing import Optional

def configure_mpi_node() -> Tuple[RabbitConfig, Celery]:
    """Will configure and return a celery app targeting MPI mode nodes.

    Registers the ``comp.task.mpi`` pipeline task on the module-level MPI
    celery app, sets the boot mode to MPI, and returns the rabbit config
    together with the configured app.
    """
    log.info("Initializing celery app...")
    app = _celery_app_mpi

    # pylint: disable=unused-variable
    @app.task(
        name="comp.task.mpi",
        bind=True,
        autoretry_for=(Exception,),
        retry_kwargs={"max_retries": 3, "countdown": 2},
        on_failure=_on_task_failure_handler,
        on_success=_on_task_success_handler,
        track_started=True,
    )
    def pipeline(
        self, user_id: str, project_id: str, node_id: Optional[str] = None
    ) -> None:
        # Delegate the actual work to the dispatcher shared by all boot modes.
        shared_task_dispatch(self, user_id, project_id, node_id)

    set_boot_mode(BootMode.MPI)
    log.info("Initialized celery app in %s", get_boot_mode())
    return (_rabbit_config, app)
bbc1c04ac8372ff8f5478d39d3f210e14b284c51
24,635
def customizations(record):
    """
    Use some functions delivered by the library

    @type record: record
    @param record: a record
    @rtype: record
    @returns: -- customized record
    """
    # BUG FIX: the original executed ``record = type(record)``, which replaced
    # the record with its class object before passing it on -- removed.
    # record = author(record)
    record = convert_to_unicode(record)
    # record = editor(record)
    # record = journal(record)
    # record = keyword(record)
    # record = link(record)
    # record = page_double_hyphen(record)
    # record = doi(record)
    return record
32e47923e194a5fcb0540d9c2953be8d4dab019e
24,636
def enrichs_to_gv(G, enrich_fhs, ofh, **kwargs): """ Read the next line in each file handle in <enrich_fhs> and parse the significant GO:BP annotation in that line. Then, write the GO:BP annotations, along with the original graph, to a graphviz open file handle <ofh>. Parameters ---------- G : nx.Graph enrich_fhs : list of io-like GO:BP enrichment results from GProfiler, written by write_enrich ofh : io-like Returns ------- exhausted TODO ---- - handle case where any fh does not have next - this could have used the data from GProfiler python API directly to be faster but this may be more reusable later """ # parse annotation data to prepare graphviz "clusters" cluster_members = [] enrich_ind = 0 exhausted_inds = [] for i in range(len(enrich_fhs)): enrich_fh = enrich_fhs[i] line = None try: line = enrich_fh.__next__().rstrip() except StopIteration as err: exhausted_inds.append(i) if line is not None: words = line.split('\t') p_val = float(words[FIELD_TO_INDEX['p-value']]) t_name = words[FIELD_TO_INDEX['t name']] anno_hits = words[FIELD_TO_INDEX['Q&T list']].split(",") cluster = anno_hits label = "{}\\np = {}".format(t_name, p_val) tpl = (cluster, label) cluster_members.append(tpl) enrich_ind += 1 return exhausted_inds, cluster_members
f063f2f2d692a02c383fc25dfd21791f97b0629a
24,637
def _has_letter(pw):
    """
    Password must contain at least one alphabetic character.

    NOTE(review): the original docstring said "lowercase letter", but
    str.isalpha() accepts letters of any case -- confirm the intended policy.

    :param pw: password string
    :return: boolean
    """
    return any(character.isalpha() for character in pw)
2f8eea521e8ca88001b2ecc3bc2501af8b14bbc8
24,638
import math

def closest_power_of_two(n):
    """Returns the closest power of two (linearly) to n.

    See: http://mccormick.cx/news/entries/nearest-power-of-two

    Args:
        n: Value to find the closest power of two of.

    Returns:
        Closest power of two to "n".
    """
    # Round the base-2 logarithm to the nearest integer exponent.
    exponent = int(math.log(n, 2) + 0.5)
    return 2 ** exponent
50d78d2a6de4f689ce268a95df97aae72dbd81ac
24,639
def get_product(barcode, locale='world'):
    """
    Return information of a given product.

    :param barcode: product barcode used as the request parameter.
    :param locale: geography segment of the URL, defaults to 'world'.
    :return: whatever utils.fetch returns for the built product URL.
    """
    product_url = utils.build_url(
        geography=locale,
        service='api',
        resource_type='product',
        parameters=barcode,
        entity="pet",
    )
    return utils.fetch(product_url)
409cfd2702ee06bab3e02bb446f0ce9d7e284892
24,640
def score(y_true, y_score):
    """
    Evaluation metric: weighted sum of true-positive rates sampled at the
    false-positive rates 0.001, 0.005 and 0.01 (weights 0.4 / 0.3 / 0.3).
    Relies on roc_curve (sklearn) imported elsewhere in the file.
    """
    fpr, tpr, thresholds = roc_curve(y_true, y_score, pos_label = 1)
    # np.where(fpr >= t)[0][0] picks the first ROC point at or beyond each
    # target FPR. NOTE(review): this raises IndexError if no point reaches
    # the threshold -- confirm inputs always produce a full curve.
    score = 0.4 * tpr[np.where(fpr>=0.001)[0][0]] + \
            0.3 * tpr[np.where(fpr>=0.005)[0][0]] + \
            0.3 * tpr[np.where(fpr>=0.01)[0][0]]
    return score
b561e3cb3bd84d00c78dbd7e906e682c8758859d
24,641
def divisors(num):
    """
    Takes a number and returns all divisors of the number, ordered least to
    greatest.

    :param num: int (non-positive input yields an empty list, matching the
        original behavior)
    :return: list (int)
    """
    # The original shadowed the builtin ``list`` and kept a manual counter;
    # a comprehension over 1..num is the idiomatic equivalent.
    return [d for d in range(1, num + 1) if num % d == 0]
848ed77fa92ae1c55d90a5236f0d9db6ae2f377c
24,642
def files_page():
    """Displays a table of the user's files."""
    # Resolve the current user from the auth cookie; None means not logged in.
    user = utils.check_user(token=request.cookies.get("_auth_token"))
    if user is None:
        # NOTE(review): this returns (redirect_response, 303); the 303 status
        # is already set via code=303, so the trailing status looks redundant
        # -- confirm against the framework's (body, status) handling.
        return redirect(location="/api/login", code=303), 303
    # Render only files owned by this user that have not been soft-deleted.
    return render_template(template_name_or_list="home/files.html",
                           user=user,
                           files=utils.all(iterable=cache.files,
                                           condition=lambda file: file.owner.id == user.id and not file.deleted))
58e627297c2d7b881c18459dc47a012b016cee3d
24,643
def GetCodepage(language):
    """ Returns the codepage for the given |language|. """
    # _LANGUAGE_MAP (module-level) maps a language name to a tuple whose
    # first element is formatted as four lowercase hex digits
    # (e.g. 0x4e4 -> "04e4"). Raises KeyError for unknown languages.
    lang = _LANGUAGE_MAP[language]
    return "%04x" % lang[0]
7c84552d6b2f2747ee8365d89ba29bc7843054b7
24,644
def NumVisTerms(doc):
    """Number of visible terms on the page.

    ``doc`` is a 2-tuple whose second element is the term collection.
    """
    terms = doc[1]
    return len(terms)
a6b762f314732d90c2371adf9472cf80117adae5
24,645
def replace_inf_price_nb(prev_close: float, close: float, order: Order) -> Order:
    """Replace infinity price in an order.

    A positive (+inf) price is capped at the current close; any other price
    (-inf) is floored at the previous close. A fresh order is built via
    order_nb with every other field copied unchanged.

    NOTE(review): the test is ``> 0`` rather than an explicit isinf check, so
    any positive finite price is also replaced by ``close`` -- confirm callers
    only pass +/-inf here.
    """
    order_price = order.price
    if order_price > 0:
        order_price = close  # upper bound is close
    else:
        order_price = prev_close  # lower bound is prev close
    return order_nb(
        size=order.size,
        price=order_price,
        size_type=order.size_type,
        direction=order.direction,
        fees=order.fees,
        fixed_fees=order.fixed_fees,
        slippage=order.slippage,
        min_size=order.min_size,
        max_size=order.max_size,
        size_granularity=order.size_granularity,
        reject_prob=order.reject_prob,
        lock_cash=order.lock_cash,
        allow_partial=order.allow_partial,
        raise_reject=order.raise_reject,
        log=order.log
    )
6b3581e31d69236c950a3ad812bb95eebbedcf10
24,646
def get_trend(d):
    """
    Compute the per-column linear trend of frame `d`.

    A synthetic "minutes" column (0, 1, 2, ...) is appended and the OLS slope
    of each original column against it, cov(col, minutes) / var(minutes), is
    returned as a Series with columns renamed to "<col>_trend".
    """
    frame = d.reset_index(drop=True)
    frame["minutes"] = np.arange(frame.shape[0], dtype=np.float64)
    cov = frame.cov()
    slopes = (cov["minutes"] / cov.loc["minutes", "minutes"])[d.columns]
    return slopes.rename(lambda col: "_".join([col, "trend"]))
b649e60b8ef74b0a64ec935fde35271b68b0dad7
24,647
def getIpAddress():
    """Returns the IP address of the computer the client is running on, as it
    appears to the client. See also: system.net.getExternalIpAddress().

    Returns:
        str: Returns the IP address of the local machine, as it sees it.
        (This implementation is a stub that always reports loopback.)
    """
    loopback_address = "127.0.0.1"
    return loopback_address
d6aefaa4027a899344c762bc7df5ce40a5dbde4e
24,648
def line(x, y, weights=None, clip=0.25):
    """Fit a line with weighted least squares.

    Args:
        x (numpy.array): x-values; expected to carry the name "tau" once the
            constant column is added (see fit_params["tau"] below) -- confirm
            upstream.
        y (numpy.array): y-values
        weights (numpy.array, optional): per-point values; the WLS weight used
            is their reciprocal. Defaults to uniform weights.
        clip (float, optional): if strictly between 0 and 1, fraction of the
            data to fit; otherwise an absolute number of points.
            Defaults to 0.25.

    Returns:
        pandas.Series: fit parameters with "diffusion_constant" added
        (slope / 4).
    """
    if 0 < clip < 1:
        clip_int = int(len(x) * clip) - 1
    else:
        # BUG FIX: the original read ``int(clip_int)`` here, which raised
        # UnboundLocalError whenever clip was outside (0, 1).
        clip_int = int(clip)

    # clip data for fit to only use first part
    X = x[:clip_int]
    Y = y[:clip_int]

    # BUG FIX: ``if weights:`` is ambiguous for numpy arrays with more than
    # one element; test for None explicitly instead.
    if weights is not None:
        W = 1 / weights[:clip_int]
    else:
        W = np.ones((len(X)))

    # weighted LS
    X = sm.add_constant(X)
    wls_model = sm.WLS(Y, X, weights=W)
    fit_params = wls_model.fit().params
    fit_params["diffusion_constant"] = fit_params["tau"] / 2 / 2
    return fit_params
cfc794095e1b60f608b94d44480cb79ece0af653
24,649
def one_hot(data):
    """
    Using pandas to convert the 'data' into a one-hot encoding format.
    """
    # Dummy table: one row and one column per unique value of ``data``.
    one_hot_table = pd.get_dummies(data.unique())
    # For each element x, take the dummy column named x and mark where it
    # equals 1. NOTE(review): this relies on get_dummies() row order matching
    # data.unique() order -- confirm for the intended inputs.
    one_hot = data.apply(lambda x: one_hot_table[x] == 1).astype(int)
    return one_hot
fed9c171ae5b3bcdb78311afa47017c21e1c4b59
24,650
def update_set(j, n):
    """Computes the update set of the j-th orbital in n modes.

    Args:
        j (int) : the orbital index
        n (int) : the total number of modes

    Returns:
        Array of mode indexes (float dtype, as np.append promotes the empty
        float array). Odd ``n`` yields an empty set, as before.
    """
    indexes = np.array([])
    if n % 2 == 0:
        # Integer halving: the original used ``n / 2`` which, under Python 3,
        # turned every recursive size and offset into a float. The numeric
        # results are identical; this just keeps the arithmetic integral.
        half = n // 2
        if j < half:
            indexes = np.append(indexes, np.append(
                n - 1, update_set(j, half)))
        else:
            indexes = np.append(indexes, update_set(j - half, half) + half)
    return indexes
c1d91f245710b1e11aa7178db490f050827d5683
24,651
def chunklist(inlist: list, chunksize: int) -> list:
    """Split a list into chunks of at most ``chunksize`` elements.

    Keyword arguments:
    inlist -- list to chunk (anything else raises TypeError)
    chunksize -- number of elements in each chunk
    """
    if not isinstance(inlist, list):
        raise TypeError
    # https://www.geeksforgeeks.org/break-list-chunks-size-n-python/
    return [
        inlist[start:start + chunksize]
        for start in range(0, len(inlist), chunksize)
    ]
1351f0fa2ca208095a35ac0806a625f3227b24ef
24,652
def getLocation(seq, meifile, zones):
    """
    Given a sequence of notes and the corresponding MEI Document, calculates
    and returns the json formatted list of locations (box coordinates) to be
    stored for an instance of a pitch sequence in our CouchDB. If the
    sequence is contained in a single system, only one location will be
    stored. If the sequence spans two systems, a list of two locations will
    be stored.

    Relies on the module-level ``systemcache`` dict and ``findbyID`` helper.
    """
    ulys = []
    lrys = []
    twosystems = 0
    endofsystem = len(seq)-1
    # Cache the enclosing system (<sb>) of the first and last note.
    if seq[0].getId() not in systemcache:
        systemcache[seq[0].getId()] = meifile.lookBack(seq[0], "sb")
        # systemcache[seq[0]] = meifile.get_system(seq[0])
    if seq[endofsystem].getId() not in systemcache:
        systemcache[seq[endofsystem].getId()] = meifile.lookBack(seq[endofsystem], "sb")
        # systemcache[seq[endofsystem]] = meifile.get_system(seq[endofsystem])
    if systemcache[seq[0].getId()] != systemcache[seq[endofsystem].getId()]:
        # then the sequence spans two systems and we must store two seperate locations to highlight
        twosystems = 1
        for i in range(1 , len(seq)):
            if seq[i-1].getId() not in systemcache:
                systemcache[seq[i-1].getId()] = meifile.lookBack(seq[i-1], "sb")
            # NOTE(review): this membership test uses the note object itself,
            # while every other lookup keys on getId() -- looks like it should
            # be seq[i].getId(); confirm before changing.
            if seq[i] not in systemcache:
                systemcache[seq[i].getId()] = meifile.lookBack(seq[i], "sb")
            # find the last note on the first system and the first note on the second system
            if systemcache[seq[i-1].getId()] != systemcache[seq[i].getId()]:
                endofsystem = i  # this will be the index of the first note on second system
                # ulx1 = int(meifile.get_by_facs(seq[0].parent.parent.facs)[0].ulx)
                # lrx1 = int(meifile.get_by_facs(seq[i-1].parent.parent.facs)[0].lrx)
                # ulx2 = int(meifile.get_by_facs(seq[i].parent.parent.facs)[0].ulx)
                # lrx2 = int(meifile.get_by_facs(seq[-1].parent.parent.facs)[0].lrx)
                # Horizontal extents: first system runs seq[0]..seq[i-1],
                # second system runs seq[i]..seq[-1].
                ulx1 = int(findbyID(zones, seq[0].parent.parent.getAttribute("facs").value, meifile).getAttribute("ulx").value)
                lrx1 = int(findbyID(zones, seq[i-1].parent.parent.getAttribute("facs").value, meifile).getAttribute("lrx").value)
                ulx2 = int(findbyID(zones, seq[i].parent.parent.getAttribute("facs").value, meifile).getAttribute("ulx").value)
                lrx2 = int(findbyID(zones, seq[-1].parent.parent.getAttribute("facs").value, meifile).getAttribute("lrx").value)
    else:
        # the sequence is contained in one system and only one box needs to be highlighted
        ulx = int(findbyID(zones, seq[0].parent.parent.getAttribute("facs").value, meifile).getAttribute("ulx").value)
        lrx = int(findbyID(zones, seq[-1].parent.parent.getAttribute("facs").value, meifile).getAttribute("lrx").value)
        # ulx = int(meifile.get_by_facs(seq[0].parent.parent.facs)[0].ulx)
        # lrx = int(meifile.get_by_facs(seq[-1].parent.parent.facs)[0].lrx)
    # Vertical extents are collected per note across the whole sequence.
    for note in seq:
        ulys.append(int(findbyID(zones, note.parent.parent.getAttribute("facs").value, meifile).getAttribute("uly").value))
        lrys.append(int(findbyID(zones, note.parent.parent.getAttribute("facs").value, meifile).getAttribute("lry").value))
    if twosystems:
        # Split the vertical extents at the system break.
        uly1 = min(ulys[:endofsystem])
        uly2 = min(ulys[endofsystem:])
        lry1 = max(lrys[:endofsystem])
        lry2 = max(lrys[endofsystem:])
        return [{"ulx": int(ulx1), "uly": int(uly1), "height": abs(uly1 - lry1), "width": abs(ulx1 - lrx1)},
                {"ulx": int(ulx2), "uly": int(uly2), "height": abs(uly2 - lry2), "width": abs(ulx2 - lrx2)}]
    else:
        uly = min(ulys)
        lry = max(lrys)
        return [{"ulx": int(ulx), "uly": int(uly), "height": abs(uly - lry), "width": abs(ulx - lrx)}]
18297c74cb867e018e7a4f3147cdd50ba1eb8225
24,653
import warnings

def calculate_A0_moving_LE(psi_baseline, psi_goal_0, Au_baseline, Au_goal,
                           deltaz, c_baseline, l_LE, eps_LE):
    """Find the value for A_P0^c that has the same arc length for the first
    bay as for the parent.

    Relies on module-level names not visible here: dxi_u, calculate_c_baseline,
    c_P, psi_spars, quad (scipy.integrate) and fsolve (scipy.optimize).
    """
    def integrand(psi_baseline, Al, deltaz, c ):
        # Arc-length element of the upper surface, scaled by the chord c.
        return c*np.sqrt(1 + dxi_u(psi_baseline, Al, deltaz/c)**2)

    def equation(A0, L_baseline, Au_goal, deltaz):
        # Residual whose root is the A0 that preserves the first-bay length.
        # NOTE(review): mutates the caller's Au_goal list in place -- confirm
        # that side effect is intended.
        Au_goal[0] = A0
        c = calculate_c_baseline(c_P, Au_goal, Au_baseline, deltaz/c_P,
                                 l_LE, eps_LE, psi_spars[0])
        y, err = quad(integrand, 0, psi_goal_0, args=(Au_goal, deltaz, c))
        print('y', y, y - (1-eps_LE)*L_baseline, A0, c)
        return y - (1-eps_LE)*(L_baseline - c*l_LE)

    # Arc length of the parent's first bay.
    L_baseline, err = quad(integrand, 0, psi_baseline[0],
                           args=(Au_baseline, deltaz, c_baseline))
    with warnings.catch_warnings():
        # fsolve can emit convergence warnings; silence them here.
        warnings.simplefilter("ignore")
        y = fsolve(equation, Au_goal[0], args=(L_baseline, Au_goal, deltaz))
    return y[0]
9a01a8c02aa51db0a675e94bf32d6a4b26a13206
24,654
def create_train_val_set(x_train, one_hot_train_labels):
    """Split training data into a held-out validation set and a reduced
    training set.

    The first 1000 samples (and their labels) become the validation set;
    everything after index 1000 becomes the partial training set.

    Parameters
    ----------
    x_train : sequence
        Training inputs.
    one_hot_train_labels : sequence
        One-hot encoded labels aligned with ``x_train``.

    Returns
    -------
    tuple
        (x_val, partial_x_train, y_val, partial_y_train)
    """
    split_at = 1000
    x_val, partial_x_train = x_train[:split_at], x_train[split_at:]
    y_val, partial_y_train = (one_hot_train_labels[:split_at],
                              one_hot_train_labels[split_at:])
    return (x_val, partial_x_train, y_val, partial_y_train)
dd0b3ca06b3d8bdae8e75284b17eb60ba6bbe36b
24,655
import os
import shutil

def build_master_and_get_version():
    """Checks out the latest master build and creates a new binary.

    Clones (or updates) the clusterfuzz-tools repo under HOME, builds the
    clusterfuzz-ci pex with pants, copies it to BINARY_LOCATION, and returns
    the short (7-char) SHA of the built commit.
    """
    if not os.path.exists(TOOL_SOURCE):
        process.call(
            'git clone https://github.com/google/clusterfuzz-tools.git',
            cwd=HOME)
    process.call('git fetch', cwd=TOOL_SOURCE)
    # Force-checkout so any local modifications are discarded.
    process.call('git checkout origin/master -f', cwd=TOOL_SOURCE)
    process.call('./pants binary tool:clusterfuzz-ci', cwd=TOOL_SOURCE,
                 env={'HOME': HOME})
    # Replace any previously deployed binary.
    delete_if_exists(BINARY_LOCATION)
    shutil.copy(os.path.join(TOOL_SOURCE, 'dist', 'clusterfuzz-ci.pex'),
                BINARY_LOCATION)
    # The full SHA is too long and unpleasant to show in logs. So, we use the
    # first 7 characters of the SHA instead.
    return process.call(
        'git rev-parse HEAD', capture=True, cwd=TOOL_SOURCE)[1].strip()[:7]
040e1f78e80bb36b1a8323c7590a9db8bbde46e8
24,656
def get_adj_date(time, today):
    """Turn a relative posting-age string into an absolute date.

    Handles strings like "2 days ago", "3 weeks ago", "1 month ago" and the
    newer "Posted N <unit> ago" variant; "--missing--" (or any string with no
    recognized unit) maps to ``today`` itself.

    :param time: relative age string as scraped.
    :param today: reference datetime to subtract from.
    :return: (ISO date string, adjusted datetime) tuple.
    """
    # Days represented by one unit of the mentioned period
    # (months are approximated as 4 weeks).
    if 'month' in time:
        days_per_unit = 28
    elif 'week' in time:
        days_per_unit = 7
    elif 'day' in time:
        days_per_unit = 1
    else:
        days_per_unit = 0

    # Hack! updated from 01.12.2020
    # TODO: please remove this!!!
    # Some rows carry a "Posted " prefix; the count sits right after it.
    if time == '--missing--':
        units = 0
    elif 'Posted ' in time:
        units = int(time.split('Posted ')[1].split()[0])
    else:
        units = int(time.split(' ')[0])

    adjusted_date = today - timedelta(days=units * days_per_unit)
    return str(adjusted_date.date()), adjusted_date
930273bc313d241b7f0777290459195c5b4499d4
24,657
def update_dataset_temporal_attrs(dataset: xr.Dataset,
                                  update_existing: bool = False,
                                  in_place: bool = False) -> xr.Dataset:
    """
    Update temporal CF/THREDDS attributes of given *dataset*.

    :param dataset: The dataset.
    :param update_existing: If ``True``, any existing attributes will be updated.
    :param in_place: If ``True``, *dataset* will be modified in place and returned.
    :return: A new dataset, if *in_place* is ``False`` (default), else the
        passed and modified *dataset*.
    """
    # Thin wrapper: delegates to the module-level _update_dataset_attrs with
    # only the time-attribute descriptor (_TIME_ATTRS_DATA).
    return _update_dataset_attrs(dataset, [_TIME_ATTRS_DATA],
                                 update_existing=update_existing,
                                 in_place=in_place)
99bb1a638e275a4788e2bd99122b67d3f0d5b536
24,658
def py_cpu_nms(dets, thresh):
    """Pure Python NMS baseline.

    Args:
        dets: (N, 5) array of boxes as [x1, y1, x2, y2, score].
        thresh: IoU threshold; overlaps strictly above it are suppressed.

    Returns:
        list: indices of the kept boxes, ordered by descending score.
    """
    xs1, ys1 = dets[:, 0], dets[:, 1]
    xs2, ys2 = dets[:, 2], dets[:, 3]
    scores = dets[:, 4]

    # The +1 treats coordinates as inclusive pixel indices.
    areas = (xs2 - xs1 + 1) * (ys2 - ys1 + 1)
    remaining = scores.argsort()[::-1]

    keep = []
    while remaining.size > 0:
        best = remaining[0]
        keep.append(best)
        rest = remaining[1:]

        # Intersection of the best box with every remaining box.
        ix1 = np.maximum(xs1[best], xs1[rest])
        iy1 = np.maximum(ys1[best], ys1[rest])
        ix2 = np.minimum(xs2[best], xs2[rest])
        iy2 = np.minimum(ys2[best], ys2[rest])
        inter = (np.maximum(0.0, ix2 - ix1 + 1) *
                 np.maximum(0.0, iy2 - iy1 + 1))

        iou = inter / (areas[best] + areas[rest] - inter)
        # Keep only boxes that do not overlap the winner too much.
        remaining = rest[np.where(iou <= thresh)[0]]
    return keep
78d5b3c672142dc1861a2df11e6f1a4671467fd4
24,659
import itertools def _split_iterators(iterator, n=None): """Split itererator of tuples into multiple iterators. :param iterator: Iterator to be split. :param n: Amount of iterators it will be split in. toolz.peak can be used to determine this value, but that is not lazy. This is basically the same as x, y, z = zip(*a), however, this function is lazy. """ #if n is None: # item, iterator = cytoolz.peek(iterator) # n = len(item) iterators = itertools.tee(iterator, n) #iterators = ((sample[i] for sample in iterator) for i, iterator in enumerate(iterators)) # Above does not work?! out = list() out.append(s[0] for s in iterators[0]) out.append(s[1] for s in iterators[1]) out.append(s[2] for s in iterators[2]) iterators = out return iterators
25b9409941eaf958aef755c0124d4aee4a3a67e5
24,660
def get_league_listing(**kwargs):
    """
    Get a list of leagues.

    :param kwargs: forwarded verbatim to make_request (presumably API key,
        language, etc. -- confirm against the make_request helper).
    :return: result of the "GetLeaguelisting" request.
    """
    return make_request("GetLeaguelisting", **kwargs)
5b5fb9ee4f06a0ede684f6584fd3af0638c807a4
24,661
async def perhaps_this_is_it(
    disc_channel: disnake.TextChannel = commands.Param(lambda i: i.channel),
    large: int = commands.Param(0, large=True),
) -> PerhapsThis:
    """This description should not be shown

    Parameters
    ----------
    disc_channel: A channel which should default to the current one - uses the id
    large: A large number which defaults to 0 - divided by 2
    """
    # NOTE(review): the docstring above is presumably parsed by the command
    # framework for parameter descriptions (its first line says it "should not
    # be shown"), so it must not be reworded -- confirm before editing it.
    # Wrap the channel id and the halved number in the project's result type.
    return PerhapsThis(disc_channel.id, large / 2)
158d9d6b278c5142d13e392becd9df35c2844961
24,662
def maybe_setup_moe_params(model_p: InstantiableParams):
    """Convert a FeedforwardLayer to a MoE Layer for StackedTransformer.

    Copies the feed-forward sub-layer's configuration onto the MoE layer
    template when ``num_experts`` is non-zero, and returns the (possibly
    unwrapped) transformer block params.
    """
    # Unwrap the repeated-stack container so we configure the inner block.
    if model_p.cls == layers.StackedTransformerRepeated:
        model_p = model_p.block

    if model_p.num_experts == 0:
        return model_p

    ff_p = model_p.transformer_layer_params_tpl.tr_fflayer_tpl
    assert issubclass(ff_p.cls, layers.TransformerFeedForward)
    moe_p = model_p.moe_layer_tpl
    # Copy over the base params.
    base_layer.BaseLayer.copy_base_params(ff_p, moe_p)
    # Copy over other params.
    moe_p.name = ff_p.name
    moe_p.input_dims = ff_p.input_dims
    moe_p.hidden_dims = ff_p.hidden_dims
    moe_p.ln_tpl = ff_p.ln_tpl.Copy()
    moe_p.activation = ff_p.activation
    moe_p.relu_dropout_tpl = ff_p.relu_dropout_tpl.Copy()
    moe_p.relu_dropout_prob = ff_p.relu_dropout_prob
    moe_p.residual_dropout_tpl = ff_p.residual_dropout_tpl.Copy()
    moe_p.residual_dropout_prob = ff_p.residual_dropout_prob
    moe_p.add_skip_connection = ff_p.add_skip_connection
    moe_p.norm_policy = ff_p.norm_policy
    # BUG FIX: the original fell off the end here and returned None on the
    # MoE path, while both early exits return model_p.
    return model_p
2ba82eb85ca85f8c16b3ee5de2d8ac3edb90275a
24,663
import re

def verify_message(message):
    """Verifies that a message is valid, i.e. it looks like
    'daily-0400/20140207041736'.

    Returns the match object (truthy) on success, None otherwise.
    """
    pattern = r"^[a-z]+(-[0-9])?-([a-z]{3})?[0-9]+/[0-9]+"
    return re.match(pattern, message)
f25a37a5e3f076a647c0a03c26d8f2d2a8fd7b2e
24,664
def get_all_unicode_chars():
    """Return every Unicode character as a list, ordered by code point.

    chr() accepts exactly the code points 0 through 0x10FFFF, so the result
    holds 0x110000 single-character strings.
    """
    return [chr(code_point) for code_point in range(0x110000)]
da63b26dd082987937b17fdfffb1219726d9d2c6
24,665
def get_east_asian_width_property(value, binary=False):
    """Get `EAST ASIAN WIDTH` property.

    Normalizes *value* through the ``eastasianwidth`` alias table (handling a
    leading '^' negation prefix) and looks it up in either the ASCII or the
    full Unicode width table.
    """
    # Choose the ASCII-only or full Unicode table.
    obj = unidata.ascii_east_asian_width if binary else unidata.unicode_east_asian_width

    if value.startswith('^'):
        # Negated form: resolve the alias of the name after '^', re-prefix it.
        negated = value[1:]
        value = '^' + unidata.unicode_alias['eastasianwidth'].get(negated, negated)
    else:
        value = unidata.unicode_alias['eastasianwidth'].get(value, value)

    # KeyError here means the (normalized) value is not a valid property name.
    return obj[value]
9eb8f70229a6d53faae071b641a39b79d8807941
24,666
def ModifyListRequest(instance_ref, args, req):
    """Parse arguments and construct list backups request.

    Sets the request parent to the instance's resource name; when
    --database was supplied, also adds a filter restricting the listing to
    that database's backups. Returns the (mutated) request.
    """
    req.parent = instance_ref.RelativeName()
    if args.database:
        database_name = '{}/databases/{}'.format(
            instance_ref.RelativeName(), args.database)
        req.filter = 'database="{}"'.format(database_name)
    return req
fde5a06cde30ed1cf163299dc8ae5f0e826e3f9d
24,667
import sys
import locale

def output_encoding(outfile=None):
    """Determine the encoding to use for output written to `outfile` or stdout.

    Falls back from the target stream's encoding to the real stdout's
    encoding, and finally to the locale's preferred encoding.
    """
    target = sys.stdout if outfile is None else outfile
    return (
        getattr(target, "encoding", None)
        or getattr(sys.__stdout__, "encoding", None)
        or locale.getpreferredencoding()
    )
872f8147a139c3747dda31254cf0a31d397baad7
24,668
from datetime import datetime

def day_start(src_time):
    """Return midnight at the beginning of the day of the specified datetime."""
    return datetime(year=src_time.year, month=src_time.month, day=src_time.day)
2bcc7b136e5cb1e7929e6655daf67b07dbbaa542
24,669
def _construct_corrections_dict(file):
    """Construct a dictionary of corrections.

    Given the name of a .ifa corrections file, construct a dictionary where
    the keys are wavelengths (represented as integers) and the values are
    measures of the instrument sensitivity (represented as floats).

    Intensity data should be divided by the correction value corresponding
    to the wavelength at which it was collected.
    """
    str_file = resources.read_text(wrangling.fluorimeter.corrections, file)
    # Everything after the "[Data]" marker (6 chars long) is the data table.
    data = str_file[str_file.find("[Data]") + 6 :]
    data = [x for x in data.split("\n") if x != ""]
    corrections = {}
    for entry in data:
        # Each row is tab-separated: wavelength field, correction field.
        wavelength, correction = [
            x.strip() for x in entry.split("\t") if x != ""
        ]
        # [:-3] strips a 3-character suffix from the wavelength field
        # (presumably a unit like " nm" -- confirm against a sample file).
        corrections.update({int(wavelength[:-3]) : float(correction)})
    return corrections
972db28c3609357a0ce742bf6679fbb46b86ef7c
24,670
def get_activation_func(activation_label):
    """
    Returns the activation function given the label.

    Args:
        activation_label: Name of the function; one of 'sigmoid',
            'identity', 'relu' or 'tanh'.

    Raises:
        ValueError: if the label is not recognized.
    """
    if activation_label == 'sigmoid':
        return tf.nn.sigmoid
    if activation_label == 'identity':
        return tf.identity
    if activation_label == 'relu':
        return tf.nn.relu
    if activation_label == 'tanh':
        return tf.nn.tanh
    raise ValueError('Unknown activation function %s' % activation_label)
cfa1a46b9c40fe2680bfa49831ed09e222ef3335
24,671
def sim_share(
    df1,
    df2,
    group_pop_var1,
    total_pop_var1,
    group_pop_var2,
    total_pop_var2,
):
    """Simulate the spatial population distribution of a region using the CDF of a comparison region.

    For each spatial unit i in region 1, take the unit's percentile in the distribution, and swap
    the group share with the value of the corresponding percentile in region 2. The share is the
    minority population of unit i divided by total population of minority population. This approach
    will shift the total population of each unit without changing the regional proportion of each
    group

    Parameters
    ----------
    df1 : pandas.DataFrame or geopandas.GeoDataFrame
        dataframe for first dataset with columns holding group and total population counts
    df2 : pandas.DataFrame or geopandas.GeoDataFrame
        dataframe for second dataset with columns holding group and total population counts
    group_pop_var1 : str
        column holding population counts for group of interest on input df1
    total_pop_var1 : str
        column holding total population counts on input df1
    group_pop_var2 : str
        column holding population counts for group of interest on input df2
    total_pop_var2 : str
        column holding total population counts on input df2

    Returns
    -------
    two pandas.DataFrame
        dataframes with simulated population columns appended
    """
    # Validation/normalization delegated to the module-level helper.
    df1, df2 = _prepare_comparative_data(df1, df2, group_pop_var1, group_pop_var2,
                                         total_pop_var1, total_pop_var2)

    # Complement population: everyone not in the group of interest.
    df1["compl_pop_var"] = df1[total_pop_var1] - df1[group_pop_var1]
    df2["compl_pop_var"] = df2[total_pop_var2] - df2[group_pop_var2]

    # Unit-level share of each region's group / complement totals.
    df1["share"] = (df1[group_pop_var1] / df1[group_pop_var1].sum()).fillna(0)
    df2["share"] = (df2[group_pop_var2] / df2[group_pop_var2].sum()).fillna(0)
    df1["compl_share"] = (df1["compl_pop_var"] / df1["compl_pop_var"].sum()).fillna(0)
    df2["compl_share"] = (df2["compl_pop_var"] / df2["compl_pop_var"].sum()).fillna(0)

    # Rescale due to possibility of the summation of the counterfactual share values being grater or lower than 1
    # CT stands for Correction Term
    CT1_2_group = df1["share"].rank(pct=True).apply(df2["share"].quantile).sum()
    CT2_1_group = df2["share"].rank(pct=True).apply(df1["share"].quantile).sum()

    # Counterfactual group counts: map each unit's percentile onto the other
    # region's share distribution, normalize, and scale back to counts.
    df1["counterfactual_group_pop"] = (
        df1["share"].rank(pct=True).apply(df2["share"].quantile)
        / CT1_2_group
        * df1[group_pop_var1].sum()
    )
    df2["counterfactual_group_pop"] = (
        df2["share"].rank(pct=True).apply(df1["share"].quantile)
        / CT2_1_group
        * df2[group_pop_var2].sum()
    )

    # Rescale due to possibility of the summation of the counterfactual share values being grater or lower than 1
    # CT stands for Correction Term
    CT1_2_compl = (
        df1["compl_share"].rank(pct=True).apply(df2["compl_share"].quantile).sum()
    )
    CT2_1_compl = (
        df2["compl_share"].rank(pct=True).apply(df1["compl_share"].quantile).sum()
    )

    df1["counterfactual_compl_pop"] = (
        df1["compl_share"].rank(pct=True).apply(df2["compl_share"].quantile)
        / CT1_2_compl
        * df1["compl_pop_var"].sum()
    )
    df2["counterfactual_compl_pop"] = (
        df2["compl_share"].rank(pct=True).apply(df1["compl_share"].quantile)
        / CT2_1_compl
        * df2["compl_pop_var"].sum()
    )

    # Counterfactual total = group + complement counterfactuals.
    df1["counterfactual_total_pop"] = (
        df1["counterfactual_group_pop"] + df1["counterfactual_compl_pop"]
    )
    df2["counterfactual_total_pop"] = (
        df2["counterfactual_group_pop"] + df2["counterfactual_compl_pop"]
    )
    return df1.fillna(0), df2.fillna(0)
cc7857d6deb81e7224e4e21fe6908376c963169a
24,672
def fixextensions(peeps, picmap, basedir="."):
    """Replace image names with ones that actually exist in picmap.

    Args:
        peeps: list of rows; row 0 is a header. For data rows, index 2 holds
            an image file name like "name.ext".
        picmap: dict mapping a base name (no extension) to the real file name.
        basedir: unused; kept for interface compatibility.

    Returns:
        tuple: (fixed, missing) where ``fixed`` is the header plus every row
        whose image was found (with the file name replaced), and ``missing``
        lists the original indices of rows whose image was not found.
    """
    fixed = [peeps[0].copy()]
    missing = []
    for i in range(1, len(peeps)):
        name, _ext = peeps[i][2].split(".", 1)
        if name in picmap:
            row = peeps[i].copy()
            # BUG FIX: the original assigned to fixed[i][2], which points at
            # the wrong row (or raises IndexError) as soon as any earlier row
            # is missing, because ``fixed`` is shorter than ``peeps`` then.
            row[2] = picmap[name]
            fixed.append(row)
        else:
            missing.append(i)
    return fixed, missing
d2af911aacea80f7e25cbdde0f5dfad0f1757aee
24,673
def do_divide(data, interval):
    """
    Greedily compute an "optimal" segmentation of ``interval``.
    (Translated from the original Chinese docstring: "use a greedy algorithm
    to obtain the 'optimal' segmentation".)
    """
    category = []
    # divide_data (defined elsewhere) returns the best split point ``index``
    # of [interval[0], interval[1]] together with its chi-square statistic.
    p_value, chi2, index = divide_data(data, interval[0], interval[1])
    if chi2 < 15:
        # Split not significant enough -- keep this interval whole.
        # NOTE(review): 15 is a magic threshold; confirm its statistical origin.
        category.append(interval)
    else:
        # Recurse on both halves around the chosen split point.
        category += do_divide(data, [interval[0], index])
        category += do_divide(data, [index, interval[1]])
    return category
2e25f913c664dd1cc3d60e4c3f89146b81476e3b
24,674
import json

def get_config(key_path='/'):
    """
    Return (sub-)configuration stored in config file.

    Note that values may differ from the current ``CONFIG`` variable if it
    was manipulated directly.

    Parameters
    ----------
    key_path : str, optional
        ``'/'``-separated path to sub-configuration. Default is ``'/'``,
        which returns the full configuration dict.

    Returns
    -------
    sub_config
        (sub-)configuration, either a dict or a value
    """
    with open(CONFIG_FILENAME, 'r') as config_fp:
        node = json.load(config_fp)
    # Walk down the path, skipping empty segments (leading/trailing '/').
    for key in (part for part in key_path.split('/') if part != ''):
        node = node[key]
    return node
075b3cd021be67c5c2f23203236f839fc47a678b
24,675
import boto3, logging
from botocore.exceptions import ClientError

def find_cloudtrails(ocredentials, fRegion, fCloudTrailnames=None):
    """
    List or describe CloudTrail trails in one account/region.

    ocredentials is an object with the following structure:
        - ['AccessKeyId'] holds the AWS_ACCESS_KEY
        - ['SecretAccessKey'] holds the AWS_SECRET_ACCESS_KEY
        - ['SessionToken'] holds the AWS_SESSION_TOKEN
        - ['AccountNumber'] holds the account number
    fRegion = region
    fCloudTrailnames = list of CloudTrail names we're looking for
        (None returns all cloud trails)

    Returns a (response, trailname) tuple; with no names given, response is
    the full list of trail summary dicts (as returned by list_trails), and
    trailname is "Various". With names given, response is the describe_trails
    payload for the first name that resolves.
    """
    session_ct = boto3.Session(
        aws_access_key_id=ocredentials['AccessKeyId'],
        aws_secret_access_key=ocredentials['SecretAccessKey'],
        aws_session_token=ocredentials['SessionToken'],
        region_name=fRegion)
    client_ct = session_ct.client('cloudtrail')
    logging.info("Looking for CloudTrail trails in account %s from Region %s",
                 ocredentials['AccountNumber'], fRegion)
    if fCloudTrailnames is None:
        # Therefore - they're really looking for a list of trails
        trailname = "Various"
        try:
            response = client_ct.list_trails()
            fullresponse = response['Trails']
            # BUG FIX: the original re-issued list_trails() without passing
            # NextToken, so it re-fetched the same first page forever.
            while 'NextToken' in response:
                response = client_ct.list_trails(NextToken=response['NextToken'])
                fullresponse.extend(response['Trails'])
        except ClientError as my_Error:
            if str(my_Error).find("InvalidTrailNameException") > 0:
                logging.error("Bad CloudTrail name provided")
            # Assign unconditionally so the return below never hits an
            # unbound name on other ClientError types.
            fullresponse = trailname + " didn't work. Try Again"
        return (fullresponse, trailname)
    else:
        # TODO: This doesn't work... Needs to be fixed.
        # They've provided a list of trails and want specific info about them
        for trailname in fCloudTrailnames:
            try:
                response = client_ct.describe_trails(trailNameList=[trailname])
                if len(response['trailList']) > 0:
                    return (response, trailname)
            except ClientError as my_Error:
                if str(my_Error).find("InvalidTrailNameException") > 0:
                    logging.error("Bad CloudTrail name provided")
                response = trailname + " didn't work. Try Again"
                return (response, trailname)
d20d27b3c630c3c8c34a86e41ba05c28eeffa5a3
24,676
def evaluate(sess, logits, loss, labels, img_name, dataset):
    """
    Evaluates the network on the whole dataset.

    Args:
        sess: TF session
        logits: network logits
        loss: loss tensor
        labels: ground-truth label tensor
        img_name: tensor with the image filename prefix (for saving output)
        dataset: dataset wrapper exposing num_examples()

    Returns:
        tuple: (average loss, pixel accuracy, IoU accuracy, recall, precision)
    """
    # Confusion matrix accumulated over all examples; contiguous layout is
    # required by the cylib C extension.
    conf_mat = np.ascontiguousarray(
        np.zeros((FLAGS.num_classes, FLAGS.num_classes), dtype=np.uint64))
    loss_avg = 0
    for i in trange(dataset.num_examples()):
        #for i in trange(100):
        out_logits, gt_labels, loss_val, img_prefix = sess.run([logits, labels, loss, img_name])
        loss_avg += loss_val
        # Per-pixel class prediction (argmax over the channel axis).
        #net_labels = out_logits[0].argmax(2).astype(np.int32, copy=False)
        net_labels = out_logits[0].argmax(2).astype(np.int32)
        #gt_labels = gt_labels.astype(np.int32, copy=False)
        cylib.collect_confusion_matrix(net_labels.reshape(-1),
                                       gt_labels.reshape(-1), conf_mat)
        if FLAGS.draw_predictions:
            # Save a colorized prediction image next to the debug dir.
            img_prefix = img_prefix[0].decode("utf-8")
            save_path = FLAGS.debug_dir + '/val/' + img_prefix + '.png'
            eval_helper.draw_output(net_labels, CityscapesDataset.CLASS_INFO, save_path)
        #print(q_size)
    #print(conf_mat)
    print('')
    pixel_acc, iou_acc, recall, precision, _ = eval_helper.compute_errors(
        conf_mat, 'Validation', CityscapesDataset.CLASS_INFO, verbose=True)
    return loss_avg / dataset.num_examples(), pixel_acc, iou_acc, recall, precision
c8be93950e4f33eeaeea19cc6759cf919373abbf
24,677
def _get_options():
    """
    Function that aggregates the configs for sumo and returns them as a
    list of dicts.
    """
    # Preferred layout: hubblestack:returner:sumo holds one dict (or a list
    # of dicts), each providing sumo_nebula_return / proxy / timeout.
    if __mods__['config.get']('hubblestack:returner:sumo'):
        sumo_opts = []
        returner_opts = __mods__['config.get']('hubblestack:returner:sumo')
        if not isinstance(returner_opts, list):
            returner_opts = [returner_opts]
        for opt in returner_opts:
            processed = {'sumo_nebula_return': opt.get('sumo_nebula_return'),
                         'proxy': opt.get('proxy', {}),
                         'timeout': opt.get('timeout', 9.05)}
            sumo_opts.append(processed)
        return sumo_opts
    # Legacy fallback: individual keys. Returns None when even the mandatory
    # sumo_nebula_return key is missing.
    try:
        sumo_nebula_return = __mods__['config.get']('hubblestack:returner:sumo:sumo_nebula_return')
    except Exception:
        return None
    # NOTE(review): the fallback reads proxy/timeout from
    # 'hubblestack:nebula:returner:sumo:*' while the main key is
    # 'hubblestack:returner:sumo' -- confirm this path difference is intended.
    sumo_opts = {'sumo_nebula_return': sumo_nebula_return,
                 'proxy': __mods__['config.get']('hubblestack:nebula:returner:sumo:proxy', {}),
                 'timeout': __mods__['config.get']('hubblestack:nebula:returner:sumo:timeout', 9.05)}
    return [sumo_opts]
3d4d491b12f89501e7f5cdadda1d983676027367
24,678
from typing import Any
from typing import Optional

def convert_boolean(value: Any) -> Optional[bool]:
    """Convert a value from the ToonAPI to a boolean (None stays None)."""
    return None if value is None else bool(value)
d479898afe1bb8eaba3615a9e69a7a38637c6ec6
24,679
def Split4(thisBrep, cutters, normal, planView, intersectionTolerance, multiple=False):
    """
    Splits a Brep into pieces using a combination of curves, to be extruded,
    and Breps as cutters.

    Args:
        cutters (IEnumerable<GeometryBase>): The curves, surfaces, faces and
            Breps to be used as cutters. Any other geometry is ignored.
        normal (Vector3d): A construction plane normal, used in deciding how
            to extrude a curve into a cutter.
        planView (bool): Set True if the assume view is a plan, or parallel
            projection, view.
        intersectionTolerance (double): The tolerance with which to compute
            intersections.
        multiple (bool): When True, treats every argument as a parallel list
            and issues a batched request.

    Returns:
        Brep[]: A new array of Breps. This array can be empty.
    """
    url = "rhino/geometry/brep/split-brep_geometrybasearray_vector3d_bool_double"
    if multiple: url += "?multiple=true"
    args = [thisBrep, cutters, normal, planView, intersectionTolerance]
    # Batched mode: zip the parallel argument lists into per-call tuples.
    if multiple: args = list(zip(thisBrep, cutters, normal, planView, intersectionTolerance))
    response = Util.ComputeFetch(url, args)
    response = Util.DecodeToCommonObject(response)
    return response
74009e33a3da88c7d096e6835952014bc8b40ef9
24,680
def generate_passwords_brute_force(state):
    """
    String Based Generation: treat the counter as a number in base
    ``len(alphabet)`` and render it as a fixed-width password of length k.

    :param state: [k, counter] pair for the next item, or None to start over
    :return: (password, next_state)
    """
    k, counter = state if state is not None else (0, 0)

    # Render `counter` in the configured base, most significant digit first.
    digits = []
    remainder = counter
    while remainder > 0:
        remainder, digit = divmod(remainder, base)
        digits.append(alphabet[digit])
    password = ''.join(reversed(digits))

    # Left-pad with the zero symbol up to width k.
    password = alphabet[0] * (k - len(password)) + password

    # Once the largest k-wide password is reached, grow the width and reset.
    if password == alphabet[-1] * k:
        return password, [k + 1, 0]
    return password, [k, counter + 1]
11e503e53903c884545c2324e721ebcbad1eb7c2
24,681
from typing import Tuple

import ctypes


def tparse(instring: str, lenout: int = _default_len_out) -> Tuple[float, str]:
    """
    Parse a time string and return seconds past the J2000 epoch on a formal calendar.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/tparse_c.html

    :param instring: Input time string, UTC.
    :param lenout: Available space in output error message string.
    :return: Equivalent UTC seconds past J2000, Descriptive error message.
    """
    # Marshal the Python arguments into ctypes objects for the CSPICE call.
    errmsg_p = stypes.string_to_char_p(lenout)
    lenout_c = ctypes.c_int(lenout)
    instring_p = stypes.string_to_char_p(instring)
    sp2000_c = ctypes.c_double()
    libspice.tparse_c(instring_p, lenout_c, ctypes.byref(sp2000_c), errmsg_p)
    return sp2000_c.value, stypes.to_python_string(errmsg_p)
33b322062e7756d9bc1d728d9655adafd1c4d989
24,682
def align_decision_ref(id_human, title):
    """
    In German, decisions are either referred to as 'Beschluss' or
    'Entscheidung'. Align the term used in ``id_human`` with the term
    used in ``title``.
    """
    title_uses_beschluss = 'Beschluss' in title
    if title_uses_beschluss:
        return id_human
    return id_human.replace('Beschluss ', 'Entscheidung ')
ac4f584b8e008576816d9a49dba58bc9c9a6dbc4
24,683
def get_headers(soup):
    """get nutrient headers from the soup"""
    captions = []
    units = []
    footer = soup.find('tfoot')
    for cell in footer.findAll('td', {'class': 'nutrient-column'}):
        div = cell.find('div')
        # The div holds the unit; the text node just before it is the caption.
        units.append(div.text)
        captions.append(div.previous_sibling.strip())
    return {'captions': captions, 'units': units}
5e7772a8830271f800791c75ef7ceecc98aba2bb
24,684
def table2rank(table, transpose=False, is_large_value_high_performance=True, add_averaged_rank=False):
    """
    Transform a performance value table to a rank table.

    Ranking is competition-style: tied values all share the rank of the first
    member of the tie group, and the next distinct value resumes at its
    positional rank (e.g. values 5, 5, 5, 1 -> ranks 1, 1, 1, 4).

    :param table: pandas DataFrame or numpy array, the table with performance values.
        For a DataFrame whose first column has dtype ``object``, that column is
        treated as row labels and excluded from ranking.
    :param transpose: bool, whether to transpose table (default: False; the method is column and data set is row)
    :param is_large_value_high_performance: bool, whether a larger value has higher performance
    :param add_averaged_rank: bool, whether add averaged ranks after the last row/column
    :return: a rank table (numpy.array or pd.DataFrame, mirroring the input type)
    """
    table = table.copy()
    if isinstance(table, pd.DataFrame):
        column_name = table.columns.values
        if table.iloc[:, 0].dtype == 'object':
            # First column holds labels, not values; split it off.
            index_name = table.iloc[:, 0].values
            table = table.iloc[:, 1:]
        else:
            index_name = None
        data = table.values
    else:
        data = table
    if transpose:
        data = data.transpose()

    # Rank each row.
    rank_table = list()
    for row in data:
        if is_large_value_high_performance:
            order = np.argsort(-row)
        else:
            order = np.argsort(row)
        rank = np.zeros(len(order))
        for pos, col in enumerate(order):
            if pos > 0 and row[col] == row[order[pos - 1]]:
                # Tie: share the rank of the whole tie group, not just the
                # positional rank of the immediate predecessor. (The old
                # `pos - 1` form broke three-or-more-way ties.)
                rank[col] = rank[order[pos - 1]]
            else:
                rank[col] = pos + 1
        rank_table.append(rank)
    rank_table = np.asarray(rank_table)

    if add_averaged_rank:
        averaged_rank = [np.mean(rank_table[:, i]) for i in range(rank_table.shape[1])]
        rank_table = np.concatenate([rank_table, np.asarray([averaged_rank])])
    if transpose:
        rank_table = rank_table.transpose()

    if isinstance(table, pd.DataFrame):
        # Reconstruct the pandas table, re-attaching labels and the 'AR' row.
        if index_name is not None:
            if add_averaged_rank:
                if not transpose:
                    index_name = np.concatenate([index_name, np.array(['AR'])])
                else:
                    column_name = np.concatenate([column_name, np.asarray(['AR'])])
            rank_table = np.concatenate([index_name[:, np.newaxis], rank_table], axis=1)
        rank_table = pd.DataFrame(data=rank_table, columns=column_name)
    return rank_table
36b313b8c19f6767690b4ef6d76fcc4b5633865c
24,685
def F_z_i(z, t, r1, r2, A):
    """
    Function F for Newton's method.

    :param z: universal variable
    :param t: time of flight
    :param r1: first radius magnitude
    :param r2: second radius magnitude
    :param A: geometry constant of the transfer
    :return: F, the value of the function at z
    """
    mu = mu_Earth
    # Stumpff functions evaluated at z.
    C = c2(z)
    S = c3(z)
    y = r1 + r2 + A * (z * S - 1.0) / np.sqrt(C)
    return (y / C) ** 1.5 * S + A * np.sqrt(np.abs(y)) - np.sqrt(mu) * t
ca4af99f8722d8e932f58896120883f09e73fb1a
24,686
from tayph.vartests import typetest
import numpy as np
from astropy.stats import mad_std


def sigma_clip(array,nsigma=3.0,MAD=False):
    """This returns the n-sigma boundaries of an array, mainly used for scaling plots.

    Parameters
    ----------
    array : list, np.ndarray
        The array from which the n-sigma boundaries are required.
    nsigma : int, float
        The number of sigma's away from the mean that need to be provided.
    MAD : bool
        Use the MAD estimator of the standard deviation instead of the true
        standard deviation (works better in the presence of outliers).

    Returns
    -------
    vmin,vmax : float
        The bottom and top n-sigma boundaries of the input array.
    """
    typetest(array,[list,np.ndarray],'array in fun.sigma_clip()')
    typetest(nsigma,[int,float],'nsigma in fun.sigma_clip()')
    typetest(MAD,bool,'MAD in fun.sigma_clip()')
    # NaN-safe statistics: the data may contain gaps.
    center = np.nanmedian(array)
    spread = mad_std(array,ignore_nan=True) if MAD else np.nanstd(array)
    return center - nsigma*spread, center + nsigma*spread
e62e76c0a92dde4de324a31ecc03968da18de7d3
24,687
def points_distance(xyz_1, xyz_2):
    """
    Euclidean distance between two points, or row-wise distances between two
    arrays of points.

    :param xyz_1: numpy array of coordinates, shape (d,) or (n, d)
    :param xyz_2: numpy array of coordinates, broadcastable against xyz_1
    :return: scalar distance, or an array of per-row distances when xyz_1 is 2-D
    """
    squared_diff = (xyz_1 - xyz_2) ** 2
    # The branch is decided by the first argument's rank, mirroring callers
    # that pass either a single point or a batch of points.
    if xyz_1.ndim >= 2:
        return np.sqrt(np.sum(squared_diff, axis=1))
    return np.sqrt(np.sum(squared_diff))
0acc6bf45c03ed554cb13c4375095871dee482fb
24,688
from typing import Optional


def ensure_society(sess: SQLASession, name: str, description: str,
                   role_email: Optional[str] = None) -> Collect[Society]:
    """
    Register or update a society in the database.

    For existing societies, this will synchronise member relations with the
    given list of admins.
    """
    # EAFP: a KeyError from the lookup is the signal that the society does
    # not yet exist and must be created.
    try:
        society = get_society(name, sess)
    except KeyError:
        # Delegate to the creation sub-generator and unwrap the newly
        # created Society from the result record it returns.
        res_record = yield from _create_society(sess, name, description, role_email)
        society = res_record.value
    else:
        # Society already exists: yield the update step for the caller's
        # collector to apply (presumably the collector drives commits --
        # confirm against the Collect framework).
        yield _update_society(sess, society, description, role_email)
    return society
49700a80ab23b0f4211c8bf5f0bc2c1d68c2f1cb
24,689
def odd_numbers_list(n):
    """Return the list of the first ``n`` odd numbers (1, 3, 5, ...)."""
    return list(range(1, 2 * n, 2))
2066cf07e926e41d358be0012a7f2a248c5987a7
24,690
def domain_delete(domainName):  # noqa: E501
    """domain_delete

    Remove the domain # noqa: E501

    :param domainName:
    :type domainName: str

    :rtype: DefaultMessage
    """
    # Generated stub: the real implementation is supplied elsewhere.
    placeholder_response = 'do some magic!'
    return placeholder_response
a0865aa2ff4902ac5cf8a8c0ea9eb62e792af56b
24,691
from datetime import datetime, timedelta


def ParseDate(s):
    """
    ParseDate(s) -> datetime

    This function converts a string containing the subset of ISO8601 that can
    be represented with xs:dateTime into a datetime object. As such it's
    suitable for parsing Collada's <created> and <modified> elements.
    The date must be of the form
    '-'? yyyy '-' mm '-' dd 'T' hh ':' mm ':' ss ('.' s+)? (zzzzzz)?
    See http://www.w3.org/TR/xmlschema-2/#dateTime for more info on the
    various parts.

    return: A datetime or None if the string wasn't formatted correctly.
    """
    # Split the date (yyyy-mm-dd) and time by the "T" in the middle
    parts = s.split("T")
    if len(parts) != 2:
        return None
    date, time = parts

    # Parse the (optionally sign-prefixed) yyyy-mm-dd part
    yearMultiplier = 1
    if date.startswith("-"):
        yearMultiplier = -1
        date = date[1:]  # drop the sign so the "-" separators split cleanly
    dateParts = date.split("-")
    if len(dateParts) != 3:
        return None
    try:
        year = yearMultiplier * int(dateParts[0])
        month = int(dateParts[1])
        day = int(dateParts[2])
    except ValueError:
        return None

    # Split off the time zone designator: "Z", "+hh:mm" or "-hh:mm".
    # Per the grammar above the designator is optional; when absent the
    # offset is zero.
    timeZoneDelta = timedelta()
    timeZone = ""
    timeZoneDeltaModifier = 1
    if time.endswith("Z"):
        time = time[:-1]
    elif "+" in time:
        time, timeZone = time.split("+", 1)
    elif "-" in time:
        time, timeZone = time.rsplit("-", 1)
        timeZoneDeltaModifier = -1
    if timeZone:
        tzParts = timeZone.split(":")
        if len(tzParts) != 2:
            return None
        try:
            tzHours = int(tzParts[0])
            tzMinutes = int(tzParts[1])
        except ValueError:
            return None
        timeZoneDelta = timeZoneDeltaModifier * timedelta(hours=tzHours, minutes=tzMinutes)

    # Parse hh:mm:ss with optional fractional seconds; the decimal portion
    # is truncated, but it probably doesn't matter.
    timeParts = time.split(":")
    if len(timeParts) != 3:
        return None
    try:
        hours = int(timeParts[0])
        minutes = int(timeParts[1])
        seconds = int(float(timeParts[2]))
    except ValueError:
        return None

    # Out-of-range components (month 13, years datetime cannot represent,
    # ...) are formatting errors, not exceptions.
    try:
        return datetime(year, month, day, hours, minutes, seconds) - timeZoneDelta
    except ValueError:
        return None
d65e4bb51487d9cb22b910e3dc44e299882600b5
24,692
def kinetic_energy(atoms):
    """
    Returns the kinetic energy (Da*angs/ps^2) of the atoms.
    """
    total = 0.0
    for atom in atoms:
        speed = v3.mag(atom.vel)
        total += 0.5 * atom.mass * speed * speed
    return total
8615d61f30f5ded029d1c230346682a040d05e87
24,693
def boxes_intersect(boxes, box):
    """Determine whether a box intersects with any of the boxes listed"""
    x1, y1, x2, y2 = box
    # A box overlaps the collection iff any of its four corners lies inside
    # one of the listed boxes; any() short-circuits like the original chain.
    corners = ((x1, y1), (x1, y2), (x2, y1), (x2, y2))
    return any(in_box(boxes, cx, cy) for cx, cy in corners)
b2201e1501a7827b6db8ef63ebf468b3e1839800
24,694
def cumulative_mean_normalized_difference_function(df, n):
    """
    Compute cumulative mean normalized difference function (CMND).

    :param df: Difference function
    :param n: length of data
    :return: cumulative mean normalized difference function
    :rtype: list
    """
    tail = df[1:]
    # Vectorized normalization; index 0 is defined as 1 by convention.
    normalized = tail * np.arange(1, n) / np.cumsum(tail).astype(float)
    return np.insert(normalized, 0, 1)
974c0bdaab0e8872ef746839c8d973604eab6929
24,695
from typing import Iterable


def add_spaces_old(spaces: Iterable["zfit.Space"]):
    """Add two spaces and merge their limits if possible or return False.

    Args:
        spaces: Spaces to add; all must cover the same set of observables.

    Returns:
        Union[None, :py:class:`~zfit.Space`, bool]: the merged space, or
        False if the spaces do not share the same observables.

    Raises:
        LimitsIncompatibleError: if limits of the `spaces` cannot be merged because they overlap
    """
    spaces = convert_to_container(spaces)
    if not all(isinstance(space, ZfitSpace) for space in spaces):
        raise TypeError("Cannot only add type ZfitSpace")
    if len(spaces) <= 1:
        raise ValueError("Need at least two spaces to be added.")  # TODO: allow? usecase?

    # All spaces must cover the same observables (order-insensitive); a
    # mismatch makes the addition undefined and returns False.
    obs = frozenset(frozenset(space.obs) for space in spaces)

    if len(obs) != 1:
        return False

    # Reorder every space to the observable order of the first one so the
    # limit arrays below are comparable element-wise.
    obs1 = spaces[0].obs
    spaces = [space.with_obs(obs=obs1) if not space.obs == obs1 else space for space in spaces]

    if limits_overlap(spaces=spaces, allow_exact_match=True):
        raise LimitsIncompatibleError("Limits of spaces overlap, cannot merge spaces.")

    # Collect the union of all (lower, upper) limit pairs, dropping pairs
    # that duplicate an already-collected one (compared with np.allclose).
    lowers = []
    uppers = []
    for space in spaces:
        if not space.limits_are_set:
            continue
        for lower, upper in space:
            for other_lower, other_upper in zip(lowers, uppers):
                lower_same = np.allclose(lower, other_lower)
                upper_same = np.allclose(upper, other_upper)
                # Matching in only one bound would be a partial overlap,
                # which limits_overlap above should already have rejected.
                assert not lower_same ^ upper_same, "Bug, please report as issue. limits_overlap did not catch right."
                if lower_same and upper_same:
                    break
            else:
                lowers.append(lower)
                uppers.append(upper)
    lowers = tuple(lowers)
    uppers = tuple(uppers)
    # No space had limits set -> build the merged space without limits.
    if len(lowers) == 0:
        limits = None
    else:
        limits = lowers, uppers
    new_space = zfit.Space(obs=spaces[0].obs, limits=limits)
    return new_space
9d7aced4502b79125f62b4b040fbb6b7aad9d832
24,696
def minimal_subject_transformer(index, minimal_subject, attributes, subject_types, subject_type_is, center, radius):
    """Construct the JSON object for a MinimalSubject.

    Returns None when the subject should be filtered out: a search center is
    given and the subject either has no location or lies farther than
    ``radius`` (the radius cut is only enforced for radius > 0).
    """
    # NOTE(review): `index` is unused here -- presumably part of a shared
    # transformer interface; confirm before removing.
    subdomain, type = minimal_subject.subdomain, minimal_subject.type

    # Gather all the attributes. Only attributes listed in the subject
    # type's minimal_attribute_names get real values; the rest stay None so
    # value positions line up across subjects.
    values = [None]  # attributes are indexed starting from 1
    for attribute in attributes:
        name = attribute.key().name()
        if name in subject_types[type].minimal_attribute_names:
            values.append(minimal_subject.get_value(name))
        else:
            values.append(None)

    # Pack the results into an object suitable for JSON serialization.
    subject_jobject = {
        'name': minimal_subject.name,
        'type': subject_type_is[subdomain + ':' + type],
        'values': values,
    }
    if (minimal_subject.has_value('location') and
            minimal_subject.get_value('location')):
        location = {
            'lat': minimal_subject.get_value('location').lat,
            'lon': minimal_subject.get_value('location').lon
        }
        # Distance is only computed (and attached) when a center is given.
        if center:
            subject_jobject['distance_meters'] = distance(location, center)
    dist = subject_jobject.get('distance_meters')
    # With a center but no computed distance (subject has no location), or a
    # distance beyond a positive radius, drop the subject entirely.
    if center and (dist is None or dist > radius > 0):
        return None
    return subject_jobject
3babc222c508850b8b5bdea551efa8c9e9bc0aa4
24,697
def beta_avg_inv_cdf(y, parameters, res=0.001):
    """
    Compute the inverse cdf of the average of the k beta distributions.

    Parameters
    ----------
    y : float
        A float between 0 and 1 (the range of the cdf)
    parameters : array of tuples
        Each tuple (alpha_i, beta_i) is the parameters of a Beta distribution.
    res : float, optional (default=0.001)
        The precision of the convolution, measured as step size in the support.

    Returns
    -------
    x : float
        the inverse cdf of y
    """
    # Root of cdf(x) - y on [0, 1], found with Brent's method.
    def root_target(x):
        return beta_avg_cdf([x], parameters, res)[0] - y

    return brentq(root_target, 0, 1)
01c266e21401f6f7ad624151aa40d195c9196453
24,698
def _aprime(pHI,pFA): """recursive private function for calculating A'""" pCR = 1 - pFA # use recursion to handle # cases below the diagonal defined by pHI == pFA if pFA > pHI: return 1 - _aprime(1-pHI ,1-pFA) # Pollack and Norman's (1964) A' measure # formula from Grier 1971 if pHI == 0 or pFA == 1: # in both of these cases pHI == pFA return .5 return .5 + (pHI - pFA)*(1 + pHI - pFA)/(4*pHI*(1 - pFA))
3694dcdbc5da2c12bece51e85988245a60ebe811
24,699