content
stringlengths
22
815k
id
int64
0
4.91M
def concat_chunked_data(jsons, f_src='c', *args, **kwargs):
    """Take chunks of data and combine them into a numpy array of shape
    (trials, cells, time), concatenated over trials.

    Trials are clipped at the shortest frame number; if the trial arrays
    disagree on cell count, they are additionally clipped to the fewest
    cells. Extra args and kwargs are passed to process_data.

    Args:
        jsons (list): list of jsons to process
        f_src (str): key to F data to load ('c' or 'dff'). Defaults to 'c'.

    Returns:
        trial_dat: 3D numpy array, (trials, cells, time)
    """
    # load and format
    c_trials = [load_json(j)[f_src] for j in jsons]
    s_trials = [load_json(j)['splits'] for j in jsons]

    # smoosh all the lists of trials into a big array
    trial_dat = []
    for c, s in zip(c_trials, s_trials):
        out = process_data(c, s, *args, **kwargs)
        trial_dat.append(out)

    # clip every trial to the shortest time axis before concatenating
    shortest = min(s.shape[2] for s in trial_dat)  # shortest trial
    try:
        trial_dat = np.concatenate([a[:, :, :shortest] for a in trial_dat])
    except ValueError:
        # np.concatenate raises ValueError when the cell axes disagree; fall
        # back to clipping to the fewest cells. (The original bare `except:`
        # would also have swallowed unrelated errors such as KeyboardInterrupt.)
        print('WARNING LOST A CELL(S)!!!!')
        fewest = min(c.shape[1] for c in trial_dat)  # fewest cells
        trial_dat = np.concatenate([a[:, :fewest, :shortest] for a in trial_dat])
    return trial_dat
5,332,300
def test_apply_phaser(frequency: float, frame_rate: int, kind: str) -> None:
    """Check that `apply_phaser` runs and yields only finite samples."""
    mono = generate_mono_wave(
        'sine', frequency, np.ones(frame_rate), frame_rate
    )
    stereo = np.vstack((mono, mono))
    event = Event(
        instrument='any_instrument',
        start_time=0,
        duration=1,
        frequency=frequency,
        velocity=1,
        effects='',
        frame_rate=frame_rate
    )
    processed = apply_phaser(stereo, event, kind)
    assert np.isfinite(processed).all()
5,332,301
def hmc_update(context, hmc_uuid, values, session=None):
    """Updates an existing HMC instance in the Database.

    Thin pass-through to the backing implementation module (IMPL).

    :param context: request context, forwarded unchanged
    :param hmc_uuid: UUID identifying the HMC row to update
    :param values: mapping of column names to new values
    :param session: optional existing DB session to reuse
    :returns: whatever IMPL.hmc_update returns (presumably the updated
        record -- confirm against the IMPL backend)
    """
    return IMPL.hmc_update(context, hmc_uuid, values, session)
5,332,302
def _dense_difference(fun, x0, f0, h, one_sided, method): """ Calculates an approximation of the Jacobian of `fun`at the point `x0` in dense matrix form. NOTE: Inspired from: https://github.com/scipy/scipy/blob/master/scipy/optimize/_numdiff.py Parameters ---------- fun : callable Function which computes a vector of residuals with call f(x, *args, **kwargs). x0 : array_like with shape (n,) or float Initial guess of the dependent variable. method : {'2-point', '3-point'}, optional Method used for the finite difference scheme. Returns ------- J : array_like, shape (m, n) Approximation of the Jacobian matrix. """ m = f0.size n = x0.size Jt = np.empty((n, m)) hv = np.diag(h) for i in range(h.size): if method == '2-point': x = x0 + hv[i] dx = x[i] - x0[i] df = fun(x) - f0 elif (method == '3-point') and one_sided[i]: x1 = x0 + hv[i] x2 = x0 + 2. * hv[i] dx = x2[i] - x0[i] f1 = fun(x1) f2 = fun(x2) df = -3. * f0 + 4. * f1 - f2 elif (method == '3-point') and (not one_sided[i]): x1 = x0 - hv[i] x2 = x0 + hv[i] dx = x2[i] - x1[i] f1 = fun(x1) f2 = fun(x2) df = f2 - f1 else: raise ValueError("Step-method must be either '2-point' or '3-point'.") Jt[i, :] = df / dx if m == 1: Jt = np.ravel(Jt) return Jt.T
5,332,303
def alternate( name, *functions ):
    """Construct a callable that functions as the first implementation found
    of given set of alternatives

    if name is a function then its name will be used....

    :param name: either a string naming the alternate, or a function whose
        __name__ is used (in which case the function itself becomes the
        first candidate).
    :param functions: candidate implementations, tried in order.
    :returns: an instance of a dynamically created _Alternate subclass
        (named `name`) wrapping the candidate functions.
    """
    # Python 2 idiom: a "string" is either bytes or unicode. Anything else
    # is assumed to be a function and contributes both its __name__ and
    # itself (prepended) as the first alternative.
    if not isinstance( name, (bytes,unicode)):
        functions = (name,)+functions
        name = name.__name__
    # Create a throwaway subclass of _Alternate named `name`, then
    # instantiate it with the candidate functions.
    return type( name, (_Alternate,), {} )( name, *functions )
5,332,304
def check_messenger(messenger: Optional[Callable]):
    """
    Validate a `messenger` argument.

    When `messenger` is `None`, a silent `utipy.Messenger`
    (`verbose=False`) is created and returned. Otherwise the given
    object must already be a `utipy.Messenger` instance.

    Parameters
    ----------
    messenger : `utipy.Messenger` or None
        The messenger to validate, or `None` to get a silent default.

    Returns
    -------
    `utipy.Messenger`
    """
    if messenger is None:
        return Messenger(verbose=False)
    assert isinstance(messenger, Messenger)
    return messenger
5,332,305
def convert(s):
    """Take an input string s, find all things that look like SGML character
    entities, and replace them with the Unicode equivalent.

    Function is from:
    http://stackoverflow.com/questions/1197981/convert-html-entities-to-ascii-in-python/1582036#1582036
    """
    # Numeric entities, e.g. "&#8217;" -> the codepoint they name.
    matches = re.findall(r"&#\d+;", s)
    if len(matches) > 0:
        hits = set(matches)
        for hit in hits:
            name = hit[2:-1]
            try:
                entnum = int(name)
                s = s.replace(hit, unichr(entnum))
            except ValueError:
                pass

    # Named entities, e.g. "&eacute;". The "&amp;" entity must be expanded
    # LAST, otherwise it could create new entity-looking text mid-pass.
    # (The source had `amp = "&"` -- an HTML-unescaping artifact that made
    # the final replace a no-op and left "&amp;" unconverted; the referenced
    # StackOverflow original uses "&amp;".)
    matches = re.findall(r"&\w+;", s)
    hits = set(matches)
    amp = "&amp;"
    if amp in hits:
        hits.remove(amp)
    for hit in hits:
        name = hit[1:-1]
        if name in htmlentitydefs.name2codepoint:
            s = s.replace(hit, unichr(htmlentitydefs.name2codepoint[name]))
    s = s.replace(amp, "&")
    return s
5,332,306
def check_data_selection(race_id=None, category_index=None, racer_id=None):
    """Makes sure that we are trying to show data that is in the database.

    Each identifier is validated against the DB; any invalid one is replaced
    with a fallback and its name recorded. If anything had to be corrected,
    the request is redirected to the error page; otherwise returns None.

    NOTE(review): the corrected race_id / category_index / racer_id are
    local only -- they are never returned to the caller. Confirm that is
    intended.
    """
    errors = []
    if not race_id in Races.get_column('race_id'):
        # Unknown race: fall back to a random one.
        race_id = Races.get_random_id()
        errors.append('race')
    categories = Races.get_categories(race_id)
    # NOTE(review): a None category_index raises TypeError on this
    # comparison under Python 3 -- confirm callers always pass an int.
    if category_index >= len(categories):
        category_index = 0
        errors.append('category')
    if not racer_id in Racers.get_column('RacerID'):
        # Random racer from the currently selected category
        racer_id = Results.get_random_racer_id(racer_id, categories[category_index])
        errors.append('racer')
    if errors:
        return redirect(url_for('error'))
5,332,307
def request_access_ticket(pat: str, permission_endpoint: str, resources: List[dict], secure: bool = False) -> str:
    """
    As a Resource Server, request permission to the AS to access a resource,
    generating a ticket as a result.
    - CAN THROW EXCEPTIONS
    - MAKES A CONNECTION TO AN EXTERNAL ENDPOINT

    Args:
    - pat = String containing the pat (token)
    - permission_endpoint = URL of the token permission endpoint in the AS
    - resources = List of resources to request permission to. Format:
        [
            {
                "resource_id": <str resource id>,
                "resource_scopes": [ <scope 1>, <scope 2>, ...]
            },
            ...
        ]
    - secure = toggle checking of SSL certificates. Activating this is
      recommended on production environments

    Returns:
        A string containing the ticket for accessing those resources.

    Raises:
        Exception: when the endpoint responds with a non-OK status, or when
        the response body does not contain a "ticket" field.
    """
    headers = {
        'content-type': "application/json",
        'authorization': "Bearer "+pat,
    }
    # Collapse a one-element list to a bare dict -- presumably the AS
    # accepts a single permission object in that case (confirm against the
    # AS's UMA permission API).
    if len(resources) == 1:
        resources = resources[0]  # Use a single dict instead of a list for 1 resource

    # Suppresses TLS warnings when certificate checking is disabled.
    disable_warnings_if_debug(secure)
    response = request("POST", permission_endpoint , json=resources, headers=headers, verify=secure)
    if not is_ok(response):
        raise Exception("An error occurred while requesting permission for a resource: "+str(response.status_code)+":"+str(response.reason)+":"+str(response.text))
    try:
        # Successful responses carry the ticket in the JSON body.
        return response.json()["ticket"]
    except Exception as e:
        raise Exception("Call to permission endpoint returned unexpected value or error: '"+response.text+"'"+". Error: "+str(e))
5,332,308
def index(request):
    """
    Root page view. Just shows a list of liveblogs.
    """
    # Liveblogs with the most recent activity first: annotate each blog
    # with the creation time of its newest post, then sort descending.
    blogs = Liveblog.objects.annotate(
        max_created=Max("posts__created")
    ).order_by("-max_created")
    context = {"liveblogs": blogs}
    return render(request, "index.html", context)
5,332,309
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
                     num_train_steps, num_warmup_steps, use_tpu,
                     use_one_hot_embeddings):
  """Returns `model_fn` closure for TPUEstimator.

  Args:
    bert_config: BertConfig for the underlying model.
    num_labels: number of classification labels.
    init_checkpoint: optional checkpoint path to warm-start from.
    learning_rate: base learning rate for the optimizer.
    num_train_steps: total training steps (drives the LR schedule).
    num_warmup_steps: warmup steps (drives the LR schedule).
    use_tpu: whether running on TPU (changes checkpoint-scaffold handling).
    use_one_hot_embeddings: forwarded to `create_model`.
  """

  def model_fn(features, labels, mode, params):  # pylint: disable=unused-argument
    """The `model_fn` for TPUEstimator."""
    tf.logging.info("*** Features ***")
    for name in sorted(features.keys()):
      tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))

    # Three parallel (ids, mask, segment) input triples -- presumably the
    # task scores three text segments per example; confirm against the
    # input pipeline.
    input_ids0 = features["input_ids0"]
    input_mask0 = features["input_mask0"]
    segment_ids0 = features["segment_ids0"]
    input_ids1 = features["input_ids1"]
    input_mask1 = features["input_mask1"]
    segment_ids1 = features["segment_ids1"]
    input_ids2 = features["input_ids2"]
    input_mask2 = features["input_mask2"]
    segment_ids2 = features["segment_ids2"]
    label_ids = features["label_ids"]

    is_training = (mode == tf.estimator.ModeKeys.TRAIN)

    (total_loss, per_example_loss, logits, probabilities) = create_model(
        bert_config, is_training, input_ids0, input_mask0, segment_ids0,
        input_ids1, input_mask1, segment_ids1, input_ids2, input_mask2,
        segment_ids2, label_ids, num_labels, use_one_hot_embeddings)

    tvars = tf.trainable_variables()
    initialized_variable_names = {}
    scaffold_fn = None
    if init_checkpoint:
      # Map checkpoint variables onto the current graph's variables.
      (assignment_map, initialized_variable_names
      ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
      if use_tpu:
        # On TPU, checkpoint initialization must happen inside the
        # scaffold function so each TPU host runs it.
        def tpu_scaffold():
          tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
          return tf.train.Scaffold()

        scaffold_fn = tpu_scaffold
      else:
        tf.train.init_from_checkpoint(init_checkpoint, assignment_map)

    tf.logging.info("**** Trainable Variables ****")
    for var in tvars:
      init_string = ""
      if var.name in initialized_variable_names:
        init_string = ", *INIT_FROM_CKPT*"
      tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
                      init_string)

    output_spec = None
    if mode == tf.estimator.ModeKeys.TRAIN:
      train_op = optimization.create_optimizer(
          total_loss, learning_rate, num_train_steps, num_warmup_steps,
          use_tpu)
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          train_op=train_op,
          scaffold_fn=scaffold_fn)
    elif mode == tf.estimator.ModeKeys.EVAL:
      # TPUEstimator requires eval metrics as a (fn, args) pair computed
      # on the host.
      def metric_fn(per_example_loss, label_ids, logits):
        predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
        accuracy = tf.metrics.accuracy(label_ids, predictions)
        loss = tf.metrics.mean(per_example_loss)
        return {
            "eval_accuracy": accuracy,
            "eval_loss": loss,
        }

      eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          loss=total_loss,
          eval_metrics=eval_metrics,
          scaffold_fn=scaffold_fn)
    else:
      # PREDICT mode: emit class probabilities only.
      output_spec = tf.contrib.tpu.TPUEstimatorSpec(
          mode=mode,
          predictions=probabilities,
          scaffold_fn=scaffold_fn)
    return output_spec

  return model_fn
5,332,310
def strarray(*args):
    """strarray(strarray_t array, size_t array_size, int code) -> char

    SWIG-generated pass-through to the native IDA API; all argument
    checking happens in the C extension (_idaapi).
    """
    return _idaapi.strarray(*args)
5,332,311
def _single_value_set(target_list, value): """ Return true if this constraint has only one value and it is this one. """ return len(target_list) == 1 and target_list[0] == value
5,332,312
def get_positive_input(message, float_parse=False, allow_zero=False):
    """
    Obtains and returns a positive number from the user.

    Re-prompts until a valid value is entered. (The original re-prompted
    via recursion, which could exhaust the call stack on enough consecutive
    invalid inputs; a loop has the same observable behavior.)

    Preconditions:
        message: non-empty string
        float_parse: bool defaulted to False
        allow_zero: bool defaulted to False

    Parameters:
        message: The message that is printed when obtaining the input.
        float_parse: Whether to parse input to float or int
        allow_zero: Whether to allow zero as an input

    Postconditions:
        num: The valid inputted number.
    """
    # use ternary operator to determine the sign to use
    sign = ">=" if allow_zero else ">"
    # pick the parser once instead of branching on every attempt
    parse = float if float_parse else int
    while True:
        # try to parse input to either a float or int
        try:
            num = parse(input("(must be " + sign + " 0), " + message).strip())
            # raise a ValueError if input was out of range
            if (not allow_zero) and (num <= 0):
                raise ValueError()
            elif num < 0:
                raise ValueError()
            return num
        # catch any ValueErrors (bad parse or out of range) and re-prompt
        except ValueError:
            print("Not a valid input.")
5,332,313
def hellinger(p, q):
    """Return the Hellinger distance between two probability distributions."""
    sqrt_diff = np.sqrt(p) - np.sqrt(q)
    return np.linalg.norm(sqrt_diff) / np.sqrt(2.0)
5,332,314
def usd_currency(currency_df: pd.DataFrame, value: int, date: str) -> float:
    """
    Compute VALUE/(USD/SYMBOL).

    Parameters
    ----------
    currency_df : pd.DataFrame
        USD/SYMBOL quotes, indexed by date, with a `usd` column.
    value : int
        Value of product.
    date : str
        Currency quote day.

    Returns
    -------
    float
        The converted value for the requested day.
    """
    rate = currency_df.loc[date].usd
    return value / rate
5,332,315
def svn_wc_adm_probe_retrieve(*args):
    """svn_wc_adm_probe_retrieve(svn_wc_adm_access_t associated, char path, apr_pool_t pool) -> svn_error_t

    SWIG-generated pass-through to the native Subversion working-copy
    library; all argument checking happens in the C extension (_wc).
    """
    return _wc.svn_wc_adm_probe_retrieve(*args)
5,332,316
def flights_preclean(df):
    """
    Clean the raw Flights table.

    Steps:
      * split off cancelled rows into the module-level global `df_can`
      * drop the columns ['Unnamed: 0', 'branded_code_share', 'mkt_carrier',
        'cancelled', 'cancellation_code', 'flights', 'air_time',
        'first_dep_time', 'total_add_gtime', 'longest_add_gtime', 'no_name']
      * treat missing values in the delay columns as zero delay
      * drop any remaining rows containing nulls

    Input: Raw dataframe of Flights table.
    Output: Cleaned flights dataframe.
    Side effect: cancelled rows are made available in dataframe `df_can`.
    """
    global df_can

    drop_cols = ['Unnamed: 0', 'branded_code_share', 'mkt_carrier',
                 'cancelled', 'cancellation_code', 'flights', 'air_time',
                 'first_dep_time', 'total_add_gtime', 'longest_add_gtime',
                 'no_name']
    delay_cols = ['carrier_delay', 'weather_delay', 'nas_delay',
                  'security_delay', 'late_aircraft_delay']

    df_can = df[df.cancelled == 1].copy()
    print("Removed cancelled flights - now available in dataframe 'df_can'")

    cleaned = df[df.cancelled == 0].drop(columns=drop_cols)
    cleaned = cleaned.fillna({col: 0 for col in delay_cols})
    return cleaned.dropna()
5,332,317
def convert_spectral_kernel_quint(sequences, list_seq_to_id):
    """For each sequence, count how often each 5-mer from `list_seq_to_id`
    occurs, and return one count vector (ordered like `list_seq_to_id`)
    per input sequence."""
    final = []
    for sequence in sequences:
        counts = dict.fromkeys(list_seq_to_id, 0)
        for start in range(len(sequence) - 4):
            window = (sequence[start] + sequence[start + 1] +
                      sequence[start + 2] + sequence[start + 3] +
                      sequence[start + 4])
            counts[window] += 1
        final.append([counts[k] for k in list_seq_to_id])
    return final
5,332,318
def grad_of_marginal_fit(c, h, tau, epsilon):
    """Computes grad of terms linked to marginals in objective.

    Computes gradient w.r.t. f (or g) of terms in
    https://arxiv.org/pdf/1910.12958.pdf, left-hand-side of Eq. 15
    (terms involving phi_star).

    Args:
        c: jnp.ndarray, first target marginal (either a or b in practice)
        h: jnp.ndarray, potential (either f or g in practice)
        tau: float, strength (in ]0,1]) of regularizer w.r.t. marginal
        epsilon: regularization

    Returns:
        a vector of the same size as c or h
    """
    # tau == 1 corresponds to a hard marginal constraint: gradient is c.
    if tau == 1.0:
        return c
    rho = epsilon * tau / (1 - tau)
    # Only positive marginal entries contribute.
    return jnp.where(c > 0, c * derivative_phi_star(-h, rho), 0.0)
5,332,319
def SuggestField(**kwargs):
    """
    Build a TextField with `raw` and `suggest` subfields.

    Query 'foo' to get the TextField, or 'foo.raw' to get the KeywordField,
    or 'foo.suggest' to get the CompletionField.

    Any keyword arguments are forwarded to the underlying TextField.
    """
    return fields.TextField(
        fields={
            'raw': fields.KeywordField(),
            'suggest': fields.CompletionField(),
        },
        **kwargs
    )
5,332,320
def pyramid(a, b, c, d, e, depth, vertices, edges, surfaces, config): """ Recursive pyramid fractal generation. ARGUMENTS: a, b, c, d, e - pyramid vertices depth - how deep in interations vertices, edges, surfaces - mesh data lists config - config tuple to generate from """ # if depth zero, final iteration, draw pyramid if depth == 0: draw_pyramid(a, b, c, d, e, vertices, edges, surfaces) else: # create sub-pyramids and iterate over them pyramids = generate_sub_pyramids(a, b, c, d, e) for index, item in enumerate(pyramids): depth_iter = depth-1 # helper variable # setup next pyramid according to config if index in config[0]: if index in config[1]: depth_iter = 0 # recursive call pyramid(item[0], item[1], item[2], item[3], item[4], depth_iter, vertices, edges, surfaces, config)
5,332,321
def distance_on_unit_sphere(FoLat, FoLng, ToLat, ToLng):
    """Return the great-circle arc length between two lat/lng points on the
    unit sphere.

    Remember to multiply the result by the radius of the earth in your
    favorite set of units to get an actual length.

    Args:
        FoLat, FoLng: latitude/longitude of the start point, in degrees.
        ToLat, ToLng: latitude/longitude of the end point, in degrees.

    Returns:
        float: arc length in radians, in [0, pi].
    """
    # Convert latitude and longitude to spherical coordinates in radians.
    phi1 = math.radians(90.0 - FoLat)
    phi2 = math.radians(90.0 - ToLat)
    theta1 = math.radians(FoLng)
    theta2 = math.radians(ToLng)

    # Compute spherical distance from spherical coordinates. For two
    # locations in spherical coordinates (1, theta, phi) and
    # (1, theta', phi'):
    #   cosine( arc length ) =
    #       sin phi sin phi' cos(theta - theta') + cos phi cos phi'
    cos = (math.sin(phi1) * math.sin(phi2) * math.cos(theta1 - theta2) +
           math.cos(phi1) * math.cos(phi2))

    # Clamp to [-1, 1]: floating-point rounding can push `cos` marginally
    # outside the acos domain (e.g. for two identical points), which would
    # raise ValueError in the original.
    cos = max(-1.0, min(1.0, cos))
    arc = math.acos(cos)
    return arc
5,332,322
def check_pandas_support(caller_name):
    """Raise ImportError with detailed error message if pandas is not installed.

    (Fixes the "pandsa" typo in the original docstring.)

    Plot utilities like :func:`fetch_openml` should lazily import pandas and
    call this helper before any computation.

    Parameters
    ----------
    caller_name : str
        The name of the caller that requires pandas.

    Returns
    -------
    module
        The imported `pandas` module.

    Raises
    ------
    ImportError
        If pandas is not installed; `caller_name` is included in the
        message and the original ImportError is chained as the cause.
    """
    try:
        import pandas  # noqa
        return pandas
    except ImportError as e:
        raise ImportError(
            "{} requires pandas.".format(caller_name)
        ) from e
5,332,323
def multisgmapper(filein, krnfolder, outfolder): """Create series of artificial brightness maps from kernels in folder. filein (str): VIIRS DNB file path krnfolder (str): path to folder with kernel tifs outfolder (str): location to save skyglow maps to """ # read in VIIRS DNB image viirsraster = gdal.Open(filein) imgarr = viirsraster.ReadAsArray() # Get kernel library file list krnsrch = os.path.join(krnfolder,'kernel*.tif') krnlist = glob.glob(krnsrch) ln = 0 for krn in krnlist: # If the loop has already ran once then rescale imgarr back down to prevent multiple scaling factors being applied if ln > 0: imgarr *= 10**-9 ln = 1 kh = gdal.Open(krn) krnarr = kh.ReadAsArray() # Break down kernel filename to get elements for skyglow filename krnbase = os.path.basename(krn) sgtags = krnbase.split("_") # Change "kernel" to "skyglow" sgtags[0] = "skyglow" # Put the skyglow basename together sep = "_" sgbase = sep.join(sgtags) sgfile = os.path.join(outfolder, sgbase) # Create skyglow array sgarr = convolve_viirs_to_skyglow(imgarr, krnarr) sgarr, new_transform = subset_to_200km(sgarr, krnarr, filein) # Write skyglow to a geotiff array_to_geotiff(sgarr, sgfile, filein, new_transform)
5,332,324
def parse_cmdline():
    """Parse the command line arguments.

    Returns:
        argparse.Namespace. The parsed arguments or defaults.
    """
    parser = argparse.ArgumentParser(
        description="Dataplane Automated Testing System, version " + __version__,
        epilog='Note: parameters on the command line will override values set in the configuration file.')
    # Where test scripts live.
    parser.add_argument('-d', '--tests-directory',
        default='./tests',
        metavar='DIRECTORY', dest='tests_dir',
        help='Directory containing test scripts, ./tests/ by default')
    parser.add_argument('-f', '--config',
        default='./dats.cfg',
        help='Configuration file name, ./dats.cfg by default')
    parser.add_argument('-l', '--list',
        action='store_true',
        help='List valid names of test cases and exit')
    # Default report directory is timestamped so consecutive runs never
    # overwrite each other.
    parser.add_argument('-r', '--report',
        default=datetime.now().strftime('dats-report-%Y%m%d_%H%M%S/'),
        metavar='DIRECTORY', dest='report_dir',
        help='Where to save the report. A new directory with timestamp in its name is created by default.')
    parser.add_argument('-v', '--verbose',
        action='store_true',
        help='Verbose output - set log level of screen to VERBOSE instead of INFO')
    # Positional: zero or more test names; empty means "run everything".
    parser.add_argument(
        'test', nargs='*',
        help='List of test names to execute. All tests are executed by default.')
    args = parser.parse_args()
    return args
5,332,325
def summarize_sequence(summary_type, hidden, d_model, n_head, d_head, dropout,
                       dropatt, input_mask, is_training, initializer,
                       scope=None, reuse=None):
  """Summarize hidden sequence into a vector.

  Args:
    summary_type: one of "last", "first", "max", "mean", "attn".
    hidden: hidden-state tensor summarized over its sequence axis.
      NOTE(review): "last"/"first"/"max"/"mean" operate on axis 1, while
      the "attn" branch reads bsz from tf.shape(hidden)[1] -- confirm the
      expected tensor layout for each summary type.
    d_model, n_head, d_head: attention dimensions (used by "attn" only).
    dropout, dropatt: dropout rates for the attention summary.
    input_mask: optional mask; nonzero entries are excluded from the
      "max"/"mean"/"attn" summaries.
    is_training: enables dropout in the attention summary.
    initializer: variable initializer.
    scope, reuse: tf.variable_scope controls.

  Returns:
    Summary tensor passed through a tanh dense projection of size d_model.

  Raises:
    ValueError: for an unsupported summary_type.
  """
  tf.logging.info("===== Sequence summary =====")
  tf.logging.info(" - input_mask %s", input_mask)
  tf.logging.info(" - summary_type %s", summary_type)
  tf.logging.info("============================")

  # (sic) scope name "sequnece_summary" is kept as-is: renaming it would
  # break checkpoint compatibility.
  with tf.variable_scope(scope, "sequnece_summary", reuse=reuse):
    if summary_type == "last":
      summary = hidden[:, -1]
    elif summary_type == "first":
      summary = hidden[:, 0]
    elif summary_type == "max":
      if input_mask is None:
        summary = tf.reduce_max(hidden, axis=1)
      else:
        # Push masked positions toward -inf so they never win the max.
        neg_pad = -1e10 * input_mask[:, :, None]
        summary = tf.reduce_max(hidden + neg_pad, axis=1)
    elif summary_type == "mean":
      if input_mask is None:
        summary = tf.reduce_mean(hidden, axis=1)
      else:
        # Mask-weighted mean; the 1e-6 guards against division by zero on
        # fully-masked rows.
        inp_mask = (1. - input_mask)[:, :, None]
        summary = (tf.reduce_sum(hidden * inp_mask, axis=1)
                   / (1e-6 + tf.reduce_sum(inp_mask, axis=1)))
    elif summary_type == "attn":
      bsz = tf.shape(hidden)[1]

      # Learned query vector that attends over the whole sequence.
      summary_bias = tf.get_variable("summary_bias", [d_model],
                                     dtype=hidden.dtype,
                                     initializer=initializer)
      summary_bias = tf.tile(summary_bias[None, None], [bsz, 1, 1])

      if input_mask is not None:
        # [B X T] -> [B x N x F x T]
        input_mask = input_mask[:, None, None, :]

      summary, _ = multihead_attn(summary_bias, hidden, hidden, input_mask,
                                  d_model, n_head, d_head, dropout, dropatt,
                                  is_training, initializer, residual=False)
      summary = summary[:, 0]
    else:
      raise ValueError("Unsupported summary type {}".format(summary_type))

    # use another projection with `tanh` activation
    summary = tf.layers.dense(
        summary,
        d_model,
        activation=tf.tanh,
        use_bias=True,
        kernel_initializer=initializer,
        name="summary")

  return summary
5,332,326
def process_submissions(company: Company, filing_data: dict):
    """
    Adds all 10-K/10-Q (and amended versions) from the SEC API data.

    The payload carries parallel lists (form, reportDate, filingDate,
    accessionNumber); one Filing row is created for each wanted form type,
    then bulk-inserted (existing rows are left alone via ignore_conflicts).

    :param company: Company model object
    :param filing_data: submissions payload from the SEC API
    :return: None
    """
    wanted_forms = {"10-K", "10-Q", "10-K/A", "10-Q/A"}
    forms = filing_data["form"]
    report_dates = filing_data["reportDate"]
    filing_dates = filing_data["filingDate"]
    accns = filing_data["accessionNumber"]

    filings = [
        Filing(
            company=company,
            type=form,
            accn_num=accns[i],
            report_date=report_dates[i],
            date_filed=filing_dates[i],
        )
        for i, form in enumerate(forms)
        if form in wanted_forms
    ]
    Filing.objects.bulk_create(filings, ignore_conflicts=True)
5,332,327
def encode_line(line, vocab):
    """Encode a string as vocab ids.

    Leading/trailing whitespace is stripped first; characters missing from
    `vocab` map to the '<UNK>' id.

    Returns a tuple (sequence, sequence_length).
    """
    stripped = line.strip()
    encoded = [vocab.get(ch, vocab['<UNK>']) for ch in stripped]
    return encoded, len(encoded)
5,332,328
def get_like_from_mats(ky_mat, l_mat, alpha, name):
    """
    compute the likelihood from the covariance matrix

    :param ky_mat: the covariance matrix
    :param l_mat: matrix whose log-diagonal enters the likelihood
        (presumably a Cholesky factor of ky_mat -- confirm)
    :param alpha: vector combined with the training labels in the data-fit
        term
    :param name: key into the global training-label registry
    :return: float, likelihood
    """
    # catch linear algebra errors
    labels = _global_training_labels[name]

    # calculate likelihood: data-fit term, log-determinant term, and the
    # Gaussian normalization constant
    data_fit = -0.5 * np.matmul(labels, alpha)
    log_det = np.sum(np.log(np.diagonal(l_mat)))
    norm_const = math.log(2 * np.pi) * ky_mat.shape[1] / 2
    return data_fit - log_det - norm_const
5,332,329
def cached(*cache_args, **cache_kwargs):
    """General-purpose. Allows custom filename, with function fallback.
    Load/save cached function data. Also handle data types gracefully.

    On each call: if the cache file exists, its contents are returned
    (JSON-decoded when the name contains '.json'); otherwise the wrapped
    function runs and its result is written to the file.

    Example:
    >>> cached('myfile.json', directory=SOME_DIR)
    >>> def my_thing():
    >>>     return {'foo': 'bar'}
    """
    def outer(func, *args, **kwargs):
        folder = cache_kwargs.get('directory')
        if len(cache_args) > 0:
            name = cache_args[0]
        else:
            # Guess a resonable name.
            name = func.__name__.replace('_', '-') + '.json'

        def _inner(*args, **kwargs):
            try:
                # Allow users to specify non-existant subfolders
                # but fail gracefully.
                os.makedirs(folder)
            except Exception:
                pass
            path = '{}/{}'.format(folder, name) if folder is not None else name
            try:
                with open(path, 'r') as _cached:
                    # Cache hit: return file contents, decoding JSON when
                    # the filename says so.
                    if '.json' in name:
                        return json.loads(_cached.read())
                    else:
                        return _cached.read()
            except ValueError as exc:
                # Corrupt JSON cache (Python 2 json error message):
                # fall through to recomputing without rewriting the file.
                if 'No JSON object could be decoded' in str(exc):
                    return func(*args, **kwargs)
            except IOError:
                # Cache miss: compute, then persist the result.
                res = func(*args, **kwargs)
                if res is None:
                    return
                with open(path, 'w') as _cached:
                    if '.json' in name:
                        try:
                            to_write = json.dumps(res, indent=4)
                        # The json was invalid, skip this.
                        except TypeError:
                            return res
                    else:
                        to_write = res
                    _cached.write(to_write)
                    _cached.write('\n')
                return res
        return _inner
    return outer
5,332,330
def test_python(host, f):
    """Ensure that python2-specific files no longer exist, except on
    AmazonLinux or Debian 9."""
    # Note that r"^9(\.|$)" will match any string starting with "9.",
    # or the string "9".
    distro = host.system_info.distribution
    is_debian9 = (
        distro == "debian"
        and re.match(r"^9(\.|$)", host.system_info.release) is not None
    )
    if distro == "amzn" or is_debian9:
        assert host.file(f).exists
    else:
        assert not host.file(f).exists
5,332,331
def engulfing(data: pd.DataFrame):
    """
    engulfing

    Positive numbers are multi-side, negative numbers are short-side.
    0 is abnormal, meaning that the ratio of the absolute value of the
    current Candle up or down to the previous one is more than 10 times.
    For machine learning convenience, a floating point number should be
    returned to indicate the strength of the engulfing, in preparation for
    the final Machine Learning normalization.

    Side effect: adds an "engulfing" column to `data` in place; returns None.
    Expects `data` to have "open" and "close" columns.
    """
    def cal(ser):
        # Per-row engulfing score from the precomputed helper columns.
        result = 0
        if ser.raise_0 > 0 >= ser.raise_1 and ser.open <= ser.close_1 and ser.close >= ser.open_1:
            # Current candle is going up, long. If the previous candle had
            # zero change, fall back to the 5-day average absolute change
            # as the denominator.
            rr = abs(ser.raise_0) / abs(ser.raise_1) if 0 > ser.raise_1 else ser.raise_0/ser.avg_5_change_abs
            result = rr if rr > 1 else 0
        elif ser.raise_0 < 0 < ser.raise_1 and ser.open >= ser.close_1 and ser.close <= ser.open_1:
            # Current candle is going down, short. (Here raise_1 > 0 always
            # holds, so the ternary always takes the ratio branch.)
            rr = abs(ser.raise_0) / abs(ser.raise_1) if 0 < ser.raise_1 else ser.raise_0/ser.avg_5_change_abs
            result = -rr if rr > 1 else 0
        return result

    data_copy = data.copy()
    # raise_0: today's body; raise_1/open_1/close_1: yesterday's values.
    data_copy["raise_0"] = data_copy["close"] - data_copy["open"]
    data_copy["raise_1"] = data_copy["raise_0"].shift(1)
    data_copy["open_1"] = data_copy["open"].shift(1)
    data_copy["close_1"] = data_copy["close"].shift(1)
    # get recent 5 average price change, in order to calculate if prev day
    # price is zero change, we still won't miss it
    data_copy["avg_5_change_abs"] = data_copy.raise_0.rolling(window=5).apply(lambda ser: ser.abs().mean())
    data_copy["engulfing"] = data_copy[["raise_0", "raise_1", "open", "open_1", "close", "close_1", "avg_5_change_abs"]].apply(cal, axis=1)
    # print(data_copy.query("raise_1==0").tail(20))
    data["engulfing"] = data_copy["engulfing"]
5,332,332
def create_dat_files(ipc_tests, test_results, dst_dir):
    """
    Creates .dat files for all the test_results.

    Each file holds one "<message_size>\\t<throughput>" line per message
    size (sorted ascending), with "-" where a result is missing.

    @param ipc_tests: list of ipc tests which was used to created the data.
    @param test_results: dictionary containing all test results, keyed by
        message size, then ipc method, with a "throughput" entry.
    @param dst_dir: directory where the .dat-files shall be stored.
    @return: a dict mapping each ipc method to its .dat file name (relative
        to dst_dir), e.g. {"shm": "shm.dat", ...}. (The previous docstring
        claimed a list of paths, but the function has always returned this
        dict.)
    @rtype: dict of strings.
    @raise IpcBenchError: if a .dat file cannot be written.
    """
    dat_files = {}
    for ipc_method in ipc_tests:
        filename = f"{ipc_method}.dat"
        filename = os.path.join(dst_dir, filename)
        try:
            with open(filename, 'w') as out_file:
                line = "# message_size (in Bytes) throughput (in Mbit/s)\n"
                out_file.write(line)
                for message_size in sorted(test_results.keys()):
                    try:
                        throughput = test_results[message_size][ipc_method]["throughput"]
                    except KeyError:
                        # No result for this (size, method) combination.
                        throughput = "-"
                    line = f"{message_size}\t{throughput}\n"
                    out_file.write(line)
            dat_files[ipc_method] = ipc_method + ".dat"
        except IOError as ex:
            raise IpcBenchError() from ex
    return dat_files
5,332,333
def filter_matches(kp1, kp2, matches, ratio = 0.75):
    """
    Apply Lowe's ratio test to raw knn matches.

    :param kp1: raw keypoint 1 (query side)
    :param kp2: raw keypoint 2 (train side)
    :param matches: raw matches (pairs of best/second-best candidates)
    :param ratio: filtering ratio
    :return: filtered points 1, filtered points 2, keypoint pairs
    """
    # Keep only matches whose best candidate is clearly better than the
    # runner-up.
    good = [
        pair[0]
        for pair in matches
        if len(pair) == 2 and pair[0].distance < pair[1].distance * ratio
    ]
    # keypoints with Index of the descriptor in query/train descriptors
    mkp1 = [kp1[m.queryIdx] for m in good]
    mkp2 = [kp2[m.trainIdx] for m in good]
    p1 = np.float32([kp.pt for kp in mkp1])
    p2 = np.float32([kp.pt for kp in mkp2])
    kp_pairs = list(zip(mkp1, mkp2))
    return p1, p2, kp_pairs
5,332,334
def try_different_adaptors(): """ This method didn't return any positive results but it was work a try. We were looking to see if using different SSLAdapters with the session object made a difference. It certainly has different behaviour on different platforms but did not fix the issue. """ from requests_toolbelt import SSLAdapter import ssl ssl3 = SSLAdapter(ssl.PROTOCOL_SSLv23) tls1 = SSLAdapter(ssl.PROTOCOL_TLSv1) tls11 = SSLAdapter(ssl.PROTOCOL_TLSv1_1) print 'TLSv1' session = session_post(adaptor=tls1) session_post(session=session) print 'TLSv1' session = session_post(adaptor=tls11) session_post(session=session) print 'SSLv3' session = session_post(adaptor=ssl3) session_post(session=session)
5,332,335
def logInsideConfInt(value0, unc0, value1, unc1, quantity):
    """Two values are within acceptable statistical limits.

    Logs (via the module's `debug` channel) that the confidence intervals
    of the two value+/-uncertainty pairs overlap for the named quantity;
    formatting is delegated to _notifyWithUncs.
    """
    _notifyWithUncs(debug, quantity, 'Confidence intervals for {} overlap',
                    value0, unc0, value1, unc1)
5,332,336
def add_derived_columns(
    data: pd.DataFrame,
    differences: bool = True,
    second_differences: bool = True,
    multiplications: bool = True,
    rolling_means: int | None = 10,
    rolling_stds: int | None = 10,
    mean_distances: bool = True,
) -> pd.DataFrame:
    """This will create many columns that can be valuable for making
    predictions like difference, or rolling mean or distance from average.
    Computed columns will be appended to original data.

    It will process all the columns, so a lot of redundant data will be
    created. It is necessary do some feature extraction afterwards to remove
    noncorrelated columns.

    Args:
        data (pd.DataFrame): Data that we want to extract more information
            from.
        differences (bool, optional): Compute difference between n and n-1
            sample. Defaults to True.
        second_differences (bool, optional): Compute second difference.
            Defaults to True.
        multiplications (bool, optional): Add pairwise products of columns.
            Defaults to True.
        rolling_means (int | None, optional): Rolling mean with defined
            window, or None to skip. Defaults to 10.
        rolling_stds (int | None, optional): Rolling std with defined
            window, or None to skip. Defaults to 10.
        mean_distances (bool, optional): Distance from each column's
            average. Defaults to True.

    Returns:
        pd.DataFrame: Data with more columns, that can have more
        informations, than original data. Number of rows can be little bit
        smaller (results are truncated to the shortest derived frame).

    Example:
        >>> import mydatapreprocessing as mdp
        >>> data = pd.DataFrame(
        ...     [mdp.generate_data.sin(n=100), mdp.generate_data.ramp(n=100)]
        ... ).T
        ...
        >>> extended = add_derived_columns(data, differences=True, rolling_means=32)
    """
    results = [data]

    if differences:
        # First difference x[t] - x[t-1]; one row shorter than `data`.
        results.append(
            pd.DataFrame(np.diff(data.values, axis=0), columns=[f"{i} - Difference" for i in data.columns],)
        )

    if second_differences:
        # Second difference; two rows shorter than `data`.
        results.append(
            pd.DataFrame(
                np.diff(data.values, axis=0, n=2), columns=[f"{i} - Second difference" for i in data.columns],
            )
        )

    if multiplications:
        # One product column per unordered pair of input columns.
        combinations = list(itertools.combinations(data.columns, 2))
        combinations_names = [f"Multiplicated {i}" for i in combinations]
        multiplicated = np.zeros((len(data), len(combinations)))

        for i, j in enumerate(combinations):
            multiplicated[:, i] = data[j[0]] * data[j[1]]

        results.append(pd.DataFrame(multiplicated, columns=combinations_names))

    if rolling_means:
        # `rolling_windows` is a project helper; it presumably yields
        # sliding windows over each column -- confirm its exact row count.
        results.append(
            pd.DataFrame(
                np.mean(rolling_windows(data.values.T, rolling_means), axis=2).T,
                columns=[f"{i} - Rolling mean" for i in data.columns],
            )
        )

    if rolling_stds:
        results.append(
            pd.DataFrame(
                np.std(rolling_windows(data.values.T, rolling_stds), axis=2).T,
                columns=[f"{i} - Rolling std" for i in data.columns],
            )
        )

    if mean_distances:
        # Each column centered on its own mean.
        mean_distanced = np.zeros(data.T.shape)

        for i in range(data.shape[1]):
            mean_distanced[i] = data.values.T[i] - data.values.T[i].mean()

        results.append(pd.DataFrame(mean_distanced.T, columns=[f"{i} - Mean distance" for i in data.columns]))

    # Align all derived frames on the shortest one (tail rows) before
    # concatenating side by side.
    min_length = min(len(i) for i in results)

    return pd.concat([i.iloc[-min_length:].reset_index(drop=True) for i in results], axis=1)
5,332,337
def parse_query(
    query: List[str],
    format,
    use_youtube,
    generate_m3u,
    lyrics_provider,
    threads,
    path_template,
) -> List[SongObject]:
    """Expand each search query into song objects and de-duplicate the result.

    Tracking files are skipped here (they are resumed through a different code
    path); every other request is handed to ``parse_request``. Songs that would
    produce the same file name are collapsed to the first occurrence, keeping
    the original order.
    """
    found: List[SongObject] = []
    for request in query:
        # .spotdlTrackingFile entries are download-resume state, not queries
        if request.endswith(".spotdlTrackingFile"):
            continue
        found.extend(
            parse_request(
                request,
                format,
                use_youtube,
                generate_m3u,
                lyrics_provider,
                threads,
                path_template,
            )
        )

    # linefeed to visually separate output for each query
    print()

    # keep only the first song per distinct file name, preserving order
    unique_by_file_name = {}
    for song in found:
        if song.file_name not in unique_by_file_name:
            unique_by_file_name[song.file_name] = song

    return list(unique_by_file_name.values())
5,332,338
def start_db_server():
    """Spin up a throwaway PostgreSQL server for tests.

    Initialises a fresh data directory with ``initdb`` and launches the
    ``postgres`` server process against it. Any pre-existing data directory
    is wiped first, so state never leaks between test runs.

    Returns:
        subprocess.Popen: handle of the running ``postgres`` process; the
        caller is responsible for terminating it.
    """
    log.debug('start_db_server()')
    try:
        os.mkdir(DATA_DIR)
    except FileExistsError:
        # leftover from an earlier run — discard it (initdb recreates the dir)
        shutil.rmtree(DATA_DIR)
    sp.check_call(['initdb', '-D', DATA_DIR])
    db_proc = sp.Popen(['postgres', '-D', DATA_DIR])
    # NOTE(review): fixed sleep is a racy readiness check — consider polling
    # with pg_isready instead.
    time.sleep(0.5)  # Ensure the db has time to start
    return db_proc
5,332,339
def GFF_bcftools_format(in_handle, out_handle):
    """Convert a bacterial genbank file from NCBI to a GFF3 format that can be used in bcftools csq.
    see https://github.com/samtools/bcftools/blob/develop/doc/bcftools.txt#L1066-L1098.

    Args:
        in_handle: open handle of the input genbank file
        out_handle: open handle of the output GFF file (closed on return)
    """
    from BCBio import GFF
    # in_handle = open(in_file)
    # out_handle = open(out_handle, "w")
    from Bio.SeqFeature import SeqFeature
    from Bio.SeqFeature import FeatureLocation
    from copy import copy, deepcopy
    from Bio import SeqIO

    for record in SeqIO.parse(in_handle, "genbank"):
        # make a copy of the record as we will be changing it during the loop
        new = copy(record)
        new.features = []
        # loop over all features
        for feat in record.features:
            q = feat.qualifiers
            # remove some unecessary qualifiers
            for label in ["note", "translation", "product", "experiment"]:
                if label in q:
                    del q[label]
            if feat.type == "CDS":
                # use the CDS feature to create the new lines
                tag = q["gene"][0]  # q['locus_tag'][0]
                protein_id = q["protein_id"][0]
                q["ID"] = "CDS:%s" % protein_id
                q["biotype"] = "protein_coding"
                # split compound (joined) locations into one feature per part,
                # each attached to its own synthetic transcript id
                for i, new_loc in enumerate(
                    feat.location.parts
                    if (hasattr(feat.location, "parts"))
                    else (feat.location,)
                ):
                    new_feat = deepcopy(feat)
                    tr_id = "transcript:%s" % (protein_id + "_" + str(i))
                    new_feat.qualifiers["Parent"] = tr_id
                    new_feat.location = new_loc
                    new.features.append(new_feat)
                    # create mRNA feature linking the transcript to its gene
                    m = SeqFeature(feat.location, type="mRNA", strand=feat.strand)
                    q2 = m.qualifiers
                    q2["ID"] = tr_id
                    q2["Parent"] = "gene:%s" % tag
                    q2["biotype"] = "protein_coding"
                    new.features.append(m)
            elif feat.type == "gene":
                tag = q["gene"][0]
                # edit the gene feature
                q = feat.qualifiers
                q["ID"] = "gene:%s" % tag
                q["biotype"] = "protein_coding"
                q["Name"] = q["gene"]
                new.features.append(feat)
        # write the new features to a GFF
        GFF.write([new], out_handle)
        return out_handle.close()
5,332,340
def assert_address_book(address_book):
    """Fixture returning an object providing custom address book asserts.

    Wraps the given address book in the project's ``AddressBookAssertions``
    helper so tests can call domain-specific assertion methods on it.
    """
    return icemac.addressbook.testing.AddressBookAssertions(address_book)
5,332,341
def make_shell_context():
    """Build the namespace exposed in the Flask interactive shell.

    Returns:
        dict: mapping of names to the database handle and the model classes
        so they are directly usable in ``flask shell``.
    """
    db = get_db()
    return {"db": db, "Doi": Doi, "Url": Url, "FBRequest": FBRequest}
5,332,342
def get_version():
    """Extract the package version from ``fleming/version.py``.

    Returns:
        str: the version string assigned to ``__version__``.

    Raises:
        RuntimeError: if no ``__version__`` assignment is found in the file.
    """
    VERSION_FILE = 'fleming/version.py'
    # read through a context manager so the file handle is always closed
    # (the original left the handle open)
    with open(VERSION_FILE, 'rt') as f:
        contents = f.read()
    mo = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', contents, re.M)
    if mo:
        return mo.group(1)
    raise RuntimeError('Unable to find version string in {0}.'.format(VERSION_FILE))
5,332,343
def test_batch_op_async_execute_failure(context):
    """Tests that an AirflowException is raised in case of error event.

    Builds a BatchOperatorAsync and feeds ``execute_complete`` an error
    event; the operator must surface the event's message as the exception
    text.
    """
    task = BatchOperatorAsync(
        task_id="task",
        job_name=JOB_NAME,
        job_queue="queue",
        job_definition="hello-world",
        max_retries=MAX_RETRIES,
        status_retries=STATUS_RETRIES,
        parameters=None,
        overrides={},
        array_properties=None,
        aws_conn_id="airflow_test",
        region_name="eu-west-1",
        tags={},
    )
    with pytest.raises(AirflowException) as exc_info:
        # context is not used by execute_complete on the error path
        task.execute_complete(context=None, event={"status": "error", "message": "test failure message"})
        assert str(exc_info.value) == "test failure message"
5,332,344
def get_cache_path():
    """Return a path suitable to store cached files.

    Ensures the directory exists before returning it.

    Returns:
        str: the cache directory path (module-level ``cache_path``).
    """
    # makedirs with exist_ok is race-free and also creates missing parent
    # directories, which the original os.mkdir + except could not do
    os.makedirs(cache_path, exist_ok=True)
    return cache_path
5,332,345
def distance(s1, s2):
    """
    Euclidean distance between two sequences, supporting different lengths.

    When the lengths differ, the last element of the shorter sequence is
    compared against every remaining element of the longer one. This keeps
    the result compatible with using Euclidean distance as an upper bound
    for DTW.

    :param s1: Sequence of numbers
    :param s2: Sequence of numbers
    :return: Euclidean distance
    """
    total = sum((a - b) ** 2 for a, b in zip(s1, s2))

    # pad the shorter sequence with its own last value
    shorter, longer = (s1, s2) if len(s1) < len(s2) else (s2, s1)
    if len(shorter) != len(longer):
        tail_ref = shorter[-1]
        total += sum((v - tail_ref) ** 2 for v in longer[len(shorter):])

    return math.sqrt(total)
5,332,346
def remove_test_set_gender_and_age(nodes):
    """Blank out AGE and gender for a random held-out subset of profiles.

    Samples 40,000 distinct user ids, labels them ``TEST`` in a new
    ``TRAIN_TEST`` column (everything else is ``TRAIN``) and sets their
    AGE/gender to NaN so they can serve as an estimation target.

    Mutates and returns *nodes*.
    """
    # todo: the 40k random can be adjusted if youre working with a subset
    held_out_ids = np.random.choice(nodes["user_id"].unique(), 40000, replace=False)

    nodes["TRAIN_TEST"] = "TRAIN"
    is_held_out = nodes["user_id"].isin(held_out_ids)
    nodes.loc[is_held_out, ["AGE", "gender"]] = np.nan
    nodes.loc[is_held_out, ["TRAIN_TEST"]] = "TEST"
    return nodes
5,332,347
def valid_attribute(attr_filter_key, attr_filter_val, hit):
    """Validate a hit against an attribute filter.

    Returns True unconditionally when either filter part is the literal
    string "None" (filtering disabled). Otherwise the value stored under
    *attr_filter_key* in the GTF-style attribute column (``hit[8]``) must
    equal *attr_filter_val*; a missing key never matches.
    """
    if attr_filter_key == "None" or attr_filter_val == "None":
        # filtering disabled -> every hit passes
        return True
    try:
        # attribute column looks like: key1 "v1"; key2 "v2"; ...
        raw_value = re.split(
            "; " + attr_filter_key + " ", hit[8])[1].split(';')[0]
        found = raw_value.strip('"\'').rstrip('\"')
    except IndexError:
        # the requested key is absent from the attribute column
        found = "not.found"
    return attr_filter_val == found
5,332,348
def dek_to_greg(dek_day: int, dek_month: int, dek_year: int) -> tuple:
    """Convert a Dekatrian date into the Gregorian calendar.

    Args:
        dek_day (int): Day of the month.
        dek_month (int): Month of the year.
        dek_year (int): Year.

    Return:
        tuple: (day, month, year) in the Gregorian calendar.
    """
    remaining = year_day_on_greg_date(dek_day, dek_month, dek_year)

    # lengths of the 12 Gregorian months; February gains a day in leap years
    month_lengths = (
        31,                              # JAN
        28 + calendar.isleap(dek_year),  # FEB
        31,                              # MAR
        30,                              # APR
        31,                              # MAY
        30,                              # JUN
        31,                              # JUL
        31,                              # AUG
        30,                              # SEP
        31,                              # OCT
        30,                              # NOV
        31,                              # DEC
    )
    # walk through the year, consuming whole months until the day fits
    for month, length in enumerate(month_lengths, start=1):
        if remaining <= length:
            break
        remaining -= length
    return (remaining, month, dek_year)
5,332,349
def progress_bar(t):
    """Render a textual progress bar that fills up over *t* seconds."""
    from time import sleep

    step_delay = t / 100
    for pct in range(1, 101):
        filled = u"\u2588" * round(pct // 3.333)
        print("\r{:>6}% |{:<30}|".format(pct, filled), end='', flush=True)
        sleep(step_delay)
    sleep(0.1)
    print("\n")
5,332,350
def encrypt_message(kx, ky, message):
    """
    Encrypts a message using ECC and AES-256

    Performs an ECIES key exchange against the given public point, hashes the
    shared secret with SHA3-512 to derive an AES-256 key and IV, then encrypts
    the OAEP-padded message with AES-CBC.

    NOTE: The ephemeral exchange means plaintext will not have the same
    ciphertext when encrypted twice. Keep this in mind if you require
    reproducible behavior.

    :param kx: Public key kx (int)
    :param ky: Public key ky (int)
    :param message: Message (bytes)
    :return: Tuple (encrypted key as (x, y) ints of the ephemeral point,
             encrypted message (bytes))
    """
    ecies = ecc.ECEIS(ecc.CURVE)
    # r is the ephemeral public point to transmit; s is the shared secret
    r, s = ecies.exchange(ecc.ECPublicKey(ecc.AffineCurvePoint(kx, ky, ecc.CURVE)))
    s = str(s).encode('utf-8')
    # first 32 bytes of the hash become the AES-256 key, next 16 the IV
    key = sha.SHA3_512(s).digest()
    message_encryptor = Encrypter(mode=AESModeOfOperationCBC(key[:32], iv=key[32:48]))
    encrypted_blocks = message_encryptor.feed(oaep.oaep_pad(message))
    # final feed() with no argument flushes the last padded block
    encrypted_blocks += message_encryptor.feed()
    encrypted_key = r.x, r.y
    return encrypted_key, encrypted_blocks
5,332,351
def _create_init_files(original_file_location: FilePath) -> List[str]:
    """Given the original file location of a handler, create the artifact paths
    for all the __init__.py files that make the handler a valid python module.

    Args:
        original_file_location (str): original file path

    Returns:
        List[str]: all __init__.py files needed
    """
    # directories between the workspace root and the handler file
    directories = paths.get_relative_to_workspace_path(
        original_file_location
    ).split("/")[:-1]

    init_files = []
    current_dir = paths.get_workspace_path()
    for directory in directories:
        current_dir = os.path.join(current_dir, directory)
        init_files.append(os.path.join(current_dir, "__init__.py"))
    return init_files
5,332,352
def mirror_notes(key_position: int) -> int:
    """Return the given key position mirrored across the playfield.

    Args:
        key_position: the original key position.

    Returns:
        The key position after mirroring (512 minus the input).
    """
    mirrored_position = 512 - key_position
    return mirrored_position
5,332,353
def import_odim_hdf5(filename, **kwargs):
    """Import a precipitation field (and optionally the quality field) from a
    HDF5 file conforming to the ODIM specification.

    Parameters
    ----------
    filename : str
        Name of the file to import.

    Other Parameters
    ----------------
    qty : {'RATE', 'ACRR', 'DBZH'}
        The quantity to read from the file. The currently supported identitiers
        are: 'RATE'=instantaneous rain rate (mm/h), 'ACRR'=hourly rainfall
        accumulation (mm) and 'DBZH'=max-reflectivity (dBZ). The default value
        is 'RATE'.

    Returns
    -------
    out : tuple
        A three-element tuple containing the OPERA product for the requested
        quantity and the associated quality field and metadata. The quality
        field is read from the file if it contains a dataset whose quantity
        identifier is 'QIND'.

    Raises
    ------
    ValueError
        If ``qty`` is not one of the supported identifiers.
    IOError
        If the requested quantity is not present in the file.
    """
    if not h5py_imported:
        raise MissingOptionalDependency(
            "h5py package is required to import "
            "radar reflectivity composites using ODIM HDF5 specification "
            "but it is not installed"
        )

    qty = kwargs.get("qty", "RATE")

    if qty not in ["ACRR", "DBZH", "RATE"]:
        # bugfix: the offending quantity was never interpolated into the message
        raise ValueError(
            "unknown quantity %s: the available options are 'ACRR', 'DBZH' and 'RATE'"
            % qty
        )

    f = h5py.File(filename, "r")

    R = None
    Q = None

    for dsg in f.items():
        if dsg[0][0:7] == "dataset":
            what_grp_found = False
            # check if the "what" group is in the "dataset" group
            if "what" in list(dsg[1].keys()):
                qty_, gain, offset, nodata, undetect = _read_odim_hdf5_what_group(
                    dsg[1]["what"]
                )
                what_grp_found = True

            for dg in dsg[1].items():
                if dg[0][0:4] == "data":
                    # a "what" group inside "data" overrides the dataset-level one
                    if "what" in list(dg[1].keys()):
                        qty_, gain, offset, nodata, undetect = _read_odim_hdf5_what_group(
                            dg[1]["what"]
                        )
                    elif not what_grp_found:
                        raise DataModelError(
                            "Non ODIM compilant file: "
                            "no what group found from {} "
                            "or its subgroups".format(dg[0])
                        )

                    if qty_.decode() in [qty, "QIND"]:
                        ARR = dg[1]["data"][...]
                        MASK_N = ARR == nodata
                        MASK_U = ARR == undetect
                        MASK = np.logical_and(~MASK_U, ~MASK_N)

                        if qty_.decode() == qty:
                            R = np.empty(ARR.shape)
                            # convert raw counts to physical units
                            R[MASK] = ARR[MASK] * gain + offset
                            R[MASK_U] = 0.0
                            R[MASK_N] = np.nan
                        elif qty_.decode() == "QIND":
                            Q = np.empty(ARR.shape, dtype=float)
                            Q[MASK] = ARR[MASK]
                            Q[~MASK] = np.nan

    if R is None:
        raise IOError("requested quantity %s not found" % qty)

    where = f["where"]
    proj4str = where.attrs["projdef"].decode()
    pr = pyproj.Proj(proj4str)

    LL_lat = where.attrs["LL_lat"]
    LL_lon = where.attrs["LL_lon"]
    UR_lat = where.attrs["UR_lat"]
    UR_lon = where.attrs["UR_lon"]
    if (
        "LR_lat" in where.attrs.keys()
        and "LR_lon" in where.attrs.keys()
        and "UL_lat" in where.attrs.keys()
        and "UL_lon" in where.attrs.keys()
    ):
        LR_lat = float(where.attrs["LR_lat"])
        LR_lon = float(where.attrs["LR_lon"])
        UL_lat = float(where.attrs["UL_lat"])
        UL_lon = float(where.attrs["UL_lon"])
        full_cornerpts = True
    else:
        full_cornerpts = False

    LL_x, LL_y = pr(LL_lon, LL_lat)
    UR_x, UR_y = pr(UR_lon, UR_lat)
    if full_cornerpts:
        # take the bounding box over all four projected corners
        LR_x, LR_y = pr(LR_lon, LR_lat)
        UL_x, UL_y = pr(UL_lon, UL_lat)
        x1 = min(LL_x, UL_x)
        y1 = min(LL_y, LR_y)
        x2 = max(LR_x, UR_x)
        y2 = max(UL_y, UR_y)
    else:
        x1 = LL_x
        y1 = LL_y
        x2 = UR_x
        y2 = UR_y

    if "xscale" in where.attrs.keys() and "yscale" in where.attrs.keys():
        xpixelsize = where.attrs["xscale"]
        ypixelsize = where.attrs["yscale"]
    else:
        xpixelsize = None
        ypixelsize = None

    if qty == "ACRR":
        unit = "mm"
        transform = None
    elif qty == "DBZH":
        unit = "dBZ"
        transform = "dB"
    else:
        unit = "mm/h"
        transform = None

    # smallest value above the minimum, used as the rain/no-rain threshold
    if np.any(np.isfinite(R)):
        thr = np.nanmin(R[R > np.nanmin(R)])
    else:
        thr = np.nan

    metadata = {
        "projection": proj4str,
        "ll_lon": LL_lon,
        "ll_lat": LL_lat,
        "ur_lon": UR_lon,
        "ur_lat": UR_lat,
        "x1": x1,
        "y1": y1,
        "x2": x2,
        "y2": y2,
        "xpixelsize": xpixelsize,
        "ypixelsize": ypixelsize,
        "yorigin": "upper",
        "institution": "Odyssey datacentre",
        "accutime": 15.0,
        "unit": unit,
        "transform": transform,
        "zerovalue": np.nanmin(R),
        "threshold": thr,
    }
    f.close()

    return R, Q, metadata
5,332,354
def sugerir(update: Update, _: CallbackContext) -> int:
    """Show the 'suggest a story' screen with a new choice of buttons.

    Answers the pending callback query and replaces the message text with
    instructions for suggesting a character or contributing a story, plus
    back/exit buttons.

    Returns the NINE conversation state.
    """
    query = update.callback_query
    # acknowledge the callback so the client stops showing a loading state
    query.answer()
    keyboard = [
        [
            InlineKeyboardButton("\U0001F519 Volver", callback_data=str(NINE)),
            InlineKeyboardButton("\U0001F44B Salir", callback_data=str(TEN)),
        ]
    ]
    reply_markup = InlineKeyboardMarkup(keyboard)
    query.edit_message_text(
        text="\U0001F91A Sugerir cuentos:\n\n Responde este mensaje para sugerir un personaje o para realizar el aporte de un cuento\n",
        reply_markup=reply_markup
    )
    return NINE
5,332,355
def clone_subgraph(*, outputs, inputs, new_inputs, suffix="cloned"):
    """
    Take all of the tensorflow nodes between `outputs` and `inputs` and clone
    them, but with `inputs` replaced with `new_inputs`.

    Args:
        outputs (List[tf.Tensor]): list of output tensors
        inputs (List[tf.Tensor]): list of input tensors
        new_inputs (List[tf.Tensor]): list of new input tensors
        suffix (str, optional): suffix to the transformed operation names

    Returns:
        List[T]: list of transformed outputs
    """
    # named transformer instead of an inline lambda; captures `suffix`
    def _clone_with_suffix(op, op_inputs):
        return clone_op(op, op_inputs, suffix=suffix)

    return transform(
        outputs=outputs,
        inputs=inputs,
        transformed_inputs=new_inputs,
        transformer=_clone_with_suffix,
    )
5,332,356
async def refresh_replacements(db, sample_id: str) -> list:
    """
    Remove sample file `replacement` fields if the linked files have been deleted.

    :param db: the application database client
    :param sample_id: the id of the sample to refresh
    :return: the updated files list

    """
    files = await virtool.db.utils.get_one_field(db.samples, "files", sample_id)

    for file in files:
        replacement = file.get("replacement")

        # drop the replacement reference when the file document no longer exists
        if replacement and not await db.files.count_documents({"_id": replacement["id"]}):
            file["replacement"] = None

    # NOTE(review): find_one_and_update returns the pre-update document by
    # default with Motor/PyMongo — confirm the driver here is configured to
    # return the updated one, otherwise `document["files"]` may be stale.
    document = await db.samples.find_one_and_update({"_id": sample_id}, {
        "$set": {
            "files": files
        }
    })

    return document["files"]
5,332,357
def test_sinewaveform():
    """
    Test a SineWaveform object.

    Ensure that the object is callable and evaluates to the expected sine
    waveform ``amplitude * sin(2*pi*frequency*t - offset)`` on random sample
    times, within a 1% relative tolerance.
    """
    t = np.random.rand(1000)
    freq, offset, amp = 1e9, 1, 2
    wf = SineWaveform(frequency=freq, offset=offset, amplitude=amp)
    # reference waveform computed directly with numpy
    y = amp * np.sin(2 * np.pi * freq * t - offset)
    assert y == pytest.approx(wf(t), rel=0.01)
5,332,358
def _validate_options(args, unknown, groups):
    """Validate argument options and dispatch to the matching command callback.

    Checks that exactly one top-level command (sync/remove/query) was given,
    that sub-options are compatible with it, and that targets and config file
    are present where required. On any validation failure, reports via
    _console_error and exits through ctx.exiting(1); otherwise invokes the
    resolved callback with a Context built from the remaining arguments.
    """
    valid_count = 0
    invalid = False
    need_targets = False
    call_on = None
    # suppress "performing ..." chatter for search/query unless verbose
    if (args.search or args.query) and not args.verbose:
        def no_call(name):
            pass
        call_on = no_call
    else:
        def action(name):
            _console_output("performing {}".format(name))
        call_on = action
    if args.sync:
        call_on("sync")
        valid_count += 1
    if args.upgrades or args.search or args.clean or args.deps:
        # sync sub-options are mutually exclusive
        sub_count = 0
        sub_command = None
        if args.upgrades:
            sub_command = "upgrade"
            sub_count += 1
        if args.clean:
            sub_command = "clean"
            sub_count += 1
        if args.search:
            sub_count += 1
        if args.deps:
            sub_count += 1
            sub_command = "deps"
        if sub_count == 1:
            # search has no announcement name, hence the None check
            if sub_command is not None:
                call_on("sync: {}".format(sub_command))
        else:
            _console_error("cannot perform multiple sub-options")
            invalid = True
        if args.search or args.deps or (not args.upgrades and not args.clean and not args.deps):
            need_targets = True
    if args.remove:
        call_on("remove")
        valid_count += 1
        need_targets = True
    if args.query:
        call_on("query")
        valid_count += 1
    # exactly one top-level command must have been selected
    if not invalid:
        if valid_count > 1:
            _console_error("multiple top-level arguments given")
        elif valid_count == 0:
            _console_error("no valid top-level arguments given")
        if valid_count != 1:
            invalid = True
    if not invalid and (args.search or args.upgrades or args.clean):
        if not args.sync:
            _console_error("search, upgrade, and clean are sync only")
            invalid = True
    if not invalid and args.info and not args.search:
        _console_error("info only works with search")
        invalid = True
    if not invalid and args.info and args.quiet:
        _console_error("info and quiet do not work together")
        invalid = True
    if not invalid and args.gone and not args.query:
        _console_error("gone only works with query")
        invalid = True
    if not invalid and need_targets:
        if len(unknown) == 0:
            _console_error("no targets specified")
            invalid = True
    if not args.pacman or not os.path.exists(args.pacman):
        _console_error("invalid config file")
        invalid = True
    ctx = Context(unknown, groups, args)
    # resolve which command function to run
    callback = None
    if not invalid:
        if args.query:
            if args.gone:
                callback = _gone
            else:
                callback = _query
        if args.search:
            callback = _search
        if args.upgrades:
            callback = _upgrades
        if args.clean:
            callback = _clean
        if args.deps:
            callback = _deps
        if args.sync:
            # plain sync only when no sub-option claimed the callback
            if not args.search and \
               not args.upgrades and \
               not args.clean and \
               not args.deps:
                callback = _sync
        if args.remove:
            callback = _remove
    if not invalid and callback is None:
        _console_error("unable to find callback")
        invalid = True
    if invalid:
        ctx.exiting(1)
    callback(ctx)
5,332,359
def commitTaskBySerialNo(SerialNo):
    """Approve (pass) the task identified by the given serial number.

    Sets ``State = 0`` on the matching ``RMI_TASK`` row.

    :param SerialNo: serial number of the task to approve
    :return: None
    """
    raw = rawSql.Raw_sql()
    # SECURITY: the statement is built by string interpolation; escape single
    # quotes so the value cannot break out of the literal. Prefer switching to
    # parameterized queries if rawSql supports them.
    safe_serial = str(SerialNo).replace("'", "''")
    raw.sql = "UPDATE RMI_TASK SET State = 0 WHERE SerialNo = '%s'" % safe_serial
    raw.update()
    return
5,332,360
def torch_model (model_name, device, checkpoint_path = None):
    """Build a torchvision ImageNet model by name, adapted to 2 output classes.

    When ``checkpoint_path`` is given, the model is created without pretrained
    weights and the checkpoint is loaded instead; otherwise ImageNet-pretrained
    weights are used. The final classification layer (``classifier`` or ``fc``,
    depending on the architecture) is replaced with a 2-class head.

    Args:
        model_name: key into ``models.__dict__`` (e.g. 'resnet18', 'mobilenet_v2')
        device: torch device to move the model to
        checkpoint_path: optional path to a checkpoint to restore

    Returns:
        the model, on ``device``, with a 2-class output layer
    """
    if checkpoint_path:
        pretrained = False
    else:
        pretrained = True

    model = models.__dict__ [model_name](pretrained)

    # swap the head: architectures expose it either as `classifier` or `fc`
    if hasattr (model, 'classifier'):
        if model_name == 'mobilenet_v2':
            # mobilenet_v2's classifier is (Dropout, Linear); keep the dropout
            model.classifier = nn.Sequential(
                nn.Dropout (0.2),
                nn.Linear (model.classifier [-1].in_features, 2))
        else:
            model.classifier = nn.Sequential(
                nn.Linear (model.classifier.in_features, 2))
    elif hasattr (model, 'fc'):
        model.fc = nn.Linear (model.fc.in_features, 2)

    model.to(device)

    if checkpoint_path:
        load_checkpoint (checkpoint_path, model, device)

    return model
5,332,361
def main(inargs):
    """Compute precipitation minus evaporation (P-E) and write it to file.

    Combines the input precipitation and evaporation files, subtracts
    evaporation from precipitation, registers and applies the custom
    'precipitation_minus_evaporation_flux' standard name, records provenance
    in the history attribute, and saves the result to ``inargs.outfile``.
    """
    pr_cube, pr_history = gio.combine_files(inargs.pr_files, 'precipitation_flux', checks=True)
    evap_cube, evap_history = gio.combine_files(inargs.evap_files, 'water_evapotranspiration_flux', checks=True)
    # the subtraction below requires grid/shape agreement
    assert pr_cube.shape == evap_cube.shape
    pe_cube = pr_cube.copy()
    pe_cube.data = pr_cube.data - evap_cube.data
    # register the non-CF standard name so iris accepts it
    iris.std_names.STD_NAMES['precipitation_minus_evaporation_flux'] = {'canonical_units': pe_cube.units}
    pe_cube.standard_name = 'precipitation_minus_evaporation_flux'
    pe_cube.long_name = 'precipitation minus evaporation flux'
    pe_cube.var_name = 'pe'
    pe_cube.attributes['history'] = cmdprov.new_log(git_repo=repo_dir)
    iris.save(pe_cube, inargs.outfile)
5,332,362
def get_timezone() -> Tuple[datetime.tzinfo, str]:
    """Discover the current time zone and its standard string representation (for source{d})."""
    now = get_datetime_now().astimezone()
    offset = now.strftime("%z")
    # insert the colon separator: "+0200" -> "+02:00"
    offset = "{}:{}".format(offset[:-2], offset[-2:])
    return now.tzinfo, offset
5,332,363
def get_model(input_shape: List[int], weight_array: np.array,
              batches_per_step: int, replication_factor: int, batch_size: int,
              channels: int, data_len: int, synthetic_data: bool,
              buffer_streams: bool) -> Tuple:
    """Get a simple model for comparison with buffer streams on and off.

    Adapted from prefetch_test.py as we require to test the validity of streams
    here as well.

    Args:
        input_shape (List[int]): Shape of the model input tensor
        weight_array (np.array): Initial values for the weight tensor
        batches_per_step (int): Batches to run per step
        replication_factor (int): Replicas to run
        batch_size (int): Number of samples per model run
        channels (int): Number of channels e.g. RGB = 3
        data_len (int): Data size
        synthetic_data (bool): Use synthetic data (zeros in this case)
        buffer_streams (bool): The test option: whether to create ops before
            the stream in order to schedule data loading as part of graph
            scheduling. See T29603.

    Returns:
        Tuple: session, anchors, label_shape required to run the model
    """
    # each replica sees a slice of the global batch
    micro_batch_size = batch_size // (replication_factor)
    builder = popart.Builder()
    data_shape = popart.TensorInfo("FLOAT", input_shape)
    lbl_shape = popart.TensorInfo("INT32", [micro_batch_size])
    w = builder.addInitializedInputTensor(weight_array)
    ip = builder.addInputTensor(data_shape, "main_input_123")
    lb = builder.addInputTensor(lbl_shape, "label_input_456")
    # matmul -> reshape -> relu -> softmax -> NLL loss
    a = builder.aiOnnx.matmul([ip, w])
    o = builder.reshape_const(
        builder.aiOnnx, [a],
        [micro_batch_size, channels * data_len * data_len])
    relu = builder.aiOnnx.relu([o])
    sm = builder.aiOnnx.softmax([relu], axis=0, debugContext="output")
    builder.addOutputTensor(sm)
    o = builder.aiGraphcore.nllloss([sm, lb],
                                    reduction=popart.ReductionType.Mean)
    # anchor every intermediate so the test can inspect all streams
    art = popart.AnchorReturnType("All")
    data_flow = popart.DataFlow(batches_per_step, {
        ip: art,
        lb: art,
        o: art,
        sm: art,
        a: art,
        relu: art
    })
    opts = popart.SessionOptions()
    # the option under test: host-copy ops scheduled as part of the graph
    opts.useHostCopyOps = buffer_streams
    # TODO: Fix outlining
    opts.enableOutlining = False
    ipus = 1
    if replication_factor > 1:
        opts.replicatedGraphCount = replication_factor
        opts.enableReplicatedGraphs = True
        ipus *= replication_factor
    device = tu.create_test_device(ipus)
    assert device
    patterns = popart.Patterns(popart.PatternsLevel.Minimal).enablePattern(
        "MatMulLhsGradOp", True).enablePattern("MatMulRhsGradOp", True)
    patterns.InPlace = False
    if synthetic_data:
        opts.syntheticDataMode = popart.SyntheticDataMode.Zeros
    session = popart.TrainingSession(fnModel=builder.getModelProto(),
                                     dataFlow=data_flow,
                                     loss=o,
                                     optimizer=popart.ConstSGD(LR),
                                     userOptions=opts,
                                     deviceInfo=device,
                                     patterns=patterns)
    session.setRandomSeed(0)
    session.prepareDevice()
    label_shape = [micro_batch_size]
    # expected host-side data shapes grow with replication and batching
    if replication_factor > 1:
        input_shape = [replication_factor] + input_shape
        label_shape = [replication_factor] + label_shape
    if batches_per_step > 1:
        input_shape = [batches_per_step] + input_shape
        label_shape = [batches_per_step] + label_shape
    anchors = session.initAnchorArrays()
    return session, anchors, label_shape
5,332,364
def shell_safe_json_parse(json_or_dict_string, preserve_order=False): """ Allows the passing of JSON or Python dictionary strings. This is needed because certain JSON strings in CMD shell are not received in main's argv. This allows the user to specify the alternative notation, which does not have this problem (but is technically not JSON). """ try: if not preserve_order: return json.loads(json_or_dict_string) from collections import OrderedDict return json.loads(json_or_dict_string, object_pairs_hook=OrderedDict) except ValueError as json_ex: try: import ast return ast.literal_eval(json_or_dict_string) except SyntaxError: raise CLIError(json_ex) except ValueError as ex: # log the exception which could be a python dict parsing error. logger.debug(ex) # raise json_ex error which is more readable and likely. raise CLIError(json_ex)
5,332,365
def get_timed_roadmaps_grid_common( ins: Instance, T: int, size: int, ) -> list[TimedRoadmap]: """[deprecated] get grid roadmap shared by all agents Args: ins (Instance): instance T (int): assumed makespan size (int): size x size grid will be constructed Returns: list[np.ndarray]: locations Note: use get_timed_roadmaps_grid_common_2d_fast in 2d environment """ if ins.dim == 2: return get_timed_roadmaps_grid_common_2d_fast(ins, T, size) return get_common_roadmaps(ins, T, get_grid(size, ins.rads[0], ins))
5,332,366
def train(last_step, lmbdas):
    """Train the slimmable compression model jointly over all switches.

    Builds the input pipeline, a slimmable encoder/decoder pair with one
    entropy bottleneck per switch, a rate-distortion loss per lambda, and
    runs a MonitoredTrainingSession until ``last_step`` global steps.

    Args:
        last_step: global step at which training stops (StopAtStepHook).
        lmbdas: one rate-distortion trade-off weight per switch.
    """
    if args.verbose:
        tf.logging.set_verbosity(tf.logging.INFO)

    # create input data pipeline.
    with tf.device('/cpu:0'):
        train_files = glob.glob(args.train_glob)
        train_dataset = tf.data.Dataset.from_tensor_slices(train_files)
        train_dataset = train_dataset.shuffle(buffer_size=len(train_files)).repeat()
        train_dataset = train_dataset.map(
            load_image, num_parallel_calls=args.preprocess_threads)
        train_dataset = train_dataset.map(
            lambda x: tf.random_crop(x, (args.patchsize, args.patchsize, 3)))
        train_dataset = train_dataset.batch(args.batchsize)
        train_dataset = train_dataset.prefetch(32)

    num_pixels = args.batchsize * args.patchsize ** 2
    total_filters_num = args.num_filters

    # get training patch from dataset.
    x = train_dataset.make_one_shot_iterator().get_next()

    if args.train_jointly:
        # lists to keep loss for each lambda
        y_tilde, entropy_bottlenecks, likelihoods = list(), list(), list()
        train_bpp, train_mse, train_loss = list(), list(), list()
        # build a slimmable encoder
        y = slimmable_analysis_transform(x, args.switch_list, total_filters_num)
        for i, _switch in enumerate(args.switch_list):
            entropy_bottlenecks.append(tfc.EntropyBottleneck())
            _y_tilde, _likelihoods = entropy_bottlenecks[i](y[i], training=True)
            y_tilde.append(_y_tilde)
            likelihoods.append(_likelihoods)
        # build a slimmable decoder
        x_tilde = slimmable_synthesis_transform(y_tilde, args.switch_list, total_filters_num)

        for i, _switch in enumerate(args.switch_list):
            # Total number of bits divided by number of pixels.
            train_bpp.append(tf.reduce_sum(tf.log(likelihoods[i])) / (-np.log(2) * num_pixels))
            # Mean squared error across pixels.
            train_mse.append(tf.reduce_mean(tf.squared_difference(x, x_tilde[i])))
            # Multiply by 255^2 to correct for rescaling.
            # train_mse[i] *= 255 ** 2
            # The rate-distortion cost.
            train_loss.append(lmbdas[i] * train_mse[i] + train_bpp[i])

        # total loss
        total_train_loss = tf.add_n(train_loss)

        # minimize loss and auxiliary loss, and execute update op.
        step = tf.train.create_global_step()
        main_optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
        main_step = main_optimizer.minimize(total_train_loss, global_step=step)

        aux_optimizers = list()
        list_ops = [main_step]
        for i, entropy_bottleneck in enumerate(entropy_bottlenecks):
            aux_optimizers.append(tf.train.AdamOptimizer(learning_rate=1e-3))
            list_ops.append(aux_optimizers[i].minimize(entropy_bottleneck.losses[0]))
            list_ops.append(entropy_bottleneck.updates[0])
        train_op = tf.group(list_ops)

        # summaries
        for i, _switch in enumerate(args.switch_list):
            tf.summary.scalar("loss_%d" % i, train_loss[i])
            tf.summary.scalar("bpp_%d" % i, train_bpp[i])
            tf.summary.scalar("mse_%d" % i, train_mse[i] * 255 ** 2)  # Rescaled
            tf.summary.histogram("hist_y_%d" % i, y[i])
        tf.summary.scalar("total_loss", total_train_loss)

        hooks = [
            tf.train.StopAtStepHook(last_step=last_step),
            tf.train.NanTensorHook(total_train_loss),
        ]
        with tf.train.MonitoredTrainingSession(
                hooks=hooks, checkpoint_dir=args.checkpoint_dir,
                save_checkpoint_secs=900, save_summaries_secs=600) as sess:
            while not sess.should_stop():
                # fix: run only train_op; the previous extra sess.run(step)
                # fetched the global step each iteration for no effect
                sess.run(train_op)
5,332,367
def connect_areas_in_streams(system, cortical_areas):
    """
    Dorsal / ventral aware version. Include V1 and V2 in cortical_areas, but
    they must be connected to each other separately.

    For every non-V1/V2 target area, looks up its feedforward source areas and
    wires them into the target's layer-4 population, splitting the connection
    strength (FLNe) between supragranular (SLN%) and infragranular (L5/L6)
    origins. V1 and V2 sources are special-cased because their compartments
    (blob/interblob, thin/pale/thick stripes) differ between the ventral and
    dorsal streams.
    """
    for target in [a for a in cortical_areas if a not in ('V1', 'V2')]:
        for source in data.get_source_areas(target, feedforward_only=True):
            # print(source)
            if source in cortical_areas:
                FLNe = data.get_FLNe(source, target)
                SLN = data.get_SLN(source, target)
                # print('{}->{} FLNe: {} SLN: {}'.format(source, target, FLNe, SLN))
                # FLN, SLF, TLF = get_connection_details(source, target)
                target_pop = '{}_4'.format(target)
                # fraction of infragranular input originating in L5 vs L6
                fl5, fl6 = _get_layer_56_source_fractions(source, target)
                # print('{}->{} fractions L5: {} L6: {}'.format(source, target, fl5, fl6))
                if source == 'V1':
                    if is_ventral(target):
                        # V4 gets 2/3 input, not sure about others
                        system.connect_areas('V1_2/3blob', target_pop, .333*FLNe*SLN/100)
                        system.connect_areas('V1_2/3interblob', target_pop, .667*FLNe*SLN/100)
                        system.connect_areas('V1_5', target_pop, FLNe*(1-SLN/100))
                    else:
                        # true for MT, not sure about others
                        system.connect_areas('V1_4B', target_pop, FLNe*SLN/100)
                elif source == 'V2':
                    if is_ventral(target):
                        # ventral stream draws on thin and pale stripes
                        thin_fraction = .333
                        pale_fraction = .667
                        system.connect_areas('V2thin_2/3', target_pop, thin_fraction*FLNe*SLN/100)
                        system.connect_areas('V2thin_5', target_pop, fl5*thin_fraction*FLNe*(1-SLN/100))
                        system.connect_areas('V2thin_6', target_pop, fl6*thin_fraction*FLNe*(1-SLN/100))
                        system.connect_areas('V2pale_2/3', target_pop, pale_fraction*FLNe*SLN/100)
                        system.connect_areas('V2pale_5', target_pop, fl5*pale_fraction*FLNe*(1-SLN/100))
                        system.connect_areas('V2pale_6', target_pop, fl6*pale_fraction*FLNe*(1-SLN/100))
                    else:
                        # dorsal stream draws on thick stripes
                        system.connect_areas('V2thick_2/3', target_pop, FLNe*SLN/100)
                        system.connect_areas('V2thick_5', target_pop, fl5*FLNe*(1-SLN/100))
                        system.connect_areas('V2thick_6', target_pop, fl6*FLNe*(1-SLN/100))
                else:
                    # generic cortical source: L2/3 carries the SLN share,
                    # L5/L6 split the remainder
                    system.connect_areas('{}_2/3'.format(source), target_pop, FLNe*SLN/100)
                    system.connect_areas('{}_5'.format(source), target_pop, fl5*FLNe*(1-SLN/100))
                    system.connect_areas('{}_6'.format(source), target_pop, fl6*FLNe*(1-SLN/100))
5,332,368
def remove_except_npy(c: "Config", keep: str):
    """Remove generated files, keeping only .npy files whose name contains
    one of the given words.

    Args:
        c: configuration providing ``generated_data_dir()``.
        keep: whitespace-separated words; a file survives only if it ends in
            "npy" AND contains at least one of these words.
    """
    keep = keep.split()
    print_w(f"Removing all files in: {c.generated_data_dir()}")
    print_w(f"Except files containing one of: {keep}")
    # os.walk already descends into every subdirectory, so a single walk
    # suffices (the previous version also recursed manually per directory,
    # re-walking each subtree redundantly)
    for root, _dir_names, file_names in os.walk(c.generated_data_dir()):
        for file_name in file_names:
            if any(k in file_name for k in keep) and file_name.endswith("npy"):
                print_w(f"Keeping {file_name}")
            else:
                print_i(f"Removing {file_name}")
                os.remove(os.path.join(root, file_name))
5,332,369
def arithmetic_series(a: int, n: int, d: int = 1) -> int:
    """Return the sum of an arithmetic sequence.

    a: The first term in the sequence
    n: The total number of terms in the sequence
    d: The difference between any two consecutive terms

    Computes a + (a+d) + ... + (a+(n-1)d) as n times the average of the
    first and last terms.
    """
    last_term = a + (n - 1) * d
    return n * (a + last_term) // 2
5,332,370
def get_arc_proxy_user(proxy_file=None):
    """
    Returns the owner of the arc proxy. When *proxy_file* is *None*, it defaults to the result of
    :py:func:`get_arc_proxy_file`. Otherwise, when it evaluates to *False*, ``arcproxy`` is
    queried without a custom proxy file.

    Raises:
        Exception: if no CN component can be extracted from the proxy identity.
    """
    out = _arc_proxy_info(args=["--infoitem=identity"], proxy_file=proxy_file)[1].strip()
    # extract the CN component of the identity DN; an explicit None check
    # replaces the previous bare `except:` which also swallowed unrelated
    # errors (including KeyboardInterrupt)
    m = re.match(r".*\/CN\=([^\/]+).*", out.strip())
    if m is None:
        raise Exception("no valid identity found in arc proxy: {}".format(out))
    return m.group(1)
5,332,371
def pass_hot_potato(names, num):
    """Pass hot potato.

    A hot potato is sequentially passed to ones in a queue line. After a
    number of passes, the one who got the hot potato is out. Then the
    passing-hot-potato game is launched again, until only one person remains.

    :param names: iterable of player names, in seating order
    :param num: number of passes per elimination round
    :return: the name of the last remaining player
    """
    name_queue = Queue()
    for name in names:
        name_queue.enqueue(name)

    while name_queue.size() > 1:
        # rotate the queue `num` times, then eliminate the current holder
        # (bugfix: `xrange` does not exist in Python 3)
        for _ in range(num):
            name_queue.enqueue(name_queue.dequeue())
        name_queue.dequeue()

    return name_queue.dequeue()
5,332,372
def convert_timeseries_dataframe_to_supervised(df: pd.DataFrame, namevars, target, n_in=1, n_out=0, dropT=True):
    """
    Transform a time series in dataframe format into a supervised learning dataset while
    keeping dataframe intact.
    Returns the transformed pandas DataFrame, the names of the target columns and the names
    of the predictor columns.
    Arguments:
        df: A timeseries dataframe that you want to convert to Supervised dataset.
        namevars: columns that you want to lag in the data frame. Other columns will be untouched.
        target: list of target variables you intend to use in supervised learning
        n_in: Number of lag periods as input (X).
        n_out: Number of future periods (optional) as output for the target variable (y).
        dropT: Boolean - whether or not to drop columns at time 't'.
    Returns:
        df: This is the transformed data frame with the time series columns lagged.
        Note that the original columns are dropped if you set the 'dropT' argument to True.
        If not, they are preserved. This Pandas DataFrame of lagged time series data is
        immediately available for supervised learning.
    rtype: pd.DataFrame, List[str], List[str]
    """
    target = copy.deepcopy(target)
    df = copy.deepcopy(df)
    # remember which input columns are integers so lagged copies (which become
    # float after .shift) can be cast back at the end
    int_vars = df.select_dtypes(include='integer').columns.tolist()
    # Create a sequence of lagged columns from namevars with suffix (t-n, ..., t-1, t)
    drops = []
    int_changes = []
    for i in range(n_in, -1, -1):
        if i == 0:
            # time-t columns: rename in place and mark them for optional dropping
            for var in namevars:
                addname = var + '(t)'
                df = df.rename(columns={var: addname})
                drops.append(addname)
                if var in int_vars:
                    int_changes.append(addname)
        else:
            for var in namevars:
                addname = var + '(t-' + str(i) + ')'
                df[addname] = df[var].shift(i)
                if var in int_vars:
                    int_changes.append(addname)
    # forecast sequence (t+1, ..., t+n); n_out == 0 disables it
    if n_out == 0:
        n_out = False
    for i in range(1, n_out):
        for var in namevars:
            addname = var + '(t+' + str(i) + ')'
            df[addname] = df[var].shift(-i)
    # drop rows made incomplete (NaN) by the shifting above
    df = df.dropna()
    # Make sure that whatever vars came in as integers return back as integers!
    if int_changes:  # only do this if there are some changes to implement
        df[int_changes] = df[int_changes].astype(np.int64)
    # rename each target's time-t column back to its plain name
    for each_target in target:
        df = df.rename(columns={each_target + '(t)': each_target})
    if dropT:
        # If dropT is true, all the "(t)" series columns are removed so the
        # model can't learn from time-t values. The target columns were just
        # renamed (they no longer carry the "(t)" suffix), so their old "(t)"
        # names must be removed from the drop list — otherwise df.drop raises
        # a KeyError for columns that no longer exist.
        # (BUGFIX: the original did drops.remove(target) with the whole list,
        # which always failed and was silently swallowed by a bare except.)
        for each_target in target:
            t_name = each_target + '(t)'
            if t_name in drops:
                drops.remove(t_name)
        df.drop(drops, axis=1, inplace=True)
    preds = [x for x in list(df) if x not in target]
    return df, target, preds
5,332,373
def setup_logging(options, conf):
    """
    Sets up the logging options for a log with supplied name

    :param options: Mapping of typed option key/values (CLI options; these
        take precedence over ``conf`` for every setting below)
    :param conf: Mapping of untyped key/values from config file
    """
    if options.get('log_config', None):
        # Use a logging configuration file for all settings...
        if os.path.exists(options['log_config']):
            logging.config.fileConfig(options['log_config'])
            return
        else:
            raise RuntimeError("Unable to locate specified logging "
                               "config file: %s" % options['log_config'])

    # If either the CLI option or the conf value
    # is True, we set to True
    debug = options.get('debug') or conf.get('debug', False)
    debug = debug in [True, "True", "1"]
    verbose = options.get('verbose') or conf.get('verbose', False)
    verbose = verbose in [True, "True", "1"]
    root_logger = logging.root
    # Level precedence: debug beats verbose beats the default WARNING.
    root_logger.setLevel(
        logging.DEBUG if debug
        else logging.INFO if verbose
        else logging.WARNING)

    # Set log configuration from options...
    # Note that we use a hard-coded log format in the options
    # because of Paste.Deploy bug #379
    # http://trac.pythonpaste.org/pythonpaste/ticket/379
    log_format = options.get('log_format', DEFAULT_LOG_FORMAT)
    log_date_format = options.get('log_date_format', DEFAULT_LOG_DATE_FORMAT)
    formatter = logging.Formatter(log_format, log_date_format)

    # grab log_file and log_dir from config; set to defaults of not already
    # defined
    logfile = options.get('log_file') or conf.get('log_file', DEFAULT_LOG_FILE)
    logdir = options.get('log_dir') or conf.get('log_dir', DEFAULT_LOG_DIR)

    # Add FileHandler if it doesn't exist — compare on the resolved absolute
    # path so the same file is never attached twice
    logfile = os.path.abspath(os.path.join(logdir, logfile))
    handlers = [handler for handler in root_logger.handlers
                if isinstance(handler, FileHandler)
                and handler.baseFilename == logfile]
    if not handlers:
        # NOTE: `logfile` is rebound here from a path string to the handler.
        logfile = logging.FileHandler(logfile)
        logfile.setFormatter(formatter)
        root_logger.addHandler(logfile)

    # Mirror to console if verbose or debug
    if debug or verbose:
        add_console_handler(root_logger, logging.DEBUG)
    else:
        add_console_handler(root_logger, logging.INFO)
5,332,374
def build_graph(adj_mat):
    """Build a sparse diffusion graph from a weighted adjacency matrix.

    The adjacency matrix needs to preserve divergence.

    Args:
        adj_mat: (sparse) adjacency matrix; nonzero entries become the weights
            of directed edges.

    Returns:
        An igraph ``Graph`` constructed via ``Graph.Weighted_Adjacency``.
    """
    # The old manual edge-list construction was left here commented out;
    # Graph.Weighted_Adjacency does the same thing in one call, so the dead
    # code has been removed.
    return Graph.Weighted_Adjacency(adj_mat)
5,332,375
def retrieve_context_path_comp_service_end_point_end_point(uuid):  # noqa: E501
    """Retrieve end-point

    Retrieve operation of resource: end-point # noqa: E501

    :param uuid: ID of uuid
    :type uuid: str

    :rtype: List[str]
    """
    # Placeholder produced by the API scaffolding; the real retrieval logic
    # has not been implemented yet.
    placeholder_response = 'do some magic!'
    return placeholder_response
5,332,376
def submit_pai_task(pai_cmd, datasource):
    """Submit given cmd to PAI which manipulate datasource

    Args:
        pai_cmd: The command to submit
        datasource: The datasource this cmd will manipulate

    Raises:
        SQLFlowDiagnostic: when the odpscmd process exits non-zero.
    """
    user, passwd, address, project = MaxComputeConnection.get_uri_parts(
        datasource)
    # assemble the odpscmd invocation as an argv list (no shell involved)
    cmd = ["odpscmd", "--instance-priority", "9"]
    cmd += ["-u", user, "-p", passwd]
    cmd += ["--project", project, "--endpoint", address]
    cmd += ["-e", pai_cmd]
    if run_command_and_log(cmd) != 0:
        raise SQLFlowDiagnostic("Execute odps cmd fail: cmd is %s" %
                                " ".join(cmd))
5,332,377
def test_step8():
    """8. If the remaining string is empty, return a slash."""
    # "///" reduces to an empty remainder, so dirname must print just "/"
    assert check(["dirname", "///"]).stdout == "/\n"
5,332,378
def get_external_repos(gh):
    """
    Get all external repositories from the `repos.config` file

    Each line of the file is expected to be "org/repo"; every entry is
    resolved through the supplied GitHub client.
    """
    with open("repos.config") as config_file:
        entries = [line.strip() for line in config_file.readlines()]

    repos = []
    for entry in entries:
        org_name, repo_name = entry.split('/')
        repos.append(gh.get_organization(org_name).get_repo(repo_name))
    return repos
5,332,379
def handle_standard_table(pgconn, table_name, columns, record):
    """Insert or update a single record in a standard table.

    :param pgconn: database connection used to execute statements
    :param table_name: name of the destination table
    :param columns: column names present in the table
    :param record: mapping of column name to value
    :return: True on completion
    """
    data = dict(record)
    log.debug("Standard handler: {}".format(data))

    # Tables without an id column only ever receive inserts.
    if 'id' not in columns:
        pgconn.execute(insert_statement(table_name, columns, data), record)
        return True

    # Table has an id column: update when the row already exists.
    existing_row = pgconn.execute(
        "SELECT 1 FROM {table_name} WHERE id = :id".format(table_name=table_name),
        {'id': data['id']}
    ).fetchone()
    if existing_row:
        pgconn.execute(update_statement(table_name, columns, data), record)
    else:
        pgconn.execute(insert_statement(table_name, columns, data), record)
    return True
5,332,380
def load_fit_profile():
    """
    Return the FIT profile types based on the Profile.xslx that is included
    in the Garmin FIT SDK (https://developer.garmin.com/fit/download/).

    The returned profile can be used to translate e.g. Garmin product names
    to their corresponding integer product ids.
    """
    profile_path = _fit_profile_json_path()
    with profile_path.open("r") as profile_file:
        return json.load(profile_file)
5,332,381
def mmethod(path, *args, **kwargs):
    """
    Returns a mapper function that runs the path method for each instance of the
    iterable collection.

    >>> mmethod('start')
    is equivalent to
    >>> lambda thread: thread.start()

    >>> mmethod('book_set.filter', number_of_pages__gte=100)
    is equivalent to
    >>> lambda author: author.book_set.filter(number_of_pages__gte=100)
    """
    def mapper(instance):
        # resolve the (possibly dotted) attribute path, then call it with the
        # arguments captured at mmethod() time
        bound = mattr(path)(instance)
        return bound(*args, **kwargs)
    return mapper
5,332,382
def create_crd_from_yaml(
    api_extensions_v1_beta1: ApiextensionsV1beta1Api, name, yaml_manifest
) -> None:
    """
    Create a specific CRD based on yaml file.

    :param api_extensions_v1_beta1: ApiextensionsV1beta1Api
    :param name: CRD name
    :param yaml_manifest: an absolute path to file
    """
    print(f"Create a CRD with name: {name}")
    with open(yaml_manifest) as manifest_file:
        # the manifest may hold several YAML documents; create only the one
        # whose metadata name matches
        for document in yaml.safe_load_all(manifest_file):
            if document["metadata"]["name"] == name:
                create_crd(api_extensions_v1_beta1, document)
                print("CRD was created")
5,332,383
def download_peakfile(source_url, filename, destination_dir):
    """Download a gzipped peak file from ENCODE and decompress it.

    Args:
        source_url: URL to fetch.
        filename: name (ending in ``.gz``) under which the download is stored.
        destination_dir: directory receiving both the .gz archive and the
            decompressed copy (same name without the ``.gz`` suffix).
    """
    gz_path = os.path.join(destination_dir, filename)
    out_path = os.path.join(destination_dir, filename.replace('.gz', ''))
    # Stream the download to disk; the context manager guarantees the HTTP
    # connection is released (the original only `del`-eted the response).
    with requests.get(source_url, stream=True) as response:
        with open(gz_path, 'wb') as gz_file:
            shutil.copyfileobj(response.raw, gz_file)
    # Decompress next to the archive; copyfileobj streams in chunks instead of
    # loading the entire decompressed file into memory.
    with gzip.open(gz_path, 'rb') as in_file, open(out_path, 'wb') as out_file:
        shutil.copyfileobj(in_file, out_file)
5,332,384
def generate_test_demand_design_image() -> TestDataSet:
    """
    Returns
    -------
    test_data : TestDataSet
        2800 points of test data, uniformly sampled from (price, time, emotion).
        Emotion is transformed into img.
    """
    base_data: TestDataSet = generate_test_demand_design(False)
    # swap the integer emotion column for its image representation
    emotion_ids = base_data.covariate[:, 1].astype(int)
    emotion_images = attach_image(emotion_ids, False, 42)
    covariate_with_img = np.concatenate(
        [base_data.covariate[:, 0:1], emotion_images], axis=1
    )
    return TestDataSet(
        treatment=base_data.treatment,
        covariate=covariate_with_img,
        structural=base_data.structural,
    )
5,332,385
def run_sim(
    params: SimulationParams,
    gen_factory: TransactionGeneratorFactory,
    sched_factory: TransactionSchedulerFactory,
    executor: TransactionExecutor = DEFAULT_EXECUTOR,
    obj_set_maker_factory: ObjSetMakerFactory = DEFAULT_OBJ_SET_MAKER_FACTORY,
):
    """Yield index and path through the state space found by the simulator.

    One fresh generator/scheduler/simulator stack is built per repeat so runs
    are independent.
    """
    for i in range(ARGS.repeats):
        # BUGFIX: the original called the undefined name `tr_factory()`;
        # the transaction generator factory parameter is `gen_factory`.
        gen = gen_factory()
        obj_set_maker = obj_set_maker_factory()
        scheduler = sched_factory(
            params.clock_period, params.pool_size, params.queue_size
        )
        sim = Simulator(gen, obj_set_maker, scheduler, executor, params.core_num)
        yield i, sim.run(ARGS.verbose)
5,332,386
def rename(src, dst):
    """Rename file.

    NOTE(review): `makedirs` here is presumably a project helper that creates
    the *parent* directory of ``dst`` — confirm; if it created ``dst`` itself
    as a directory, the subsequent ``os.rename`` would fail on most platforms.
    """
    makedirs(dst)
    os.rename(src, dst)
5,332,387
def test_connection_error_with_response():
    """
    Test error string is formatted correctly when exception is initialized with response

    NOTE(review): `ConnectionError` shadows the Python 3 builtin of the same
    name — presumably a project-specific exception imported elsewhere in this
    module; confirm the import so the builtin is not tested by accident.
    """
    # minimal stand-in for an HTTP response object
    Response = namedtuple("Response", "status_code reason content")
    response = Response(status_code="404", reason="Not found", content="Here some content")
    error = ConnectionError(response)

    assert str(error) == "Request failed\nStatus code: 404\nResponse message: Not found\nHere some content"
5,332,388
def main(args):
    """Main CLI function

    :param args: raw command-line argument list (e.g. ``sys.argv[1:]``)
    """
    # parse CLI arguments, configure logging, then run the stitching pipeline
    args = parse_args(args)
    setup_logging(args.loglevel)
    stitcher = Stitcher(args.input)
    stitcher.generate(args.output, args.cleanup)
5,332,389
def value_iteration(game, depth_limit, threshold):
    """Solves for the optimal value function of a game.

    For small games only! Solves the game using value iteration,
    with the maximum error for the value function less than threshold.
    This algorithm works for sequential 1-player games or 2-player zero-sum
    games, with or without chance nodes.

    Arguments:
        game: The game to analyze, as returned by `load_game`.
        depth_limit: How deeply to analyze the game tree. Negative means no
            limit, 0 means root-only, etc.
        threshold: Maximum error for state values.

    Returns:
        A `dict` with string keys and float values, mapping string encoding of
        states to the values of those states.
    """
    if game.num_players() not in (1, 2):
        raise ValueError("Game must be a 1-player or 2-player game")
    if (game.num_players() == 2 and
            game.get_type().utility != pyspiel.GameType.Utility.ZERO_SUM):
        raise ValueError("2-player games must be zero sum games")
    # We expect Value Iteration to be used with perfect information games, in
    # which `str` is assumed to display the state of the game.
    states = get_all_states.get_all_states(
        game, depth_limit, True, False, to_string=str)
    values = {}
    transitions = {}

    _initialize_maps(states, values, transitions)
    error = threshold + 1  # A value larger than threshold
    min_utility = game.min_utility()
    while error > threshold:
        error = 0
        # One sweep of Bellman backups over all non-terminal states.
        for key, state in states.items():
            if state.is_terminal():
                continue
            player = state.current_player()
            # Start from the worst possible value for the player to move:
            # player 0 maximizes, player 1 minimizes (zero-sum convention).
            value = min_utility if player == 0 else -min_utility
            for action in state.legal_actions():
                next_states = transitions[(key, action)]
                # Expected value over (possibly stochastic) successor states.
                q_value = sum(p * values[next_state]
                              for next_state, p in next_states)
                if player == 0:
                    value = max(value, q_value)
                else:
                    value = min(value, q_value)
            # Track the largest per-state change for the convergence test.
            error = max(abs(values[key] - value), error)
            values[key] = value

    return values
5,332,390
def printable_dataframe(data: typing.List[typing.Mapping], ignore_phase: bool = True) -> pd.DataFrame:
    """Convert raw performance records into a human-readable pandas DataFrame.

    Args:
        data: list of mappings with keys such as ``name``, ``input_shape``,
            ``num_parameters``, ``param_memory``, ``flops`` and
            ``activation_memory`` (and optionally ``phase``).
        ignore_phase: when False, a ``Phase`` column is included right after
            the model name.

    Returns:
        DataFrame whose columns are selected, ordered and renamed to
        printable headers.
    """
    columns = {'name': 'Model', 'input_shape': 'Input shape', 'num_parameters': '#Parameters',
               'param_memory': 'Model size (MB) FP32', 'flops': 'GFLOPs (multiply-add)',
               'activation_memory': 'Activation size (MB) FP32'}
    # fixed left-to-right order of the printable columns (typo `column_oder` fixed)
    column_order = ['name', 'input_shape', 'num_parameters', 'param_memory', 'flops',
                    'activation_memory']
    if not ignore_phase:
        column_order.insert(1, 'phase')
        columns['phase'] = 'Phase'
    df = pd.DataFrame(data, columns=column_order)
    df.rename(columns=columns, inplace=True)
    return df
5,332,391
def _import_and_infer(save_dir, inputs):
    """Import a SavedModel into a TF 1.x-style graph and run `signature_key`.

    Args:
        save_dir: directory containing the SavedModel to load.
        inputs: dict mapping signature input argument names to feed values;
            its keys must exactly match the signature's declared inputs.

    Returns:
        Dict mapping signature output names to the values produced by running
        the default serving signature once with `inputs`.
    """
    graph = ops.Graph()
    with graph.as_default(), session_lib.Session() as session:
        model = loader.load(session, [tag_constants.SERVING], save_dir)
        signature = model.signature_def[
            signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
        assert set(inputs.keys()) == set(signature.inputs.keys())
        feed_dict = {}
        # Map each declared signature input tensor to the caller's value.
        for arg_name in inputs.keys():
            feed_dict[graph.get_tensor_by_name(signature.inputs[arg_name].name)] = (
                inputs[arg_name])
        output_dict = {}
        # Resolve every declared output tensor by name before the single run.
        for output_name, output_tensor_info in signature.outputs.items():
            output_dict[output_name] = graph.get_tensor_by_name(
                output_tensor_info.name)
        return session.run(output_dict, feed_dict=feed_dict)
5,332,392
def new_module(fname, main=False, parser_vmx=None):
    """
    Create (or fetch the previously loaded) Module for `fname`.

    `fname` is Python str (or None for internal Module)
    `main` is Python True for main program (from command line)
    `argv` is Python list of str (if main is True)
    `parser_vmx` is Python str for parser VMX file to use
    returns (CModule, CClosure) if newly loaded module
        the Closure is the (bootstrap) code to populate the Module
    returns (CModule, None) if previously loaded (or internal Module)
    """
    if fname:
        sfname = mkstr(fname)
    else:
        sfname = []             # should not be used! illegal as dict key!
    md = Module.getprop('modules')  # Module Dict/directory (Class variable)
    mdd = md.getvalue()             # module directory dict
    if fname and sfname in mdd:     # previously loaded?
        return mdd[sfname], None    # yes; return it, no bootstrap needed
    scope = scopes.Scope(root_scope)  # create base scope for module
    mod = CModule(scope)
    # seed the module namespace with its doc slot and the __xxl object
    scope.defvar(const.DOC, null_value)
    scope.defvar('__xxl', CObject(class_by_name('XXLObject')))
    if fname:
        mdd[sfname] = mod       # save as previously loaded
    mi = new_modinfo(main=main, module=mod, fname=fname,
                     parser_vmx=parser_vmx)
    mod.modinfo = mi
    scope.defvar(const.MODINFO, mi)  # make ModInfo visible in module namespace
    if fname is None:           # internal module? no bootstrap to run
        return mod, None
    # locate the bootstrap VM image (overridable via $XXL_BOOTSTRAP)
    bootstrap_vmx = xxlobj.find_in_lib_path(os.environ.get('XXL_BOOTSTRAP',
                                                           'bootstrap.vmx'))
    # XXX handle Exceptions for I/O, bad JSON, bad instructions
    code = vmx.load_vm_json(bootstrap_vmx)
    boot = CClosure(code, mod.scope)  # CClosure with bootstrap_vmx code
    return mod, boot
5,332,393
def test_question_load_two_answer(monkeypatch):
    """load a complete question and two answers"""
    class MonkeyAnswer(exam2pdf.Answer):
        # Answer stub with a fixed two-field load sequence: text then image.
        def __init__(self):
            super().__init__()
            self._attr_load_sequence = ("text", "image")
            self._type_caster_sequence = (str, Path)

    # question fields (text, subject, image, level) followed by two answers;
    # the second answer is truncated after its text field
    test_tuple = ("t1", "s1", "p1", "1", "a00", Path("a01"), "a10")
    quest = exam2pdf.Question()
    monkeypatch.setattr(quest, "_answer_type", MonkeyAnswer)
    quest.load_sequentially(iter(test_tuple))

    assert quest.text == test_tuple[0]
    assert quest.subject == test_tuple[1]
    assert quest.image == Path(test_tuple[2])
    assert quest.level == int(test_tuple[3])
    assert quest.answers[0].text == test_tuple[4]
    assert quest.answers[0].image == test_tuple[5]
    assert quest.answers[1].text == test_tuple[6]
5,332,394
def url_by_properties(
    config_properties,
    db_type,
    submit_dir=None,
    top_dir=None,
    rundir_properties=None,
    cl_properties=None,
    props=None,
):
    """
    Get URL from the property file
    """
    # a database type is mandatory for picking the right URI builder
    if not db_type:
        raise ConnectionError(
            "A type should be provided with the property file.", db_type=db_type
        )

    # parse and process properties unless the caller supplied them already
    if not props:
        props = properties.Properties()
        props.new(config_file=config_properties, rundir_propfile=rundir_properties)
        _merge_properties(props, cl_properties)

    kind = db_type.upper()
    if kind == DBType.JDBCRC:
        dburi = _get_jdbcrc_uri(props)
    elif kind == DBType.MASTER:
        dburi = _get_master_uri(props)
    elif kind == DBType.WORKFLOW:
        dburi = _get_workflow_uri(props, submit_dir, top_dir)
    else:
        raise ConnectionError("Invalid database type '%s'." % db_type, db_type=db_type)

    if not dburi:
        raise ConnectionError(
            "Unable to find a database URI to connect.", db_type=db_type
        )
    log.debug("Using database: %s" % dburi)
    return dburi
5,332,395
def draw_map_24(inputs):
    """draw 24+1 maps of a day

    :param inputs: configuration passed straight through to ``DrawMap``
    """
    draw_map = DrawMap(inputs)
    draw_map.read_head()
    # one map per hour (25 frames), starting from the file's initial time index
    for n in range(draw_map.reader.ini_ntime, 25 + draw_map.reader.ini_ntime):
        draw_map.create_title(n)
        draw_map.reader_uv_by_time(n)
        print(draw_map.title_full)
        draw_map.draw()
5,332,396
def nth(seq, idx):
    """Return the nth item of a sequence.

    Constant time if list, tuple, or str; linear time if a generator.
    Delegates to ``get`` (presumably a toolz-style indexing helper — confirm),
    so out-of-range/negative-index behavior follows that helper.
    """
    return get(seq, idx)
5,332,397
def db_session(db: log_database.Database) -> log_database.Database.SessionType:
    """A test fixture which yields a session on an empty graph proto database.

    The session stays open for the duration of the test; the context manager
    closes it (with its commit/rollback semantics) afterwards.
    """
    with db.Session() as session:
        yield session
5,332,398
def test_expm_depletion(): """Test that depletion is carried out properly""" # ------------------------------------------------------------------------- # DEPLETION # ------------------------------------------------------------------------- dep = MainDepletion(0.0, data) # define metadata (steps, flux, and so on) dep.SetDepScenario(power=None, flux=[flux], timeUnits="seconds", timesteps=[6.630851880276299780234694480896E+05], timepoints=None) # set initial composition dep.SetInitialComposition(ID, N0, vol=1.0) # solve the Bateman equations dep.SolveDepletion(method="expm") # Post depletion analysis dep.DecayHeat() dep.Radiotoxicity() dep.Activity() dep.Mass() assert dep.Nt[1656, 1] == pytest.approx(compareNt[1656], rel=0.001)
5,332,399