content (string, lengths 22 to 815k)
id (int64, 0 to 4.91M)
def override_config_file(src_path, dest_path, **kwargs):
    """
    Override settings in a trainer config file. For example,
        override_config_file(src_path, dest_path, max_steps=42)
    will copy the config file at src_path to dest_path, but override the max_steps field to 42 for all brains.
    """
    with open(src_path) as f:
        configs = yaml.safe_load(f)
    for config in configs.values():
        config.update(**kwargs)
    with open(dest_path, "w") as f:
        yaml.dump(configs, f)
5,336,200
def get_loss(pred, label):
    """
    :param pred: BxNxC
    :param label: BxN
    :param smpw: BxN
    :return:
    """
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=pred)
    classify_loss = tf.reduce_mean(loss)
    tf.summary.scalar('classify loss', classify_loss)
    tf.add_to_collection('losses', classify_loss)
    return classify_loss
5,336,201
def test_dependencies(dependencies_dialog):
    """Run dependency widget test."""
    # Test sample
    dependencies.add("zmq", "zmq", "Run introspection services", ">=10.0")
    dependencies.add("foo", "foo", "Non-existent module", ">=1.0")
    dependencies.add("bar", "bar", "Non-existing optional module", ">=10.0", kind=dependencies.OPTIONAL)
    dependencies_dialog.set_data(dependencies.DEPENDENCIES)
    dependencies_dialog.show()
    assert dependencies_dialog
5,336,202
def calculate_prec_at_k(k, prediction, target):
    """
    Calculating precision at k.
    """
    best_k_pred = prediction.argsort()[:k]
    best_k_target = target.argsort()[:k]
    return len(set(best_k_pred).intersection(set(best_k_target))) / k
5,336,203
def get_ttl(cur): """Get the 'extract' table as lines of Turtle (the lines are returned as a list).""" # Get ttl lines cur.execute( """WITH literal(value, escaped) AS ( SELECT DISTINCT value, replace(replace(replace(value, '\\', '\\\\'), '"', '\\"'), ' ', '\\n') AS escaped FROM tmp_extract ) SELECT '@prefix ' || prefix || ': <' || base || '> .' FROM prefix UNION ALL SELECT DISTINCT subject || ' ' || predicate || ' ' || coalesce( object, '"' || escaped || '"^^' || datatype, '"' || escaped || '"@' || language, '"' || escaped || '"' ) || ' .' FROM tmp_extract LEFT JOIN literal ON tmp_extract.value = literal.value;""" ) lines = [] for row in cur.fetchall(): line = row[0] if not line: continue # Replace newlines line = line.replace("\n", "\\n") lines.append(line) return lines
5,336,204
def pnm80(date1, date2): """ Wrapper for ERFA function ``eraPnm80``. Parameters ---------- date1 : double array date2 : double array Returns ------- rmatpn : double array Notes ----- The ERFA documentation is below. - - - - - - - - - e r a P n m 8 0 - - - - - - - - - Form the matrix of precession/nutation for a given date, IAU 1976 precession model, IAU 1980 nutation model. Given: date1,date2 double TDB date (Note 1) Returned: rmatpn double[3][3] combined precession/nutation matrix Notes: 1) The TDB date date1+date2 is a Julian Date, apportioned in any convenient way between the two arguments. For example, JD(TDB)=2450123.7 could be expressed in any of these ways, among others: date1 date2 2450123.7 0.0 (JD method) 2451545.0 -1421.3 (J2000 method) 2400000.5 50123.2 (MJD method) 2450123.5 0.2 (date & time method) The JD method is the most natural and convenient to use in cases where the loss of several decimal digits of resolution is acceptable. The J2000 method is best matched to the way the argument is handled internally and will deliver the optimum resolution. The MJD method and the date & time methods are both good compromises between resolution and convenience. 2) The matrix operates in the sense V(date) = rmatpn * V(J2000), where the p-vector V(date) is with respect to the true equatorial triad of date date1+date2 and the p-vector V(J2000) is with respect to the mean equatorial triad of epoch J2000.0. Called: eraPmat76 precession matrix, IAU 1976 eraNutm80 nutation matrix, IAU 1980 eraRxr product of two r-matrices Reference: Explanatory Supplement to the Astronomical Almanac, P. Kenneth Seidelmann (ed), University Science Books (1992), Section 3.3 (p145). Copyright (C) 2013-2017, NumFOCUS Foundation. Derived, with permission, from the SOFA library. See notes at end of file. """ (date1, date2,), rmatpn = arrayify_inputs_and_create_d3_fix( [date1, date2], core_dims=[0, 0], out_core_shape=(3, 3), out_dtype=numpy.double) rmatpn = ufunc.pnm80(date1, date2, rmatpn) return rmatpn
5,336,205
def segment_experiment(experiment_root, model, channels='bf', use_gpu=True, overwrite_existing=False):
    """Segment all 'bf' image files from an experiment directory and annotate poses.

    For more complex needs, use segment_images.segment_positions. This function
    is largely a simple example of its usage.

    Parameters:
        experiment_root: top-level experiment directory
        model: path to a model file, or name of a model packaged with the matlab
            tool. (If there is no '/' in this parameter, it is assumed to be a
            model name rather than a path.)
        channels: list/tuple of image channels to segment, or a single channel
            as a string.
        use_gpu: whether or not to use the GPU to perform the segmentations
        overwrite_existing: if False, the segmenter will not be run on existing
            mask files, nor will existing annotations be modified even if new
            mask files are generated for a timepoint.
    """
    experiment_root = pathlib.Path(experiment_root)
    positions = load_data.scan_experiment_dir(experiment_root, channels=channels)
    mask_root = experiment_root / 'derived_data' / 'mask'
    segment_images.segment_positions(positions, model, mask_root, use_gpu, overwrite_existing)
    annotations = load_data.read_annotations(experiment_root)
    metadata = load_data.read_metadata(experiment_root)
    age_factor = metadata.get('age_factor', 1)  # see if there is an "age factor" stashed in the metadata...
    width_estimator = worm_widths.WidthEstimator.from_experiment_metadata(metadata, age_factor)
    segment_images.annotate_poses_from_masks(positions, mask_root, annotations, overwrite_existing, width_estimator)
    load_data.write_annotations(experiment_root, annotations)
5,336,206
def db_table_ddl(conn, table_name, table_cols, table_seqs, table_cons, **kwargs): """ Generate create table DDL """ # Sequences if table_seqs: for s_ in table_seqs: c_ = _t.m.daffkv(table_cols, "col_name", s_["col_name"]) if c_: c_["is_seq"] = True c_["col_type"] = "serial" else: raise _t.m.DbIntgrError("Sequence '%s' not related to any table '%s' column" % (s_["seq_name"], table_name)) # Columns cols_ = [] for c_ in table_cols: cols_.append("%s %s%s" % (c_["col_name"], c_["col_type"], c_.get("not_null") and " NOT NULL" or "")) # Constraints cons_ = [] if table_cons: for c_ in table_cons: if c_["con_type"] == "c": cons_.append("CONSTRAINT %s %s" % (c_["con_name"], c_["con_src"])) # Table prefix table_pfx_ = kwargs.get("table_prefix", "") # Construct DDL statement stmt_ = "CREATE TABLE %s%s (%s%s)" % (table_pfx_, table_name, ", ".join(cols_), cons_ and ", %s" % ", ".join(cons_) or "") if kwargs.get("apply"): conn.execute(stmt_, **kwargs) return [stmt_, ]
5,336,207
def ne_to_wgs(northing, easting):
    """
    Convert Northings and Eastings (NAD 83 Alaska Albers Equal Area Conic) to WGS84 lat/long.
    :param northing: AK Albers in meters
    :param easting: AK Albers in meters
    :returns: transformed coordinates in WGS84 lat long
    """
    wgspoint = osr.SpatialReference()
    wgspoint.ImportFromEPSG(4326)
    nepoint = osr.SpatialReference()
    nepoint.ImportFromEPSG(3338)
    transform = osr.CoordinateTransformation(nepoint, wgspoint)
    return transform.TransformPoint(easting, northing)
5,336,208
def test__set_variables_from_first_table_with_same_db_tables_in_parameters():
    """Test _set_variables_from_first_table() when the tables passed are with same tables in parameters"""

    def dummy_function(param_1: str, param_2: str):  # skipcq: PTC-W0049, PY-D0003
        pass

    handler = TableHandler()
    handler.op_args = ()
    handler.python_callable = dummy_function
    handler.op_kwargs = {"param_1": "dummy_value", "param_2": "dummy_value"}
    handler.parameters = {
        "param_1": Table(
            conn_id="conn_1",
            metadata=Metadata(
                database="database_1",
                schema="scheme_1",
            ),
        ),
        "param_3": Table(
            conn_id="conn_1",
            metadata=Metadata(
                database="database_1",
                schema="scheme_1",
            ),
        ),
    }
    handler._set_variables_from_first_table()
    assert handler.conn_id == "conn_1"
    assert handler.database == "database_1"
    assert handler.schema == "scheme_1"
5,336,209
def register_api_routes(app: Flask):
    """ use this function to register api routes"""
    # register the api versions to the main application
    app.register_blueprint(register_api_routes_v1(latest=True))
5,336,210
def check_for_greeting(sentence, context):
    """If any of the words in the user's input was a greeting, return a greeting response"""
    if (sentence.strip() in GREETING_KEYWORDS) and (context == True):
        return getCurrentTimeGreeting() + ", " + random.choice(GREETING_RESPONSES)
    else:
        return random.choice(GREETING_RESPONSES)
5,336,211
def user_is_aidant(view=None, redirect_field_name="next"):
    """
    Similar to :func:`~django.contrib.auth.decorators.login_required`, but
    requires the user to be :term:`allowed to create mandats`.

    By default, this redirects users to home of espace aidants.
    """

    def test(user):
        return user.can_create_mandats

    decorator = user_passes_test(
        test,
        login_url="espace_aidant_home",
        redirect_field_name=redirect_field_name,
    )
    return decorator if (view is None) else decorator(view)
5,336,212
def plot_images(a, b, x_train, y_train, y_pred):
    """
    This function plots the images in a 5x5 grid
    :param a: true class label
    :param b: predicted class label
    :param x_train: training data
    :param y_train: training data labels
    :param y_pred: prediction on the test data
    :return: None
    """
    cl_a = a
    cl_b = b
    x_aa = x_train[(y_train == cl_a) & (y_pred == cl_a)]
    x_ab = x_train[(y_train == cl_a) & (y_pred == cl_b)]
    x_ba = x_train[(y_train == cl_b) & (y_pred == cl_a)]
    x_bb = x_train[(y_train == cl_b) & (y_pred == cl_b)]
    plt.figure(figsize=(8,8))
    plt.subplot(221)
    plot_digits(x_aa[:25], images_per_row=5)
    plt.subplot(222)
    plot_digits(x_ab[:25], images_per_row=5)
    plt.subplot(223)
    plot_digits(x_ba[:25], images_per_row=5)
    plt.subplot(224)
    plot_digits(x_bb[:25], images_per_row=5)
    plt.show()
5,336,213
def get_time_zone_offset(time_zone, date_time=None):
    """
    Returns the time zone offset (e.g. -0800) of the time zone for given datetime
    """
    date_time = datetime.now(utc) if date_time is None else date_time
    return _format_time_zone_string(time_zone, date_time, '%z')
5,336,214
def loss_fn_kd(scores, target_scores, T=2.):
    """Compute knowledge-distillation (KD) loss given [scores] and [target_scores].

    Both [scores] and [target_scores] should be tensors, although [target_scores] should be repackaged.
    'Hyperparameter': temperature"""

    device = scores.device

    log_scores_norm = F.log_softmax(scores / T, dim=1)
    targets_norm = F.softmax(target_scores / T, dim=1)

    # if [scores] and [target_scores] do not have equal size, append 0's to [targets_norm]
    if not scores.size(1) == target_scores.size(1):
        print('size does not match')
        n = scores.size(1)
        if n > target_scores.size(1):
            n_batch = scores.size(0)
            zeros_to_add = torch.zeros(n_batch, n - target_scores.size(1))
            zeros_to_add = zeros_to_add.to(device)
            targets_norm = torch.cat([targets_norm.detach(), zeros_to_add], dim=1)

    # Calculate distillation loss (see e.g., Li and Hoiem, 2017)
    KD_loss_unnorm = -(targets_norm * log_scores_norm)
    KD_loss_unnorm = KD_loss_unnorm.sum(dim=1)  #--> sum over classes
    KD_loss_unnorm = KD_loss_unnorm.mean()      #--> average over batch

    # normalize
    KD_loss = KD_loss_unnorm * T**2

    return KD_loss
5,336,215
def geom_cooling(temp, k, alpha=0.95):
    """Geometric temperature decreasing."""
    return temp * alpha
5,336,216
def populate_db_from_sheet(sheet):
    """Given excel sheet, check required columns exist, and if so, empty and repopulate database"""
    dct_index_of_heading = {}
    for cx in range(sheet.ncols):
        dct_index_of_heading[sheet.cell_value(0, cx)] = cx
    not_founds = []
    for expected in COLUMN_NAMES:
        if expected not in dct_index_of_heading:
            not_founds.append(expected)
    if not_founds:
        raise ValueError("Missing column", not_founds)
    db.empty_tbl_products()
    sas_db = db.get_db()
    cursor = sas_db.cursor()
    sql = """insert into tbl_products (ID, GroupID, SASCode, CompanyCode, BrandName, ProductDescription, PackSize, MaximumQty, PackPrice, PackPremium) values (?,?,?,?,?,?,?,?,?,?)"""
    for rx in range(1, sheet.nrows):
        cursor.execute(sql, (rx,
                             cell_value(sheet, rx, dct_index_of_heading[GROUP_ID]),
                             cell_value(sheet, rx, dct_index_of_heading[SAS_CODE]),
                             cell_value(sheet, rx, dct_index_of_heading[COMPANY_CODE]),
                             cell_value(sheet, rx, dct_index_of_heading[BRAND_NAME]),
                             cell_value(sheet, rx, dct_index_of_heading[PRODUCT_DESCRIPTION]),
                             cell_value(sheet, rx, dct_index_of_heading[PACK_SIZE]),
                             cell_value(sheet, rx, dct_index_of_heading[MAXIMUM_QTY]),
                             cell_value(sheet, rx, dct_index_of_heading[PACK_PRICE]),
                             cell_value(sheet, rx, dct_index_of_heading[PACK_PREMIUM]),
                             )
                       )
    sas_db.commit()
5,336,217
def metadata_version(metadata, osmelem, grp_feat, res_feat, feature_suffix):
    """Compute the version-related features of metadata and append them into the metadata table

    Parameters
    ----------
    metadata: pd.DataFrame
        Metadata table to complete
    osmelem: pd.DataFrame
        original data used to compute versions; contains a 'elem' feature
    grp_feat: object
        string that indicates which feature from 'data' must be used to group items
    res_feat: object
        string that indicates the measured feature (how many items correspond
    feature_suffix: str
        string designing the end of the new feature names
    """
    osmelem_nodes = osmelem.query('elem=="node"')
    osmelem_ways = osmelem.query('elem=="way"')
    osmelem_relations = osmelem.query('elem=="relation"')
    metadata = group_stats(metadata, osmelem_nodes, grp_feat, res_feat, 'v', '_node' + feature_suffix)
    metadata = group_stats(metadata, osmelem_ways, grp_feat, res_feat, 'v', '_way' + feature_suffix)
    metadata = group_stats(metadata, osmelem_relations, grp_feat, res_feat, 'v', '_relation' + feature_suffix)
    return metadata
5,336,218
def train_lin_reg():
    """Trains a LR model and persists it as pickle file"""
    return render_template(
        'default_html.html',
        endpoint='train_model',
        data=lr.train_model(),
    )
5,336,219
def bytes_to_escaped_str(data, keep_spacing=False, escape_single_quotes=False):
    """
    Take bytes and return a safe string that can be displayed to the user.

    Single quotes are always escaped, double quotes are never escaped:
        "'" + bytes_to_escaped_str(...) + "'"
    gives a valid Python string.

    Args:
        keep_spacing: If True, tabs and newlines will not be escaped.
    """
    if not isinstance(data, bytes):
        raise ValueError("data must be bytes, but is {}".format(data.__class__.__name__))
    # We always insert a double-quote here so that we get a single-quoted string back
    # https://stackoverflow.com/questions/29019340/why-does-python-use-different-quotes-for-representing-strings-depending-on-their
    ret = repr(b'"' + data).lstrip("b")[2:-1]
    if not escape_single_quotes:
        ret = re.sub(r"(?<!\\)(\\\\)*\\'", lambda m: (m.group(1) or "") + "'", ret)
    if keep_spacing:
        ret = re.sub(
            r"(?<!\\)(\\\\)*\\([nrt])",
            lambda m: (m.group(1) or "") + dict(n="\n", r="\r", t="\t")[m.group(2)],
            ret
        )
    return ret
5,336,220
def _session_path():
    """
    Return the path to the current session
    :return:
    """
    path = bpy.data.filepath
    return path
5,336,221
def user_auth(f):
    """Checks whether user is logged in or raises error 401."""
    def decorator(*args, **kwargs):
        if True is False:
            abort(401)
        return f(*args, **kwargs)
    return decorator
5,336,222
def is_tensorrt_plugin_loaded():
    """Check if TensorRT plugins library is loaded or not.

    Returns:
        bool: plugin_is_loaded flag
    """

    # Following strings of text style are from colorama package
    bright_style, reset_style = '\x1b[1m', '\x1b[0m'
    red_text, blue_text = '\x1b[31m', '\x1b[34m'
    white_background = '\x1b[107m'

    msg = white_background + bright_style + red_text
    msg += 'DeprecationWarning: This function will be deprecated in future. '
    msg += blue_text + 'Welcome to use the unified model deployment toolbox '
    msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
    msg += reset_style
    warnings.warn(msg)

    global plugin_is_loaded
    return plugin_is_loaded
5,336,223
def check_md5(config): """ Find MD5 hash in providers. :param config: Parameters object :type config: Parameters :return: plain string with text or exception if not found hash :rtype: str :raises: HashNotFound, InvalidHashFormat """ if not isinstance(config, Parameters): raise TypeError("Expected Parameters, got '%s' instead" % type(config)) providers_to_check = PASSWORD_MD5_CRACKING_PROVIDERS if config.provider == "all" else [config.provider] plain_text = None for p in providers_to_check: # Make URL using md5cracker API url = "http://md5cracker.org/api/api.cracker.php?r=%s&database=%s&hash=%s" % ( randint(500, 10000), p, config.md5_hash) # Proxy setted? open_fn = None if config.proxy is not None: proxy_handler = urllib.request.ProxyHandler({config.proxy.scheme: config.proxy.netloc}) if config.proxy_user is not None: proxy_auth_handler = urllib.request.ProxyBasicAuthHandler() proxy_auth_handler.add_password('realm', 'host', config.proxy_user, config.proxy_pass) opener = urllib.request.build_opener(proxy_handler, proxy_auth_handler) opener = urllib.request.build_opener(proxy_handler) # This time, rather than install the OpenerDirector, we use it directly: open_fn = opener.open else: open_fn = urllib.request.urlopen # Get remote info u = open_fn(url) _tmp_results = u.read().decode('utf-8') if _tmp_results is None: continue _json_results = json.loads(_tmp_results) # Its fails? if _json_results['status'] is False: # Check if reason is for not recoverable error if 'Invalid hash' in _json_results['message']: raise InvalidHashFormat("Invalid Hash Format") # It not found hash continue continue else: # Hash found!!!! plain_text = _json_results['result'] break if plain_text is None: HashNotFound("Plain text not found for hash: '%s'" % config.md5_hash) return plain_text
5,336,224
def is_polindrom(string):
    """
    This function checks whether the given string is a polindrom or not.
    """
    for i, char in enumerate(string):
        if char != string[-i-1]:
            return False
    return True
5,336,225
def check_files(hass):
    """Return bool that indicates if all files are present."""
    # Verify that the user downloaded all files.
    base = f"{hass.config.path()}/custom_components/{DOMAIN}/"
    missing = []
    for file in REQUIRED_FILES:
        fullpath = f"{base}{file}"
        if not os.path.exists(fullpath):
            missing.append(file)
    if missing:
        _LOGGER.critical(f"The following files are missing: {str(missing)}")
        returnvalue = False
    else:
        returnvalue = True
    return returnvalue
5,336,226
def sensitive_file_response(file): """ This function is helpful to construct your own views that will return the actual bytes for a sensitive image. You need to pass the literal bytes for sensitive photos through your server in order to put security checks in front of those bytes. So for instance you might put something like this in your views.py: def view_photo_of_steve(request, file_name): if request.user.username != 'Steve': raise Exception('Only Steve may look at photos of Steve!') return sensitive_file_response('steves_s3_bucket', file_name) def steves_page(request): return render( request, 'steve.html', {'steve_photo_url': reverse( 'view_photo_of_steve', kwargs={'file_name': SteveFile.objects.first().file_name})}) And something like this in steve.html or whatever <img src="{{ steve_photo_url }}"> """ bucket_config = file.bucket_config() if bucket_config.is_public: raise Exception(( 'S3 bucket {} is public, so performance-wise, it is best to just ' 'leave this server out of it entirely and use public_photo_url ' 'in djaveS3.S3 instead.').format(bucket_config.name)) img_bytes = Bucket(bucket_config).file_bytes(file.file_name) if img_bytes: return HttpResponse( img_bytes, content_type=content_type_from_file_name(file.file_name)) return Http404()
5,336,227
def get_unmapped_read_count_from_indexed_bam(bam_file_name):
    """
    Get number of unmapped reads from an indexed BAM file.

    Args:
        bam_file_name (str): Name of indexed BAM file.

    Returns:
        int: number of unmapped reads in the BAM

    Note:
        BAM must be indexed for lookup using samtools.
    """
    index_output = tk_subproc.check_output('samtools idxstats %s' % bam_file_name, shell=True)
    return int(index_output.strip().split('\n')[-1].split()[-1])
5,336,228
def model_setup(model_dict, X_train, y_train, X_test, y_test, X_val, y_val, rd=None, layer=None): """ Main function to set up network (create, load, test, save) """ rev = model_dict['rev'] dim_red = model_dict['dim_red'] if rd != None: # Doing dimensionality reduction on dataset print("Doing {} with rd={} over the training data".format(dim_red, rd)) X_train, X_test, X_val, dr_alg = dr_wrapper(X_train, X_test, dim_red, rd, y_train, rev, X_val) else: dr_alg = None # Getting data parameters after dimensionality reduction data_dict = get_data_shape(X_train, X_test, X_val) no_of_dim = data_dict['no_of_dim'] # Prepare Theano variables for inputs and targets if no_of_dim == 2: input_var = T.tensor('inputs') elif no_of_dim == 3: input_var = T.tensor3('inputs') elif no_of_dim == 4: input_var = T.tensor4('inputs') target_var = T.ivector('targets') # Check if model already exists if layer is not None: network, model_exist_flag, layers = model_creator(model_dict, data_dict, input_var, target_var, rd, layer) else: network, model_exist_flag = model_creator(model_dict, data_dict, input_var, target_var, rd, layer) #Defining symbolic variable for network output prediction = lasagne.layers.get_output(network) #Defining symbolic variable for network parameters params = lasagne.layers.get_all_params(network, trainable=True) #Defining symbolic variable for network output with dropout disabled test_prediction = lasagne.layers.get_output(network, deterministic=True) # Building or loading model depending on existence if model_exist_flag == 1: # Load the correct model: param_values = model_loader(model_dict, rd) lasagne.layers.set_all_param_values(network, param_values) elif model_exist_flag == 0: # Launch the training loop. print("Starting training...") if layer is not None: model_trainer(input_var, target_var, prediction, test_prediction, params, model_dict, X_train, y_train, X_val, y_val, network, layers) else: model_trainer(input_var, target_var, prediction, test_prediction, params, model_dict, X_train, y_train, X_val, y_val, network) model_saver(network, model_dict, rd) # Evaluating on retrained inputs test_model_eval(model_dict, input_var, target_var, test_prediction, X_test, y_test, rd) return data_dict, test_prediction, dr_alg, X_test, input_var, target_var
5,336,229
def AreEqual(image1, image2, tolerance=0, likely_equal=True):
    """Determines whether two images are identical within a given tolerance.
    Setting likely_equal to False enables short-circuit equality testing, which
    is about 2-3x slower for equal images, but can be image height times faster
    if the images are not equal."""
    return impl.AreEqual(image1, image2, tolerance, likely_equal)
5,336,230
def ppc_deconvolve(im, kernel, kfft=None, nchans=4, same_scan_direction=False, reverse_scan_direction=False): """PPC image deconvolution Given an image (or image cube), apply PPC deconvolution kernel to obtain the intrinsic flux distribution. If performing PPC deconvolution, make sure to perform channel-by-channel with the kernel in the appropriate scan direction. IPC is usually symmetric, so this restriction may not apply. Parameters ========== im : ndarray Image or array of images. kernel : ndarry Deconvolution kernel. kfft : Complex ndarray Option to directy supply the kernel's FFT rather than calculating it within the function. The supplied ndarray should have shape (ny,nx) equal to the input `im`. Useful if calling ``ipc_deconvolve`` multiple times. """ # Image cube shape sh = im.shape ndim = len(sh) if ndim==2: ny, nx = sh nz = 1 else: nz, ny, nx = sh chsize = int(nx / nchans) im = im.reshape([nz,ny,nchans,-1]) # FFT of kernel if kfft is None: k_big = pad_or_cut_to_size(kernel, (ny,chsize)) kfft = np.fft.fft2(k_big) # Channel-by-channel deconvolution for ch in np.arange(nchans): sub = im[:,:,ch,:] if same_scan_direction: flip = True if reverse_scan_direction else False elif np.mod(ch,2)==0: flip = True if reverse_scan_direction else False else: flip = False if reverse_scan_direction else True if flip: sub = sub[:,:,:,::-1] sub = ipc_deconvolve(sub, kernel, kfft=kfft) if flip: sub = sub[:,:,:,::-1] im[:,:,ch,:] = sub im = im.reshape(sh) return im
5,336,231
def worker(num):
    """thread worker function"""
    print 'Worker:', num
    return
5,336,232
def test_force():
    """Verifies that the user can override the check to see if the resulting tarball exists."""
    helpers.log_status(cast(FrameType, currentframe()).f_code.co_name)
    result = helpers.execute_command(
        ["-v", "snapshot", "--name", "test", "--module", "test", "--force"],
        command_input="y\n",
    )
    run_assertions(result)
    assert "Creating snapshot of specified modules" in result.output
    cleanup()
    helpers.log_success(cast(FrameType, currentframe()).f_code.co_name)
5,336,233
def linear_blending(aurox_dir_path, current_dirs, c_ome_path, yes_orig): """ Linear blend images. It saves two images: A fused tiff from TileConfiguration.fixed.txt with Z axis set to 0, saved into FUSED_TIFFS and a separate fused tiff from TileConfiguration.txt with original positions from positions.csv file, saved into ORIG_FUSED_TIFFS """ fusloc_dict = {} abs_dirpath = os.path.abspath(aurox_dir_path) prefix = os.path.basename(aurox_dir_path) sav_fu = "%s/%s.tiff" % (current_dirs[0], prefix) sav_orig = "%s/%s.orig.tiff" % (current_dirs[3], prefix) # Creates both images if 'Create image from original positions?' was # checked. if yes_orig: # Linear Blending IJ.run("Grid/Collection stitching", "type=[Positions from file] " "order=[Defined by TileConfiguration]" " directory=[%s] layout_file=TileConfiguration.fixed.txt " "fusion_method=[Linear Blending] " "regression_threshold=0.30 max/avg_displacement_threshold=2.50 " "absolute_displacement_threshold=3.50 " "computation_parameters=[Save computation time (but use more RAM)] " "image_output=[Fuse and display]" % abs_dirpath) IJ.saveAs("Tiff", sav_fu) IJ.run("Close All") # Linear Blending IJ.run("Grid/Collection stitching", "type=[Positions from file] " "order=[Defined by TileConfiguration] " "directory=[%s] layout_file=TileConfiguration.txt " "fusion_method=[Linear Blending] " "regression_threshold=0.30 max/avg_displacement_threshold=2.50 " "absolute_displacement_threshold=3.50 " "computation_parameters=[Save computation time (but use more RAM)] " "image_output=[Fuse and display]" % abs_dirpath) IJ.saveAs("Tiff", sav_orig) IJ.run("Close All") # Creates only a single image with the calculated overlap and new positions else: # Linear Blending IJ.run("Grid/Collection stitching", "type=[Positions from file] " "order=[Defined by TileConfiguration] " "directory=[%s] layout_file=TileConfiguration.fixed.txt " "fusion_method=[Linear Blending] " "regression_threshold=0.30 max/avg_displacement_threshold=2.50 " "absolute_displacement_threshold=3.50 " "computation_parameters=[Save computation time (but use more RAM)] " "image_output=[Fuse and display]" % abs_dirpath) IJ.saveAs("Tiff", sav_fu) IJ.run("Close All") fusloc_dict[sav_fu] = [c_ome_path, current_dirs[1]] return fusloc_dict
5,336,234
def split_data(ratings, min_num_ratings, p_test=0.1, verbose=False, seed=988): """ Splits the data set (ratings) to training data and test data :param ratings: initial data set (sparse matrix of dimensions n items and p users) :param min_num_ratings: all users and items must have at least min_num_ratings per user and per item to be kept :param p_test: proportion of the data dedicated to test :param verbose: True if user wants to print details of computation :param seed: random seed :return: - valid_ratings (initial data set where some items and users where dropped) - train train data (same shape as valid_ratings but with 1-p_test non_zero values) - test data (same shape as valid_ratings but with p_test non zero values """ num_items_per_user = np.array((ratings != 0).sum(axis=0)).flatten() num_users_per_item = np.array((ratings != 0).sum(axis=1).T).flatten() # set seed np.random.seed(seed) # select user and item based on the condition. valid_users = np.where(num_items_per_user >= min_num_ratings)[0] valid_items = np.where(num_users_per_item >= min_num_ratings)[0] valid_ratings = ratings[valid_items, :][:, valid_users] # define the sparse matrix that will contain train and test data train = sp.lil_matrix(valid_ratings.shape) test = sp.lil_matrix(valid_ratings.shape) # get the index of non zero elements of the valid_ratings non_zero_item, non_zero_users = valid_ratings.nonzero() # for each item, select p_test percent of users to put in test and put the rest in train for item in set(non_zero_item): _, indexes = valid_ratings[item].nonzero() test_ind = np.random.choice(indexes, size=int(len(indexes) * p_test)) train_ind = list(set(indexes) - set(test_ind)) train[item, train_ind] = valid_ratings[item, train_ind] test[item, test_ind] = valid_ratings[item, test_ind] if verbose: print('Shape of original ratings : {}'.format(ratings.shape)) print('Shape of valid ratings (and of train and test data) : {}'.format(valid_ratings.shape)) print("Total number of nonzero elements in original data : {v}".format(v=ratings.nnz)) print("Total number of nonzero elements in train data : {v}".format(v=train.nnz)) print("Total number of nonzero elements in test data : {v}".format(v=test.nnz)) return valid_ratings, train, test
5,336,235
def _adjust_block(p, ip, filters, block_id=None): """Adjusts the input `previous path` to match the shape of the `input`. Used in situations where the output number of filters needs to be changed. Arguments: p: Input tensor which needs to be modified ip: Input tensor whose shape needs to be matched filters: Number of output filters to be matched block_id: String block_id Returns: Adjusted Keras tensor """ channel_dim = 1 if K.image_data_format() == 'channels_first' else -1 img_dim = 2 if K.image_data_format() == 'channels_first' else -2 ip_shape = K.int_shape(ip) if p is not None: p_shape = K.int_shape(p) with K.name_scope('adjust_block'): if p is None: p = ip elif p_shape[img_dim] != ip_shape[img_dim]: with K.name_scope('adjust_reduction_block_%s' % block_id): p = Activation('relu', name='adjust_relu_1_%s' % block_id)(p) p1 = AveragePooling2D( (1, 1), strides=(2, 2), padding='valid', name='adjust_avg_pool_1_%s' % block_id)( p) p1 = Conv2D( filters // 2, (1, 1), padding='same', use_bias=False, name='adjust_conv_1_%s' % block_id, kernel_initializer='he_normal')( p1) p2 = ZeroPadding2D(padding=((0, 1), (0, 1)))(p) p2 = Cropping2D(cropping=((1, 0), (1, 0)))(p2) p2 = AveragePooling2D( (1, 1), strides=(2, 2), padding='valid', name='adjust_avg_pool_2_%s' % block_id)( p2) p2 = Conv2D( filters // 2, (1, 1), padding='same', use_bias=False, name='adjust_conv_2_%s' % block_id, kernel_initializer='he_normal')( p2) p = concatenate([p1, p2], axis=channel_dim) p = BatchNormalization( axis=channel_dim, momentum=0.9997, epsilon=1e-3, name='adjust_bn_%s' % block_id)( p) elif p_shape[channel_dim] != filters: with K.name_scope('adjust_projection_block_%s' % block_id): p = Activation('relu')(p) p = Conv2D( filters, (1, 1), strides=(1, 1), padding='same', name='adjust_conv_projection_%s' % block_id, use_bias=False, kernel_initializer='he_normal')( p) p = BatchNormalization( axis=channel_dim, momentum=0.9997, epsilon=1e-3, name='adjust_bn_%s' % block_id)( p) return p
5,336,236
def import_reference_genome_from_ncbi(project, label, record_id, import_format): """Imports a reference genome by accession from NCBI using efetch. """ # Validate the input. assert import_format in ['fasta', 'genbank'], ( 'Import Format must be \'fasta\' or \'genbank\'') # Format keys for Efetch. # More info at: http://www.ncbi.nlm.nih.gov/ # books/NBK25499/table/chapter4.chapter4_table1/?report=objectonly CONVERT_FORMAT = { 'fasta': 'fa', 'genbank': 'gbwithparts' } # What suffix to use for each input format # TODO: Should this be a property of the Dataset TYPE? FORMAT_SUFFIX = { 'fasta': '.fa', 'genbank': '.gb' } Entrez.email = settings.EMAIL handle = Entrez.efetch( db="nuccore", id=record_id, rettype=CONVERT_FORMAT[import_format], retmode="text") # Store results in temporary file. filename_prefix = generate_safe_filename_prefix_from_label(label) + '_' temp = NamedTemporaryFile(delete=False, prefix=filename_prefix, suffix=FORMAT_SUFFIX[import_format]) temp.write(handle.read()) handle.close() temp.close() # Create ref genome from this temporary file. reference_genome = import_reference_genome_from_local_file( project, label, temp.name, import_format, move=True) # Clean up. if os.path.isfile(temp.name): os.remove(temp.name) return reference_genome
5,336,237
def filter_points(points: np.array, image_width: int, image_height: int) -> np.array:
    """
    function finds indexes of points that are within image frame ( within image width and height )
    searches for points with x coordinate greater than zero, less than image_width
    points with y coordinate greater than zero, less than image_height

    Args:
        points: points to be filter, shape: number_points,2
        image_width: width of image frame
        image_height: height of image frame

    return:
        indexes of points that satisfy both conditions
    """
    # points with x coordinate greater than zero, less than image_width
    in_w = np.logical_and(points[:, 0] > 0, points[:, 0] < image_width)
    # points with y coordinate greater than zero, less than image_height
    in_h = np.logical_and(points[:, 1] > 0, points[:, 1] < image_height)
    return np.logical_and(in_w, in_h)
5,336,238
def url_of_LumpClass(LumpClass: object) -> str:
    """gets a url to the definition of LumpClass in the GitHub repo"""
    script_url = LumpClass.__module__[len("bsp_tool.branches."):].replace(".", "/")
    line_number = inspect.getsourcelines(LumpClass)[1]
    lumpclass_url = f"{branches_url}{script_url}.py#L{line_number}"
    return lumpclass_url
5,336,239
def correlation_sum(indicators, embedding_dim):
    """
    Calculate a correlation sum

    Useful as an estimator of a correlation integral

    Parameters
    ----------
    indicators : 2d array
        matrix of distance threshold indicators
    embedding_dim : integer
        embedding dimension

    Returns
    -------
    corrsum : float
        Correlation sum
    indicators_joint
        matrix of joint-distance-threshold indicators
    """
    if not indicators.ndim == 2:
        raise ValueError('Indicators must be a matrix')
    if not indicators.shape[0] == indicators.shape[1]:
        raise ValueError('Indicator matrix must be symmetric (square)')

    if embedding_dim == 1:
        indicators_joint = indicators
    else:
        corrsum, indicators = correlation_sum(indicators, embedding_dim - 1)
        indicators_joint = indicators[1:, 1:] * indicators[:-1, :-1]

    nobs = len(indicators_joint)
    corrsum = np.mean(indicators_joint[np.triu_indices(nobs, 1)])
    return corrsum, indicators_joint
5,336,240
def test_submit_data():
    """
    Test that data is stored into the data after a submission
    Test that columns are added to the dataframe as expected even when using query string parameters in the URL hash
    """
    form_id = str(uuid.uuid4())
    # add a survey
    app_tables.forms.add_row(form_id=form_id, last_modified=datetime.now(), schema=schema, title=schema['title'])
    # submit some initial data
    cols = ['a', 'b', 'c']
    data = [1, 2, 3]
    submit_data(cols.copy(), data.copy(), {'form_id': form_id})
    # submit data with query parameters (meta data)
    cols_from_hash = ['form_id', '1st_param', '2nd_param']
    submit_data(cols.copy(), data.copy(),
                {cols_from_hash[0]: form_id,
                 cols_from_hash[1]: 'foo',
                 cols_from_hash[2]: 'bar'})
    media = app_tables.forms.get(form_id=form_id)['submissions']
    df = pd.read_csv(io.BytesIO(media.get_bytes()), index_col=0)
    should_be_cols = cols + cols_from_hash
    new_cols = list(df.columns)
    assert new_cols == should_be_cols
5,336,241
def find_suitable_serializer(obj):
    """
    Find serializer that is suitable for this operation
    :param T obj: The object that needs to be serialized
    :return: The first suitable serializer for this type of object
    :rtype: mlio.io.serializers.implementations.SerializerBase
    """
    for serializer in __serializers_registry.values():
        if serializer.can_serialize(obj):
            return serializer

    raise UnknownObjectType("Cannot find a suitable serializer for object of type {}".format(type(obj)))
5,336,242
async def test_subscribe_unsubscribe_logbook_stream( hass, recorder_mock, hass_ws_client ): """Test subscribe/unsubscribe logbook stream.""" now = dt_util.utcnow() await asyncio.gather( *[ async_setup_component(hass, comp, {}) for comp in ("homeassistant", "logbook", "automation", "script") ] ) await hass.async_block_till_done() init_count = sum(hass.bus.async_listeners().values()) hass.states.async_set("binary_sensor.is_light", STATE_ON) hass.states.async_set("binary_sensor.is_light", STATE_OFF) state: State = hass.states.get("binary_sensor.is_light") await hass.async_block_till_done() await async_wait_recording_done(hass) websocket_client = await hass_ws_client() await websocket_client.send_json( {"id": 7, "type": "logbook/event_stream", "start_time": now.isoformat()} ) msg = await asyncio.wait_for(websocket_client.receive_json(), 2) assert msg["id"] == 7 assert msg["type"] == TYPE_RESULT assert msg["success"] msg = await asyncio.wait_for(websocket_client.receive_json(), 2) assert msg["id"] == 7 assert msg["type"] == "event" assert msg["event"]["events"] == [ { "entity_id": "binary_sensor.is_light", "state": "off", "when": state.last_updated.timestamp(), } ] assert msg["event"]["start_time"] == now.timestamp() assert msg["event"]["end_time"] > msg["event"]["start_time"] assert msg["event"]["partial"] is True hass.states.async_set("light.alpha", "on") hass.states.async_set("light.alpha", "off") alpha_off_state: State = hass.states.get("light.alpha") hass.states.async_set("light.zulu", "on", {"color": "blue"}) hass.states.async_set("light.zulu", "off", {"effect": "help"}) zulu_off_state: State = hass.states.get("light.zulu") hass.states.async_set( "light.zulu", "on", {"effect": "help", "color": ["blue", "green"]} ) zulu_on_state: State = hass.states.get("light.zulu") await hass.async_block_till_done() hass.states.async_remove("light.zulu") await hass.async_block_till_done() hass.states.async_set("light.zulu", "on", {"effect": "help", "color": "blue"}) msg = await asyncio.wait_for(websocket_client.receive_json(), 2) assert msg["id"] == 7 assert msg["type"] == "event" assert "partial" not in msg["event"]["events"] assert msg["event"]["events"] == [] msg = await asyncio.wait_for(websocket_client.receive_json(), 2) assert msg["id"] == 7 assert msg["type"] == "event" assert "partial" not in msg["event"]["events"] assert msg["event"]["events"] == [ { "entity_id": "light.alpha", "state": "off", "when": alpha_off_state.last_updated.timestamp(), }, { "entity_id": "light.zulu", "state": "off", "when": zulu_off_state.last_updated.timestamp(), }, { "entity_id": "light.zulu", "state": "on", "when": zulu_on_state.last_updated.timestamp(), }, ] hass.bus.async_fire( EVENT_AUTOMATION_TRIGGERED, { ATTR_NAME: "Mock automation", ATTR_ENTITY_ID: "automation.mock_automation", ATTR_SOURCE: "numeric state of sensor.hungry_dogs", }, ) hass.bus.async_fire( EVENT_SCRIPT_STARTED, { ATTR_NAME: "Mock script", ATTR_ENTITY_ID: "script.mock_script", ATTR_SOURCE: "numeric state of sensor.hungry_dogs", }, ) hass.bus.async_fire(EVENT_HOMEASSISTANT_START) await hass.async_block_till_done() msg = await asyncio.wait_for(websocket_client.receive_json(), 2) assert msg["id"] == 7 assert msg["type"] == "event" assert msg["event"]["events"] == [ { "context_id": ANY, "domain": "automation", "entity_id": "automation.mock_automation", "message": "triggered by numeric state of sensor.hungry_dogs", "name": "Mock automation", "source": "numeric state of sensor.hungry_dogs", "when": ANY, }, { "context_id": ANY, "domain": "script", 
"entity_id": "script.mock_script", "message": "started", "name": "Mock script", "when": ANY, }, { "domain": "homeassistant", "icon": "mdi:home-assistant", "message": "started", "name": "Home Assistant", "when": ANY, }, ] context = core.Context( id="ac5bd62de45711eaaeb351041eec8dd9", user_id="b400facee45711eaa9308bfd3d19e474", ) automation_entity_id_test = "automation.alarm" hass.bus.async_fire( EVENT_AUTOMATION_TRIGGERED, { ATTR_NAME: "Mock automation", ATTR_ENTITY_ID: automation_entity_id_test, ATTR_SOURCE: "state of binary_sensor.dog_food_ready", }, context=context, ) hass.bus.async_fire( EVENT_SCRIPT_STARTED, {ATTR_NAME: "Mock script", ATTR_ENTITY_ID: "script.mock_script"}, context=context, ) hass.states.async_set( automation_entity_id_test, STATE_ON, {ATTR_FRIENDLY_NAME: "Alarm Automation"}, context=context, ) entity_id_test = "alarm_control_panel.area_001" hass.states.async_set(entity_id_test, STATE_OFF, context=context) hass.states.async_set(entity_id_test, STATE_ON, context=context) entity_id_second = "alarm_control_panel.area_002" hass.states.async_set(entity_id_second, STATE_OFF, context=context) hass.states.async_set(entity_id_second, STATE_ON, context=context) await hass.async_block_till_done() msg = await asyncio.wait_for(websocket_client.receive_json(), 2) assert msg["id"] == 7 assert msg["type"] == "event" assert msg["event"]["events"] == [ { "context_id": "ac5bd62de45711eaaeb351041eec8dd9", "context_user_id": "b400facee45711eaa9308bfd3d19e474", "domain": "automation", "entity_id": "automation.alarm", "message": "triggered by state of binary_sensor.dog_food_ready", "name": "Mock automation", "source": "state of binary_sensor.dog_food_ready", "when": ANY, }, { "context_domain": "automation", "context_entity_id": "automation.alarm", "context_event_type": "automation_triggered", "context_id": "ac5bd62de45711eaaeb351041eec8dd9", "context_message": "triggered by state of " "binary_sensor.dog_food_ready", "context_name": "Mock automation", "context_source": "state of binary_sensor.dog_food_ready", "context_user_id": "b400facee45711eaa9308bfd3d19e474", "domain": "script", "entity_id": "script.mock_script", "message": "started", "name": "Mock script", "when": ANY, }, { "context_domain": "automation", "context_entity_id": "automation.alarm", "context_event_type": "automation_triggered", "context_message": "triggered by state of " "binary_sensor.dog_food_ready", "context_name": "Mock automation", "context_source": "state of binary_sensor.dog_food_ready", "context_user_id": "b400facee45711eaa9308bfd3d19e474", "entity_id": "alarm_control_panel.area_001", "state": "on", "when": ANY, }, { "context_domain": "automation", "context_entity_id": "automation.alarm", "context_event_type": "automation_triggered", "context_message": "triggered by state of " "binary_sensor.dog_food_ready", "context_name": "Mock automation", "context_source": "state of binary_sensor.dog_food_ready", "context_user_id": "b400facee45711eaa9308bfd3d19e474", "entity_id": "alarm_control_panel.area_002", "state": "on", "when": ANY, }, ] hass.bus.async_fire( EVENT_AUTOMATION_TRIGGERED, {ATTR_NAME: "Mock automation 2", ATTR_ENTITY_ID: automation_entity_id_test}, context=context, ) await hass.async_block_till_done() msg = await websocket_client.receive_json() assert msg["id"] == 7 assert msg["type"] == "event" assert msg["event"]["events"] == [ { "context_domain": "automation", "context_entity_id": "automation.alarm", "context_event_type": "automation_triggered", "context_id": "ac5bd62de45711eaaeb351041eec8dd9", 
"context_message": "triggered by state of binary_sensor.dog_food_ready", "context_name": "Mock automation", "context_source": "state of binary_sensor.dog_food_ready", "context_user_id": "b400facee45711eaa9308bfd3d19e474", "domain": "automation", "entity_id": "automation.alarm", "message": "triggered", "name": "Mock automation 2", "source": None, "when": ANY, } ] await async_wait_recording_done(hass) hass.bus.async_fire( EVENT_AUTOMATION_TRIGGERED, {ATTR_NAME: "Mock automation 3", ATTR_ENTITY_ID: automation_entity_id_test}, context=context, ) await hass.async_block_till_done() msg = await websocket_client.receive_json() assert msg["id"] == 7 assert msg["type"] == "event" assert msg["event"]["events"] == [ { "context_domain": "automation", "context_entity_id": "automation.alarm", "context_event_type": "automation_triggered", "context_id": "ac5bd62de45711eaaeb351041eec8dd9", "context_message": "triggered by state of binary_sensor.dog_food_ready", "context_name": "Mock automation", "context_source": "state of binary_sensor.dog_food_ready", "context_user_id": "b400facee45711eaa9308bfd3d19e474", "domain": "automation", "entity_id": "automation.alarm", "message": "triggered", "name": "Mock automation 3", "source": None, "when": ANY, } ] await websocket_client.send_json( {"id": 8, "type": "unsubscribe_events", "subscription": 7} ) msg = await asyncio.wait_for(websocket_client.receive_json(), 2) assert msg["id"] == 8 assert msg["type"] == TYPE_RESULT assert msg["success"] # Check our listener got unsubscribed assert sum(hass.bus.async_listeners().values()) == init_count
5,336,243
def square(x):
    """Return x squared."""
    return x * x
5,336,244
def int_div_test(equation, val):
    """
    Comparison for the integer division binary search.
    :equation: Equation to test
    :val: Input to the division
    """
    r1 = equation(val)
    if r1 == None:
        return None
    r2 = equation(val - 1)
    if r2 == None:
        return None
    if r1 == 1 and r2 == 0:
        return 0
    elif r1 >= 1:
        return 1
    else:
        return -1
5,336,245
def get_market_deep(symbols=None, output_format='json', **kwargs):
    """
    Top-level function to obtain DEEP data for a symbol or list of symbols

    Parameters
    ----------
    symbols: str or list, default None
        A symbol or list of symbols
    output_format: str, default 'json', optional
        Desired output format. JSON required.
    kwargs:
        Additional Request Parameters (see base class)

    Notes
    -----
    Pandas not supported as an output format for the DEEP endpoint.
    """
    return DEEP(symbols, output_format, **kwargs).fetch()
5,336,246
def test_only_ParametersAction_parameters_considered(build):
    """Actions other than ParametersAction can have dicts called parameters."""
    expected = {
        'param': 'value',
    }
    build._data = {
        'actions': [
            {
                '_class': 'hudson.model.SomeOtherAction',
                'parameters': [
                    {'name': 'Not', 'value': 'OurValue'},
                ]
            },
            {
                '_class': 'hudson.model.ParametersAction',
                'parameters': [
                    {'name': 'param', 'value': 'value'},
                ]
            }
        ]
    }
    params = build.get_params()
    assert params == expected
5,336,247
def merge_duplicates(data: Sequence[Message]) -> Iterator[Message]:
    """Merge duplicate messages."""
    for _, group_ in itertools.groupby(data, _get_line):
        group = list(sorted(list(group_), key=_get_program_message))
        for _, duplicates_ in itertools.groupby(group, _get_program_message):
            duplicates = iter(duplicates_)
            output = next(duplicates)  # nosa: pylint[R1708]
            for dup in duplicates:
                for field in dataclasses.fields(dup):
                    value = getattr(dup, field.name, LESS)
                    if value is LESS:
                        continue
                    if getattr(output, field.name, LESS) is LESS:
                        setattr(output, field.name, value)
                output.extras.extend(dup.extras)
            yield output
5,336,248
def _ensure_func_attrs(func):
    """Add the relevant attributes to the function.
    """
    if not hasattr(func, '__args__'):
        func.__args__ = []
        func.__args_mapping__ = {}
5,336,249
def read_ss(path, dataset, order=None): """ Read secondary structure prediction file using specified order or automatically determined order based on results""" with open(path, 'r') as f: lines = f.readlines() lines = [line.split() for line in lines] start = 0 length = len(dataset.sequences[0]) for i, line in enumerate(lines): if len(line) == 6 and line[0] == '1' and line[2] in ['C', 'E', 'H']: start = i break data = np.array(lines[start:start+length]) seq = ''.join(list(data[:,1])) assert str(seq) == str(dataset.sequences[0]) if order is None: coil_defined = 1 sheet_defined = 1 helix_defined = 1 current_line = 0 order = np.zeros(3) while coil_defined + sheet_defined + helix_defined > 1: if data[current_line, 2] == 'C' and coil_defined > 0: values = np.array(data[current_line, 3:6]) a = np.argmax(values) if values[a-1] < values[a] and values[a-2] < values[a]: order[0] = a + 3 coil_defined = 0 elif data[current_line, 2] == 'E' and sheet_defined > 0: values = np.array(data[current_line, 3:6]) a = np.argmax(values) if values[a-1] < values[a] and values[a-2] < values[a]: order[1] = a + 3 sheet_defined = 0 elif data[current_line, 2] == 'H' and helix_defined > 0: values = np.array(data[current_line, 3:6]) a = np.argmax(values) if values[a-1] < values[a] and values[a-2] < values[a]: order[2] = a + 3 helix_defined = 0 if coil_defined + sheet_defined + helix_defined == 1: order[np.argmin(order)] = 12 - np.sum(order) current_line = current_line + 1 assert sorted(order) == [3, 4, 5] order = np.array(order, dtype=int) return np.array(np.stack([data[:, order[0]], data[:, order[1]], data[:, order[2]]], axis=1), dtype=float)
5,336,250
def test_multimodel_feature_extraction(): """Illustrate multimodel feature extraction. This is a test illustrating how to perform feature extraction with a distributed model using tfutils.base.test_from_params. The basic idea is to specify a validation target that is simply the actual output of the model at some layer. (See the "get_extraction_target" function above as well.) This test assumes that test_train has run first. After the test is run, the results of the feature extraction are saved in the Grid File System associated with the mongo database, with one file per batch of feature results. See how the features are accessed by reading the test code below. """ # set up parameters testcol = testcol_multi exp_id = 'validation1' params = {} model1_params = {'func': model.mnist_tfutils} model2_params = {'func': model.mnist_tfutils} model_params = [model1_params, model2_params] num_models = len(model_params) params['model_params'] = model_params params['load_params'] = {'host': testhost, 'port': testport, 'dbname': testdbname, 'collname': testcol, 'exp_id': 'training0'} params['save_params'] = {'exp_id': exp_id, 'save_intermediate_freq': 1, 'save_to_gfs': ['features', 'more_features']} targdict1 = {'func': get_extraction_target, 'to_extract': {'features': 'model_0/validation/valid1/hidden1/output:0', 'more_features': 'model_0/validation/valid1/hidden2/output:0'}} targdict2 = {'func': get_extraction_target, 'to_extract': {'features': 'model_1/validation/valid1/hidden1/output:0', 'more_features': 'model_1/validation/valid1/hidden2/output:0'}} targdict1.update(base.DEFAULT_LOSS_PARAMS) targdict2.update(base.DEFAULT_LOSS_PARAMS) validation_params1 = {'valid1': {'data_params': {'func': data.MNIST, 'batch_size': 100, 'group': 'test', 'n_threads': 4}, 'queue_params': {'queue_type': 'fifo', 'batch_size': 100}, 'targets': targdict1, 'num_steps': 10, 'online_agg_func': utils.reduce_mean_dict}} validation_params2 = {'valid1': {'data_params': {'func': data.MNIST, 'batch_size': 100, 'group': 'test', 'n_threads': 4}, 'queue_params': {'queue_type': 'fifo', 'batch_size': 100}, 'targets': targdict2, 'num_steps': 10, 'online_agg_func': utils.reduce_mean_dict}} params['validation_params'] = [validation_params1, validation_params2] params['skip_check'] = True conn = pm.MongoClient(host=testhost, port=testport) for i in range(num_models): valid_exp_id = 'validation0_model_{}'.format(i) conn[testdbname][testcol + '.files'].delete_many({'exp_id': valid_exp_id}) # actually run the feature extraction base.test_from_params(**params) # check that things are as expected. coll = conn[testdbname][testcol + '.files'] for i in range(num_models): exp_id = 'validation1_model_{}'.format(i) assert coll.find({'exp_id': exp_id}).count() == 11 # ... load the containing the final "aggregate" result after all features have been extracted q = {'exp_id': exp_id, 'validation_results.valid1.intermediate_steps': {'$exists': True}} assert coll.find(q).count() == 1 r = coll.find(q)[0] # ... check that the record is well-formed asserts_for_record(r, params, train=False) # ... check that the correct "intermediate results" (the actual features extracted) records exist # and are correctly referenced. q1 = {'exp_id': exp_id, 'validation_results.valid1.intermediate_steps': {'$exists': False}} ids = coll.find(q1).distinct('_id') assert r['validation_results']['valid1']['intermediate_steps'] == ids # ... 
actually load feature batch 3 idval = r['validation_results']['valid1']['intermediate_steps'][3] fn = coll.find({'item_for': idval})[0]['filename'] fs = gridfs.GridFS(coll.database, testcol) fh = fs.get_last_version(fn) saved_data = cPickle.loads(fh.read()) fh.close() first_results = saved_data['validation_results']['valid1'] assert 'features' in first_results and 'more_features' in first_results features = saved_data['validation_results']['valid1']['features'] more_features = saved_data['validation_results']['valid1']['more_features'] assert features.shape == (100, 128) assert features.dtype == np.float32 assert more_features.shape == (100, 32) assert more_features.dtype == np.float32
5,336,251
def handleCharacter(ch, time_str, caller_name, config):
    """Handles character.

    Handles character.
    Options: q (quit)

    Args:
        ch: Character.
        time_str: Time string.
        caller_name: Calling function's name.
    """
    global quit_all
    name = inspect.stack()[0][3]
    try:
        if ch == 'q':
            logging.info("%s:%s Quitting." % (caller_name, name,))
            quit_all = True
    except Exception as e:
        handleException("%s:%s" % (caller_name, name), e)
        quit_all = True
5,336,252
def get_model():
    """
    """
    # Describe the Convolutional Neural Network
    model = tf.keras.Sequential([
        # Convolutions
        # Pooling
        # Flatten units
        tf.keras.layers.Flatten(),
        # Input Layer
        # Avoid overfitting
        # Output layer - NUM_SHAPE_TYPES units
        tf.keras.layers.Dense(NUM_SHAPE_TYPES, activation="softmax")
    ])

    # Train the model
    model.compile(
        optimizer="adam",
        loss="categorical_crossentropy",
        metrics=["accuracy"]
    )
    return model
5,336,253
def test_simulated_module_annotation_crossref(simulated_ann_crossref_vcf, simulated_ann_crossref_csv):
    """Test the cross-reference annotation of the simulated VCF file using the imported module."""
    annotate(SIMULATED_VCF, TEST_VCF, crossref=True, csv=True)
    with open(TEST_VCF) as out:
        assert simulated_ann_crossref_vcf.read() == out.read()
    with open(TEST_CSV) as out:
        assert simulated_ann_crossref_csv.read() == out.read()
5,336,254
def Packet_genReadVpeMagnetometerAdvancedTuning(errorDetectionMode, buffer, size):
    """Packet_genReadVpeMagnetometerAdvancedTuning(vn::protocol::uart::ErrorDetectionMode errorDetectionMode, char * buffer, size_t size) -> size_t"""
    return _libvncxx.Packet_genReadVpeMagnetometerAdvancedTuning(errorDetectionMode, buffer, size)
5,336,255
def compare(times_list=None, name=None, include_list=True, include_stats=True, delim_mode=False, format_options=None): """ Produce a formatted comparison of timing datas. Notes: If no times_list is provided, produces comparison reports on all parallel subdivisions present at the root level of the current timer. To compare parallel subdivisions at a lower level, get the times data, navigate within it to the parallel list of interest, and provide that as input here. As with report(), any further parallel subdivisions encountered have only their member with the greatest total time reported on (no branching). Args: times_list (Times, optional): list or tuple of Times objects. If not provided, uses current root timer. name (any, optional): Identifier, passed through str(). include_list (bool, optional): Display stamps hierarchy. include_stats (bool, optional): Display stamp comparison statistics. delim_mode (bool, optional): If True, format for spreadsheet. format_options (None, optional): Formatting options, see below. Formatting Keywords & Defaults: Human-readable Mode - 'stamp_name_width': 18 - 'list_column_width': 12 - 'list_tab_width': 2 - 'stat_column_width': 8 - 'stat_tab_width': 2 - 'indent_symbol: ' ' (one space) Delimited Mode - 'delimiter': '\t' (tab) - 'ident_symbol': '+' Returns: str: Times data comparison as formatted string. Raises: TypeError: If any element of provided collection is not a Times object. """ if times_list is None: rep = '' for par_dict in itervalues(f.root.times.par_subdvsn): for par_name, par_list in iteritems(par_dict): rep += report_loc.compare(par_list, par_name, include_list, include_stats, delim_mode, format_options) else: if not isinstance(times_list, (list, tuple)): raise TypeError("Expected a list/tuple of times instances for param 'times_list'.") if not all([isinstance(times, Times) for times in times_list]): raise TypeError("At least one member of param 'times_list' is not a Times object.") rep = report_loc.compare(times_list, name, include_list, include_stats, delim_mode, format_options) return rep
5,336,256
def spaces(elem, doc):
    """
    Add LaTeX spaces when needed.
    """
    # Is it in the right format and is it a Space?
    if doc.format in ["latex", "beamer"] and isinstance(elem, Space):
        if isinstance(elem.prev, Str) and elem.prev.text in ["«", "“", "‹"]:
            return RawInline("\\thinspace{}", "tex")
        if isinstance(elem.next, Str):
            if elem.next.text == ":":
                return RawInline("~", "tex")
            if elem.next.text in [";", "?", "!", "»", "”", "›"]:
                return RawInline("\\thinspace{}", "tex")
    return None
5,336,257
def clean_caption(text): """ Remove brackets with photographer names or locations at the end of some captions :param text: a photo caption :return: text cleaned """ text = str(text) text = re.sub(r'\s*\[.+?\]$', '.', text) text = re.sub(r'\s*\(photo.+?\)', '', text) return re.sub(r'-- --.+', '.', text).strip()
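# Example call with an illustrative caption (clean_caption() relies on the
# re module being imported at module level):
import re

caption = "A crowd gathers in the square (photo: John Doe) [Reuters]"
print(clean_caption(caption))  # -> "A crowd gathers in the square."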
5,336,258
def index(limit = None):
    """Prints a suitable index of blog posts, using limit if necessary."""
    global dbh
    c = dbh.cursor()
    # The table name cannot be passed as a bound parameter, so interpolate it
    # into the query string and bind only the values.
    query = ("SELECT post_name, post_date, post_title FROM %s "
             "WHERE post_type = %%s AND post_status = %%s "
             "ORDER BY post_date DESC") % table
    c.execute(query, ("post", "publish"))
    if limit:
        rows = c.fetchmany(limit)
    else:
        rows = c.fetchall()
    printtitle()
    printblankline()
    for row in rows:
        printitem("h", "%s (%s)" % (row[2], row[1]), row[0])
    printblankline()
    if limit:
        printitem("i", "Only the last %d entries are listed here." % limit)
        printitem("1", "View all entries", "all")
    else:
        printitem("i", "All blog entries are shown.")
    printblankline()
    printitem("7", "Search this blog")
    printblankline()
    printcopyright()
5,336,259
def test_ls372_stop_acq_while_running(agent): """'stop_acq' should return True if acq Process is running.""" session = create_session('stop_acq') # Have to init before running anything else agent.init_lakeshore(session, None) # Mock running the acq Process agent.take_data = True res = agent._stop_acq(session, params=None) assert res[0] is True assert agent.take_data is False
5,336,260
def take_slasher_snapshot(client): """ Collects all the command changes from the client's slash command processor. Parameters ---------- client : ``Client`` The client, who will be snapshotted. Returns ------- collected : `None` or `tuple` of (`dict` of (`int`, `list` of `tuple` \ (`bool`, ``SlasherApplicationCommand``)) items, `None` or `set` of ``ComponentCommand``) The collected commands of the slasher. """ slasher = getattr(client, 'slasher', None) if (slasher is None) or (not isinstance(slasher, Slasher)): collected = None else: command_states = slasher._command_states collected_application_commands = None for guild_id, command_state in command_states.items(): if guild_id == SYNC_ID_NON_GLOBAL: active_commands = command_state._active if (active_commands is None): continue command_changes = [(True, command) for command in active_commands] else: changes = command_state._changes if changes is None: continue command_changes = [tuple(change) for change in changes] if collected_application_commands is None: collected_application_commands = {} collected_application_commands[guild_id] = command_changes collected_component_commands = slasher._component_commands if collected_component_commands: collected_component_commands = collected_component_commands.copy() else: collected_component_commands = None if (collected_application_commands is None) and (collected_component_commands is None): collected = None else: collected = (collected_application_commands, collected_component_commands) return collected
5,336,261
def zigpy_device_mains(zigpy_device_mock): """Device tracker zigpy device.""" def _dev(with_basic_channel: bool = True): in_clusters = [general.OnOff.cluster_id] if with_basic_channel: in_clusters.append(general.Basic.cluster_id) endpoints = { 3: { "in_clusters": in_clusters, "out_clusters": [], "device_type": zigpy.profiles.zha.DeviceType.ON_OFF_SWITCH, } } return zigpy_device_mock( endpoints, node_descriptor=b"\x02@\x84_\x11\x7fd\x00\x00,d\x00\x00" ) return _dev
5,336,262
def test_decorator(): """It should be a decorator.""" breaker = CircuitBreaker() @breaker def suc(): """Docstring""" pass @breaker def err(): """Docstring""" raise DummyException() assert 'Docstring' == suc.__doc__ assert 'Docstring' == err.__doc__ assert 'suc' == suc.__name__ assert 'err' == err.__name__ assert 0 == breaker.fail_counter with raises(DummyException): err() assert 1 == breaker.fail_counter suc() assert 0 == breaker.fail_counter
5,336,263
def test_valid_mapping() -> None: """Use a correct mapping. When ds_df_mapping is correctly provided, ensure that it is correctly stored. """ events = pd.DataFrame({ 'event_type': ['pass', 'goal'], 'start_frame': [1, 100], 'end_frame': [200, 250] }) ds = xr.Dataset( data_vars={ 'ball_trajectory': ( ['frame', 'cartesian_coords'], np.exp(np.linspace((-6, -8), (3, 2), 250)) ) }, coords={'frame': np.arange(1, 251), 'cartesian_coords': ['x', 'y']}, attrs={'match_id': 7, 'resolution_fps': 25} ) ds_df_mapping = {'frame': ('start_frame', 'end_frame')} result = ds.assign_attrs(_events=events, _ds_df_mapping=ds_df_mapping) assert_identical( ds.events.load(events, ds_df_mapping), result )
5,336,264
def compute_average_precision(precision, recall):
    """Compute Average Precision according to the definition in VOCdevkit.

    Precision is modified to ensure that it does not decrease as recall
    decreases.

    Args:
        precision: A float [N, 1] numpy array of precisions
        recall: A float [N, 1] numpy array of recalls

    Raises:
        ValueError: if the input is not of the correct format

    Returns:
        average_precision: The area under the precision recall curve. NaN if
            precision and recall are None.
    """
    if precision is None:
        if recall is not None:
            raise ValueError('If precision is None, recall must also be None')
        return np.nan

    if not isinstance(precision, np.ndarray) or not isinstance(
            recall, np.ndarray):
        raise ValueError('precision and recall must be numpy array')
    # np.float was removed from NumPy; check against the concrete float64 dtype.
    if precision.dtype != np.float64 or recall.dtype != np.float64:
        raise ValueError('input must be float numpy array.')
    if len(precision) != len(recall):
        raise ValueError('precision and recall must be of the same size.')
    if not precision.size:
        return 0.0
    if np.amin(precision) < 0 or np.amax(precision) > 1:
        raise ValueError('Precision must be in the range of [0, 1].')
    if np.amin(recall) < 0 or np.amax(recall) > 1:
        raise ValueError('recall must be in the range of [0, 1].')
    if not all(recall[i] <= recall[i + 1] for i in range(len(recall) - 1)):
        raise ValueError('recall must be a non-decreasing array')

    recall = np.concatenate([[0], recall, [1]])
    precision = np.concatenate([[0], precision, [0]])

    # Preprocess precision so it does not decrease as recall decreases
    for i in range(len(precision) - 2, -1, -1):
        precision[i] = np.maximum(precision[i], precision[i + 1])

    indices = np.where(recall[1:] != recall[:-1])[0] + 1
    average_precision = np.sum(
        (recall[indices] - recall[indices - 1]) * precision[indices])
    return average_precision
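# Illustrative call with hand-made precision/recall arrays (float64, as required):
import numpy as np

precision = np.array([1.0, 0.5], dtype=np.float64)
recall = np.array([0.5, 1.0], dtype=np.float64)
print(compute_average_precision(precision, recall))  # VOC-style area under the PR curve: 0.75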
5,336,265
def minimize_experiment_document(document):
    """
    Takes a document belonging to an experiment or to a library in an experiment
    and strips it down to a subset of desired fields. This differs from other
    non-experiment documents in that the attachment is a dictionary rather than
    a simple string concatenation of document @id and href.
    """
    minimized_document = {}
    for key in ('document_type', 'urls', 'references', 'attachment'):
        if key in document:
            if key == 'attachment':
                minimized_document[key] = minimize_attachment(document[key], document['@id'])
            else:
                minimized_document[key] = document[key]
    return minimized_document
5,336,266
def detect_container(path: pathlib.Path) -> type[containers.Container]: """Detect the container of a file""" container_type = filetype.archive_match(path) container_mime_type = container_type.mime if container_type else None return containers.get_container_by_mime_type(container_mime_type)
5,336,267
def score_mod(mod, word_count, mod_count, mod_match_unlabel):
    """Compute the score of a pattern from its labelled count, total count,
    and the number of unlabelled matches."""
    p = word_count[mod]
    u = len(mod_match_unlabel[mod])
    t = mod_count[mod]
    return (p / t) * math.log(u + 1, 2) * math.log(p + 1, 2)
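# Illustrative call with toy count dictionaries (pattern name and values are made up;
# score_mod() relies on the math module being imported):
import math

word_count = {"X eats Y": 8}                        # labelled matches of the pattern
mod_count = {"X eats Y": 10}                        # total occurrences of the pattern
mod_match_unlabel = {"X eats Y": {"a", "b", "c"}}   # unlabelled matches
print(score_mod("X eats Y", word_count, mod_count, mod_match_unlabel))
# -> (8/10) * log2(3+1) * log2(8+1) ≈ 5.07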
5,336,268
def get_matrix(costs='direct'):
    """Table used to compare the most appropriate building class for DCs"""
    health_care = eeio(['233210/health care buildings/us'], [1])
    manu_bldg = eeio(['233230/manufacturing buildings/us'], [1])
    util_bldg = eeio(['233240/utilities buildings and infrastructure/us'], [1])
    _df = util_bldg.level_sectors()
    _df.columns = ['util_bldg']
    if costs == 'direct':
        _df['manu_bldg'], _df['health_care'] = manu_bldg.level_sectors('direct')['direct_costs'], health_care.level_sectors('direct')['direct_costs']
    elif costs == 'total':
        _df['manu_bldg'], _df['health_care'] = manu_bldg.level_sectors()['total_costs'], health_care.level_sectors()['total_costs']
    return _df
5,336,269
def redshift_resource(context): """This resource enables connecting to a Redshift cluster and issuing queries against that cluster. Example: .. code-block:: python from dagster import ModeDefinition, execute_solid, solid from dagster_aws.redshift import redshift_resource @solid(required_resource_keys={'redshift'}) def example_redshift_solid(context): return context.resources.redshift.execute_query('SELECT 1', fetch_results=True) result = execute_solid( example_redshift_solid, run_config={ 'resources': { 'redshift': { 'config': { 'host': 'my-redshift-cluster.us-east-1.redshift.amazonaws.com', 'port': 5439, 'user': 'dagster', 'password': 'dagster', 'database': 'dev', } } } }, mode_def=ModeDefinition(resource_defs={'redshift': redshift_resource}), ) assert result.output_value() == [(1,)] """ return RedshiftResource(context)
5,336,270
def ifft2(a, s=None, axes=(-2, -1), norm=None):
    """Compute the two-dimensional inverse FFT.

    Args:
        a (cupy.ndarray): Array to be transformed.
        s (None or tuple of ints): Shape of the transformed axes of the
            output. If ``s`` is not given, the lengths of the input along
            the axes specified by ``axes`` are used.
        axes (tuple of ints): Axes over which to compute the FFT.
        norm (``"backward"``, ``"ortho"``, or ``"forward"``): Optional keyword
            to specify the normalization mode. Default is ``None``, which is
            an alias of ``"backward"``.

    Returns:
        cupy.ndarray: The transformed array whose shape is specified by ``s``;
        the dtype is converted to complex if the input has another dtype.

    .. seealso:: :func:`numpy.fft.ifft2`
    """
    func = _default_fft_func(a, s, axes)
    return func(a, s, axes, norm, cufft.CUFFT_INVERSE)
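# Round-trip usage sketch (assumes this is the implementation behind
# cupy.fft.ifft2 and that a CUDA device with CuPy installed is available):
import cupy as cp

x = cp.random.random((4, 4))
y = cp.fft.ifft2(cp.fft.fft2(x))
print(cp.allclose(x, y))  # True: ifft2(fft2(x)) recovers x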
5,336,271
def get_displayable_story_summary_dicts(user_id, story_summaries): """Returns a displayable summary dict of the story summaries given to it. Args: user_id: str. The id of the learner. story_summaries: list(StorySummary). A list of the summary domain objects. Returns: list(dict). The summary dict corresponding to the given summary. """ summary_dicts = [] story_ids = [story_summary.id for story_summary in story_summaries] stories = story_fetchers.get_stories_by_ids(story_ids) topic_ids = [story.corresponding_topic_id for story in stories] topics = topic_fetchers.get_topics_by_ids(topic_ids) for index, story_summary in enumerate(story_summaries): summary_dicts.append({ 'id': story_summary.id, 'title': story_summary.title, 'node_titles': story_summary.node_titles, 'thumbnail_filename': story_summary.thumbnail_filename, 'thumbnail_bg_color': story_summary.thumbnail_bg_color, 'description': story_summary.description, 'url_fragment': story_summary.url_fragment, 'story_is_published': ( story_services.is_story_published_and_present_in_topic( stories[index])), 'completed_node_titles': [ node.title for node in ( story_fetchers.get_completed_nodes_in_story( user_id, story_summary.id))], 'all_node_dicts': [ node.to_dict() for node in stories[index].story_contents.nodes ], 'topic_name': topics[index].name, 'topic_url_fragment': topics[index].url_fragment, 'classroom_url_fragment': ( classroom_services.get_classroom_url_fragment_for_topic_id( stories[index].corresponding_topic_id)) }) return summary_dicts
5,336,272
def checkRange(value, ranges):
    """Checks if the value is in the defined range.

    The range definition is a list/iterator of:
      - float values belonging to the defined range M{x \in {a}}
      - 2-tuples of two floats which define a range not including the tuple
        values itself M{x \in ]a,b[}
      - 2-lists of two floats which define a range including the list values
        M{x \in [a,b]}

    The order of elements is not important, so one could define the set of
    integer numbers by a generator returning the following sequence:
    M{0,1,-1,2,-2,3,-3,...}.

    It returns True if the value is in one of the defined ranges.
    Otherwise it returns False.
    """
    for part in ranges:
        if isinstance(part, float):
            if part == value:
                return True
        elif isinstance(part, list) and len(part) == 2:
            if part[0] <= value and value <= part[1]:
                return True
        elif isinstance(part, tuple) and len(part) == 2:
            if part[0] < value and value < part[1]:
                return True
        else:
            from fuzzy.Exception import FuzzyException
            raise FuzzyException("Range definition is wrong")
    return False
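# Example calls (the ranges mix an exact value, an open tuple, and a closed list):
ranges = [1.0, (2.0, 3.0), [4.0, 5.0]]
print(checkRange(1.0, ranges))  # True  (exact value)
print(checkRange(2.0, ranges))  # False (open interval ]2,3[ excludes 2)
print(checkRange(4.0, ranges))  # True  (closed interval [4,5] includes 4)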
5,336,273
def __main__(argv):
    """Command line parser for the scRna-Seq pipeline.

    Inputs: command line arguments
    """
    # the first command, -c, tells us which arguments we need to check for next, and which function we are going to call
    command = argv[1].replace('-c=', '')
    # create the argument parser
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    if command == 'count':
        # CellRanger count method
        # add arguments
        parser.add_argument('--sampleId', '-id', help='Id of sample being run')
        parser.add_argument('--commaFastqs', '-cf', help='Comma separated String with list of fastq directories', default='')
        parser.add_argument('--fastqs', '-fs', help='List of fastq directories', nargs='+', default=[''])
        parser.add_argument('--expectCells', '-E', help='Number of cells to expect', default='')
        parser.add_argument('--forceCells', '-F', help='Force number of cells', default='')
        parser.add_argument('--chemistry', '-C', help='Chemistry of fastqs', default = 'threeprime')
        parser.add_argument('--secondary', '-S', help='Run cellranger secondary analysis', default = 'true')
        parser.add_argument('--transcriptome', '-tf', help='Transcriptome file', default = '')
        parser.add_argument('--doForceCells', '-dfc', help='Boolean to use force cells', default = '')
        parser.add_argument('--command', '-c', help='Command to run', default = '')
        # call the method with parsed args
        args = parser.parse_args()
        cellranger_count(sample_id= args.sampleId, transcriptome= args.transcriptome, comma_fastqs= args.commaFastqs, fastqs= args.fastqs, expect_cells= args.expectCells, force_cells= args.forceCells, chemistry = args.chemistry, do_force_cells = args.doForceCells)
    elif command == 'mkfastq':
        # CellRanger mkfastq method
        # add arguments
        parser.add_argument('--bcl', '-b', help='Location of bcl')
        parser.add_argument('--masterCsv', '-M', help='Master Csv file containing maps of information', default='')
        parser.add_argument('--output_directory', '-O', help='List of fastq directories', default='')
        parser.add_argument('--command', '-c', help='Command to run', default = '')
        # call the method with parsed args
        args = parser.parse_args()
        cellranger_mkfastq(bcl = args.bcl, master_csv = args.masterCsv, output_directory = args.output_directory)
    elif command == 'parse':
        # Orchestra parseCsv Method
        # add arguments
        parser.add_argument('--masterCsv', '-M', help='Master Csv file containing maps of information', default='')
        parser.add_argument('--command', '-c', help='Command to run', default = '')
        # call the method with parsed args
        args = parser.parse_args()
        orchestra_parse_csv(master_csv = args.masterCsv)
    elif command == 'analysis':
        # Orchestra generate analysis csv method
        # add arguments
        parser.add_argument('--masterCsv', '-M', help='Master Csv file containing maps of information', default='')
        parser.add_argument('--h5s', '-hs', help='H5 output files', nargs='+', default=[''])
        parser.add_argument('--command', '-c', help='Command to run', default = '')
        # call the method with parsed args
        args = parser.parse_args()
        orchestra_analysis_csv(master_csv = args.masterCsv, h5s = args.h5s)
    elif command == 'filter':
        # Orchestra filter method
        # add arguments
        parser.add_argument('--masterCsv', '-M', help='Master Csv file containing maps of information', default='')
        parser.add_argument('--paths', '-p', help='Paths to fastq directories', nargs='+', default=[''])
        parser.add_argument('--sampleIds', '-S', help='List of sample names', nargs='+', default=[''])
        parser.add_argument('--transMap', '-t', help='CSV map of reference names to gsurls', default='')
        parser.add_argument('--command', '-c', help='Command to run', default = '')
        # call the method with parsed args
        args = parser.parse_args()
        orchestra_filter(master_csv = args.masterCsv, paths = args.paths, sample_ids = args.sampleIds, trans_map = args.transMap)
5,336,274
def vertical_nemenyi_plot(data, num_reps, alpha = 0.05, cmap = plt.cm.Greens):
    """Vertical Nemenyi plot to compare model ranks and show differences."""
    # Placeholder: the plotting logic has not been implemented yet.
    return
5,336,275
def read_anno_content(anno_file: str): """Read anno content.""" with open(anno_file) as opened: content = json.load(opened) return content
5,336,276
def test_example(tmpdir, example_name):
    """Builds an example, ensures the rendered output matches the expected."""
    source_directory = os.path.join(PROJECT_ROOT, "examples", example_name)
    working_directory = os.path.join(str(tmpdir), example_name)
    shutil.copytree(source_directory, working_directory)

    # On Windows, when the repo is checked out with git, all the unix-standard
    # NL linefeed characters are converted to the dos-standard CRNL newline
    # characters. However, jinja2 always uses NL newlines. Thus all the
    # rendered files in `build/` will have NL, but all the files in
    # `build-EXPECTED/` will have CRNL. Therefore convert `build-EXPECTED/`
    # to use NL, so we can meaningfully compare them.
    # I figured it's better to just do the conversion here, as opposed to a
    # change in global .gitconfig.
    if os.name == "nt":
        pattern = os.path.join(
            working_directory,
            "build-EXPECTED",
            "**",
            "*.html",
        )
        file_list = glob.glob(pattern, recursive=True)
        args = ["dos2unix"] + file_list
        assert subprocess.call(args) == 0, "dos2unix failed"

    # os.getcwd() starts in the project's root directory.
    # But, we want to emulate running build.sh as if we were inside
    # the example directory.
    initial_directory = os.getcwd()
    try:
        os.chdir(working_directory)
        print("Running example {}".format(source_directory))
        print("Working directory for debugging: {}".format(working_directory))
        assert subprocess.call(["sh", "build.sh"]) == 0, "build.sh failed"
        check_same("build-EXPECTED", "build")
    finally:
        os.chdir(initial_directory)
5,336,277
def openRotatorPort(portNum=0, timeout=5):
    """
    Open a serial port for the rotator

    Open commport ``portNum`` with a timeout of ``timeout``.

    Parameters
    ----------
    portNum : integer, default: 0
        commport for the serial connection to the rotator
    timeout : number, default: 5 sec
        timeout for the commport

    Returns
    -------
    port : serial port object
        object used to communicate with the serial port.
    """
    # Use the arguments rather than hard-coded values.
    ser = serial.Serial(portNum, timeout=timeout)
    return ser
5,336,278
def disable_poll_nodes_list(
    nodes: List[str],
    credentials: HTTPBasicCredentials = Depends(
        check_credentials
    ),  # pylint: disable=unused-argument
) -> Dict[str, str]:
    """Disable (snmp) polling on a list of nodes.

    Example of the simplest call:
    curl -X GET --user u:p -H "Content-type: application/json" \
        http://127.0.0.1/api/disable_poll_nodes_list \
        -d '["node1", "node2", "node3"]'"""
    for node in nodes:
        disable_node(node)
    return {"response": "Ok"}
5,336,279
def twitter_sp(name):
    """
    This function extracts location names from Twitter data (.txt file)
    :param name: str, filename
    :return: None
    """
    def twitter_profile(user):
        """
        This function fetches a Twitter user's profile to obtain the location in it
        :param user: str, user name
        :return: str, user location
        """
        consumer_key = credentials.consumer_key
        consumer_secret = credentials.consumer_secret
        access_token = credentials.access_token
        access_secret = credentials.access_secret
        api = twitter.Api(consumer_key=consumer_key, consumer_secret=consumer_secret, access_token_key=access_token, \
                          access_token_secret=access_secret)
        # print(user)
        try:
            user_id = api.UsersLookup(screen_name=user)
            stat = user_id[0].location
        except:
            stat = ''
        # print(stat)
        return stat

    tmp = open(name, 'r')
    text = tmp.read()
    text = text.split('\n')[:-1]
    # print(len(set(text)))
    output = defaultdict(list)
    p1 = re.compile(r'[<](.*?)[>]')  # pattern for getting the user name in the text
    for tweet in text:
        # username = re.findall(p1, tweet)[0]
        geo_stat = ''
        # geo_stat = twitter_profile(username)
        time = tweet[20:44]
        msg = tweet[54::]
        if geo_stat:
            msg += geo_stat
        res = token_ex(msg)
        if res:
            output[time].append(res)
    with open('loca_' + name + '.json', 'w') as outfile:
        json.dump(output, outfile)
5,336,280
def extrudePoints(points, disp): """ Return a list of points including the initial points and extruded end """ farEnd=deepcopy(points) farEnd[:,0]+=disp[0] farEnd[:,1]+=disp[1] farEnd[:,2]+=disp[2] return np.vstack( (points,farEnd) )
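# Example: extrude a triangle by one unit along z (illustrative data):
import numpy as np

tri = np.array([[0.0, 0.0, 0.0],
                [1.0, 0.0, 0.0],
                [0.0, 1.0, 0.0]])
prism = extrudePoints(tri, (0.0, 0.0, 1.0))
print(prism.shape)  # (6, 3): the original points followed by their shifted copies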
5,336,281
def hex_form(hash): """Returns the hash formatted in hexadecimal form""" final_hash = '' for i in range(len(hash)): final_hash += format(hash[i], '02x') return final_hash
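# Example: format a SHA-256 digest; on Python 3 this matches bytes.hex():
import hashlib

digest = hashlib.sha256(b"hello").digest()
print(hex_form(digest))
print(hex_form(digest) == digest.hex())  # True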
5,336,282
def construct_job_file(tile_list, job_path): """ Construct a CurveAlign job list for the CHTC batch processing software :param tile_list: List of image files :param job_path: Path to save the job file at :return: """ with tarfile.open(job_path, 'w') as tar: roi_dir = tarfile.TarInfo('ROI_management') roi_dir.type = tarfile.DIRTYPE roi_dir.mode = 0o777 for tile in tile_list: tar.add(tile, arcname=tile.name, recursive=False) roi_path = Path(tile.parent, 'ROI_management', tile.stem + '_ROIs.mat') tar_roi_name = Path('ROI_management', roi_path.name) tar.add(roi_path, arcname=tar_roi_name, recursive=False)
5,336,283
def patch_init_py(init_py, version): """Patch __init__.py to remove version check and append hard-coded version.""" # Open top-level __init__.py and read whole file log.info("patching %s to bake in version '%s'", init_py, version) with open(init_py, 'r+') as init_file: lines = init_file.readlines() # Search for sentinels indicating version checking block try: begin = lines.index("# BEGIN VERSION CHECK\n") end = lines.index("# END VERSION CHECK\n") except ValueError: begin = end = len(lines) # Delete existing repo version checking block in file. Add a baked-in # version string in its place (or at the end), unless already present # (this happens in pip sdist installs). init_file.seek(0) pre_lines = lines[:begin] post_lines = lines[end+1:] init_file.writelines(pre_lines) version_cmd = "__version__ = '{0}'\n".format(version) if version_cmd not in pre_lines and version_cmd not in post_lines: init_file.write("\n# Automatically added by katversion\n") init_file.write(version_cmd) init_file.writelines(post_lines) init_file.truncate()
5,336,284
def rangeFromString(commaString):
    """ Convert a comma string like "1,5-7" into a list [1,5,6,7]

    Returns
    --------
    myList : list of integers

    Reference
    -------
    http://stackoverflow.com/questions/6405208/\
    how-to-convert-numeric-string-ranges-to-a-list-in-python
    """
    listOfLists = [rangeFromHyphen(r) for r in commaString.split(',')]
    # Flatten and materialise as a list, as documented above.
    flatList = list(itertools.chain(*listOfLists))
    return flatList
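# Usage sketch. rangeFromHyphen() is not shown in this snippet; the helper
# below is an assumed implementation ("5-7" -> [5, 6, 7], "1" -> [1]).
import itertools


def rangeFromHyphen(r):
    parts = r.split('-')
    return list(range(int(parts[0]), int(parts[-1]) + 1))


print(list(rangeFromString("1,5-7")))  # [1, 5, 6, 7]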
5,336,285
def wup(synset1: Synset, synset2: Synset) -> float: """Return the Wu-Palmer similarity of *synset1* and *synset2*.""" lch = synset1.lowest_common_hypernyms(synset2, simulate_root=True)[0] n = lch.max_depth() + 1 n1 = len(synset1.shortest_path(lch, simulate_root=True)) n2 = len(synset2.shortest_path(lch, simulate_root=True)) return (2 * n) / (n1 + n2 + 2 * n)
5,336,286
def _adjust_mq_version_headers(ev): """ Unify evidence table headers of different MQ versions. """ ev.rename(columns={ 'Leading razor protein': 'Leading Razor Protein', 'Leading proteins': 'Leading Proteins' }, inplace=True)
5,336,287
def transform_staging_tables(cur, conn): """ This function is used once the connection to the Redshift cluster is effective It executes SQL instructions based on queries provided in the transform_table_queries list """ for query in transform_table_queries: cur.execute(query) conn.commit()
5,336,288
def sum_per_agent(df: pd.DataFrame, columns: List[str]) -> pd.DataFrame: """Calculates summed values per agent for each given column individually""" all_values_per_agent = pd.DataFrame(columns=columns) for column in columns: function = calc_sum(column) value_per_agent = call_function_per_agent(df, function) for agent_id, value in value_per_agent.items(): all_values_per_agent.at[agent_id, column] = value return all_values_per_agent
5,336,289
def _add_last_conditional_coloring( sheet, worksheet, status_col, last_col, update_cell, start_row, length ): """Mark running jobs without updates.""" status_col_name = string.ascii_uppercase[status_col] last_col_name = string.ascii_uppercase[last_col] time_different_rule = f"(TO_PURE_NUMBER({update_cell}) - TO_PURE_NUMBER({last_col_name}{start_row + 1})) * 24 > 1" is_running_rule = f'REGEXMATCH({status_col_name}{start_row + 1}, "RUNN")' boolean_rules = [ { "condition": { "type": "CUSTOM_FORMULA", "values": [ {"userEnteredValue": f"=AND({time_different_rule}, {is_running_rule})"} ], }, "format": {"backgroundColor": {"red": 0.99, "green": 0.9, "blue": 0.8}}, } ] for i, rule in enumerate(boolean_rules): request = { "addConditionalFormatRule": { "rule": { "ranges": [ { "sheetId": worksheet.id, "startColumnIndex": last_col, "endColumnIndex": last_col + 1, "startRowIndex": start_row, "endRowIndex": start_row + length, } ], "booleanRule": rule, }, "index": 100 + i, } } sheet.custom_request(request, "")
5,336,290
async def vcx_ledger_get_fees() -> str: """ Get ledger fees from the sovrin network Example: fees = await vcx_ledger_get_fees() :return: JSON representing fees { "txnType1": amount1, "txnType2": amount2, ..., "txnTypeN": amountN } """ logger = logging.getLogger(__name__) if not hasattr(vcx_ledger_get_fees, "cb"): logger.debug("vcx_ledger_get_fees: Creating callback") vcx_ledger_get_fees.cb = create_cb(CFUNCTYPE(None, c_uint32)) result = await do_call('vcx_ledger_get_fees', vcx_ledger_get_fees.cb) logger.debug("vcx_ledger_get_fees completed") return result
5,336,291
def calc_one_sample_metric(sample):
    """
    Compute the ROUGE-L and BLEU-4 scores for a single sample of the V1 data.
    """
    if len(sample['best_match_scores']) == 0:  # bad case
        return -1, -1

    pred_answers, ref_answers = [], []

    pred_answers.append({'question_id': sample['question_id'],
                         'question_type': sample['question_type'],
                         # use the gold fake answer as the predicted answer
                         'answers': [''.join(sample['fake_answers'][sample['best_match_scores'].index(max(sample['best_match_scores']))])],
                         'entity_answers': [[]],
                         'yesno_answers': []})
    ref_answers.append({'question_id': sample['question_id'],
                        'question_type': sample['question_type'],
                        'segmented_question': sample['segmented_question'],
                        'answers': [''.join(seg_ans) for seg_ans in sample['segmented_answers']],
                        'entity_answers': [[]],
                        'yesno_answers': [],
                        'documents': sample['documents']})

    pred_dict = read_data_to_dict(pred_answers)
    ref_dict = read_data_to_dict(ref_answers, is_ref=True)

    metrics = compute_bleu_rouge(pred_dict, ref_dict)
    rouge_l, bleu4 = metrics['ROUGE-L'], metrics['BLEU-4']
    return rouge_l, bleu4
5,336,292
def ta_1d(x, a, w_0, w_1): """1d tanh function.""" return a * np.tanh(w_0 + (w_1 * x))
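# Example evaluation over a small grid (parameter values are arbitrary):
import numpy as np

x = np.linspace(-2.0, 2.0, 5)
print(ta_1d(x, a=1.5, w_0=0.0, w_1=2.0))  # 1.5 * tanh(2x) at each grid point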
5,336,293
def log(*args): """ <Purpose> Used to store program output. Prints output to console by default. <Arguments> Takes a variable number of arguments to print. They are wrapped in str(), so it is not necessarily a string. <Exceptions> None <Returns> Nothing """ for arg in args: print(arg)
5,336,294
def get_flat_topic_df(all_topics, n_topics):
    """
    Get a df with a MultiIndex that is easier to plot
    :param all_topics: DataFrame of word weights with one column per topic ID
    :param n_topics: the number of topics in the model
    :return: df with index [TopicID, Word] and weight
    """
    init_topic = all_topics.columns[0]

    # TODO: refactor to remove the duplication between this block and the loop below.
    topics_flat = all_topics[[init_topic]].copy().dropna(axis=0)
    topics_flat.index.rename("Word", inplace=True)
    topics_flat.columns = ["weight"]
    topics_flat["TopicID"] = init_topic
    topics_flat.set_index("TopicID", inplace=True, append=True)  # ADD the index
    topics_flat = topics_flat.reorder_levels(["TopicID", "Word"])

    for init_topic in all_topics.columns[1:]:
        tf = all_topics[[init_topic]].copy().dropna(axis=0)
        tf.index.rename("Word", inplace=True)
        tf.columns = ["weight"]
        tf["TopicID"] = init_topic
        tf.set_index("TopicID", inplace=True, append=True)  # ADD the index
        tf = tf.reorder_levels(["TopicID", "Word"])
        topics_flat = pd.concat([topics_flat, tf], axis=0)

    topics_flat = pd.concat(
        [topics_flat.
         iloc[topics_flat.index.get_level_values("TopicID") == x, :]
         .copy().sort_values(by="weight", ascending=False)
         for x in range(n_topics)], axis=0)

    return topics_flat
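# Illustrative call (toy word-weight matrix with one column per topic ID,
# not from the original source):
import pandas as pd

all_topics = pd.DataFrame({0: {"cat": 0.4, "dog": 0.3},
                           1: {"fish": 0.6, "dog": 0.1}})
flat = get_flat_topic_df(all_topics, n_topics=2)
print(flat)  # MultiIndex (TopicID, Word) with a single 'weight' column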
5,336,295
def loadConfig(filename):
    """Load and parse .yaml configuration file

    Args:
        filename (str): Path to system configuration file

    Returns:
        dict: representing configuration information

    Raises:
        BdsError: if unable to get configuration information
    """
    try:
        with open(filename) as stream:
            # safe_load avoids the unsafe full loader and the PyYAML
            # deprecation warning for load() without an explicit Loader.
            config = yaml.safe_load(stream)
            return config['bdsSnmpAdapter']
    except Exception as exc:
        raise error.BdsError(
            'Failed to read configuration file %s: %s' % (filename, exc))
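# Usage sketch: write a minimal config file and load it. The file name and
# the 'loggingLevel' key are illustrative; only the top-level 'bdsSnmpAdapter'
# key is required by loadConfig().
import yaml

with open("bds.yml", "w") as stream:
    yaml.safe_dump({"bdsSnmpAdapter": {"loggingLevel": "debug"}}, stream)

print(loadConfig("bds.yml"))  # -> {'loggingLevel': 'debug'}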
5,336,296
def test_generate_script_salt_api(tmpdir): """ Test script generation for the salt-api CLI script """ script_path = cli_scripts.generate_script(tmpdir.strpath, "salt-api") with open(script_path) as rfh: contents = rfh.read() expected = textwrap.dedent( """\ from __future__ import absolute_import import os import sys # We really do not want buffered output os.environ[str("PYTHONUNBUFFERED")] = str("1") # Don't write .pyc files or create them in __pycache__ directories os.environ[str("PYTHONDONTWRITEBYTECODE")] = str("1") import atexit import salt.cli.api import salt.utils.process salt.utils.process.notify_systemd() def main(): sapi = salt.cli.api.SaltAPI() sapi.start() if __name__ == '__main__': exitcode = 0 try: main() except SystemExit as exc: exitcode = exc.code sys.stdout.flush() sys.stderr.flush() atexit._run_exitfuncs() os._exit(exitcode) """ ) assert contents == expected
5,336,297
def scale_t50(t50_val = 1.0, zval = 1.0): """ Change a t50 value from lookback time in Gyr at a given redshift to fraction of the age of the universe. inputs: t50 [Gyr, lookback time], redshift outputs: t50 [fraction of the age of the universe, cosmic time] """ return (1 - t50_val/cosmo.age(zval).value)
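# Example, assuming `cosmo` is an astropy cosmology (e.g. Planck15) defined at
# module level, as scale_t50() expects:
from astropy.cosmology import Planck15 as cosmo

# A galaxy at z=1 that formed half its mass 1 Gyr before observation:
print(scale_t50(t50_val=1.0, zval=1.0))  # ~0.83, i.e. ~83% of the age of the universe at z=1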
5,336,298
def configure( processors: Optional[Iterable[Processor]] = None, wrapper_class: Optional[Type[BindableLogger]] = None, context_class: Optional[Type[Context]] = None, logger_factory: Optional[Callable[..., WrappedLogger]] = None, cache_logger_on_first_use: Optional[bool] = None, ) -> None: """ Configures the **global** defaults. They are used if `wrap_logger` or `get_logger` are called without arguments. Can be called several times, keeping an argument at `None` leaves it unchanged from the current setting. After calling for the first time, `is_configured` starts returning `True`. Use `reset_defaults` to undo your changes. :param processors: The processor chain. See :doc:`processors` for details. :param wrapper_class: Class to use for wrapping loggers instead of `structlog.BoundLogger`. See `standard-library`, :doc:`twisted`, and `custom-wrappers`. :param context_class: Class to be used for internal context keeping. :param logger_factory: Factory to be called to create a new logger that shall be wrapped. :param cache_logger_on_first_use: `wrap_logger` doesn't return an actual wrapped logger but a proxy that assembles one when it's first used. If this option is set to `True`, this assembled logger is cached. See `performance`. .. versionadded:: 0.3.0 *cache_logger_on_first_use* """ _CONFIG.is_configured = True if processors is not None: _CONFIG.default_processors = processors if wrapper_class is not None: _CONFIG.default_wrapper_class = wrapper_class if context_class is not None: _CONFIG.default_context_class = context_class if logger_factory is not None: _CONFIG.logger_factory = logger_factory # type: ignore if cache_logger_on_first_use is not None: _CONFIG.cache_logger_on_first_use = cache_logger_on_first_use
5,336,299