content: string (lengths 22 to 815k)
id: int64 (0 to 4.91M)
def named(name):
    """
    This function is used to decorate middleware functions in order for their
    before and after sections to show up during a verbose run.

    For examples see documentation to this module and tests.
    """
    def new_annotate(mware):
        def new_middleware(handler):
            new_handler = mware(handler)

            def verbose_handler(ctx):
                _print_inwards(name)
                new_ctx = new_handler(ctx)
                _print_outwards(name)
                return new_ctx

            return verbose_handler
        return new_middleware
    return new_annotate
5,338,500
def concatFile(file_list):
    """ To combine files in file list. """
    config = getConfig()
    print('[load]concating...')
    df_list = []
    for f in file_list:
        print(f)
        tmp = pd.read_csv(config['dir_raw'] + f, index_col=None, header=0)
        df_list.append(tmp)
    df = pd.concat(df_list, axis=0, ignore_index=True)
    return df
5,338,501
def apply_reflection(P, source_idx, target_idx, R, n_forward):
    """Modifies `P` in-place."""
    for _source_idx, _target_idx, _R in zip(source_idx, target_idx, R):
        if _source_idx < n_forward:
            P[_target_idx, -1] += _R * P[_source_idx, -2]
        else:
            P[_target_idx, 0] += _R * P[_source_idx, 1]
5,338,502
def multiply_str(char, times):
    """ Return multiplied character in string """
    return char * times
5,338,503
def calories_per_item(hundr, weight, number_cookies, output_type):
    """
    >>> calories_per_item(430, 0.3, 20, 0)
    'One item has 64.5 kcal.'
    >>> calories_per_item(430, 0.3, 20, 1)
    'One item has 64.5 Calories.'
    >>> calories_per_item(1, 1000, 10, 1)
    'One item has 1000.0 Calories.'
    >>> calories_per_item(1, 1000, 10, 0)
    'One item has 1000.0 kcal.'
    >>> calories_per_item(0, 1000, 10, 0)
    'One item has 0.0 kcal.'
    """
    kcal_per_item = hundr * 10  # convert kcal per 100g to kcal per kg
    unit = 'kcal'
    if output_type == 1:  # change output unit based on input
        unit = 'Calories'
    return 'One item has ' + str((kcal_per_item * weight) / number_cookies) + ' ' + unit + '.'
5,338,504
def bitcoind_call(*args):
    """
    Run `bitcoind`, return OS return code
    """
    _, retcode, _ = run_subprocess("/usr/local/bin/bitcoind", *args)
    return retcode
5,338,505
def getItemProduct(db, itemID):
    """
    Get an item's linked product id

    :param db: database pointer
    :param itemID: int
    :return: int
    """
    # Get the one we want
    item = db.session.query(Item).filter(Item.id == itemID).first()
    # if the query didn't return anything, raise noresult exception
    if not item:
        raise NoResult
    # otherwise, return the product_id
    else:
        return item.product_id
5,338,506
def add_tech_types(net, tech):
    """
    Add std_type to an existing net
    """
    for i, t in tech.iterrows():
        # i is ID of tech, t is tech data
        data = dict(c_nf_per_km=t.C,
                    r_ohm_per_km=t.R,
                    x_ohm_per_km=t.X,
                    max_i_ka=t.Imax / 1000,
                    q_mm=t.Section,
                    type='oh' if t.Type == 'Overhead' else 'cs')
        pp.create_std_type(net, name=i, data=data, element='line')
5,338,507
def find_board(board_id: BoardID) -> Optional[Board]:
    """Return the board with that id, or `None` if not found."""
    board = db.session.get(DbBoard, board_id)

    if board is None:
        return None

    return _db_entity_to_board(board)
5,338,508
def test_isin_pattern_0():
    """
    Test IsIn pattern which expresses the IsIn/OneOf semantics.
    """
    inputs = Tensor(np.ones([42]), mindspore.float16)
    softmax_model = nn.Softmax()

    @register_pass(run_only_once=True)
    def softmax_relu_pass():
        x = Any()
        softmax_pattern = Prim(P.Softmax())
        call_softmax = Call(softmax_pattern, [x])
        relu_pattern = Prim(P.ReLU())
        call_relu = Call(relu_pattern, [x])
        pattern = OneOf([call_softmax, call_relu])
        relu6_pattern = Prim(P.ReLU6())
        target = Call(relu6_pattern, [x])
        return pattern, target

    transformed_repr = get_func_graph(softmax_model, inputs).get_return().expanded_str(2)
    unregister_pass(softmax_relu_pass)
    assert "ReLU6" in transformed_repr
    assert "Softmax" not in transformed_repr
5,338,509
def ENsimtime():
    """retrieves the current simulation time t as datetime.timedelta instance"""
    return datetime.timedelta(seconds=_current_simulation_time.value)
5,338,510
def solar_true_longitude(solar_geometric_mean_longitude, solar_equation_of_center):
    """Returns the Solar True Longitude with Solar Geometric Mean Longitude,
    solar_geometric_mean_longitude, and Solar Equation of Center,
    solar_equation_of_center."""
    solar_true_longitude = solar_geometric_mean_longitude + solar_equation_of_center
    return solar_true_longitude
5,338,511
def image_to_fingerprint(image, size=FINGERPRINT_SIZE):
    """Create b64encoded image signature for image hash comparisons"""
    data = image.copy().convert('L').resize((size, size)).getdata()
    return base64.b64encode(bytes(data)).decode()
5,338,512
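A brief usage sketch of image_to_fingerprint above; Pillow as the image library and the FINGERPRINT_SIZE value used here are assumptions for illustration:

from PIL import Image  # assumed image library; the function expects a PIL-style image
import base64          # required by image_to_fingerprint itself

FINGERPRINT_SIZE = 16  # hypothetical value; the real module defines its own constant

img = Image.new('RGB', (64, 64), color=(200, 30, 30))
fp = image_to_fingerprint(img, size=FINGERPRINT_SIZE)
print(len(fp))  # 344 -- base64 of the 16*16 grayscale bytes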
def get_indices(
    time: str | datetime | date, smoothdays: int = None, forcedownload: bool = False
) -> pandas.DataFrame:
    """
    alternative going back to 1931:
    ftp://ftp.ngdc.noaa.gov/STP/GEOMAGNETIC_DATA/INDICES/KP_AP/

    20 year Forecast data from:
    https://sail.msfc.nasa.gov/solar_report_archives/May2016Rpt.pdf
    """
    dtime = todatetime(time)

    fn = downloadfile(dtime, forcedownload)
    # %% load data
    dat: pandas.DataFrame = load(fn)
    # %% optional smoothing over days
    if isinstance(smoothdays, int):
        periods = np.rint(timedelta(days=smoothdays) / (dat.index[1] - dat.index[0])).astype(int)
        if "f107" in dat:
            dat["f107s"] = dat["f107"].rolling(periods, min_periods=1).mean()
        if "Ap" in dat:
            dat["Aps"] = dat["Ap"].rolling(periods, min_periods=1).mean()
    # %% pull out the times we want
    i = [dat.index.get_loc(t, method="nearest") for t in dtime]

    Indices = dat.iloc[i, :]

    return Indices
5,338,513
def get_args():
    """Get CLI arguments and options

    :return: AccuRev branch, git repository location, append option boolean
    """
    parser = argparse.ArgumentParser(description='Migrate AccuRev branch history to git')
    parser.add_argument('accurevBranch',
                        help='The AccuRev branch which will be migrated',
                        type=is_stream)
    parser.add_argument('repoLocation',
                        help='The location of the git repository in which the clone will happen',
                        action=FullPaths,
                        type=is_valid_dest)
    parser.add_argument('-a', '--append',
                        help='Append new AccuRev branch history to an existing git repository',
                        action='store_true')
    args = parser.parse_args()
    source = args.accurevBranch
    dest = args.repoLocation
    append = args.append
    return source, dest, append
5,338,514
def check(device, value):
    """Test for valid setpoint without actually moving."""
    value = json.loads(value)
    return zmq_single_request("check_value", {"device": device, "value": value})
5,338,515
def prove(formula, verbose):
    """
    :param formula: String representation of a modal formula. The syntax for such a
        formula is per the grammar as stipulated in the README.
        Example input: "(a|b) & (~c => d)"
    :return: string showing the outcome of the proof, i.e. valid or not valid.
    """
    try:
        sys.setrecursionlimit(15000)
        negated_fml = "~(" + str(formula) + ")"
        negated_clausal_fml = call_function(verbose, transform, negated_fml, False)
        if call_function(verbose, k_prove, negated_clausal_fml) == sat:
            return "Psi is NOT valid"
        else:
            return "Psi is valid"
    finally:
        sys.setrecursionlimit(1000)
5,338,516
def _assert_covers_from(covers, y, fol):
    """Assert that each element of `covers` is a subset of `y`."""
    for cover in covers:
        assert y | ~ cover == fol.true
5,338,517
def get_projectID(base_url, start, teamID, userID):
    """ Get all the projects from Jama

    Args:
        base_url (string): jama instance base url
        start (int): start at a specific location
        teamID (string): user team ID, for OAuth
        userID (string): user ID, for OAuth

    Returns:
        (dict): Returns JSON object of the Jama API /projects
    """
    url = base_url + "/rest/latest/projects?startAt=" + \
        str(start) + "&maxResults=50"
    return api_caller.get(teamID, userID, url)
5,338,518
def to_world(points_3d, key2d, root_pos):
    """ Transform coordinates from camera to world coordinates """
    _, _, rcams = data_handler.get_data_params()
    n_cams = 4
    n_joints_h36m = 32

    # Add global position back
    points_3d = points_3d + np.tile(root_pos, [1, n_joints_h36m])

    # Load the appropriate camera
    key3d = data_handler.get_key3d(key2d[:3])
    subj, _, sname = key3d
    subj = int(subj)

    cname = sname.split('.')[1]  # <-- camera name
    scams = {(subj, c + 1): rcams[(subj, c + 1)] for c in range(n_cams)}  # cams of this subject
    scam_idx = [scams[(subj, c + 1)][-1] for c in range(n_cams)].index(cname)  # index of camera used
    the_cam = scams[(subj, scam_idx + 1)]  # <-- the camera used
    R, T, f, c, k, p, name = the_cam
    assert name == cname

    def cam2world_centered(data_3d_camframe):
        data_3d_worldframe = cameras.camera_to_world_frame(data_3d_camframe.reshape((-1, 3)), R, T)
        data_3d_worldframe = data_3d_worldframe.reshape((-1, n_joints_h36m * 3))
        # subtract root translation
        return data_3d_worldframe - np.tile(data_3d_worldframe[:, :3], (1, n_joints_h36m))

    # Apply inverse rotation and translation
    return cam2world_centered(points_3d)
5,338,519
async def test_camera_generic_update(
    hass: HomeAssistant, ufp: MockUFPFixture, camera: ProtectCamera
):
    """Tests generic entity update service."""
    await init_entry(hass, ufp, [camera])
    assert_entity_counts(hass, Platform.CAMERA, 2, 1)
    entity_id = "camera.test_camera_high"

    assert await async_setup_component(hass, "homeassistant", {})

    state = hass.states.get(entity_id)
    assert state and state.state == "idle"

    ufp.api.update = AsyncMock(return_value=None)

    await hass.services.async_call(
        "homeassistant",
        "update_entity",
        {ATTR_ENTITY_ID: entity_id},
        blocking=True,
    )

    state = hass.states.get(entity_id)
    assert state and state.state == "idle"
5,338,520
def remove_tags(pipelineId=None, tagKeys=None): """ Removes existing tags from the specified pipeline. See also: AWS API Documentation Exceptions :example: response = client.remove_tags( pipelineId='string', tagKeys=[ 'string', ] ) :type pipelineId: string :param pipelineId: [REQUIRED]\nThe ID of the pipeline.\n :type tagKeys: list :param tagKeys: [REQUIRED]\nThe keys of the tags to remove.\n\n(string) --\n\n :rtype: dict ReturnsResponse Syntax {} Response Structure (dict) -- Contains the output of RemoveTags. Exceptions DataPipeline.Client.exceptions.InternalServiceError DataPipeline.Client.exceptions.InvalidRequestException DataPipeline.Client.exceptions.PipelineNotFoundException DataPipeline.Client.exceptions.PipelineDeletedException :return: {} :returns: DataPipeline.Client.exceptions.InternalServiceError DataPipeline.Client.exceptions.InvalidRequestException DataPipeline.Client.exceptions.PipelineNotFoundException DataPipeline.Client.exceptions.PipelineDeletedException """ pass
5,338,521
def test_insert_items_rebal_right_left_rotation():
    """Test that the tree rebalances on a left right rotation."""
    from bst import AVLBST
    avl = AVLBST()
    avl.insert(85)
    avl.insert(2)
    avl.insert(88)
    avl.insert(79)
    avl.insert(55)
    assert avl.root.val == 85
    assert avl.root.right.val == 88
    assert avl.root.left.val == 55
    assert avl.root.left.left.val == 2
    assert avl.root.left.right.val == 79
    avl.insert(50)
    assert avl.balance() == 1 or avl.balance() == 0 or avl.balance() == -1
    assert avl.root.val == 55
5,338,522
def orbital_energies_from_filename(filepath):
    """Returns the orbital energies from the given filename through functional
    composition

    :param filepath: path to the file
    """
    return orbital_energies(spe_list(
        lines=list(content_lines(filepath, CMNT_STR))))
5,338,523
def _dict_merge(a, b):
    """
    `_dict_merge` deep merges b into a and returns the new dict.
    """
    if not isinstance(b, dict):
        return b

    result = deepcopy(a)
    for k, v in b.items():
        if k in result and isinstance(result[k], dict):
            result[k] = _dict_merge(result[k], v)
        else:
            result[k] = deepcopy(v)
    return result
5,338,524
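A minimal usage sketch of the _dict_merge helper above (it relies on copy.deepcopy being imported in its module):

from copy import deepcopy  # required by _dict_merge

defaults = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
overrides = {'db': {'port': 5433}, 'debug': True}

merged = _dict_merge(defaults, overrides)
print(merged)    # {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}
print(defaults)  # unchanged -- nested keys are merged on a copy, not replaced wholesale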
def flags(flags: int, modstring: str) -> int:
    """
    Modifies the stat flags according to *modstring*, mirroring the syntax for
    POSIX `chmod`.
    """
    mapping = {
        'r': (stat.S_IRUSR, stat.S_IRGRP, stat.S_IROTH),
        'w': (stat.S_IWUSR, stat.S_IWGRP, stat.S_IWOTH),
        'x': (stat.S_IXUSR, stat.S_IXGRP, stat.S_IXOTH)
    }
    target, direction = 'a', None
    for c in modstring:
        if c in '+-':
            direction = c
            continue
        if c in 'ugoa':
            target = c
            direction = None  # Need a - or + after group specifier.
            continue
        if c in 'rwx' and direction and direction in '+-':
            if target == 'a':
                mask = functools.reduce(operator.or_, mapping[c])
            else:
                mask = mapping[c]['ugo'.index(target)]
            if direction == '-':
                flags &= ~mask
            else:
                flags |= mask
            continue
        raise ValueError('invalid chmod: {!r}'.format(modstring))
    return flags
5,338,525
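A short usage sketch of the chmod-style flags helper above; the starting mode 0o640 is just an illustrative value, and the module-level imports the helper relies on are listed explicitly:

import functools, operator, stat  # imports the flags helper above relies on

mode = 0o640                 # rw-r-----
mode = flags(mode, 'u+x')    # owner gains execute     -> 0o740
mode = flags(mode, 'a+r')    # everyone gains read     -> 0o744
mode = flags(mode, 'o-r')    # other loses read again  -> 0o740
print(oct(mode))             # '0o740'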
def _can_contain(ob1, ob2, other_objects, all_obj_locations, end_frame, min_dist):
    """ Return true if ob1 can contain ob2. """
    assert len(other_objects) == len(all_obj_locations)
    # Only cones do the contains, and can contain spl or smaller sphere/cones,
    # cylinders/cubes are too large
    if (len(ob1) == 1 and ob1[0][0]['sized'] > ob2[0][0]['sized']
            and ob1[0][0]['shape'] == 'cone'
            and ob2[0][0]['shape'] in ['cone', 'sphere', 'spl']):
        # Also make sure the moved object will not collide with anything there
        collisions = [
            _obj_overlap(
                # ob2 location since the ob1 will be moved to ob2's location
                # but will have the size of ob1,
                (ob2[0][1].location[0], ob2[0][1].location[1], ob1[0][1].location[2]),
                ob1[0][0]['sized'],
                # top objects location at the end point, and its size
                other_locations[0][end_frame],
                other_obj[0][0]['sized'],
                min_dist)
            for other_obj, other_locations in zip(other_objects, all_obj_locations)]
        if not any(collisions):
            return True
    return False
5,338,526
def twolmodel(attr, pulse='on'):
    """
    This is the 2-layer ocean model; it requires a forcing in W/m2.

    pulse = 'on'   - radiative pulse W/m2
    pulse = 'off'  - time-varying radiative forcing W/m2/yr
    pulse = 'time' - use output from the simple carbon model
    """
    #### Parameters ####
    yeartosec = 30.25 * 24 * 60 * 60 * 12
    rho = 1025  # density of sea water kg/m3
    cw = 3985   # specific heat of sea water J/KgK
    ###################

    # define time steps of the model
    timesteps = np.arange(0, attr['endtime'] + attr['dt'], attr['dt'])
    df = pd.DataFrame(index=timesteps, columns=['T_sfc', 'T_deep'],
                      data=np.zeros((len(timesteps), 2)))

    for t in range(len(timesteps) - 1):
        # note: string options are compared with ==, not `is`
        if pulse == 'on':
            forcing = attr['R'] if t == 0 else 0
        elif pulse == 'off':
            forcing = attr['R'] * timesteps[t]
        elif pulse == 'time':
            forcing = attr['R'][t]
        else:
            raise ValueError("pulse must be 'on', 'off' or 'time'")

        df.iloc[t + 1, df.columns.get_indexer(['T_sfc'])] = (
            df.iloc[t]['T_sfc']
            + (attr['dt'] * yeartosec / (rho * cw * attr['hsfc']))
            * (attr['lb'] * df.iloc[t]['T_sfc'] + forcing
               + attr['beta'] * attr['e'] * (df.iloc[t]['T_deep'] - df.iloc[t]['T_sfc'])))
        df.iloc[t + 1, df.columns.get_indexer(['T_deep'])] = (
            df.iloc[t]['T_deep']
            + (attr['dt'] * yeartosec / (rho * cw * attr['hdeep']))
            * (attr['beta'] * (df.iloc[t]['T_sfc'] - df.iloc[t]['T_deep'])))

    return df
5,338,527
def save_df_to_s3(df_local, s3_bucket, destination):
    """
    Saves a pandas dataframe to S3

    Input:
        df_local: Dataframe to save
        s3_bucket: Bucket name
        destination: Prefix
    """
    csv_buffer = StringIO()
    s3_resource = boto3.resource("s3")
    df_local.to_csv(csv_buffer, index=False)
    s3_resource.Object(s3_bucket, destination).put(Body=csv_buffer.getvalue())
5,338,528
def test_set_entity_ids(nlp: Language, patterns: List[Dict[str, Any]]) -> None:
    """It writes ids to entities."""
    ruler = SpaczzRuler(nlp, spaczz_patterns=patterns)
    nlp.add_pipe(ruler)
    doc = nlp("Grint Anderson was prescribed Zithroma.")
    assert len(doc.ents) == 2
    assert doc.ents[0].label_ == "NAME"
    assert doc.ents[0].ent_id_ == "Developer"
    assert doc.ents[1].label_ == "DRUG"
    assert doc.ents[1].ent_id_ == "Antibiotic"
5,338,529
def maximumToys(prices, k):
    """Problem solution."""
    prices.sort()
    c = 0
    for toy in prices:
        if toy > k:
            return c
        else:
            k -= toy
            c += 1
    return c
5,338,530
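A quick sanity-check sketch for maximumToys above, with made-up prices and budgets:

# Sorted prices are [1, 5, 10, 12, 111, 200, 1000]; 1+5+10+12 = 28 fits in 50,
# adding 111 would not, so four toys can be bought.
assert maximumToys([1, 12, 5, 111, 200, 1000, 10], 50) == 4

# Only the 2 and the 3 fit into a budget of 10 before 7 overshoots.
print(maximumToys([3, 7, 2, 9], 10))  # 2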
def batch_process(process, **kwargs): """Runs a process on a set of files and batches them into subdirectories. Arguments: process ((IN, OUT, Verbosity) -> str): The function to execute on each file. Keyword Arguments: file (Optional[str]): The input files and directories. outpu_dir (Optional[str]): The output directory. batch_size (Optional[int]): The size of each subdirectory or the number of subdirectories, depending on the batch_mode. batch_mode (Optional[str]): The batch mode. Can be one of 'count' or 'divide'. In count mode, each batch will contain at most the number of files specified by the batch_size (default 10). In divide mode, there will be that number of batch directories, and files will be divided evenly between them. batch_dir_format (Optional[str]): The format string for batch subdirectory names. Defaults to 'batch{:03}' verbosity (int): The verbosity of the output. Returns: (None) """ # Get values from kwargs: search_locations = kwargs.get('file', ['.']) search_locations.extend(kwargs.get('extra_files', [])) extensions = kwargs.get('extensions', []) recursive = kwargs.get('recursive', False) output_dir = os.path.abspath(kwargs.get('output_dir', '.')) no_overwrite = kwargs.get('no_overwrite', []) verbosity = kwargs.get('verbose', 1) if kwargs.get('quiet', False): verbosity = 0 batch_mode = kwargs.get('batch_mode', 'none') batch_size = kwargs.get('batch_size', 10) batch_dir_format = kwargs.get('batch_dir_prefix', 'batch{:03}') # Get files to process files, file_count = get_files(search_locations, extensions, recursive) if verbosity >= 3: pprint(kwargs) # Prepare batching info if batch_mode == 'none': batch_count = 0 elif batch_mode == 'divide': batch_count = batch_size elif batch_mode == 'count': batch_count = int(ceil(file_count / batch_size)) batches = [] for batch_num in range(0, batch_count): batch_name = batch_dir_format.format(batch_num) batch_path = os.path.join(output_dir, batch_name) batches.append((batch_path, batch_name)) # Create batch directory. try: if verbosity >= 3: print('Creating directory: {}', os.path.relpath(batch_path)) os.makedirs(batch_path) except OSError as e: if e.errno == errno.EEXIST: # We don't care if directory already exists. pass # Assign files to batches using (input_file, output_location) out = output_dir assigned_files = [] for i, item in enumerate(files): if batch_count > 0: out, short = batches[i % len(batches)] assigned_files.append((item, out)) # Check for already existing outputs. 
existing = get_files(output_dir, no_overwrite, recursive=True)[0] existing = {split_ext(x)[0] : x for x in existing} if verbosity >= 3: print('Process preventing extensions:', no_overwrite) if no_overwrite: if verbosity >= 1: print('\n--- Checking for existing files of types: {} ---' ''.format(no_overwrite)) # Function for checking if file exists in output_dir def check(file_name): base, ext = split_ext(file_name) over_written = existing.get(base, False) if over_written: existing_ext = split_ext(existing[base])[1] if existing_ext.endswith(tuple(no_overwrite)): print('Skip {}{} -> "{}"' ''.format(base, ext, os.path.relpath(existing[base]))) return False return True # Filter for files that don't exist in output_dir assigned_files = [x for x in assigned_files if check(x[0])] if verbosity >= 1 and len(assigned_files) == 0: print('--- No files to process ---\n') return if verbosity >= 1: print('\n--- Begin Processing {} files ---' ''.format(len(assigned_files))) # Process each file: for item, out in assigned_files: process(item, out, verbosity=verbosity) if verbosity >= 1: print('--- End Processing ---\n')
5,338,531
async def test_config_bad_key(hass):
    """Check config with bad key."""
    config = {"name": "test", "asdf": 5, "platform": "universal"}
    config = validate_config(config)
    assert "asdf" not in config
5,338,532
def get_X_HBR_d_t_i(X_star_HBR_d_t):
    """(47)

    Args:
        X_star_HBR_d_t: Absolute humidity of the occupied rooms at load balance
            at time t on date d (kg/kg(DA))

    Returns:
        Actual absolute humidity of the occupied rooms in heating/cooling zone i
        at time t on date d (kg/kg(DA))
    """
    X_star_HBR_d_t_i = np.tile(X_star_HBR_d_t, (5, 1))
    return X_star_HBR_d_t_i
5,338,533
def add_route(url: str,
              response: Optional[str] = None,
              method: str = 'GET',
              response_type: str = 'JSON',
              status_code: int = 200,
              headers: Optional[Dict[str, str]] = None,
              callback: Optional[Callable[[Any], None]] = None,
              ) -> None:
    """
    Add route to app.

    :param url: the URL rule as string
    :param response: return value
    :param method: HTTP method
    :param response_type: type of response (JSON, HTML, RSS)
    :param status_code: return status code
    :param headers: return headers
    :param callback: function that will be executed before the response returns
    """
    endpoint = '{url}::{method}::{status_code}'.format(
        url=url, method=method, status_code=status_code
    )

    @app.route(url, endpoint=endpoint, methods=[method])
    def handler(*args, **kwargs):
        if callback is not None:
            callback(request, *args, **kwargs)

        json_response = jsonify(response)
        if headers is not None:
            json_response.headers.update(headers)

        return json_response, status_code
5,338,534
def findUsername(data):
    """Find a username in an Element

    Args:
        data (xml.etree.ElementTree.Element): XML from PMS as an Element

    Returns:
        username or None
    """
    elem = data.find('User')
    if elem is not None:
        return elem.attrib.get('title')
    return None
5,338,535
def test_non_unischema_with_many_colums_with_one_shot_iterator(carbon_many_columns_non_unischema_dataset):
    """Just a bunch of read and compares of all values to the expected values"""
    with make_batch_carbon_reader(carbon_many_columns_non_unischema_dataset.url, workers_count=1) as reader:
        dataset = make_pycarbon_dataset(reader)
        iterator = dataset.make_one_shot_iterator()

        # Make sure we have static shape info for all fields
        for shape in dataset.output_shapes:
            # TODO(yevgeni): check that the shapes are actually correct, not just not None
            assert shape.dims is not None

        # Read a bunch of entries from the dataset and compare the data to reference
        with tf.Session() as sess:
            iterator = iterator.get_next()
            sample = sess.run(iterator)._asdict()
            assert set(sample.keys()) == set(carbon_many_columns_non_unischema_dataset.data[0].keys())
5,338,536
def preprocess_config_factory(args: argparse.Namespace, ref_paths: dict, dataset_type: str) -> Union[BratsConfig, CamCanConfig, IBSRConfig, CANDIConfig, IXIConfig]: """Factory method to create a pre-processing config based on the parsed command line arguments.""" if dataset_type == 'brats': config = BratsConfig( dataset_name=args.dataset_name, dataset_root_path=args.dataset_root_path, do_pre_processing=args.pre_process, do_create_dataset=args.create_dataset, modalities={modality: modality in args.modalities for modality in VALID_BRATS_MODALITIES}, limit_to_n_samples=args.limit_n_samples, exclude_empty_slices=args.exclude_empty_slices, do_bias_correction=not args.no_bias_correction, force_bias_correction=args.force_bias_correction, do_histogram_matching=not args.no_histogram_matching, ref_paths=ref_paths, do_normalization=not args.no_normalization, normalization_method=args.normalization_method, shuffle_pre_processing=args.shuffle_pre_processing, background_value=BACKGROUND_VALUE, hdf5_out_folder_path=args.hdf5_out_dir_path, n4_executable_path=N4_EXECUTABLE_PATH, store_pre_processing_output=not args.no_output, print_debug=args.print_debug ) return config elif dataset_type == 'camcan': config = CamCanConfig( dataset_name=args.dataset_name, dataset_root_path=args.dataset_root_path, image_modality=args.modality, limit_to_n_samples=args.limit_n_samples, exclude_empty_slices=args.exclude_empty_slices, do_histogram_matching=not args.no_histogram_matching, ref_paths=ref_paths, do_normalization=not args.no_normalization, normalization_method=args.normalization_method, background_value=args.background_value, hdf5_out_folder_path=args.hdf5_out_dir_path, n4_executable_path=N4_EXECUTABLE_PATH, val_fraction=args.val_fraction, print_debug=args.print_debug ) return config elif dataset_type == 'ixi': config = IXIConfig( dataset_name=args.dataset_name, dataset_root_path=args.dataset_root_path, image_modality=args.modality, limit_to_n_samples=args.limit_n_samples, exclude_empty_slices=args.exclude_empty_slices, do_histogram_matching=not args.no_histogram_matching, ref_paths=ref_paths, do_normalization=not args.no_normalization, normalization_method=args.normalization_method, background_value=args.background_value, hdf5_out_folder_path=args.hdf5_out_dir_path, n4_executable_path=N4_EXECUTABLE_PATH, val_fraction=args.val_fraction, print_debug=args.print_debug ) return config elif dataset_type == 'ibsr': config = IBSRConfig( dataset_name=args.dataset_name, dataset_root_path=args.dataset_root_path, image_modality='t1', limit_to_n_samples=args.limit_n_samples, exclude_empty_slices=args.exclude_empty_slices, do_histogram_matching=not args.no_histogram_matching, ref_paths=ref_paths, do_normalization=not args.no_normalization, normalization_method=args.normalization_method, background_value=args.background_value, hdf5_out_folder_path=args.hdf5_out_dir_path, n4_executable_path=N4_EXECUTABLE_PATH, val_fraction=args.val_fraction, print_debug=args.print_debug ) return config elif dataset_type == 'candi': config = CANDIConfig( dataset_name=args.dataset_name, dataset_root_path=args.dataset_root_path, image_modality='t1', limit_to_n_samples=args.limit_n_samples, exclude_empty_slices=args.exclude_empty_slices, do_histogram_matching=not args.no_histogram_matching, ref_paths=ref_paths, do_normalization=not args.no_normalization, normalization_method=args.normalization_method, background_value=args.background_value, hdf5_out_folder_path=args.hdf5_out_dir_path, n4_executable_path=N4_EXECUTABLE_PATH, 
val_fraction=args.val_fraction, print_debug=args.print_debug ) return config else: raise KeyError(f'Given dataset_type {dataset_type} not supported.')
5,338,537
def GET(request):
    """Get this Prefab."""
    request.check_required_parameters(path={'prefabId': 'string'})
    prefab = Prefab.from_id(request.params_path['prefabId'])
    prefab.check_exists()
    prefab.check_user_access(request.google_id)
    return Response(200, 'Successfully retrieved prefab', prefab.obj)
5,338,538
def fix_empty_strings(tweet_dic):
    """Replace empty strings with None."""
    def fix_media_info(media_dic):
        for k in ['title', 'description']:
            if media_dic.get('additional_media_info', {}).get(k) == '':
                media_dic['additional_media_info'][k] = None
        return media_dic

    for m in tweet_dic.get('entities', {}).get('media', []):
        m = fix_media_info(m)
    for m in tweet_dic.get('extended_entities', {}).get('media', []):
        m = fix_media_info(m)
    for m in tweet_dic.get('extended_tweet', {}).get('entities', {}).get('media', []):
        m = fix_media_info(m)
    for m in tweet_dic.get('extended_tweet', {}).get('extended_entities', {}).get('media', []):
        m = fix_media_info(m)

    for k in [
        'profile_background_image_url',
        'profile_background_image_url_https',
        'profile_image_url',
        'profile_image_url_https',
    ]:
        if tweet_dic.get('user', {}).get(k) == '':
            tweet_dic['user'][k] = None

    return tweet_dic
5,338,539
def group_joinrequest(request, group_id): """ Handle post request to join a group. """ if not request.is_ajax() or request.method != 'POST': raise Http404 result = {} content_type = 'application/json; charset=utf-8' group_id = int(group_id) group = get_group(group_id) if not group: raise Http404 user = request.user.username # TODO: Group creator is group staff now, but may changed in future. staff = group.creator_name if is_group_user(group_id, user): # Already in the group. Normally, this case should not happen. err = _(u'You are already in the group.') return HttpResponseBadRequest(json.dumps({'error': err}), content_type=content_type) else: form = GroupJoinMsgForm(request.POST) if form.is_valid(): group_join_msg = form.cleaned_data['group_join_msg'] # Send the message to group staff. use_https = request.is_secure() domain = RequestSite(request).domain t = loader.get_template('group/group_join_email.html') c = { 'staff': staff, 'user': user, 'group_name': group.group_name, 'group_join_msg': group_join_msg, 'site_name': SITE_NAME, } try: send_mail(_(u'apply to join the group'), t.render(Context(c)), None, [staff], fail_silently=False) messages.success(request, _(u'Sent successfully, the group admin will handle it.')) return HttpResponse(json.dumps('success'), content_type=content_type) except: err = _(u'Failed to send. You can try it again later.') return HttpResponse(json.dumps({'error': err}), status=500, content_type=content_type) else: return HttpResponseBadRequest(json.dumps(form.errors), content_type=content_type)
5,338,540
def fibonacci(**kwargs):
    """Fibonacci Sequence as a numpy array"""
    n = int(math.fabs(kwargs.pop('n', 2)))
    zero = kwargs.pop('zero', False)
    weighted = kwargs.pop('weighted', False)

    if zero:
        a, b = 0, 1
    else:
        n -= 1
        a, b = 1, 1

    result = np.array([a])
    for i in range(0, n):
        a, b = b, a + b
        result = np.append(result, a)

    if weighted:
        fib_sum = np.sum(result)
        if fib_sum > 0:
            return result / fib_sum
        else:
            return result
    else:
        return result
5,338,541
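A brief usage sketch of the fibonacci helper above (it expects numpy and math to be imported in its module):

import math
import numpy as np  # required by fibonacci

print(fibonacci(n=5))             # [1 1 2 3 5]
print(fibonacci(n=5, zero=True))  # [0 1 1 2 3 5] -- one extra term when starting at zero
w = fibonacci(n=5, weighted=True)
print(w.sum())                    # 1.0 -- values are normalised by their sum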
def temp_directory():
    """This context manager gives the path to a new temporary directory that
    is deleted (with all its content) at the end of the with block.
    """
    directory = tempfile.mkdtemp()
    try:
        yield directory
    finally:
        shutil.rmtree(directory)
5,338,542
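A short usage sketch for temp_directory above; the with-block usage described in its docstring implies the generator is wrapped with contextlib.contextmanager in its module, which is assumed here:

import os

with temp_directory() as tmp:  # assumes @contextlib.contextmanager on temp_directory
    path = os.path.join(tmp, 'scratch.txt')
    with open(path, 'w') as fh:
        fh.write('temporary data')
    print(os.path.exists(path))  # True while inside the block
print(os.path.exists(path))      # False -- the directory is removed on exit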
def read_data():
    """Reads in the data from (currently) only the development file and returns
    this as a list. Pops the last element, because it is empty."""
    with open('../PMB/parsing/layer_data/4.0.0/en/gold/dev.conll') as file:
        data = file.read()
        data = data.split('\n\n')
        data.pop(-1)
    return data
5,338,543
def argrelextrema(data, comparator, axis=0, order=1, mode='clip'): """ Calculate the relative extrema of `data`. Parameters ---------- data : ndarray Array in which to find the relative extrema. comparator : callable Function to use to compare two data points. Should take two arrays as arguments. axis : int, optional Axis over which to select from `data`. Default is 0. order : int, optional How many points on each side to use for the comparison to consider ``comparator(n, n+x)`` to be True. mode : str, optional How the edges of the vector are treated. 'wrap' (wrap around) or 'clip' (treat overflow as the same as the last (or first) element). Default is 'clip'. See `numpy.take`. Returns ------- extrema : tuple of ndarrays Indices of the maxima in arrays of integers. ``extrema[k]`` is the array of indices of axis `k` of `data`. Note that the return value is a tuple even when `data` is one-dimensional. See Also -------- argrelmin, argrelmax Notes ----- .. versionadded:: 0.11.0 Examples -------- >>> from scipy.signal import argrelextrema >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0]) >>> argrelextrema(x, np.greater) (array([3, 6]),) >>> y = np.array([[1, 2, 1, 2], ... [2, 2, 0, 0], ... [5, 3, 4, 4]]) ... >>> argrelextrema(y, np.less, axis=1) (array([0, 2]), array([2, 1])) """ results = _boolrelextrema(data, comparator, axis, order, mode) return np.where(results)
5,338,544
def test_as_airbyte_stream_incremental(mocker):
    """
    Should return an incremental refresh AirbyteStream with information matching
    the provided Stream interface.
    """
    test_stream = StreamStubIncremental()
    mocker.patch.object(StreamStubIncremental, "get_json_schema", return_value={})

    airbyte_stream = test_stream.as_airbyte_stream()
    exp = AirbyteStream(
        name="stream_stub_incremental",
        json_schema={},
        supported_sync_modes=[SyncMode.full_refresh, SyncMode.incremental],
        default_cursor_field=["test_cursor"],
        source_defined_cursor=True,
        source_defined_primary_key=[["primary_key"]],
    )
    assert exp == airbyte_stream
5,338,545
def test_get_temperature_rise_no_spec_sheet():
    """get_temperature_rise_spec_sheet() should raise a KeyError when passed
    an unknown page number."""
    with pytest.raises(KeyError):
        inductor.get_temperature_rise_spec_sheet(22)
5,338,546
def dijkstra(G, s):
    """ find all shortest paths from s to each other vertex in graph G """
    n = len(G)
    visited = [False] * n
    weights = [math.inf] * n
    path = [None] * n
    queue = []
    weights[s] = 0
    hq.heappush(queue, (0, s))
    while len(queue) > 0:
        g, u = hq.heappop(queue)
        visited[u] = True
        for v, w in G[u]:
            if not visited[v]:
                print(v, w, g, u)
                f = g + w
                if f < weights[v]:
                    weights[v] = f
                    path[v] = u
                    hq.heappush(queue, (f, v))
    return path, weights
5,338,547
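A small usage sketch of the dijkstra routine above on a toy adjacency-list graph (each G[u] is a list of (neighbor, weight) pairs, and the function expects heapq as hq and math in its module); note it also prints edge relaxations as it runs:

import heapq as hq
import math

# 0 --1--> 1 --2--> 2, plus a direct 0 --4--> 2 edge
G = [
    [(1, 1), (2, 4)],  # edges from vertex 0
    [(2, 2)],          # edges from vertex 1
    [],                # vertex 2 has no outgoing edges
]
path, weights = dijkstra(G, 0)
print(weights)  # [0, 1, 3]   -- 0->1 costs 1, 0->1->2 costs 3
print(path)     # [None, 0, 1] -- predecessor of each vertex on its shortest path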
def prepare_create_user_db():
    """Clear a user from the database to be created."""
    username = TEST_USERS[0][0]
    connection = connect_db()
    connection.cursor().execute('DELETE FROM Users WHERE username=%s', (username,))
    connection.commit()
    close_db(connection)
    return username
5,338,548
def _getlocal(ui, rpath):
    """Return (path, local ui object) for the given target path.
    Takes paths in [cwd]/.hg/hgrc into account.
    """
    try:
        wd = os.getcwd()
    except OSError, e:
        raise util.Abort(_("error getting current working directory: %s") %
                         e.strerror)
    path = cmdutil.findrepo(wd) or ""
    if not path:
        lui = ui
    else:
        lui = ui.copy()
        lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)

    if rpath and rpath[-1]:
        path = lui.expandpath(rpath[-1])
        lui = ui.copy()
        lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)

    return path, lui
5,338,549
def get_size_and_sha256(infile):
    """
    Returns the size and SHA256 checksum (as hex) of the given file.
    """
    h = hashlib.sha256()
    size = 0
    while True:
        chunk = infile.read(8192)
        if not chunk:
            break
        h.update(chunk)
        size += len(chunk)
    return (size, h.hexdigest())
5,338,550
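A quick usage sketch of get_size_and_sha256 above with an in-memory file object (stdlib only; hashlib is also what the helper itself needs):

import hashlib
import io

payload = b'hello world'
size, digest = get_size_and_sha256(io.BytesIO(payload))
print(size)                                            # 11
print(digest == hashlib.sha256(payload).hexdigest())   # True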
def process_log_data(spark, input_data, output_data): """ Function to create users, time, and songplays table and to process log data """ # get filepath to log data file log_data = input_data + "log_data/*/*" # read log data file df = spark.read.json(log_data) # filter by actions for song plays df = df.filter(df.page == 'NextSong') # extract columns for users table users_table = (df.select(col('userId').alias('user_id'), col('firstName').alias('first_name'), col('lastName').alias('last_name'), col('gender').alias('gender'), col('level').alias('level')).distinct() ) # write users table to parquet files users_table.write.parquet(output_data + "/users.parquet", mode="overwrite") # create timestamp column from original timestamp column get_timestamp = udf(lambda x: x / 1000, IntegerType()) df = df.withColumn('start_time', get_timestamp('ts')) # create datetime column from original timestamp column get_datetime = udf(lambda x: F.from_unixtime(x)) df = df.withColumn('date_time', F.from_unixtime('start_time')) # extract columns to create time table time_table = (df.select(col('start_time'), hour('date_time').alias('hour'), dayofmonth('date_time').alias('day'), weekofyear('date_time').alias('week'), month('date_time').alias('month'), year('date_time').alias('year'), date_format('date_time', 'E').alias('weekday')) ) # write time table to parquet files partitioned by year and month time_table.write.partitionBy(['year', 'month']).parquet(output_data + '/time.parquet', mode='overwrite') # read in song data to use for songplays table song_df = spark.read.parquet(output_data + '/songs.parquet') # extract columns from joined song and log datasets to create songplays table songplays_table = ( df.withColumn('songplay_id', F.monotonically_increasing_id()) .join(song_df, song_df.title == df.song) .select('songplay_id', col('ts').alias('start_time'), col('userId').alias('user_id'), col('level').alias('level'), col('song_id').alias('song_id'), col("artist_id").alias('artist_id'), col('sessionId').alias('session_id'), col('location').alias('location'), col('userAgent').alias('user_agent'), year('date_time').alias('year'), month('date_time').alias('month')) ) # write songplays table to parquet files partitioned by year and month songplays_table.write.partitionBy(['year', 'month']).parquet(output_data + "/songplays.parquet", mode='overwrite')
5,338,551
def cmServiceAbort():
    """CM SERVICE ABORT Section 9.2.7"""
    a = TpPd(pd=0x5)
    b = MessageType(mesType=0x23)  # 00100011
    packet = a / b
    return packet
5,338,552
def get_distribution(distribution_id):
    """
    Lists information about a specific distribution by id.

    :param distribution_id: Id of CDN distribution
    """
    cloudfront = CloudFront()
    return cloudfront.get_distribution(distribution_id=distribution_id)
5,338,553
def get_information_per_topic(db_path: str, topic: str, field: str):
    """ Query all alert data monitoring rows for a given topic

    Parameters
    ----------
    db_path: str
        Path to the monitoring database. The database will be created if it
        does not exist yet.
    topic: str
        Topic name of a stream
    field: str
        Field for which you want the data.

    Returns
    ----------
    df: pd.DataFrame
        Pandas DataFrame with data of matching alert rows.

    Examples
    ----------
    >>> df = get_information_per_topic(db_fn, "tutu", "objectId")
    >>> print(len(df))
    1
    """
    con = sqlite3.connect(db_path)

    statement = f"SELECT {field} FROM `{ALERT_TABLE}` WHERE topic = '{topic}';"

    # catch error if the DB is empty
    try:
        df = pd.read_sql_query(statement, con)
        alert_id = list(df[field])
    except pd.io.sql.DatabaseError as e:
        print(e)
        alert_id = [""]

    return alert_id
5,338,554
def _testOpen():
    """
    >>> from defcon.test.testTools import getTestFontPath
    >>> from defcon.objects.font import Font
    >>> font = Font(getTestFontPath('TestOpenContour.ufo'))
    >>> glyph = font['A']
    >>> glyph[0].open
    True
    >>> glyph[1].open
    False
    >>> glyph[2].open
    True
    >>> glyph[3].open
    False
    """
5,338,555
def add_packer_codebuild_job(t, name, environment=None): """ Add the packer AMI build to the codebuild job """ cfn_name = sanitize_cfn_resource_name(name) with open(os.path.dirname(os.path.realpath(__file__)) + "/buildspecs/packer.yml") as spec: build_spec = spec.read() codebuild_job_environments = [{ 'Name': 'CLUSTER_NAME', 'Value': name }] if environment: codebuild_job_environments.append({'Name': 'ENVIRONMENT', 'Value': environment}) PackerRole = Role( "CodeBuildPackerRole", AssumeRolePolicyDocument={ "Statement": [{ "Effect": "Allow", "Action": "sts:AssumeRole", "Principal": {"Service": [ "codebuild.amazonaws.com", "ec2.amazonaws.com" ]}, }] }, Policies=[ Policy( PolicyName="codebuild-packer", PolicyDocument={ "Statement": [{ "Effect": "Allow", "Action": [ "ec2:AttachVolume", "ec2:AuthorizeSecurityGroupIngress", "ec2:CopyImage", "ec2:CreateImage", "ec2:CreateKeypair", "ec2:CreateSecurityGroup", "ec2:CreateSnapshot", "ec2:CreateTags", "ec2:CreateVolume", "ec2:DeleteKeyPair", "ec2:DeleteSecurityGroup", "ec2:DeleteSnapshot", "ec2:DeleteVolume", "ec2:DeregisterImage", "ec2:Describe*", "ec2:DetachVolume", "ec2:GetPasswordData", "ec2:ModifyImageAttribute", "ec2:ModifyInstanceAttribute", "ec2:ModifySnapshotAttribute", "ec2:RegisterImage", "ec2:RunInstances", "ec2:StopInstances", "ec2:TerminateInstances", "iam:PassRole" ], "Resource": "*" }, { "Effect": "Allow", "Action": ["logs:*"], "Resource": "*" }, { "Effect": "Allow", "Action": [ "ssm:GetParametersByPath", "ssm:GetParameters", "ssm:GetParameter" ], "Resource": [ "arn:aws:ssm:*:*:parameter/aws/service/ecs*" ] }, { "Effect": "Allow", "Action": "s3:*", "Resource": [{ "Fn::Sub": "arn:aws:s3:::ecs-cluster-deployer-${AWS::AccountId}-${AWS::Region}" }, { "Fn::Sub": "arn:aws:s3:::ecs-cluster-deployer-${AWS::AccountId}-${AWS::Region}/*" }] }] } ) ] ) PackerInstanceProfile = InstanceProfile( "PackerInstanceProfile", InstanceProfileName=f"{cfn_name}PackerInstanceProfile", Roles=[Ref(PackerRole)] ) environment = Environment( ComputeType="BUILD_GENERAL1_SMALL", Type="LINUX_CONTAINER", Image="aws/codebuild/standard:2.0", EnvironmentVariables=codebuild_job_environments, PrivilegedMode=True ) PackerCodebuild = Project( "PackerAMIBuilder", Name=f"{cfn_name}PackerAMIBuilder", Artifacts=Artifacts(Type='CODEPIPELINE'), Environment=environment, ServiceRole=Ref(PackerRole), Source=Source( Type="CODEPIPELINE", BuildSpec=Sub(build_spec) ) ) t.add_resource(PackerRole) t.add_resource(PackerCodebuild) t.add_resource(PackerInstanceProfile)
5,338,556
def gripper_client(finger_positions):
    """Send a gripper goal to the action server."""
    action_address = '/' + prefix + 'driver/fingers_action/finger_positions'

    client = actionlib.SimpleActionClient(action_address,
                                          kinova_msgs.msg.SetFingersPositionAction)
    client.wait_for_server()

    goal = kinova_msgs.msg.SetFingersPositionGoal()
    goal.fingers.finger1 = float(finger_positions[0])
    goal.fingers.finger2 = float(finger_positions[1])
    goal.fingers.finger3 = float(finger_positions[2])
    client.send_goal(goal)

    if client.wait_for_result(rospy.Duration(50.0)):
        return client.get_result()
    else:
        client.cancel_all_goals()
        rospy.logwarn('the gripper action timed-out')
        return None
5,338,557
def plot_scalar_field(snap: Step, fieldname: str) -> None: """Plot scalar field with plate information. Args: snap: a :class:`~stagpy._step.Step` of a StagyyData instance. fieldname: name of the field that should be decorated with plate informations. """ fig, axis, _, _ = field.plot_scalar(snap, fieldname) if conf.plates.continents: c_field = np.ma.masked_where( ~_continents_location(snap, at_surface=False), snap.fields['c'].values[0, :, :, 0]) cmap = colors.ListedColormap(["k", "g", "m"]) with conf.field.context_(colorbar=False): field.plot_scalar(snap, 'c', c_field, axis, cmap=cmap, norm=colors.BoundaryNorm([2, 3, 4, 5], cmap.N)) # plotting velocity vectors field.plot_vec(axis, snap, 'sx' if conf.plates.stress else 'v') # Put arrow where ridges and trenches are _plot_plate_limits_field(axis, snap) saveplot(fig, f'plates_{fieldname}', snap.isnap, close=conf.plates.zoom is None) # Zoom if conf.plates.zoom is not None: if not 0 <= conf.plates.zoom <= 360: raise error.InvalidZoomError(conf.plates.zoom) if 45 < conf.plates.zoom <= 135: ladd, radd, uadd, dadd = 0.8, 0.8, 0.05, 0.1 elif 135 < conf.plates.zoom <= 225: ladd, radd, uadd, dadd = 0.05, 0.1, 0.8, 0.8 elif 225 < conf.plates.zoom <= 315: ladd, radd, uadd, dadd = 0.8, 0.8, 0.1, 0.05 else: # >315 or <=45 ladd, radd, uadd, dadd = 0.1, 0.05, 0.8, 0.8 xzoom = (snap.geom.rcmb + 1) * np.cos(np.radians(conf.plates.zoom)) yzoom = (snap.geom.rcmb + 1) * np.sin(np.radians(conf.plates.zoom)) axis.set_xlim(xzoom - ladd, xzoom + radd) axis.set_ylim(yzoom - dadd, yzoom + uadd) saveplot(fig, f'plates_zoom_{fieldname}', snap.isnap)
5,338,558
def validate_column(column_name,value,lookup_values): """Validates columns found in Seq&Treat tuberculosis AST donation spreadsheets. This function understands either the format of a passed column or uses values derived from lookup Pandas dataframes to check each value in a spreadsheet. Args: column_name (str): the name of the column. Not checked at present! value: the contents to check Returns: True/False """ # the SITEID must exist in the table if column_name=='site_id': result=str(value) in lookup_values['SITES'] # as must the COUNTRY code elif column_name=='country_where_sample_taken': result=value in lookup_values['COUNTRIES'] elif column_name=='instrument_model': result=value in lookup_values['SEQUENCERS'] elif column_name=='isolate_number': try: result=value>0 except: result=False elif column_name=='sequence_replicate_number': result=bool(re.match('^[_0-9]+$',str(value))) elif column_name in ['dataset_name','lab_id','subject_id']: if 'nan' in str(value): return(False) else: result=bool(re.match('^[_\-A-Za-z0-9]+$',str(value))) elif column_name in ['collection_date','submission_date']: # this will catch nans if value!=value: result=True # otherwise the pandas date converters will have picked it up else: result=isinstance(value,datetime.datetime) elif column_name=='reads_file_1': result=bool(re.match('^[\-_A-Za-z0-9]+_R1.fastq.gz$',str(value))) elif column_name=='reads_file_2': result=bool(re.match('^[\-_A-Za-z0-9]+_R2.fastq.gz$',str(value))) elif column_name in ['reads_file_1_md5','reads_file_2_md5']: result=bool(re.match('^[a-z0-9]+$',str(value))) elif column_name in ['ena_deposited']: result=value in [True,False] elif column_name in ['ena_run_accession']: result=False if isinstance(value,float) and numpy.isnan(value): result=True elif isinstance(value,str): result=bool(re.match('^(E|D|S)RR[0-9]{6,}$',value)) elif column_name in ['ena_sample_accession']: result=False if isinstance(value,float) and numpy.isnan(value): result=True elif isinstance(value,str): result=bool(re.match('^(E|D|S)RS[0-9]{6,}$',value)) or bool(value[:5]=='SAMEA') elif column_name=='method': if isinstance(value,float) and numpy.isnan(value): result=True else: result=value in lookup_values['AST_METHODS'] elif column_name=='phenotype': if isinstance(value,float): if numpy.isnan(value): result=True else: result=value>0 elif isinstance(value,str): if value in ['R','S','U']: return True else: if ',' in value: value=value.replace(',','.') if '≥' in value: value=value.replace('≥','>=') if '≤' in value: value=value.replace('≤','<=') if ' ' in value: value=value.replace(' ','') # FIXME: hack to allow through incorrect >=32 MICs (should be >32) if value[:2]=='>=': try: result=float(value[2:])>0 except: result=False elif value[0]=='>': try: result=float(value[1:])>0 except: result=False elif value[:2]=='<=': try: result=float(value[2:])>0 except: result=False # FIXME: hack to allow through incorrect <0.06 MICs (should be <=0.06) elif value[0]=='<': try: result=float(value[1:])>0 except: result=False else: try: result=float(value)>0 except: result=False else: result=value>0 elif column_name=='cc': result=False if isinstance(value,str): result=value in ['WHO','UK'] elif isinstance(value,float): if numpy.isnan(value): return(True) else: result=value>0 elif isinstance(value,int): result=value>0 return result
5,338,559
def get_html_from_url(url, timeout=None):
    """Get HTML document from URL

    Parameters
        url (str): URL to look for
        timeout (float): Inactivity timeout in seconds

    Return
        The HTML document as a string
    """
    resp = reqget(url, timeout=timeout)
    return resp.text
5,338,560
def save_hdf5(connection, filepath): """ Save network parameters in HDF5 format. Parameters ---------- {save_dict.connection} filepath : str Path to the HDF5 file that stores network parameters. Examples -------- >>> from neupy import layers, storage >>> >>> connection = layers.Input(10) > layers.Softmax(3) >>> storage.save_hdf5(connection, '/path/to/parameters.hdf5') """ hdf5 = load_hdf5_module() connection = extract_connection(connection) data = save_dict(connection) with hdf5.File(filepath, mode='w') as f: layer_names = [] for layer in data['layers']: layer_name = layer['name'] layer_group = f.create_group(layer_name) for attrname, attrvalue in layer.items(): if attrname != 'parameters': layer_group.attrs[attrname] = json.dumps( attrvalue, default=repr) for param_name, param in layer['parameters'].items(): dataset = layer_group.create_dataset( param_name, data=param['value']) dataset.attrs['trainable'] = param['trainable'] layer_names.append(layer_name) f.attrs['metadata'] = json.dumps(data['metadata']) f.attrs['graph'] = json.dumps(data['graph']) f.attrs['layer_names'] = json.dumps(layer_names)
5,338,561
def clean(column, output_column=None, file_path=None, df=None, symbols='!@#$%^&*()+={}[]:;’\”/<>', replace_by_space=True, keep_original=False): """ cleans the cell values in a column, creating a new column with the clean values. Args: column: the column to be cleaned. output_column: the name of the column where cleaned column values are stored. If not provided, the name of the new column is the name of the input column with the suffix _clean. file_path: input file path df: or input dataframe symbols: a string containing the set of characters to be removed: default is “!@#$%^&*()+={}[]:;’\”/<>” replace_by_space: when True (default) all instances of the symbols are replaced by a space. In case of removal of multiple consecutive characters, they’ll be replaced by a single space. The value False causes the symbols to be deleted. keep_original: when True, the output column will contain the original value and the clean value will be appended, separated by |. Default is False Returns: a dataframe with the new output clean containing clean values """ if file_path is None and df is None: raise RequiredInputParameterMissingException( 'One of the input parameters is required: {} or {}'.format(file_path, df)) symbols = list(symbols) if output_column is None: output_column = '{}_clean'.format(column) if file_path: df = pd.read_csv(file_path) df[output_column] = df[column].map(lambda x: string_clean(x, symbols, replace_by_space, keep_original)) return df
5,338,562
def len_lt(name, value):
    """
    Only succeed if the length of the given register location is less than
    the given value.

    USAGE:

    .. code-block:: yaml

        foo:
          check.len_lt:
            - value: 42

        run_remote_ex:
          local.cmd:
            - tgt: '*'
            - func: test.ping
            - require:
              - check: foo
    """
    ret = {"name": name, "result": False, "comment": "", "changes": {}}
    if name not in __reg__:
        ret["result"] = False
        ret["comment"] = "Value {0} not in register".format(name)
        return ret
    if len(__reg__[name]["val"]) < value:
        ret["result"] = True
    return ret
5,338,563
def _ll_to_xy(latitude, longitude, wrfin=None, timeidx=0, stagger=None, method="cat", squeeze=True, cache=None, _key=None, as_int=True, **projparams): """Return the x,y coordinates for a specified latitude and longitude. The *latitude* and *longitude* arguments can be a single value or a sequence of values. The leftmost dimension of the returned array represents two different quantities: - return_val[0,...] will contain the X (west_east) values. - return_val[1,...] will contain the Y (south_north) values. Args: latitude (:obj:`float` or sequence): A single latitude or a sequence of latitude values to be converted. longitude (:obj:`float` or sequence): A single longitude or a sequence of latitude values to be converted. wrfin (:class:`netCDF4.Dataset`, :class:`Nio.NioFile`, or an \ iterable): WRF-ARW NetCDF data as a :class:`netCDF4.Dataset`, :class:`Nio.NioFile` or an iterable sequence of the aforementioned types. timeidx (:obj:`int` or :data:`wrf.ALL_TIMES`, optional): The desired time index. This value can be a positive integer, negative integer, or :data:`wrf.ALL_TIMES` (an alias for None) to return all times in the file or sequence. The default is 0. stagger (:obj:`str`): By default, the latitude and longitude are returned on the mass grid, but a staggered grid can be chosen with the following options: - 'm': Use the mass grid (default). - 'u': Use the same staggered grid as the u wind component, which has a staggered west_east (x) dimension. - 'v': Use the same staggered grid as the v wind component, which has a staggered south_north (y) dimension. method (:obj:`str`, optional): The aggregation method to use for sequences. Must be either 'cat' or 'join'. 'cat' combines the data along the Time dimension. 'join' creates a new dimension for the file index. The default is 'cat'. squeeze (:obj:`bool`, optional): Set to False to prevent dimensions with a size of 1 from being automatically removed from the shape of the output. Default is True. cache (:obj:`dict`, optional): A dictionary of (varname, ndarray) that can be used to supply pre-extracted NetCDF variables to the computational routines. It is primarily used for internal purposes, but can also be used to improve performance by eliminating the need to repeatedly extract the same variables used in multiple diagnostics calculations, particularly when using large sequences of files. Default is None. _key (:obj:`int`, optional): A caching key. This is used for internal purposes only. Default is None. as_int (:obj:`bool`): Set to True to return the x,y values as :obj:`int`, otherwise they will be returned as :obj:`float`. **projparams: Map projection keyword arguments to set manually. Returns: :class:`xarray.DataArray` or :class:`numpy.ndarray`: The x,y coordinate value(s) whose leftmost dimension is 2 (0=X, 1=Y). If xarray is enabled and the *meta* parameter is True, then the result will be a :class:`xarray.DataArray` object. Otherwise, the result will be a :class:`numpy.ndarray` object with no metadata. 
""" if wrfin is not None: (map_proj, truelat1, truelat2, stdlon, ref_lat, ref_lon, pole_lat, pole_lon, known_x, known_y, dx, dy, latinc, loninc) = _get_proj_params(wrfin, timeidx, stagger, method, squeeze, cache, _key) else: (map_proj, truelat1, truelat2, stdlon, ref_lat, ref_lon, pole_lat, pole_lon, known_x, known_y, dx, dy, latinc, loninc) = _kwarg_proj_params(**projparams) if isinstance(latitude, Iterable): lats = np.asarray(latitude) lons = np.asarray(longitude) # Note: For scalars, this will make a single element array lats = lats.ravel() lons = lons.ravel() if (lats.size != lons.size): raise ValueError("'latitude' and 'longitude' " "must be the same length") if ref_lat.size == 1: outdim = [2, lats.size] extra_dims = [outdim[1]] else: # Moving domain will have moving ref_lats/ref_lons outdim = [2, ref_lat.size, lats.size] extra_dims = outdim[1:] result = np.empty(outdim, np.float64) for left_idxs in iter_left_indexes(extra_dims): # Left indexes is a misnomer, since these will be on the right x_idxs = (0,) + left_idxs y_idxs = (1,) + left_idxs if ref_lat.size == 1: ref_lat_val = ref_lat[0] ref_lon_val = ref_lon[0] else: ref_lat_val = ref_lat[left_idxs[-2]] ref_lon_val = ref_lon[left_idxs[-2]] lat = lats[left_idxs[-1]] lon = lons[left_idxs[-1]] xy = _lltoxy(map_proj, truelat1, truelat2, stdlon, ref_lat_val, ref_lon_val, pole_lat, pole_lon, known_x, known_y, dx, dy, latinc, loninc, lat, lon) # Note: comes back from fortran as y,x result[x_idxs] = xy[1] result[y_idxs] = xy[0] else: result = np.empty((2,), np.float64) fort_out = _lltoxy(map_proj, truelat1, truelat2, stdlon, ref_lat, ref_lon, pole_lat, pole_lon, known_x, known_y, dx, dy, latinc, loninc, latitude, longitude) # Note, comes back from fortran as y,x. So, need to swap them. result[0] = fort_out[1] result[1] = fort_out[0] # Make indexes 0-based result = result - 1 if as_int: result = np.rint(result).astype(int) return result
5,338,564
def p_command_box(p):
    """command : BOX NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER
               | BOX NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER SYMBOL
               | BOX SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER
               | BOX SYMBOL NUMBER NUMBER NUMBER NUMBER NUMBER NUMBER SYMBOL"""
    cmd = {'op': p[1], 'constants': None, 'cs': None, 'args': []}
    arg_start = 2
    if isinstance(p[2], str):
        cmd['constants'] = p[2]
        arg_start = 3
    if len(p) == 9 and isinstance(p[8], str):
        cmd['cs'] = p[8]
    if len(p) == 10 and isinstance(p[9], str):
        cmd['cs'] = p[9]
    cmd['args'] = p[arg_start:arg_start + 6]
    commands.append(cmd)
5,338,565
def partner_to_partners(apps, schema_editor):
    """
    Adds the Partner object in Authorization.partner to the many-to-many
    relationship in Authorization.partners
    """
    authorization_model = apps.get_model("users", "Authorization")
    for authorization in authorization_model.objects.all():
        authorization.partners.add(authorization.partner)
5,338,566
def spawn():
    """
    Spawn a machine to package or to slave into the network

    :return None:
    """
    args = parse_spawn_arguments()

    spawn_machine(
        assimilate=not args.no_assimilate,
        after_assimilate=not args.no_after_assimilate,
        after_mesh=not args.no_after_mesh,
        provision=not args.no_provision,
        server_type=args.server_type,
        compute_type=args.compute_type,
        only_check_available=args.check_available
    )
5,338,567
def verify_in_person_group(subscription_key): """VerifyInPersonGroup. This will verify whether faces detected as similar in a group are of the same person. """ face_base_url = "https://{}.api.cognitive.microsoft.com".format(FACE_LOCATION) face_client = FaceClient(endpoint=face_base_url, credentials=CognitiveServicesCredentials(subscription_key)) image_url_prefix = "https://csdx.blob.core.windows.net/resources/Face/Images/" target_image_file_names = ["Family1-Dad1.jpg", "Family1-Dad2.jpg"] source_image_file_name1 = "Family1-Dad3.jpg" # Create a person group. person_group_id = str(uuid.uuid4()) print("Create a person group {}.".format(person_group_id)) face_client.person_group.create(person_group_id=person_group_id, name=person_group_id) person_id = face_client.person_group_person.create(person_group_id=person_group_id, name="Dad").person_id # Create a person group person. p = Person(name="Dad", user_data="Person for sample", person_id=person_id) print("Create a person group person {}.".format(p.name)) for target_image_file_name in target_image_file_names: # Add face to the person group. print("Add face to the person group person {} from image {}.".format(p.name, target_image_file_name)) faces = face_client.person_group_person.add_face_from_url( person_group_id=person_group_id, person_id=p.person_id, url=image_url_prefix + target_image_file_name, user_data=target_image_file_name ) if not faces: raise Exception("No persisted face from image {}.".format(target_image_file_name)) # Verification example for faces of the same person. verify_result = face_client.face.verify_face_to_person( face_id=_detect_faces_helper(face_client=face_client, image_url=image_url_prefix + source_image_file_name1)[0].face_id, person_id=p.person_id, person_group_id=person_group_id ) if verify_result.is_identical: print("Faces from {} & {} are of the same (Positive) person, similarity confidence: {}.".format(source_image_file_name1, p.name, verify_result.confidence)) else: print("Faces from {} & {} are of different (Negative) persons, similarity confidence: {}.".format(source_image_file_name1, p.name, verify_result.confidence)) # Delete the person group. print("Delete the person group {}.\n".format(person_group_id)) face_client.person_group.delete(person_group_id=person_group_id)
5,338,568
def onedsinusoid(x, H, A, omega, phi):
    """
    Returns a 1-dimensional sinusoid of the form H + A*np.sin(omega*x + phi),
    where the phase offset phi is given in degrees and converted to radians internally.
    """
    phi = np.pi/180 * phi
    return H + A*np.sin(omega*x + phi)
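# Quick usage sketch (numpy assumed to be imported as np, as the function itself requires);
# note that phi is interpreted in degrees.
import numpy as np

x = np.linspace(0.0, 2.0 * np.pi, 100)
y = onedsinusoid(x, H=1.0, A=0.5, omega=2.0, phi=90.0)  # 0.5-amplitude sine with a +90 degree phase, offset by 1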
5,338,569
def _make_wrapper_func(func_name):
    """
    Helper used by the Eus_pkg class constructor (invoked from make_eus_instance()) to build a wrapper
    function from an entry holding a Euslisp function name. The argument part of the S-expression is
    constructed with _translate_args().

    Args:
        func_name (str): the original Euslisp function name, including the pkg:: prefix. The function is
            assumed to be an internal symbol (if it were exported, external-symbol access with a single
            ':' would have to be used instead).

    Returns:
        wrapper (function): a function that converts positional and keyword arguments into a form that
            Euslisp can process and sends the resulting S-expression to the Euslisp process.
    """
    def wrapper(*args, **kwargs):
        # TODO: checking phase here
        # mapping phase
        if len(args) == len(wrapper.arg_constructors):
            # an element of wrapper.arg_constructors may be None; see the logging phase below
            args = tuple([constructor(arg) if not isinstance(arg, Eus_proxy) and constructor is not None else arg for arg, constructor in zip(args, wrapper.arg_constructors)])
        if len(kwargs) == len(wrapper.kwarg_constructors):
            kwargs = {key:(wrapper.kwarg_constructors[key](kwargs[key]) if not isinstance(kwargs[key], Eus_proxy) else kwargs[key]) for key in wrapper.kwarg_constructors}

        # evaluation phase
        result = eval_foreign_vm_proxy('({}{})'.format(func_name, _translate_args(args, kwargs)))

        # logging phase (reaching this point means no EusError was raised during the evaluation phase, so the argument types were valid)
        # during logging, if an element of args or a value of kwargs is not a proxy (i.e. the default conversion rule was applied), register None in arg_constructors
        # when deriving the constructor from the proxy class, nil-terminated and non-nil-terminated cons conflict; given expected usage we simply assume EusList here!
        # for ind, elm in enumerate(args):
        #     if isinstance(elm, Eus_proxy):
        #         pass
        #     else:
        #         pass

        # for ind, key in enumerate(kwargs):
        #     if isinstance(key, Eus_proxy):
        #         pass
        #     else:
        #         pass
        return result

    wrapper.arg_constructors = []
    wrapper.kwarg_constructors = {}
    wrapper.arg_possible_types = []
    wrapper.kwarg_possible_types = {}
    return wrapper
5,338,570
def number_in_english(number): """Returns the given number in words >>> number_in_english(0) 'zero' >>> number_in_english(5) 'five' >>> number_in_english(11) 'eleven' >>> number_in_english(745) 'seven hundred and fourty five' >>> number_in_english(1380) 'one thousand three hundred and eighty' >>> number_in_english(3204000) 'three million two hundred four thousand' >>> number_in_english(15000) 'fifteen thousand' >>> number_in_english(1005) 'one thousand and five' """ if not number: return 'zero' # split number into blocks of 3 # e.g. 1234567 -> ['567', '234', '1'] numBlocks = int(ceil((log10(number)+1)/3)) # number of digits / 3 number_split = [(number//1000**x)%1000 for x in range(numBlocks)] # translate each block individual and add the word for the power # start with the lowest power word = '' for n, p in zip(number_split, powers): if n: # only the tenner block can have an 'and' (e.g. 'one hundred and five' but not 'one million and one thousand') word = _hundred_in_english(n, (p == '')) + ' ' + p + ' ' + word # remove 'and' that was added but is not precede by a number (e.g. 5 -> 'and five') if word.startswith('and'): word = word.replace('and', '') return word.strip()
5,338,571
def create_app(environment):
    """Factory method that creates an instance of the app with the given config.

    Args:
        environment (str): Specify the configuration to initialize the app with.

    Returns:
        app (Flask): it returns an instance of Flask.
    """
    app = Flask(__name__)
    app.config.from_object(env_configuration[environment])

    db.init_app(app)

    api = Api(
        app=app,
        default='Api',
        default_label="Available Endpoints",
        title='MovieBuff API',
        version='2.0.0',
        description="""MovieBuff Api Endpoint Documentation 📚"""
    )

    # enable cross origin resource sharing
    CORS(app)

    api.add_resource(Users, "/api/v2/auth/<string:operation>", endpoint="user")
    api.add_resource(Movies, "/api/v2/movie", endpoint="movie")
    api.add_resource(Categories,
                     "/api/v2/movie/category",
                     "/api/v2/movie/category/<string:category_id>",
                     endpoint="category")
    api.add_resource(UserMovieRatings, "/api/v2/movie/ratings", endpoint="ratings")
    api.add_resource(Search, "/api/v2/movie/search", endpoint="search")

    # handle default 404 exceptions
    @app.errorhandler(404)
    def resource_not_found(error):
        response = jsonify(dict(
            error='Not found',
            message='The requested URL was not found on the server.'))
        response.status_code = 404
        return response

    # handle default 500 exceptions
    @app.errorhandler(500)
    def internal_server_error(error):
        response = jsonify(dict(
            error='Internal server error',
            message="The server encountered an internal error."))
        response.status_code = 500
        return response

    return app
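# Hedged usage sketch: the "development" key and the run arguments are assumptions that depend on
# how env_configuration is defined in the original project.
if __name__ == "__main__":
    app = create_app("development")
    app.run(host="0.0.0.0", port=5000, debug=True)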
5,338,572
def heating_design_temp(tmy_id): """Returns the heating design temperature (deg F) for the TMY3 site identified by 'tmy_id'. """ return df_tmy_meta.loc[tmy_id].heating_design_temp
5,338,573
def tonal_int(x): """ >>> tonal_int((4,7)) 7 >>> tonal_int((4,7,2)) 31 >>> tonal_int((6,11,-1)) -1 >>> tonal_int((0,-1,-1)) -13 >>> tonal_int((6,0,0)) 12 >>> tonal_int((0,11,0)) -1 >>> tonal_int((0,11)) -1 >>> tonal_int((2, 0)) 0 """ if len(x) == 2: x = _tonal_unmodulo(x) return x[1] d = x[0] c = x[1] base_c = MS[d].c # Example: Cb --- base=0 c=11 c-base=11 11 - 12 = -1 if c - base_c > 3: c = c - C_LEN # Example: B# --- base=11 c=0 c-base=-11 c+C_LEN =12 if c - base_c < -3: c = c + C_LEN return c + x[2]*(C_LEN)
5,338,574
def read_vcf(vcf_file, gene_filter=None, experimentalDesig=None):
    """
    Reads a vcf v4.0 or v4.1 file and generates :class:`~epytope.Core.Variant.Variant` objects containing
    all annotated :class:`~epytope.Core.Transcript.Transcript` ids, and outputs a list of
    :class:`~epytope.Core.Variant.Variant`.

    Only the following variant consequences are considered by the reader; synonymous-labeled variants
    are not integrated into any variant:
    filter_variants = ['missense_variant', 'frameshift_variant', 'stop_gained',
                       'missense_variant&splice_region_variant', "synonymous_variant",
                       "inframe_deletion", "inframe_insertion"]

    :param str vcf_file: The path to the vcf file
    :param list(str) gene_filter: A list of gene names of interest (only variants associated with these
                                  genes are generated)
    :return: Tuple of a list of fully annotated :class:`~epytope.Core.Variant.Variant` objects and a list
             of transcript ids
    :rtype: tuple(list(:class:`~epytope.Core.Variant.Variant`), list(str))
    """
    vl = list()
    with open(vcf_file, 'rb') as tsvfile:
        vcf_reader = vcf.Reader(open(vcf_file, 'r'))
        vl = [r for r in vcf_reader]

    list_vars = []
    transcript_ids = []
    genotype_dict = {"het": False, "hom": True, "ref": True}

    for num, record in enumerate(vl):
        c = record.CHROM.strip('chr')  # chrom
        p = record.POS - 1  # vcf is 1-based & epytope 0-based
        variation_dbid = record.ID  # e.g. rs0123
        r = str(record.REF)  # reference nuc (seq)
        v_list = record.ALT  # list of variants
        q = record.QUAL  # ?
        f = record.FILTER  # empty if PASS, content otherwise

        # I guess we shouldn't expect that keyword to be there ?!
        #z = record.INFO['SOMATIC'] #if true somatic

        vt = VariationType.UNKNOWN
        if record.is_snp:
            vt = VariationType.SNP
        elif record.is_indel:
            if len(v_list) % 3 == 0:  # no frameshift
                if record.is_deletion:
                    vt = VariationType.DEL
                else:
                    vt = VariationType.INS
            else:  # frameshift
                if record.is_deletion:
                    vt = VariationType.FSDEL
                else:
                    vt = VariationType.FSINS
        gene = None

        # WHICH VARIANTS TO FILTER ?
        filter_variants = ['missense_variant', 'frameshift_variant', 'stop_gained',
                           'missense_variant&splice_region_variant', "synonymous_variant",
                           "inframe_deletion", "inframe_insertion"]
        for alt in v_list:
            isHomozygous = False
            if 'HOM' in record.INFO:
                #TODO set by AF & FILTER as soon as available
                isHomozygous = record.INFO['HOM'] == 1
            elif 'SGT' in record.INFO:
                zygosity = record.INFO['SGT'].split("->")[1]
                if zygosity in genotype_dict:
                    isHomozygous = genotype_dict[zygosity]
                else:
                    if zygosity[0] == zygosity[1]:
                        isHomozygous = True
                    else:
                        isHomozygous = False
            else:
                for sample in record.samples:
                    if 'GT' in sample.data:
                        isHomozygous = sample.data['GT'] == '1/1'

            if "ANN" in record.INFO and record.INFO['ANN']:
                isSynonymous = False
                coding = dict()
                for annraw in record.INFO['ANN']:  # for each ANN only add a new coding! see GSvar
                    annots = annraw.split('|')
                    obs, a_mut_type, impact, a_gene, a_gene_id, feature_type, transcript_id, exon, tot_exon, trans_coding, prot_coding, cdna, cds, aa, distance, warns = annots
                    if a_mut_type in filter_variants:
                        tpos = 0
                        ppos = 0

                        # get cds/protein positions and convert mutation syntax to epytope format
                        if trans_coding != '':
                            positions = re.findall(r'\d+', trans_coding)
                            ppos = int(positions[0]) - 1

                        if prot_coding != '':
                            positions = re.findall(r'\d+', prot_coding)
                            tpos = int(positions[0]) - 1

                        isSynonymous = (a_mut_type == "synonymous_variant")

                        # rather take gene_id than gene name
                        gene = a_gene_id

                        # REFSEQ specific ? Do we have to split because of biomart ?
                        transcript_id = transcript_id.split(".")[0]  # TODO vcf are not REFSEQ only
                        # coding string not parsed anyway ? just use the one given by SnpEff
                        coding[transcript_id] = MutationSyntax(transcript_id, ppos, tpos, trans_coding, prot_coding)
                        transcript_ids.append(transcript_id)
                if coding and not isSynonymous:
                    if vt == VariationType.SNP:
                        pos, reference, alternative = p, str(r), str(alt)
                    elif vt == VariationType.DEL or vt == VariationType.FSDEL:
                        if alt != '-':
                            pos, reference, alternative = p + len(alt), r[len(alt):], '-'
                        else:
                            pos, reference, alternative = p, str(r), str(alt)
                    elif vt == VariationType.INS or vt == VariationType.FSINS:
                        if r != '-':
                            if alt != '-':
                                pos, reference, alternative = p + len(r), '-', str(alt)[len(r):]
                            else:
                                pos, reference, alternative = p + len(r), '-', str(alt)
                        else:
                            pos, reference, alternative = p, str(r), str(alt)

                    var = Variant("line" + str(num), vt, c, pos, reference, alternative, coding,
                                  isHomozygous, isSynonymous, experimentalDesign=experimentalDesig)
                    var.gene = gene
                    var.log_metadata("vardbid", variation_dbid)
                    list_vars.append(var)
            else:
                warnings.warn("Skipping unannotated variant", UserWarning)

    return list_vars, transcript_ids
5,338,575
def serial_read_and_publish(ser, mqtt): """thread for reading serial data and publishing to MQTT client""" ser.flushInput() while True: line = ser.readline() # this is blocking line = line.decode() if debug: print() cleaned_line = line.replace('\r', '').replace('\n', '') print("Received from ninja cape:\n{}".format(cleaned_line)) # split the JSON packet up here and publish on MQTT json_data = json.loads(line) if 'DEVICE' in json_data: # Received device update try: device = str(json_data['DEVICE'][0]['D']) + "_" + str(json_data['DEVICE'][0]['G']) message = str(json_data['DEVICE'][0]['DA']) except KeyError as e: print("Error while extracting device or message from received DEVICE JSON data: {}".format(e)) else: # No exceptions - data ok topic = "ninjaCape/input/" + device print("Publishing MQTT: topic='{}', message='{}'".format(topic, message)) mqtt.publish(topic, message) elif 'ACK' in json_data: # Received ACK # {"ACK":[{"G":"0","V":0,"D":1007,"DA":"FFFF00"}]} try: device = str(json_data['ACK'][0]['D']) + "_" + str(json_data['ACK'][0]['G']) message = str(json_data['ACK'][0]['DA']) except KeyError as e: print("Error while extracting device or message from received ACK JSON data: {}".format(e)) else: print("ACK from ninjaCape: device='{}', message='{}'".format(device, message)) else: print("Unknown message type: {}".format(json_data))
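# Rough wiring sketch for the reader thread above; the serial port, baud rate and broker host are
# assumptions, and the Client() call follows the paho-mqtt 1.x API (2.x additionally requires a
# CallbackAPIVersion argument).
import threading

import serial                    # pyserial
import paho.mqtt.client as paho

ser = serial.Serial("/dev/ttyO1", 9600)
mqtt = paho.Client()
mqtt.connect("localhost", 1883, 60)
mqtt.loop_start()

threading.Thread(target=serial_read_and_publish, args=(ser, mqtt), daemon=True).start()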
5,338,576
def tid() -> Tuple[int, int, int]: """ Return the current thread indices for a 3d kernel launch. Use ``i,j,k = wp.tid()`` syntax to retrieve the coordinates inside the kernel thread grid. """ ...
5,338,577
def sqrtmod(a, p): """ Returns a square root of a modulo p. Input: a -- an integer that is a perfect square modulo p (this is checked) p -- a prime Output: int -- a square root of a, as an integer between 0 and p-1. Examples: >>> sqrtmod(4, 5) # p == 1 (mod 4) 3 #rand >>> sqrtmod(13, 23) # p == 3 (mod 4) 6 #rand >>> sqrtmod(997, 7304723089) # p == 1 (mod 4) 761044645L #rand """ a %= p if p == 2: return a assert legendre(a, p) == 1, "a must be a square mod p." if p%4 == 3: return powermod(a, (p+1)/4, p) def mul(x, y): # multiplication in R # (1) return ((x[0]*y[0] + a*y[1]*x[1]) % p, \ (x[0]*y[1] + x[1]*y[0]) % p) def pow(x, n): # exponentiation in R # (2) ans = (1,0) xpow = x while n != 0: if n%2 != 0: ans = mul(ans, xpow) xpow = mul(xpow, xpow) n /= 2 return ans while True: z = randrange(2,p) u, v = pow((1,z), (p-1)/2) if v != 0: vinv = inversemod(v, p) for x in [-u*vinv, (1-u)*vinv, (-1-u)*vinv]: if (x*x)%p == a: return x%p assert False, "Bug in sqrtmod."
5,338,578
def hints(missing_includes): """Output hints for how to configure missing includes on some platforms""" if platform.system() == "Darwin" and "GL/glut.h" in missing_includes: print(""" NOTE: On macOS, include GLUT/glut.h instead of GL/glut.h. Suggested code: # ifdef __APPLE__ # include <GLUT/glut.h> # else # include <GL/glut.h> # endif """) if platform.system() == "Darwin" and "GL/gl.h" in missing_includes: print(""" NOTE: On macOS, include OpenGL/gl.h instead of GL/gl.h. Suggested code: # ifdef __APPLE__ # include <OpenGL/gl.h> # else # include <GL/gl.h> # endif """)
5,338,579
def DrtVariableExpression(variable): """ This is a factory method that instantiates and returns a subtype of ``DrtAbstractVariableExpression`` appropriate for the given variable. """ if is_indvar(variable.name): return DrtIndividualVariableExpression(variable) elif is_funcvar(variable.name): return DrtFunctionVariableExpression(variable) elif is_eventvar(variable.name): return DrtEventVariableExpression(variable) else: return DrtConstantExpression(variable)
5,338,580
def init_db(): """Initiate the database.""" with app.app_context(): db = get_db() with app.open_resource('schema.sql', mode='r') as f: db.cursor().executescript(f.read()) db.commit()
5,338,581
def ShowNexuses(cmd_args=None):
    """ Show Nexuses.
        usage: shownexuses
    """
    nexus_summaries = []
    nexuses = kern.globals.nx_head
    for nx in IterateRBTreeEntry(nexuses, 'struct kern_nexus*', 'nx_link'):
        nexus_summaries.append(GetStructNexusSummary(nx))
    nexus_summaries.sort()
    for nx_str in nexus_summaries:
        print("{0:s}".format(nx_str))
5,338,582
def process_cv_results(cv_results): """ This function reformats the .cv_results_ attribute of a fitted randomized search (or grid search) into a dataframe with only the columns we care about. Args -------------- cv_results : the .cv_results_ attribute of a fitted randomized search (or grid search) object Returns -------------- a sorted dataframe with select information """ results = pd.DataFrame(cv_results) cols = ['mean_test_score', 'mean_train_score', 'std_test_score'] if 'mean_train_score' not in cv_results.keys(): cols = ['mean_test_score', 'std_test_score'] cols += [c for c in results.columns.values if c.startswith('param_')] return results[cols].sort_values(by='mean_test_score', ascending=False)
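# Small self-contained illustration with a hand-built cv_results_-shaped dict; real values would
# come from a fitted RandomizedSearchCV or GridSearchCV object.
fake_cv_results = {
    'mean_test_score': [0.81, 0.76],
    'std_test_score': [0.02, 0.04],
    'mean_train_score': [0.95, 0.88],
    'param_alpha': [0.1, 1.0],
    'param_max_depth': [3, 5],
}
summary = process_cv_results(fake_cv_results)
print(summary)  # rows sorted by mean_test_score, best configuration first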
5,338,583
def cbow(currentWord, C, contextWords, tokens, inputVectors, outputVectors,
         dataset, word2vecCostAndGradient = softmaxCostAndGradient):
    """ CBOW model in word2vec """

    # Implement the continuous bag-of-words model in this function.
    # Input/Output specifications: same as the skip-gram model
    # We will not provide starter code for this function, but feel
    # free to reference the code you previously wrote for this
    # assignment!

    #################################################################
    # IMPLEMENTING CBOW IS EXTRA CREDIT, DERIVATIONS IN THE WRITTEN #
    # ASSIGNMENT ARE NOT!                                           #
    #################################################################

    cost = 0
    gradIn = np.zeros(inputVectors.shape)
    gradOut = np.zeros(outputVectors.shape)

    ### YOUR CODE HERE
    #raise NotImplementedError
    ### END YOUR CODE

    return cost, gradIn, gradOut
5,338,584
def generate_dlf_yaml(in_yaml): """ Generate DLF-compatible YAML configuration file using "templates/dlf_out.yaml" as template. :param in_yaml: dict representation of a YAML document defining placeholder values in "templates/dlf_out.yaml" :type in_yaml: dict :raises PlaceholderNotFoundError: a {{...}} placeholder referenced in "templates/dlf_out.yaml" was not found :raises ValueError in_yaml is not of type dict :return: DLF-compatible YAML file :rtype: str """ dlf_yaml_dict = generate_dlf_yaml_dict(in_yaml) dlf_yaml = yaml.safe_dump(dlf_yaml_dict, default_flow_style=False, allow_unicode=True, sort_keys=False) return dlf_yaml
5,338,585
def dice_coefficient(x, target): """ Dice Loss: 1 - 2 * (intersection(A, B) / (A^2 + B^2)) :param x: :param target: :return: """ eps = 1e-5 n_inst = x.size(0) x = x.reshape(n_inst, -1) target = target.reshape(n_inst, -1) intersection = (x * target).sum(dim=1) union = (x ** 2.0).sum(dim=1) + (target ** 2.0).sum(dim=1) + eps loss = 1. - (2 * intersection / union) return loss
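# Tiny sanity-check sketch for the dice loss above (torch assumed to be imported already).
import torch

pred = torch.rand(4, 1, 32, 32)                 # 4 predicted soft masks
gt = (torch.rand(4, 1, 32, 32) > 0.5).float()   # 4 binary ground-truth masks
loss = dice_coefficient(pred, gt)               # shape (4,): one loss value per instance
print(loss.mean())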
5,338,586
def batch_intersection_union(output, target, nclass):
    """mIoU"""
    # inputs are numpy arrays: output is 4D [N, C, H, W], target is 3D [N, H, W]
    predict = np.argmax(output, axis=1) + 1  # [N,H,W]
    target = target.astype(float) + 1  # [N,H,W]

    predict = predict.astype(float) * (target > 0).astype(float)
    intersection = predict * (predict == target).astype(float)
    # areas of intersection and union; starting the histogram range at 1 drops the zero entries,
    # which is the main difference from np.bincount
    area_inter, _ = np.histogram(intersection, bins=nclass, range=(1, nclass + 1))
    area_pred, _ = np.histogram(predict, bins=nclass, range=(1, nclass + 1))
    area_lab, _ = np.histogram(target, bins=nclass, range=(1, nclass + 1))
    area_all = area_pred + area_lab
    area_union = area_all - area_inter
    return area_inter, area_union
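# Worked example: accumulate per-class intersection/union over a batch and turn them into mIoU.
# The random arrays below stand in for real network logits and labels; because of the +1 shift
# inside the function, a raw label of -1 would be treated as "ignore".
import numpy as np

nclass = 3
logits = np.random.rand(2, nclass, 8, 8)            # [N, C, H, W]
labels = np.random.randint(0, nclass, (2, 8, 8))    # [N, H, W]
inter, union = batch_intersection_union(logits, labels, nclass)
iou = inter / np.maximum(union, 1)                  # guard against division by zero for absent classes
print("mIoU:", iou.mean())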
5,338,587
def plot_pairs_corr(base="CFs_Result", xlim=[-10,10], figsize=(8,4), exclude_folders=["Logs","Plot"],cmap="PiYG"): """ Plot ambient noise results of each pair under the base path Parameters base: base path for the cross-correlation result xlim: lag time range for plot in seconds figsize: figure size for plot exclude_folders: exclude folders as they are not station pair folders """ for item in os.listdir(base): item_path = os.path.join(base,item) if os.path.isdir(item_path) and item not in exclude_folders: sta_pair = item fig,ax = plt.subplots(1,1,figsize=figsize) status = plot_corr(ax,sta_pair,base,xlim=xlim,cmap=cmap) if status == True: fig.savefig(os.path.join(base,sta_pair,f"{sta_pair}.pdf"))
5,338,588
def StepFailure(check, step_odict, step): """Assert that a step failed. Args: step (str) - The step to check for a failure. Usage: yield ( TEST + api.post_process(StepFailure, 'step-name') ) """ check(step_odict[step].status == 'FAILURE')
5,338,589
def convert(trainset, testset, seed=1, batch_size=128, num_workers=2, pin_memory=True):
    """ Converts Dataset objects into train/test DataLoaders, seeding torch (and CUDA, if available). """
    cuda = torch.cuda.is_available()

    torch.manual_seed(seed)
    if cuda:
        torch.cuda.manual_seed(seed)

    # honor the batch_size/num_workers/pin_memory arguments instead of hard-coding them
    dataloader_args = dict(shuffle=True, batch_size=batch_size, num_workers=num_workers,
                           pin_memory=pin_memory) if cuda else dict(shuffle=True, batch_size=64)

    trainloader = torch.utils.data.DataLoader(trainset, **dataloader_args)
    testloader = torch.utils.data.DataLoader(testset, **dataloader_args)

    return trainloader, testloader
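# Hedged usage sketch with torchvision's CIFAR-10 datasets; any pair of torch Dataset objects works.
import torchvision
import torchvision.transforms as transforms

transform = transforms.ToTensor()
trainset = torchvision.datasets.CIFAR10(root="./data", train=True, download=True, transform=transform)
testset = torchvision.datasets.CIFAR10(root="./data", train=False, download=True, transform=transform)
trainloader, testloader = convert(trainset, testset, seed=1, batch_size=128)
images, labels = next(iter(trainloader))
print(images.shape)  # torch.Size([128, 3, 32, 32]) with CUDA; the function falls back to batch 64 on CPU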
5,338,590
def exit(): """quits the application""" System.exit(0)
5,338,591
def test_pipeline_target_encoding_correct(): """ The correct processing of the categorical target at the Pipeline is tested. Moreover, target contains nans and has incorrect shape. Source and predicted labels should not differ. """ classification_data = data_with_categorical_target(with_nan=True) pipeline = Pipeline(PrimaryNode('dt')) pipeline.fit(classification_data) predicted = pipeline.predict(classification_data, output_mode='labels') predicted_labels = predicted.predict assert predicted_labels[0] == 'blue' assert predicted_labels[-1] == 'di'
5,338,592
def build_gradcam(img_path, heatmap, color_map, original_image_colormap, alpha=0.5):
    """
    Builds the Grad-CAM overlay by colorizing the heatmap and superimposing it on the original image.

    Args:
        img_path (str): Path to the original image.
        heatmap (np.ndarray): Grad-CAM heatmap with values in [0, 1].
        color_map (str): Matplotlib colormap name used to colorize the heatmap.
        original_image_colormap (str): Color mode used to load the original image (e.g. "rgb").
        alpha (float, optional): Blending weight of the heatmap. Defaults to 0.5.

    Returns:
        PIL.Image.Image: The superimposed Grad-CAM image.
    """
    img = keras.preprocessing.image.load_img(img_path, color_mode=original_image_colormap)
    img = keras.preprocessing.image.img_to_array(img)

    heatmap = np.uint8(255 * heatmap)

    jet = cm.get_cmap(color_map)
    jet_colors = jet(np.arange(256))[:, :3]
    jet_heatmap = jet_colors[heatmap]

    jet_heatmap = keras.preprocessing.image.array_to_img(jet_heatmap)
    jet_heatmap = jet_heatmap.resize((img.shape[1], img.shape[0]))
    jet_heatmap = keras.preprocessing.image.img_to_array(jet_heatmap)

    superimposed_img = jet_heatmap * alpha + img
    superimposed_img = keras.preprocessing.image.array_to_img(superimposed_img)

    return superimposed_img
5,338,593
def delete_data():
    """
    Delete the data created for these tests.
    The zabbix-related tables are already removed after the test run, so they need no handling here.
    """
    # reset the tables
    RuleType.objects.all().delete()
    DataObject.objects.all().delete()
5,338,594
def sample_joint_comorbidities(age, country):
    """
    Sample joint comorbidities (diabetes, hypertension) for the given age and country.
    `country` must be a value accepted by p_comorbidity, e.g. one of
    {China, us, Republic of Korea, japan, Spain, italy, uk, France}.
    """
    return sample_joint(age,
                        p_comorbidity(country, 'diabetes'),
                        p_comorbidity(country, 'hypertension'))
5,338,595
def generate_tsdf_3d_ewa_image(depth_image, camera, camera_extrinsic_matrix=np.eye(4, dtype=np.float32),
                               field_shape=np.array([128, 128, 128]), default_value=1, voxel_size=0.004,
                               array_offset=np.array([-64, -64, 64]), narrow_band_width_voxels=20,
                               back_cutoff_voxels=np.inf, gaussian_covariance_scale=1.0):
    """
    Generate a 3D TSDF field based on elliptical Gaussian averages (EWA) of depth values from the provided image.
    Elliptical Gaussian filters are projected from spherical 3D Gaussian functions onto the depth image and
    convolved with a circular 2D Gaussian filter before averaging the depth values.
    :type depth_image: np.ndarray
    :param depth_image: depth image to use
    :type camera: calib.camera.DepthCamera
    :param camera: camera used to generate the depth image
    :param voxel_size: voxel size, in meters
    :param array_offset: offset of the TSDF grid from the world origin
    :param camera_extrinsic_matrix: matrix representing transformation of the camera (incl. rotation and translation)
    [ R | T]
    [ 0 | 1]
    :param default_value: default initial TSDF value
    :param field_shape: shape of the TSDF grid to generate
    :param narrow_band_width_voxels: span (in voxels) where signed distance is between -1 and 1
    :param back_cutoff_voxels: where to truncate the negative voxel values (currently not supported!)
    :param gaussian_covariance_scale: scale of elliptical Gaussians (relative to voxel size)
    :return: resulting 3D TSDF
    """
    # TODO: use back_cutoff_voxels for additional limit on
    # "if signed_distance < -narrow_band_half_width" (maybe?)
    if default_value == 1:
        field = np.ones(field_shape, dtype=np.float32)
    elif default_value == 0:
        field = np.zeros(field_shape, dtype=np.float32)
    else:
        field = np.ndarray(field_shape, dtype=np.float32)
        field.fill(default_value)

    camera_intrinsic_matrix = camera.intrinsics.intrinsic_matrix
    depth_ratio = camera.depth_unit_ratio
    narrow_band_half_width = narrow_band_width_voxels / 2 * voxel_size  # in metric units

    w_voxel = 1.0
    camera_rotation_matrix = camera_extrinsic_matrix[0:3, 0:3]
    covariance_voxel_sphere_world_space = np.eye(3) * (gaussian_covariance_scale * voxel_size)
    covariance_camera_space = camera_rotation_matrix.dot(covariance_voxel_sphere_world_space) \
        .dot(camera_rotation_matrix.T)

    image_space_scaling_matrix = camera.intrinsics.intrinsic_matrix[0:2, 0:2]

    squared_radius_threshold = 4.0 * gaussian_covariance_scale * voxel_size

    for z_field in range(field_shape[2]):
        for y_field in range(field_shape[1]):
            for x_field in range(field_shape[0]):
                # coordinates deliberately flipped here to maintain consistency between Python & C++ implementations
                # Eigen Tensors being used are column-major, whereas here we use row-major layout by default
                x_voxel = (z_field + array_offset[0]) * voxel_size
                y_voxel = (y_field + array_offset[1]) * voxel_size
                z_voxel = (x_field + array_offset[2]) * voxel_size

                voxel_world = np.array([[x_voxel, y_voxel, z_voxel, w_voxel]], dtype=np.float32).T
                voxel_camera = camera_extrinsic_matrix.dot(voxel_world).flatten()[:3]

                if voxel_camera[2] <= near_clipping_distance:
                    continue

                # distance along ray from camera to voxel center
                ray_distance = np.linalg.norm(voxel_camera)
                # squared distance along optical axis from camera to voxel
                z_cam_squared = voxel_camera[2] ** 2
                inv_z_cam = 1 / voxel_camera[2]

                projection_jacobian = \
                    np.array([[inv_z_cam, 0, -voxel_camera[0] / z_cam_squared],
                              [0, inv_z_cam, -voxel_camera[1] / z_cam_squared],
                              [voxel_camera[0] / ray_distance, voxel_camera[1] / ray_distance,
                               voxel_camera[2] / ray_distance]])

                remapped_covariance = projection_jacobian.dot(covariance_camera_space) \
                    .dot(projection_jacobian.T)

                final_covariance = image_space_scaling_matrix.dot(remapped_covariance[0:2, 0:2]).dot(
                    image_space_scaling_matrix.T) + np.eye(2)
                Q = np.linalg.inv(final_covariance)

                gaussian = eg.EllipticalGaussian(eg.ImplicitEllipse(Q=Q, F=squared_radius_threshold))

                voxel_image = (camera_intrinsic_matrix.dot(voxel_camera) / voxel_camera[2])[:2]
                voxel_image = voxel_image.reshape(-1, 1)

                bounds_max = gaussian.ellipse.get_bounds()

                result = find_sampling_bounds_helper(bounds_max, depth_image, voxel_image)
                if result is None:
                    continue
                else:
                    (start_x, end_x, start_y, end_y) = result

                weights_sum = 0.0
                depth_sum = 0.0

                for y_sample in range(start_y, end_y):
                    for x_sample in range(start_x, end_x):
                        sample_centered = np.array([[x_sample],
                                                    [y_sample]], dtype=np.float64) - voxel_image

                        dist_sq = gaussian.get_distance_from_center_squared(sample_centered)
                        if dist_sq > squared_radius_threshold:
                            continue
                        weight = gaussian.compute(dist_sq)

                        surface_depth = depth_image[y_sample, x_sample] * depth_ratio
                        if surface_depth <= 0.0:
                            continue
                        depth_sum += weight * surface_depth
                        weights_sum += weight

                if depth_sum <= 0.0:
                    continue
                final_depth = depth_sum / weights_sum
                signed_distance = final_depth - voxel_camera[2]

                field[z_field, y_field, x_field] = common.compute_tsdf_value(signed_distance, narrow_band_half_width)

    return field
5,338,596
def locate(name): """ Locate the object for the given name """ obj = pydoc.locate(name) if not obj: obj = globals().get(name, None) return obj
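# Usage sketch: a dotted path is resolved via pydoc.locate, while a bare name that pydoc cannot
# resolve falls back to this module's globals() (assuming the name is defined at module scope).
json_dumps = locate("json.dumps")
print(json_dumps({"ok": True}))      # {"ok": true}
same_function = locate("locate")     # found via the globals() fallback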
5,338,597
def qmap(func, q, eos_marker=EOS):
    """ Converts a queue to an iterator.

    For every `item` in the `q` that is not `eos_marker`, `yield func(item)`.

    Takes care of calling `.task_done()` on every item extracted from the queue.
    """
    while True:
        item = q.get(block=True)
        if item is eos_marker:
            q.task_done()
            break
        else:
            try:
                yield func(item)
            finally:
                q.task_done()
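# Small end-to-end sketch: a producer thread feeds a Queue, qmap consumes it lazily, and the
# module-level EOS sentinel terminates the iteration.
import queue
import threading

q = queue.Queue()


def producer():
    for i in range(5):
        q.put(i)
    q.put(EOS)  # signal end of stream


threading.Thread(target=producer, daemon=True).start()
print(list(qmap(lambda x: x * x, q)))  # [0, 1, 4, 9, 16]
q.join()  # every item, including EOS, was marked task_done() inside qmap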
5,338,598
def get_mesh_deforms(mesh, deforms, origin, **kwargs): """ input : mesh object deforms forward for mesh?! origin (from volume) output: PointSet of mesh vertices (duplicates removed) and list with deforms (PointSets) of mesh vertices """ from stentseg.utils import PointSet from stentseg.utils.centerline import points_from_mesh # for vertice in mesh._vertices: # vertice[-1] = vertice[-1]*-1 # x,y,z with z flipped # # Turn surfacepoints into a pointset # pp = PointSet(3, dtype='float32') # [pp.append(*p) for p in mesh._vertices] pp = points_from_mesh(mesh, **kwargs) # removes duplicates # Get deformation for all points pp_deforms = [] samplePoints = pp - PointSet([o for o in reversed(origin)], dtype='float32') for deform in deforms: delta_z = deform.get_field_in_points(samplePoints, 0).reshape(-1, 1) delta_y = deform.get_field_in_points(samplePoints, 1).reshape(-1, 1) delta_x = deform.get_field_in_points(samplePoints, 2).reshape(-1, 1) delta = PointSet( np.concatenate((delta_x, delta_y, delta_z), axis=1) ) pp_deforms.append(delta) return pp, pp_deforms
5,338,599