Columns: content (string, lengths 22 to 815k); id (int64, 0 to 4.91M)
def show_output(frame, frame_number, y_value): """ Shows the video output in a window. """ cv2.putText(frame, "Frame: %i" % frame_number, (150, 200), cv2.FONT_HERSHEY_SIMPLEX, 2, 255, thickness=3) cv2.putText(frame, "Y: %s" % str(y_value), (150, 300), cv2.FONT_HERSHEY_SIMPLEX, 2, 255, thickness=3) cv2.namedWindow('Output', cv2.WINDOW_NORMAL) cv2.resizeWindow('Output', 800, 800) cv2.imshow('Output', frame) if DEBUG_MODE: if cv2.waitKey(0) & 0xFF == ord('n'): pass else: cv2.waitKey(1)
20,800
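A minimal driver for the show_output() helper above might look like the sketch below; the video path, the y_value, and the module-level DEBUG_MODE flag are illustrative assumptions, not part of the original snippet.

import cv2

DEBUG_MODE = False  # show_output() expects this flag at module level

cap = cv2.VideoCapture("input.mp4")  # hypothetical input file
frame_number = 0
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    show_output(frame, frame_number, y_value=42)  # y_value is a placeholder
    frame_number += 1
cap.release()
cv2.destroyAllWindows()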
def make_map_counts(events, ref_geom, pointing, offset_max): """Build a WcsNDMap (space - energy) with events from an EventList. The energy of the events is used for the non-spatial axis. Parameters ---------- events : `~gammapy.data.EventList` Event list ref_geom : `~gammapy.maps.WcsGeom` Reference WcsGeom object used to define geometry (space - energy) pointing : `~astropy.coordinates.SkyCoord` Pointing direction of the observation, used to compute the field of view offset. offset_max : `~astropy.coordinates.Angle` Maximum field of view offset. Returns ------- cntmap : `~gammapy.maps.WcsNDMap` Count cube (3D) in true energy bins """ count_map = WcsNDMap(ref_geom) fill_map_counts(count_map, events) # Compute and apply FOV offset mask offset_map = make_separation_map(ref_geom, pointing) offset_mask = offset_map.data >= offset_max count_map.data[:, offset_mask] = 0 return count_map
20,801
def stats_aggregate(): """ RESTful CRUD Controller """ return crud_controller()
20,802
def format_dB(num): """ Return a human-readable dB string. The input is expressed in tenths of a dB, so it is divided by 10 to recover the first decimal digit. """ num /= 10 return f'{num:3.1f} dB'
20,803
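Assuming the tenths-of-a-dB convention described in the docstring, a quick check of format_dB() behaves as follows.

# Inputs are assumed to be integers in tenths of a dB.
print(format_dB(335))   # -> '33.5 dB'
print(format_dB(-42))   # -> '-4.2 dB'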
def _check_index_dtype(k): """ Check the dtype of the index. Parameters ---------- k: slice or array_like Index into an array Examples -------- >>> _check_index_dtype(0) dtype('int64') >>> _check_index_dtype(np.datetime64(0, 'ms')) dtype('<M8[ms]') >>> _check_index_dtype(slice(5, 8)) dtype('int64') """ if not isinstance(k, slice): if hasattr(k, "__len__") and len(k) == 0: return np.intp return np.asarray(k).dtype arr = [v for v in (k.start, k.stop, k.step) if v is not None] return _check_index_dtype(arr)
20,804
def _wrapper_for_precessing_snr(args): """Wrapper function for _precessing_snr for a pool of workers Parameters ---------- args: tuple All args passed to _precessing_snr """ return _precessing_snr(*args)
20,805
def readpcr(path): """ Only for multipattern formats. """ with open(path) as file: lines = file.readlines() # Strip comments lines = [line for line in lines if not line.startswith("!")] pcr = {} # main dictionary line = 0 # line reference ##### start read # pcr name pcr['name'] = lines[line].strip() line = line + 1 # patterns pcr['patterns'] = {} patt_1 = lines[line].split() npatt = int(patt_1[1]) pcr['patterns']["npatt"] = npatt for n in range(npatt): pcr['patterns'][n] = {"is_refined": bool(patt_1[n + 2])} line = line + 1 # pattern weights weights = list(map(float, lines[line].split()[1:])) for n in range(npatt): pcr['patterns'][n]['weight'] = weights[n] line = line + 1 # global flags flags = list(map(int, lines[line].split())) pcr["phases"] = {"nphases": flags[0]} pcr["fl_divergence"] = flags[1] pcr["fl_refl_reorder"] = flags[2] pcr["fl_single_crystal_job"] = flags[3] pcr["fl_optimisations"] = flags[4] pcr["fl_automatic_refine"] = flags[5] line = line + 1 # pattern flags for n in range(npatt): pattflags = {} flags = list(map(int, lines[line].split()[0:14])) pattflags["jobtype"] = flags[0] pattflags["profile_type"] = flags[1] pattflags["background_type"] = flags[2] pattflags["excluded_regions"] = flags[3] pattflags["scatterfactor_userdef"] = flags[4] pattflags["preferred_orientation_type"] = flags[5] pattflags["refine_weighting_type"] = flags[6] pattflags["lorentz_polar_corr"] = flags[7] pattflags["resolution_function_type"] = flags[8] pattflags["reduction_factor"] = flags[9] pattflags["scattering_unit"] = flags[10] pattflags["intensity_corr"] = flags[11] pattflags["anm"] = flags[12] pattflags["int"] = flags[13] pcr['patterns'][n] = {"flags": pattflags} line = line + 1 # pattern names for n in range(npatt): pcr['patterns'][n]["filename"] = lines[line].strip() line = line + 1 # output flags flags = list(map(int, lines[line].split())) pcr["out_correlation_matrix"] = flags[0] pcr["out_update_pcr"] = flags[1] pcr["out_nli"] = flags[2] pcr["out_sym_file"] = flags[3] pcr["out_rpa"] = flags[4] pcr["out_reduced_verbose"] = flags[5] line = line + 1 # output pattern flags for n in range(npatt): pattflags = {} flags = list(map(int, lines[line].split()[0:11])) pattflags["out_integrated"] = flags[0] pattflags["out_ppl"] = flags[1] pattflags["out_ioc"] = flags[2] pattflags["out_ls1"] = flags[3] pattflags["out_ls2"] = flags[4] pattflags["out_ls3"] = flags[5] pattflags["out_prf"] = flags[6] pattflags["out_ins"] = flags[7] pattflags["out_hkl"] = flags[8] pattflags["out_fou"] = flags[9] pattflags["out_ana"] = flags[10] pcr['patterns'][n]['output'] = pattflags line = line + 1 # experiment pattern flags for n in range(npatt): expatt = {} flags = list(map(float, lines[line].split())) expatt["lmd_1"] = flags[0] expatt["lmd_2"] = flags[1] expatt["lmd_ratio"] = flags[2] expatt["background_start"] = flags[3] expatt["prf_cutoff"] = flags[4] expatt["monocrh_polarization_corr"] = flags[5] expatt["absorp_corr"] = flags[6] expatt["asymetry_corr_lim"] = flags[7] expatt["polarization_factor"] = flags[8] expatt["2nd-muR"] = flags[9] pcr['patterns'][n]["flags"].update(expatt) line = line + 1 # refinement flags flags = lines[line].split() pcr["ref_cycles"] = int(flags[0]) pcr["ref_convergence"] = float(flags[1]) pcr["ref_r_atomic"] = float(flags[2]) pcr["ref_r_anisotropic"] = float(flags[3]) pcr["ref_r_profile"] = float(flags[4]) pcr["ref_r_global"] = float(flags[5]) line = line + 1 # refinement pattern for n in range(npatt): refpatt = {} flags = list(map(float, lines[line].split())) refpatt["theta_min"] = flags[0] 
refpatt["steo"] = flags[1] refpatt["theta_max"] = flags[2] refpatt["incident_angle"] = flags[3] refpatt["max_beam_angle"] = flags[4] pcr['patterns'][n]["flags"].update(refpatt) line = line + 1 # excluded regions for n in range(npatt): excluded = pcr['patterns'][n]["flags"]['excluded_regions'] if excluded != 0: ranges = [] for _ in range(excluded): ranges.append(tuple(map(float, lines[line].split()))) line = line + 1 pcr['patterns'][n]["excluded"] = ranges else: line = line + 1 # refined parameters nrefined = int(lines[line].split()[0]) line = line + 1 # data setup per pattern type for n in range(npatt): # powder data setup scattering_unit = pcr['patterns'][n]["flags"]['scattering_unit'] if scattering_unit == 0: flags = list(map(float, lines[line].split())) expatt["zero_point"] = flags[0] expatt["zero_point_code"] = flags[1] expatt["systematic_shift_cos"] = flags[2] expatt["systematic_shift_cos_code"] = flags[3] expatt["systematic_shift_sin"] = flags[4] expatt["systematic_shift_sin_code"] = flags[5] expatt["wavelength"] = flags[6] expatt["wavelength_code"] = flags[7] more = bool(flags[8]) if more: # microadsorption (not implemented) line = line + 1 pcr['patterns'][n]["flags"].update(expatt) elif scattering_unit == 1: raise NotImplementedError elif scattering_unit == 2: raise NotImplementedError line = line + 1 # background coefficients background_type = pcr['patterns'][n]["flags"]['background_type'] if background_type == 0: pcr['patterns'][n]['background_poly'] = list(map(float, lines[line].split())) line = line + 1 pcr['patterns'][n]['background_code'] = list(map(float, lines[line].split())) else: raise NotImplementedError line = line + 1 # start phase reading nphases = pcr["phases"]["nphases"] for ph in range(nphases): phase = {} # read name phase["name"] = lines[line].strip() line = line + 1 # read codes phcodes = lines[line].split() phase["natoms"] = int(phcodes[0]) phase["n_constraints_distance"] = int(phcodes[1]) phase["n_constraints_angle"] = int(phcodes[2]) # TODO can be n_constraints_magmoment phase["job_type"] = int(phcodes[3]) phase["symmetry_reading_mode"] = int(phcodes[4]) phase["size_strain_mode"] = int(phcodes[5]) phase["n_usedef_parameters"] = int(phcodes[6]) phase["weight_coeff"] = float(phcodes[7]) phase["n_propagation_vectors"] = int(phcodes[8]) line = line + 1 more = int(phcodes[9]) if more: raise NotImplementedError # read contribution contributes = list(map(bool, lines[line].split())) phase["pattern"] = {} for n in range(npatt): phase["pattern"][n] = {'contributes': contributes[n]} line = line + 1 # specific pattern parameters if any(contributes): for n in range(npatt): params_1 = list(map(int, lines[line].split())) line = line + 1 params_2 = list(map(float, lines[line].split())) line = line + 1 phase["pattern"][n]["reflexions"] = params_1[0] phase["pattern"][n]["profile_type"] = params_1[1] phase["pattern"][n]["job_type"] = params_1[2] phase["pattern"][n]["Nsp_Ref"] = params_1[3] phase["pattern"][n]["Ph_Shift"] = params_1[4] phase["pattern"][n]["preferred_orientation_d1"] = params_2[0] phase["pattern"][n]["preferred_orientation_d2"] = params_2[1] phase["pattern"][n]["preferred_orientation_d3"] = params_2[2] phase["pattern"][n]["brindley_coeff"] = params_2[3] phase["pattern"][n]["reflx_int_data_weight"] = params_2[4] phase["pattern"][n]["reflx_int_exclusion"] = params_2[5] phase["pattern"][n]["reflx_chi2_weight"] = params_2[6] else: line = line + 1 # spacegroup phase["spacegroup"] = lines[line][0:21].strip() line = line + 1 # atoms natoms = phase["natoms"] atoms = {} 
for n in range(natoms): atom_flags = lines[line].split() line = line + 1 atom_codes = lines[line].split() line = line + 1 atoms[n] = {} atoms[n]["label"] = atom_flags[0] atoms[n]["type"] = atom_flags[1] atoms[n]["x"] = float(atom_flags[2]) atoms[n]["y"] = float(atom_flags[3]) atoms[n]["z"] = float(atom_flags[4]) atoms[n]["biso"] = float(atom_flags[5]) atoms[n]["occ"] = float(atom_flags[6]) atoms[n]["symmetry_subs_in"] = int(atom_flags[7]) atoms[n]["symmetry_subs_fin"] = int(atom_flags[8]) atoms[n]["isotropic_type"] = int(atom_flags[9]) atoms[n]["specie"] = int(atom_flags[10]) atoms[n]["x_code"] = atom_codes[0] atoms[n]["y_code"] = atom_codes[1] atoms[n]["z_code"] = atom_codes[2] atoms[n]["biso_code"] = atom_codes[3] atoms[n]["occ_code"] = atom_codes[4] phase["atoms"] = atoms # profile parameters for n in range(npatt): profile_1 = lines[line].split() line = line + 1 profile_1_codes = list(map(float, lines[line].split())) line = line + 1 profile_2 = list(map(float, lines[line].split())) line = line + 1 profile_2_codes = list(map(float, lines[line].split())) line = line + 1 phase['pattern'][n]['scale'] = float(profile_1[0]) phase['pattern'][n]['shape'] = float(profile_1[1]) phase['pattern'][n]['biso_overall'] = float(profile_1[2]) phase['pattern'][n]['strain_param1'] = float(profile_1[3]) phase['pattern'][n]['strain_param2'] = float(profile_1[4]) phase['pattern'][n]['strain_param3'] = float(profile_1[5]) phase['pattern'][n]['strain_model'] = int(profile_1[6]) phase['pattern'][n]['halfwidth_U'] = profile_2[0] phase['pattern'][n]['halfwidth_V'] = profile_2[1] phase['pattern'][n]['halfwidth_W'] = profile_2[2] phase['pattern'][n]['lorrenzian_strain_X'] = profile_2[3] phase['pattern'][n]['lorrenzian_strain_Y'] = profile_2[4] phase['pattern'][n]['gaussian_particle_size'] = profile_2[5] phase['pattern'][n]['lorenzian_particle_size'] = profile_2[6] cell = list(map(float, lines[line].split())) line = line + 1 cell_codes = list(map(float, lines[line].split())) line = line + 1 phase["cell"] = {} phase['cell']['a'] = cell[0] phase['cell']['b'] = cell[1] phase['cell']['c'] = cell[2] phase['cell']['alpha'] = cell[3] phase['cell']['beta'] = cell[4] phase['cell']['gamma'] = cell[5] phase['pattern'][n]['halfwidth_U'] = profile_2[0] phase['pattern'][n]['halfwidth_V'] = profile_2[1] phase['pattern'][n]['halfwidth_W'] = profile_2[2] phase['pattern'][n]['lorrenzian_strain_X'] = profile_2[3] phase['pattern'][n]['lorrenzian_strain_Y'] = profile_2[4] phase['pattern'][n]['gaussian_particle_size'] = profile_2[5] phase['pattern'][n]['lorenzian_particle_size'] = profile_2[6] orientation = list(map(float, lines[line].split())) line = line + 1 orientation_codes = list(map(float, lines[line].split())) line = line + 1 phase['pattern'][n]['orientation_param1'] = profile_2[0] phase['pattern'][n]['orientation_param2'] = profile_2[1] phase['pattern'][n]['assymetry_param1'] = profile_2[2] phase['pattern'][n]['assymetry_param2'] = profile_2[3] phase['pattern'][n]['assymetry_param3'] = profile_2[4] phase['pattern'][n]['assymetry_param4'] = profile_2[5] pcr["phases"][ph] = phase # pattern to plot pcr["plot_pattern"] = list(map(float, lines[line].split())) return pcr
20,806
def put_file_store(store_name, store, block_on_existing=None, user=None): # noqa: E501 """Create/update store # noqa: E501 :param store_name: Name of the store :type store_name: str :param store: Store information :type store: dict | bytes :rtype: FileStore """ if connexion.request.is_json: store = SwaggerFileStore.from_dict(connexion.request.get_json()) # noqa: E501 if store_name != store.name: return Error(code=400, message="URL and body names don't match"), 400 session = Database.get_session() try: # Check the store q = session.query(FileStore).filter(FileStore.name == store_name) # type: Query # Create new store or use existing model = None if q.first(): # Existing store if block_on_existing: return Error(code=1000, message="Already exists."), 400 model = q.first() else: model = FileStore() session.add(model) model.from_swagger_model(store, user=user) session.commit() q = session.query(FileStore).filter(FileStore.uid == model.uid) return q.first().to_swagger_model(user=user), 200 except Exception as e: logging.exception("File store put failed") session.rollback() return Error(code=500, message="Exception occurred"), 500
20,807
def assign_sections(region_table: RegionTable, sections: Dict[str, int]): """Assign memory sections. This is a packing problem and therefore reasonably complex. A simplistic algorithm is used here which may not always be optimal if user assigned addresses are used for some sections. """ used_space: Set[Tuple[int, int]] = set() def in_used_space(start, end): return start > 0xfff or end > 0xfff or any( map(lambda x: (start >= x[0] and start <= x[1]) or (end >= x[0] and end <= x[1]), used_space)) def find_free_space(size): for _, end in used_space: start_to_try = end + 1 end_to_try = end + size if not in_used_space(start_to_try, end_to_try): return start_to_try, end_to_try raise AssemblyError("ran out of free space") for name, item in region_table.items(): if in_used_space(item.start, item.end): raise AssemblyError("region {} assigned in used space, memory is likely full".format(name)) used_space.add((item.start, item.end)) for section_name, section_size in sections.items(): section_start, section_end = find_free_space(section_size) used_space.add((section_start, section_end)) region_table[section_name] = Region(type="user", start=section_start, end=section_end, count=0)
20,808
def humanize_arrow_date(date): """ Date is an internal UTC ISO-format string. Output should be "Today", "Tomorrow", "in 5 days", etc. Arrow will try to humanize down to the minute, so 'Today' is caught as a special case. """ try: then = arrow.get(date).to('local') now = arrow.utcnow().to('local') if then.date() == now.date(): human = "Today" else: human = then.humanize(now) if human == "in a day": human = "Tomorrow" except Exception: human = date return human
20,809
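A short sketch of how humanize_arrow_date() above behaves, assuming the arrow package is installed and the function is in scope.

import arrow

# An ISO timestamp for the current day humanizes to "Today"; one roughly 24 h
# ahead usually becomes "Tomorrow"; anything unparseable is echoed back as-is.
print(humanize_arrow_date(arrow.utcnow().isoformat()))                 # Today
print(humanize_arrow_date(arrow.utcnow().shift(days=1).isoformat()))   # Tomorrow
print(humanize_arrow_date("not-a-date"))                               # not-a-date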
def getCourseTeeHoles(request, courseId, courseTeeId): """ Getter function for list of courses and tees """ resultList = list(Tee.objects.filter(course_tee_id=courseTeeId).values('id', 'yardage', 'par', 'handicap', 'hole__id', 'hole__name', 'hole__number')) return JsonResponse({'data' : resultList})
20,810
def simulate(SNOwGLoBESdir, tarball_path, detector_input="all", verbose=False): """Takes as input the neutrino flux files and configures and runs the supernova script inside SNOwGLoBES, which outputs calculated event rates expected for a given (set of) detector(s). These event rates are given as a function of the neutrino energy and time, for each interaction channel. Parameters ---------- SNOwGLoBESdir : str Path to directory where SNOwGLoBES is installed. tarball_path : str Path of compressed .tar file produced e.g. by ``generate_time_series()`` or ``generate_fluence()``. detector_input : str Name of detector. If ``"all"``, will use all detectors supported by SNOwGLoBES. verbose : bool Whether to generate verbose output, e.g. for debugging. """ sng = SNOwGLoBES(SNOwGLoBESdir) if detector_input == 'all': detector_input = list(sng.detectors) detector_input.remove('d2O') elif isinstance(detector_input,str): detector_input = [detector_input] result = {} #Extracts data from tarfile and sets up lists of paths and fluxfilenames for later use with TemporaryDirectory(prefix='snowglobes') as tempdir: with tarfile.open(tarball_path) as tar: tar.extractall(tempdir) flux_files = list(Path(tempdir).glob('*.dat')) if len(detector_input)>0: detector_input = tqdm(detector_input, desc='Detectors', leave=False) for det in detector_input: res=sng.run(flux_files, det) result[det]=dict(zip((f.stem for f in flux_files),res)) # save result to file for re-use in collate() cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy' logging.info(f'Saving simulation results to {cache_file}') np.save(cache_file, result) return result
20,811
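A hedged sketch of driving simulate() above; the SNOwGLoBES install path, tarball name and detector choice are placeholders that must match the local installation.

results = simulate(
    SNOwGLoBESdir="/opt/snowglobes",            # hypothetical install path
    tarball_path="fluence_model_xyz.tar.bz2",   # produced e.g. by generate_fluence()
    detector_input="icecube",                   # any detector known to SNOwGLoBES
    verbose=True,
)
for detector, per_flux in results.items():
    print(detector, len(per_flux), "flux files processed")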
def args(): """ Argument Parsing Handler: -m <path_to_keras> : Path to keras model -o <model_output> : Path to directory that will store pb model """ parser = argparse.ArgumentParser() parser.add_argument("-m", "--path_to_keras", type=str, help="Path to keras model.", default='') parser.add_argument("-o", "--model_output", type=str, help="Path to directory that will store pb model.", default='') return parser.parse_args()
20,812
def test_add_htmls_to_section(): """Test adding html str to mne report.""" report = Report(info_fname=raw_fname, subject='sample', subjects_dir=subjects_dir) html = '<b>MNE-Python is AWESOME</b>' caption, section = 'html', 'html_section' report.add_htmls_to_section(html, caption, section) idx = report._sectionlabels.index('report_' + section) html_compare = report.html[idx] assert (html in html_compare) assert (repr(report))
20,813
def set_transactions_received(sender, instance, signal, created, **kwargs): """ Check if a transaction was received """ if instance.balance <= 0: return # Balance too small for transaction_request in instance.transactionrequest_set.all(): if transaction_request.payment_received: continue amount = transaction_request.amount + transaction_request.service_fee if instance.balance >= amount: transaction_request.payment_received = True transaction_request.save()
20,814
def create_zone_ajax(request): """ This view tries to create a new zone and returns an JSON with either 'success' = True or 'success' = False and some errors. """ qd = request.POST.copy() # See if the domain exists. # Fail if it already exists or if it's under a delegated domain. root_domain = qd.get('root_domain', None) primary = qd.get('soa_primary', None) contact = qd.get('soa_contact', None) # Find all the NS entries nss = [] number_re = re.compile('nameserver_(\d+)') # parse nameserver bits from POST request. # compile some tuples that look like: # (<server_fqdn>, <ttl>, [<view_name>,..]) for k, server in request.POST.iteritems(): if k.startswith('nameserver_'): n = number_re.search(k) if not n: continue ns_number = n.groups()[0] views = [] if qd.get('private_view_{0}'.format(ns_number), 'off') == 'on': views.append('private') if qd.get('public_view_{0}'.format(ns_number), 'off') == 'on': views.append('public') ttl = qd.get('ttl_{0}'.format(ns_number)) if ttl and ttl.isdigit(): ttl = int(ttl) else: ttl = None nss.append( (server, ttl, views) ) try: with transaction.commit_on_success(): domain = _create_zone(root_domain, primary, contact, nss) except (ValueError, ValidationError), e: return HttpResponse(json.dumps({ 'success': False, 'error': str(e) }), status=400) return HttpResponse(json.dumps({ 'success': True, 'success_url': '/en-US/core/search/#q=zone=:{0}'.format( domain.name ) }))
20,815
def print_shape(torch_dict, paddle_dict): """ Compare state dictionary from pytorch and paddle. :param torch_dict: :param paddle_dict: :return: """ for k, v in torch_dict.items(): if k in paddle_dict: print("key:", k, ", torch shape:", v.shape, ", paddle shape:", paddle_dict[k].shape)
20,816
def region_distance(**kwargs): """Create scatter plot of distance to theoretical AF over every region.""" reader, regions = _setup_region_values(**kwargs) region_distance_main( reader, kwargs.get("output_dir"), regions, kwargs.get("name"), kwargs.get("dpi") )
20,817
def main_plot(): """The view for rendering the scatter chart""" img = get_main_image() return send_file(img, mimetype='image/png', cache_timeout=0)
20,818
def test_arbitration_id_integer(expected: int, parts: ArbitrationIdParts) -> None: """It should convert parts to an arbitration id.""" c = ArbitrationId(parts=parts) assert c.id == expected
20,819
def symbols_involved(expression): """ Return the symbols that are present in this expression. """ return expression.atoms(sympy.Symbol)
20,820
def Ising2dT(beta = 0.4, h = 0, isSym = False): """ T = Ising2dT(beta, h, isSym). ------------------------- Set up the initial tensor for the 2d classical Ising model on a square lattice. Arguments: beta is the dimensionless coupling beta * J = J / kT (with J fixed to 1 here), and h is the dimensionless field beta * h = h / kT, where J and h are the conventional coupling constants. Return: a rank-4 tensor T[i,j,k,l]. Each index of the tensor represents a physical classical spin, and the tensor T represents the Boltzmann weight for the interactions on one plaquette. """ pars = {"model":"ising", "dtype":"float64", "J":1, "H":h, "beta":beta, "symmetry_tensors":isSym} T0 = get_initial_tensor(pars) return T0
20,821
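Assuming the get_initial_tensor() helper used above is importable, building and inspecting the plaquette tensor could look like this.

# Hypothetical usage; the exact tensor class returned depends on get_initial_tensor().
T = Ising2dT(beta=0.44, h=0.0, isSym=False)   # near the critical coupling of the square lattice
print(T.shape)                                # expected: a rank-4 tensor, e.g. (2, 2, 2, 2)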
def __grid_count(self): """Get number of grids in the case""" try: return self.__case_stub.GetGridCount(self.__request()).count except grpc.RpcError as exception: if exception.code() == grpc.StatusCode.NOT_FOUND: return 0 return 0
20,822
def run( # noqa: C901 in_json: in_params.InputConfigurationType, out_dir: str, epi_step: int = 30, region_size: int = 500, disparity_margin: float = 0.02, epipolar_error_upper_bound: float = 10.0, epipolar_error_maximum_bias: float = 0.0, elevation_delta_lower_bound: float = -1000.0, elevation_delta_upper_bound: float = 1000.0, mode: str = "local_dask", nb_workers: int = 4, walltime: str = "00:59:00", check_inputs: bool = False, ): """ Main function of the prepare step subcommand This function will perform the following steps: 1. Compute stereo-rectification grids for the input pair 2. Compute all possible sift matches in epipolar geometry 3. Derive an optimal disparity range to explore from the matches 4. Derive a bilinear correction model of the stereo-rectification grid for right image in order to minimize epipolar error 5. Apply correction to right grid 6. Export left and corrected right grid :param in_json: dictionary describing input data (see README.md for format) :param out_dir: Directory where all outputs will be written, including a content.json file describing its content :param epi_step: Step of the epipolar grid to compute (in pixels in epipolar geometry) :param region_size: Size of regions used for sift matching :param disparity_margin: Percent of the disparity range width to add at each end as security margin :param epipolar_error_upper_bound: Upper bound of expected epipolar error (in pixels) :param epipolar_error_maximum_bias: Maximum bias for epipolar error (in pixels) :param elevation_delta_lower_bound: Lower bound for elevation delta with respect to initial DSM (in meters) :param elevation_delta_upper_bound: Upper bound for elevation delta with respect to initial DSM (in meters) :param mode: Parallelization mode :param nb_workers: Number of dask workers to use for the sift matching step :param walltime: Walltime of the dask workers :param check_inputs: activation of the inputs consistency checking """ out_dir = os.path.abspath(out_dir) # Ensure that outdir exists try: os.makedirs(out_dir) except OSError as exc: if exc.errno == errno.EEXIST and os.path.isdir(out_dir): pass else: raise log_conf.add_log_file(out_dir, "prepare") if not check_inputs: logging.info( "The inputs consistency will not be checked. 
" "To enable the inputs checking, add --check_inputs " "to your command line" ) # Check configuration dict config = inputs.check_json(in_json, json_schema.input_conf_schema()) # Retrieve static parameters (sift and low res dsm) static_params = static_conf.get_cfg() # Initialize output json dict # (use local variables to keep indentation and line not too long) epi_step_tag = output_prepare.EPI_STEP_TAG disp_margin_tag = output_prepare.DISPARITY_MARGIN_TAG epi_error_up_bound_tag = output_prepare.EPIPOLAR_ERROR_UPPER_BOUND_TAG epi_error_max_bias_tag = output_prepare.EPIPOLAR_ERROR_MAXIMUM_BIAS_TAG elev_delta_low_bound_tag = output_prepare.ELEVATION_DELTA_LOWER_BOUND_TAG elev_delta_up_bound_tag = output_prepare.ELEVATION_DELTA_UPPER_BOUND_TAG out_json = { in_params.INPUT_SECTION_TAG: config, output_prepare.PREPROCESSING_SECTION_TAG: { output_prepare.PREPROCESSING_VERSION_TAG: __version__, output_prepare.PREPROCESSING_PARAMETERS_SECTION_TAG: { epi_step_tag: epi_step, disp_margin_tag: disparity_margin, epi_error_up_bound_tag: epipolar_error_upper_bound, epi_error_max_bias_tag: epipolar_error_maximum_bias, elev_delta_low_bound_tag: elevation_delta_lower_bound, elev_delta_up_bound_tag: elevation_delta_upper_bound, }, in_params.STATIC_PARAMS_TAG: { static_conf.prepare_tag: static_params[static_conf.prepare_tag], static_conf.loaders_tag: static_params[static_conf.loaders_tag], static_conf.geoid_path_tag: static_conf.get_geoid_path(), }, output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG: {}, }, } # Read input parameters # TODO Refacto conf : why not a config.get() for these two here ? # TODO looks like we only get in_params from the config file, # so maybe a single conf.read_config() or smth like that ? img1 = config[in_params.IMG1_TAG] img2 = config[in_params.IMG2_TAG] srtm_dir = config.get(in_params.SRTM_DIR_TAG, None) nodata1 = config.get(in_params.NODATA1_TAG, None) nodata2 = config.get(in_params.NODATA2_TAG, None) mask1 = config.get(in_params.MASK1_TAG, None) mask2 = config.get(in_params.MASK2_TAG, None) mask1_classes = config.get(in_params.MASK1_CLASSES_TAG, None) mask2_classes = config.get(in_params.MASK2_CLASSES_TAG, None) default_alt = config.get(in_params.DEFAULT_ALT_TAG, 0) # retrieve masks classes usages classes_usage = {} if mask1_classes is not None: mask1_classes_dict = mask_classes.read_mask_classes(mask1_classes) classes_usage[ output_prepare.MASK1_IGNORED_BY_SIFT_MATCHING_TAG ] = mask1_classes_dict.get( mask_classes.ignored_by_sift_matching_tag, None ) if mask2_classes is not None: mask2_classes_dict = mask_classes.read_mask_classes(mask2_classes) classes_usage[ output_prepare.MASK2_IGNORED_BY_SIFT_MATCHING_TAG ] = mask2_classes_dict.get( mask_classes.ignored_by_sift_matching_tag, None ) if mask1_classes is not None or mask2_classes is not None: out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_PARAMETERS_SECTION_TAG ][output_prepare.PREPARE_MASK_CLASSES_USAGE_TAG] = classes_usage # log information considering reference altitudes used if srtm_dir is not None: srtm_tiles = os.listdir(srtm_dir) if len(srtm_tiles) == 0: logging.warning( "SRTM directory is empty, " "the default altitude will be used as reference altitude." 
) else: logging.info( "Indicated SRTM tiles valid regions " "will be used as reference altitudes " "(the default altitude is used " "for undefined regions of the SRTM)" ) else: logging.info("The default altitude will be used as reference altitude.") if check_inputs: logging.info("Checking inputs consistency") if ( inputs.rasterio_get_nb_bands(img1) != 1 or inputs.rasterio_get_nb_bands(img2) != 1 ): raise Exception( "{} and {} are not mono-band images".format(img1, img2) ) if mask1 is not None: if inputs.rasterio_get_size(img1) != inputs.rasterio_get_size( mask1 ): raise Exception( "The image {} and the mask {} " "do not have the same size".format(img1, mask1) ) if mask2 is not None: if inputs.rasterio_get_size(img2) != inputs.rasterio_get_size( mask2 ): raise Exception( "The image {} and the mask {} " "do not have the same size".format(img2, mask2) ) with rio.open(img1) as img1_reader: trans = img1_reader.transform if trans.e < 0: logging.warning( "{} seems to have an incoherent pixel size. " "Input images has to be in sensor geometry.".format(img1) ) with rio.open(img2) as img2_reader: trans = img2_reader.transform if trans.e < 0: logging.warning( "{} seems to have an incoherent pixel size. " "Input images has to be in sensor geometry.".format(img2) ) # Check geometric models consistency if not static_conf.get_geometry_loader().check_products_consistency(config): raise Exception("Problem while reading the geometric models") # Check that the envelopes intersect one another logging.info("Computing images envelopes and their intersection") shp1 = os.path.join(out_dir, "left_envelope.shp") shp2 = os.path.join(out_dir, "right_envelope.shp") out_envelopes_intersection = os.path.join( out_dir, "envelopes_intersection.gpkg" ) inter_poly, ( inter_xmin, inter_ymin, inter_xmax, inter_ymax, ) = projection.ground_intersection_envelopes( config, shp1, shp2, out_envelopes_intersection, dem_dir=srtm_dir, default_alt=default_alt, ) conf_out_dict = out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ] conf_out_dict[output_prepare.LEFT_ENVELOPE_TAG] = shp1 conf_out_dict[output_prepare.RIGHT_ENVELOPE_TAG] = shp2 conf_out_dict[ output_prepare.ENVELOPES_INTERSECTION_TAG ] = out_envelopes_intersection conf_out_dict[output_prepare.ENVELOPES_INTERSECTION_BB_TAG] = [ inter_xmin, inter_ymin, inter_xmax, inter_ymax, ] if check_inputs: logging.info("Checking DEM coverage") _, epsg1 = inputs.read_vector(shp1) __, dem_coverage = projection.compute_dem_intersection_with_poly( srtm_dir, inter_poly, epsg1 ) if dem_coverage < 100.0: logging.warning( "The input DEM covers {}% of the useful zone".format( int(dem_coverage) ) ) # Generate rectification grids ( grid1, grid2, grid_origin, grid_spacing, epipolar_size, disp_to_alt_ratio, ) = grids.generate_epipolar_grids( config, dem=srtm_dir, default_alt=default_alt, epipolar_step=epi_step, geoid=static_conf.get_geoid_path(), ) out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.EPIPOLAR_SIZE_X_TAG] = epipolar_size[0] out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.EPIPOLAR_SIZE_Y_TAG] = epipolar_size[1] out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.EPIPOLAR_ORIGIN_X_TAG] = grid_origin[0] out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.EPIPOLAR_ORIGIN_Y_TAG] = 
grid_origin[1] out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.EPIPOLAR_SPACING_X_TAG] = grid_spacing[0] out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.EPIPOLAR_SPACING_Y_TAG] = grid_spacing[1] out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.DISP_TO_ALT_RATIO_TAG] = disp_to_alt_ratio logging.info( "Size of epipolar images: {}x{} pixels".format( epipolar_size[0], epipolar_size[1] ) ) logging.info( "Disparity to altitude factor: {} m/pixel".format(disp_to_alt_ratio) ) # Get satellites angles from ground: Azimuth to north, Elevation angle ( left_az, left_elev_angle, right_az, right_elev_angle, convergence_angle, ) = projection.get_ground_angles(config) logging.info( "Left satellite coverture: Azimuth angle : {:.1f}°, " "Elevation angle: {:.1f}°".format(left_az, left_elev_angle) ) logging.info( "Right satellite coverture: Azimuth angle : {:.1f}°, " "Elevation angle: {:.1f}°".format(right_az, right_elev_angle) ) logging.info( "Stereo satellite convergence angle from ground: {:.1f}°".format( convergence_angle ) ) out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.LEFT_AZIMUTH_ANGLE_TAG] = left_az out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.LEFT_ELEVATION_ANGLE_TAG] = left_elev_angle out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.RIGHT_AZIMUTH_ANGLE_TAG] = right_az out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.RIGHT_ELEVATION_ANGLE_TAG] = right_elev_angle out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.CONVERGENCE_ANGLE_TAG] = convergence_angle logging.info("Sparse matching ...") # Compute the full range needed for sparse matching disp_lower_bound = elevation_delta_lower_bound / disp_to_alt_ratio disp_upper_bound = elevation_delta_upper_bound / disp_to_alt_ratio disparity_range_width = disp_upper_bound - disp_lower_bound logging.info( "Full disparity range width " "for sparse matching: {} pixels".format(disparity_range_width) ) disparity_range_center = ( elevation_delta_upper_bound + elevation_delta_lower_bound ) / (2 * disp_to_alt_ratio) # Compute the number of offsets to consider so as to explore the full range nb_splits = 1 + int(math.floor(float(disparity_range_width) / region_size)) actual_region_size = int( math.ceil((region_size + disparity_range_width) / nb_splits) ) actual_range = nb_splits * actual_region_size actual_range_start = ( disparity_range_center - actual_range / 2 + region_size / 2 ) logging.info( "Disparity range will be explored " "in {} regions of size {}, starting at {} pixels".format( nb_splits, actual_region_size, actual_range_start ) ) regions = tiling.split( 0, 0, epipolar_size[0], epipolar_size[1], region_size, region_size ) logging.info( "Number of splits to process for sparse matching: {}".format( len(regions) ) ) cluster = None client = None # TODO: prepare mp mode # Use dask # Save dask config used dask_config_used = dask.config.config outputs.write_dask_config( dask_config_used, out_dir, output_prepare.PREPROCESSING_DASK_CONFIG_TAG ) use_dask = {"local_dask": True, "pbs_dask": True} if mode not in use_dask: raise 
NotImplementedError("{} mode is not implemented".format(mode)) if mode == "local_dask": cluster, client = start_local_cluster(nb_workers) else: cluster, client = start_cluster(nb_workers, walltime, out_dir) # Write temporary grid tmp1 = os.path.join(out_dir, "tmp1.tif") grids.write_grid(grid1, tmp1, grid_origin, grid_spacing) tmp2 = os.path.join(out_dir, "tmp2.tif") grids.write_grid(grid2, tmp2, grid_origin, grid_spacing) # Compute margins for right region margins = [ int( math.floor(epipolar_error_upper_bound + epipolar_error_maximum_bias) ), int( math.floor(epipolar_error_upper_bound + epipolar_error_maximum_bias) ), int( math.floor(epipolar_error_upper_bound + epipolar_error_maximum_bias) ), int( math.ceil(epipolar_error_upper_bound + epipolar_error_maximum_bias) ), ] logging.info( "Margins added to right region for matching: {}".format(margins) ) # Matching tasks as delayed objects delayed_matches = [] for left_region in regions: for offset in range(nb_splits): offset_ = actual_range_start + offset * actual_region_size # Pad region to include margins for right image right_region = [ left_region[0] + offset_, left_region[1], left_region[0] + offset_ + actual_region_size, left_region[3], ] # Pad with margin and crop to largest region right_region = tiling.crop( tiling.pad(right_region, margins), [0, 0, epipolar_size[0], epipolar_size[1]], ) # Avoid empty regions if not tiling.empty(right_region): delayed_matches.append( dask.delayed(matching_wrapper)( left_region, right_region, img1, img2, tmp1, tmp2, mask1, mask2, mask1_classes, mask2_classes, nodata1, nodata2, epipolar_size[0], epipolar_size[1], ) ) # Transform delayed tasks to future logging.info("Submitting {} tasks to dask".format(len(delayed_matches))) future_matches = client.compute(delayed_matches) # Initialize output matches array matches = np.empty((0, 4)) # Wait for all matching tasks to be completed for __, result in tqdm( as_completed(future_matches, with_results=True), total=len(future_matches), desc="Performing matching ...", ): matches = np.concatenate((matches, result)) raw_nb_matches = matches.shape[0] logging.info( "Raw number of matches found: {} matches".format(raw_nb_matches) ) # Export matches logging.info("Writing raw matches file") raw_matches_array_path = os.path.join(out_dir, "raw_matches.npy") out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.RAW_MATCHES_TAG] = raw_matches_array_path np.save(raw_matches_array_path, matches) # Filter matches that are out of margin if epipolar_error_maximum_bias == 0: epipolar_median_shift = 0 else: epipolar_median_shift = np.median(matches[:, 3] - matches[:, 1]) matches = matches[ ((matches[:, 3] - matches[:, 1]) - epipolar_median_shift) >= -epipolar_error_upper_bound ] matches = matches[ ((matches[:, 3] - matches[:, 1]) - epipolar_median_shift) <= epipolar_error_upper_bound ] matches_discarded_message = "{} matches discarded \ because their epipolar error is greater \ than --epipolar_error_upper_bound = {} pix".format( raw_nb_matches - matches.shape[0], epipolar_error_upper_bound ) if epipolar_error_maximum_bias != 0: matches_discarded_message += " considering a shift of {} pix".format( epipolar_median_shift ) logging.info(matches_discarded_message) filtered_nb_matches = matches.shape[0] matches = matches[matches[:, 2] - matches[:, 0] >= disp_lower_bound] matches = matches[matches[:, 2] - matches[:, 0] <= disp_upper_bound] logging.info( "{} matches discarded because they fall outside of disparity range " 
"defined by --elevation_delta_lower_bound = {} m and " "--elevation_delta_upper_bound = {} m : [{} pix., {} pix.]".format( filtered_nb_matches - matches.shape[0], elevation_delta_lower_bound, elevation_delta_upper_bound, disp_lower_bound, disp_upper_bound, ) ) # Retrieve number of matches nb_matches = matches.shape[0] # Check if we have enough matches # TODO: we could also make it a warning and continue with uncorrected grid # and default disparity range if nb_matches < 100: logging.error( "Insufficient amount of matches found (< 100), can not safely " "estimate epipolar error correction and disparity range" ) # stop cluster stop_cluster(cluster, client) # Exit immediately return logging.info( "Number of matches kept for epipolar " "error correction: {} matches".format(nb_matches) ) # Remove temporary files os.remove(tmp1) os.remove(tmp2) # Compute epipolar error epipolar_error = matches[:, 1] - matches[:, 3] logging.info( "Epipolar error before correction: mean = {:.3f} pix., " "standard deviation = {:.3f} pix., max = {:.3f} pix.".format( np.mean(epipolar_error), np.std(epipolar_error), np.max(np.fabs(epipolar_error)), ) ) # Commpute correction for right grid logging.info("Generating correction for right epipolar grid ...") corrected_right_grid, corrected_matches, __, __ = grids.correct_right_grid( matches, grid2, grid_origin, grid_spacing ) corrected_epipolar_error = corrected_matches[:, 1] - corrected_matches[:, 3] logging.info( "Epipolar error after correction: mean = {:.3f} pix., " "standard deviation = {:.3f} pix., max = {:.3f} pix.".format( np.mean(corrected_epipolar_error), np.std(corrected_epipolar_error), np.max(np.fabs(corrected_epipolar_error)), ) ) # TODO: add stats in content.json out_left_grid = os.path.join(out_dir, "left_epipolar_grid.tif") out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.LEFT_EPIPOLAR_GRID_TAG] = out_left_grid grids.write_grid(grid1, out_left_grid, grid_origin, grid_spacing) # Export corrected right grid out_right_grid = os.path.join(out_dir, "right_epipolar_grid.tif") out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.RIGHT_EPIPOLAR_GRID_TAG] = out_right_grid grids.write_grid( corrected_right_grid, out_right_grid, grid_origin, grid_spacing ) # Export uncorrected right grid logging.info("Writing uncorrected right grid") out_right_grid_uncorrected = os.path.join( out_dir, "right_epipolar_grid_uncorrected.tif" ) out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][ output_prepare.RIGHT_EPIPOLAR_UNCORRECTED_GRID_TAG ] = out_right_grid_uncorrected grids.write_grid( grid2, out_right_grid_uncorrected, grid_origin, grid_spacing ) # Compute the disparity range (we filter matches that are too off epipolar # lins after correction) corrected_std = np.std(corrected_epipolar_error) corrected_matches = corrected_matches[ np.fabs(corrected_epipolar_error) < 3 * corrected_std ] logging.info( "{} matches discarded because " "their epipolar error is greater than 3*stdev of epipolar error " "after correction (3*stddev = {:.3f} pix.)".format( nb_matches - corrected_matches.shape[0], 3 * corrected_std ) ) logging.info( "Number of matches kept " "for disparity range estimation: {} matches".format( corrected_matches.shape[0] ) ) dmin, dmax = sparse_matching.compute_disparity_range( corrected_matches, static_conf.get_disparity_outliers_rejection_percent(), ) margin = abs(dmax - dmin) * 
disparity_margin dmin -= margin dmax += margin logging.info( "Disparity range with margin: [{:.3f} pix., {:.3f} pix.] " "(margin = {:.3f} pix.)".format(dmin, dmax, margin) ) out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.MINIMUM_DISPARITY_TAG] = dmin out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.MAXIMUM_DISPARITY_TAG] = dmax logging.info( "Equivalent range in meters: [{:.3f} m, {:.3f} m] " "(margin = {:.3f} m)".format( dmin * disp_to_alt_ratio, dmax * disp_to_alt_ratio, margin * disp_to_alt_ratio, ) ) # Export matches logging.info("Writing matches file") matches_array_path = os.path.join(out_dir, "matches.npy") out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.MATCHES_TAG] = matches_array_path np.save(matches_array_path, corrected_matches) # Devibration part : # 1. Compute low resolution DSM from sparse matching matches # 2. Get Initial DEM (typically SRTM) on the same grid resolution # 3. Correction estimation of DSM difference (with splines) # 4. Compute corrected low resolution DSM # and corrected disparity to use in compute_dsm pipeline align mode. # First, triangulate matches points_cloud_from_matches = triangulation.triangulate_matches( out_json, corrected_matches ) # Then define the size of the lower res DSM to rasterize low_res_dsm_params = static_conf.get_low_res_dsm_params() lowres_dsm_resolution = getattr( low_res_dsm_params, static_conf.low_res_dsm_resolution_in_degree_tag, # Value in degree ) lowres_dsm_sizex = int( math.ceil((inter_xmax - inter_xmin) / lowres_dsm_resolution) ) lowres_dsm_sizey = int( math.ceil((inter_ymax - inter_ymin) / lowres_dsm_resolution) ) logging.info( "Generating low resolution ({}°) DSM" " from matches ({}x{})".format( lowres_dsm_resolution, lowres_dsm_sizex, lowres_dsm_sizey ) ) lowres_dsm = rasterization.simple_rasterization_dataset( [points_cloud_from_matches], lowres_dsm_resolution, 4326, color_list=None, xstart=inter_xmin, ystart=inter_ymax, xsize=lowres_dsm_sizex, ysize=lowres_dsm_sizey, ) lowres_dsm_file = os.path.join(out_dir, "lowres_dsm_from_matches.nc") # TODO add proper CRS info lowres_dsm.to_netcdf(lowres_dsm_file) out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.LOWRES_DSM_TAG] = lowres_dsm_file # Now read the exact same grid on initial DEM lowres_initial_dem = otb_pipelines.read_lowres_dem( startx=inter_xmin, starty=inter_ymax, sizex=lowres_dsm_sizex, sizey=lowres_dsm_sizey, dem=srtm_dir, default_alt=default_alt, geoid=static_conf.get_geoid_path(), resolution=lowres_dsm_resolution, ) lowres_initial_dem_file = os.path.join(out_dir, "lowres_initial_dem.nc") lowres_initial_dem.to_netcdf(lowres_initial_dem_file) out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.LOWRES_INITIAL_DEM_TAG] = lowres_initial_dem_file # also write the difference lowres_elevation_difference_file = os.path.join( out_dir, "lowres_elevation_diff.nc" ) lowres_dsm_diff = lowres_initial_dem - lowres_dsm (lowres_dsm_diff).to_netcdf(lowres_elevation_difference_file) out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][ output_prepare.LOWRES_ELEVATION_DIFFERENCE_TAG ] = lowres_elevation_difference_file # Now, estimate a correction to align DSM on the lowres initial DEM splines = None 
cfg_low_res_dsm_min_sizex = getattr( low_res_dsm_params, static_conf.low_res_dsm_min_sizex_for_align_tag ) cfg_low_res_dsm_min_sizey = getattr( low_res_dsm_params, static_conf.low_res_dsm_min_sizey_for_align_tag ) if ( lowres_dsm_sizex > cfg_low_res_dsm_min_sizex and lowres_dsm_sizey > cfg_low_res_dsm_min_sizey ): logging.info( "Estimating correction " "between low resolution DSM and initial DEM" ) # First, we estimate direction of acquisition time for both images time_direction_vector, _, _ = projection.acquisition_direction( config, srtm_dir ) origin = [ float(lowres_dsm_diff[cst.X][0].values), float(lowres_dsm_diff[cst.Y][0].values), ] out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.TIME_DIRECTION_LINE_ORIGIN_X_TAG] = origin[0] out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][output_prepare.TIME_DIRECTION_LINE_ORIGIN_Y_TAG] = origin[1] out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][ output_prepare.TIME_DIRECTION_LINE_VECTOR_X_TAG ] = time_direction_vector[ 0 ] out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][ output_prepare.TIME_DIRECTION_LINE_VECTOR_Y_TAG ] = time_direction_vector[ 1 ] # Then we estimate the correction splines splines = devib.lowres_initial_dem_splines_fit( lowres_dsm, lowres_initial_dem, origin, time_direction_vector, ext=getattr(low_res_dsm_params, static_conf.low_res_dsm_ext_tag), order=getattr( low_res_dsm_params, static_conf.low_res_dsm_order_tag ), ) else: logging.info( "Low resolution DSM is not large enough " "(minimum size is {}x{}) " "to estimate correction " "to fit initial DEM, skipping ...".format( cfg_low_res_dsm_min_sizex, cfg_low_res_dsm_min_sizey ) ) if splines is not None: # Save model to file lowres_dem_splines_fit_file = os.path.join( out_dir, "lowres_dem_splines_fit.pck" ) with open(lowres_dem_splines_fit_file, "wb") as splines_fit_file_reader: pickle.dump(splines, splines_fit_file_reader) out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][ output_prepare.LOWRES_DEM_SPLINES_FIT_TAG ] = lowres_dem_splines_fit_file logging.info("Generating corrected low resolution DSM from matches") # Estimate correction on point cloud from matches points_cloud_from_matches_z_correction = splines( projection.project_coordinates_on_line( points_cloud_from_matches.x, points_cloud_from_matches.y, origin, time_direction_vector, ) ) # Estimate disparity correction points_cloud_disp_correction = ( points_cloud_from_matches_z_correction / disp_to_alt_ratio ) # Correct matches disparity z_corrected_matches = corrected_matches z_corrected_matches[:, 2] = ( z_corrected_matches[:, 2] - points_cloud_disp_correction[:, 0] ) # Triangulate and rasterize again corrected_points_cloud_from_matches = ( triangulation.triangulate_matches(out_json, z_corrected_matches) ) corrected_lowres_dsm = rasterization.simple_rasterization_dataset( [corrected_points_cloud_from_matches], lowres_dsm_resolution, corrected_points_cloud_from_matches.attrs["epsg"], xstart=inter_xmin, ystart=inter_ymax, xsize=lowres_dsm_sizex, ysize=lowres_dsm_sizey, ) # Write corrected lowres dsm corrected_lowres_dsm_file = os.path.join( out_dir, "corrected_lowres_dsm_from_matches.nc" ) # TODO add proper CRS info corrected_lowres_dsm.to_netcdf(corrected_lowres_dsm_file) out_json[output_prepare.PREPROCESSING_SECTION_TAG][ 
output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][ output_prepare.CORRECTED_LOWRES_DSM_TAG ] = corrected_lowres_dsm_file # also write the difference corrected_lowres_elevation_difference_file = os.path.join( out_dir, "corrected_lowres_elevation_diff.nc" ) corrected_lowres_dsm_diff = ( lowres_initial_dem - corrected_lowres_dsm ) (corrected_lowres_dsm_diff).to_netcdf( corrected_lowres_elevation_difference_file ) out_json[output_prepare.PREPROCESSING_SECTION_TAG][ output_prepare.PREPROCESSING_OUTPUT_SECTION_TAG ][ output_prepare.CORRECTED_LOWRES_ELEVATION_DIFFERENCE_TAG ] = corrected_lowres_elevation_difference_file # Write the output json try: inputs.check_json(out_json, output_prepare.content_schema()) except CheckerError as check_error: logging.warning( "content.json does not comply with schema: {}".format(check_error) ) out_json_path = os.path.join(out_dir, "content.json") output_prepare.write_preprocessing_content_file(out_json, out_json_path) # stop cluster stop_cluster(cluster, client)
20,823
def load_fixtures(): """Loads data from tests/fixtures into the connected database""" db.database_proxy.create_tables([StorageGroup, StorageNode]) # Check we're starting from a clean slate assert StorageGroup.select().count() == 0 assert StorageNode.select().count() == 0 with open(path.join(tests_path, "fixtures/storage.yml")) as f: fixtures = yaml.safe_load(f) StorageGroup.insert_many(fixtures["groups"]).execute() groups = dict(StorageGroup.select(StorageGroup.name, StorageGroup.id).tuples()) # fixup foreign keys for the nodes for node in fixtures["nodes"]: node["group"] = groups[node["group"]] # bulk load the nodes StorageNode.insert_many(fixtures["nodes"]).execute() nodes = dict(StorageNode.select(StorageNode.name, StorageNode.id).tuples()) return {"groups": groups, "nodes": nodes}
20,824
def timer(method): """ Decorator to time a function. :param method: Method to time. :type method: function """ def wrapper(*args, **kwargs): """ Start clock, do function with args, print rounded elapsed time. """ starttime = compat.perf_clock() method(*args, **kwargs) endtime = compat.perf_clock() - starttime endtime_proper = math.ceil(endtime * 100) / 100 # rounding mins, secs = divmod(endtime_proper, 60) hrs, mins = divmod(mins, 60) print("COMPLETED IN {0:02d}:{1:02d}:{2:02d}".format(int(hrs), int(mins), int(secs))) return wrapper
20,825
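A usage sketch for the timer decorator above; compat.perf_clock is whatever the surrounding project aliases it to (time.perf_counter would be a reasonable stand-in).

import time

@timer
def slow_task(n):
    """Pretend to work for n seconds."""
    time.sleep(n)

slow_task(2)   # prints something like: COMPLETED IN 00:00:02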
def test_get_tool_descriptor_given_relative_path_success(): """Test `GET /tools/{id}/versions/{version_id}/{type}/descriptor/ {relative_path}` for retrieving descriptor given relative file_path. """ relative_path = MOCK_DESCRIPTOR_FILE["tool_file"]["path"] endpoint = ( f"/tools/{test_obj_id}/versions/{test_version_obj_id}/CWL/descriptor/" f"{relative_path}" ) response = requests.get(base_url + endpoint, headers=headers) assert response.status_code == 200 assert json.loads(response.content) == MOCK_DESCRIPTOR_FILE["file_wrapper"]
20,826
def plot_map_from_nc(path_nc, out_path, var_name, xaxis_min=0.0, xaxis_max=1.1, xaxis_step=0.1, annotate_date=False, yr=0, date=-1, xlabel='', title='', tme_name='time', show_plot=False, any_time_data=True, format='%.2f', land_bg=True, cmap=plt.cm.RdBu, grid=False, fill_mask=False): """ Plot var_name variable from netCDF file \b Args: path_nc: Name of netCDF file including path out_path: Output directory path + file name var_name: Name of variable in netCDF file to plot on map Returns: Nothing, side-effect: save an image """ logger.info('Plotting ' + var_name + ' in ' + path_nc) # Read netCDF file and get time dimension nc = util.open_or_die(path_nc, 'r', format='NETCDF4') lon = nc.variables['lon'][:] lat = nc.variables['lat'][:] if any_time_data: ts = nc.variables[tme_name][:] # time-series if date == -1: # Plot either the last year {len(ts)-1} or whatever year the user wants plot_yr = len(ts) - 1 else: plot_yr = date - ts[0] # Draw empty basemap m = Basemap(projection='robin', resolution='c', lat_0=0, lon_0=0) # m.drawcoastlines() # m.drawcountries() # Find x,y of map projection grid. lons, lats = np.meshgrid(lon, lat) x, y = m(lons, lats) if fill_mask: nc_vars = np.ma.filled(nc.variables[var_name], fill_value=np.nan) else: nc_vars = np.array(nc.variables[var_name]) # Plot # Get data for the last year from the netCDF file array if any_time_data: mask_data = maskoceans(lons, lats, nc_vars[int(plot_yr), :, :]) else: mask_data = maskoceans(lons, lats, nc_vars[:, :]) m.etopo() if land_bg: m.drawlsmask(land_color='white', ocean_color='none', lakes=True) # land_color = (0, 0, 0, 0) for transparent else: m.drawlsmask(land_color=(0, 0, 0, 0), ocean_color='none', lakes=True) cs = m.contourf(x, y, mask_data, np.arange(xaxis_min, xaxis_max, xaxis_step), cmap=cmap) if annotate_date: plt.annotate(str(yr), xy=(0.45, 0.1), xycoords='axes fraction', size=20) if grid: # where labels intersect = [left, right, top, bottom] m.drawmeridians(np.arange(-180, 180, 60), labels=[0,0,1,0], labelstyle='+/-', linewidth=0.5) m.drawparallels([-40, 0, 40], labels=[1, 0, 0, 0], labelstyle='+/-', linewidth=0.5) # Add colorbar cb = m.colorbar(cs, "bottom", size="3%", pad='2%', extend='both', drawedges=False, spacing='proportional', format=format) cb.set_label(xlabel) plt.title(title, y=1.08) plt.tight_layout() if not show_plot: plt.savefig(out_path, dpi=constants.DPI) plt.close() else: plt.show() nc.close() return out_path
20,827
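An illustrative call to plot_map_from_nc() above; the file name, variable name and axis range are placeholders for whatever the netCDF file actually contains.

out = plot_map_from_nc(
    path_nc="lulc.nc",                 # hypothetical input file
    out_path="frac_cropland.png",
    var_name="frac_cropland",          # a 0-1 fraction variable in that file
    xaxis_min=0.0, xaxis_max=1.05, xaxis_step=0.05,
    xlabel="Fraction of grid cell",
    title="Cropland fraction",
    grid=True,
)
print("wrote", out)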
def f5_add_policy_method_command(client: Client, policy_md5: str, new_method_name: str, act_as_method: str) -> CommandResults: """ Add allowed method to a certain policy. Args: client (Client): f5 client. policy_md5 (str): MD5 hash of the policy. new_method_name (str): Display name of the new method. act_as_method(str): functionality of the new method. default is GET. """ result = client.add_policy_method(policy_md5, new_method_name, act_as_method) outputs, headers = build_output(OBJECT_FIELDS, result) readable_output = tableToMarkdown('f5 data for adding policy methods:', outputs, headers, removeNull=True) command_results = CommandResults( outputs_prefix='f5.PolicyMethods', outputs_key_field='id', readable_output=readable_output, outputs=remove_empty_elements(outputs), raw_response=result ) return command_results
20,828
def shortdate(value: Union[datetime, date]) -> str: """Render a date in short form (deprecated for lack of i18n support).""" dt: Union[datetime, date] utc_now: Union[datetime, date] if isinstance(value, datetime): tz = get_timezone() if value.tzinfo is None: dt = utc.localize(value).astimezone(tz) else: dt = value.astimezone(tz) utc_now = request_timestamp().astimezone(tz) else: dt = value utc_now = request_timestamp().date() if dt > ( utc_now - timedelta(days=int(current_app.config.get('SHORTDATE_THRESHOLD_DAYS', 0))) ): return dt.strftime('%e %b') else: # The string replace hack is to deal with inconsistencies in the underlying # implementation of strftime. See https://bugs.python.org/issue8304 return str(dt.strftime("%e %b '%y")).replace("'", "’")
20,829
def test_set(sc, idfModel, numFeatures, test_file = "data/test_clean.csv" ): """ Input : IDF model obtained in the training phase number of retained features in the tweet-term structure Output : normalized tweet-term format test set """ test_text = sc.textFile(test_file) test_df = test_text.map(lambda x : (0,x)).toDF(["nothing" , "sentence"]) tokenizer_test = Tokenizer(inputCol="sentence", outputCol="words") wordsData_test = tokenizer_test.transform(test_df) hashingTF_test = HashingTF(inputCol="words", outputCol="rawFeatures", numFeatures=numFeatures) featurizedData_test = hashingTF_test.transform(wordsData_test) rescaledData_test = idfModel.transform(featurizedData_test) rescaled_test_df = rescaledData_test.select("features") return rescaled_test_df
20,830
def is_object_based_ckpt(ckpt_path: str) -> bool: """Returns true if `ckpt_path` points to an object-based checkpoint.""" var_names = [var[0] for var in tf.train.list_variables(ckpt_path)] return '_CHECKPOINTABLE_OBJECT_GRAPH' in var_names
20,831
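A small self-check built on is_object_based_ckpt() above; the checkpoint prefix is a throwaway path.

import tensorflow as tf

# Write a tiny object-based checkpoint and confirm the helper detects it.
ckpt = tf.train.Checkpoint(v=tf.Variable(1.0))
prefix = ckpt.save("/tmp/demo_ckpt/ckpt")   # e.g. '/tmp/demo_ckpt/ckpt-1'
print(is_object_based_ckpt(prefix))         # True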
def search(todos, **kwargs): """Return a list of todos that matches the provided filters. It takes the exact same parameters as the :class:`todotxtio. Todo` object constructor, and return a list of :class:`todotxtio.Todo` objects as well. All criteria defaults to ``None`` which means that the criteria is ignored. A todo will be returned in the results list if all of the criteria matches. From the moment when a todo is sent in the results list, it will never be checked again. :param list todos: List of todos to test :param dict kwargs: Dictionary of tests :rtype: list """ results = [] for todo in todos: matches = [] for key, value in kwargs.items(): if key == 'or': or_matches = [] for k, v in value.items(): or_matches += [search_todo(todo, k, v)] matches.append(any(match is True for match in or_matches)) else: matches += [search_todo(todo, key, value)] if matches and all(match is True for match in matches): results.append(todo) return results
20,832
def evaluate_prettiness(sampler=None, folder=None, input_2='cifar10-train', n=50000, batch_size=1000,
                        clean_afterwards=False, fid=False, isc=False, kid=False):
    """Evaluate a generative model in terms of IS, FID, or KID.

    At least one of `sampler` or `folder` must be present.

    Args:
        sampler (object, optional): A callable that draws a batch of samples from the model when
            called as ``sampler(batch_size=...)``.
        folder (str, optional): Path to the folder that contains all the images.
        input_2 (str, optional): Name of registered dataset or a path to a folder.
        n (int, optional): Number of samples to take.
        batch_size (int, optional): Number of samples in each batch.
        clean_afterwards (bool, optional): Clean the local cache if True.
        fid (bool, optional): Compute FID if True.
        isc (bool, optional): Compute IS if True.
        kid (bool, optional): Compute KID if True.

    Returns:
        A dictionary of metric values.
    """
    import torch_fidelity
    import matplotlib.pyplot as plt

    if sampler is None and folder is None:
        raise ValueError("sampler and folder cannot both be None")

    if folder is None:
        now = datetime.datetime.now().strftime("%d:%m:%Y-%H:%M:%S")
        folder = os.path.join(os.path.expanduser("~"), 'evaluate_prettiness', f'{now}')
        os.makedirs(folder, exist_ok=True)

        idx = 0
        for _ in tqdm.tqdm(range(n // batch_size), desc='spawn samples'):
            batch = sampler(batch_size=batch_size).detach().cpu().numpy()
            if batch.shape[1] == 3:
                batch = batch.transpose((0, 2, 3, 1))
            for img in batch:
                img_path = os.path.join(folder, f'{idx:06d}.png')
                plt.imsave(img_path, img)
                idx += 1

    stats = torch_fidelity.calculate_metrics(folder, input_2, isc=isc, fid=fid, kid=kid)
    if clean_afterwards:
        shutil.rmtree(folder)
    return stats
20,833
def read_file(input_file):
    """
    Read an SRT file into SrtEntry objects.

    Args:
        input_file (string): input file name

    Yields:
        generator of SrtEntry objects
    """
    for seq, begin, end, text in read_file_tuples(input_file):
        entry = SrtEntry()
        entry.seq = seq
        entry.begin = begin
        entry.end = end
        entry.text = text
        yield entry
20,834
def send_desc_request(controller, switch):
    """Send a description (OFPMP_DESC) multipart stats request to a switch.

    Args:
        controller(:class:`~kytos.core.controller.Controller`):
            the controller being used.
        switch(:class:`~kytos.core.switch.Switch`):
            target to send a stats request.
    """
    multipart_request = MultipartRequest()
    multipart_request.multipart_type = MultipartType.OFPMP_DESC
    emit_message_out(controller, switch.connection, multipart_request)
20,835
def test_authenticate_returns_a_user(rf, django_user_model): """ We can't do the real authentication but we do need to make sure a real user is returned from the backend authenticate method if the TokenValidator succeeds, so fake success and see what happens. """ user = django_user_model.objects.create_user("testuser", "testuser@example.com") with patch( "okta_oauth2.backend.TokenValidator.tokens_from_auth_code", Mock(return_value=(user, None)), ): backend = OktaBackend() assert backend.authenticate(rf, auth_code="123456", nonce="imanonce") == user
20,836
def print_shape(varname, var): """ :param varname: tensor name :param var: tensor variable """ print('{0} : {1}'.format(varname, var.get_shape()))
20,837
def gather(first_step: str = PATH, *, filename: str = FILE, stamp: bool = True) -> dict[str, dict[str, Any]]: """Walk the steps on the path to read the trees of configuration.""" user = USER if filename == FILE else filename.split('.')[0] trees = [(where, tree) for where, tree in walk_the_path(first_step, filename=filename) if tree is not None] return {f'{user}_{steps:{PAD}}': dict(tree, **{LABEL: where}) if stamp else dict(tree) for steps, (where, tree) in enumerate(reversed(trees))}
20,838
def test_mlp (learning_rate = 0.01, L1_reg = 0.00, L2_reg = 0.0001, n_epochs = 1000, dataset = 'mnist.pkl.gz', batch_size = 20, n_hidden = 500): """ Gradient descent on a multi-layer-perceptron learning_rate : float factor for gradient descent L1_reg : float, L1-Norm of weights L2_reg : float, L2-Norm of weights n_epochs : int, maximal number of epochs to run on the system dataset : string, path to the MNIST dataset """ datasets = load_data(dataset) train_set_x, train_set_y = datasets[0] valid_set_x, valid_set_y = datasets[1] test_set_x , test_set_y = datasets[2] # compute the number of mini-batches for training, validation and testing n_train_batches = train_set_x.get_value(borrow = True).shape[0] // batch_size n_valid_batches = valid_set_x.get_value(borrow = True).shape[0] // batch_size n_test_batches = test_set_x.get_value(borrow = True).shape[0] // batch_size ##################### # BUILD ACTUAL MODEL # ##################### print ('Building the model ...') # allocate symbolic variables for the data index = T.lscalar() # Generate symbolic varibales for input : x and labels : y x = T.matrix('x') y = T.ivector('y') rng = numpy.random.RandomState(1234) # Construct the MLP Class classifier = MLP( rng = rng, input = x, n_in = 28*28, n_hidden = n_hidden, n_out = 10 ) cost = ( classifier.negative_log_likelihood(y) + L1_reg * classifier.L1 + L2_reg * classifier.L2_sqr ) # computing the gradient of cost with respect to theta gparams = [T.grad(cost, param) for param in classifier.params] # specifying the update expression as a list of tuples: # (variable, update expression) pairs updates = [ (param, param - learning_rate * gparam) for param, gparam in zip(classifier.params, gparams)] train_model = theano.function( inputs = [index], outputs = cost, updates = updates, givens = { x : train_set_x[ index * batch_size : (index + 1) * batch_size], y : train_set_y[ index * batch_size : (index + 1) * batch_size] } ) test_model = theano.function( inputs = [index], outputs= classifier.errors(y), givens = { x : test_set_x[index * batch_size : (index + 1) * batch_size], y : test_set_y[index * batch_size : (index + 1) * batch_size] } ) validate_model = theano.function( inputs = [index], outputs= classifier.errors(y), givens = { x : valid_set_x[index * batch_size : (index + 1) * batch_size], y : valid_set_y[index * batch_size : (index + 1) * batch_size] } ) ################# ## TRAIN MODEL ## ################# print ('Training the model ...') # Early stopping parameters """ Early Stopping Procedure We'll have patience about the improvement in performance, after the patience is over. Early stopping rules provide guidance as to how many iterations can be run before the learner begins to over-fit. """ # look at these many examples before patience is up patience = 5000 # wait this much longer when a new best is found patience_increase = 2 improvement_threshold = 0.995 # a relative improvement of this much is considered significant validation_frequency = min(n_train_batches, patience // 2) # go through these many mini-batches before checking the network # on the validation set ; in this case we check every epoch best_validation_loss = numpy.inf test_score = 0. 
    start_time = timeit.default_timer()

    done_looping = False
    epoch = 0

    while (epoch < n_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in range(n_train_batches):
            minibatch_avg_cost = train_model(minibatch_index)
            # iteration number
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if (iter + 1) % validation_frequency == 0:
                # compute zero-one loss on validation set
                validation_losses = [validate_model(i) for i in range(n_valid_batches)]
                this_validation_loss = numpy.mean(validation_losses)

                print(
                    'epoch %i, minibatch %i/%i, validation error %f %%' %
                    (
                        epoch,
                        minibatch_index + 1,
                        n_train_batches,
                        this_validation_loss * 100.
                    )
                )

                if this_validation_loss < best_validation_loss:
                    # improve patience if loss improvement is good enough
                    if this_validation_loss < best_validation_loss * improvement_threshold:
                        patience = max(patience, iter * patience_increase)

                    best_validation_loss = this_validation_loss

                    # test it on the test set
                    test_losses = [test_model(i) for i in range(n_test_batches)]
                    test_score = numpy.mean(test_losses)

                    print(
                        (
                            'epoch %i, minibatch %i / %i, test error of'
                            ' best model %f %%'
                        ) % (
                            epoch,
                            minibatch_index + 1,
                            n_test_batches,
                            test_score * 100.
                        )
                    )

                    # save the best model
                    with open('best_model_mnist.pkl', 'wb') as f:
                        pickle.dump(classifier, f)

            if patience <= iter:
                done_looping = True
                break

    end_time = timeit.default_timer()

    print(
        (
            'Optimization complete with best validation score of %f %%, '
            'with test performance %f %%'
        ) % (best_validation_loss * 100., test_score * 100.)
    )

    print('The code ran for %d epochs, with %f epochs/sec' % (
        epoch, 1. * epoch / (end_time - start_time)
    ))
    print(('The code for file ' + os.path.split(__file__)[1] +
           ' ran for %.2fm' % ((end_time - start_time) / 60.)), file=sys.stderr)
20,839
def WriteUmbrellaHeader(output_path, imported_headers): """Writes the umbrella header. Args: output_path: The path to the umbrella header. imported_headers: A list of headers to #import in the umbrella header. """ year = datetime.date.today().year header_guard = ComputeHeaderGuard(output_path) imports = '\n'.join([ '#import "%s"' % os.path.basename(header) for header in sorted(imported_headers) ]) with open(output_path, 'w') as output_file: output_file.write( HEADER_TEMPLATE.safe_substitute({ 'year': year, 'header_guard': header_guard, 'imports': imports, }))
20,840
def main(args): """ An intermediate function that will call either REINFORCE learn or PPO learn. Parameters: args - the arguments defined below Return: None """ if args.alg == 'PPO': train_ppo(args) elif args.alg == 'reinforce': train_reinforce(args) else: raise ValueError(f'Algorithm {args.alg} not defined; options are reinforce or PPO.')
20,841
def _extractKernelVersion(kernel):
    """
    Extract version string from raw kernel binary.

    @param bytes kernel Raw kernel binary.
    @return string Version string if found.
    """
    try:
        versionOffset = kernel.index(b'Linux version')
        for i in range(versionOffset, versionOffset+1024):
            if kernel[i]==0x00:
                return kernel[versionOffset:i]

        return None
    except (ValueError, IndexError):
        # bytes.index() raises ValueError when the banner is missing;
        # IndexError covers running past the end of the buffer.
        return None
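# A minimal, self-contained sketch of how the helper above might be exercised;
# the "kernel image" bytes below are synthetic and purely illustrative.
fake_kernel = b'\x7fELF....Linux version 5.4.0-42-generic (gcc 9.3.0)\x00more data'
print(_extractKernelVersion(fake_kernel))   # b'Linux version 5.4.0-42-generic (gcc 9.3.0)'
print(_extractKernelVersion(b'no banner'))  # None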
20,842
def get_vdw_rad(atomic_num): """Function to get the user defined atomic radius""" atomic_rad_dict = {6: 1.7, 7: 1.55, 8: 1.52, 9: 1.47} if atomic_num in atomic_rad_dict: return atomic_rad_dict[atomic_num] else: return float(Chem.GetPeriodicTable().GetRvdw(atomic_num))
20,843
def main(): """Main export program""" # Read input arguments args = get_args() # Read articles from database print('Exporting retracted articles to %s...' % args.file) rows = [] articles = Article.query.order_by(Article.id).all() for article in articles: fields = { k: v for k, v in article.__dict__.items() if not k.startswith('_') and k != 'citations' } for citation in article.citations: rows.append( CsvRow(**fields, citation=citation.value, type=citation.type) ) # Write to file write_csv(args.file, rows) time.sleep(.5) print('Done.')
20,844
def input_literal(term, prompt): """Get console input of literal values and structures.""" while True: input_string = read_line(term, prompt) if input_string: break return eval_literal(input_string)
20,845
def walk2(top, topdown=True, onerror=None, followlinks=False, level=False, excludes=None): """ Add options to os.walk: exclusive filtering for dirnames, filenames (list or regex) level option :param top: <string>; see os.walk :param topdown: <boolean>; see os.walk :param onerror: <boolean>; see os.walk :param followlinks: <boolean>; see os.walk :param level: <int>; folder search level depth :param excludes: <regex string>|<list> :returns: <generator>; dirpath, dirnames, filenames """ # Compile re expression try: re_excludes = re.compile(excludes) except TypeError: pass # level if level: top = top.rstrip(os.path.sep) assert os.path.isdir(top) num_sep = top.count(os.path.sep) # loop through os.walk for dirpath, dirnames, filenames in os.walk(top, topdown, onerror, followlinks): # modify dirnames in place try: dirnames[:] = [d for d in dirnames if d not in excludes] dirnames[:] = [d for d in dirnames if not re_excludes.match(d)] except (UnboundLocalError, TypeError, ValueError, AttributeError): pass # yield result yield dirpath, dirnames, filenames # level if level: num_sep_current = dirpath.count(os.path.sep) if num_sep + level <= num_sep_current: del dirnames[:]
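# A hedged usage sketch for the walker above; the starting directory, depth and
# exclude pattern are assumptions chosen only for illustration.
for dirpath, dirnames, filenames in walk2('.', level=2,
                                          excludes=r'^(\.git|__pycache__)$'):
    print(dirpath, len(filenames))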
20,846
def test_example(cx, name=None, tag=None, ): """Test a specific doc example in the current virtual environment.""" if name is None: examples = visit_examples() else: examples = [name] for example in examples: path = example assert path.exists() and path.is_dir(), \ f"Example {example.stem} doesn't exist at {path}" # TODO: add support for reports and such print("tag is ignored") cx.run(f"pytest tests/test_docs/test_examples/test_{example.stem}.py", warn=True)
20,847
def _parse_string( value_expr: str, target_expr: str, ref_parts: List[str], a_type: Union[mapry.String, mapry.Path], pattern_uids: Mapping[Pattern[str], int], auto_id: mapry.go.generate.AutoID) -> str: """ Generate the code to parse a string. The code parses the JSONable ``value_expr`` into the ``target_expr``. :param value_expr: Go expression of the value :param target_expr: Go expression of where to store the parsed value :param ref_parts: Go expression of reference path segments to the value :param a_type: mapry definition of the value type :param pattern_uids: uniquely identified patterns :param auto_id: generator of unique identifiers :return: generated code """ uid = auto_id.next_identifier() return _PARSE_STRING_TPL.render( uid=uid, value_expr=value_expr, ref_parts=ref_parts, target_expr=target_expr, a_type=a_type, pattern_uids=pattern_uids).rstrip("\n")
20,848
def add_metadata_for_subject (rdf_graph,subject_uri,namespaces,nidm_obj): """ Cycles through triples for a particular subject and adds them to the nidm_obj :param rdf_graph: RDF graph object :param subject_uri: URI of subject to query for additional metadata :param namespaces: Namespaces in input graph :param nidm_obj: NIDM object to add metadata :return: None """ #Cycle through remaining metadata and add attributes for predicate, objects in rdf_graph.predicate_objects(subject=subject_uri): # if this isn't a qualified association, add triples if predicate != URIRef(Constants.PROV['qualifiedAssociation']): # make predicate a qualified name obj_nm, obj_term = split_uri(predicate) found_uri = find_in_namespaces(search_uri=URIRef(obj_nm), namespaces=namespaces) # if obj_nm is not in namespaces then it must just be part of some URI in the triple # so just add it as a prov.Identifier if (not found_uri) and (obj_nm != Constants.PROV) and (obj_nm != Constants.XSD): predicate = pm.QualifiedName(namespace=Namespace(str(predicate)), localpart="") # else add as explicit prov.QualifiedName because it's easier to read #else: # predicate = Identifier(predicate) if (validators.url(objects)) and (predicate != Constants.PROV['Location']): # try to split the URI to namespace and local parts, if fails just use the entire URI. try: #create qualified names for objects obj_nm,obj_term = split_uri(objects) # added because PyNIDM agent, activity, and entity classes already add the type if ((objects == Constants.PROV['Activity']) or (objects == Constants.PROV['Agent']) or (objects == Constants.PROV['Entity'])): continue # special case if obj_nm is prov, xsd, or nidm namespaces. These are added # automatically by provDocument so they aren't accessible via the namespaces list # so we check explicitly here if ((obj_nm == str(Constants.PROV))): nidm_obj.add_attributes({predicate: pm.QualifiedName(Constants.PROV,obj_term)}) elif ((obj_nm == str(Constants.NIDM))): nidm_obj.add_attributes({predicate: pm.QualifiedName(Constants.NIDM,obj_term)}) else: found_uri = find_in_namespaces(search_uri=URIRef(obj_nm),namespaces=namespaces) # if obj_nm is not in namespaces then it must just be part of some URI in the triple # so just add it as a prov.Identifier if not found_uri: nidm_obj.add_attributes({predicate: Identifier(objects)}) # else add as explicit prov.QualifiedName because it's easier to read else: nidm_obj.add_attributes({predicate: pm.QualifiedName(found_uri, obj_term)}) except: nidm_obj.add_attributes({predicate: pm.QualifiedName(namespace=Namespace(str(objects)),localpart="")}) else: # check if this is a qname and if so expand it # added to handle when a value is a qname. this should expand it.... 
if (":" in objects) and isinstance(objects,URIRef): objects = from_n3(objects) # check if objects is a url and if so store it as a URIRef else a Literal if (validators.url(objects)): obj_nm, obj_term = split_uri(objects) nidm_obj.add_attributes({predicate : Identifier(objects)}) else: nidm_obj.add_attributes({predicate : get_RDFliteral_type(objects)}) # now find qualified associations for bnode in rdf_graph.objects(subject=subject_uri, predicate=Constants.PROV['qualifiedAssociation']): # create temporary resource for this bnode r = Resource(rdf_graph, bnode) # get the object for this bnode with predicate Constants.PROV['hadRole'] for r_obj in r.objects(predicate=Constants.PROV['hadRole']): # if this is a qualified association with a participant then create the prov:Person agent if r_obj.identifier == URIRef(Constants.NIDM_PARTICIPANT.uri): # get identifier for prov:agent part of the blank node for agent_obj in r.objects(predicate=Constants.PROV['agent']): # check if person exists already in graph, if not create it if agent_obj.identifier not in nidm_obj.graph.get_records(): person = nidm_obj.add_person(uuid=agent_obj.identifier,add_default_type=False) # add rest of meatadata about person add_metadata_for_subject(rdf_graph=rdf_graph, subject_uri=agent_obj.identifier, namespaces=namespaces, nidm_obj=person) else: # we need the NIDM object here with uuid agent_obj.identifier and store it in person for obj in nidm_obj.graph.get_records(): if agent_obj.identifier == obj.identifier: person = obj # create qualified names for objects obj_nm, obj_term = split_uri(r_obj.identifier) found_uri = find_in_namespaces(search_uri=URIRef(obj_nm),namespaces=namespaces) # if obj_nm is not in namespaces then it must just be part of some URI in the triple # so just add it as a prov.Identifier if not found_uri: #nidm_obj.add_qualified_association(person=person, role=pm.Identifier(r_obj.identifier)) nidm_obj.add_qualified_association(person=person, role=pm.QualifiedName(Namespace(obj_nm),obj_term)) else: nidm_obj.add_qualified_association(person=person, role=pm.QualifiedName(found_uri, obj_term)) # else it's an association with another agent which isn't a participant else: # get identifier for the prov:agent part of the blank node for agent_obj in r.objects(predicate=Constants.PROV['agent']): # check if the agent exists in the graph else add it if agent_obj.identifier not in nidm_obj.graph.get_records(): generic_agent = nidm_obj.graph.agent(identifier=agent_obj.identifier) # add rest of meatadata about the agent add_metadata_for_subject(rdf_graph=rdf_graph, subject_uri=agent_obj.identifier, namespaces=namespaces, nidm_obj=generic_agent) # try and split uri into namespacea and local parts, if fails just use entire URI try: # create qualified names for objects obj_nm, obj_term = split_uri(r_obj.identifier) found_uri = find_in_namespaces(search_uri=URIRef(obj_nm), namespaces=namespaces) # if obj_nm is not in namespaces then it must just be part of some URI in the triple # so just add it as a prov.Identifier if not found_uri: nidm_obj.add_qualified_association(person=generic_agent, role=pm.QualifiedName(Namespace(obj_nm),obj_term)) else: nidm_obj.add_qualified_association(person=generic_agent, role=pm.QualifiedName(found_uri, obj_term)) except: nidm_obj.add_qualified_association(person=generic_agent, role=pm.QualifiedName(Namespace(r_obj.identifier),""))
20,849
def cleanup_makefile(): """Delete any leftover BUILD files from the Makefile build. These files could interfere with Bazel parsing. """ makefile_download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow', 'contrib', 'makefile', 'downloads') if os.path.isdir(makefile_download_dir): for root, _, filenames in os.walk(makefile_download_dir): for f in filenames: if f.endswith('BUILD'): os.remove(os.path.join(root, f))
20,850
def get_dots_case_json(casedoc, anchor_date=None): """ Return JSON-ready array of the DOTS block for given patient. Pulling properties from PATIENT document. Patient document trumps casedoc in this use case. """ if anchor_date is None: anchor_date = datetime.now(tz=timezone(settings.TIME_ZONE)) enddate = anchor_date ret = { 'regimens': [ # non art is 0 int(getattr(casedoc, CASE_NONART_REGIMEN_PROP, None) or 0), # art is 1 int(getattr(casedoc, CASE_ART_REGIMEN_PROP, None) or 0), ], 'regimen_labels': [ list(casedoc.nonart_labels), list(casedoc.art_labels) ], 'days': [], # dmyung - hack to have query_observations timezone # be relative specific to the eastern seaboard 'anchor': anchor_date.strftime("%d %b %Y"), } observations = query_observations( casedoc._id, enddate-timedelta(days=DOT_DAYS_INTERVAL), enddate) for delta in range(DOT_DAYS_INTERVAL): obs_date = enddate - timedelta(days=delta) day_arr = filter_obs_for_day(obs_date.date(), observations) day_data = DOTDay.merge_from_observations(day_arr) ret['days'].append(day_data.to_case_json(casedoc, ret['regimen_labels'])) ret['days'].reverse() return ret
20,851
def rel_path(path, parent_path): """Return path relative to parent_path.""" # Use realpath to avoid issues with symlinked dirs (see gh-7707) pd = os.path.realpath(os.path.abspath(parent_path)) apath = os.path.realpath(os.path.abspath(path)) if len(apath) < len(pd): return path if apath == pd: return '' if pd == apath[:len(pd)]: assert apath[len(pd)] in [os.sep], repr((path, apath[len(pd)])) path = apath[len(pd)+1:] return path
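# Two illustrative calls for the helper above (hypothetical POSIX paths).
print(rel_path('/home/user/project/src/module.py', '/home/user/project'))  # 'src/module.py'
print(rel_path('/home/user/project', '/home/user/project'))                # ''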
20,852
def sleeping_func(arg, secs=10, result_queue=None): """This methods illustrates how the workers can be used.""" import time time.sleep(secs) if result_queue is not None: result_queue.put(arg) else: return arg
20,853
def _callable_intersect(in_file, callable_bed, data): """Return list of original VCF SVs intersected by callable regions. Does not try to handle BNDs. We should resolve these and return where possible. """ with tx_tmpdir(data) as tmpdir: in_bed = os.path.join(tmpdir, "%s-convert.bed" % utils.splitext_plus(os.path.basename(in_file))[0]) with utils.open_gzipsafe(in_file) as in_handle: with open(in_bed, "w") as out_handle: for parts in (l.split("\t") for l in in_handle if not l.startswith("#")): start, end = _get_start_end(parts) if end: out_handle.write("\t".join([parts[0], start, end] + parts) + "\n") out_file = os.path.join(tmpdir, "%s-subset.tsv" % utils.splitext_plus(os.path.basename(in_file))[0]) cmd = "bedtools intersect -a {in_bed} -b {callable_bed} -wa -wb > {out_file}" do.run(cmd.format(**locals()), "Intersect VCF by callable") with open(out_file) as in_handle: for line in in_handle: yield line.rstrip().split("\t")[3:]
20,854
def suffix(s): """Add '3' suffix to programs for Python 3.""" if sys.version_info[0] == 3: s = s + '3' return s
20,855
def test_resolve_recursive_reference_returns_and_adds_orphan( mocker: MockerFixture, ) -> None: """Returns first referenced component and adds orphans to schema.""" component = mocker.MagicMock(spec=ModelElement) orphan = mocker.MagicMock(spec=ModelElement) model_element_uris = [None, component, None, orphan] object_creator_mock = mocker.patch( "jsonschematordf.modelldcatnofactory.create_model_element", side_effect=model_element_uris, ) mock_schema = mocker.MagicMock() mocker.patch.object( mock_schema, "get_components_by_path", return_value=range(len(model_element_uris)), ) add_orphan_mock = mocker.patch.object(mock_schema, "add_orphan_elements") actual = modelldcatno_factory._resolve_recursive_reference("ref", mock_schema) object_creator_mock.assert_called() add_orphan_mock.assert_called_once_with([orphan]) assert actual == component
20,856
def make_coll(db_auth, db_user, db_pass, mongo_server_ip='127.0.0.1'):
    """
    Function to establish a connection to a local MongoDB instance.

    Parameters
    ----------

    db_auth: String.
                MongoDB database that should be used for user authentication.

    db_user: String.
                Username for MongoDB authentication.

    db_pass: String.
                Password for MongoDB authentication.

    mongo_server_ip: String.
                IP address of the MongoDB server. Defaults to '127.0.0.1'.

    Returns
    -------

    collection: pymongo.collection.Collection.
                Collection within MongoDB that holds the scraped news stories.

    """
    connection = MongoClient(mongo_server_ip)
    if db_auth:
        connection[db_auth].authenticate(db_user, db_pass)
    db = connection.event_scrape
    collection = db['stories']

    return collection
20,857
def convert_directory_to_txt(directory): """ Converts an entire directory of PDF to TXT file Variables --------- directory: directory in which many PDFs are placed """ for filename in glob.iglob('{}*.pdf'.format(directory)): convert_pdf_to_txt(filename)
20,858
def sms_send(recipient): """ Attempt to send SMS message using Twilio's API. If this fails, use the Summit API to send the SMS message. """ body = request.get_data() try: message = send_sms_through_provider('Twilio', recipient, body) except TwilioRestException: message = send_sms_through_provider('Summit', recipient, body) return jsonify({ message.id_key: getattr(message, message.id_key), 'from': message.from_, 'to': message.to, 'body': message.body, })
20,859
def cat_to_sub_cat( dp: Image, categories_dict_names_as_key: Dict[str, str], cat_to_sub_cat_dict: Optional[Dict[str, str]] = None ) -> Image: """ Replace some category with its affiliated sub category of CategoryAnnotations. Suppose your category name is 'foo' and comes along with sub_category_annotations 'foo_1' and 'foo_2' then this adapter will replace 'foo' with 'foo_1' or 'foo_2', respectively. :param dp: Image datapoint :param categories_dict_names_as_key: A dict of all possible categories and their ids :param cat_to_sub_cat_dict: e.g. {"foo": "sub_cat_1", "bak":"sub_cat_2"} :return: Image with updated Annotations """ if cat_to_sub_cat_dict is None: return dp categories_dict = categories_dict_names_as_key for ann in dp.get_annotation_iter(category_names=list(cat_to_sub_cat_dict.keys())): sub_cat_type = cat_to_sub_cat_dict.get(ann.category_name, "") sub_cat = ann.get_sub_category(sub_cat_type) if sub_cat: ann.category_name = sub_cat.category_name ann.category_id = categories_dict[ann.category_name] return dp
20,860
def index_to_str(idx): """ Generates a string representation from an index array. :param idx: The NumPy boolean index array. :return: The string representation of the array. """ num_chars = int(idx.shape[0] / 6 + 0.5) s = "" for i in range(num_chars): b = i * 6 six = idx[b:b+6] c = 0 for j in range(six.shape[0]): c = c * 2 + int(six[j]) s = s + chr(c+32) return s
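# A short, self-contained sketch of the 6-bits-per-character packing above;
# the flag values are arbitrary.
import numpy as np

idx = np.array([1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0], dtype=bool)
encoded = index_to_str(idx)   # 12 flags -> 2 printable characters
print(len(encoded), encoded)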
20,861
def ais_refactor(package, proprietary, consent, color, organisation, industry, country, admin_area): """Refactor a STIX package to meet AIS requirements.""" # Add an AIS Marking to the header # Note add_ais_marking() removes existing markings ais.add_ais_marking( stix_package=package, proprietary=proprietary, consent=consent, color=color, country_name_code=country, industry_type=industry, admin_area_name_code=admin_area, organisation_name=organisation, country_name_code_type='ISO-3166-1_alpha-2', admin_area_name_code_type='ISO-3166-2', ) # Dereference observables dereference_observables(package) # Remove the observables from the root of the package package.observables = None
20,862
def ArclinkStatusLine_ClassName(): """ArclinkStatusLine_ClassName() -> char const *""" return _DataModel.ArclinkStatusLine_ClassName()
20,863
def build_sfdisk_partition_line(table_type, dev_path, size, details): """Build sfdisk partition line using passed details, returns str.""" line = f'{dev_path} : size={size}' dest_type = '' source_filesystem = str(details.get('fstype', '')).upper() source_table_type = '' source_type = details.get('parttype', '') # Set dest type if re.match(r'^0x\w+$', source_type): # Both source and dest are MBR source_table_type = 'MBR' if table_type == 'MBR': dest_type = source_type.replace('0x', '').lower() elif re.match(r'^\w{8}-\w{4}-\w{4}-\w{4}-\w{12}$', source_type): # Source is a GPT type source_table_type = 'GPT' if table_type == 'GPT': dest_type = source_type.upper() if not dest_type: # Assuming changing table types, set based on FS if source_filesystem in cfg.ddrescue.PARTITION_TYPES.get(table_type, {}): dest_type = cfg.ddrescue.PARTITION_TYPES[table_type][source_filesystem] line += f', type={dest_type}' # Safety Check if not dest_type: std.print_error(f'Failed to determine partition type for: {dev_path}') raise std.GenericAbort() # Add extra details if details.get('partlabel', ''): line += f', name="{details["partlabel"]}"' if details.get('partuuid', '') and source_table_type == table_type: # Only add UUID if source/dest table types match line += f', uuid={details["partuuid"].upper()}' # Done return line
20,864
def test_load_from_env_missing(example_config_env_missing): """Test looking up a config file via an environment variable that is set to a nonexistent file.""" a = ExampleConfig.load( number=3, floaty_number=5, flag=False, word='hello', _lookup_config_envvar='config', ) assert a.number == 3 assert a.floaty_number == 5.0 assert a.flag is False assert a.word == 'hello'
20,865
def parse_args() -> argparse.Namespace: """Parse user command line arguments.""" parser = argparse.ArgumentParser( description='compare annotations in xml format between different image label sets') parser.add_argument('--verbose', action='store_true') parser.add_argument('--prune', action='store_true') parser.add_argument('--check', choices=['relaxed', 'normal', 'strict'], default='normal') parser.add_argument('--data', required=False, help='xml and image directories', nargs='+') parser.add_argument('--out', required=False, help='output directory') return parser
20,866
def get_allocation_window(allocation, default_start_date=_get_zero_date_utc(), default_end_date=_get_current_date_utc()): """ Returns a tuple containing the allocation windows start and end date """ if not allocation.start_date: window_start_date = default_start_date else: window_start_date = allocation.start_date if not allocation.end_date: window_end_date = default_end_date else: window_end_date = allocation.end_date return window_start_date, window_end_date
20,867
def get_instances_in_service(group, region: str): """Get set of instance IDs with ELB "InService" state""" instances_in_service = set() # TODO: handle auto scaling groups without any ELB lb_names = group["LoadBalancerNames"] if lb_names: # check ELB status elb = BotoClientProxy("elb", region) for lb_name in lb_names: result = elb.describe_instance_health(LoadBalancerName=lb_name) for instance in result["InstanceStates"]: if instance["State"] == "InService": instances_in_service.add(instance["InstanceId"]) else: # just use ASG LifecycleState group = get_auto_scaling_group( BotoClientProxy("autoscaling", region), group["AutoScalingGroupName"] ) for instance in group["Instances"]: if instance["LifecycleState"] == "InService": instances_in_service.add(instance["InstanceId"]) return instances_in_service
20,868
def IDFromUID(s,code=''): """ Create an ID object from the given string UID. This can raise an Error in case the string does not map to a valid UID. code is used in the verification process if given. """ id = _EmptyClass() id.__class__ = ID id.set_uid(s,code) return id
20,869
async def async_setup_entry( hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback ) -> None: """Set up Tuya (de)humidifier dynamically through Tuya discovery.""" hass_data: HomeAssistantTuyaData = hass.data[DOMAIN][entry.entry_id] @callback def async_discover_device(device_ids: list[str]) -> None: """Discover and add a discovered Tuya (de)humidifier.""" entities: list[TuyaHumidifierEntity] = [] for device_id in device_ids: device = hass_data.device_manager.device_map[device_id] if description := HUMIDIFIERS.get(device.category): entities.append( TuyaHumidifierEntity(device, hass_data.device_manager, description) ) async_add_entities(entities) async_discover_device([*hass_data.device_manager.device_map]) entry.async_on_unload( async_dispatcher_connect(hass, TUYA_DISCOVERY_NEW, async_discover_device) )
20,870
def ordered_pair_accuracy(labels, predictions, weights=None, name=None):
  """Computes the percentage of correctly ordered pairs.

  For any pair of examples, we compare their orders determined by `labels` and
  `predictions`. They are correctly ordered if the two orders are compatible.
  That is, labels l_i > l_j and predictions s_i > s_j; the weight for this pair
  is the weight from the l_i.

  Args:
    labels: A `Tensor` of the same shape as `predictions`.
    predictions: A `Tensor` with shape [batch_size, list_size]. Each value is
      the ranking score of the corresponding example.
    weights: A `Tensor` of the same shape of predictions or [batch_size, 1].
      The former case is per-example and the latter case is per-list.
    name: A string used as the name for this metric.

  Returns:
    A metric for the accuracy of ordered pairs.
  """
  with ops.name_scope(name, 'ordered_pair_accuracy',
                      (labels, predictions, weights)):
    clean_labels, predictions, weights, _ = _prepare_and_validate_params(
        labels, predictions, weights)
    label_valid = math_ops.equal(clean_labels, labels)
    valid_pair = math_ops.logical_and(
        array_ops.expand_dims(label_valid, 2),
        array_ops.expand_dims(label_valid, 1))
    pair_label_diff = array_ops.expand_dims(
        clean_labels, 2) - array_ops.expand_dims(clean_labels, 1)
    pair_pred_diff = array_ops.expand_dims(
        predictions, 2) - array_ops.expand_dims(predictions, 1)
    # Correct pairs are represented twice in the above pair difference tensors.
    # We only take one copy for each pair.
    correct_pairs = math_ops.to_float(pair_label_diff > 0) * math_ops.to_float(
        pair_pred_diff > 0)
    pair_weights = math_ops.to_float(
        pair_label_diff > 0) * array_ops.expand_dims(
            weights, 2) * math_ops.to_float(valid_pair)
    return math_ops.reduce_mean(correct_pairs * pair_weights)
20,871
def samp(*args, **kwargs): """ The HTML <samp> element is an element intended to identify sample output from a computer program. It is usually displayed in the browser's default monotype font (such as Lucida Console). """ return el('samp', *args, **kwargs)
20,872
def get_fsuae_dir(): """Get FS-UAE dir""" user_home_dir = os.path.expanduser('~') directories = [os.path.join(user_home_dir, _f) for _f in os.listdir(user_home_dir) \ if os.path.isdir(os.path.join(user_home_dir, _f))] for directory in directories: fsuae_dir = os.path.join(directory, 'FS-UAE') fsuae_config_dir = os.path.join(fsuae_dir, 'Configurations') if os.path.isdir(fsuae_config_dir): return fsuae_dir return None
20,873
def _generate_sections_of_url(url: str) -> 'List[str]': """Generate Sections of a URL's path :param url: The URL you wish to split :type url: str :return: A list of url paths :rtype: List[str] """ path = urlparse.urlsplit(url).path sections = [] temp = "" while (path != '/'): temp = os.path.split(path) if temp[0] == '': break path = temp[0] # Insert at the beginning to keep the proper url order sections.insert(0, temp[1]) return sections
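# Example invocation of the path-splitting helper above.
print(_generate_sections_of_url('https://example.com/a/b/c.html'))
# -> ['a', 'b', 'c.html']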
20,874
def get_ucs_cco_image_list(username=None, password=None, mdf_id_list=None, proxy=None): """ Gets the list of images available on CCO Args: username (str): username to connect to image server password (str): password to connect to image server mdf_id_list (list): list of mdf id proxy (str): proxy used for connection Returns: List of UcsCcoImage objects Example: image_list = get_ucs_cco_image_list("username", "password") """ import getpass import xml.dom import xml.dom.minidom import base64 if username is None: username = raw_input("Username: ") if password is None: password = getpass.getpass() ucs_mdf_ids = (283612660, 283853163, 283862063) url = "https://www.cisco.com/cgi-bin/front.x/ida/locator/locator.pl" ida_xml_query_header = 'input_xml=<?xml version="1.0" encoding="UTF-8"?>' \ '<locator>' \ '<input>' ida_xml_query_mdf_id = '<mdfConcept id="%s" name=""/>' ida_xml_query_footer = '</input></locator>' # create input_xml string to post as # data to the respective url via post method input_xml = "" input_xml += ida_xml_query_header if not mdf_id_list: for mdf_id in ucs_mdf_ids: input_xml += ida_xml_query_mdf_id % mdf_id else: for mdf_id in mdf_id_list: input_xml += ida_xml_query_mdf_id % mdf_id input_xml += ida_xml_query_footer log.debug(input_xml) # base64encode for Authorization header credential = base64.b64encode((username + ":" + password).encode()).decode( 'utf-8') log.debug(credential) # send request to server driver = UcsDriver(proxy) driver.add_header("Authorization", "Basic %s" % credential) ida_xml_response = driver.post(uri=url, data=input_xml.encode(), dump_xml=True, read=True) if not ida_xml_response: raise UcsValidationException("No Response from <%s>" % url) doc = xml.dom.minidom.parseString(ida_xml_response) image_node_list = doc.getElementsByTagName("image") if not image_node_list: raise UcsValidationException("No Images Found") # Serialize image nodes in objects cco_image_list = [] for image_node in image_node_list: # print image_node.toxml() image = UcsCcoImage() image.network_credential = credential property_node_list = [child_node for child_node in image_node.childNodes if child_node.nodeType == child_node.ELEMENT_NODE and child_node.localName == "property"] for property_node in property_node_list: if not property_node.hasAttribute("name"): continue if property_node.getAttribute( "name") == _UcsCcoImageList.IDAC_TAG_VERSION: image.version = property_node.getAttribute("value") continue if property_node.getAttribute( "name") == _UcsCcoImageList.IDAC_TAG_IMAGE_NAME: image.image_name = property_node.getAttribute("value") continue if property_node.getAttribute( "name") == _UcsCcoImageList.IDAC_TAG_URL: image.url = property_node.getAttribute("value") continue if property_node.getAttribute( "name") == _UcsCcoImageList.IDAC_TAG_IP_URL: image.ip_url = property_node.getAttribute("value") continue if property_node.getAttribute( "name") == _UcsCcoImageList.IDAC_TAG_SIZE: image.size = int(property_node.getAttribute("value")) continue if property_node.getAttribute( "name") == _UcsCcoImageList.IDAC_TAG_CHECKSUM: if property_node.getAttribute("type") == "md5": image.checksum_md5 = property_node.getAttribute("value") continue if property_node.getAttribute( "name") == _UcsCcoImageList.IDAC_TAG_FILE_DESCRIPTION: image.file_description = property_node.getAttribute("value") continue cco_image_list.append(image) return cco_image_list
20,875
def knownTypes():
    """Returns all known resource types"""
    # list() is needed so the dict view can be concatenated under Python 3.
    return list(loader.typeToExtension.keys())+['WorldModel','MultiPath','Point','Rotation','Matrix3','ContactPoint']
20,876
def run(args: argparse.Namespace) -> None: """Run.""" with open_read_text(args.input) as fp: coco: GtType = json.load(fp) scalabel, config = coco_to_scalabel(coco) has_videos = all(frame.videoName is not None for frame in scalabel) if not has_videos: assert args.output.endswith(".json"), "output should be a json file" save(args.output, Dataset(frames=scalabel, config=config)) else: scalabels = group_and_sort(scalabel) if not os.path.isdir(args.output): os.makedirs(args.output) save_paths = [ os.path.join(args.output, str(video_anns[0].videoName) + ".json") for video_anns in scalabels ] with Pool(args.nproc) as pool: pool.starmap( save, tqdm(zip(save_paths, scalabels), total=len(scalabels)), )
20,877
def _check_dict_value(_dict: dict, prefix: str = ''):
    """
    Recursively check whether every value in the dict is of an accepted type,
    converting torch/numpy values in place where needed.

    :param _dict: the dict being checked
    :param prefix: prefix of the key path used during recursion
    :return:
    """
    keys = list(_dict.keys())
    for key in keys:
        value = _dict[key]
        if isinstance(value, str) or value is None:
            continue
        elif isinstance(value, dict):
            _check_dict_value(value, prefix=prefix + ':' + key)
        elif 'torch.Tensor' in str(type(value)):
            try:
                value = value.item()
                _dict[key] = value
            except:
                value = str(value.tolist())
                _dict[key] = value
        elif 'numpy.ndarray' in str(type(value)):
            total_ele = 1
            for dim in value.shape:
                total_ele *= dim
            if total_ele == 1:
                _dict[key] = value.reshape(1)[0]
            else:
                _dict[key] = str(value.tolist())
        elif isinstance(value, np.bool_):
            _dict[key] = bool(value)
        elif isinstance(value, np.integer):
            _dict[key] = int(value)
        elif isinstance(value, np.floating):
            _dict[key] = float(value)
        else:
            _dict[key] = str(value)
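# A hedged example of the sanitiser above: numpy scalars are unwrapped in place,
# larger arrays are stringified; the record keys are illustrative assumptions.
import numpy as np

record = {'loss': np.float32(0.25), 'flag': np.bool_(True),
          'hist': np.array([1, 2, 3]), 'nested': {'step': np.int64(7)}}
_check_dict_value(record)
print(record)   # {'loss': 0.25, 'flag': True, 'hist': '[1, 2, 3]', 'nested': {'step': 7}}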
20,878
def args_for_blocking_web_whatsapp_com_http(): """ Returns arguments for blocking web.whatsapp.com over http """ return ["-iptables-reset-keyword", "Host: web.whatsapp.com"]
20,879
def zeta_vector(): """The :func:`zeta` vector. :func:`zeta_vector` returns :math:`\zeta` parameters calculated by formula (5) on page 17 in `the technical paper`_, which is .. math:: \\bf \zeta= W^{-1}(p-\mu) """ return np.linalg.inv(W_matrix()) @ (m_vector() - mu_vector())
20,880
def check_if_raster_file_exists(fpath: str): """ Check if GeoTif file exists """ if not os.path.isfile(fpath): raise FileNotFoundError(f'{fpath} not found!\nRun download()')
20,881
def get_fourier_col_name(k, col_name, function_name="sin", seas_name=None):
    """Returns column name corresponding to a particular fourier term, as returned by fourier_series_fcn

    :param k: int
        fourier term
    :param col_name: str
        column in the dataframe used to generate fourier series
    :param function_name: str
        sin or cos
    :param seas_name: str
        appended to new column names added for fourier terms
    :return: str
        column name in DataFrame returned by fourier_series_fcn
    """
    # patsy doesn't allow "." in formula term. Replace "." with "_" rather than quoting "Q()" all fourier terms
    name = f"{function_name}{k:.0f}_{col_name}"
    if seas_name is not None:
        name = f"{name}_{seas_name}"
    return name
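# Example of the generated column names; the column and seasonality names are
# illustrative.
print(get_fourier_col_name(1, 'tod', function_name='sin'))                      # 'sin1_tod'
print(get_fourier_col_name(2, 'tow', function_name='cos', seas_name='weekly'))  # 'cos2_tow_weekly'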
20,882
def field_by_regex(fc, field_regex, escape_tables=True): """Returns a list of field names matching a regular expression.""" for f in arcpy.Describe(fc).fields: if escape_tables: field_regex = field_regex.replace("$.", "\\$\\.") if re.findall(field_regex, f.name): yield f.name
20,883
def unique_filename(): """Creates a UUID-based unique filename""" return str(uuid.uuid1())
20,884
def _create_trajectory(molecule):
    """Create an `mdtraj` topology from a molecule object.

    Parameters
    ----------
    molecule: openff.toolkit.topology.Molecule
        The molecule to create the trajectory from.

    Returns
    -------
    mdtraj.Trajectory
        The created trajectory.
    """
    import mdtraj

    # Check whether the molecule has a configuration defined, and if not,
    # define one.
    if molecule.n_conformers <= 0:
        molecule.generate_conformers(n_conformers=1)

    # We need to save out the molecule and then reload it as the toolkit
    # will not always save the atoms in the same order that they are
    # present in the molecule object.
    with tempfile.NamedTemporaryFile(suffix=".pdb") as file:
        molecule.to_file(file.name, "PDB")
        # Load the pdb into an mdtraj object.
        mdtraj_trajectory = mdtraj.load_pdb(file.name)

    # Change the assigned residue name (sometimes molecules are assigned
    # an amino acid residue name even if that molecule is not an amino acid,
    # e.g. C(CO)N is not Gly) and save the altered object as a pdb.
    for residue in mdtraj_trajectory.topology.residues:
        _generate_residue_name(residue, molecule.to_smiles())

    return mdtraj_trajectory
20,885
def display_notification(video_source, requested_target, remote_domain): """Show notification to the user""" notification_ui = notification.Notification() notification_ui.video_source = video_source notification_ui.requested_target = requested_target notification_ui.remote_domain = remote_domain notification_ui.show()
20,886
def preprocess(image): """Load and preprocess image.""" # Create the array of the right shape to feed into the keras model data = [] size = (96, 96) image = ImageOps.fit(image, size, Image.ANTIALIAS) image = np.asarray(image) x = preprocess_input(image) data.append(x) data = np.array(data) return data
20,887
def create_images(link_array, c_id): """ Inserts every image in the array with a category Parameters ---------- link_array : array an array of links. c_id : int id of a category """ for link in link_array: Image.create_image(link=link[0], description='', attribute=link[1], c_id=c_id)
20,888
def move_to(obj, device): """Credit: https://discuss.pytorch.org/t/pytorch-tensor-to-device-for-a-list-of-dict/66283 Arguments: obj {dict, list} -- Object to be moved to device device {torch.device} -- Device that object will be moved to Raises: TypeError: object is of type that is not implemented to process Returns: type(obj) -- same object but moved to specified device """ if torch.is_tensor(obj): return obj.to(device) elif isinstance(obj, dict): res = {k: move_to(v, device) for k, v in obj.items()} return res elif isinstance(obj, list): return [move_to(v, device) for v in obj] elif isinstance(obj, tuple): return tuple(move_to(list(obj), device)) else: raise TypeError("Invalid type for move_to")
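# A short usage sketch for the helper above; the tensor names and shapes are
# illustrative assumptions.
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch = {'input_ids': torch.zeros(2, 8, dtype=torch.long),
         'masks': [torch.ones(2, 8), torch.ones(2, 8)]}
batch = move_to(batch, device)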
20,889
def split(C, dims, axis=1):
    """
    Splits the columns or rows of C.

    Suppose C = [X_1, X_2, ..., X_B] is an (n x sum_b d_b) matrix.
    Returns the constituent matrices as a list.

    Parameters
    ----------
    C: array-like, shape (n, sum_b d_b)
        The concatenated block matrix.

    dims: list of ints
        The dimensions of each matrix i.e. [d_1, ..., d_B]

    axis: int [0, 1]
        Which axis to split (1 means columns, 0 means rows)

    Output
    ------
    blocks: list of array-like
        [X_1, X_2, ..., X_B]
    """
    idxs = np.append([0], np.cumsum(dims))

    blocks = []
    if axis == 1:
        assert idxs[-1] == C.shape[1]
        for b in range(len(dims)):
            blocks.append(C[:, idxs[b]:idxs[b + 1]])

    elif axis == 0:
        for b in range(len(dims)):
            blocks.append(C[idxs[b]:idxs[b + 1], :])

    else:
        raise ValueError('axis must be either 0 or 1')

    return blocks
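# Illustration of the column-wise split above with made-up block sizes.
import numpy as np

C = np.hstack([np.ones((5, 2)), np.zeros((5, 3))])   # shape (5, 5)
X_1, X_2 = split(C, dims=[2, 3], axis=1)
print(X_1.shape, X_2.shape)   # (5, 2) (5, 3)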
20,890
def test_underscored_number( parse_tokens, assert_errors, default_options, code, ): """Ensures that underscored numbers raise a warning.""" file_tokens = parse_tokens(code) visitor = WrongPrimitivesVisitor(default_options, file_tokens=file_tokens) visitor.run() assert_errors(visitor, [UnderscoredNumberViolation])
20,891
def write_xyz_file_from_structure(struct, filename, labels=True): """ From a StructureData, returns an xyz file located in `filename` absolute path. """ xyz_tuple = struct._prepare_xyz() if labels: # We add the labels open(filename, 'wb').write(xyz_tuple[0]) else: # We need to remove the labels # First, turn bytes into string and split lines = xyz_tuple[0].decode().split("\n") with open(filename, "w") as fileo: # # Write first two lines # fileo.write("{}\n".format(lines[0])) fileo.write("{}\n".format(lines[1])) # # Write only positions # for line in lines[2:]: pos = line.split()[1:] fileo.write("{} {} {}\n".format(pos[0], pos[1], pos[2]))
20,892
def build_binary_value(char_str, bits, alphabet) -> str: """ This method converts a string char_str into binary, using n bits per character and decoding from the supplied alphabet or from ASCII when bits=7 This is almost the inverse method to build_string in the decompress module. :param char_str: string. :param bits: number of bits per character. :param alphabet: Alphabet. :return: binary value. """ if bits == 7: indices = [ord(char_) for char_ in char_str] else: indices = [alphabet.index(char_) for char_ in char_str] binary_char_list = ["{0:b}".format(index).zfill(bits) for index in indices] return ''.join(binary_char_list)
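# A small worked example of the encoder above. With bits=7 the ASCII code of
# each character is used directly, so no alphabet lookup is needed; the
# four-letter alphabet in the second call is an illustrative assumption.
print(build_binary_value('Hi', bits=7, alphabet=None))              # '10010001101001'
print(build_binary_value('ba', bits=2, alphabet=['a', 'b', 'c', 'd']))  # '0100'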
20,893
def computeAnswer(inputData): """Compute the answer to the task, from the input data.""" # Do some calculations on the inputData answer = str(int(inputData) * 2) # EDIT ME (remove this line once done) return answer
20,894
def label_schema_matching( df, endpoint=DBpedia, uri_data_model=False, to_lowercase=True, remove_prefixes=True, remove_punctuation=True, prefix_threshold=1, progress=True, caching=True): """A schema matching method by checking for attribute -- rdfs:label between links. Args: df (pd.DataFrame): The dataframe where matching attributes are supposed to be found. endpoint (Endpoint, optional): SPARQL Endpoint to be queried. Defaults to DBpedia. uri_data_model (bool, optional): If enabled, the URI is directly queried instead of a SPARQL endpoint. Defaults to False. to_lowercase (bool, optional): Converts queried strings to lowercase. Defaults to True. remove_prefixes (bool, optional): Removes prefices of queried strings. Defaults to True. remove_punctuation (bool, optional): Removes punctuation from queried strings. Defaults to True. prefix_threshold (int, optional): The number of occurences after which a prefix is considered "common". Defaults to 1. progress (bool, optional): If True, progress bars will be shown to inform the user about the progress made by the process (if "uri_data_model" = True). Defaults to True. caching (bool, optional): Turn result-caching for queries issued during the execution on or off. Defaults to True. Returns: pd.DataFrame: Two columns with matching links and a third column with the overlapped label. """ matches = pd.DataFrame( columns=["uri_1", "uri_2", "same_label"]) # Get URIs from the column names cat_cols = [col for col in df.columns if re.findall("https*:", col)] cat_cols_stripped = [re.sub(r"^.*http://", "http://", col) for col in cat_cols] # transform attributes to sparql values list form values = "(<"+pd.Series(cat_cols_stripped).str.cat(sep=">) (<")+">) " if uri_data_model: # Query these URIs for the label query = "SELECT ?value ?o WHERE {VALUES (?value) {(<**URI**>)} ?value rdfs:label ?o. FILTER (lang(?o) = 'en') }" labels = uri_querier(pd.DataFrame(cat_cols_stripped), 0, query, progress = progress, caching=caching).drop_duplicates().set_index("value") else: query = "SELECT ?value ?o WHERE {VALUES (?value) {" + values + \ "} ?value rdfs:label ?o. FILTER (lang(?o) = 'en') }" # query the equivalent classes/properties labels = endpoint_wrapper(query, endpoint, caching=caching).reset_index(drop=True) if labels.empty: return matches # Get common prefixes common_prefixes = get_common_prefixes(labels, prefix_threshold) # Clean the results (i.e. 
the labels) labels["o"] = labels["o"].apply(lambda x: clean_string( x, common_prefixes, to_lowercase, remove_prefixes, remove_punctuation)) # Create a dictionary if labels.index.name == "value": labels.reset_index(inplace=True) labels_dict = labels.set_index("value").T.to_dict("list") #check if there are no matches tmp = set() for v in labels_dict.values(): tmp.update(v) if len(labels_dict) == len(tmp): combinations = list(itertools.combinations(cat_cols_stripped,2)) combinations_sorted = [sorted(x) for x in combinations] matches = pd.DataFrame(combinations_sorted, columns=["uri_1", "uri_2"]) matches["same_label"] = 0 return matches else: # Combine the uris that have the same labels into a DataFrame new_labels_dict = collections.defaultdict(list) for key, values in labels_dict.items(): for i in values: new_labels_dict[i].append(key) df_labels = pd.DataFrame( list(new_labels_dict.values()), columns=["uri_1", "uri_2"]) #df_labels["same_label"] = pd.DataFrame(list(new_labels_dict.keys())) df_labels.dropna(inplace=True) # restrict the order of uris in one row for _, row in df_labels.iterrows(): new_match = {"uri_1": min(row["uri_1"], row["uri_2"]), "uri_2": max(row["uri_1"], row["uri_2"]), "same_label": 1} matches = matches.append(new_match, ignore_index=True) # Get back the uris that are not quired by rdfs:label and turn df into dict no_label = pd.DataFrame({"value": [ x for x in cat_cols_stripped if x not in list(labels["value"])], "o": np.nan}) labels = labels.append(no_label, ignore_index=True) full_labels_dict = labels.set_index("value").T.to_dict("list") # Create all unique combinations from the URIs, order them alphabetically and turn them into a DataFrame combinations = list(itertools.combinations(full_labels_dict.keys(), 2)) combinations_sorted = [sorted(x) for x in combinations] result = pd.DataFrame(combinations_sorted, columns=["uri_1", "uri_2"]) # merged with the non_matched combinations and drop duplicates for _, row in result.iterrows(): new_match = {"uri_1": min(row["uri_1"], row["uri_2"]), "uri_2": max(row["uri_1"], row["uri_2"]), "same_label": 0} matches = matches.append(new_match, ignore_index=True) matches.drop_duplicates( subset=["uri_1", "uri_2"], inplace=True, ignore_index=True) return matches
20,895
def write_feature_importance(model: int, fname: str): """Write sorted feature importance data to `fname`. Args: model: a Catboost model fname: destination """ importance = model.get_feature_importance(type=EFstrType.FeatureImportance) logging.vlog(1, "Feature importance returned %d values", importance.size) with open(fname, "w") as outp: nz = np.nonzero(importance) ordered = np.argsort(importance[nz[0]]) print("Feature,Importance", file=outp) for i in ordered[::-1]: print(f"{nz[0][i]},{round(importance[[nz[0][i]]][0], 4)}", file=outp)
20,896
def create_stats_table(stats, yaxes): """ Create data table with median statistics Parameters ---------- stats : :obj:`list` List of lists containing data stats for each iterations from :func:`ragavi.ragavi.stats_display` yaxes : :obj:`list` Contains y-axes for the current plot Returns ------- Bokeh column layout containing data table with stats """ # number of y-axes n_ys = len(yaxes) # number of fields, spws and corrs n_items = len(stats) // n_ys stats = np.array(stats) d_stats = dict( spw=stats[:n_items, 0], field=stats[:n_items, 1], corr=stats[:n_items, 2], ) # get the data in a more useable format datas = stats[:, 3].reshape(-1, n_items).T for y in range(n_ys): d_stats[yaxes[y]] = datas[:, y] source = ColumnDataSource(data=d_stats) cols = "spw field corr".split() + yaxes columns = [TableColumn(field=x, title=x.capitalize()) for x in cols] dtab = DataTable(source=source, columns=columns, fit_columns=True, height=150, max_height=180, max_width=600, sizing_mode="stretch_width") t_title = Div(text="Median Statistics") logger.debug("Stats table generated") return column([t_title, dtab], sizing_mode="stretch_both")
20,897
def get_last_month_date_dmy() -> str: """Returns last month date (dd/mm/yyyy for calls report).""" return (datetime.now() - timedelta(30)).date().strftime("%d/%m/%Y")
20,898
def irange(start, end): """Inclusive range from start to end (vs. Python insanity.) irange(1,5) -> 1, 2, 3, 4, 5""" return range( start, end + 1 )
20,899