content
stringlengths
22
815k
id
int64
0
4.91M
def dump_file(
    file_name: str,
    data: pd.DataFrame,
    header: typing.Optional[typing.List[str]] = None,
    vertical: bool = True,
) -> None:
    """Write a dataframe to *file_name*, optionally preceded by a header.

    Arguments:
        file_name - Name of the output file
        data - Pandas dataframe containing the data to dump
        header - List of header names (Default None)
        vertical - Stack the header vertical or horizontal (default vertical)

    Returns:
        None
    """
    if data.empty:
        raise IOError(f'Cannot write empty data to {file_name}')

    # Header entries are joined one-per-line (vertical) or tab-separated.
    separator = '\n' if vertical else '\t'
    head_text = '' if header is None else '{0}\n'.format(separator.join(header))

    # Write the header first, then append the tab-separated data rows.
    with open(file_name, 'w') as out:
        out.write(f'{head_text}')
    data.to_csv(file_name, sep='\t', header=False, index=False, mode='a')
31,600
def read_and_download_profile_information(id):
    """Fetch provisioning-profile information for a bundle id from App Store Connect.

    link: https://developer.apple.com/documentation/appstoreconnectapi/read_and_download_profile_information
    :param id: bundle_id
    :return: request result
    """
    query = {
        "fields[certificates]": "certificateType",
        "fields[devices]": "platform",
        "fields[profiles]": "profileType",
        "include": "bundleId, certificates, devices",
        "fields[bundleIds]": "app, bundleIdCapabilities, identifier, name, platform, profiles, seedId",
        "limit[devices]": 50,
        "limit[certificates]": 50,
    }
    response = request_core.GET(api.Profiles_API + '/' + id, query)
    print(response.text)
    return response
31,601
def test_vso_attribute_parse():
    """Make sure that Parsing of VSO attributes from HEK queries is accurate"""
    client = hek.HEKClient()
    hek_result = client.query(hekTime, hekEvent)
    vso_result = hek2vso.vso_attribute_parse(hek_result[0])
    record = hek_result[0]
    # Checking Time
    # TODO
    # Checking Observatory
    assert vso_result[1].value == record['obs_observatory']
    # Checking Instrument
    assert vso_result[2].value == record['obs_instrument']
    # Checking Wavelength: min and max both come from the mean wavelength
    expected_wave = record['obs_meanwavel'] * u.Unit(record['obs_wavelunit'])
    assert vso_result[3].min == expected_wave
    assert vso_result[3].max == expected_wave
    assert vso_result[3].unit == u.Unit('Angstrom')
31,602
def get_config(node):
    """Get the BIOS configuration.

    The BIOS settings look like::

        {'EnumAttrib': {'name': 'EnumAttrib',
                        'current_value': 'Value',
                        'pending_value': 'New Value', # could also be None
                        'read_only': False,
                        'possible_values': ['Value', 'New Value', 'None']},
         'StringAttrib': {'name': 'StringAttrib',
                          'current_value': 'Information',
                          'pending_value': None,
                          'read_only': False,
                          'min_length': 0,
                          'max_length': 255,
                          'pcre_regex': '^[0-9A-Za-z]{0,255}$'},
         'IntegerAttrib': {'name': 'IntegerAttrib',
                           'current_value': 0,
                           'pending_value': None,
                           'read_only': True,
                           'lower_bound': 0,
                           'upper_bound': 65535}}

    The above values are only examples. BIOS attributes exposed via this API
    will always be an enumerated, string, or integer attribute. All attributes
    carry ``name``, ``current_value``, ``pending_value`` (None means no pending
    value), and ``read_only``.  Enumerable attributes add ``possible_values``;
    string attributes add ``min_length``, ``max_length`` and ``pcre_regex``
    (which may be None); integer attributes add ``lower_bound`` and
    ``upper_bound``.

    :param node: an ironic node object.
    :raises: DracOperationError on an error from python-dracclient.
    :returns: a dictionary containing BIOS settings
    """
    drac = drac_common.get_drac_client(node)

    try:
        return drac.list_bios_settings()
    except drac_exceptions.BaseClientException as exc:
        # Log with context before translating to the ironic-level exception.
        LOG.error('DRAC driver failed to get the BIOS settings for node '
                  '%(node_uuid)s. Reason: %(error)s.',
                  {'node_uuid': node.uuid, 'error': exc})
        raise exception.DracOperationError(error=exc)
31,603
def plot_neural_reconstruction_traces(
        traces_ae, traces_neural, save_file=None, xtick_locs=None, frame_rate=None, format='png'):
    """Plot ae latents and their neural reconstructions.

    Parameters
    ----------
    traces_ae : :obj:`np.ndarray`
        shape (n_frames, n_latents)
    traces_neural : :obj:`np.ndarray`
        shape (n_frames, n_latents)
    save_file : :obj:`str`, optional
        full save file (path and filename)
    xtick_locs : :obj:`array-like`, optional
        tick locations in units of bins
    frame_rate : :obj:`float`, optional
        frame rate of behavorial video; to properly relabel xticks
    format : :obj:`str`, optional
        any accepted matplotlib save format, e.g. 'png' | 'pdf' | 'jpeg'

    Returns
    -------
    :obj:`matplotlib.figure.Figure`
        matplotlib figure handle
    """
    # Plotting deps are imported lazily so the module loads without them.
    import matplotlib.pyplot as plt
    import matplotlib.lines as mlines
    import seaborn as sns

    sns.set_style('white')
    sns.set_context('poster')

    # Normalize both trace sets with the AE traces' statistics so the two
    # remain directly comparable after scaling.
    means = np.mean(traces_ae, axis=0)
    std = np.std(traces_ae) * 2  # scale for better visualization

    traces_ae_sc = (traces_ae - means) / std
    traces_neural_sc = (traces_neural - means) / std
    # Only the first 8 latents are plotted, to keep the figure legible.
    traces_ae_sc = traces_ae_sc[:, :8]
    traces_neural_sc = traces_neural_sc[:, :8]

    fig = plt.figure(figsize=(12, 8))
    # Adding arange offsets each trace vertically by its latent index.
    plt.plot(traces_neural_sc + np.arange(traces_neural_sc.shape[1]), linewidth=3)
    plt.plot(
        traces_ae_sc + np.arange(traces_ae_sc.shape[1]), color=[0.2, 0.2, 0.2], linewidth=3,
        alpha=0.7)

    # add legend
    # original latents - gray
    orig_line = mlines.Line2D([], [], color=[0.2, 0.2, 0.2], linewidth=3, alpha=0.7)
    # predicted latents - cycle through some colors
    colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
    dls = []
    for c in range(5):
        dls.append(mlines.Line2D(
            [], [], linewidth=3, linestyle='--', dashes=(0, 3 * c, 20, 1),
            color='%s' % colors[c]))
    # The tuple of lines makes the legend draw several colored dashes as one entry.
    plt.legend(
        [orig_line, tuple(dls)], ['Original latents', 'Predicted latents'],
        loc='lower right', frameon=True, framealpha=0.7, edgecolor=[1, 1, 1])

    if xtick_locs is not None and frame_rate is not None:
        # Relabel bin-valued ticks as (integer) seconds.
        plt.xticks(xtick_locs, (np.asarray(xtick_locs) / frame_rate).astype('int'))
        plt.xlabel('Time (s)')
    else:
        plt.xlabel('Time (bins)')
    plt.ylabel('Latent state')
    plt.yticks([])

    if save_file is not None:
        # NOTE(review): make_dir_if_not_exists is defined elsewhere in the
        # project; presumably it creates save_file's parent directory.
        make_dir_if_not_exists(save_file)
        plt.savefig(save_file + '.' + format, dpi=300, format=format)

    plt.show()
    return fig
31,604
def get_only_metrics(results):
    """Turn dictionary of results into a list of metrics"""
    # Fixed extraction order: f1, precision, recall, loss.
    wanted = ("test/f1", "test/precision", "test/recall", "test/loss")
    return [results[key] for key in wanted]
31,605
def max_sub_array(nums):
    """Return the maximum subarray sum of the given list of numbers.

    Returns 0 if nums is None or an empty list.

    Uses Kadane's algorithm: at each element either extend the best subarray
    ending at the previous element or start a new subarray here.

    Time Complexity: O(n)
    Space Complexity: O(1)
    """
    # `is None` instead of `== None`; also covers the empty-list contract.
    if nums is None or len(nums) == 0:
        return 0
    best = current = nums[0]
    for value in nums[1:]:
        # Either extend the running subarray or restart at `value`.
        current = max(value, current + value)
        best = max(best, current)
    return best
31,606
def find_tags_containing(project, commit):
    """Find all tags containing the given commit.

    Returns the full list and a condensed list (excluding tags 'after' other
    tags in the list).
    """
    tags = run_list_command(['git', 'tag', '--contains', commit], project)
    # The packaging projects had a different format for older tags.
    if project in ['acs-packaging', 'acs-community-packaging']:
        # Remove the prefix 'acs-packaging-' if it's present.
        prefix = '{}-'.format(project)
        tags = [tag.replace(prefix, '') for tag in tags]
    # Exclude tags that aren't just chains of numbers with an optional suffix.
    tags = [tag for tag in tags if re.match(version_filter, tag)]
    # Filter out tags that are before other tags.
    return tags, reduce_tags(tags)
31,607
def setup_nupack_input(**kargs):
    """Build the NUPACK invocation: command tokens plus stdin text.

    Returns the list of tokens specifying the command to be run in the pipe,
    and the command-line input to be given to NUPACK. Note that individual
    functions below may modify args or cmd_input depending on their specific
    usage specification.
    """
    # Terms of the command-line executable call.
    tokens = setup_args(**kargs)
    # Input fed to NUPACK on its command line / stdin.
    stdin_text = setup_cmd_input(
        kargs['multi'], kargs['sequences'], kargs['ordering'],
        kargs.get('structure', ''))
    return (tokens, stdin_text)
31,608
def rebuild_current_distribution(
    fields: np.ndarray,
    ics: np.ndarray,
    jj_size: float,
    current_pattern: List[Union[Literal["f"], str]],
    # NOTE(review): mutable default argument — shared across calls; a tuple or
    # None-sentinel would be safer. Left unchanged here.
    sweep_invariants: List[Union[Literal["offset"], Literal["field_to_k"]]] = [
        "offset",
        "field_to_k",
    ],
    precision: float = 100,
    n_points: int = 2 ** 10 + 1,
) -> dict:
    """Rebuild a current distribution from a Fraunhofer pattern.

    This assumes a uniform field focusing since allowing a non uniform
    focusing would lead to a much larger space to explore.

    Parameters
    ----------
    fields : np.ndarray
        Out of plane field for which the critical current was measured.
    ics : np.ndarray
        Critical current of the junction.
    jj_size : float
        Size of the junction.
    current_pattern : List[Union[Literal["f"], str]]
        Describe in how many pieces to use to represent the junction. If the
        input arrays are more than 1D, "f" means that value is the same across
        all outer dimension, "v" means that the slice takes different value
        for all outer dimension (ie. one value per sweep).
    sweep_invariants : List[Union[Literal["offset"], Literal["field_to_k"]]]
        Indicate what quantities are invariants across sweep for more the 1D
        inputs.
    precision : float, optional
        Passed to the nested sampler as the ``dlogz`` stopping criterion.
    n_points : int, optional
        NOTE(review): currently unused — the Fraunhofer computation below
        hard-codes ``2 ** 10 + 1``; presumably n_points was meant there.

    Returns
    -------
    dict
        Nested-sampling results augmented with a "fraunhofer_params" entry.
    """
    # Get the offset and estimated amplitude used in the prior
    # We do not use the estimated current and phase distribution to give the
    # more space to the algorithm.
    # NOTE(review): `field`, `fraunhofer` and `site_number` are not defined in
    # this scope (the parameters are named `fields` and `ics`); as written
    # this line raises NameError — confirm the intended arguments.
    offsets, first_node_locs, _, _, _ = guess_current_distribution(
        field, fraunhofer, site_number, jj_size
    )
    # Gives a Fraunhofer pattern at the first node for v[1] = 1
    field_to_ks = 2 * np.pi / jj_size / np.abs(first_node_locs - offsets)

    # Determine the dimensionality of the problem based on the invariants and
    # the shape of the inputs.
    if len(sweep_invariants) > 2:
        raise ValueError("There are at most 2 invariants.")
    if any(k for k in sweep_invariants if k not in ("offset", "field_to_k")):
        raise ValueError(
            f"Invalid invariant specified {sweep_invariants}, "
            "valid values are 'offset', 'field_to_k'."
        )

    # All but the innermost axis index independent sweeps.
    shape = fields.shape[:-1]
    shape_product = prod(shape) if shape else 0

    if shape_product == 0 and any(p.startswith("v") for p in current_pattern):
        raise ValueError(
            "Found variable current in the distribution but the measurements are 1D."
        )

    # Total number of sampled dimensions: invariants count once, "f" segments
    # once, and per-sweep quantities once per sweep.
    dim = len(sweep_invariants) + current_pattern.count("f")
    dim += shape_product * (current_pattern.count("v") + 2 - len(sweep_invariants))

    # Pre-compute slices to access elements in the prior and log-like
    offset_access = slice(
        0, 1 if "offset" in sweep_invariants else (shape_product or 1)
    )
    # NOTE(review): by operator precedence the else-branch of this conditional
    # is just `(shape_product or 1)` without `offset_access.stop +` — the slice
    # can end before it starts; presumably parentheses are missing.
    field_to_k_access = slice(
        offset_access.stop,
        offset_access.stop + 1
        if "field_to_k" in sweep_invariants
        else (shape_product or 1),
    )
    stop = field_to_k_access.stop

    current_density_accesses = []
    for p in current_pattern:
        if p == "f":
            current_density_accesses.append(slice(stop, stop + 1))
            stop += 1
        elif p == "v":
            current_density_accesses.append(slice(stop, stop + (shape_product or 1)))
            # NOTE(review): this adds the slice's absolute `.stop` rather than
            # its length — looks like it should be `+= shape_product or 1`.
            stop += current_density_accesses[-1].stop
        else:
            raise ValueError(
                f"Valid values in current_pattern are 'f' and 'v', found '{p}'"
            )

    def prior(u):
        """Map the sampled in 0-1 to the relevant values range.

        For all values we consider the values in the prior to be the log of
        the values we are looking for.
        """
        v = np.empty_like(u)
        # Offset and field_to_k are mapped from [0, 1] to [-2, 2] (log scale).
        v[offset_access] = 4 * u[offset_access] - 2
        v[field_to_k_access] = 4 * u[field_to_k_access] - 2
        # NOTE(review): `stop` and `step` are not defined in this function;
        # `stop += step` makes `stop` local and raises UnboundLocalError —
        # this line appears to be a leftover and dead as soon as it runs.
        stop += step
        # For all the amplitude we map the value between 0 and -X since the
        # amplitude of a single segment cannot be larger than the total current
        # X is determined based on the number of segments
        ampl = -np.log10(len(current_pattern))
        for sl in current_density_accesses:
            v[sl] = u[sl] * ampl
        return v

    def loglike(v):
        """Compute the distance to the data"""
        # We turn invariant input into their variant form (from 1 occurence in v
        # to n repetition in w) to ease a systematic writing of the loglike.
        stop = step = shape_product or 1
        w = np.empty((2 + len(current_pattern)) * (shape_product or 1))
        # NOTE(review): duplicate of the assignment two lines above.
        stop = step = shape_product or 1
        w[0:stop] = w_offset = v[offset_access]
        w[stop : stop + step] = w_f2k = v[field_to_k_access]
        stop += step
        for sl in current_density_accesses:
            w[stop : stop + step] = v[sl]

        # Pack the current distribution so that each line corresponds to
        # different conditions
        c_density = w[stop + step :].reshape((len(current_pattern), -1)).T

        err = np.empty_like(ics)
        it = np.nditer((offsets, first_node_locs, field_to_ks), ["multi_index"])
        for i, (off, fnloc, f2k) in enumerate(it):
            # Compute the offset
            # NOTE(review): `w_off` is undefined — the variable above is named
            # `w_offset`; this raises NameError as written.
            f_off = off + np.sign(w_off[i]) * 10 ** -abs(w_off[i]) * fnloc
            # Compute the Fraunhofer pattern
            f = produce_fraunhofer_fast(
                (fields[it.multi_index] - f_off[i]),
                f2k * 10 ** w_f2k[i],
                jj_size,
                c_density[i],
                2 ** 10 + 1,
            )
            # Compute and store the error
            # NOTE(review): `amplitude` is not defined anywhere in this
            # function — confirm where it was meant to come from.
            err[it.multi_index] = np.sum(
                (100 * (ics[it.multi_index] - f) / amplitude) ** 2
            )

        return -np.ravel(err)

    # XXX do that nasty part later
    sampler = NestedSampler(loglike, prior, dim)
    sampler.run_nested(dlogz=precision)
    res = sampler.results
    # Importance weights for the posterior mean/covariance.
    weights = np.exp(res.logwt - res.logz[-1])
    mu, cov = utils.mean_and_cov(res["samples"], weights)

    # NOTE(review): `offset`, `first_node_loc`, `amplitude` and `site_number`
    # (singular) are undefined here — only the plural arrays exist above.
    res["fraunhofer_params"] = {
        "offset": offset + np.sign(mu[0]) * 10 ** -abs(mu[0]) * first_node_loc,
        "field_to_k": 2 * np.pi / jj_size / abs(first_node_loc - offset) * 10 ** mu[1],
        "amplitude": amplitude * 10 ** mu[2],
        "current_distribution": np.array(
            [1 - np.sum(mu[3 : 3 + site_number - 1])]
            + list(mu[3 : 3 + site_number - 1])
        ),
        "phase_distribution": np.array(
            [0] + list(mu[3 + site_number - 1 : 3 + 2 * site_number - 2])
        ),
    }

    return res
31,609
def main():
    """Make a jazz noise here"""
    args = get_args()
    random.seed(args.seed)

    # Apply `choose` to every character and glue the results back together.
    print(''.join(map(choose, args.text)))
31,610
def get_LCA(index, item1, item2): """Get lowest commmon ancestor (including themselves)""" # get parent list from if item1 == item2: return item1 try: return LCA_CACHE[index][item1 + item2] except KeyError: pass parent1 = ATT_TREES[index][item1].parent[:] parent2 = ATT_TREES[index][item2].parent[:] parent1.insert(0, ATT_TREES[index][item1]) parent2.insert(0, ATT_TREES[index][item2]) min_len = min(len(parent1), len(parent2)) last_LCA = parent1[-1] # note here: when trying to access list reversely, take care of -0 for i in range(1, min_len + 1): if parent1[-i].value == parent2[-i].value: last_LCA = parent1[-i] else: break LCA_CACHE[index][item1 + item2] = last_LCA.value return last_LCA.value
31,611
def select_workspace_access(cursor, workspace_id):
    """Fetch workspace access rows for one workspace.

    Args:
        cursor (mysql.connector.cursor): database cursor
        workspace_id (int): workspace ID

    Returns:
        dict: select result rows
    """
    # Parameterized query keeps the id safely escaped.
    cursor.execute(
        'SELECT * FROM workspace_access WHERE workspace_id = %(workspace_id)s',
        {'workspace_id': workspace_id}
    )
    return cursor.fetchall()
31,612
def pkcs7_unpad(data):
    """Remove the padding bytes that were added at point of encryption.

    Implementation copied from pyaspora:
    https://github.com/mjnovice/pyaspora/blob/master/pyaspora/diaspora/protocol.py#L209
    """
    # The final element encodes how many padding bytes were appended; indexing
    # a str yields a character (needs ord) while bytes yields an int directly.
    pad_len = ord(data[-1]) if isinstance(data, str) else data[-1]
    return data[0:-pad_len]
31,613
def leveinshtein_distance(source, target):
    """Implement leveintein distance algorithm as described in the reference.

    Returns the minimum number of single-character insertions, deletions and
    substitutions needed to turn `source` into `target` (Wagner-Fischer
    dynamic programming, O(len(source) * len(target)) time).
    """
    # Step 1: empty-string shortcuts.
    s_len = len(source)
    t_len = len(target)
    if s_len == 0:
        return t_len
    if t_len == 0:
        return s_len
    # Step 2: (s_len+1) x (t_len+1) matrix with the trivial prefix distances.
    matrix = [[0 for _ in range(0, t_len + 1)] for _ in range(0, s_len + 1)]
    # Initialize first column 0..s_len
    for idx in range(0, s_len + 1):
        matrix[idx][0] = idx
    # Initialize the first row 0..t_len
    for idx in range(0, t_len + 1):
        matrix[0][idx] = idx
    # Steps 3-6: fill each cell from its three neighbours.
    for i in range(1, s_len + 1):
        ch = source[i - 1]
        for j in range(1, t_len + 1):
            # Step 5: substitution is free when the characters match.
            cost = 0 if ch == target[j - 1] else 1
            # Step 6: cheapest of deletion, insertion, substitution.
            matrix[i][j] = min(
                matrix[i - 1][j] + 1,
                matrix[i][j - 1] + 1,
                matrix[i - 1][j - 1] + cost,
            )
    # Bug fix: the answer lives in the last row/column of the (s_len+1) x
    # (t_len+1) matrix; the previous [s_len-1][t_len-1] returned the distance
    # of the two strings with their final characters chopped off.
    return matrix[s_len][t_len]
31,614
def minus(s):
    """Replace the last minus sign in *s* with an equals sign."""
    # Split once from the right; indexing [1] intentionally raises IndexError
    # when no '-' is present, matching the original contract.
    pieces = s.rsplit('-', 1)
    return '{0}={1}'.format(pieces[0], pieces[1])
31,615
def _chk_y_path(tile): """ Check to make sure tile is among left most possible tiles """ if tile[0] == 0: return True return False
31,616
def json_project_activities(request):
    """Return a project's activity items as a JSON response."""
    when = int(request.GET['dt'])
    pid = int(request.GET['id'])
    project = get_object_or_404(Project, id=pid)
    payload = []
    for item in project.items(when):
        payload.append({
            "username": item.username,
            "tags": [tag['name'] for tag in item.tags.values()],
            "type": item.type,
            "source": item.source,
            "title": item.title,
            "subtitle": item.subtitle,
            "dt": "just now",
        })
    return HttpResponse(simplejson.dumps(payload), mimetype='application/javascript')
31,617
def to_complex_matrix(matrix: np.ndarray) -> List:
    """Convert regular matrix to matrix of ComplexVals.

    :param matrix: any matrix.
    :return: Complex matrix (nested lists of ComplexVal).
    """
    output: List[List] = matrix.tolist()
    for i in range(len(matrix)):
        for j in range(len(matrix[i])):
            value = matrix[i, j]
            # isinstance is the idiomatic (and subclass-safe) replacement for
            # the `type(x) == T` checks; it covers complex and np.complex128.
            if isinstance(value, (complex, np.complex128)):
                output[i][j] = ComplexVal(value.real, value.imag)
            else:
                output[i][j] = ComplexVal(value)
    return output
31,618
def skipIfDarwin(func):
    """Decorate the item to skip tests that should be skipped on Darwin."""
    # Translate the Darwin platform group and delegate to the generic skipper.
    return skipIfPlatform(lldbplatform.translate(lldbplatform.darwin_all))(func)
31,619
def load_dataframe(csv_path: PathLike) -> Tuple[str, pd.DataFrame]:
    """Returns a tuple (name, data frame).

    The name is the file's stem (filename without extension).

    Used to construct a data set by `load_dataframes_from_directory`.

    See:
        load_dataframes_from_directory
        Dataset
    """
    stem = Path(csv_path).stem
    frame = pd.read_csv(csv_path)
    return stem, frame
31,620
def polar(z): # real signature unknown; restored from __doc__
    """
    polar(z) -> r: float, phi: float

    Convert a complex from rectangular coordinates to polar coordinates. r is
    the distance from 0 and phi the phase angle.
    """
    # IDE stub restored from the C implementation's docstring; intentionally
    # has no Python body (the real implementation lives in the cmath C module).
    pass
31,621
def VerifyReleaseChannel(options):
    """Verify that release image channel is correct.

    ChromeOS has four channels: canary, dev, beta and stable.
    The last three channels support image auto-updates, checks
    that release image channel is one of them.
    """
    # Thin wrapper: delegate to the Gooftool instance built from `options`.
    return GetGooftool(options).VerifyReleaseChannel(
        options.enforced_release_channels)
31,622
def make_start_script(cmd, repo, anaconda_path, env, install_pip=(), add_swap_file=False):
    """My basic startup template formatter

    Parameters
    ----------
    cmd : str
        The actual command to run.
    repo : str
        The repository
    anaconda_path : str
        The anaconda path on my AMI.
    env : str
        The anaconda environment.
    install_pip : list of str
        Some last-minute packages that are missing on my AMI.
    add_swap_file : bool, int
        Need a swapfile? No problem. Tell me your size.
    """
    # Optional swapfile section; empty string keeps it out of the template.
    swapfile_cmd = _base_swap_tmp.format(add_swap_file=add_swap_file) if add_swap_file else ''

    # One pip-install line per requested package ('' when none requested).
    pip_section = '\n'.join(
        '{anaconda_path}/bin/pip install {package}'.format(
            anaconda_path=anaconda_path, package=package)
        for package in install_pip)

    return _base_cmd_tmp.format(
        anaconda_path=anaconda_path,
        install_pip=pip_section,
        swapfile_cmd=swapfile_cmd,
        repo=repo,
        env=env,
        cmd=cmd)
31,623
def main(argv=sys.argv):
    """Main method called by the eggsecutable."""
    # NOTE(review): `argv` is accepted but never used; presumably kept for the
    # setuptools entry-point signature — confirm before removing.
    try:
        utils.vip_main(ModelicaAgent)
    except Exception as ex:
        # Top-level boundary: log any agent failure instead of crashing the
        # launcher process.
        log.exception(ex)
31,624
def radius_hpmap(glon, glat, R_truncation, Rmin, Npt_per_decade_integ, nside=2048, maplonlat=None):
    """
    Compute a radius map in healpix

    Parameters
    ----------
    - glon/glat (deg): galactic longitude and latitude in degrees
    - R_truncation (quantity): the truncation radius
    - Rmin (quantity): the minimum radius
    - nside (int): healpix Nside
    - Npt_per_decade_integ (int): the number of point per decade
    - maplonlat (2d tuple of np.array): healpix maps of galactic longitude and
      latitude which can be provided to save time in case of repeated
      computation

    Returns
    -------
    - radius (array): the radius array from Rmin to R_truncation
    - dist_map (array): distance map from center
    - maplon/lat (array): longitude and latidute maps

    Raises
    ------
    ImportError if healpy is not available.
    """
    # Bug fix: the previous bare `except:` only printed a message and then
    # fell through to a NameError on first healpy use; fail explicitly.
    try:
        import healpy
    except ImportError as err:
        raise ImportError(
            "Healpy is not installed while it is requiered by get_*_hpmap"
        ) from err

    # Get a coord map
    if maplonlat is None:
        npix = healpy.nside2npix(nside)
        # NOTE(review): linspace(0, npix, npix) includes npix and skips some
        # indices; np.arange(npix) looks intended — confirm before changing.
        ipix = np.linspace(0, npix, npix, dtype=int)
        angle = healpy.pix2ang(nside, ipix, lonlat=False)
        maplon = angle[1] * 180.0 / np.pi
        maplat = 90.0 - angle[0] * 180.0 / np.pi
    else:
        maplon = maplonlat[0]
        maplat = maplonlat[1]

    # Get a cluster distance map (in deg)
    dist_map = map_tools.greatcircle(maplon, maplat, glon, glat)
    dist_map[np.isnan(dist_map)] = 180.0  # some pixels are NaN for dist = 180

    # Define the radius used fo computing the profile
    radius = sampling_array(Rmin, R_truncation, NptPd=Npt_per_decade_integ, unit=True)

    return radius, dist_map, maplon, maplat
31,625
def convert_grayscale_image_to_pil(image):
    """Converts a 2D grayscale image into a PIL image.

    Args:
        image (numpy.ndarray[uint8]): The image to convert.

    Returns:
        PIL.Image: The converted image.
    """
    # Replicate the single channel three times along a new last axis.
    rgb = np.repeat(image[:, :, None], 3, 2)
    return Image.fromarray(rgb).convert('RGBA')
31,626
async def test_add_run_task() -> None:
    """It should be able to add a task for the "run" phase."""
    ran = False

    async def run_task() -> None:
        nonlocal ran
        ran = True

    queue = TaskQueue()
    queue.add(phase=TaskQueuePhase.RUN, func=run_task)
    queue.start()
    await queue.join()

    assert ran is True
31,627
def delete_group(group_id, tasks=False, cached=Conf.CACHED):
    """
    Delete a group.

    :param str group_id: the group id
    :param bool tasks: If set to True this will also delete the group tasks.
    Otherwise just the group label is removed.
    :param bool cached: run this against the cache backend
    :return:
    """
    # Cache backend and ORM backend take different deletion paths.
    if not cached:
        return Task.objects.delete_group(group_id, tasks)
    return delete_group_cached(group_id)
31,628
def alphanum_key(string):
    """Return a comparable list with extracted number segments.

    Digit runs are converted to ints so that e.g. 'item2' sorts before
    'item10' when used as a sort key.

    Adapted from: http://stackoverflow.com/a/2669120/176978
    """
    def convert(segment):
        # Digit runs compare numerically; everything else compares as text.
        return int(segment) if segment.isdigit() else segment

    # The capturing group makes re.split keep the digit runs in the output.
    return [convert(segment) for segment in re.split('([0-9]+)', string)]
31,629
def merge_data(attribute_column, geography, chloropleth, pickle_dir):
    """Merges geometry geodataframe with chloropleth attribute data.

    Inputs: dataframe or csv file name for data desired to be choropleth
    Outputs: dataframe
    """
    geo_frame = load_pickle(pickle_dir, geography)
    attr_frame = load_pickle(pickle_dir, chloropleth)
    # Normalize the attribute columns so the join key lines up.
    attr_frame.columns = ['key', attribute_column]
    return geo_frame.merge(attr_frame, on='key', how='left')
31,630
def get_ls8_image_collection(begin_date, end_date, aoi=None):
    """
    Calls the GEE API to collect scenes from the Landsat 8 Tier 1 Surface
    Reflectance Libraries (docstring fix: LC08 is Landsat 8, not 7)

    :param begin_date: Begin date for time period for scene selection
    :param end_date: End date for time period for scene selection
    :param aoi: Optional, only select scenes that cover this aoi
    :return: cloud masked GEE image collection
    """
    # Build the common pipeline once instead of duplicating both branches.
    collection = (ee.ImageCollection('LANDSAT/LC08/C01/T1_SR')
                  .filterDate(begin_date, end_date)
                  .select('B2', 'B3', 'B4', 'B5', 'B6', 'B10', 'B7', 'pixel_qa'))
    if aoi is not None:
        # Restrict to scenes overlapping the area of interest.
        collection = collection.filterBounds(aoi)
    return collection.map(rename_ls_bands).map(cloud_mask_ls8)
31,631
def multi_halo(n_halo):
    """
    Repeat the halo generator `n_halo` times to accumulate an equivalent
    amount of haloes.
    """
    radii = []
    thetas = []
    phis = []
    for _ in range(n_halo):
        r, theta, phi = one_halo(100)
        radii.append(r)
        thetas.append(theta)
        phis.append(phi)
    return radii, thetas, phis
31,632
def allocation_proportion_of_shimenwpp():
    """
    Real Name: Allocation Proportion of ShiMenWPP
    Original Eqn: Allocation ShiMen WPP/Total WPP Allocation
    Units: m3/m3
    Limits: (None, None)
    Type: component
    Subs: None
    """
    # Auto-generated (pysd-style) model component: the ratio of ShiMen WPP's
    # allocation to the total WPP allocation.
    return allocation_shimen_wpp() / total_wpp_allocation()
31,633
def update_cfg(base_cfg, update_cfg):
    """used for mmcv.Config or other dict-like configs.

    Returns a deep copy of `base_cfg` with `update_cfg`'s entries applied;
    neither input is mutated.
    """
    merged = copy.deepcopy(base_cfg)
    merged.update(update_cfg)
    return merged
31,634
def check(conn, command, exit=False, timeout=None, **kw):
    """
    Execute a remote command with ``subprocess.Popen`` but report back the
    results in a tuple with three items: stdout, stderr, and exit status.

    This helper function *does not* provide any logging as it is the caller's
    responsibility to do so.

    :param conn: execnet-style connection wrapper (provides cmd/execute/logger).
    :param command: the remote command; rewritten by ``conn.cmd`` before use.
    :param exit: when True, close the connection after the command completes.
    :param timeout: seconds to wait for a response; falls back to
        ``conn.global_timeout`` when falsy.
    :param kw: extra keyword arguments forwarded to ``conn.execute``;
        ``stop_on_error`` (default True) is popped and handled here.
    """
    command = conn.cmd(command)
    stop_on_error = kw.pop('stop_on_error', True)
    timeout = timeout or conn.global_timeout
    if not kw.get('env'):
        # get the remote environment's env so we can explicitly add
        # the path without wiping out everything
        kw = extend_env(conn, kw)
    conn.logger.info('Running command: %s' % ' '.join(admin_command(conn.sudo, command)))
    result = conn.execute(_remote_check, cmd=command, **kw)
    response = None
    try:
        response = result.receive(timeout)
    except Exception as err:
        # the things we need to do here :(
        # because execnet magic, we cannot catch this as
        # `except TimeoutError`
        if err.__class__.__name__ == 'TimeoutError':
            msg = 'No data was received after %s seconds, disconnecting...' % timeout
            conn.logger.warning(msg)
            # there is no stdout, stderr, or exit code but make the exit code
            # an error condition (non-zero) regardless
            return [], [], -1
        else:
            # Any other remote failure: reconstruct the remote traceback and
            # log it line by line (RuntimeErrors get a condensed one-liner).
            remote_trace = traceback.format_exc()
            remote_error = RemoteError(remote_trace)
            if remote_error.exception_name == 'RuntimeError':
                conn.logger.error(remote_error.exception_line)
            else:
                for tb_line in remote_trace.split('\n'):
                    conn.logger.error(tb_line)
            if stop_on_error:
                raise RuntimeError(
                    'Failed to execute command: %s' % ' '.join(command)
                )
    if exit:
        conn.exit()
    # NOTE(review): when the remote call failed and stop_on_error is False,
    # `response` is still None here — callers must handle that case.
    return response
31,635
def domains_configured(f):
    """Wraps API calls to lazy load domain configs after init.

    This is required since the assignment manager needs to be initialized
    before this manager, and yet this manager's init wants to be able to
    make assignment calls (to build the domain configs).  So instead, we
    check if the domains have been initialized on entry to each call, and
    if requires load them,
    """
    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        # Lazily build the domain-specific drivers on first use, but only
        # when the deployment has enabled them.
        if (not self.domain_configs.configured and
                CONF.identity.domain_specific_drivers_enabled):
            # Deliberate warning: this configuration is experimental.
            LOG.warning(_(
                'Running an experimental and unsupported configuration '
                '(domain_specific_drivers_enabled = True); '
                'this will result in known issues.'))
            self.domain_configs.setup_domain_drivers(
                self.driver, self.assignment_api)
        return f(self, *args, **kwargs)
    return wrapper
31,636
def test_append(dataset, append_args, n_files):
    """
    Note: ETL will fail for append, because... it's a text file. But also
    because the destination will likely be removed before the ETL actually
    places the new node there.
    """
    # TimeSeries package to append into...
    ts_pkg = TimeSeries("Rando Timeseries")
    dataset.add(ts_pkg)

    # upload/append file into package
    ts_pkg.append_files(*append_args)

    # TODO: assert append was successful
31,637
def add_modified_tags(original_db, scenarios):
    """
    Add a `modified` label to any activity that is new
    Also add a `modified` label to any exchange that has been added
    or that has a different value than the source database.

    :param original_db: source database to compare against.
    :param scenarios: list of scenario dicts (with "database", "model",
        "pathway", "year" keys); mutated in place and returned.
    :return: the scenarios list, with `modified` flags applied.
    """
    # Class `Export` to which the original database is passed
    exp = Export(original_db)
    # Collect a dictionary of activities {row/col index in A matrix: code}
    rev_ind_A = rev_index(create_codes_index_of_A_matrix(original_db))
    # Retrieve list of coordinates [activity, activity, value]
    coords_A = exp.create_A_matrix_coordinates()
    # Turn it into a dictionary {(code of receiving activity, code of supplying activity): value}
    original = {(rev_ind_A[x[0]], rev_ind_A[x[1]]): x[2] for x in coords_A}
    # Collect a dictionary with activities' names and correponding codes
    codes_names = create_codes_and_names_of_A_matrix(original_db)
    # Collect list of substances
    rev_ind_B = rev_index(create_codes_index_of_B_matrix())
    # Retrieve list of coordinates of the B matrix [activity index, substance index, value]
    coords_B = exp.create_B_matrix_coordinates()
    # Turn it into a dictionary {(activity code, substance code): value}
    original.update({(rev_ind_A[x[0]], rev_ind_B[x[1]]): x[2] for x in coords_B})

    for s, scenario in enumerate(scenarios):
        print(f"Looking for differences in database {s + 1} ...")
        # Rebuild the same code->value maps for the scenario database.
        rev_ind_A = rev_index(create_codes_index_of_A_matrix(scenario["database"]))
        exp = Export(
            scenario["database"],
            scenario["model"],
            scenario["pathway"],
            scenario["year"],
            "",
        )
        coords_A = exp.create_A_matrix_coordinates()
        new = {(rev_ind_A[x[0]], rev_ind_A[x[1]]): x[2] for x in coords_A}

        rev_ind_B = rev_index(create_codes_index_of_B_matrix())
        coords_B = exp.create_B_matrix_coordinates()
        new.update({(rev_ind_A[x[0]], rev_ind_B[x[1]]): x[2] for x in coords_B})

        # Symmetric difference of receiving-activity codes: activities present
        # in only one of the two databases.
        list_new = set(i[0] for i in original.keys()) ^ set(i[0] for i in new.keys())

        ds = (d for d in scenario["database"] if d["code"] in list_new)

        # Tag new activities
        for d in ds:
            d["modified"] = True

        # List codes that belong to activities that contain modified exchanges
        list_modified = (i[0] for i in new if i in original and new[i] != original[i])

        #
        # Filter for activities that have modified exchanges
        # NOTE(review): `ds` is reused here as the loop variable, shadowing the
        # generator above — works, but confusing to read.
        for ds in ws.get_many(
            scenario["database"],
            ws.either(*[ws.equals("code", c) for c in set(list_modified)]),
        ):
            # Loop through biosphere exchanges and check if
            # the exchange also exists in the original database
            # and if it has the same value
            # if any of these two conditions is False, we tag the exchange
            # NOTE(review): `new[(ds["code"], exc["input"][0])]` is looked up
            # unconditionally in the second branch of the `or`; a biosphere
            # exchange missing from `new` would raise KeyError — confirm keys
            # are guaranteed present.
            excs = (exc for exc in ds["exchanges"] if exc["type"] == "biosphere")
            for exc in excs:
                if (ds["code"], exc["input"][0]) not in original or new[
                    (ds["code"], exc["input"][0])
                ] != original[(ds["code"], exc["input"][0])]:
                    exc["modified"] = True
            # Same thing for technosphere exchanges,
            # except that we first need to look up the provider's code first
            excs = (exc for exc in ds["exchanges"] if exc["type"] == "technosphere")
            for exc in excs:
                if (
                    exc["name"],
                    exc["product"],
                    exc["unit"],
                    exc["location"],
                ) in codes_names:
                    exc_code = codes_names[
                        (exc["name"], exc["product"], exc["unit"], exc["location"])
                    ]
                    # NOTE(review): same unconditional `new[...]`/`original[...]`
                    # lookups as above — possible KeyError if either side lacks
                    # the coordinate.
                    if new[(ds["code"], exc_code)] != original[(ds["code"], exc_code)]:
                        exc["modified"] = True
                else:
                    # Provider unknown to the original database: treat as added.
                    exc["modified"] = True

    return scenarios
31,638
def stats_start(server):
    """Fills in global_member_times and server_wl dictionaries.

    Creates and populates a table in the database if no such table exists
    for this server.

    Args:
        server (Server): Server object described in the Discord API reference
            page. We populate global_member_times with this server.
    """
    member_times = {}
    server_wl[server.id] = set()
    results = sql.fetch_all(server.id)

    # Table didn't exist. Create it, seeding every current member with a
    # zeroed [time, rank] pair.  (`is None` instead of `== None`.)
    if results is None:
        vals = []
        for member in server.members:
            vals.append((member.id,))
            member_times[member.id] = [0, 0]
        sql.create_table(server.id, vals)
    # Otherwise use the results to populate global_member_times
    else:
        for result in results:
            user_id = result[_ID_INDEX]
            time = result[_TIME_INDEX]
            rank = result[_RANK_INDEX]
            # Truthy whitelist flag (stored as 0/1 in the DB) marks the
            # member as whitelisted for this server.
            if result[_WL_STATUS_INDEX]:
                server_wl[server.id].add(user_id)
            member_times[user_id] = [time, rank]
    global_member_times[server.id] = member_times
31,639
def get_sub_bibliography(year, by_year, bibfile):
    """Get HTML bibliography for the given year.

    Args:
        year: Year (a key of ``by_year``) whose entries are rendered.
        by_year: Mapping of year -> iterable of BibTeX citation keys.
        bibfile: Path to the bibliography file handed to pandoc.

    Returns:
        The output produced by pandoc, headed by the year.

    Raises:
        AssertionError: If pandoc exits with a non-zero status; the message
            carries pandoc's stderr.
    """
    entries = ','.join(['@' + x for x in by_year[year]])
    # Build a minimal pandoc-markdown document: the YAML metadata block asks
    # pandoc-citeproc (via `nocite`) to emit all the listed entries.
    # (Renamed from `input`, which shadowed the builtin.)
    doc = '---\n' \
          f'bibliography: {bibfile}\n' \
          f'nocite: "{entries}"\n...\n' \
          f'# {year}'
    out = subprocess.run(['pandoc', '--filter=pandoc-citeproc', '-f', 'markdown'],
                         input=doc, capture_output=True, encoding='utf-8')
    if out.returncode != 0:
        raise AssertionError(out.stderr)
    return out.stdout
31,640
def import_tep_sets(lagged_samples: int = 2) -> tuple:
    """Load the TEP normal-operation training set and the IDV(0), IDV(4),
    IDV(5), and IDV(10) test sets.

    Only the first 22 measured variables and the first 11 manipulated
    variables are kept, and ``lagged_samples`` lagged copies are appended
    to every set.

    Returns a tuple ``(X, T0, T4, T5, T10)``.
    """
    normal_operation = import_sets(0)
    testing_sets = import_sets([4, 5, 10], skip_training=True)

    raw_sets = [
        normal_operation[0][1],   # X: normal-operation training data
        normal_operation[0][2],   # T0: fault-free test set
        testing_sets[0][1],       # T4
        testing_sets[1][1],       # T5
        testing_sets[2][1],       # T10
    ]

    # Rows 22-40 hold the variables that are excluded from the analysis.
    dropped_rows = list(range(22, 41))
    processed = [
        add_lagged_samples(np.delete(data, dropped_rows, axis=0), lagged_samples)
        for data in raw_sets
    ]
    return tuple(processed)
31,641
def to_fgdc(obj):
    """
    This is the primary function to call in the module. This function takes
    a UnifiedMetadata object and creates a serialized FGDC metadata record.

    Parameters
    ----------
    obj : obj
        A amg.UnifiedMetadata class instance

    Returns
    -------
    : str
        A string encoded FGDC compliant XML metadata file

    Raises
    ------
    ValueError
        If none of the sources on ``obj`` is an FGDCMetadata instance.
    """
    # The FGDC source provides the template that all other sources populate.
    template = None
    for s in obj.sources:
        if isinstance(s, FGDCMetadata):
            template = s.data

    # Fail loudly instead of raising an opaque AttributeError further down.
    if template is None:
        raise ValueError('No FGDCMetadata source found on the passed object.')

    populate_projection_information(template, obj)
    populate_bounding_box(template, obj)
    populate_raster_info(template, obj)
    populate_digital_forms(template, obj)
    populate_accuracies(template, obj)
    populate_geodetic(template, obj)

    template.planar_distance_units = 'meters'
    template.online_linkages = obj.doi

    # Optional attributes are copied over only when present.
    if hasattr(obj, 'title'):
        template.title = obj.title
    if hasattr(obj, 'processing_environment'):
        template.processing_environment = obj.processing_environment

    # Add the point of contact section to the template.
    template.validate()
    return template.serialize(use_template=False).decode()
31,642
def compute_iqms(settings, name='ComputeIQMs'):
    """
    Workflow that actually computes the IQMs

    .. workflow::

        from mriqc.workflows.functional import compute_iqms
        wf = compute_iqms(settings={'output_dir': 'out'})

    :param settings: dict with at least ``output_dir`` and
        ``biggest_file_size_gb``; optionally ``fft_spikes_detector``.
    :param name: name given to the nipype workflow.
    :return: the assembled :class:`nipype.pipeline.engine.Workflow`.
    """
    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(fields=[
        'subject_id', 'session_id', 'task_id', 'acq_id', 'rec_id', 'run_id',
        'orig', 'epi_mean', 'brainmask', 'hmc_epi', 'hmc_fd', 'in_tsnr',
        'metadata']), name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(
        fields=['out_file', 'out_dvars', 'outliers', 'out_spikes', 'out_fft']),
        name='outputnode')

    # IQM JSON files end up under <output_dir>/derivatives
    deriv_dir = check_folder(op.abspath(op.join(settings['output_dir'],
                                                'derivatives')))

    # Memory estimates scale with the largest input file; hoisted out of the
    # repeated per-node lookups.
    mem_gb = settings["biggest_file_size_gb"]

    # Compute DVARS
    dvnode = pe.Node(nac.ComputeDVARS(save_plot=False, save_all=True),
                     name='ComputeDVARS')
    dvnode.interface.estimated_memory_gb = mem_gb * 3

    # AFNI quality measures
    fwhm = pe.Node(afni.FWHMx(combine=True, detrend=True), name='smoothness')
    # fwhm.inputs.acf = True  # add when AFNI >= 16
    outliers = pe.Node(afni.OutlierCount(fraction=True, out_file='ouliers.out'),
                       name='outliers')
    outliers.interface.estimated_memory_gb = mem_gb * 2.5
    # BUG FIX: `out_file` is an input of the QualityIndex interface (compare
    # the `outliers` node above); it was mistakenly passed to pe.Node(),
    # which does not accept it.
    quality = pe.Node(afni.QualityIndex(automask=True, out_file='quality.out'),
                      name='quality')
    quality.interface.estimated_memory_gb = mem_gb * 3

    measures = pe.Node(FunctionalQC(), name='measures')
    measures.interface.estimated_memory_gb = mem_gb * 3

    workflow.connect([
        (inputnode, dvnode, [('hmc_epi', 'in_file'),
                             ('brainmask', 'in_mask')]),
        (inputnode, measures, [('epi_mean', 'in_epi'),
                               ('brainmask', 'in_mask'),
                               ('hmc_epi', 'in_hmc'),
                               ('hmc_fd', 'in_fd'),
                               ('in_tsnr', 'in_tsnr')]),
        (inputnode, fwhm, [('epi_mean', 'in_file'),
                           ('brainmask', 'mask')]),
        (inputnode, quality, [('hmc_epi', 'in_file')]),
        (inputnode, outliers, [('hmc_epi', 'in_file'),
                               ('brainmask', 'mask')]),
        (dvnode, measures, [('out_all', 'in_dvars')]),
        (dvnode, outputnode, [('out_all', 'out_dvars')]),
        (outliers, outputnode, [('out_file', 'outliers')])
    ])

    # Save to JSON file
    datasink = pe.Node(IQMFileSink(
        modality='bold', out_dir=deriv_dir), name='datasink')

    workflow.connect([
        (inputnode, datasink, [('subject_id', 'subject_id'),
                               ('session_id', 'session_id'),
                               ('task_id', 'task_id'),
                               ('acq_id', 'acq_id'),
                               ('rec_id', 'rec_id'),
                               ('run_id', 'run_id'),
                               ('metadata', 'metadata')]),
        (outliers, datasink, [(('out_file', _parse_tout), 'aor')]),
        (quality, datasink, [(('out_file', _parse_tqual), 'aqi')]),
        (measures, datasink, [('out_qc', 'root')]),
        (fwhm, datasink, [(('fwhm', fwhm_dict), 'root0')]),
        (datasink, outputnode, [('out_file', 'out_file')])
    ])

    # Optional FFT-based spikes finder, attached only on request.
    if settings.get('fft_spikes_detector', False):
        spikes_fft = pe.Node(niu.Function(
            input_names=['in_file'],
            output_names=['n_spikes', 'out_spikes', 'out_fft'],
            function=slice_wise_fft), name='SpikesFinderFFT')

        workflow.connect([
            (inputnode, spikes_fft, [('orig', 'in_file')]),
            (spikes_fft, outputnode, [('out_spikes', 'out_spikes'),
                                      ('out_fft', 'out_fft')]),
            (spikes_fft, datasink, [('n_spikes', 'spikes_num')])
        ])

    return workflow
31,643
def get_nbest_bounds_from_membership(membership_logits, n_best_size=1):
    """
    Return possible inclusive start, exclusive end indices given a list of
    membership logits: the first and last positions with a positive logit,
    or (0, 0) when no logit is positive.

    :param membership_logits: iterable of per-position logits
    :param n_best_size: currently unused (nbest not yet implemented)
    :return: two lists, each of length n (in nbest)
    """
    # TODO: include heuristic for choosing bounds (not just min/max)
    # TODO: implement nbest in heuristic too
    positive_positions = [pos for pos, logit in enumerate(membership_logits)
                          if logit > 0]
    if not positive_positions:
        return [0], [0]
    # Positions are generated in increasing order, so first == min, last == max.
    return [positive_positions[0]], [positive_positions[-1]]
31,644
def GetDepthFromIndicesMapping(list_indices):
    """
    GetDepthFromIndicesMapping
    ==========================
    Gives the depth of the nested list from the index mapping

    @param list_indices: a nested list representing the indexes of the nested
        lists by depth
    @return: depth
    """
    # The longest index tuple (first element of each entry) determines how
    # deeply the original structure is nested; depth is one more than that.
    longest = max(len(entry[0]) for entry in list_indices)
    return longest + 1
31,645
def lowpassIter(wp, ws, fs, f, atten=90, n_max=400):
    """Design a lowpass filter using f by iterating to minimize the number of taps needed.

    Starts from the Bellanger estimate of the filter semi-length, then runs a
    search (doubling until the stopband attenuation is met, then bisecting)
    to find the smallest tap count that achieves ``atten`` dB of attenuation.

    Args:
        wp: Passband frequency
        ws: Stopband frequency
        fs: Sample rate
        f: Function to design filter; called as ``f(N, wp, ws, fs)`` with the
            (odd) number of taps N
        atten: desired attenuation (dB)
        n_max: Maximum semi-length of filter

    Returns:
        Filter taps.
    """
    # Initial semi-length estimate from Bellanger's formula.
    n = bellangerord(0.01, 0.01, fs, (ws-wp))//2
    n_prev = 1

    # Bracketing bounds for the bisection phase.
    n_lo = 1
    n_hi = None

    if n > n_max:
        n = n_max

    # NOTE(review): if the initial estimate lands exactly on n_prev (== 1),
    # the loop body never runs and `taps` is unbound -- confirm inputs make
    # this impossible in practice.
    while n != n_prev:
        N = 2*n + 1

        # Design a candidate filter and measure its worst stopband level.
        taps = f(N, wp, ws, fs)
        w, h = signal.freqz(taps, worN=8000)
        w = 0.5*fs*w/np.pi
        hdb = 20*np.log10(np.abs(h))
        db = np.max(hdb[w >= ws])

        n_prev = n

        if db > -atten:
            # Not enough attenuation: grow n (double until an upper bound
            # exists, then bisect).
            if n == n_max:
                break

            n_lo = n
            if n_hi:
                n = (n_lo + n_hi) // 2
            else:
                n = 2*n
        else:
            # Attenuation met: try a smaller filter by bisecting downward.
            n_hi = n
            n = (n_lo + n_hi) // 2

        if n > n_max:
            n = n_max

    return taps
31,646
def evaluate_themes(
    ref_measurement: Measurement,
    test_measurement: Measurement,
    themes: Union[FmaskThemes, ContiguityThemes, TerrainShadowThemes],
) -> Dict[str, float]:
    """
    A generic tool for evaluating thematic datasets.

    For every pair of theme categories (a, b), reports the percentage of
    pixels that were category `a` in the reference measurement and category
    `b` in the test measurement, keyed as ``"<a>_2_<b>"``.  Rows whose
    category is absent from the reference data are reported as NaN.
    """
    values = [v.value for v in themes]
    n_values = len(values)
    minv = min(values)
    maxv = max(values)

    # read data and reshape to 1D
    ref_data = ref_measurement.read().ravel()
    test_data = test_measurement.read().ravel()

    # Histogram of the reference data, with reverse indices so that the
    # pixels belonging to each category can be looked up directly.
    ref_h = histogram(ref_data, minv=minv, maxv=maxv, reverse_indices="ri")
    ref_hist = ref_h["histogram"]
    ref_ri = ref_h["ri"]

    theme_changes = dict()

    for theme in themes:
        i = theme.value
        # check we have data for this category
        if ref_hist[i] == 0:
            # no changes as nothing exists in the reference data
            theme_changes[theme] = numpy.full((n_values,), numpy.nan)
            continue
        # Pixels that are category `i` in the reference, looked up in the
        # test data; their histogram gives the per-category transition PDF.
        idx = ref_ri[ref_ri[i] : ref_ri[i + 1]]
        values = test_data[idx]
        h = histogram(values, minv=minv, maxv=maxv)
        hist = h["histogram"]
        pdf = hist / numpy.sum(hist)
        # Stored as percentages.
        theme_changes[theme] = pdf * 100

    # split outputs into separate records
    result = dict()
    for theme in themes:
        for theme2 in themes:
            key = f"{theme.name.lower()}_2_{theme2.name.lower()}"
            # NOTE(review): assumes theme values are 0-based and contiguous so
            # that `theme2.value` indexes the PDF row correctly -- confirm for
            # each themes enum.
            result[key] = theme_changes[theme][theme2.value]

    return result
31,647
def test_global_averaging():
    """Test that `T==N` and `F==pow2(N_frs_max)` doesn't error, and outputs
    close to `T==N-1` and `F==pow2(N_frs_max)-1`
    """
    if skip_all:
        return None if run_without_pytest else pytest.skip()
    np.random.seed(0)
    N = 512
    params = dict(shape=N, J=9, Q=4, J_fr=5, Q_fr=2, average=True,
                  average_fr=True, out_type='dict:array', pad_mode='reflect',
                  pad_mode_fr='conj-reflect-zero', max_pad_factor=None,
                  max_pad_factor_fr=None, frontend=default_backend,
                  sampling_filters_fr=('resample', 'resample'))
    # Noisy exponential chirp as the test signal.
    x = echirp(N)
    x += np.random.randn(N)

    outs = {}
    metas = {}
    # Pairs of (just below threshold, at threshold) for T and F: the larger
    # value must trigger global averaging, the smaller one must not.
    Ts, Fs = (N - 1, N), (2**6 - 1, 2**6)
    for T in Ts:
        # N_frs_max ~= Q*max(p2['j'] for p2 in psi2_f); found 29 at runtime
        for F in Fs:
            jtfs = TimeFrequencyScattering1D(**params, T=T, F=F)
            # Global-averaging flags must flip exactly at the upper values.
            assert (jtfs.average_fr_global if F == Fs[-1] else
                    not jtfs.average_fr_global)
            assert (jtfs.average_global if T == Ts[-1] else
                    not jtfs.average_global)

            out = jtfs(x)
            out = jtfs_to_numpy(out)
            outs[ (T, F)] = out
            metas[(T, F)] = jtfs.meta()

    # Per-pair coefficient energies for each of the four (T, F) combinations.
    T0F0 = coeff_energy(outs[(Ts[0], Fs[0])], metas[(Ts[0], Fs[0])])
    T0F1 = coeff_energy(outs[(Ts[0], Fs[1])], metas[(Ts[0], Fs[1])])
    T1F0 = coeff_energy(outs[(Ts[1], Fs[0])], metas[(Ts[1], Fs[0])])
    T1F1 = coeff_energy(outs[(Ts[1], Fs[1])], metas[(Ts[1], Fs[1])])

    if metric_verbose:
        print("\nGlobal averaging reldiffs:")

    # Energies with global averaging must stay within 15% of the
    # just-below-threshold reference.
    th = .15
    for pair in T0F0:
        ref = T0F0[pair]
        reldiff01 = abs(T0F1[pair] - ref) / ref
        reldiff10 = abs(T1F0[pair] - ref) / ref
        reldiff11 = abs(T1F1[pair] - ref) / ref

        assert reldiff01 < th, "%s > %s | %s" % (reldiff01, th, pair)
        assert reldiff10 < th, "%s > %s | %s" % (reldiff10, th, pair)
        assert reldiff11 < th, "%s > %s | %s" % (reldiff11, th, pair)

        if metric_verbose:
            print("(01, 10, 11) = ({:.2e}, {:.2e}, {:.2e}) | {}".format(
                reldiff01, reldiff10, reldiff11, pair))
31,648
def test_remove_legacy_lb_backend(mocker, ip_load_balancing_array):
    """
    Test lb.legacy.remove-backend task without error
    """
    # Two legacy LB services; only the second reports the backend we remove,
    # so the task should log that the first service is not linked to it.
    mocker.patch(
        'ovh_api_tasks.api_wrappers.ip.get_ip_lb_services',
        return_value=ip_load_balancing_array)
    mocker.patch(
        'ovh_api_tasks.api_wrappers.ip.get_ip_lb_service_backends',
        side_effect=[['10.0.0.4'], ['10.0.0.5']])
    # NOTE(review): `return_result` is not a standard mocker.patch keyword
    # (likely meant `return_value`); harmless here since the mock's return
    # value is unused, but worth confirming.
    mocker.patch(
        'ovh_api_tasks.api_wrappers.ip.delete_ip_lb_service_backend',
        return_result=None)

    lb_legacy_tasks.remove_backend_from_legacy_lb(
        MockContext(), '10.0.0.5', 'ip-10.0.0.1,ip-10.0.0.2')

    # Captured stdout/stderr: exactly one informational line, no errors.
    output = sys.stdout.getvalue().strip()
    error = sys.stderr.getvalue()
    assert output == 'INFO - Backend 10.0.0.5 not linked to ip-10.0.0.1'
    assert error == ''
31,649
def poly_coefficients(df: np.ndarray, z: np.ndarray, cov: np.ndarray) -> np.ndarray:
    """
    Calculate the coefficients in the free energy polynomial.

    Parameters
    ----------
    df : np.ndarray [2, iphase]
        Difference between next and current integration points.
    z : np.ndarray [2, iphase]
        Conjugate variables (z1, z2) of the current point (f1, f2) for both
        the I and II phases.
    cov : np.ndarray [3, iphase]
        Covariances [cov(z1,z1), cov(z2,z2), cov(z1,z2)] of the current
        point for both phases.

    Returns
    -------
    np.ndarray [6, 2]
        Coefficients in the free energy polynomial.
    """
    coefficients = np.zeros((6, 2))
    # First-order terms.
    coefficients[0] = z[0] * df[0]
    coefficients[1] = z[1] * df[1]
    # Second-order (covariance) terms.
    coefficients[2] = cov[0] * df[0] ** 2
    coefficients[3] = cov[1] * df[1] ** 2
    coefficients[4] = cov[2] * df[0] * df[1]
    coefficients[5] = cov[2] * df[0]
    return coefficients
31,650
def NoneInSet(s):
    """Parser accepting characters as long as they are NOT in ``s``
    (the inverse of CharSet).  The result is a string."""
    outside_set = NoneOf(s)
    zero_or_more = Repeat(outside_set, -1)
    return ConcatenateResults(zero_or_more)
31,651
def _read_part(f, verbose):
    """Reads the part name and creates a mesh with that name.

    :param f: The file from where to read the nodes from.
    :type f: file object at the nodes
    :param verbose: Determines what level of print out to the console.
    :type verbose: 0, 1 or 2
    :return: A :class:`Mesh` named after the part.  Side effect: the pointer
        in the file object f is advanced past the ``*Part`` line.
    :raises ReadInpFileError: If the current line is not a ``*Part`` header.
    """
    # Raw string: "\*" is an invalid escape in a plain string literal
    # (DeprecationWarning since Python 3.6) and only matched by accident.
    re_part = re.compile(r"\*Part, name=(.*)")
    line = f.readline()
    match = re_part.match(line)
    if not match:
        raise ReadInpFileError("Error parsing file. Expected '*Part, "
                               "name=XXX', read '" + line + "'.")
    part_name = match.group(1)
    if verbose == 1 or verbose == 2:
        print("Read part with name " + str(part_name))
    # Initiate a mesh class with the same name as the part
    return Mesh(part_name)
31,652
async def get_prefix(bot, message):
    """Checks if the bot has a configuration tag for the prefix. Otherwise,
    gets the default.

    The per-guild prefix is stored as a role named ``fox_prefix:<prefix>``
    on the bot's own member.
    """
    default_prefix = await get_default_prefix(bot)
    # DMs have no guild roles, so only the default prefix can apply.
    if isinstance(message.channel, discord.DMChannel):
        return default_prefix

    prefix_tag = "fox_prefix:"
    for role_name in (role.name for role in message.guild.me.roles):
        # startswith instead of the brittle slice comparison role_name[:11].
        if role_name.startswith(prefix_tag):
            return role_name[len(prefix_tag):]
    return default_prefix
31,653
def test_render_parameter_header_description(testrenderer):
    """Header parameter's 'description' is rendered."""
    # Render a single header parameter and strip the markup down to text.
    markup = textify(
        testrenderer.render_parameter(
            {
                "name": "X-Request-Id",
                "in": "header",
                "description": "A unique request identifier.",
            }
        )
    )
    # The description must land on the line following the field directive.
    assert markup == textwrap.dedent(
        """\
        :reqheader X-Request-Id:
           A unique request identifier.
        """.rstrip()
    )
31,654
def fileprep(f, plate=None, ifu=None, smearing=None, stellar=False, maxr=None,
             cen=True, fixcent=True, clip=True, remotedir=None, gal=None,
             galmeta=None, rootdir=None):
    """
    Function to turn any nirvana output file into useful objects.

    Can take in `.fits`, `.nirv`, `dynesty.NestedSampler`_, or
    `dynesty.results.Results`_ along with any relevant parameters and spit
    out galaxy, result dictionary, all livepoint positions, and median values
    for each of the parameters.

    Args:
        f (:obj:`str`, `dynesty.NestedSampler`_, `dynesty.results.Results`_):
            `.fits` file, sampler, results, `.nirv` file of dumped results
            from :func:`~nirvana.fitting.fit`. If this is in the regular
            format from the automatic outfile generator in
            :func:`~nirvana.scripts.nirvana.main` then it will fill in most
            of the rest of the parameters by itself.
        plate (:obj:`int`, optional):
            MaNGA plate number for desired galaxy. Can be auto filled by `f`.
        ifu (:obj:`int`, optional):
            MaNGA IFU number for desired galaxy. Can be auto filled by `f`.
        smearing (:obj:`bool`, optional):
            Whether or not to apply beam smearing to models. Can be auto
            filled by `f`.
        stellar (:obj:`bool`, optional):
            Whether or not to use stellar velocity data instead of gas. Can
            be auto filled by `f`.
        maxr (:obj:`float`, optional):
            Maximum radius to make edges go out to in units of effective
            radii. Can be auto filled by `f`.
        cen (:obj:`bool`, optional):
            Whether the position of the center was fit. Can be auto filled
            by `f`.
        fixcent (:obj:`bool`, optional):
            Whether the center velocity bin was held at 0 in the fit. Can be
            auto filled by `f`.
        clip (:obj:`bool`, optional):
            Whether to apply clipping to the galaxy with
            :func:`~nirvana.data.kinematics.clip` as it is handling it.
        remotedir (:obj:`str`, optional):
            Directory to load MaNGA data files from, or save them if they
            are not found and are remotely downloaded.
        gal (:class:`~nirvana.data.fitargs.FitArgs`, optional):
            Galaxy object to use instead of loading the galaxy from scratch.
        galmeta (:class:`~nirvana.data.manga.MaNGAGlobalPar`, optional):
            Info on MaNGA galaxy used for plate and ifu.

    Returns:
        :class:`~nirvana.data.fitargs.FitArgs`: Galaxy object containing
        relevant data and parameters.
        :obj:`dict`: Dictionary of results of the fit.
    """
    #unpack fits file
    if type(f) == str and '.fits' in f:
        isfits = True #tracker variable

        #open file and get relevant stuff from header
        with fits.open(f) as fitsfile:
            table = fitsfile[1].data
            maxr = fitsfile[0].header['maxr']
            smearing = fitsfile[0].header['smearing'] if smearing is None else smearing
            scatter = fitsfile[0].header['scatter']

        #unpack bintable into dict
        keys = table.columns.names
        vals = [table[k][0] for k in keys]
        resdict = dict(zip(keys, vals))
        # Trim the masked entries off of the velocity and dispersion profiles.
        for v in ['vt','v2t','v2r','vtl','vtu','v2tl','v2tu','v2rl','v2ru']:
            resdict[v] = resdict[v][resdict['velmask'] == 0]
        for s in ['sig','sigl','sigu']:
            resdict[s] = resdict[s][resdict['sigmask'] == 0]

        #failsafe
        if 'Stars' in f or 'stel' in f: resdict['type'] = 'Stars'

        #get galaxy object
        if gal is None:
            if rootdir is not None:
                analysispath = f'{rootdir}/analysis/'
                reduxpath = f'{rootdir}/redux/'
            else:
                analysispath, reduxpath = (None, None)

            if resdict['type'] == 'Stars':
                kin = MaNGAStellarKinematics.from_plateifu(resdict['plate'],resdict['ifu'],
                        ignore_psf=not smearing, remotedir=remotedir,
                        analysis_path=analysispath, redux_path=reduxpath)
            else:
                kin = MaNGAGasKinematics.from_plateifu(resdict['plate'],resdict['ifu'],
                        ignore_psf=not smearing, remotedir=remotedir,
                        analysis_path=analysispath, redux_path=reduxpath)
            scatter = ('vel_scatter' in resdict.keys()) and (resdict['vel_scatter'] != 0)
        else:
            kin = gal
            scatter = gal.scatter

        # Reconstruct the expected number of medians from the mask lengths so
        # the parameter array can be sized consistently with the fit.
        fill = len(resdict['velmask'])
        fixcent = resdict['vt'][0] == 0
        lenmeds = 6 + 3*(fill - resdict['velmask'].sum() - fixcent) \
                + (fill - resdict['sigmask'].sum()) + 2*scatter
        meds = np.zeros(lenmeds)

    else:
        isfits = False

        #get sampler in right format
        if type(f) == str: chains = pickle.load(open(f,'rb'))
        elif type(f) == np.ndarray: chains = f
        elif type(f) == dynesty.nestedsamplers.MultiEllipsoidSampler: chains = f.results

        # Pick up a dumped .gal file that sits beside the .nirv file.
        if gal is None and '.nirv' in f and os.path.isfile(f[:-5] + '.gal'):
            gal = f[:-5] + '.gal'
        if type(gal) == str: gal = np.load(gal, allow_pickle=True)

        if 'Stars' in f or 'stel' in f: stellar=True

        #load input galaxy object
        if gal is not None:
            kin = gal
        #load in MaNGA data
        else:
            #parse the automatically generated filename
            if plate is None or ifu is None:
                fname = re.split('/', f[:-5])[-1]
                info = re.split('/|-|_', fname)
                plate = int(info[0]) if plate is None else plate
                ifu = int(info[1]) if ifu is None else ifu
                stellar = True if 'stel' in info else False
                cen = True if 'nocen' not in info else False
                smearing = True if 'nosmear' not in info else False
                # Radius token like "1.5r"; fall back to None when absent.
                try: maxr = float([i for i in info if 'r' in i][0][:-1])
                except: maxr = None

                if 'fixcent' in info: fixcent = True
                elif 'freecent' in info: fixcent = False

            if stellar:
                kin = MaNGAStellarKinematics.from_plateifu(plate,ifu,
                        ignore_psf=not smearing, remotedir=remotedir)
            else:
                kin = MaNGAGasKinematics.from_plateifu(plate,ifu,
                        ignore_psf=not smearing, remotedir=remotedir)

    # NOTE(review): leftover debug print of the `stellar` flag.
    print(stellar)
    #set relevant parameters for galaxy
    if isinstance(kin, FitArgs):
        args = kin
    else:
        args = FitArgs(kin, smearing=smearing, scatter=scatter)
    args.setdisp(True)
    # 6 global parameters when the center was fit, 4 otherwise.
    args.setnglobs(4) if not cen else args.setnglobs(6)
    args.setfixcent(fixcent)

    #clip data if desired (never re-clip a preloaded galaxy object)
    if gal is not None: clip = False
    if clip: args.clip()

    vel_r = args.kin.remap('vel')
    sig_r = args.kin.remap('sig') if args.kin.sig_phys2 is None \
            else np.sqrt(np.abs(args.kin.remap('sig_phys2')))

    if not isfits:
        meds = dynmeds(chains)

        #get appropriate number of edges by looking at length of meds
        nbins = (len(meds) - args.nglobs - fixcent - 2*args.scatter)/4
        if not nbins.is_integer():
            print(len(meds), args.nglobs, fixcent, 2*args.scatter, nbins)
            raise ValueError('Dynesty output array has a bad shape.')
        else: nbins = int(nbins)

    #calculate edges and velocity profiles, get basic data
    if not isfits:
        if gal is None: args.setedges(nbins - 1 + args.fixcent, nbin=True, maxr=maxr)
        resdict = profs(chains, args, stds=True)
        resdict['plate'] = galmeta.plate if galmeta is not None else None
        resdict['ifu'] = galmeta.ifu if galmeta is not None else None
        resdict['type'] = 'Stars' if stellar else 'Gas'
    else:
        # For .fits input, rebuild the binned kinematic maps stored in the
        # file's extensions rather than refitting.
        args.edges = resdict['bin_edges'][~resdict['velmask']]
        with fits.open(f) as fitsfile:
            args.kin.vel = args.kin.bin(fitsfile['vel'].data)
            args.kin.vel_ivar = args.kin.bin(fitsfile['vel_ivar'].data)
            args.kin.sig_phys2 = args.kin.bin(fitsfile['sigsqr'].data)
            args.kin.sig = args.kin.bin(fitsfile['sig_ivar'].data)
            args.kin.sb = args.kin.bin(fitsfile['sb'].data)
            args.kin.sb_ivar = args.kin.bin(fitsfile['sb_ivar'].data)
            args.kin.vel_mask = np.array(args.kin.bin(fitsfile['vel_mask'].data),
                                         dtype=bool)
        args.getguess(galmeta=galmeta)
        args.getasym()

    return args, resdict
31,655
def HA19(request):
    """
    Returns the render for the sdg graph.

    Builds a bar chart of modules per faculty for HA 19, writes it to
    ``core/static/HA19.png``, and renders the ``HA19.html`` template.
    """
    data = dataFrameHA()
    # BUG FIX: the labels mapping must be keyed by the exact column name
    # "HA 19" (with a space); the previous "HA19" key was silently ignored
    # by plotly, leaving the y axis unlabelled.
    figure = px.bar(data, x="Faculty", y="HA 19",
                    labels={"Faculty": "Faculties",
                            "HA 19": "Number of Modules Corresponding to HA 19"})
    figure.write_image("core/static/HA19.png")
    return render(request, 'HA19.html')
31,656
def student_stop_eligibility_plots(input_directory):
    """
    Create a KDE plot comparing the distributions of candidate stop counts
    across the walking-distance scenarios.
    """
    # One eligibility-count series per scenario file suffix.
    suffixes = ['25', '40', '50', '100', '82']
    counts = [
        stop_eligibility_counts(os.path.join(
            input_directory,
            'student-stop-eligibility-{}.csv'.format(suffix)))
        for suffix in suffixes
    ]

    fig, ax = plt.subplots(figsize=(12, 7))
    colors = {'0.25 mi': 'green',
              '0.4 mi': 'blue',
              '0.5 mi': 'red',
              '1.0 / 0.5 mi': 'black',
              '0.82 mi': 'orange'}

    # Long-format frame: one (scenario label, count) row per observation.
    df = pd.DataFrame({'0.25 mi': counts[0],
                       '0.4 mi': counts[1],
                       '0.5 mi': counts[2],
                       '1.0 / 0.5 mi': counts[3],
                       '0.82 mi': counts[4]}).melt()

    for label, group in df.groupby('variable'):
        group.plot(ax=ax, kind='kde', y='value', label=label,
                   color=colors[label])
    plt.title('Comparative distributions of candidate'
              ' stop counts (by scenario)')
    return plt
31,657
def test_enrich_asset_properties(properties, properties_to_enrich_dict: Dict, expected):
    """
    Given:
     - Properties of an asset.
     - Dict containing properties keys to enrich, and the new names of the
       enrichment as corresponding values.

    When:
     - Case a: Basic enrichment of properties have been asked.
     - Case b: Full enrichment of properties have been asked.
     - Case c: Full enrichment of properties have been asked, properties are empty.

    Then:
     - Case a: Ensure that only properties keys that are contained in basic
       enrichment are enriched.
     - Case b: Ensure that only properties keys that are contained in full
       enrichment are enriched.
     - Case c: Ensure that empty dict is returned.
    """
    # Parametrized: each (properties, mapping, expected) tuple covers one case.
    assert enrich_asset_properties(properties, properties_to_enrich_dict) == expected
31,658
def merge(
    left: pandas.core.frame.DataFrame,
    right: pandas.core.frame.DataFrame,
    how: Literal["right"],
    left_index: bool,
    right_index: bool,
):
    """
    usage.dask: 4
    """
    # Auto-generated API-usage stub: records the signature with which
    # downstream code (dask, 4 call sites) invokes pandas.merge.
    # Intentionally has no implementation.
    ...
31,659
def beta(data, market, periods, normalize=False):
    """
    Compute beta values for ``data`` against a market benchmark.

    Parameters
    ----------
    data : `ndarray`
        An array containing values.
    market : `ndarray`
        An array containing market values used as the comparison when
        calculating beta.
    periods : `int`
        Number of periods to be used.
    normalize : `bool`, optional
        If True, the standard deviation inside the beta calculation is
        normalized with n - 1 instead of n. Defaults to False.

    Returns
    -------
    `ndarray`
        An array containing beta values.

    Examples
    --------
    >>> import qufilab as ql
    >>> import numpy as np
    ...
    >>> # Load sample dataframe.
    >>> df = ql.load_sample('MSFT')
    >>> df_market = ql.load_sample('DJI')
    >>> beta = ql.beta(df['close'], df_market['close'], periods = 10)
    >>> print(beta)
    [nan nan nan ... 0.67027616 0.45641977 0.3169785]
    """
    # Thin wrapper: the numeric work is done by the compiled beta_calc.
    result = beta_calc(data, market, periods, normalize)
    return result
31,660
def h2orapids():
    """
    Python API test: h2o.rapids(expr)
    """
    # Evaluate a Rapids expression on the H2O backend; the scalar result is
    # exposed under the "string" key of the returned dict.
    rapidTime = h2o.rapids("(getTimeZone)")["string"]

    print(str(rapidTime))
31,661
def save_hints_trigger_problem(sender, **kwargs):
    """Persist the hints attached to a TriggerProblem instance, if any."""
    instance = kwargs['instance']
    # Only instances that carry hint data need saving.
    if not hasattr(instance, 'hints_info'):
        return
    logger.debug('Saving hints: %s %s', str(sender), str(instance))
    save_hints(instance)
31,662
def run(config_name="maestral"):
    """
    This is the main interactive entry point which starts the PyQt5 GUI.

    Blocks until the Qt event loop exits, then terminates the process with
    the application's exit code via ``sys.exit``.

    :param str config_name: Name of Maestral config to run.
    """
    # High-DPI support must be configured before the QApplication is created.
    QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling)
    QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps)

    app = QtWidgets.QApplication(["Maestral"])
    app.setWindowIcon(QtGui.QIcon(APP_ICON_PATH))
    # Tray-style app: keep running even when all windows are closed.
    app.setQuitOnLastWindowClosed(False)

    maestral_gui = MaestralGuiApp(config_name)
    maestral_gui.load_maestral()
    sys.exit(app.exec())
31,663
def set_process_name(name: str) -> None:
    """Set a name for this process.

    Args:
        name: The title shown for this process in tools such as ``ps``/``top``.
    """
    # Delegates to setproctitle, which rewrites the OS-level process title.
    setproctitle(name)
31,664
def transmission(ctx): """Podcust tools for transmission container image.""" # We can only use ctx.obj to create and share between commands. ctx.obj = TransmissionCust() click.echo("Initializing Podman Custodian Transmission class.")
31,665
def test_index_entry():
    """
    Test the construction of the list of blocks from text.
    """
    # Input fragments include a pilcrow (U+00B6) and trailing whitespace that
    # IndexEntry is expected to normalize away.
    text_list = ("Line one \u00B6.",
                 "Line two.  ")
    entry = search_builder.IndexEntry()
    entry.text_list.extend(text_list)
    text = """Line one .
    Line two."""
    nt.assert_equal(text, entry.text)
31,666
def heading_from_to(p1: Vector, p2: Vector) -> float:
    """
    Returns the heading in degrees from point 1 to point 2.

    The mathematical angle (counter-clockwise from +x) is negated and wrapped
    into [0, 360) to yield a clockwise compass-style heading.
    """
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    raw_degrees = math.atan2(dy, dx) * (180 / math.pi)
    heading = (-raw_degrees) % 360
    return abs(heading)
31,667
def delivery_report(err, msg):
    """
    Reports the failure or success of a message delivery.

    Args:
        err (KafkaError): The error that occurred on None on success.

        msg (Message): The message that was produced or failed.

    Note:
        In the delivery report callback the Message.key() and Message.value()
        will be the binary format as encoded by any configured Serializers and
        not the same object that was passed to produce().
        If you wish to pass the original object(s) for key and value to delivery
        report callback we recommend a bound callback or lambda where you pass
        the objects along.
    """
    if err is None:
        print(f'User record {msg.key()} successfully produced to {msg.topic()} '
              f'[{msg.partition()}] at offset {msg.offset()}')
    else:
        print(f'Delivery failed for User record {msg.key()}: {err}')
31,668
def selSPEA2Diverse(individuals, k):
    """Apply SPEA-II selection operator on the *individuals*. Usually, the
    size of *individuals* will be larger than *n* because any individual
    present in *individuals* will appear in the returned list at most once.
    Having the size of *individuals* equals to *n* will have no effect other
    than sorting the population according to a strength Pareto scheme. The
    list returned contains references to the input *individuals*. For more
    details on the SPEA-II operator see [Zitzler2001]_.

    :param individuals: A list of individuals to select from.
    :param k: The number of individuals to select.
    :returns: A list of selected individuals.

    .. [Zitzler2001] Zitzler, Laumanns and Thiele, "SPEA 2: Improving the
       strength Pareto evolutionary algorithm", 2001.
    """
    N = len(individuals)
    nGenes= len(individuals[0])
    L = len(individuals[0].fitness.values)
    # k-th nearest neighbour used for the density estimate.
    K = math.sqrt(N)
    strength_fits = [0] * N
    fits = [0] * N
    dominating_inds = [list() for i in range(N)]

    # Pairwise dominance: strength = how many individuals each one dominates;
    # dominating_inds records who dominates whom.
    for i, ind_i in enumerate(individuals):
        for j, ind_j in enumerate(individuals[i+1:], i+1):
            if ind_i.fitness.dominates(ind_j.fitness):
                strength_fits[i] += 1
                dominating_inds[j].append(i)
            elif ind_j.fitness.dominates(ind_i.fitness):
                strength_fits[j] += 1
                dominating_inds[i].append(j)

    # Raw fitness: sum of strengths of all dominators (0 => non-dominated).
    for i in range(N):
        for j in dominating_inds[i]:
            fits[i] += strength_fits[j]

    # Choose all non-dominated individuals
    chosen_indices = [i for i in range(N) if fits[i] < 1]

    if len(chosen_indices) < k:     # The archive is too small
        # Fill up with the densest remaining individuals; unlike canonical
        # SPEA-II, density here is based on chromosome (genotype) distances
        # to promote diversity.
        print('>>>>>> TOO SMALL', len(chosen_indices),k)
        distances = populationChromosomeDistances(individuals)
        distances=distances/np.max(distances)
        #[print('Chosen',chosen_indices)
        #[print('Ind',i)
        for i in range(N):
            print(distances[i,:])
            kth_dist = _randomizedSelect(distances[i,:], 0, N - 1, K)
            density = 1.0 / (kth_dist + 2.0)
            fits[i] += density

        next_indices = [(fits[i], i) for i in range(N)
                        if not i in chosen_indices]
        next_indices.sort()
        #print next_indices
        chosen_indices += [i for _, i in next_indices[:k - len(chosen_indices)]]

    elif len(chosen_indices) > k:   # The archive is too large
        # Truncate by repeatedly removing the individual with the smallest
        # distance to its nearest neighbour (objective-space distances).
        print('>>>>>> TOO BIG')
        N = len(chosen_indices)
        distances = [[0.0] * N for i in range(N)]
        sorted_indices = [[0] * N for i in range(N)]
        for i in range(N):
            for j in range(i + 1, N):
                dist = 0.0
                for l in range(L):
                    val = individuals[chosen_indices[i]].fitness.values[l] - \
                          individuals[chosen_indices[j]].fitness.values[l]
                    dist += val * val
                distances[i][j] = dist
                distances[j][i] = dist
            distances[i][i] = -1

        # Insert sort is faster than quick sort for short arrays
        for i in range(N):
            for j in range(1, N):
                l = j
                while l > 0 and distances[i][j] < distances[i][sorted_indices[i][l - 1]]:
                    sorted_indices[i][l] = sorted_indices[i][l - 1]
                    l -= 1
                sorted_indices[i][l] = j

        size = N
        to_remove = []
        while size > k:
            # Search for minimal distance
            min_pos = 0
            for i in range(1, N):
                for j in range(1, size):
                    dist_i_sorted_j = distances[i][sorted_indices[i][j]]
                    dist_min_sorted_j = distances[min_pos][sorted_indices[min_pos][j]]

                    if dist_i_sorted_j < dist_min_sorted_j:
                        min_pos = i
                        break
                    elif dist_i_sorted_j > dist_min_sorted_j:
                        break

            # Remove minimal distance from sorted_indices
            for i in range(N):
                distances[i][min_pos] = float("inf")
                distances[min_pos][i] = float("inf")

                for j in range(1, size - 1):
                    if sorted_indices[i][j] == min_pos:
                        sorted_indices[i][j] = sorted_indices[i][j + 1]
                        sorted_indices[i][j + 1] = min_pos

            # Remove corresponding individual from chosen_indices
            to_remove.append(min_pos)
            size -= 1

        for index in reversed(sorted(to_remove)):
            del chosen_indices[index]
    # NOTE(review): the prints below look like leftover debugging output.
    print(chosen_indices)
    Sel=[individuals[i] for i in chosen_indices]
    print(len(chosen_indices),k)
    # SelU de-duplicates the selection but is only used for the diagnostic
    # below; the (possibly duplicated) Sel list is what gets returned.
    SelU=[]
    for i in chosen_indices:
        if individuals[i] not in SelU:
            SelU.append(individuals[i])
    print('Selected')
    print(len(Sel),k)
    #jjprint(Sel)
    print('Unique ones')
    print(len(SelU),k)
    #print(SelU)
    if len(SelU)<k:
        print('>>>>>> NEED FOR MORE')
        #import pdb
        #pdb.set_trace()
    return Sel
31,669
def utilization_to_states(state_config, utilization):
    """Map a CPU utilization history to the corresponding state history.

    Each utilization value is converted independently via
    ``utilization_to_state``.

    :param state_config: The state configuration.
    :type state_config: list(float)

    :param utilization: The history of the host's CPU utilization.
    :type utilization: list(float)

    :return: The state history.
    :rtype: list(int)
    """
    states = []
    for point in utilization:
        states.append(utilization_to_state(state_config, point))
    return states
31,670
def post_captcha(captcha, cookie, id):
    """Submit the recognized captcha so the download is allowed.

    Parameters
    ----------
    captcha : str, the recognized captcha text
    cookie : str, cookie carrying the session information
    id : str, id of the CV

    Returns
    -------
    bool
        True when the endpoint reports success, False when it reports an
        error (wrong captcha).

    Notes
    -----
    The endpoint answers with JSON: {'estado': 'erro'} for a wrong captcha
    and {'estado': 'sucesso'} for a correct one.
    """
    captcha_url = ('http://buscatextual.cnpq.br/buscatextual/servlet/captcha'
                   '?informado=%s&metodo=validaCaptcha' % (captcha))
    headers = construct_headers(cookie, id)
    payload = requests.get(captcha_url, headers=headers).json()
    return payload['estado'] != 'erro'
31,671
def com_google_fonts_check_iso15008_interword_spacing(font, ttFont):
    """Check if spacing between words is adequate for display use

    Yields PASS/FAIL results (fontbakery check style).  The interword space
    is compared against 2.5x-3.0x the measured 'l'-to-'m' gap.
    NOTE(review): the 2.5-3.0 bounds are taken from the code below --
    confirm them against the ISO 15008 text.
    """
    # Right sidebearing of 'l': advance minus the rightmost x-height
    # intersection point.
    l_intersections = xheight_intersections(ttFont, "l")
    if len(l_intersections) < 2:
        yield FAIL,\
            Message('glyph-not-present',
                    "There was no 'l' glyph in the font,"
                    " so the spacing could not be tested")
        return

    l_advance = ttFont["hmtx"]["l"][0]
    l_rsb = l_advance - l_intersections[-1].point.x

    # Measure the 'm' glyph's bounding box to derive its sidebearings.
    glyphset = ttFont.getGlyphSet()
    h_glyph = glyphset["m"]
    pen = BoundsPen(glyphset)
    h_glyph._glyph.draw(pen, ttFont.get("glyf"))
    (xMin, yMin, xMax, yMax) = pen.bounds

    m_advance = ttFont["hmtx"]["m"][0]
    m_lsb = xMin
    m_rsb = m_advance - (m_lsb + xMax - xMin)

    # hmtx entries are (advance, lsb); [1] is the left sidebearing of 'n'.
    n_lsb = ttFont["hmtx"]["n"][1]
    # Reference gap: 'l' rsb + kerning + 'm' lsb.
    l_m = l_rsb + pair_kerning(font, "l", "m") + m_lsb

    space_width = ttFont["hmtx"]["space"][0]
    # Add spacing caused by normal sidebearings
    space_width += m_rsb + n_lsb

    if 2.50 <= space_width / l_m <= 3.0:
        yield PASS, "Advance width of interword space was adequate"
    else:
        yield FAIL,\
            Message('bad-interword-spacing',
                    f"The interword space ({space_width}) was"
                    f" outside the recommended range ({l_m*2.5}-{l_m*3.0})")
31,672
async def get_pipeline_run_log(
    organization: str = Path(None, description="Name of the organization"),
    pipeline: str = Path(None, description="Name of the pipeline"),
    run: str = Path(None, description="Name of the run"),
    start: int = Query(None, description="Start position of the log"),
    # NOTE(review): "&#39;" below is an HTML-escaped apostrophe, likely left
    # over from code generation -- consider unescaping it in the generator.
    download: bool = Query(None, description="Set to true in order to download the file, otherwise it&#39;s passed as a response body"),
    token_jenkins_auth: TokenModel = Security(
        get_token_jenkins_auth
    ),
) -> str:
    """Get log for a pipeline run.

    Generated endpoint stub (unimplemented): resolves the Jenkins auth token
    via the Security dependency and is expected to return the run's log text,
    optionally from the `start` offset or as a file download.
    """
    ...
31,673
def generate_person(results: Dict):
    """
    Build a person dictionary from a raw SQL result row.

    The row is expected to hold (id, name, img_url, location, colors) with
    the text fields as UTF-8 bytes and colors comma-separated.

    :param results: row returned by the person query
    :return: the person dict, or None when the row is empty
    """
    if len(results) == 0:
        return None
    return {
        "id": results[0],
        "name": results[1].decode("utf-8"),
        "img_url": results[2].decode("utf-8"),
        "location": results[3].decode("utf-8"),
        "colors": (results[4].decode("utf-8")).split(","),
    }
31,674
def XOR(args):
    """
    Another way of finding the XOR of functions.
    Just pass the sequence of BFs as args.
    """
    # NOTE(review): the operator token "%" is forwarded to standard_op,
    # which presumably maps it to XOR -- confirm against standard_op's
    # operator table.
    standard_op(args, "%")
31,675
def test_get_required_with_fx():
    """Test getting required variables for derivation with fx variables."""
    # 'ohc' (presumably ocean heat content) should require the potential
    # temperature plus the cell-volume fx variable.
    variables = get_required('ohc', 'CMIP5')
    reference = [
        {'short_name': 'thetao'},
        {'short_name': 'volcello', 'mip': 'fx'},
    ]
    assert variables == reference
31,676
def paliindrome_sentence(sentence: str) -> bool:
    """Return True if *sentence* is a palindrome, ignoring case and any
    character that is not alphanumeric.

    :param sentence: text to test; spaces and punctuation are skipped
    :return: True when the cleaned text reads the same forwards and backwards
        (the empty string is considered a palindrome)
    """
    # Keep only the alphanumeric characters; join at C speed instead of
    # building the string with quadratic `+=` concatenation.
    cleaned = ''.join(ch for ch in sentence if ch.isalnum())
    # casefold() handles case-insensitive comparison, including non-ASCII.
    return cleaned[::-1].casefold() == cleaned.casefold()
31,677
def get_default_pool_set():
    """Return the names of supported pooling operators.

    Returns:
        a list of pooling operator names
    """
    # NOTE: the docstring previously claimed a tuple was returned; a list is
    # kept here for backward compatibility with existing callers.
    return ['sum', 'correlation1', 'correlation2', 'maximum']
31,678
def _sys_conf_tpf_stub(actual_state_data: StateData, next_state_data: StateData,
                       cfc_spec: CFCSpec):
    """Stub for the transition probability function.

    Placeholder with the expected signature: receives the current and next
    state data plus the CFC spec; presumably meant to return a transition
    probability once implemented.  Currently does nothing.
    """
    pass
31,679
def virtual_networks_list_all(**kwargs):
    """
    .. versionadded:: 2019.2.0

    List all virtual networks within a subscription.

    Returns a dict keyed by virtual-network name, or ``{"error": ...}`` on a
    cloud error.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_network.virtual_networks_list_all

    """
    result = {}
    # __utils__ is Salt's injected utility-function registry.
    netconn = __utils__["azurearm.get_client"]("network", **kwargs)
    try:
        # Flatten the Azure paged result into a plain list of dicts.
        vnets = __utils__["azurearm.paged_object_to_list"](
            netconn.virtual_networks.list_all()
        )

        for vnet in vnets:
            result[vnet["name"]] = vnet
    except CloudError as exc:
        # Log the cloud error and surface it to the caller instead of raising.
        __utils__["azurearm.log_cloud_error"]("network", str(exc), **kwargs)
        result = {"error": str(exc)}

    return result
31,680
def variantNameTextChanged(variantName):
    """
    Callback fired when the user edits the variant name text field;
    rewrites the field content when the entered name is empty or invalid.
    """
    if not variantName:
        # An empty name is not allowed; restore the default value.
        cmds.textField('variantNameText', edit=True, text=kDefaultCacheVariantName)
        return
    # Strip characters that would make the name an invalid identifier.
    validatedName = Tf.MakeValidIdentifier(variantName)
    if validatedName != variantName:
        cmds.textField('variantNameText', edit=True, text=validatedName)
31,681
def generate_state_matrix(Hprime, gamma):
    """Full combinatorics of Hprime-dim binary vectors with at most gamma ones.

    :param Hprime: Vector length
    :type Hprime: int
    :param gamma: Maximum number of ones
    :type gamma: int
    :return: tuple ``(state_list, no_states, state_matrix, state_abs)`` --
        the list of active-index int8 arrays, the number of states, the
        ``(no_states, Hprime)`` uint8 binary matrix, and the per-state count
        of active units.
    """
    # NOTE(review): combinations start at g=2, so the all-zero and the
    # single-one states are excluded -- confirm this is intended, since the
    # docstring says "at most gamma ones".
    state_list = [np.array(s, dtype=np.int8)
                  for g in range(2, gamma + 1)
                  for s in combinations(range(Hprime), g)]
    no_states = len(state_list)

    state_matrix = np.zeros((no_states, Hprime), dtype=np.uint8)
    for row, active in enumerate(state_list):
        state_matrix[row, active] = 1

    state_abs = state_matrix.sum(axis=1)
    return state_list, no_states, state_matrix, state_abs
31,682
def excel_file2():
    """Test data for custom data column required fields."""
    filename = 'NADataErrors_2018-05-19_v1.0.xlsx'
    return os.path.join('test', 'data', filename)
31,683
def playfair_decipher(message, keyword, padding_letter='x',
                      padding_replaces_repeat=False, letters_to_merge=None,
                      wrap_alphabet=KeywordWrapAlphabet.from_a):
    """Decipher a message using the Playfair cipher.

    Builds the 5x5 Polybius grid from the keyword, splits the sanitised
    ciphertext into bigrams, and deciphers each bigram against the grid.
    """
    # Default merge maps 'j' onto 'i' (classic 25-letter Playfair grid).
    merge_map = {'j': 'i'} if letters_to_merge is None else letters_to_merge
    grid = polybius_grid(keyword, list(range(5)), list(range(5)),
                         letters_to_merge=merge_map,
                         wrap_alphabet=wrap_alphabet)
    ciphertext_bigrams = playfair_bigrams(
        sanitise(message),
        padding_letter=padding_letter,
        padding_replaces_repeat=padding_replaces_repeat)
    deciphered = [playfair_decipher_bigram(bigram, grid,
                                           padding_letter=padding_letter)
                  for bigram in ciphertext_bigrams]
    return cat(deciphered)
31,684
def test_fails_null_index(driver, function_store):
    """
    Since we do not allow NULL values in queries, it should be banned from
    index columns in the first place.
    """
    # i1 carries a NaN, which maps to NULL in the stored index.
    df = pd.DataFrame(
        {
            "x": [0, 1, 2, 3],
            "p": [0, 0, 1, 1],
            "v": [10, 11, 12, 13],
            "i1": [0, 1, 2, np.nan],
        }
    )
    cube = Cube(
        dimension_columns=["x"],
        partition_columns=["p"],
        uuid_prefix="cube",
        index_columns=["i1"],
    )
    # Writing must be rejected before any dataset is persisted.
    with pytest.raises(ValueError) as exc:
        driver(data=df, cube=cube, store=function_store)
    assert 'Found NULL-values in index column "i1"' in str(exc.value)

    assert not DatasetMetadata.exists(cube.ktk_dataset_uuid("seed"), function_store())
31,685
def create_role(role_name):
    """Create an IAM role via the AWS CLI and return its ARN.

    The role gets an assume-role policy allowing the Lambda service to
    assume it.
    """
    trust_policy = {
        "Version" : "2012-10-17",
        "Statement" : [
            {
                "Effect" : "Allow",
                "Principal" : {
                    "Service" : "lambda.amazonaws.com"
                },
                "Action" : "sts:AssumeRole"
            }
        ]
    }
    raw_output = execute_command([
        "aws", "iam", "create-role",
        "--role-name", role_name,
        "--assume-role-policy-document", json.dumps(trust_policy),
    ])
    return json.loads(raw_output.decode("utf-8"))["Role"]["Arn"]
31,686
def show_file_statuses(file_statuses, verbose=False) -> None: """Helper function to print ignored, missing files""" ignored = [] missing = [] downloaded = [] for status, short_filepath in file_statuses: if status == "IGNORE": ignored.append(short_filepath) elif status == "MISSING": missing.append(short_filepath) elif status == "DOWNLOADED": downloaded.append(short_filepath) if len(ignored) > 0: if len(downloaded) > 0: print() if verbose: print("The following files have been ignored.") for short_filepath in ignored: print(" " + short_filepath) else: if len(ignored) == 1: print("1 file has been ignored, use --verbose for more info") else: print( "{} files have been ignored, use --verbose for more info".format( len(ignored) ) ) if len(missing) > 0: if len(ignored) > 0 or len(downloaded) > 0: print() if verbose: print( "The following files are missing, use --missingdownload to download them." ) for short_filepath in missing: print(" " + short_filepath) else: if len(missing) == 1: print("1 file is missing, use --verbose for more info") else: print( "{} files are missing, use --verbose for more info".format( len(missing) ) )
31,687
def evaluate_field(record, field_spec):
    """
    Evaluate a field of a record using the type of the field_spec as a guide.

    :param record: the record being rendered; indexed, attribute-read, or
        passed to ``field_spec`` depending on the spec's type
    :param field_spec: an int (positional index into ``record``), a str
        (attribute name on ``record``), or a callable taking the record
    :return: the field value rendered as a string
    """
    # isinstance() is the idiomatic type test (PEP 8) and also accepts
    # int/str subclasses; note bools now take the index path.
    if isinstance(field_spec, int):
        return str(record[field_spec])
    if isinstance(field_spec, str):
        return str(getattr(record, field_spec))
    # Anything else is treated as a callable computing the value.
    return str(field_spec(record))
31,688
def project_points(X, K, R, T, distortion_params=None):
    """
    Project points from 3d world coordinates to 2d image coordinates

    :param X: world points -- assumed 3xN given the matrix products below
        (TODO confirm with callers)
    :param K: 3x3 camera intrinsics matrix
    :param R: 3x3 rotation, world -> camera
    :param T: translation, world -> camera
    :param distortion_params: optional radial distortion coefficients; indices
        0, 1 and 4 are used as the r^2, r^4, r^6 terms (presumably
        OpenCV-style ordering -- verify)
    :return: 2xN array of image coordinates
    """
    # Pinhole projection: x = K (R X + T), then perspective divide.
    x_2d = np.dot(K, (np.dot(R, X) + T))
    x_2d = x_2d[:-1, :] / x_2d[-1, :]

    if distortion_params is not None:
        # Map back to normalized image coordinates (undo intrinsics).
        x_2d_norm = np.concatenate((x_2d, np.ones((1, x_2d.shape[1]))), 0)
        x_3d_norm = np.dot(np.linalg.pinv(K), x_2d_norm)
        x_2d_post = x_3d_norm[:-1, :] / x_3d_norm[-1, :]
        # Radial distortion factor as a polynomial in r^2.
        r = np.sqrt(x_2d_post[0, :]**2 + x_2d_post[1, :]**2)
        correction = (1 + distortion_params[0] * r**2 +
                      distortion_params[1] * r**4 +
                      distortion_params[4] * r**6)
        x_2d_corr = x_2d_post * correction
        # Re-apply intrinsics and divide again to land in pixels.
        x_3d_corr = np.concatenate((
            x_2d_corr, np.ones((1, x_2d_corr.shape[1]))), 0)

        x_2d = np.dot(K, x_3d_corr)
        x_2d = x_2d[:-1, :] / x_2d[-1, :]
    return x_2d
31,689
def test_add(c, x1, y1, x2, y2, x3, y3):
    """We expect that on curve c, (x1,y1) + (x2, y2 ) = (x3, y3).

    :param c: the elliptic curve the points live on
    :param x1, y1: coordinates of the first point
    :param x2, y2: coordinates of the second point
    :param x3, y3: expected coordinates of the sum
    """
    p1 = Point(c, x1, y1)
    p2 = Point(c, x2, y2)
    p3 = p1 + p2
    assert p3.x() == x3 and p3.y() == y3
31,690
def project_exists(response: 'environ.Response', path: str) -> bool:
    """
    Determines whether or not a project exists at the specified path.

    On a missing path the failure is recorded on the response (code
    PROJECT_NOT_FOUND) and echoed to the console.

    :param response: response object used to report a failure
    :param path: filesystem path where the project is expected
    :return: True when the path exists, False otherwise
    """
    if os.path.exists(path):
        return True

    # Keep the console message literal indented exactly as before so the
    # rendered output is unchanged.
    console_message = """
        [ERROR]: Unable to open project. The specified path does not exist:

        {path}
        """.format(path=path)
    response.fail(
        code='PROJECT_NOT_FOUND',
        message='The project path does not exist',
        path=path
    ).console(console_message)
    return False
31,691
def rate_multipressure(qD, delta_p, B, mu, perm, h):
    """Calculate Rate as Sum of Constant Flowing Pressures"""
    import numpy as np
    # Productivity-like prefactor, then superpose the qD * delta_p terms.
    prefactor = (.007082 * perm * h) / (B * mu)
    return prefactor * np.sum(qD * delta_p)
31,692
def osculating_elements_of(position, reference_frame=None, gm_km3_s2=None):
    """Produce the osculating orbital elements for a position.

    `position` is an instance of :class:`~skyfield.positionlib.ICRF`. These
    are commonly returned by the ``at()`` method of any Solar System body.

    ``reference_frame`` is an optional argument and is a 3x3 numpy array.
    The reference frame by default is the ICRF. Commonly used reference
    frames are found in skyfield.data.spice.inertial_frames.

    ``gm_km3_s2`` is an optional float argument representing the
    gravitational parameter (G*M) in units of km^3/s^2, which is the sum of
    the masses of the orbiting object and the object being orbited. If not
    specified, this is calculated for you.

    This function returns an instance of
    :class:`~skyfield.elementslib.OsculatingElements`
    """
    if gm_km3_s2 is None:
        # GM can only be looked up for integer body codes.
        if not isinstance(position.center, int):
            raise ValueError('Skyfield is unable to calculate a value for GM. You'
                             ' should specify one using the `gm_km3_s2` keyword argument')

        gm_km3_s2 = GM_dict.get(position.center, 0.0)

        # Centers 0-9 are barycenters; otherwise add the target's GM so the
        # parameter covers both masses.
        orbits_barycenter = 0 <= position.center <= 9
        if not orbits_barycenter:
            gm_km3_s2 += GM_dict.get(position.target, 0.0)

        if gm_km3_s2 == 0:
            raise ValueError('Skyfield is unable to calculate a value for GM. You'
                             ' should specify one using the `gm_km3_s2` keyword argument')

    if reference_frame is not None:
        # Rotate position and velocity into the requested frame.
        position_vec = Distance(reference_frame.dot(position.position.au))
        velocity_vec = Velocity(reference_frame.dot(position.velocity.au_per_d))
    else:
        position_vec = position.position
        velocity_vec = position.velocity

    return OsculatingElements(position_vec, velocity_vec, position.t, gm_km3_s2)
31,693
def main():
    """
    perform automatic calibration of pygama DataSets.
    command line options to specify the DataSet are the same as in
    processing.py
    save results in a JSON database for access by other routines.
    """
    run_db, cal_db = "runDB.json", "calDB.json"

    par = argparse.ArgumentParser(description="pygama calibration suite")
    arg, st, sf = par.add_argument, "store_true", "store_false"
    arg("-ds", nargs='*', action="store", help="load runs for a DS")
    arg("-r", "--run", nargs=1, help="load a single run")
    arg("-s", "--spec", action=st, help="print simple spectrum")
    arg("-p1", "--pass1", action=st, help="run pass-1 (linear) calibration")
    arg("-p2", "--pass2", action=st, help="run pass-2 (peakfit) calibration")
    arg("-m", "--mode", nargs=1, help="set pass-2 calibration mode")
    arg("-e", "--etype", nargs=1, help="custom energy param (default is e_ftp)")
    arg("-t", "--test", action=st, help="set verbose (testing) output")
    arg("-db", "--writeDB", action=st, help="store results in DB")
    arg("-pr", "--printDB", action=st, help="print calibration results in DB")
    args = vars(par.parse_args())

    # -- standard method to declare the DataSet from cmd line --
    ds = pu.get_dataset_from_cmdline(args, "runDB.json", "calDB.json")

    # -- start calibration routines --
    # Default energy parameter is e_ftp unless overridden with -e.
    etype = args["etype"][0] if args["etype"] else "e_ftp"

    if args["printDB"]:
        show_calDB(cal_db) # print current DB status
    if args["spec"]:
        show_spectrum(ds, etype)
    if args["pass1"]:
        calibrate_pass1(ds, etype, args["writeDB"], args["test"])
    if args["pass2"]:
        # Pass-2 mode defaults to 0 when -m is not given.
        cal_mode = int(args["mode"][0]) if args["mode"] else 0
        calibrate_pass2(ds, cal_mode, args["writeDB"])
31,694
def iterate_docker_images(path: str = os.path.join(ROOT, "docker_images.csv")) -> Iterator[Tuple[Optional[str], ...]]:
    """
    Iterates over the known Docker images.

    Thin wrapper that delegates to iterate_csv_file; missing fields are
    yielded as None (per the Optional element type).

    NOTE: the default `path` is computed once at import time.

    :return: An iterator over the following fields of the known Docker images:
             - name
             - version
             - URL
             - registry URL
             - registry username
             - registry password
             - CUDA version
             - framework
             - framework version
             - domain
             - tasks
             - minimum hardware generation
             - cpu
             - license
    """
    yield from iterate_csv_file(path)
31,695
def rename_actions(P: NestedDicts, policy: DetPolicy) -> NestedDicts:
    """ Renames actions in P so that the policy action is always 0.

    Returns a new mapping; the input P and its inner dicts are not mutated.
    """
    renamed: NestedDicts = {}
    for state, actions in P.items():
        chosen = policy(state)
        swapped = copy.copy(actions)
        # Swap slot 0 with the policy's chosen action, reading from the
        # untouched original so the swap is order-independent.
        swapped[0], swapped[chosen] = actions[chosen], actions[0]
        renamed[state] = swapped
    return renamed
31,696
async def test_arm_night_success(hass):
    """Test arm night method success."""
    # Scripted client responses: initial poll (disarmed), arm call success,
    # follow-up poll (armed night).
    responses = [RESPONSE_DISARMED, RESPONSE_ARM_SUCCESS, RESPONSE_ARMED_NIGHT]
    with patch(
        "homeassistant.components.totalconnect.TotalConnectClient.TotalConnectClient.request",
        side_effect=responses,
    ):
        await setup_platform(hass, ALARM_DOMAIN)
        assert hass.states.get(ENTITY_ID).state == STATE_ALARM_DISARMED

        await hass.services.async_call(
            ALARM_DOMAIN, SERVICE_ALARM_ARM_NIGHT, DATA, blocking=True
        )
        await hass.async_block_till_done()
        assert hass.states.get(ENTITY_ID).state == STATE_ALARM_ARMED_NIGHT
31,697
def configure(config):
    """Interactively configure the bot's ``[core]`` config section.

    :param config: the bot's config object
    :type config: :class:`~.config.Config`
    """
    config.core.configure_setting('nick', 'Enter the nickname for your bot.')
    config.core.configure_setting('host', 'Enter the server to connect to.')
    config.core.configure_setting('use_ssl',
                                  'Should the bot connect with SSL?')
    # Default port depends on whether SSL was chosen above.
    if config.core.use_ssl:
        default_port = 6697
    else:
        default_port = 6667
    config.core.configure_setting('port', 'Enter the port to connect on.',
                                  default=default_port)
    config.core.configure_setting(
        'owner', "Enter your own IRC name (or that of the bot's owner)")
    config.core.configure_setting(
        'channels',
        'Enter the channels to connect to at startup, separated by commas.'
    )
    config.core.configure_setting(
        'commands_on_connect',
        'Enter commands to perform on successful connection to server (one per \'?\' prompt).'
    )
31,698
def set_gcc(): """Try to use GCC on OSX for OpenMP support.""" # For macports and homebrew if platform.system() == "Darwin": gcc = extract_gcc_binaries() if gcc is not None: os.environ["CC"] = gcc os.environ["CXX"] = gcc else: global use_openmp use_openmp = False logging.warning('No GCC available. Install gcc from Homebrew ' 'using brew install gcc.')
31,699