content: string, lengths 22 to 815k
id: int64, 0 to 4.91M
def subtract(value, *args, **kwargs):
    """
    Return the difference between ``value`` and a :class:`relativedelta`.

    :param value: initial date or datetime.
    :param args: positional args to pass directly to :class:`relativedelta`.
    :param kwargs: keyword args to pass directly to :class:`relativedelta`.
    :return: the resulting date/datetime.
    """
    return value - relativedelta(*args, **kwargs)
14,200
def plot_coastline( axes, bathymetry, coords='grid', isobath=0, xslice=None, yslice=None, color='black', server='local', zorder=2, ): """Plot the coastline contour line from bathymetry on the axes. The bathymetry data may be specified either as a file path/name, or as a :py:class:`netCDF4.Dataset` instance. If a file path/name is given it is opened and read into a :py:class:`netCDF4.Dataset` so, if this function is being called in a loop, it is best to provide it with a bathymetry dataset to avoid the overhead of repeated file reads. :arg axes: Axes instance to plot the coastline contour line on. :type axes: :py:class:`matplotlib.axes.Axes` :arg bathymetry: File path/name of a netCDF bathymetry data file or a dataset object containing the bathymetry data. :type bathymetry: str or :py:class:`netCDF4.Dataset` :arg coords: Type of plot coordinates to set the aspect ratio for; either :kbd:`grid` (the default) or :kbd:`map`. :type coords: str :arg isobath: Depth to plot the contour at; defaults to 0. :type isobath: float :arg xslice: X dimension slice to defined the region for which the contour is to be calculated; defaults to :kbd:`None` which means the whole domain. If an xslice is given, a yslice value is also required. :type xslice: :py:class:`numpy.ndarray` :arg yslice: Y dimension slice to defined the region for which the contour is to be calculated; defaults to :kbd:`None` which means the whole domain. If a yslice is given, an xslice value is also required. :type yslice: :py:class:`numpy.ndarray` :arg color: Matplotlib colour argument :type color: str, float, rgb or rgba tuple :arg zorder: Plotting layer specifier :type zorder: integer :returns: Contour line set :rtype: :py:class:`matplotlib.contour.QuadContourSet` """ # Index names based on results server if server == 'local': lon_name = 'nav_lon' lat_name = 'nav_lat' bathy_name = 'Bathymetry' elif server == 'ERDDAP': lon_name = 'longitude' lat_name = 'latitude' bathy_name = 'bathymetry' else: raise ValueError('Unknown results server name: {}'.format(server)) if any(( xslice is None and yslice is not None, xslice is not None and yslice is None, )): raise ValueError('Both xslice and yslice must be specified') if not hasattr(bathymetry, 'variables'): bathy = nc.Dataset(bathymetry) else: bathy = bathymetry depths = bathy.variables[bathy_name] if coords == 'map': lats = bathy.variables[lat_name] lons = bathy.variables[lon_name] if xslice is None and yslice is None: contour_lines = axes.contour( np.array(lons), np.array(lats), np.array(depths), [isobath], colors=color, zorder=zorder) else: contour_lines = axes.contour( lons[yslice, xslice], lats[yslice, xslice], depths[yslice, xslice].data, [isobath], colors=color, zorder=zorder) else: if xslice is None and yslice is None: contour_lines = axes.contour( np.array(depths), [isobath], colors=color, zorder=zorder) else: contour_lines = axes.contour( xslice, yslice, depths[yslice, xslice].data, [isobath], colors=color, zorder=zorder) if not hasattr(bathymetry, 'variables'): bathy.close() return contour_lines
14,201
def validate_offset(parameter, argument):
    """
    For a given offset parameter, check if its argument is valid.
    If not, then raise `ValueError`.

    Examples
    --------
    >>> validate_offset(parameter='last', argument='3d')

    >>> validate_offset(parameter='last', argument='XYZ')
    Traceback (most recent call last):
      ...
    ValueError: Invalid last argument: 'XYZ'
    Please specify a string of the format 'X[d/w/y]' where X is ...
    """
    if argument is None:
        return
    if not isinstance(argument, str):
        raise ValueError(
            f"Invalid {parameter} argument: {argument!r}\n"
            f"Please specify a string or None."
        )
    suffix = argument[-1:]  # slicing here to handle empty strings
    if suffix not in ("d", "w", "y"):
        raise ValueError(
            f"Invalid {parameter} argument: {argument!r}\n"
            f"Please specify a string of the format 'X[d/w/y]' "
            "where X is a non-negative integer followed by 'd', 'w', or 'y' "
            "that indicates days, weeks, or years."
        )
14,202
def dump_model(object_to_dump, output_path, flags):
    """Pickle the object and save to the output_path.

    Args:
      object_to_dump: Python object to be pickled
      output_path: (string) output path which can be Google Cloud Storage

    Returns:
      None
    """
    with open('model.pkl', 'wb') as model_file:
        pickle.dump(object_to_dump, model_file)
    upload_blob(flags.bucket_name, 'model.pkl', output_path + '/model.pkl')
14,203
def test_ckeditor_file_upload_image_orphan(admin_api_client_logged_in):
    """
    CKEditor file upload should create files protected by sendfile.

    The user can upload files when still being in the process of creating
    the Hearing, so the section id is not known yet.
    """
    ckeditor_params = '?CKEditor=id_sections-0-content&CKEditorFuncNum=1&langCode=en'
    url = '/upload/' + ckeditor_params
    post_data = {}
    with open(get_image_path(IMAGES['SMALL']), 'rb') as fp:
        post_data['upload'] = fp
        response = admin_api_client_logged_in.post(url, data=post_data, format='multipart')
    assert response.status_code == 200, 'expected status_code 200, received %s' % response.status_code
    assert SectionFile.objects.count() == 1
    sectionfile_id = SectionFile.objects.first().pk
    expected = r"window.parent.CKEDITOR.tools.callFunction\(1, 'https?://.+/v1/download/sectionfile/%s/'\);" % sectionfile_id
    assert re.search(expected, response.content.decode('utf-8'))
14,204
def get_clang_format_from_cache_and_extract(url, tarball_ext):
    """Get clang-format from mongodb's cache and extract the tarball
    """
    dest_dir = tempfile.gettempdir()
    temp_tar_file = os.path.join(dest_dir, "temp.tar" + tarball_ext)

    # Download from file
    print("Downloading clang-format %s from %s, saving to %s"
          % (CLANG_FORMAT_VERSION, url, temp_tar_file))

    # Retry download up to 5 times.
    num_tries = 5
    for attempt in range(num_tries):
        try:
            resp = urllib2.urlopen(url)
            with open(temp_tar_file, 'wb') as f:
                f.write(resp.read())
            break
        except urllib2.URLError:
            if attempt == num_tries - 1:
                raise
            continue

    extract_clang_format(temp_tar_file)
14,205
def validate_manifest(integration: Integration):
    """Validate manifest."""
    try:
        MANIFEST_SCHEMA(integration.manifest)
    except vol.Invalid as err:
        integration.add_error(
            "manifest",
            "Invalid manifest: {}".format(humanize_error(integration.manifest, err)),
        )
        integration.manifest = None
        return

    if integration.manifest["domain"] != integration.path.name:
        integration.add_error("manifest", "Domain does not match dir name")
14,206
def gen_files():
    """Return a generator of dir names reading in tempfile

    tempfile has this format:
    challenge<int>/file_or_dir<str>,is_dir<bool>
    03/rss.xml,False
    03/tags.html,False
    ...
    03/mridubhatnagar,True
    03/aleksandarknezevic,True

    -> use last column to filter out directories (= True)
    """
    with open(tempfile) as f:
        raw_data = f.read().splitlines()
    for line in raw_data:
        if line.split(',')[1] == 'True':
            yield line.split(',')[0]
14,207
def multi_build(request, recipes_fixture):
    """
    Builds the "one", "two", and "three" recipes.
    """
    if request.param:
        docker_builder = docker_utils.RecipeBuilder(use_host_conda_bld=True)
        mulled_test = True
    else:
        docker_builder = None
        mulled_test = False
    build.build_recipes(
        recipe_folder=recipes_fixture.basedir,
        docker_builder=docker_builder,
        config={},
        mulled_test=mulled_test,
    )
    built_packages = recipes_fixture.pkgs
    yield built_packages
    for pkgs in built_packages.values():
        for pkg in pkgs:
            ensure_missing(pkg)
14,208
def p_unary_expr(t):
    """
    unary_expr : MINUS expr
    """
    t[0] = FuncCall(t[1], [t[2]])
14,209
def systemd_daemon(agent_name, action=None):
    """Manage systemd daemon for agent.

    Args:
        agent_name: Name of agent
        action: Action to occur

    Returns:
        None

    """
    # Initialize key variables
    executable = '/bin/systemctl'
    options = ['start', 'stop', 'restart']
    fixed_action = action.lower()

    # Check user is root
    running_username = getpass.getuser()
    if running_username != 'root':
        log_message = 'You can only run this command as the \'root\' user.'
        log.log2die(1134, log_message)

    # Check if agent exists
    if systemd_exists(agent_name) is False:
        log_message = 'systemd not configured for daemon {}'.format(agent_name)
        log.log2die(1122, log_message)

    # Process request
    if fixed_action in options:
        command = '{} {} {}.service'.format(
            executable, fixed_action, agent_name)
        run_script(command)
    else:
        log_message = (
            'Invalid action "{}" for systemd daemon {}'
            ''.format(action, agent_name))
        log.log2die(1037, log_message)
14,210
def safelog(func):
    """Version of prism.log that has prism as an optional dependency.

    This prevents the sql database, which may not be available, from
    becoming a strict dependency."""
    @wraps(func)
    def inner(self, update, context):
        try:
            self.bot.cores["prism"].log_user(update.effective_user)
            if update.effective_user.id != update.effective_chat.id:
                # If the chat is not a one-to-one chat with the user.
                self.bot.cores["prism"].log_chat(update.effective_chat)
        except KeyError:
            # If the prism core is not loaded.
            pass
        func(self, update, context)
    return inner
14,211
def test_invalid_edge_color():
    """Test providing an invalid edge color raises an exception"""
    np.random.seed(0)
    shape = (10, 2, 2)
    data = np.random.random(shape)
    data[:, 0, :] = 20 * data[:, 0, :]
    layer = Vectors(data)

    with pytest.raises(ValueError):
        layer.edge_color = 5
14,212
def draw(args):
    """
    Draw a GraphML with the tribe draw method.
    """
    G = nx.read_graphml(args.graphml[0])
    draw_social_network(G, args.write)
    return ""
14,213
def extract_stem_voc(x):
    """extract word from predefined vocabulary with stemming and lemmatization

    Args:
        x ([string]): [a sentence]

    Returns:
        [list]: [word after stemming and lemmatization]
    """
    stem = PorterStemmer()
    # wnl = WordNetLemmatizer()
    all_words = set(words.words())
    # lemma_word = [word for word in map(lambda x: wnl.lemmatize(stem.stem(x)), re.findall('[a-zA-Z][-._a-zA-Z]*[a-zA-Z]', x)) if word in all_words]
    lemma_word = [word for word in map(lambda x: stem.stem(x), re.findall('[a-zA-Z][-._a-zA-Z]*[a-zA-Z]', x)) if word in all_words]
    return lemma_word
14,214
def plot_without_vis_spec(
        conditions_df: Union[str, pd.DataFrame],
        grouping_list: Optional[List[IdsList]] = None,
        group_by: str = 'observable',
        measurements_df: Optional[Union[str, pd.DataFrame]] = None,
        simulations_df: Optional[Union[str, pd.DataFrame]] = None,
        plotted_noise: str = MEAN_AND_SD,
        subplot_dir: Optional[str] = None,
        plotter_type: str = 'mpl',
        format_: str = 'png',
) -> Optional[Dict[str, plt.Subplot]]:
    """
    Plot measurements and/or simulations. What exactly should be plotted is
    specified in a grouping_list. If grouping list is not provided,
    measurements (simulations) will be grouped by observable, i.e. all
    measurements for each observable will be visualized on one plot.

    Parameters
    ----------
    grouping_list:
        A list of lists. Each sublist corresponds to a plot, each subplot
        contains the Ids of datasets or observables or simulation conditions
        for this plot.
    group_by:
        Grouping type. Possible values: 'dataset', 'observable', 'simulation'
    conditions_df:
        A condition DataFrame in the PEtab format or path to the condition
        file.
    measurements_df:
        A measurement DataFrame in the PEtab format or path to the data file.
    simulations_df:
        A simulation DataFrame in the PEtab format or path to the simulation
        output data file.
    plotted_noise:
        A string indicating how noise should be visualized:
        ['MeanAndSD' (default), 'MeanAndSEM', 'replicate', 'provided']
    subplot_dir:
        A path to the folder where single subplots should be saved.
        PlotIDs will be taken as file names.
    plotter_type:
        Specifies which library should be used for plot generation. Currently,
        only matplotlib is supported
    format_:
        File format for the generated figure.
        (See :py:func:`matplotlib.pyplot.savefig` for supported options).

    Returns
    -------
    ax:
        Axis object of the created plot.
    None:
        In case subplots are saved to a file.
    """
    if measurements_df is None and simulations_df is None:
        raise TypeError('Not enough arguments. Either measurements_data '
                        'or simulations_data should be provided.')
    vis_spec_parser = VisSpecParser(conditions_df, measurements_df,
                                    simulations_df)
    figure, dataprovider = vis_spec_parser.parse_from_id_list(
        grouping_list, group_by, plotted_noise)
    if plotter_type == 'mpl':
        plotter = MPLPlotter(figure, dataprovider)
    else:
        raise NotImplementedError('Currently, only visualization with '
                                  'matplotlib is possible.')
    return plotter.generate_figure(subplot_dir, format_=format_)
14,215
def _get_culled_faces(face_verts: torch.Tensor, frustum: ClipFrustum) -> torch.Tensor:
    """
    Helper function used to find all the faces in Meshes which are
    fully outside the view frustum. A face is culled if all 3 vertices are outside
    the same axis of the view frustum.

    Args:
        face_verts: An (F,3,3) tensor, where F is the number of faces in
            the packed representation of Meshes. The 2nd dimension represents the 3 vertices
            of a triangle, and the 3rd dimension stores the xyz locations of each
            vertex.
        frustum: An instance of the ClipFrustum class with the information on the
            position of the clipping planes.

    Returns:
        faces_culled: An boolean tensor of size F specifying whether or not each face should be
            culled.
    """
    clipping_planes = (
        (frustum.left, 0, "<"),
        (frustum.right, 0, ">"),
        (frustum.top, 1, "<"),
        (frustum.bottom, 1, ">"),
        (frustum.znear, 2, "<"),
        (frustum.zfar, 2, ">"),
    )
    faces_culled = torch.zeros(
        [face_verts.shape[0]], dtype=torch.bool, device=face_verts.device
    )
    for plane in clipping_planes:
        clip_value, axis, op = plane
        # If clip_value is None then don't clip along that plane
        if frustum.cull and clip_value is not None:
            if op == "<":
                verts_clipped = face_verts[:, axis] < clip_value
            else:
                verts_clipped = face_verts[:, axis] > clip_value
            # If all verts are clipped then face is outside the frustum
            faces_culled |= verts_clipped.sum(1) == 3
    return faces_culled
14,216
def _build_pep8_output(result):
    """
    Build the PEP8 output based on flake8 results.

    Results from both tools conform to the following format:

        <filename>:<line number>:<column number>: <issue code> <issue desc>

    with some issues providing more details in the description within
    parentheses.

    :param result: output from flake8
    :returns: list of flake8 output lines by error
    """
    # Aggregate individual errors by error
    _dict = collections.defaultdict(list)
    for line in str(result).split("\n"):
        if line:
            # Preserve only the code and brief description for each issue to
            # facilitate aggregating the results.  For example,
            #
            #    E501 line too long (178 > 79 characters) -> E501 line too long
            #    E303 too many blank lines (4) -> E303 too many blank lines
            parts = line.replace("(", ":").split(":")
            line_num, col_num, base_issue = parts[1:4]

            # Strip the whitespace around the base <issue code> <description>.
            #
            # Also restore the missing colon, stripped above, if the issue
            # was 'missing whitespace' surrounding a colon.
            issue = base_issue.strip()
            key = "{}:'".format(issue) if issue.endswith("after '") else issue

            _dict[key].append("{} ({})".format(line_num, col_num))

    # Build the output as one issue per entry
    return ["{}: {}".format(k, ", ".join(_dict[k])) for k in sorted(_dict.keys())]
14,217
def form_value(request, entity, attribute):
    """
    Return value from request params or the given entity.

    :param request: Pyramid request.
    :param entity: Instance to get attribute from if it isn't found in the
        request params.
    :param str attribute: Name of attribute to search for in the request params
        or on as an attribute of the given entity.
    """
    # Check for contains, because we want the request value even if it's empty
    if attribute in request.params:
        return request.params.get(attribute, '')
    if entity:
        # Don't provide a default value, because we want to make attribute typos clear
        return getattr(entity, attribute)
    return ''
14,218
def run(session):
    """Run inside host app."""
    try:
        # Show the UI
        session.seqRenameUI.show()
    except:
        # Create the UI
        session.seqRenameUI = SequenceRenameApp(parent=UI._main_window())
        session.seqRenameUI.show()
14,219
def process_batch_data(batch_words, batch_tags=None):
    """
    Padding batched dataset.

    Args:
        batch_words: Words in a batch.
        batch_tags: Punctuations in a batch.

    Returns:
        Words and punctuations after padding.
    """
    b_words, b_words_len = pad_sequences(batch_words)
    if batch_tags is None:
        return {"words": b_words, "seq_len": b_words_len, "batch_size": len(b_words)}
    else:
        b_tags, _ = pad_sequences(batch_tags)
        return {"words": b_words, "tags": b_tags, "seq_len": b_words_len, "batch_size": len(b_words)}
14,220
def mock_update_actions_interface(
    mock_root_fs_interface: MagicMock,
    mock_partition_manager_invalid_switch: MagicMock,
) -> MagicMock:
    """Mock UpdateActionsInterface"""
    updater = Updater(
        root_FS_intf=mock_root_fs_interface,
        part_mngr=mock_partition_manager_invalid_switch,
    )
    mock = MagicMock(spec=UpdateActionsInterface)
    mock.from_request.return_value = updater
    # Return the configured mock so the fixture actually provides it
    return mock
14,221
def grb2nc(glob_str, in_dir='./', out_dir='./'):
    """
    Creates netCDF files from grib files.

    :param glob_str: (str) - the naming pattern of the files
    :param in_dir: (str) - directory of input files
    :param out_dir: (str) - directory of output files
    :return fo_names: (list) - list of netCDF files' names
    """
    fi_url = os.path.join(in_dir, glob_str)
    fi_names = sorted(glob.glob('{}'.format(fi_url)))

    fo_names = []
    for fi_name in fi_names:
        fo_name_dir = fi_name.replace(in_dir, out_dir)
        if fi_name.endswith('.grb'):
            fo_name = fo_name_dir.replace('.grb', '.nc')
        elif fi_name.endswith('.grb2'):
            fo_name = fo_name_dir.replace('.grb2', '.nc')
        elif fi_name.endswith('.grib'):
            fo_name = fo_name_dir.replace('.grib', '.nc')
        elif fi_name.endswith('.grib2'):
            fo_name = fo_name_dir.replace('.grib2', '.nc')
        os.system("wgrib2 {fi_name} -netcdf {fo_name}".format(
            fi_name=fi_name, fo_name=fo_name))
        fo_names.append(fo_name)

    if len(fo_names) == 1:
        return fo_names[0]
    else:
        return fo_names
14,222
def test_MSIDset():
    """
    Read all available MSIDs into a single MSIDset.  Use the
    empirically determined lengths as regression tests.
    """
    msids = [hdr3['msid'] for hdr3 in aca_hdr3.HDR3_DEF.values()
             if 'value' in hdr3]
    msids = sorted(msids)

    # Read all MSIDs as a set
    dat = aca_hdr3.MSIDset(msids, '2010:001:12:00:00', '2010:003:12:00:00')

    val_lengths = np.array([len(dat[msid].vals) for msid in msids])
    time_lengths = np.array([len(dat[msid].times) for msid in msids])
    assert np.all(val_lengths == time_lengths)
    assert np.all(val_lengths == 44432)

    for msid in msids:
        dat[msid].filter_bad()

    val_lengths = np.array([len(dat[msid].vals) for msid in msids])
    time_lengths = np.array([len(dat[msid].times) for msid in msids])
    assert np.all(val_lengths == time_lengths)
    assert np.all(val_lengths == [10679, 40991, 40991, 40528, 40514, 40514,
                                  40991, 40991, 40514, 40991, 40514, 40514,
                                  40991, 40514, 10731, 40528, 40528, 40528,
                                  10679, 10760, 10679])
14,223
def check_that_all_fields_and_groups_invisible(sdk_client: ADCMClient, path, app):
    """Prepare cluster from `path` and check that all fields and groups invisible."""
    _, config = prepare_cluster_and_get_config(sdk_client, path, app)

    fields = config.get_field_groups()
    for field in fields:
        assert not field.is_displayed(), f"Field should be invisible. Field classes: {field.get_attribute('class')}"
    group_names = config.get_group_elements()
    assert not group_names, "Group elements should be invisible"
    config.show_advanced()
    assert config.advanced, "Advanced fields should be expanded"
    fields = config.get_field_groups()
    group_names = config.get_group_elements()
    assert not group_names, "Advanced group elements should be invisible"
    for field in fields:
        assert (
            not field.is_displayed()
        ), f"Advanced field should be invisible. Field classes: {field.get_attribute('class')}"
14,224
def emit(path_local, path_s3, time, poisson=0.0, ls=None, z_line=None, actin_permissiveness=None, comment = None, write = True, **kwargs): """Produce a structured JSON file that will be consumed to create a run Import emit into an interactive workspace and populate a directory with run configurations to be executed by a cluster. Parameters ---------- path_local: string The local (absolute or relative) directory to which we save both emitted files and run output. path_s3: string The s3 bucket (and optional folder) to save run output to and to which the emitted files should be uploaded. time: iterable Time trace for run, in ms poisson: float poisson ratio of lattice. 0.5 const vol; 0 default const lattice; negative for auxetic ls: float, optional Specifies the initial starting lattice spacing which will act as a zero or offset for the spacing. If not given, the default lattice spacing from hs.hs will be used. z_line: float or iterable, optional If not given, default distance specified in hs.hs is used. If given as float, the z-line distance for the run. If given as an iterable, used as trace for run, timestep by timestep. actin_permissiveness: float or iterable, optional Same as for z-line. comment: string, optional Space for comment on the purpose or other characteristics of the run write: bool, optional True (default) writes file to path_local/name.meta.json. Other values don't. In both cases the dictionary describing the run is returned. **kwargs: Further keyword args will be included in the output dictionary. These are used to sort the resulting runs by their properties of interest. For example, where we are varying phase of activation across a series of runs we would include the argument, e.g. 'phase=0.2', in order to sort over phase when looking at results. Returns ------- rund: dict Copy of run dictionary saved to disk as json. Examples -------- >>> emit('./', None, .1, 100, write=False) {'actin_permissiveness': None, ... 'actin_permissiveness_func': None, ... 'comment': None, ... 'lattice_spacing': None, ... 'lattice_spacing_func': None, ... 'name': ..., ... 'path_local': './', ... 'path_s3': None, ... 'timestep_length': 0.1, ... 'timestep_number': 100, ... 'z_line': None, ... 'z_line_func': None} """ rund = {} name = str(uuid.uuid1()) ## Build dictionary rund['name'] = name rund['comment'] = comment rund['path_local'] = path_local rund['path_s3'] = path_s3 rund['poisson_ratio'] = poisson rund['lattice_spacing'] = ls rund['z_line'] = z_line rund['actin_permissiveness'] = actin_permissiveness rund['timestep_length'] = np.diff(time)[0] rund['timestep_number'] = len(time) ## Include kwargs for k in kwargs: rund[k] = kwargs[k] ## Write out the run description if write is True: output_filename = os.path.join(path_local, name+'.meta.json') with open(output_filename , 'w') as metafile: json.dump(rund, metafile, indent=4) return rund
14,225
def adjust_learning_rate(
    optimizer: torch.optim,
    base_lr: float,
    iteration: int,
    warm_iter: int,
    max_iter: int,
) -> float:
    """
    warmup + cosine lr decay
    """
    start_lr = base_lr / 10
    if iteration <= warm_iter:
        lr = start_lr + (base_lr - start_lr) * iteration / warm_iter
    else:
        lr = start_lr + (base_lr - start_lr) * 0.5 * (
            1 + math.cos((iteration - warm_iter) * math.pi / (max_iter - warm_iter)))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr
14,226
def load_data(region):
    """
    Function to read in data according to region

    Args:
        region (str): valid values are US, JP and EU

    Returns:
        pd.DataFrame containing factors returns
    """
    # region='US'
    reg_mapper = {'US': 'USA', 'JP': 'JPN', 'EU': 'Europe'}
    if region not in reg_mapper:
        raise ValueError('region has to be one of %s'
                         % (', '.join(reg_mapper.keys())))

    data_folder = 'Data'
    filename = 'AQR_Data_Daily.xlsx'
    filepath = os.path.join(data_folder, filename)

    qual_df = pd.read_excel(filepath, sheet_name='QMJ Factors', skiprows=18,
                            parse_dates=[0], index_col=[0])[reg_mapper[region]]
    mkt_df = pd.read_excel(filepath, sheet_name='MKT', skiprows=18,
                           parse_dates=[0], index_col=[0])[reg_mapper[region]]
    mom_df = pd.read_excel(filepath, sheet_name='UMD', skiprows=18,
                           parse_dates=[0], index_col=[0])[reg_mapper[region]]
    val_df = pd.read_excel(filepath, sheet_name='HML FF', skiprows=18,
                           parse_dates=[0], index_col=[0])[reg_mapper[region]]
    rf_df = pd.read_excel(filepath, sheet_name='RF', skiprows=18,
                          parse_dates=[0], index_col=[0])['Risk Free Rate']

    data_df = pd.concat([mkt_df.rename('MKT'), val_df.rename('VAL'),
                         mom_df.rename('MOM'), qual_df.rename('QUAL'),
                         rf_df.rename('RF')], axis=1)

    # Drop dates with NaN RF
    data_df.dropna(subset=['RF'], inplace=True)

    # Drop dates with all NaNs
    data_df.dropna(how='all', inplace=True)

    # Check that returns are all valid after the first valid index
    if (data_df.apply(lambda x: x.loc[x.first_valid_index():].isnull().sum(),
                      axis=0) != 0).any():
        raise ValueError('Check the data. It has intermediate NaNs')

    # Provide basic data description
    print('Basic Description:')
    print(data_df.apply(lambda x: pd.Series(
        [x.mean(), x.std(ddof=1), x.skew(), x.kurtosis()],
        index=['Mean', 'Std Dev', 'Skew', 'Excess Kurtosis'])))

    print('\nCorrelations:')
    print(data_df.corr())

    return data_df
14,227
def convert(source, destination, worksheet_order=None, style=None, ignore_extra_sheets=True): """ Convert among Excel (.xlsx), comma separated (.csv), and tab separated formats (.tsv) Args: source (:obj:`str`): path to source file destination (:obj:`str`): path to save converted file worksheet_order (:obj:`list` of :obj:`str`): worksheet order style (:obj:`WorkbookStyle`, optional): workbook style for Excel ignore_extra_sheets (:obj:`bool`, optional): true/false should extra sheets in worksheet_order be ignored or should an error be thrown Raises: :obj:`ValueError`: if file extensions are not supported or file names are equal """ # check source != destination if source == destination: raise ValueError('Source and destination names must be different') # check extensions are valid _, ext_src = splitext(source) _, ext_dst = splitext(destination) if ext_src not in ['.xlsx', '.csv', '.tsv']: raise ValueError('Source extension must be one of ".xlsx", ".csv", or ".tsv"') if ext_dst not in ['.xlsx', '.csv', '.tsv']: raise ValueError('Destination extension must be one of ".xlsx", ".csv", or ".tsv"') # if extensions are the same, copy file(s) if ext_src == ext_dst and (worksheet_order is None or ext_src != '.xlsx'): if ext_src == '.xlsx': copyfile(source, destination) else: i_glob = source.find('*') if not list(glob(source)): raise ValueError("glob of path '{}' does not match any files".format(source)) for filename in glob(source): if i_glob == -1: sheet_name = '' else: sheet_name = filename[i_glob:i_glob + len(filename) - len(source) + 1] copyfile(filename, destination.replace('*', sheet_name)) return # read, convert, and write workbook = read(source) ordered_workbook = Workbook() worksheet_order = worksheet_order or [] if not ignore_extra_sheets: difference = set(worksheet_order) - set(workbook.keys()) if difference: raise ValueError("source '{}' missing worksheets: '{}'".format(source, difference)) for worksheet in chain(worksheet_order, workbook.keys()): if worksheet in workbook: ordered_workbook[worksheet] = workbook[worksheet] write(destination, ordered_workbook, style=style)
14,228
def load_data(filename: str):
    """
    Load house prices dataset and preprocess data.

    Parameters
    ----------
    filename: str
        Path to house prices dataset

    Returns
    -------
    Design matrix and response vector (prices) - either as a single
    DataFrame or a Tuple[DataFrame, Series]
    """
    df = pd.read_csv(filename).drop_duplicates()
    df = filter_data(df)
    return df.drop("price", axis=1), df.filter(['price'])
14,229
def face_at_link(shape, actives=None, inactive_link_index=BAD_INDEX_VALUE):
    """Array of faces associated with links.

    Returns an array that maps link ids to face ids. For inactive links,
    which do not have associated faces, set their ids to
    *inactive_link_index*. Use the *actives* keyword to specify an array that
    contains the ids of all active links in the grid. The default assumes
    that only the perimeter nodes are inactive.

    Examples
    --------
    >>> from landlab.utils.structured_grid import face_at_link
    >>> faces = face_at_link((3, 4), inactive_link_index=-1)
    >>> faces # doctest: +NORMALIZE_WHITESPACE
    array([-1,  0,  1, -1, -1,  2,  3, -1, -1, -1, -1,
            4,  5,  6, -1, -1, -1])
    """
    if actives is None:
        actives = active_links(shape)

    num_links = link_count(shape)
    link_faces = np.empty(num_links, dtype=np.int)
    link_faces.fill(inactive_link_index)
    link_faces[actives] = np.arange(len(actives))

    return link_faces
14,230
def snrcat(spec, plugmap):
    """This function calculates the S/N for each fiber.

    Parameters
    ----------
    spec : SpecFrame object
        The SpecFrame object that constrains the 1D extracted spectra.
    plugmap : numpy structured array
        The plugmap information for each fiber including which fiber
        contains sky or stars.

    Returns
    -------
    cat : numpy structured array
        A catalog containing information on each object in the fibers and
        the median S/N.

    Example
    -------

    .. code-block:: python

        cat = snrcat(spec,plugmap)

    """
    dtype = np.dtype([('apogee_id', np.str, 30), ('ra', np.float64), ('dec', np.float64),
                      ('hmag', np.float), ('objtype', np.str, 30), ('fiberid', np.int),
                      ('fiberindex', np.int), ('flux', np.float), ('err', np.float),
                      ('snr', np.float)])
    cat = np.zeros(300, dtype=dtype)

    # Load the spectral data
    cat['fiberindex'] = np.arange(300)
    cat['flux'] = np.median(spec.flux, axis=1)
    cat['err'] = np.median(spec.err, axis=1)
    err = cat['err']
    bad = (err <= 0.0)
    err[bad] = 1.0
    cat['snr'] = cat['flux'] / err

    # Load the plugging data
    pcat = plugmap['PLUGMAPOBJ']
    fibs, = np.where((pcat['fiberId'] >= 0) & (pcat['holeType'] == 'OBJECT') &
                     (pcat['spectrographId'] == 2))
    fiberindex = 300 - pcat[fibs]['fiberId']
    cat['apogee_id'][fiberindex] = pcat[fibs]['tmass_style']
    cat['ra'][fiberindex] = pcat[fibs]['ra']
    cat['dec'][fiberindex] = pcat[fibs]['dec']
    cat['hmag'][fiberindex] = pcat[fibs]['mag'][:, 1]
    cat['objtype'][fiberindex] = pcat[fibs]['objType']
    cat['fiberid'][fiberindex] = pcat[fibs]['fiberId']

    cat = Table(cat)

    return cat
14,231
def progress_bar(
    iterations: Any, prefix: str = "", size: int = 60, file: Any = sys.stdout
) -> None:
    """
    A function to display the progress bar related to a process.

    :param iterations:
    :param prefix:
    :param size:
    :param file:
    :return:
    """
    count = len(iterations)

    def show(j):
        x = int(size * j / count)
        file.write("%s[%s%s] %i/%i\r" % (prefix, "#" * x, "." * (size - x), j, count))
        file.flush()

    show(0)
    for i, item in enumerate(iterations):
        yield item
        show(i + 1)
    file.write("\n")
    file.flush()
14,232
def test_parsed_dbase():
    """
    Test that parsed_dbase still matches what is returned by rhessi.
    """
    filename, _ = urlretrieve(
        "https://hesperia.gsfc.nasa.gov/hessidata/dbase/hsi_obssumm_filedb_200311.txt")
    dbase = sunpy.instr.rhessi.parse_observing_summary_dbase_file(filename)
    rows = {}
    for key in dbase.keys():
        rows[key] = dbase[key][:5]
    assert rows == parsed_dbase()
14,233
def forward(S, A, O, obs):
    """Calculates the forward probability matrix F. This is a matrix where each (i, j) entry
       represents P(o_1, o_2, ... o_j, X_t = i| A, O). In other words, each (i, j) entry is the
       probability that the observed sequence is o_1, ... o_j and that at position j we are in
       hidden state i. We build F from the first observation o_1 up to the entire observed
       sequence o_1, ... o_M. Thus F has dimension L x M where L is the number of hidden states
       and M is the length of our input sample 'obs'.

       @params:    S    np.array - state vector for starting distribution.
                   A    np.array - transition matrix, L x L for L hidden states, each (i, j)
                        entry is P(X_i | X_j), or the probability of transitioning from start
                        state X_j (column entry) to target state X_i (row entry).
                   O    np.array - observation matrix, L x M' for L hidden states and M' total
                        possible observations. each (i, j) entry is P(Y_j | X_i), or the
                        probability of observing observation Y_j while in state X_i.
                   obs  np.array, list - the observations. these are assumed to be integers
                        that index correctly into A and O.
    """
    assert np.shape(A)[0] == np.shape(A)[1]  # transition matrix should be square

    L = np.shape(A)[0]    # L is the number of hidden states
    M = len(obs)          # M is the number of observations in our sample 'obs'

    C = []                # the list of coefficients used to normalize each column to 1
    F = np.zeros((L, M))  # the foward algorithm generates an L x M matrix

    F[:, 0] = np.multiply(S, O[:, obs[0]])  # initialize the first column of F via S * (obs[0] column of B)
    c_0 = np.sum(F[:, 0])              # compute the first normalizing coefficient
    C.append(c_0)                      # record c_0
    F[:, 0] = np.divide(F[:, 0], c_0)  # normalize the first column so the entries sum to 1

    # begin the forward algorithm. generate each subsequent column of F via the previous one,
    # normalizing at each step
    for j in range(1, M):
        F[:, j] = np.dot(np.multiply(A, O[:, obs[j]]), F[:, j - 1])  # compute the new column j
        c_j = np.sum(F[:, j])              # compute the jth coeff.
        C.append(c_j)                      # record the jth coeff.
        F[:, j] = np.divide(F[:, j], c_j)  # normalize column j

    # return the foward matrix F and the list of normalizing coefficients C (these will be used
    # to normalize the backward probabilities in the backward step)
    return (F, C)
14,234
def dl_progress(num_blocks, block_size, total_size):
    """Show a decent download progress indication."""
    progress = num_blocks * block_size * 100 / total_size
    if num_blocks != 0:
        sys.stdout.write(4 * '\b')
    sys.stdout.write('{0:3d}%'.format((progress)))
14,235
def sample():
    """Sample database commands."""
14,236
def calculate_state(position, dt):
    """
    Sometimes, a data file will include position only. In those cases,
    the velocity must be calculated before the regression is run.

    If the position is

        | position_11   position_21 |
        | position_12   position_22 |
        | ......................... |
        | position_1n   position_2n |

    The value returned is

        | position_11    position_21    velocity_11    velocity_21   |
        | position_12    position_22    velocity_12    velocity_22   |
        | ........................................................   |
        | position_1n-1  position_2n-1  velocity_1n-1  velocity_2n-1 |

    The last value of each state is clipped off because given n values,
    there are n-1 differences between them.
    """
    # velocity is (x1 - x0) * dt
    velocity = (position[1:, :] - position[:-1, :]) * dt
    state = np.hstack((position[:-1, :], velocity))
    return state
14,237
def QuitChrome(chrome_path):
    """ Tries to quit chrome in a safe way. If there is still an open instance
    after a timeout delay, the process is killed the hard way.

    Args:
        chrome_path: The path to chrome.exe.
    """
    if not CloseWindows(chrome_path):
        # TODO(robertshield): Investigate why Chrome occasionally doesn't shut
        # down.
        sys.stderr.write('Warning: Chrome not responding to window closure. '
                         'Killing all processes belonging to %s\n' % chrome_path)
        KillNamedProcess(chrome_path)
14,238
def create_tables(conn, create_table_sql):
    """ create a table from the create_table_sql statement

    :param conn: Connection object
    :param create_table_sql: a CREATE TABLE statement
    :return:
    """
    try:
        c = conn.cursor()
        c.execute(create_table_sql)
        conn.commit()
    except Exception as e:
        print("Tables could not be created:", e)
14,239
def get_server(name: Optional[str] = None,
               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServerResult:
    """
    Use this data source to retrieve an auth server from Okta.

    ## Example Usage

    ```python
    import pulumi
    import pulumi_okta as okta

    example = okta.auth.get_server(name="Example Auth")
    ```

    :param str name: The name of the auth server to retrieve.
    """
    __args__ = dict()
    __args__['name'] = name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('okta:auth/getServer:getServer', __args__, opts=opts, typ=GetServerResult).value

    return AwaitableGetServerResult(
        audiences=__ret__.audiences,
        credentials_last_rotated=__ret__.credentials_last_rotated,
        credentials_next_rotation=__ret__.credentials_next_rotation,
        credentials_rotation_mode=__ret__.credentials_rotation_mode,
        description=__ret__.description,
        id=__ret__.id,
        issuer=__ret__.issuer,
        issuer_mode=__ret__.issuer_mode,
        kid=__ret__.kid,
        name=__ret__.name,
        status=__ret__.status)
14,240
def estimate_aps_user_defined(ml, X_c = None, X_d = None, data = None, C: Sequence = None, D: Sequence = None, L: Dict[int, Set] = None, S: int = 100, delta: float = 0.8, seed: int = None, pandas: bool = False, pandas_cols: Sequence = None, keep_order: bool = False, reorder: Sequence = None, parallel: bool = False, nprocesses: int = None, ntasks: int = 1, **kwargs): """Estimate APS for given dataset and user defined ML function Approximate propensity score estimation involves taking draws :math:`X_c^1, \\ldots,X_c^S` from the uniform distribution on :math:`N(X_{ci}, \\delta)`, where :math:`N(X_{ci},\\delta)` is the :math:`p_c` dimensional ball centered at :math:`X_{ci}` with radius :math:`\\delta`. :math:`X_c^1, \\ldots,X_c^S` are destandardized before passed for ML inference. The estimation equation is :math:`p^s(X_i;\\delta) = \\frac{1}{S} \\sum_{s=1}^{S} ML(X_c^s, X_{di})`. Parameters ----------- ml: Object User defined ml function X_c: array-like, default: None 1D/2D vector of continuous input variables X_d: array-like, default: None 1D/2D vector of discrete input variables data: array-like, default: None Dataset containing ML input variables C: array-like, default: None Integer column indices for continous variables D: array-like, default: None Integer column indices for discrete variables L: Dict[int, Set] Dictionary with keys as indices of X_c and values as sets of discrete values S: int, default: 100 Number of draws for each APS estimation delta: float, default: 0.8 Radius of sampling ball seed: int, default: None Seed for sampling pandas: bool, default: False Whether to cast inputs into pandas dataframe pandas_cols: Sequence, default: None Columns names for dataframe input keep_order: bool, default: False Whether to maintain the column order if data passed as a single 2D array reorder: Sequence, default: False Indices to reorder the data assuming original order [X_c, X_d] parallel: bool, default: False Whether to parallelize the APS estimation nprocesses: int, default: None Number of processes to parallelize. Defaults to number of processors on machine. ntasks: int, default: 1 Number of tasks to send to each worker process. **kwargs: keyword arguments to pass into user function Returns ----------- np.ndarray Array of estimated APS for each observation in sample Notes ------ X_c, X_d, and data should never have any overlapping variables. This is not checkable through the code, so please double check this when passing in the inputs. The arguments `keep_order`, `reorder`, and `pandas_cols` are applied sequentially, in that order. This means that if `keep_order` is set, then `reorder` will reorder the columns from the original column order as `data`. `pandas_cols` will then be the names of the new ordered dataset. The default ordering of inputs is [X_c, X_d], where the continuous variables and discrete variables will be in the original order regardless of how their input is passed. If `reorder` is called without `keep_order`, then the reordering will be performed on this default ordering. Parallelization uses the `Pool` module from pathos, which will NOT be able to deal with execution on GPU. If the user function enables inference on GPU, then it is recommended to implement parallelization within the user function as well. The optimal settings for nprocesses and nchunks are specific to each machine, and it is highly recommended that the user pass these arguments to maximize the performance boost. 
`This SO thread <https://stackoverflow.com/questions/42074501/python-concurrent-futures-processpoolexecutor-performance-of-submit-vs-map>`_ recommends setting nchunks to be 14 * # of workers for optimal performance. """ # Set X_c and X_d based on inputs if X_c is None and data is None: raise ValueError("APS estimation requires continuous data!") # Prioritize explicitly passed variables if X_c is not None: X_c = np.array(X_c).astype(float) if X_d is not None: X_d = np.array(X_d).astype(float) if data is not None: data = np.array(data).astype(float) # If X_c not given, but data is, then we assume all of data is X_c if X_c is None and X_d is not None and data is not None: print("`X_c` not given but both `X_d` and `data` given. We will assume that all the variables in `data` are continuous.") X_c = data # If X_d not given, but data is, then we assume all of data is X_d if X_c is not None and X_d is None and data is not None: print("`X_d` not given but both `X_c` and `data` given. We will assume that all the variables in `data` are discrete.") X_d = data # If both X_c and X_d are none, then use indices order = None if X_c is None and X_d is None: # Save original order if keep order in place if keep_order: order = _get_og_order(data.shape[1], C, D) if C is None and D is None: print("`data` given but no indices passed. We will assume that all the variables in `data` are continuous.") X_c = data elif C is None: if isinstance(D, int): d_len = 1 else: d_len = len(D) X_d = data[:,D] if d_len >= data.shape[1]: raise ValueError(f"Passed discrete indices of length {d_len} for input data of shape {data.shape}. Continuous variables are necessary to conduct APS estimation.") else: print(f"Passed discrete indices of length {d_len} for input data of shape {data.shape}. Remaining columns of `data` will be assumed to be continuous variables.") X_c = np.delete(data, D, axis = 1) elif D is None: if isinstance(C, int): c_len = 1 else: c_len = len(C) X_c = data[:,C] if c_len < data.shape[1]: print(f"Passed continuous indices of length {c_len} for input data of shape {data.shape}. 
Remaining columns of `data` will be assumed to be discrete variables.") X_d = np.delete(data, C, axis = 1) else: X_c = data[:,C] X_d = data[:,D] # Force X_c to be 2d array if X_c.ndim == 1: X_c = X_c[:,np.newaxis] if X_d is not None: if X_d.ndim == 1: X_d = X_d[:,np.newaxis] # === Preprocess mixed variables === if L is not None: L_keys = np.array(list(L.keys())) L_vals = np.array(list(L.values())) X_c, mixed_og_vals, mixed_og_inds = _preprocessMixedVars(X_c, L_keys, L_vals) mixed_rows, mixed_cols = mixed_og_inds else: mixed_og_vals = None mixed_og_inds = None # === Standardize continuous variables === # Formula: (X_ik - u_k)/o_k; k represents a continuous variable X_c, mu, sigma = standardize(X_c) if seed is not None: np.random.seed(seed) # If parallelizing, then force inference on CPU if parallel == True: cpu = True import pathos from functools import partial from itertools import repeat computeUserAPS_frozen = partial(_computeUserAPS, ml = ml, S = S, delta = delta, mu = mu, sigma = sigma, pandas = pandas, pandas_cols = pandas_cols, order = order, reorder = reorder, **kwargs) mp = pathos.helpers.mp p = mp.Pool(nprocesses) if nprocesses is None: workers = "default (# processors)" nprocesses = mp.cpu_count() else: workers = nprocesses print(f"Running APS estimation with {workers} workers...") # Split input arrays into chunked rows nchunks = ntasks * nprocesses X_c_split = np.array_split(X_c, nchunks) iter_c = iter(X_c_split) if X_d is None: iter_d = repeat(None) else: iter_d = iter(np.array_split(X_d, nchunks)) if L is None: iter_L_ind = repeat(None) iter_L_val = repeat(None) else: # Split indices depending on which chunk they fall into chunksizes = np.append([0], np.cumsum([c.shape[0] for c in X_c_split])) chunked_inds = [(mixed_rows[np.where(np.isin(mixed_rows, range(chunksizes[i], chunksizes[i+1])))] - chunksizes[i], mixed_cols[np.where(np.isin(mixed_rows, range(chunksizes[i], chunksizes[i+1])))]) for i in range(len(chunksizes) - 1)] chunked_vals = [mixed_og_vals[np.where(np.isin(mixed_rows, range(chunksizes[i], chunksizes[i+1])))] for i in range(len(chunksizes) - 1)] iter_L_ind = iter(chunked_inds) iter_L_val = iter(chunked_vals) iter_args = zip(iter_c, iter_d, iter_L_ind, iter_L_val) p_out = p.starmap(computeUserAPS_frozen, iter_args) p.close() p.join() aps_vec = np.concatenate(p_out) else: aps_vec = _computeUserAPS(X_c, X_d, mixed_og_inds, mixed_og_vals, ml, S, delta, mu, sigma, pandas, pandas_cols, order, reorder, **kwargs) # Compute APS for each individual i aps_vec = np.array(aps_vec) return aps_vec
14,241
def shared_vinchain_instance():
    """ This method will initialize ``SharedInstance.instance`` and return it.
        The purpose of this method is to have offer single default
        vinchainio instance that can be reused by multiple classes.
    """
    if not SharedInstance.instance:
        clear_cache()
        SharedInstance.instance = vin.VinChain()
    return SharedInstance.instance
14,242
def export_viewpoint_to_nw(view_point: ViewPoint) -> Element: """ Represents current view point as a NavisWorks view point XML structure :param view_point: ViewPoint instance that should be represented in XML :return: XML Element instance with inserted view point """ path_to_viewpoint_template = os.path.join( BASE_DIR, 'EasyView', 'static', 'EasyView', 'export', 'view_point_template.xml') viewpoint_template = ET.parse(path_to_viewpoint_template) view = viewpoint_template.getroot() # View point - fov, position and rotation camera = view[0][0] pos3f = camera[0][0] quaternion = camera[1][0] camera_attributes = ( ('height', str(math.radians(view_point.fov))), ) # Either a remark description (if presented), a view point description(if presented) or generated name description = view_point.description if not view_point.description: description = f'Точка обзора {view_point.pk}' related_remark = Remark.objects.filter(view_point=view_point) if related_remark: description = view_point.remark.description view_attributes = ( ('guid', str(uuid.uuid4())), ('name', description), ) pos3f_attributes = tuple(zip(('x', 'y', 'z',), map(lambda x: str(x), view_point.position))) quaternion_attributes = tuple(zip(('a', 'b', 'c', 'd'), map(lambda x: str(x), view_point.quaternion))) # Clipping planes clip_plane_set = view[1] clip_planes = clip_plane_set[1] clipped = False clip_counter = 0 for i, status in enumerate(view_point.clip_constants_status): if status: if not clipped: clipped = True clip_counter += 1 clip_planes[i].set('state', 'enabled') clip_planes[i][0].set('distance', f'{view_point.clip_constants[i]:.10f}') if clipped: clip_plane_set.set('enabled', '1') clip_plane_set.set('current', str(clip_counter - 1)) element_attribute_pairs = ( (camera, camera_attributes), (view, view_attributes), (pos3f, pos3f_attributes), (quaternion, quaternion_attributes), ) for element, attributes in element_attribute_pairs: for attribute, value in attributes: element.set(attribute, value) return view
14,243
def generateMfccFeatures(filepath):
    """
    :param filepath:
    :return:
    """
    y, sr = librosa.load(filepath)
    mfcc_features = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40)
    return mfcc_features
14,244
def homography_crop_resize(org_img_size, crop_y, resize_img_size):
    """
    compute the homography matrix transform original image to cropped and resized image

    :param org_img_size: [org_h, org_w]
    :param crop_y:
    :param resize_img_size: [resize_h, resize_w]
    :return:
    """
    # transform original image region to network input region
    ratio_x = resize_img_size[1] / org_img_size[1]
    ratio_y = resize_img_size[0] / (org_img_size[0] - crop_y)
    H_c = np.array([[ratio_x, 0, 0],
                    [0, ratio_y, -ratio_y * crop_y],
                    [0, 0, 1]])
    return H_c
14,245
def writeSetFLFinnisSinclair( nrho, drho, nr, dr, eampots, pairpots, out = sys.stdout, comments = ["", "", ""], cutoff = None): """Creates Finnis-Sinclar EAM potential in the DYNAMO ``setfl`` format. The format should be used with the ``LAMMPS`` `eam/fs pair_style <http://lammps.sandia.gov/doc/pair_eam.html>`_. The :class:`~atsim.potentials.EAMPotential` instances within the ``eampots`` list are expected to provide individual density functions for each species pair in the species being tabulated. See :meth:`atsim.potentials.EAMPotential.__init__` for how these are specified to the :class:`atsim.potentials.EAMPotential` constructor. .. seealso :: For a working example using this function see :ref:`eam_example_3a` :param nrho: Number of points used to describe embedding function :type nrho: int :param drho: Increment used when tabulating embedding function :type drho: float :param nr: Number of points used to describe density and pair potentials :type nr: int :param dr: Separation increment used when tabulating density function and pair potentials :type dr: float :param eampots: Instances of lammps.writeEAMTable.EAMPotential() which encapsulate information about each species :type eampots: list :param pairpots: Instance of potentials.Potential, these describe repulsive pair potential component of EAM potential :type pairpots: list :param out: Python file object into which EAM potential data should be written :type out: file object :param comments: List containing three strings, these form the header of the created file :type comments: list :param cutoff: Pair potential and density cutoff. If None then value of ``nr`` * ``dr`` is used. :type cutoff: float""" if not cutoff: cutoff = nr * dr #Specialise _writeSetFL to use _writeSetFLDensityFunctionFinnisSinclar to write multiple density functions _writeSetFL( nrho, drho, nr, dr, cutoff, eampots, pairpots, comments, out, _writeSetFLDensityFunctionFinnisSinclair)
14,246
def AddtoVtuField(vtu, add, fieldName, scale=None):
    """
    Add a field from a vtu onto the corresponding field in an input vtu
    """
    if optimise.DebuggingEnabled():
        assert(VtuMatchLocations(vtu, add))

    if scale is None:
        vtu.AddFieldToField(fieldName, add.GetField(fieldName))
    else:
        vtu.AddFieldToField(fieldName, add.GetField(fieldName) * scale)

    return
14,247
def generate_offsets(minRadius: int = 0, maxRadius: int = 3_750_000):
    """Generate x and z coordinates in concentric circles around the origin

    Uses Bresenham's Circle Drawing Algorithm
    """
    def yield_points(x, y):
        yield x, y
        yield x, -y
        yield -x, -y
        yield -x, y
        if x != y:
            yield y, x
            yield y, -x
            yield -y, -x
            yield -y, x

    def yield_circle(radius, previousCircle):
        x = 0
        y = radius
        d = 3 - (2 * radius)
        while x < y:
            for point in yield_points(x, y):
                if point not in previousCircle:
                    yield point
            if d < 0:
                d += (4 * x) + 6
            else:
                d += (4 * (x - y)) + 10
                for point in itertools.chain(yield_points(x + 1, y), yield_points(x, y - 1)):
                    if point not in previousCircle:
                        yield point
                y -= 1
            x += 1

    previousCircle = [(0, 0)]
    for radius in range(minRadius, maxRadius):
        circle = set()
        for point in yield_circle(radius, previousCircle):
            if point not in circle:
                yield point
                circle.add(point)
        previousCircle = circle
14,248
def extract_project_info(req_soup, full_name=False):
    """Extract the relevant project info from a request.

    Arguments:
        req_soup (BS4 soup object): The soup of the request.
        full_name (boolean): Whether or not to capture the entire project
            name or just the last hyphenated element.

    Returns:
        prj_info (Project): The required info to post a project.
    """
    if full_name:
        prj_name = req_soup.find("name").string
    else:
        prj_name = req_soup.find("name").string.split('-')[-1]

    res_name = req_soup.find("owner").find("name").string
    email = req_soup.find("owner").find("email").string

    # NOTE: Change this line to your own institution's email domain.
    if "email.arizona.edu" in email:
        res_lab = "internal"
    else:
        res_lab = "external"

    # Replace all not ascii chars with ascii ones, and any symbols with '-'.
    prj_res = api_types.Researcher(
        extract_custom_forms._sanitize_text(res_name.split()[0]),
        extract_custom_forms._sanitize_text(res_name.split()[-1]),
        extract_custom_forms._sanitize_text(res_lab),
        email,
        "")

    prj_info = api_types.Project(prj_name, prj_res)

    return prj_info
14,249
def histogram(ax, data, **kwargs): """smp_base.plot.histogram Plot the histogram """ assert len(data.shape) > 0, 'Data has bad shape = %s' % (data.shape, ) _loglevel = loglevel_debug + 0 # logger.log(_loglevel, " plot.histogram histo kwargs", kwargs) # init local kwargs kwargs_ = {} kwargs_.update(**kwargs) # kwargs = kwargs_plot_clean_histogram(**kwargs_) kwargs = plot_clean_kwargs('histogram', **kwargs_) # if not kwargs.has_key('histtype'): # kwargs_['histtype'] = kwargs['histtype'] logger.log(_loglevel, " plot.histogram kwargs .keys = %s" % (list(kwargs.keys()))) logger.log(_loglevel, " plot.histogram kwargs_.keys = %s" % (list(kwargs_.keys()))) # explicit limits and bins configuration if kwargs_['ylim'] is not None and kwargs_['orientation'] == 'horizontal': bins = np.linspace(kwargs_['ylim'][0], kwargs_['ylim'][1], 21 + 1) # logger.log(_loglevel, " plot.histogram setting bins = %s for orientation = %s from ylim = %s" % (bins, kwargs_['orientation'], kwargs_['ylim'])) elif kwargs_['xlim'] is not None and kwargs_['orientation'] == 'vertical': bins = np.linspace(kwargs_['xlim'][0], kwargs_['xlim'][1], 21 + 1) # logger.log(_loglevel, " plot.histogram setting bins = %s for orientation = %s from xlim = %s" % (bins, kwargs_['orientation'], kwargs_['xlim'])) elif 'bins' in kwargs_: bins = kwargs_['bins'] else: bins = 'auto' logger.log( _loglevel, " plot.histogram setting bins = %s for orientation = %s from xlim = %s", bins, kwargs_['orientation'], kwargs_['xlim']) # FIXME: decouple compute histogram; incoming data is bar data # already (def bar(...)) logger.log(_loglevel, " plot.histogram data = %s", data.shape) # if data.shape[-1] > 1: for i in range(data.shape[-1]): # compute the histogram for each variable (columns) in the input data # (n, bins) = np.histogram(data, bins = bins, **kwargs) # (n, bins) = meas_hist(data, bins = bins, **kwargs) (n, bins_i) = meas_hist(data[:,[i]], bins = bins, **kwargs) binwidth = np.mean(np.abs(np.diff(bins_i))) bincenters = bins_i[:-1] + binwidth/2.0 n = n / float(np.sum(n)) logger.log(_loglevel, " plot.histogram[%d] n = %s/%s", i, n.shape, n) logger.log(_loglevel, " plot.histogram[%d] binwidth = %s", i, binwidth) logger.log(_loglevel, " plot.histogram[%d] bincenters = %s/%s", i, bincenters.shape, bincenters) # kwargs = kwargs_plot_clean_bar(**kwargs_) kwargs_b = plot_clean_kwargs('bar', **kwargs_) logger.log(_loglevel, " plot.histogram[%d] kwargs = %s", i, list(kwargs_b.keys())) # orientation if kwargs_['orientation'] == 'vertical': axbar = ax.bar kwargs_b['width'] = binwidth elif kwargs_['orientation'] == 'horizontal': axbar = ax.barh kwargs_b['height'] = binwidth # plot the pre-computed histogram with bar plot patches = axbar(bincenters, n, **kwargs_b)
14,250
def test_measure_6(base_settings):
    """No. 6 tests collection for Measure.
    Test File: measure-composite-example.json
    """
    filename = base_settings["unittest_data_dir"] / "measure-composite-example.json"
    inst = measure.Measure.parse_file(
        filename, content_type="application/json", encoding="utf-8"
    )
    assert "Measure" == inst.resource_type

    impl_measure_6(inst)

    # testing reverse by generating data from itself and create again.
    data = inst.dict()
    assert "Measure" == data["resourceType"]

    inst2 = measure.Measure(**data)
    impl_measure_6(inst2)
14,251
def get_underlying_asset_price(token_symbol: str) -> Optional[Price]:
    """Gets the underlying asset price for token symbol, if any

    This function is neither in inquirer.py or chain/ethereum/defi.py due to
    recursive import problems
    """
    price = None
    if token_symbol == 'yaLINK':
        price = Inquirer().find_usd_price(A_ALINK)
    elif token_symbol == 'yDAI':
        price = Inquirer().find_usd_price(A_DAI)
    elif token_symbol == 'yWETH':
        price = Inquirer().find_usd_price(A_ETH)
    elif token_symbol == 'yYFI':
        price = Inquirer().find_usd_price(A_YFI)
    elif token_symbol == 'yUSDT':
        price = Inquirer().find_usd_price(A_USDT)
    elif token_symbol == 'yUSDC':
        price = Inquirer().find_usd_price(A_USDC)
    elif token_symbol == 'yTUSD':
        price = Inquirer().find_usd_price(A_TUSD)
    elif token_symbol in ('ycrvRenWSBTC', 'crvRenWBTC', 'crvRenWSBTC'):
        price = Inquirer().find_usd_price(A_BTC)

    return price
14,252
def do_call_async(
    fn_name, *args, return_type=None, post_process=None
) -> asyncio.Future:
    """Perform an asynchronous library function call."""
    lib_fn = getattr(get_library(), fn_name)
    loop = asyncio.get_event_loop()
    fut = loop.create_future()
    cf_args = [None, c_int64, c_int64]
    if return_type:
        cf_args.append(return_type)
    cb_type = CFUNCTYPE(*cf_args)  # could be cached
    cb_res = _create_callback(cb_type, fut, post_process)
    # keep a reference to the callback function to avoid it being freed
    CALLBACKS[fut] = (loop, cb_res)
    result = lib_fn(*args, cb_res, c_void_p())  # not making use of callback ID
    if result:
        # callback will not be executed
        if CALLBACKS.pop(fut):
            fut.set_exception(get_current_error())
    return fut
14,253
def prefer_static_value(x):
    """Return static value of tensor `x` if available, else `x`.

    Args:
      x: `Tensor` (already converted).

    Returns:
      Numpy array (if static value is obtainable), else `Tensor`.
    """
    static_x = tensor_util.constant_value(x)
    if static_x is not None:
        return static_x
    return x
14,254
def copy_parameter_value(value, shared_types=None, memo=None):
    """
        Returns a copy of **value** used as the value or spec of a
        Parameter, with exceptions.

        For example, we assume that if we have a Component in an
        iterable, it is meant to be a pointer rather than something
        used in computation requiring it to be a "real" instance
        (like `Component.function`)

        e.g. in spec attribute or Parameter `Mechanism.input_ports_spec`
    """
    from psyneulink.core.components.component import Component, ComponentsMeta

    if shared_types is None:
        shared_types = (Component, ComponentsMeta, types.MethodType)
    else:
        shared_types = tuple(shared_types)

    try:
        return copy_iterable_with_shared(
            value,
            shared_types=shared_types,
            memo=memo
        )
    except TypeError:
        # this will attempt to copy the current object if it
        # is referenced in a parameter, such as
        # ComparatorMechanism, which does this for input_ports
        if not isinstance(value, shared_types):
            return copy.deepcopy(value, memo)
        else:
            return value
14,255
def center(win): """ centers a tkinter window :param win: the main window or Toplevel window to center """ win.update_idletasks() width = win.winfo_width() frm_width = win.winfo_rootx() - win.winfo_x() win_width = width + 2 * frm_width height = win.winfo_height() titlebar_height = win.winfo_rooty() - win.winfo_y() win_height = height + titlebar_height + frm_width x = win.winfo_screenwidth() // 2 - win_width // 2 y = win.winfo_screenheight() // 2 - win_height // 2 win.geometry('+{}+{}'.format(x, y)) win.deiconify()
14,256
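A minimal sketch of using center() on a plain Tk root window; the title and initial size are arbitrary placeholders.

import tkinter as tk

root = tk.Tk()
root.title('Centered window')
root.geometry('400x300')   # requested size; center() reads the realized geometry
center(root)               # reposition the window on the primary screen
root.mainloop()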
def test_get_window(): """ Test rebootmgr.get_window without parameters """ window = "Maintenance window is set to *-*-* 03:30:00, lasting 01h30m." salt_mock = { "cmd.run_all": MagicMock(return_value={"stdout": window, "retcode": 0}) } with patch.dict(rebootmgr.__salt__, salt_mock): assert rebootmgr.get_window() == { "time": "*-*-* 03:30:00", "duration": "01h30m", } salt_mock["cmd.run_all"].assert_called_with(["rebootmgrctl", "get-window"])
14,257
def procmap(method, item_list): """ Like :py:func:`map`, but where the method being applied has no return value. In other words, the procedure is called on every item in the list sequentially, but since each call has no return value, the call to :func:`procmap` also has no return value. :param method: The procedure to call each time. :param item_list: The list of items to apply the procedure to. :return: :py:const:`None` """ for item in item_list: method(item)
14,258
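A small illustration of procmap(); logging.info is just a convenient side-effecting procedure, and nothing is returned.

import logging

logging.basicConfig(level=logging.INFO)

# Apply a procedure to every item purely for its side effect.
procmap(logging.info, ['loading config', 'connecting', 'ready'])

# Equivalent to:
# for msg in ['loading config', 'connecting', 'ready']:
#     logging.info(msg)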
def test_convert_dict_to_list(): """Should convert dict to list.""" listdict = { "0": "apple", "1": "banana", "2": "orange", } expected = [ "apple", "banana", "orange", ] assert loader._convert_dict_to_list(listdict) == expected
14,259
def ajax_login_required(function):
    """
    Decorator for views that checks that the user is logged in, resulting
    in a 403 Forbidden response if not.
    """
    @wraps(function, assigned=available_attrs(function))
    def wrapped_function(request, *args, **kwargs):
        if request.user.is_authenticated:
            return function(request, *args, **kwargs)
        else:
            return HttpResponseForbidden()
    return wrapped_function
14,260
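A sketch of decorating a Django view with ajax_login_required; the view name and JSON payload are hypothetical.

from django.http import JsonResponse

@ajax_login_required
def whoami(request):
    # Only reached for authenticated users; anonymous AJAX calls receive a 403.
    return JsonResponse({'username': request.user.get_username()})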
def compile_str_from_parsed(parsed): """The (quasi-)inverse of string.Formatter.parse. Args: parsed: iterator of (literal_text, field_name, format_spec, conversion) tuples, as yield by string.Formatter.parse Returns: A format string that would produce such a parsed input. >>> s = "ROOT/{}/{0!r}/{1!i:format}/hello{:0.02f}TAIL" >>> assert compile_str_from_parsed(string.Formatter().parse(s)) == s >>> >>> # Or, if you want to see more details... >>> parsed = list(string.Formatter().parse(s)) >>> for p in parsed: ... print(p) ('ROOT/', '', '', None) ('/', '0', '', 'r') ('/', '1', 'format', 'i') ('/hello', '', '0.02f', None) ('TAIL', None, None, None) >>> compile_str_from_parsed(parsed) 'ROOT/{}/{0!r}/{1!i:format}/hello{:0.02f}TAIL' """ result = '' for literal_text, field_name, format_spec, conversion in parsed: # output the literal text if literal_text: result += literal_text # if there's a field, output it if field_name is not None: result += '{' if field_name != '': result += field_name if conversion: result += '!' + conversion if format_spec: result += ':' + format_spec result += '}' return result
14,261
def __ixor__(self: array, other: array, /) -> array: """ Note: __ixor__ is a method of the array object. """ pass
14,262
def calculate_class_weight(labels): """Calculates the inverse of the class cardinalities and normalizes the weights such that the minimum is equal to 1. Args: labels: List of integers representing class labels Returns: Numpy array with weight for each class """ labels = np.array(labels) unique = sorted(np.unique(labels)) counts = np.zeros(len(unique)) for i, label in enumerate(unique): counts[i] = np.sum(labels == label) weight = 1. / counts weight = weight / weight.min() return weight
14,263
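A quick check of calculate_class_weight on a toy label list; the expected output follows directly from the inverse-frequency normalization above.

labels = [0, 0, 0, 1, 1, 2]           # class 0 occurs 3x, class 1 occurs 2x, class 2 occurs 1x
weights = calculate_class_weight(labels)
print(weights)                         # -> [1.  1.5 3. ]  (rarest class gets the largest weight)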
def kill_processes(procs, kill_children=True): """Kill a list of processes and optionally it's children.""" for proc in procs: LOGGER.debug("Starting kill of parent process %d", proc.pid) kill_process(proc, kill_children=kill_children) ret = proc.wait() LOGGER.debug("Finished kill of parent process %d has return code of %d", proc.pid, ret)
14,264
def gather_from_workers(who_has, deserialize=True, rpc=rpc, close=True, permissive=False): """ Gather data directly from peers Parameters ---------- who_has: dict Dict mapping keys to sets of workers that may have that key Returns dict mapping key to value See Also -------- gather _gather """ bad_addresses = set() who_has = {k: set(v) for k, v in who_has.items()} results = dict() all_bad_keys = set() while len(results) + len(all_bad_keys) < len(who_has): d = defaultdict(list) rev = dict() bad_keys = set() for key, addresses in who_has.items(): if key in results: continue try: addr = random.choice(list(addresses - bad_addresses)) d[addr].append(key) rev[key] = addr except IndexError: bad_keys.add(key) if bad_keys: if permissive: all_bad_keys |= bad_keys else: raise KeyError(*bad_keys) coroutines = [rpc(address).get_data(keys=keys, close=close) for address, keys in d.items()] response = yield ignore_exceptions(coroutines, socket.error, StreamClosedError) response = merge(response) bad_addresses |= {v for k, v in rev.items() if k not in response} results.update(merge(response)) if deserialize: results = valmap(loads, results) if permissive: raise Return((results, all_bad_keys)) else: raise Return(results)
14,265
def run(flock, previous_flock, amaze, template_triangles, amendments): """ Detects collisions and calculates required amendments that allow boid to avoid collisions. For each boid it first checks if boid collides with the wall by rotating on the same spot. If it is, boid is moved out of the wall. If it isn't, the checking continues: it calculates its impulse (desired dislocation vector) and breaks it into steps. For each step (partial impulse) it checks if a wall is hit. If it is, boid slides along it. Multiple walls will be properly processed. TODO: Currently it's imprecise near the corners - there's a small transparent square on the corner of the wall with the size (cfg.collision_check_stop, cfg.collision_check_stop), and boid can go through it. Implementing proper processing may require more complex logic and is out of the scope of this project. """ amendments.clear() i = 0 for boid in flock.np_arrays: impulse = np.hypot(boid[vel + x_var], boid[vel + y_var]) if impulse > 0: # We'll start from previous position and if no walls are hit, # increase it up to the new boid position boid[pos + x_var] = previous_flock.np_arrays[i][pos + x_var] boid[pos + y_var] = previous_flock.np_arrays[i][pos + y_var] template_triangle = template_triangles[min( int(np.round(np.degrees(flock.object_list[i].orientation))), 359)] triangle_offset = template_triangle.get_triangle_top_left() triangle_rect = template_triangle.rect.copy() collision_detected = False # Fisrt check if the boid has collided into a wall without # moving (e.g. rotated near the wall) # ------------------------------------------------------ hit_top, hit_right, hit_bottom, hit_left = \ check_for_collision([boid[pos + x_var], boid[pos + y_var]], [boid[vel + x_var], boid[vel + y_var]], triangle_rect, triangle_offset, amaze) if hit_right or hit_left or hit_top or hit_bottom: collision_detected = True if cfg.bounding_rects_show: flock.object_list[i].collided = True dx = dy = 0 if hit_right: wall_left_x = np.trunc(triangle_rect.right / cfg.tile_width) * cfg.tile_width # dx will be negative dx = wall_left_x - triangle_rect.right if hit_left: wall_right_x = np.ceil(triangle_rect.left / cfg.tile_width) * cfg.tile_width # dx will be positive dx = wall_right_x - triangle_rect.left if hit_top: wall_above_y = np.ceil(triangle_rect.top / cfg.tile_height) * cfg.tile_height # dy will be positive dy = wall_above_y - triangle_rect.top if hit_bottom: wall_below_y = np.trunc(triangle_rect.bottom / cfg.tile_height) * cfg.tile_height # dy will be negative dy = wall_below_y - triangle_rect.bottom deltas_in_tiles = maze.to_unit_tiles(dx, dy) boid[pos + x_var] = boid[pos + x_var] + deltas_in_tiles[x_var] boid[pos + y_var] = boid[pos + y_var] + deltas_in_tiles[y_var] # Collision check for this boid is finished if not collision_detected: # First position is unobstructed, so check positions ahead # ------------------------------------------------------ unit_impulse = cfg.collision_check_step # noinspection PyTypeChecker dx = boid[vel + x_var] * unit_impulse / impulse # Unit squares # noinspection PyTypeChecker dy = boid[vel + y_var] * unit_impulse / impulse # Unit squares number_of_checks = int(np.ceil(impulse / unit_impulse)) for j in range(0, number_of_checks): if (j + 1) * unit_impulse > impulse: # Last step can be smaller # Using Decimal here as float != float - 0 and Decimal is exact. # Python uses approximate values and it negatively manifests itself here. 
unit_impulse = np.float32(Decimal(impulse - unit_impulse * j)) dx = boid[vel + x_var] * unit_impulse / impulse # Unit squares dy = boid[vel + y_var] * unit_impulse / impulse # Unit squares hit_top, hit_right, hit_bottom, hit_left = \ check_for_collision([boid[pos + x_var] + dx, boid[pos + y_var] + dy], [boid[vel + x_var], boid[vel + y_var]], triangle_rect, triangle_offset, amaze) if hit_right or hit_left or hit_top or hit_bottom: collision_detected = True if cfg.bounding_rects_show: flock.object_list[i].collided = True # Nullify impulse if a wall is on the way if (dx > 0 and hit_right) or (dx < 0 and hit_left): dx = 0 if (dy > 0 and hit_bottom) or (dy < 0 and hit_top): dy = 0 if dx == 0 and dy == 0: # Can't proceed break if not maze.outside_maze(boid[pos + x_var] + dx, boid[pos + y_var] + dy): # The boid was moved outside the maze # Apply amendments to the host data according to the type of collision # I.e. slide along the wall boid[pos + x_var] = boid[pos + x_var] + dx boid[pos + y_var] = boid[pos + y_var] + dy else: # Boid is outside the maze, no point continuing the check break if collision_detected: # Save amendments to transfer them later to the GPU amendments.values.append(np.copy([boid[pos + x_var], boid[pos + y_var]])) amendments.indices.append(i) amendments.amount += 1 i += 1
14,266
def CosEnv(length,rft=(0.005),fs=(44100)): """ rft : Rise and fall time [s] length : Total length of window [s] fs : Sampling freq [Hz] """ rfsamp = int(np.round(rft * fs)) windowsamp = int(np.round(length * fs)) flatsamp = windowsamp - (2 * rfsamp) time_index = np.arange(0, 1, 1 / rfsamp) r_env = (1 + np.cos(np.pi + np.pi * time_index)) / 2 f_env = (1 + np.cos(np.pi * time_index)) / 2 flat_env = np.ones(flatsamp) env = np.concatenate((r_env, flat_env, f_env), 0) return env
14,267
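A usage sketch for CosEnv, applying the raised-cosine window to a sine tone; the tone parameters are arbitrary.

import numpy as np

env = CosEnv(0.100, rft=0.010, fs=44100)   # 100 ms window with 10 ms raised-cosine ramps
print(env.shape)                            # (4410,) samples in total
tone = np.sin(2 * np.pi * 440 * np.arange(len(env)) / 44100)
gated = tone * env                          # click-free onset and offset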
def generate_client_credentials(cluster_name, public_key, verify_code): """Generate the client credentials""" prompts = [] prompts += expect('Enter a name for this cluster:', cluster_name) prompts += expect("Input the server's public key:", public_key) prompts += expect("Input the server verify code: ", verify_code) with expecting(prompts): output = run('cstar_perf_client --get-credentials') lines = output.split('\n') client_public_key = [line for line in lines if line.startswith("Your public key is")][0] fab.run("echo '{}' > ~/credentials.txt".format(client_public_key))
14,268
def load_deck_obj(deckFile, deckObj):
    """
    Reset deckObj in place and load the lines from deckFile into it.
    """
    # clear the caller's list without rebinding the local name,
    # so the loaded lines are visible to the caller
    del deckObj[:]
    # open up the deck file, and start adding lines to the deckObj
    with open(deckFile, 'r') as deckItem:
        for deckLine in deckItem:
            deckObj.append(deckLine)
    return deckObj
14,269
def flatten(nested_list): """ Args: nested_list (list): list of lists Returns: list: flat list Example: >>> import ubelt as ub >>> nested_list = [['a', 'b'], ['c', 'd']] >>> list(ub.flatten(nested_list)) ['a', 'b', 'c', 'd'] """ return it.chain.from_iterable(nested_list)
14,270
def fetch_reply(query, session_id):
    """
    Main function to fetch a reply for the chatbot; returns a reply dict
    with reply 'type' and 'data'.
    """
    response = apiai_response(query, session_id)
    intent, params = parse_response(response)
    reply = {}
    if intent is None:
        reply['type'] = 'none'
        reply['data'] = "I didn't understand"
    elif intent == "news":
        reply['type'] = 'news'
        articles = get_news(params)
        news_elements = []
        for article in articles:
            element = {}
            element['title'] = article['title']
            element['item_url'] = article['link']
            element['image_url'] = article['img']
            element['buttons'] = [{
                "type": "web_url",
                "title": "Read more",
                "url": article['link']}]
            news_elements.append(element)
        reply['data'] = news_elements
    elif intent.startswith('smalltalk'):
        reply['type'] = 'smalltalk'
        reply['data'] = response['result']['fulfillment']['speech']
    return reply
14,271
def test_DataFrameProfile_invalid(): """DataFrame profile should not accept invalid data types""" invalid_types = [ TEST_CAT_SERIES, TEST_NUM_SERIES, "data", 34, 34.5, {"data": "dictionary"}, [["col_name", 1], ["col_name2", 2]], (("col_name", 3), ("col_name2", 4)), np.array([1, 2, 3]), ] for invalid in invalid_types: with pytest.raises(TypeError): reports.DataFrameProfile(invalid)
14,272
def create_SpatialReference(sr):
    """ Creates an arcpy.SpatialReference object from the given spatial
        reference identifier (e.g. a WKID or projection name). """
    return arcpy.SpatialReference(sr)
14,273
def main(): """ main solver function """ graph = defaultdict(list) with open("passage_pathing.txt", encoding="utf-8") as file: lines = file.readlines() nodes = set() for line in lines: line = line.strip() start, end = line.split("-") nodes.add(start) nodes.add(end) graph[start].append(end) graph[end].append(start) print(nodes) for node in nodes: if node != "start" and node != "end" and node.islower(): path = [] visited = defaultdict(int) visited["start"] = 1 dfs("start", "end", graph, visited, path, node) print(ALL_PATHS) print(len(ALL_PATHS))
14,274
def check_in_image(paste_image_location, paste_image_size, canvas_image_size): """Checks whether the location for the pasted image is within the canvas. Args: paste_image_location: a namedtuple of utils.XY, with 'x' and 'y' coordinates of the center of the image we want to paste. paste_image_size: a namedtuple of utils.XY, with 'x' and 'y' coordinates corresponding to the size of the image we are pasting. canvas_image_size: the size of the canvas that we are pasting the image to. Returns: True if the pasted image would lie within the canvas, False otherwise. """ offset_x = int(paste_image_size.x / 2) + 1 offset_y = int(paste_image_size.y / 2) + 1 if (paste_image_location.x + offset_x > canvas_image_size or paste_image_location.x - offset_x < 1 or paste_image_location.y + offset_y > canvas_image_size or paste_image_location.y - offset_y < 1): return False return True
14,275
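An illustration of check_in_image; the XY namedtuple below is a stand-in for the project's utils.XY and is only an assumption about its shape.

from collections import namedtuple

XY = namedtuple('XY', ['x', 'y'])   # stand-in for utils.XY (assumed .x / .y fields)

canvas_size = 128
patch_size = XY(x=32, y=32)

print(check_in_image(XY(x=64, y=64), patch_size, canvas_size))   # True: patch fits comfortably
print(check_in_image(XY(x=5, y=64), patch_size, canvas_size))    # False: spills over the left edge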
def test_add_auth_blank_user(mechfile_one_entry_with_auth_and_mech_use): """Test add_auth.""" inst = mech.mech_instance.MechInstance('first', mechfile_one_entry_with_auth_and_mech_use) inst.user = '' inst.password = 'vagrant' inst.vmx = '/tmp/first/some.vmx' with raises(SystemExit, match=r"Need to provide user"): mech.utils.add_auth(inst)
14,276
def get_gpu(os: str, line: List, value: str, key: str): """ Append the GPU info from the given neofetch line to the GPU list Parameters ---------- os : OS type line : List Component line value : str Neofetch extracted line key : str Component key """ value = value.replace(key, "").lstrip() splitValue = value.split() for v in splitValue: if v.upper() in ["AMD", "RADEON"]: line.append(Gpu_amd(os, value)) return elif v.upper() in ["NVIDIA", "GEFORCE"]: line.append(Gpu_nvidia(os, value)) return elif v.upper() in ["INTEL", "INTEL(R)"]: line.append(Gpu_intel(os, value)) return
14,277
def withSEVCHK(fcn): """decorator to raise a ChannelAccessException if the wrapped ca function does not return status = dbr.ECA_NORMAL. This handles the common case of running :func:`PySEVCHK` for a function whose return value is from a corresponding libca function and whose return value should be ``dbr.ECA_NORMAL``. """ @functools.wraps(fcn) def wrapper(*args, **kwds): "withSEVCHK wrapper" status = fcn(*args, **kwds) return PySEVCHK( fcn.__name__, status) return wrapper
14,278
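A hypothetical sketch of wrapping a raw Channel Access call with withSEVCHK; the libca handle and the ca_pend_io binding are assumptions about the surrounding pyepics-style module.

# 'libca' stands for the loaded Channel Access C library used elsewhere in
# this module; ca_pend_io returns an ECA_* status code.
@withSEVCHK
def pend_io(timeout=1.0):
    "flush pending CA I/O, raising ChannelAccessException on a bad status"
    return libca.ca_pend_io(timeout)

pend_io(0.5)   # returns dbr.ECA_NORMAL on success, raises otherwise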
def test_add_with_formatting(): """Use formatting expressions.""" context = Context({ 'arbset': {1, 2}, 'addthis': 3, 'add': { 'set': PyString('arbset'), 'addMe': '{addthis}' }}) add.run_step(context) context['add']['addMe'] = 4 add.run_step(context) assert context['arbset'] == {1, 2, 3, 4} assert len(context) == 3
14,279
def convert_ts_to_date(ts): """ Converts a timestamp to a date object """ # TODO: is this function necessary? return datetime.fromtimestamp(ts)
14,280
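A trivial usage sketch showing that convert_ts_to_date is a thin wrapper around datetime.fromtimestamp (interpreted in the local timezone).

from datetime import datetime

ts = 1_600_000_000
print(convert_ts_to_date(ts))                                   # e.g. 2020-09-13 12:26:40 (local time)
print(datetime.fromtimestamp(ts) == convert_ts_to_date(ts))     # True: same underlying call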
def add_message(request, kind, message): """ Put message into session """ messages = request.session.get('messages', []) messages.append((kind, message)) request.session['messages'] = messages
14,281
def available_methods():
    """Get all available importance scores."""
    from . import ism, gradient, referencebased
    int_modules = [ism, gradient, referencebased]
    methods = {}
    for m in int_modules:
        methods = merge_dicts(methods, m.METHODS)
    return methods
14,282
def clip(*args, **kwargs): """ This command is used to create, edit and query character clips. Flags: - absolute : abs (bool) [create] This flag is now deprecated. Use aa/allAbsolute, ar/allRelative, ra/rotationsAbsolute, or da/defaultAbsolute instead. This flag controls whether the clip follows its keyframe values or whether they are offset by a value to maintain a smooth path. Default is true. - absoluteRotations : abr (bool) [create] This flag is now deprecated. Use aa/allAbsolute, ar/allRelative, ra/rotationsAbsolute, or da/defaultAbsolute instead. If true, this overrides the -absolute flag so that rotation channels are always calculated with absolute offsets. This allows you to have absolute offsets on rotations and relative offsets on all other channels. - active : a (unicode) [query,edit] Query or edit the active clip. This flag is not valid in create mode. Making a clip active causes its animCurves to be hooked directly to the character attributes in addition to being attached to the clip library node. This makes it easier to access the animCurves if you want to edit, delete or add additional animCruves to the clip. - addTrack : at (bool) [] This flag is now obsolete. Use the insertTrack flag on the clipSchedule command instead. - allAbsolute : aa (bool) [create] Set all channels to be calculated with absolute offsets. This flag cannot be used in conjunction with the ar/allRelative, ra/rotationsAbsolute or da/defaultAbsolute flags. - allClips : ac (bool) [query] This flag is used to query all the clips in the scene. Nodes of type "animClip" that are storing poses, are not returned by this command. - allRelative : ar (bool) [create] Set all channels to be calculated with relative offsets. This flag cannot be used in conjunction with the aa/allAbsolute, ra/rotationsAbsolute or da/defaultAbsolute flags. - allSourceClips : asc (bool) [query] This flag is used to query all the source clips in the scene. Nodes of type "animClip" that are storing poses or clip instances, are not returned by this command. - animCurveRange : acr (bool) [create] This flag can be used at the time you create the clip instead of the startTime and endTime flags. It specifies that you want the range of the clip to span the range of keys in the clips associated animCurves. - character : ch (bool) [query] This is a query only flag which operates on the specified clip. It returns the names of any characters that a clip is associated with. - constraint : cn (bool) [create] This creates a clip out of any constraints on the character. The constraint will be moved off of the character and into the clip, so that it is only active for the duration of the clip, and its value can be scaled/offset/cycled according to the clip attributes. - copy : c (bool) [create,query] This flag is used to copy a clip or clips to the clipboard. It should be used in conjunction with the name flag to copy the named clips on the specified character and its subcharacters. In query mode, this flag allows you to query what, if anything, has been copied into the clip clipboard. - defaultAbsolute : da (bool) [create] Sets all top-level channels except rotations in the clip to relative, and the remaining channels to absolute. This is the default during clip creation if no offset flag is specified. This flag cannot be used in conjunction with the aa/allAbsolute, ar/allRelative, or ra/rotationsAbsolute flags. - duplicate : d (bool) [query] Duplicate the clip specified by the name flag. 
The start time of the new clip should be specified with the startTime flag. - endTime : end (time) [create,query,edit] Specify the clip end - expression : ex (bool) [create] This creates a clip out of any expressions on the character. The expression will be moved off of the character and into the clip, so that it is only active for the duration of the clip, and its value can be scaled/offset/cycled according to the clip attributes. - ignoreSubcharacters : ignoreSubcharacters (bool) [create] During clip creation, duplication and isolation, subcharacters are included by default. If you want to create a clip on the top level character only, or you want to duplicate the clip on the top level character without including subCharacters, use the ignoreSubcharacters flag. - isolate : i (bool) [create] This flag should be used in conjunction with the name flag to specify that a clip or clips should be copied to a new clip library. The most common use of this flag is for export, when you want to only export certain clips from the character, without exporting all of the clips. - leaveOriginal : lo (bool) [create] This flag is used when creating a clip to specify that the animation curves should be copied to the clip library, and left on the character. - mapMethod : mm (unicode) [create] This is is valid with the paste and pasteInstance flags only. It specifies how the mapping should be done. Valid options are: "byNodeName", "byAttrName", "byCharacterMap", "byAttrOrder", "byMapOrAttrName" and "byMapOrNodeName". "byAttrName" is the default. The flags mean the following: "byAttrOrder" maps using the order that the character stores the attributes internally, "byAttrName" uses the attribute name to find a correspondence, "byNodeName" uses the node name \*and\* the attribute name to find a correspondence, "byCharacterMap" uses the existing characterMap node to do the mapping. "byMapOrAttrName" uses a character map if one exists, otherwise uses the attribute name. "byMapOrNodeName" uses a character map if one exists, otherwise uses the attribute name. - name : n (unicode) [create,query] In create mode, specify the clip name. In query mode, return a list of all the clips. In duplicate mode, specify the clip to be duplicated. In copy mode, specify the clip to be copied. This flag is multi-use, but multiple use is only supported with the copy flag. For use during create and with all other flags, only the first instance of the name flag will be utilized. In query mode, this flag can accept a value. - newName : nn (unicode) [create] Rename a clip. Must be used in conjunction with the clip name flag, which is used to specify the clip to be renamed. - paste : p (bool) [create] This flag is used to paste a clip or clips from the clipboard to a character. Clips are added to the clipboard using the c/copy flag. - pasteInstance : pi (bool) [create] This flag is used to paste an instance of a clip or clips from the clipboard to a character. Unlike the p/paste flag, which duplicates the animCurves from the original source clip, the pi/pasteInstance flag shares the animCurves from the source clip. - remove : rm (bool) [query] Remove the clip specified by the name flag. The clip will be permanently removed from the library and deleted from any times where it has been scheduled. - removeTrack : rt (bool) [create] This flag is now obsolete. Use removeTrack flag on the clipSchedule command instead. 
- rotationOffset : rof (float, float, float) [create,query] Return the channel offsets used to modify the clip's rotation.Flag can appear in Create mode of commandFlag can have multiple arguments, passed either as a tuple or a list. - rotationsAbsolute : ra (bool) [create] Set all channels except rotations to be calculated with relative offsets. Rotation channels will be calculated with absolute offsets. This flag cannot be used in conjunction with the aa/allAbsolute, ar/allRelative or da/defaultAbsolute flags. - scheduleClip : sc (bool) [create] This flag is used when creating a clip to specify whether or not the clip should immediately be scheduled at the current time. If the clip is not scheduled, the clip will be placed in the library for future use, but will not be placed on the timeline. This flag is for use only when creating a new clip or duplicating an existing. The default is true. - sourceClipName : scn (bool) [query] This flag is for query only. It returns the name of the source clip that controls an instanced clip. - split : sp (time) [create,edit] Split an existing clip into two clips. The split occurs around the specified time. - startTime : s (time) [create,query,edit] Specify the clip start - translationOffset : tof (float, float, float) [create,query] Return the channel offsets used to modify the clip's translation. - useChannel : uc (unicode) [create] Specify which channels should be acted on. This flag is valid only in conjunction with clip creation, and the isolate flag. The specified channels must be members of the character. Derived from mel command `maya.cmds.clip` """ pass
14,283
def PDeselSN (inSC, SNTab, isuba, fqid, ants, timerange, err):
    """ Deselect entries in an SN table

    Routine to deselect records in an SN table if they match a given
    subarray, have a selected FQ id, appear on a list of antennas and
    are in a given timerange.
    inSC      = Selfcal object
    SNTab     = Python AIPS SN Table
    isuba     = subarray, <=0 -> any
    fqid      = Selected FQ id, <=0 -> any
    ants      = array of integer antenna numbers, 0->all
    timerange = timerange (days)
    err       = Python Obit Error/message stack
    """
    ################################################################
    # Checks
    if not PIsA(inSC):
        raise TypeError('Bad input selfcalibrator')
    if not Table.PIsA(SNTab):
        raise TypeError("SNTab MUST be a Python Obit Table")
    if not OErr.OErrIsA(err):
        raise TypeError("err MUST be an OErr")
    #
    nantf = len(ants)
    Obit.UVSolnDeselSN (SNTab.me, isuba, fqid, nantf, ants, timerange, err.me)
    if err.isErr:
        printErrMsg(err, "Error deselecting solutions")
    # end PDeselSN
14,284
def p_cmdexpr_table(p): """cmdexpr : TABLE arglist | TABLE MACRO"""
14,285
def templates(): """ .. versionadded:: 2015.5.0 List the available LXC template scripts installed on the minion CLI Examples: .. code-block:: bash salt myminion lxc.templates """ try: template_scripts = os.listdir("/usr/share/lxc/templates") except OSError: return [] else: return [x[4:] for x in template_scripts if x.startswith("lxc-")]
14,286
def reg_planes_to_img(imgs, path=None, ax=None):
    """Export registered image single planes to a single figure.

    Simplified export tool taking a single plane from each registered image
    type, overlaying in a single figure, and exporting to file.

    Args:
        imgs (List[:obj:`np.ndarray`]): Sequence of image planes to display.
            The first image is assumed to be greyscale, the second is labels,
            and any subsequent images are borders.
        path (str): Output base path, which will be combined with
            :attr:`config.savefig`; defaults to None to not save.
        ax (:obj:`matplotlib.axes.Axes`): Axes on which to plot; defaults
            to None, in which case a new figure and axes will be generated.

    """
    if ax is None:
        # set up new figure with single subplot
        fig, gs = plot_support.setup_fig(
            1, 1, config.plot_labels[config.PlotLabels.SIZE])
        ax = fig.add_subplot(gs[0, 0])
    imgs = [img[None] for img in imgs]
    cmaps_labels = _setup_labels_cmaps(imgs)
    plotted_imgs = _build_stack(
        ax, imgs, StackPlaneIO.process_plane, cmaps_labels=cmaps_labels,
        scale_bar=False)
    ax_img = plotted_imgs[0][0]
    aspect, origin = plot_support.get_aspect_ratio(config.plane)
    plot_support.fit_frame_to_image(
        ax_img.figure, ax_img.get_array().shape, aspect)

    if path:
        plot_support.save_fig(path, config.savefig)
14,287
def is_sketch_list_empty():
    """Check whether any sketches exist in the sketch list."""
    return len(_CVB_SKETCH_LIST) == 0
14,288
def pb22():
    """
    Problem 22 : Names scores.

    We first open the file, strip the useless '"', put everything into
    lowercase, and split to get a list. We use merge sort to sort the list
    in alphabetical order (see utils.merge_sort), and then, for each word in
    the list and for each character in that word, we get its alphabetical
    rank (ord - 96, which is why we needed lowercase), sum the ranks, and
    weight the sum by the word's 1-based position in the sorted list.
    """
    res = 0
    with open('./resources/input_pb22.txt', 'r') as f:
        lst = f.readline().replace('"', '').lower().split(sep=',')
    utils.merge_sort(lst)
    for i in range(len(lst)):
        res += sum([ord(char)-96 for char in lst[i]])*(i+1)
    return res
14,289
def resize(im, target_size, max_size, stride=0, interpolation = cv2.INTER_LINEAR): """ only resize input image to target size and return scale :param im: BGR image input by opencv :param target_size: one dimensional size (the short side) :param max_size: one dimensional max size (the long side) :param stride: if given, pad the image to designated stride :param interpolation: if given, using given interpolation method to resize image :return: """ im_shape = im.shape im_size_min = np.min(im_shape[0:2]) im_size_max = np.max(im_shape[0:2]) im_scale = float(target_size) / float(im_size_min) # prevent bigger axis from being more than max_size: if np.round(im_scale * im_size_max) > max_size: im_scale = float(max_size) / float(im_size_max) im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=interpolation) if stride == 0: return im, im_scale else: # pad to product of stride im_height = int(np.ceil(im.shape[0] / float(stride)) * stride) im_width = int(np.ceil(im.shape[1] / float(stride)) * stride) im_channel = im.shape[2] padded_im = np.zeros((im_height, im_width, im_channel)) padded_im[:im.shape[0], :im.shape[1], :] = im return padded_im, im_scale
14,290
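A usage sketch for resize(); the shapes in the comments follow from the scaling arithmetic above (600/480 = 1.25, and padding up to multiples of the stride).

import numpy as np

im = np.zeros((480, 640, 3), dtype=np.uint8)        # H=480, W=640 BGR test image
resized, im_scale = resize(im, target_size=600, max_size=1000)
print(im_scale, resized.shape)                       # 1.25 (600, 800, 3)

padded, im_scale = resize(im, target_size=600, max_size=1000, stride=32)
print(padded.shape)                                  # (608, 800, 3): padded up to a multiple of 32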
def _add_qc( samples: List[Sample], namespace: str, overwrite_multiqc: bool ) -> Tuple[str, str]: """ Populates s.qc_values for each Sample object. Returns paths to MultiQC html and json files. """ multiqc_html_path = join( f'gs://cpg-{NAGIM_PROJ_ID}-{namespace}-web/qc/multiqc.html' ) multiqc_json_path = join( f'gs://cpg-{NAGIM_PROJ_ID}-{namespace}-analysis/qc/multiqc_data.json' ) if 'QC' in SOURCES_TO_PROCESS: logger.info('Running MultiQC on QC files') parsed_json_fpath = _run_multiqc( samples, multiqc_html_path, multiqc_json_path, tmp_bucket=f'gs://cpg-{NAGIM_PROJ_ID}-{namespace}-tmp/qc', namespace=namespace, overwrite=overwrite_multiqc, ) gfs = gcsfs.GCSFileSystem() with gfs.open(parsed_json_fpath) as f: row_by_sample = json.load(f) for s in samples: if s.nagim_id in row_by_sample: s.qc_values = row_by_sample[s.nagim_id] return multiqc_html_path, multiqc_json_path
14,291
def oscillAnglesOfHKLs(hkls, chi, rMat_c, bMat, wavelength, vInv=None, beamVec=bVec_ref, etaVec=eta_ref): """ Takes a list of unit reciprocal lattice vectors in crystal frame to the specified detector-relative frame, subject to the conditions: 1) the reciprocal lattice vector must be able to satisfy a bragg condition 2) the associated diffracted beam must intersect the detector plane Required Arguments: hkls -- (n, 3) ndarray of n reciprocal lattice vectors in the CRYSTAL FRAME chi -- float representing the inclination angle of the oscillation axis (std coords) rMat_c -- (3, 3) ndarray, the COB taking CRYSTAL FRAME components to SAMPLE FRAME bMat -- (3, 3) ndarray, the COB taking RECIPROCAL LATTICE components to CRYSTAL FRAME wavelength -- float representing the x-ray wavelength in Angstroms Optional Keyword Arguments: beamVec -- (3, 1) mdarray containing the incident beam direction components in the LAB FRAME etaVec -- (3, 1) mdarray containing the reference azimuth direction components in the LAB FRAME Outputs: ome0 -- (n, 3) ndarray containing the feasible (tTh, eta, ome) triplets for each input hkl (first solution) ome1 -- (n, 3) ndarray containing the feasible (tTh, eta, ome) triplets for each input hkl (second solution) Notes: ------------------------------------------------------------------------ The reciprocal lattice vector, G, will satisfy the the Bragg condition when: b.T * G / ||G|| = -sin(theta) where b is the incident beam direction (k_i) and theta is the Bragg angle consistent with G and the specified wavelength. The components of G in the lab frame in this case are obtained using the crystal orientation, Rc, and the single-parameter oscillation matrix, Rs(ome): Rs(ome) * Rc * G / ||G|| The equation above can be rearranged to yield an expression of the form: a*sin(ome) + b*cos(ome) = c which is solved using the relation: a*sin(x) + b*cos(x) = sqrt(a**2 + b**2) * sin(x + alpha) --> sin(x + alpha) = c / sqrt(a**2 + b**2) where: alpha = atan2(b, a) The solutions are: / | arcsin(c / sqrt(a**2 + b**2)) - alpha x = < | pi - arcsin(c / sqrt(a**2 + b**2)) - alpha \ There is a double root in the case the reflection is tangent to the Debye-Scherrer cone (c**2 = a**2 + b**2), and no solution if the Laue condition cannot be satisfied (filled with NaNs in the results array here) """ hkls = np.array(hkls, dtype=float, order='C') if vInv is None: vInv = np.ascontiguousarray(vInv_ref.flatten()) else: vInv = np.ascontiguousarray(vInv.flatten()) beamVec = np.ascontiguousarray(beamVec.flatten()) etaVec = np.ascontiguousarray(etaVec.flatten()) bMat = np.ascontiguousarray(bMat) return _transforms_CAPI.oscillAnglesOfHKLs( hkls, chi, rMat_c, bMat, wavelength, vInv, beamVec, etaVec )
14,292
def srfFaultSurfaceExtract(SRFfile): """ Generate fault surface from SRF file convention Following the Graves' SRF convention used in BBP and CyberShake """ lines = open( SRFfile, 'r' ).readlines() Nseg = int(lines[1].strip().split()[1]) # loop over segments to get (Nrow,Ncol) of each segments # fault surface for each segment will be read latter srfFaultSurface = {} srfFaultSurface['segments'] = {} dims = [] dips = [] ztors = [] for iseg in xrange( Nseg ): il0 = 2*iseg + 2 # fault geometry info spl = lines[il0].strip().split() lon0, lat0, L, W, Ncol, Nrow = np.array( spl, 'f' ) Ncol, Nrow = int(Ncol), int(Nrow) dims.append( [Ncol,Nrow] ) il1 = il0 + 1 # focal mechanism and hypocenter info spl = lines[il1].strip().split() strike, dip, ztor, hypoAS, hypoDD = np.array(spl,'f') dips.append(dip) # will be used to get the average dip angle (over segments) ztors.append(ztor) srfFaultSurface['segments']['dims'] = dims srfFaultSurface['segments']['dips'] = dips srfFaultSurface['segments']['ztors'] = ztors il0 = 2*(Nseg+1) Npoints = int(lines[il0].strip().split()[1]) il0 = il0 + 1 # jump to the data block (for each segments, there are a data block) locs = []; rakes = [] while il0 < len(lines): spl = lines[il0].strip().split() lon, lat, dep, strike, dip, Area, Tinit, dt = np.array( spl, 'f' ) locs.append( [lon,lat,dep] ) il0 = il0 + 1 spl = lines[il0].strip().split() rake, slipA_AlongRake, Nt = np.array( spl[:3], 'f' ) rakes.append( rake ) # will be used to get average rake (over points) dl = int(Nt/6) + (Nt%6!=0)*1 il0 = il0 + dl + 1 # import (similar to the segments jump) ... Nrow1 = 0; Ncol1 = 0 for iseg in xrange( Nseg ): Nrow1 += dims[iseg][1] Ncol1 += dims[iseg][0] FaultGeom = np.array( locs ).reshape( (Nrow1, Ncol1, 3) ) srfFaultSurface['FaultGeom'] = FaultGeom srfFaultSurface['rakes'] = rakes return srfFaultSurface
14,293
def disconnectHandler(crash=True): """ Handles disconnect Writes a crash log if the crash parameter is True """ executeHandlers("evt02") if crash: crashLog("main.disconnect") logger.critical("disconnecting from the server") try: Component.disconnect() except AttributeError: pass global ALIVE ALIVE = False if not Daemon: logger.warning("the gateway is going to be restarted!") Print("Restarting...") time.sleep(5) os.execl(sys.executable, sys.executable, *sys.argv) else: logger.info("the gateway is shutting down!") os._exit(-1)
14,294
def _to_response( uploaded_protocol: UploadedProtocol, ) -> route_models.ProtocolResponseAttributes: """Create ProtocolResponse from an UploadedProtocol""" meta = uploaded_protocol.data analysis_result = uploaded_protocol.data.analysis_result return route_models.ProtocolResponseAttributes( id=meta.identifier, protocolFile=route_models.FileAttributes( basename=meta.contents.protocol_file.path.name ), supportFiles=[ route_models.FileAttributes(basename=s.path.name) for s in meta.contents.support_files ], lastModifiedAt=meta.last_modified_at, createdAt=meta.created_at, metadata=analysis_result.meta, requiredEquipment=analysis_result.required_equipment, errors=analysis_result.errors, )
14,295
def has_valid_chars(token: str) -> bool:
    """
    decides whether this token consists of a reasonable character mix.

    :param token: the token to inspect
    :return: True, iff the character mix is considered "reasonable"
    """
    hits = 0  # count of characters that are not alphanumeric, '.', '-', or ' '
    limit = int(len(token) / 10)
    for c in token:
        if not (c.isalnum() or c == '.' or c == '-' or c == ' '):
            hits += 1
        if hits > limit:
            return False
    return True
14,296
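Two quick examples of has_valid_chars; the expected results follow from the 10% special-character threshold in the function.

print(has_valid_chars('release-notes v2.1'))    # True:  only letters, digits, '.', '-', ' '
print(has_valid_chars('f*o*o b@r b@z!!'))       # False: too many special characters for its length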
def sqlalchemy_engine(args, url):
    """Construct an SQLAlchemy Oracle engine from the given args.

    Note: the incoming ``url`` argument is rebuilt below from ``args``.
    """
    environ['PATH'] = args.ora_path  # we have to point to oracle client directory
    url = f'oracle://{args.user}:{pswd(args.host, args.user)}@{args.host}/{args.sid}'
    logging.info(url)
    return create_engine(url)
14,297
def cpu_count():
    """
    Returns the default number of slave processes to be spawned.
    """
    num = os.getenv("OMP_NUM_THREADS")
    if num is None:
        num = os.getenv("PBS_NUM_PPN")
    try:
        return int(num)
    except (TypeError, ValueError):
        return multiprocessing.cpu_count()
14,298
def shortcut_layer(name: str, shortcut, inputs):
    """
    Creates the typical residual block architecture. Residual blocks are
    useful for training very deep convolutional neural networks because they
    act as gradient 'highways' that enable the gradient to flow back into the
    first few initial convolutional layers. Without residual blocks, the
    gradient tends to disappear at those first initial layers and the model
    has a difficult time converging.

    Parameters
    ----------
    name : string
        The name of the tensor to be used in TensorBoard.
    shortcut: tensor
        The output of a previous convolutional layer
    inputs : tensor
        The output of the immediately previous convolutional layer.

    Returns
    -------
    inputs : tensor
        The resulting tensor.
    new_shortcut : tensor
        A new shortcut for a future residual block to connect to.
    """
    with tf.variable_scope(name):
        inputs += shortcut
        new_shortcut = inputs

    return inputs, new_shortcut
14,299
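A hedged TF1-style sketch of threading shortcuts through shortcut_layer (graph mode is implied by tf.variable_scope above); conv_block is a hypothetical helper standing in for whatever convolution stack the real model uses.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

def conv_block(name, x, filters):
    # Hypothetical two-conv stack; any shape-preserving sub-network works here.
    with tf.variable_scope(name):
        x = tf.layers.conv2d(x, filters, 3, padding='same', activation=tf.nn.relu)
        return tf.layers.conv2d(x, filters, 3, padding='same')

images = tf.placeholder(tf.float32, [None, 32, 32, 64])
shortcut = images                                    # first skip connection
x = conv_block('block1', images, 64)
x, shortcut = shortcut_layer('res1', shortcut, x)    # add the skip, refresh the shortcut
x = conv_block('block2', x, 64)
x, shortcut = shortcut_layer('res2', shortcut, x)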