content
stringlengths
22
815k
id
int64
0
4.91M
def smoP(dataMatIn, classLabels, C, toler, maxIter, kTup = ('lin',0)): """ 完整的线性SMO算法 Parameters: dataMatIn - 数据矩阵 classLabels - 数据标签 C - 松弛变量 toler - 容错率 maxIter - 最大迭代次数 kTup - 包含核函数信息的元组 Returns: oS.b - SMO算法计算的b oS.alphas - SMO算法计算的alphas """ oS = optStruct(np.mat(dataMatIn), np.mat(classLabels).transpose(), C, toler, kTup) #初始化数据结构 iter = 0 #初始化当前迭代次数 entireSet = True; alphaPairsChanged = 0 while (iter < maxIter) and ((alphaPairsChanged > 0) or (entireSet)): #遍历整个数据集都alpha也没有更新或者超过最大迭代次数,则退出循环 alphaPairsChanged = 0 if entireSet: #遍历整个数据集 for i in range(oS.m): alphaPairsChanged += innerL(i,oS) #使用优化的SMO算法 print("全样本遍历:第%d次迭代 样本:%d, alpha优化次数:%d" % (iter,i,alphaPairsChanged)) iter += 1 else: #遍历非边界值 nonBoundIs = np.nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0] #遍历不在边界0和C的alpha for i in nonBoundIs: alphaPairsChanged += innerL(i,oS) print("非边界遍历:第%d次迭代 样本:%d, alpha优化次数:%d" % (iter,i,alphaPairsChanged)) iter += 1 if entireSet: #遍历一次后改为非边界遍历 entireSet = False elif (alphaPairsChanged == 0): #如果alpha没有更新,计算全样本遍历 entireSet = True print("迭代次数: %d" % iter) return oS.b,oS.alphas #返回SMO算法计算的b和alphas
9,900
def _get_output_data(output_list, heat, stack_id): """ 获取output数据 """ response = { 'code': 200, 'msg': 'ok', 'status': utils.INSTANTIATED, 'data': [] } for item in output_list['outputs']: output = heat.stacks.output_show(stack_id, item['output_key']) output_value = output['output']['output_value'] item = { 'vmId': output_value['vmId'], 'vncUrl': output_value['vncUrl'], 'networks': [] } if 'networks' in output_value and output_value['networks'] is not None: for net_name, ip_data in output_value['networks'].items(): if utils.validate_uuid(net_name): continue network = { 'name': net_name, 'ip': ip_data[0]['addr'] } item['networks'].append(network) response['data'].append(item) return response
9,901
def _ensure_str(s): """convert bytestrings and numpy strings to python strings""" return s.decode() if isinstance(s, bytes) else str(s)
9,902
def get_polarimeter_index(pol_name): """Return the progressive number of the polarimeter within the board (0…7) Args: pol_name (str): Name of the polarimeter, like ``R0`` or ``W3``. Returns: An integer from 0 to 7. """ if pol_name[0] == "W": return 7 else: return int(pol_name[1])
9,903
def merge_sort(collection): """ Pure implementation of the fastest ordered collection with heterogeneous : parameter collection : some mutable ordered collection with heterogeneous comparable items inside : return : a sollectiojn order by ascending Examples : >>> merge_sort([0, 5, 3, 2, 2]) [0, 2, 2, 3, 5] >>> merge_sort([]) [] >>> merge_sort([-45, -5, -2]) [-45, -5, -2] """ start, end = [], [] while len(collection) > 1 : min_one, max_one = min(collection), max(collection) start.append(min_one) end.append(max_one) collection.remove(min_one) collection.remove(max_one) end.reverse() return start + collection + end
9,904
def sma_centroids(dataframe, column, short_window, long_window, min_width=None, **kwargs): """Identify centermost point between two SMA interception points Define regions as being bounded by two consecutive interceptions of SMAs with different window widths, then choose the centermost data point within that region. Useful for defining regions that capture the crossover of SMAs. Essentially a wrapper around `sma_to_centroids`. Args: dataframe (pandas.DataFrame): dataframe from which SMAs should be calculated and regions defined column (str): name of column over in dataframe from which sliding-window slopes should be calculated short_window (int): number of consecutive dataframe rows to include in the short window long_window (int): number of consecutive dataframe rows to include in the long window min_width: minimum width, expressed in units of `x_column`, below which an intercept should be disregarded as a valid end of a window kwargs: arguments to be passed to calculate_sma() Returns: DataFrame with indices corresponding to dataframe """ x_column = '_datetime_start' sma_short = calculate_sma(dataframe, x_column, column, window=short_window, **kwargs) sma_long = calculate_sma(dataframe, x_column, column, window=long_window, **kwargs) intercepts = find_sma_intercepts(sma_short, sma_long, dataframe[x_column]) return find_sma_centroids(dataframe=dataframe, sma_short=sma_short, sma_long=sma_long, intercepts=intercepts, x_column=x_column, min_width=min_width)
9,905
def delMsg(msgNum): """Deletes a specified message from the inbox""" global usrPrompt try: inboxMessages = json.loads(api.getAllInboxMessages()) # gets the message ID via the message index number msgId = inboxMessages['inboxMessages'][int(msgNum)]['msgid'] msgAck = api.trashMessage(msgId) except: print '\n Connection Error\n' usrPrompt = 0 main() return msgAck
9,906
def convert_similarities( in_file_path, out_json_file_path): """ Iterate over every row in a TSV file and import each as a compound document. We don't yet create edges for compounds. """ with open( in_file_path, newline='') as csv_fd: reader = csv.reader(csv_fd, delimiter=' ', quotechar='"') with open( out_json_file_path, "w" ) as out_j: for row in reader: if ( row[0] != "#" ): doc = { '_from': _reaction_vert_name + '/' + row[0], '_to': _reaction_vert_name + '/' + row[1], 'sf_similarity': float(row[2]), 'df_similarity': float(row[3]) } out_j.write( json.dumps(doc) + "\n" )
9,907
def stage_static_files(sample_type, working_dir, debug=False): """Stage static files in the current working directory""" stage_static_latex(sample_type, working_dir) stage_static_pdfs(sample_type, working_dir)
9,908
def capitalize_first(str): """Capitalizes only the first letter of the given string. :param str: string to capitalize :return: str with only the first letter capitalized """ if str == "": return "" return str[0].upper() + str[1:]
9,909
async def test_get_entity(opp, client): """Test get entry.""" mock_registry( opp, { "test_domain.name": RegistryEntry( entity_id="test_domain.name", unique_id="1234", platform="test_platform", name="Hello World", ), "test_domain.no_name": RegistryEntry( entity_id="test_domain.no_name", unique_id="6789", platform="test_platform", ), }, ) await client.send_json( {"id": 5, "type": "config/entity_registry/get", "entity_id": "test_domain.name"} ) msg = await client.receive_json() assert msg["result"] == { "config_entry_id": None, "device_id": None, "disabled_by": None, "platform": "test_platform", "entity_id": "test_domain.name", "name": "Hello World", "icon": None, "original_name": None, "original_icon": None, } await client.send_json( { "id": 6, "type": "config/entity_registry/get", "entity_id": "test_domain.no_name", } ) msg = await client.receive_json() assert msg["result"] == { "config_entry_id": None, "device_id": None, "disabled_by": None, "platform": "test_platform", "entity_id": "test_domain.no_name", "name": None, "icon": None, "original_name": None, "original_icon": None, }
9,910
def min_cost_edge(G, T): """Returns the edge with the lowest cost/weight. Parameters ---------- G : NetworkX graph T : Prim's Algorithm Returns ------- The edge with the lowest cost/weight. """ edge_list = possible_edges(G, T) edge_list.sort(key = lambda e : cost(G, e)) return edge_list[0]
9,911
def get_image_html_tag(fig, format="svg"): """ Returns an HTML tag with embedded image data in the given format. :param fig: a matplotlib figure instance :param format: output image format (passed to fig.savefig) """ stream = io.BytesIO() # bbox_inches: expand the canvas to include the legend that was put outside the plot # see https://stackoverflow.com/a/43439132 fig.savefig(stream, format=format, bbox_inches="tight") data = stream.getvalue() if format == "svg": return data.decode("utf-8") data = base64.b64encode(data).decode("utf-8") return f"<img src=\"data:image/{format};base64,{data}\">"
9,912
def reformat_language_tuple(langval): """Produce standardly-formatted language specification string using given language tuple. :param langval: `tuple` in form ('<language>', '<language variant>'). Example: ('en', 'US') :return: `string` formatted in form '<language>-<language-variant>' """ if langval: langval_base, langval_variant = langval if langval_variant: langval_base = '{0}-{1}'.format(langval_base, langval_variant) return langval_base else: return None
9,913
def _create_ghostnet(variant, width=1.0, pretrained=False, **kwargs): """ Constructs a GhostNet model """ cfgs = [ # k, t, c, SE, s # stage1 [[3, 16, 16, 0, 1]], # stage2 [[3, 48, 24, 0, 2]], [[3, 72, 24, 0, 1]], # stage3 [[5, 72, 40, 0.25, 2]], [[5, 120, 40, 0.25, 1]], # stage4 [[3, 240, 80, 0, 2]], [[3, 200, 80, 0, 1], [3, 184, 80, 0, 1], [3, 184, 80, 0, 1], [3, 480, 112, 0.25, 1], [3, 672, 112, 0.25, 1] ], # stage5 [[5, 672, 160, 0.25, 2]], [[5, 960, 160, 0, 1], [5, 960, 160, 0.25, 1], [5, 960, 160, 0, 1], [5, 960, 160, 0.25, 1] ] ] model_kwargs = dict( cfgs=cfgs, width=width, **kwargs, ) return build_model_with_cfg( GhostNet, variant, pretrained, default_cfg=default_cfgs[variant], feature_cfg=dict(flatten_sequential=True), **model_kwargs)
9,914
def re_suffix(string): """ Remove any “os.extsep” prefixing a string, and ensure that it ends with a “$” – to indicate a regular expression suffix. """ if not string: return None return rf"{string.casefold().lstrip(QUALIFIER).rstrip(DOLLA)}{DOLLA}"
9,915
def _alternate_dataclass_repr(object) -> None: """ Overrides the default dataclass repr by not printing fields that are set to None. i.e. Only prints fields which have values. This is for ease of reading. """ populated_fields = { field.name: getattr(object, f"{field.name}") for field in fields(object) if getattr(object, f"{field.name}") is not None } class_name = object.__class__.__name__ repr_string = f"{class_name}(" + ", ".join([f"{field}={value}" for field, value in populated_fields.items()]) + ")" return repr_string
9,916
def which_coords_in_bounds(coords, map_shape): """ Checks the coordinates given to see if they are in bounds :param coords Union[array(2)[int], array(N,2)[int]]: [int, int] or [[int, int], ...], Nx2 ndarray :param map_shape Tuple[int]: shape of the map to check bounds :return Union[bool array(N)[bool]]: corresponding to whether the coord is in bounds (if array is given, then it will be array of bool) """ assert isinstance(coords, np.ndarray) and coords.dtype == np.int assert np.array(map_shape).dtype == np.int if len(coords.shape) == 1: return coords[0] >= 0 and coords[0] < map_shape[0] and coords[1] >= 0 and coords[1] < map_shape[1] else: return np.logical_and(np.logical_and(coords[:, 0] >= 0, coords[:, 0] < map_shape[0]), np.logical_and(coords[:, 1] >= 0, coords[:, 1] < map_shape[1]))
9,917
def get_activation_function(activation_function_name: str): """ Given the name of an activation function, retrieve the corresponding function and its derivative :param cost_function_name: the name of the cost function :return: the corresponding activation function and its derivative """ try: return activation_functions[activation_function_name] except KeyError: raise UnknownActivationFunctionName(activation_function_name)
9,918
def url_split(url, uses_hostname=True, split_filename=False): """Split the URL into its components. uses_hostname defines whether the protocol uses a hostname or just a path (for "file://relative/directory"-style URLs) or not. split_filename defines whether the filename will be split off in an attribute or whether it will be part of the path """ # urlparse.urlparse() is a bit deficient for our needs. try: if uses_hostname: match = URL_RE_HOSTNAME.match(url).groupdict() else: match = URL_RE_PLAIN.match(url).groupdict() except AttributeError: raise AttributeError, "Invalid URL." for key, item in match.items(): if item is None: if key == "port": # We should leave port as None if it's not defined. match[key] = "0" else: match[key] = "" if uses_hostname: match["port"] = int(match["port"]) if not split_filename: match["path"] = match["path"] + match["file"] match["file"] = "" return URLSplitResult(match)
9,919
def memdiff_search(bytes1, bytes2): """ Use binary searching to find the offset of the first difference between two strings. :param bytes1: The original sequence of bytes :param bytes2: A sequence of bytes to compare with bytes1 :type bytes1: str :type bytes2: str :rtype: int offset of the first location a and b differ, None if strings match """ # Prevent infinite recursion on inputs with length of one half = (len(bytes1) // 2) or 1 # Compare first half of the string if bytes1[:half] != bytes2[:half]: # Have we found the first diff? if bytes1[0] != bytes2[0]: return 0 return memdiff_search(bytes1[:half], bytes2[:half]) # Compare second half of the string if bytes1[half:] != bytes2[half:]: return memdiff_search(bytes1[half:], bytes2[half:]) + half
9,920
def decomposeJonesMatrix(Jmat): """ Decompose 2x2 Jones matrix to retardance and diattenuation vectors """ Jmat = Jmat / cp.sqrt(cp.linalg.det(Jmat)) q = cp.array([Jmat[0, 0] - Jmat[1, 1], Jmat[1, 0] + Jmat[0, 1], -1j * Jmat[1, 0] + 1j * Jmat[0, 1]]) / 2 tr = cp.trace(Jmat) / 2 c = cp.arccosh(tr) csin = c / cp.sinh(c) if c == 0: csin = 1 f = 2 * q * csin rotVector = -cp.imag(f) diatVector = cp.real(f) return rotVector, diatVector
9,921
def format_utc(time): """Format a time in UTC.""" return as_utc(time).strftime('%Y-%m-%d %H:%M:%S.%f')
9,922
def install_and_build_package( app, tool_dependency, actions_dict ): """Install a Galaxy tool dependency package either via a url or a mercurial or git clone command.""" sa_session = app.model.context.current install_dir = actions_dict[ 'install_dir' ] package_name = actions_dict[ 'package_name' ] actions = actions_dict.get( 'actions', None ) filtered_actions = [] env_shell_file_paths = [] if actions: with make_tmp_dir() as work_dir: with lcd( work_dir ): # The first action in the list of actions will be the one that defines the installation process. There # are currently only two supported processes; download_by_url and clone via a "shell_command" action type. action_type, action_dict = actions[ 0 ] if action_type == 'download_by_url': # Eliminate the download_by_url action so remaining actions can be processed correctly. filtered_actions = actions[ 1: ] url = action_dict[ 'url' ] if 'target_filename' in action_dict: # Sometimes compressed archives extracts their content to a folder other than the default defined file name. Using this # attribute will ensure that the file name is set appropriately and can be located after download, decompression and extraction. downloaded_filename = action_dict[ 'target_filename' ] else: downloaded_filename = os.path.split( url )[ -1 ] dir = common_util.url_download( work_dir, downloaded_filename, url, extract=True ) elif action_type == 'shell_command': # <action type="shell_command">git clone --recursive git://github.com/ekg/freebayes.git</action> # Eliminate the shell_command clone action so remaining actions can be processed correctly. filtered_actions = actions[ 1: ] return_code = handle_command( app, tool_dependency, install_dir, action_dict[ 'command' ] ) if return_code: return dir = package_name elif action_type == 'download_file': # <action type="download_file">http://effectors.org/download/version/TTSS_GUI-1.0.1.jar</action> # Download a single file to the working directory. filtered_actions = actions[ 1: ] url = action_dict[ 'url' ] if 'target_filename' in action_dict: # Sometimes compressed archives extracts their content to a folder other than the default defined file name. Using this # attribute will ensure that the file name is set appropriately and can be located after download, decompression and extraction. filename = action_dict[ 'target_filename' ] else: filename = url.split( '/' )[ -1 ] common_util.url_download( work_dir, filename, url ) dir = os.path.curdir else: # We're handling a complex repository dependency where we only have a set_environment tag set. # <action type="set_environment"> # <environment_variable name="PATH" action="prepend_to">$INSTALL_DIR/bin</environment_variable> # </action> filtered_actions = [ a for a in actions ] dir = install_dir # We need to be careful in determining if the value of dir is a valid directory because we're dealing with 2 environments, the fabric local # environment and the python environment. Checking the path as follows should work. full_path_to_dir = os.path.abspath( os.path.join( work_dir, dir ) ) if not os.path.exists( full_path_to_dir ): os.makedirs( full_path_to_dir ) # The package has been down-loaded, so we can now perform all of the actions defined for building it. 
for action_tup in filtered_actions: current_dir = os.path.abspath( os.path.join( work_dir, dir ) ) with lcd( current_dir ): action_type, action_dict = action_tup if action_type == 'make_directory': common_util.make_directory( full_path=action_dict[ 'full_path' ] ) elif action_type == 'move_directory_files': common_util.move_directory_files( current_dir=current_dir, source_dir=os.path.join( action_dict[ 'source_directory' ] ), destination_dir=os.path.join( action_dict[ 'destination_directory' ] ) ) elif action_type == 'move_file': # TODO: Remove this hack that resets current_dir so that the pre-compiled bwa binary can be found. # current_dir = '/Users/gvk/workspaces_2008/bwa/bwa-0.5.9' common_util.move_file( current_dir=current_dir, source=os.path.join( action_dict[ 'source' ] ), destination_dir=os.path.join( action_dict[ 'destination' ] ) ) elif action_type == 'set_environment': # Currently the only action supported in this category is "environment_variable". # Build a command line from the prior_installation_required, in case an environment variable is referenced # in the set_environment action. cmds = [] for env_shell_file_path in env_shell_file_paths: if os.path.exists( env_shell_file_path ): for env_setting in open( env_shell_file_path ): cmds.append( env_setting.strip( '\n' ) ) else: log.debug( 'Invalid file %s specified, ignoring set_environment action.', env_shell_file_path ) env_var_dicts = action_dict[ 'environment_variable' ] for env_var_dict in env_var_dicts: # Check for the presence of the $ENV[] key string and populate it if possible. env_var_dict = handle_environment_variables( app, tool_dependency, install_dir, env_var_dict, cmds ) env_command = common_util.create_or_update_env_shell_file( install_dir, env_var_dict ) return_code = handle_command( app, tool_dependency, install_dir, env_command ) if return_code: return elif action_type == 'set_environment_for_install': # Currently the only action supported in this category is a list of paths to one or more tool dependency env.sh files, # the environment setting in each of which will be injected into the environment for all <action type="shell_command"> # tags that follow this <action type="set_environment_for_install"> tag set in the tool_dependencies.xml file. env_shell_file_paths = action_dict[ 'env_shell_file_paths' ] elif action_type == 'setup_virtualenv': # TODO: maybe should be configurable venv_src_directory = os.path.abspath( os.path.join( app.config.tool_dependency_dir, '__virtualenv_src' ) ) if not install_virtualenv( app, venv_src_directory ): log.error( 'Unable to install virtualenv' ) return requirements = action_dict[ 'requirements' ] if os.path.exists( os.path.join( dir, requirements ) ): # requirements specified as path to a file requirements_path = requirements else: # requirements specified directly in XML, create a file with these for pip. requirements_path = os.path.join( install_dir, "requirements.txt" ) with open( requirements_path, "w" ) as f: f.write( requirements ) venv_directory = os.path.join( install_dir, "venv" ) # TODO: Consider making --no-site-packages optional. setup_command = "python %s/virtualenv.py --no-site-packages '%s'" % (venv_src_directory, venv_directory) # POSIXLY_CORRECT forces shell commands . and source to have the same # and well defined behavior in bash/zsh. activate_command = "POSIXLY_CORRECT=1; . 
%s" % os.path.join( venv_directory, "bin", "activate" ) install_command = "python '%s' install -r '%s'" % ( os.path.join( venv_directory, "bin", "pip" ), requirements_path ) full_setup_command = "%s; %s; %s" % ( setup_command, activate_command, install_command ) return_code = handle_command( app, tool_dependency, install_dir, full_setup_command ) if return_code: return site_packages_command = "%s -c 'import os, sys; print os.path.join(sys.prefix, \"lib\", \"python\" + sys.version[:3], \"site-packages\")'" % os.path.join( venv_directory, "bin", "python" ) output = handle_command( app, tool_dependency, install_dir, site_packages_command, return_output=True ) if output.return_code: return if not os.path.exists( output.stdout ): log.error( "virtualenv's site-packages directory '%s' does not exist", output.stdout ) return modify_env_command = common_util.create_or_update_env_shell_file( install_dir, dict( name="PYTHONPATH", action="prepend_to", value=output.stdout ) ) return_code = handle_command( app, tool_dependency, install_dir, modify_env_command ) if return_code: return modify_env_command = common_util.create_or_update_env_shell_file( install_dir, dict( name="PATH", action="prepend_to", value=os.path.join( venv_directory, "bin" ) ) ) return_code = handle_command( app, tool_dependency, install_dir, modify_env_command ) if return_code: return elif action_type == 'shell_command': with settings( warn_only=True ): cmd = '' for env_shell_file_path in env_shell_file_paths: if os.path.exists( env_shell_file_path ): for env_setting in open( env_shell_file_path ): cmd += '%s\n' % env_setting else: log.debug( 'Invalid file %s specified, ignoring shell_command action.', env_shell_file_path ) cmd += action_dict[ 'command' ] return_code = handle_command( app, tool_dependency, install_dir, cmd ) if return_code: return elif action_type == 'template_command': env_vars = dict() for env_shell_file_path in env_shell_file_paths: if os.path.exists( env_shell_file_path ): for env_setting in open( env_shell_file_path ): env_string = env_setting.split( ';' )[ 0 ] env_name, env_path = env_string.split( '=' ) env_vars[ env_name ] = env_path else: log.debug( 'Invalid file %s specified, ignoring template_command action.', env_shell_file_path ) env_vars.update( common_util.get_env_var_values( install_dir ) ) language = action_dict[ 'language' ] with settings( warn_only=True, **env_vars ): if language == 'cheetah': # We need to import fabric.api.env so that we can access all collected environment variables. cmd = fill_template( '#from fabric.api import env\n%s' % action_dict[ 'command' ], context=env_vars ) return_code = handle_command( app, tool_dependency, install_dir, cmd ) if return_code: return elif action_type == 'download_file': # Download a single file to the current working directory. url = action_dict[ 'url' ] if 'target_filename' in action_dict: filename = action_dict[ 'target_filename' ] else: filename = url.split( '/' )[ -1 ] extract = action_dict.get( 'extract', False ) common_util.url_download( current_dir, filename, url, extract=extract ) elif action_type == 'change_directory': target_directory = os.path.realpath( os.path.normpath( os.path.join( current_dir, action_dict[ 'directory' ] ) ) ) if target_directory.startswith( os.path.realpath( current_dir ) ) and os.path.exists( target_directory ): # Change directory to a directory within the current working directory. 
dir = target_directory elif target_directory.startswith( os.path.realpath( work_dir ) ) and os.path.exists( target_directory ): # Change directory to a directory above the current working directory, but within the defined work_dir. dir = target_directory.replace( os.path.realpath( work_dir ), '' ).lstrip( '/' ) else: log.error( 'Invalid or nonexistent directory %s specified, ignoring change_directory action.', target_directory )
9,923
def plugin_scope(): """Returns the capability as the remote network driver. This function returns the capability of the remote network driver, which is ``global`` or ``local`` and defaults to ``local``. With ``global`` capability, the network information is shared among multipe Docker daemons if the distributed store is appropriately configured. See the following link for more details about the spec: https://github.com/docker/libnetwork/blob/master/docs/remote.md#set-capability # noqa """ LOG.debug("Received /NetworkDriver.GetCapabilities") capabilities = {'Scope': cfg.CONF.capability_scope} return flask.jsonify(capabilities)
9,924
def _create_chimeric_msa( # pylint: disable=too-many-arguments output_folder, cluster, subexon_df, gene2speciesname, connected_subexons, aligner='ProGraphMSA', padding='XXXXXXXXXX', species_list=None): """Return a modified subexon_df, the dict of chimerics and the msa.""" subexon_df, subexon_matrix = subexons.alignment.create_subexon_matrix( subexon_df) chimerics = subexons.alignment.create_chimeric_sequences( subexon_df, subexon_matrix, connected_subexons, padding=padding) msa_file = _outfile(output_folder, "chimeric_alignment_", cluster, ".fasta") if chimerics: chimerics = subexons.alignment.sort_species(chimerics, gene2speciesname, species_list) subexons.alignment.run_aligner(chimerics, aligner=aligner, output_path=msa_file) msa = subexons.alignment.read_msa_fasta(msa_file) else: if os.path.isfile(msa_file): os.remove(msa_file) msa = None return subexon_df, chimerics, msa
9,925
def _expand_sources(sources): """ Expands a user-provided specification of source files into a list of paths. """ if sources is None: return [] if isinstance(sources, str): sources = [x.strip() for x in sources.split(",")] elif isinstance(sources, (float, int)): sources = [str(sources)] return [path for source in sources for path in _glob(source)]
9,926
def get_stream(stream_id: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStreamResult: """ This data source provides details about a specific Stream resource in Oracle Cloud Infrastructure Streaming service. Gets detailed information about a stream, including the number of partitions. ## Example Usage ```python import pulumi import pulumi_oci as oci test_stream = oci.streaming.get_stream(stream_id=oci_streaming_stream["test_stream"]["id"]) ``` :param str stream_id: The OCID of the stream. """ __args__ = dict() __args__['streamId'] = stream_id if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('oci:streaming/getStream:getStream', __args__, opts=opts, typ=GetStreamResult).value return AwaitableGetStreamResult( compartment_id=__ret__.compartment_id, defined_tags=__ret__.defined_tags, freeform_tags=__ret__.freeform_tags, id=__ret__.id, lifecycle_state_details=__ret__.lifecycle_state_details, messages_endpoint=__ret__.messages_endpoint, name=__ret__.name, partitions=__ret__.partitions, retention_in_hours=__ret__.retention_in_hours, state=__ret__.state, stream_id=__ret__.stream_id, stream_pool_id=__ret__.stream_pool_id, time_created=__ret__.time_created)
9,927
def get_job(JobName=None): """ Retrieves an existing job definition. See also: AWS API Documentation Exceptions :example: response = client.get_job( JobName='string' ) :type JobName: string :param JobName: [REQUIRED]\nThe name of the job definition to retrieve.\n :rtype: dict ReturnsResponse Syntax{ 'Job': { 'Name': 'string', 'Description': 'string', 'LogUri': 'string', 'Role': 'string', 'CreatedOn': datetime(2015, 1, 1), 'LastModifiedOn': datetime(2015, 1, 1), 'ExecutionProperty': { 'MaxConcurrentRuns': 123 }, 'Command': { 'Name': 'string', 'ScriptLocation': 'string', 'PythonVersion': 'string' }, 'DefaultArguments': { 'string': 'string' }, 'NonOverridableArguments': { 'string': 'string' }, 'Connections': { 'Connections': [ 'string', ] }, 'MaxRetries': 123, 'AllocatedCapacity': 123, 'Timeout': 123, 'MaxCapacity': 123.0, 'WorkerType': 'Standard'|'G.1X'|'G.2X', 'NumberOfWorkers': 123, 'SecurityConfiguration': 'string', 'NotificationProperty': { 'NotifyDelayAfter': 123 }, 'GlueVersion': 'string' } } Response Structure (dict) -- Job (dict) --The requested job definition. Name (string) --The name you assign to this job definition. Description (string) --A description of the job. LogUri (string) --This field is reserved for future use. Role (string) --The name or Amazon Resource Name (ARN) of the IAM role associated with this job. CreatedOn (datetime) --The time and date that this job definition was created. LastModifiedOn (datetime) --The last point in time when this job definition was modified. ExecutionProperty (dict) --An ExecutionProperty specifying the maximum number of concurrent runs allowed for this job. MaxConcurrentRuns (integer) --The maximum number of concurrent runs allowed for the job. The default is 1. An error is returned when this threshold is reached. The maximum value you can specify is controlled by a service limit. Command (dict) --The JobCommand that executes this job. Name (string) --The name of the job command. For an Apache Spark ETL job, this must be glueetl . For a Python shell job, it must be pythonshell . ScriptLocation (string) --Specifies the Amazon Simple Storage Service (Amazon S3) path to a script that executes a job. PythonVersion (string) --The Python version being used to execute a Python shell job. Allowed values are 2 or 3. DefaultArguments (dict) --The default arguments for this job, specified as name-value pairs. You can specify arguments here that your own job-execution script consumes, as well as arguments that AWS Glue itself consumes. For information about how to specify and consume your own Job arguments, see the Calling AWS Glue APIs in Python topic in the developer guide. For information about the key-value pairs that AWS Glue consumes to set up your job, see the Special Parameters Used by AWS Glue topic in the developer guide. (string) -- (string) -- NonOverridableArguments (dict) --Non-overridable arguments for this job, specified as name-value pairs. (string) -- (string) -- Connections (dict) --The connections used for this job. Connections (list) --A list of connections used by the job. (string) -- MaxRetries (integer) --The maximum number of times to retry this job after a JobRun fails. AllocatedCapacity (integer) --This field is deprecated. Use MaxCapacity instead. The number of AWS Glue data processing units (DPUs) allocated to runs of this job. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. 
For more information, see the AWS Glue pricing page . Timeout (integer) --The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters TIMEOUT status. The default is 2,880 minutes (48 hours). MaxCapacity (float) --The number of AWS Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the AWS Glue pricing page . Do not set Max Capacity if using WorkerType and NumberOfWorkers . The value that can be allocated for MaxCapacity depends on whether you are running a Python shell job or an Apache Spark ETL job: When you specify a Python shell job (JobCommand.Name ="pythonshell"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU. When you specify an Apache Spark ETL job (JobCommand.Name ="glueetl"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation. WorkerType (string) --The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, or G.2X. For the Standard worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker. For the G.1X worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. For the G.2X worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs. NumberOfWorkers (integer) --The number of workers of a defined workerType that are allocated when a job runs. The maximum number of workers you can define are 299 for G.1X , and 149 for G.2X . SecurityConfiguration (string) --The name of the SecurityConfiguration structure to be used with this job. NotificationProperty (dict) --Specifies configuration properties of a job notification. NotifyDelayAfter (integer) --After a job run starts, the number of minutes to wait before sending a job run delay notification. GlueVersion (string) --Glue version determines the versions of Apache Spark and Python that AWS Glue supports. The Python version indicates the version supported for jobs of type Spark. For more information about the available AWS Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide. Jobs that are created without specifying a Glue version default to Glue 0.9. 
Exceptions Glue.Client.exceptions.InvalidInputException Glue.Client.exceptions.EntityNotFoundException Glue.Client.exceptions.InternalServiceException Glue.Client.exceptions.OperationTimeoutException :return: { 'Job': { 'Name': 'string', 'Description': 'string', 'LogUri': 'string', 'Role': 'string', 'CreatedOn': datetime(2015, 1, 1), 'LastModifiedOn': datetime(2015, 1, 1), 'ExecutionProperty': { 'MaxConcurrentRuns': 123 }, 'Command': { 'Name': 'string', 'ScriptLocation': 'string', 'PythonVersion': 'string' }, 'DefaultArguments': { 'string': 'string' }, 'NonOverridableArguments': { 'string': 'string' }, 'Connections': { 'Connections': [ 'string', ] }, 'MaxRetries': 123, 'AllocatedCapacity': 123, 'Timeout': 123, 'MaxCapacity': 123.0, 'WorkerType': 'Standard'|'G.1X'|'G.2X', 'NumberOfWorkers': 123, 'SecurityConfiguration': 'string', 'NotificationProperty': { 'NotifyDelayAfter': 123 }, 'GlueVersion': 'string' } } :returns: (string) -- (string) -- """ pass
9,928
def test_thematic_breaks_018(): """ Test case 018: (part a) Four spaces is too many: """ # Arrange source_markdown = """ ***""" expected_tokens = [ "[icode-block(1,5): :]", "[text(1,5):***:]", "[end-icode-block:::True]", ] expected_gfm = """<pre><code>*** </code></pre>""" # Act & Assert act_and_assert(source_markdown, expected_gfm, expected_tokens)
9,929
def components(path): """Split a POSIX path into components.""" head, tail = os.path.split(os.path.normpath(path)) if head == "": return [tail] elif head == "/": return [head + tail] else: return components(head) + [tail]
9,930
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels): """ Fusion method. """ n_channels_int = n_channels in_act = input_a + input_b t_act = ops.Tanh()(in_act[:, :n_channels_int, :]) s_act = ops.Sigmoid()(in_act[:, n_channels_int:, :]) acts = t_act * s_act return acts
9,931
def test_copy_report_to_s3() -> None: """ Tests the copying of the report to an s3 bucket """ file_name = "test_file" s3_bucket_name = "test_bucket" account_name = "12345666757" response = True s3_client = Mock(upload_file=Mock(return_value=response)) actual_response = copy_report(file_name, account_name, s3_bucket_name, s3_client) s3_client.upload_file.assert_called_once_with(File=file_name, Bucket=s3_bucket_name, Prefix=account_name) assert actual_response
9,932
def _classes_dict(filename): """ Open JSON file and read the data for the Classes (and Origins). filename - the file name as a string. Runtime: O(n) """ class_dict = {} # {'robot': ['blitzcrank']} class_bonus_dict = {} dict = { 1: {}, 2: {}, 3: {}, 4 : {}, 6 : {}} # { 1 : { 'robot' : set['blitzcrank'], 'exile' : set['yasuo'] }, 2 : ... } with open(filename) as json_file: data = json.load(json_file) for class_obj in data.items(): # O(n) key = class_obj[1]['key'] # String name = class_obj[1]['name'] # String description = class_obj[1]['description'] # String accentChampionImage = class_obj[1]['accentChampionImage'] # URL as String bonuses = class_obj[1]['bonuses'] # Array [{'needed': int, 'effect': string}] needed = bonuses[-1]['needed'] # Check the highest number for needed. (In this case it's the last item in the array) class_dict[key] = [] class_bonus_dict[key] = needed dict[needed].update({class_obj[0]: []}) return dict
9,933
def main() -> None: """Entry point of this test project. """ _: ap.Stage = ap.Stage(background_color='#333') interface: Points2DInterface = Points2DInterface() interface.variable_name = 'test_point_2d_interface' interface.points = ap.Array([ap.Point2D(10, 20), ap.Point2D(30, 40)]) ap.assert_equal(left=ap.Point2D(10, 20), right=interface.points[0]) interface.points[1] = ap.Point2D(50, 60) ap.assert_equal(left=ap.Point2D(50, 60), right=interface.points[1]) ap.save_overall_html(dest_dir_path=_DEST_DIR_PATH)
9,934
def test_get_bitinformation_dim(): """Test xb.get_bitinformation is sensitive to dim.""" ds = xr.tutorial.load_dataset("rasm") bitinfo0 = xb.get_bitinformation(ds, axis=0) bitinfo2 = xb.get_bitinformation(ds, axis=2) assert_different(bitinfo0, bitinfo2)
9,935
def set_incident_seen(incident, user=None): """ Updates the incident to be seen """ is_org_member = incident.organization.has_access(user) if is_org_member: is_project_member = False for incident_project in IncidentProject.objects.filter(incident=incident).select_related( "project" ): if incident_project.project.member_set.filter(user=user).exists(): is_project_member = True break if is_project_member: incident_seen, created = IncidentSeen.objects.create_or_update( incident=incident, user=user, values={"last_seen": timezone.now()} ) return incident_seen return False
9,936
def single_parity_check( llr: np.array, mask_steps: int = 0, last_chunk_type: int = 0, ) -> np.array: """Compute beta value for Single parity node.""" all_sign = np.sign(np.prod(llr)) abs_alpha = np.fabs(llr) first_min_idx, second_min_idx = np.argsort(abs_alpha)[:2] result = np.sign(llr) * all_sign for i in range(result.size): if i == first_min_idx: result[i] *= abs_alpha[second_min_idx] else: result[i] *= abs_alpha[first_min_idx] return result
9,937
def show_section(res, section, caveat_outcome=None): """ Shows a given named section from a description of an ingestion submission. The caveat is used when there has been an error and should be a phrase that describes the fact that output shown is only up to the point of the caveat situation. Instead of a "My Heading" header the output will be "My Heading (prior to <caveat>)." :param res: the description of an ingestion submission as a python dictionary that represents JSON data :param section: the name of a section to find either in the toplevel or in additional_data. :param caveat_outcome: a phrase describing some caveat on the output """ section_data = get_section(res, section) if caveat_outcome and not section_data: # In the case of non-success, be brief unless there's data to show. return if caveat_outcome: caveat = " (prior to %s)" % caveat_outcome else: caveat = "" show("----- %s%s -----" % (keyword_as_title(section), caveat)) if not section_data: show("Nothing to show.") elif isinstance(section_data, dict): show(json.dumps(section_data, indent=2)) elif isinstance(section_data, list): for line in section_data: show(line) else: # We don't expect this, but such should be shown as-is, mostly to see what it is. show(section_data)
9,938
def send_uart(ctx, index, binary, data): """Send data to UART. UART1 is the AT Command interface, so you probably want to use UART3! """ if binary: try: data = bytes.fromhex(data) except ValueError: click.echo('Invalid binary data') return lora = Rak811() try: lora.send_uart(data, int(index)) except Rak811Error as e: print_exception(e) lora.close() return if ctx.obj['VERBOSE']: click.echo('Data sent.')
9,939
def _deserialize_job_result(user_input: JSON) -> JobResult: """Deserialize a JobResult from JSON.""" job = _deserialize_job(user_input['job']) plan = _deserialize_plan(user_input['plan']) is_done = user_input['is_done'] outputs = dict() # type: Dict[str, Asset] for name, asset in user_input['outputs'].items(): outputs[name] = _deserialize_asset(asset) return JobResult(job, plan, is_done, outputs)
9,940
def _brute_force_knn(X, centers, k, return_distance=True): """ :param X: array of shape=(n_samples, n_features) :param centers: array of shape=(n_centers, n_features) :param k: int, only looking for the nearest k points to each center. :param return_distance: bool, if True the return the distance along with the points :return: """ if k == 1: nearest, dists = pairwise_distances_argmin_min(centers, X) return (dists, nearest) if return_distance else nearest else: dists = pairwise_distances(centers, X) nearest = np.argsort(dists, axis=1)[:, :k] return (np.vstack([dists[i, nearest[i]] for i in range(dists.shape[0])]), nearest) if return_distance else nearest
9,941
def proceed(): """Proceed without waiting for any action to be triggered on the consumer.""" yield _proceed
9,942
def remove_rule(rule_id): """Remove a single rule""" ruleset = packetfilter.get_ruleset() ruleset.remove(rule_id) packetfilter.load_ruleset(ruleset) save_pfconf(packetfilter) return redirect(url_for('rules', message=PFWEB_ALERT_SUCCESS_DEL), code=302)
9,943
def setup_argparse(parser: argparse.ArgumentParser) -> None: """Setup argument parser for ``cubi-tk sea-snap pull-isa``.""" parser.add_argument("--hidden-cmd", dest="sea_snap_cmd", default=run, help=argparse.SUPPRESS) group_sodar = parser.add_argument_group("SODAR-related") group_sodar.add_argument( "--sodar-url", default=os.environ.get("SODAR_URL", "https://sodar.bihealth.org/"), help="URL to SODAR, defaults to SODAR_URL environment variable or fallback to https://sodar.bihealth.org/", ) group_sodar.add_argument( "--sodar-api-token", default=os.environ.get("SODAR_API_TOKEN", None), help="Authentication token when talking to SODAR. Defaults to SODAR_API_TOKEN environment variable.", ) parser.add_argument( "--allow-overwrite", default=False, action="store_true", help="Allow to overwrite output file, default is not to allow overwriting output file.", ) parser.add_argument( "--output_folder", default="ISA_files/", help="Output folder path for ISA files." ) parser.add_argument("project_uuid", help="UUID of project to pull the sample sheet for.")
9,944
def HexToMPDecimal(hex_chars): """ Convert bytes to an MPDecimal string. Example \x00 -> "aa" This gives us the AppID for a chrome extension. """ result = '' base = ord('a') for i in xrange(len(hex_chars)): value = ord(hex_chars[i]) dig1 = value / 16 dig2 = value % 16 result += chr(dig1 + base) result += chr(dig2 + base) return result
9,945
def genome_level_parallelization(bam_file, ref_file, vcf_file, output_dir_path, max_threads, confident_bed_tree): """ This method calls chromosome_level_parallelization for each chromosome. :param bam_file: path to BAM file :param ref_file: path to reference FASTA file :param vcf_file: path to VCF file :param output_dir_path: path to output directory :param max_threads: Maximum number of threads to run at one instance :param confident_bed_tree: tree containing confident bed intervals :return: """ # --- NEED WORK HERE --- GET THE CHROMOSOME NAMES FROM THE BAM FILE chr_list = ["chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10", "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", "chr18", "chr19"] # chr_list = ["1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19"] program_start_time = time.time() # chr_list = ["19"] # each chromosome in list for chr_name in chr_list: start_time = time.time() # do a chromosome level parallelization chromosome_level_parallelization(chr_name, bam_file, ref_file, vcf_file, output_dir_path, max_threads, confident_bed_tree) end_time = time.time() sys.stderr.write(TextColor.PURPLE + "FINISHED " + str(chr_name) + " PROCESSES" + "\n") sys.stderr.write(TextColor.CYAN + "TIME ELAPSED: " + str(end_time - start_time) + "\n") # wait for the last process to end before file processing while True: if len(multiprocessing.active_children()) == 0: break summary_file_to_csv(output_dir_path, chr_list) # merge_all_candidate_dictionaries(output_dir_path, chr_list) program_end_time = time.time() sys.stderr.write(TextColor.RED + "ALL PROCESSES FINISHED SUCCESSFULLY" + "\n") sys.stderr.write(TextColor.CYAN + "TOTAL TIME FOR GENERATING ALL RESULTS: " + str(program_end_time-program_start_time) + "\n")
9,946
def assert_shape(tensor, ref_shape): """Assert that the shape of a tensor matches the given list of integers. None indicates that the size of a dimension is allowed to vary. Performs symbolic assertion when used in torch.jit.trace(). """ if tensor.ndim != len(ref_shape): raise AssertionError(f"Wrong number of dimensions: got {tensor.ndim}, expected {len(ref_shape)}") for idx, (size, ref_size) in enumerate(zip(tensor.shape, ref_shape)): if ref_size is None: pass elif isinstance(ref_size, torch.Tensor): with suppress_tracer_warnings(): # as_tensor results are registered as constants symbolic_assert( torch.equal(torch.as_tensor(size), ref_size), f"Wrong size for dimension {idx}", ) elif isinstance(size, torch.Tensor): with suppress_tracer_warnings(): # as_tensor results are registered as constants symbolic_assert( torch.equal(size, torch.as_tensor(ref_size)), f"Wrong size for dimension {idx}: expected {ref_size}", ) elif size != ref_size: raise AssertionError(f"Wrong size for dimension {idx}: got {size}, expected {ref_size}")
9,947
async def concurrent(streamqueue: asyncio.Queue, trace_name='concurrent', name='stream'): """Run code concurrently in different streams. :param streamqueue: asyncio.Queue instance. Queue tasks define the pool of streams used for concurrent execution. """ if not torch.cuda.is_available(): yield return initial_stream = torch.cuda.current_stream() with torch.cuda.stream(initial_stream): stream = await streamqueue.get() assert isinstance(stream, torch.cuda.Stream) try: with torch.cuda.stream(stream): logger.debug('%s %s is starting, stream: %s', trace_name, name, stream) yield current = torch.cuda.current_stream() assert current == stream logger.debug('%s %s has finished, stream: %s', trace_name, name, stream) finally: streamqueue.task_done() streamqueue.put_nowait(stream)
9,948
def fit_growth_curves(input_file, file_data_frame, file_data_units, condition_unit, time_unit, cell_density_unit): """ :Authors: Chuankai Cheng <chuankai@usc.edu> and J. Cameron Thrash <thrash@usc.edu> :License: MIT :Version: 1.0 :Date: 2021-03-17 :Repository: https://github.com/thrash-lab/sparse-growth-curve """ output_data_indices=file_data_frame.groupby( ['Strain','Replicate','Condition'] ).size().reset_index().rename(columns={0:'count'} )[['Strain','Replicate','Condition']] strains_conditions=output_data_indices.groupby(['Strain','Condition'] ).size().reset_index()[['Strain','Condition']] output_data_indices['Growth: Doubling rate']=0 output_data_indices['Death: Doubling rate']=0 output_data_indices=output_data_indices.astype(object) output_data_indices=output_data_indices.sort_values(by=['Strain','Condition']) strains=np.unique(strains_conditions['Strain']) row_num=len(strains) col_num=np.int(np.ceil(len(strains_conditions)/len(strains))) plt.figure(figsize=(col_num*2+1, row_num*2+1)) plot_j=1 previous_condition=output_data_indices['Condition'].values[0] plt.subplot(row_num, col_num, plot_j) color_i=0 plt.title(str(output_data_indices['Strain'].values[0])+'\n' +str(output_data_indices['Condition'].values[0])+' ' +condition_unit) plt.ylabel(cell_density_unit) plt.xlabel(time_unit) for i in output_data_indices.index: target_gr_index=output_data_indices.loc[i] target_growth_curve_df = file_data_frame[ (file_data_frame['Strain']==target_gr_index['Strain'])& (file_data_frame['Condition']==target_gr_index['Condition']) & (file_data_frame['Replicate']==target_gr_index['Replicate'])] #print('\n\nStrain:', target_gr_index['Strain'], # '\t Condition:',str(target_gr_index['Condition'])+' '+condition_unit, # '\t Replicate:',str(target_gr_index['Replicate'])) time=target_growth_curve_df.loc[:,'Time'].values cell_density=target_growth_curve_df.loc[:,'Cell density'].values #print('time=', time) #print('cell density=', 'cell_density') if target_gr_index['Condition']!=previous_condition: plt.yscale('log') plt.ylim(10**np.floor(np.log10(np.min(file_data_frame['Cell density']))-1), 10**np.ceil(np.log10(np.max(file_data_frame['Cell density']))+1)) plt.legend() #plt.xlim(np.floor(np.min(file_data_frame['Time'])), # np.ceil(np.max(file_data_frame['Time']))) color_i=0 plot_j+=1 plt.subplot(row_num, col_num, plot_j) plt.title(str(target_gr_index['Strain'])+'\n' +str(target_gr_index['Condition'])+' ' +condition_unit) plt.ylabel(cell_density_unit) plt.xlabel(time_unit) if len(cell_density)>4: (all_fit_time, all_fit_cell_density, all_fit_conf_band, selected_doubling_rate, selected_fit_time, selected_fit_cell_density, selected_doubling_rate_d, selected_fit_time_d, selected_fit_cell_density_d)=fit_growth_curve( time, cell_density, one_order=10, decision_tree_depth=1) output_data_indices.loc[i,'Growth: Doubling rate']=selected_doubling_rate output_data_indices.loc[i,'Death: Doubling rate']=selected_doubling_rate_d for k in range(len(all_fit_time)): #plt.plot(all_fit_time[i], all_fit_cell_density[i], 'k--') #plt.fill_between(all_fit_time[k], # all_fit_cell_density[k]*(all_fit_conf_band[k]), # all_fit_cell_density[k]/(all_fit_conf_band[k]), # color=colormap(color_i), alpha=0.1) plt.plot(selected_fit_time, selected_fit_cell_density, '-', color=colormap(color_i), linewidth=2) plt.plot(selected_fit_time_d, selected_fit_cell_density_d, '--', color=colormap(color_i), linewidth=1) elif len(cell_density)>2: x=time y=np.log2(cell_density) x_fit = np.arange(0.0, x[-1], 0.01)[:, np.newaxis] (doubling_rate, pre_y, ci) = 
myLinearRegression_CB(x, y, x_fit, one_order=10) #plt.fill_between(x_fit, # pre_y*ci, # pre_y/ci, # color=colormap(color_i), alpha=0.1) if doubling_rate>0: output_data_indices.loc[i,'Growth: Doubling rate']=doubling_rate plt.plot(x_fit, pre_y, '-', color=colormap(color_i), linewidth=2) else: output_data_indices.loc[i,'Death: Doubling rate']=doubling_rate plt.plot(x_fit, pre_y, '--', color=colormap(color_i), linewidth=1) elif len(cell_density)==2: x=time y=np.log2(cell_density) doubling_rate=(y[1]-y[0])/(x[1]-x[0]) output_data_indices.loc[i,'Growth: Doubling rate']=doubling_rate if doubling_rate>0: output_data_indices.loc[i,'Growth: Doubling rate']=doubling_rate plt.plot(x, y, '-', color=colormap(color_i), linewidth=2) else: output_data_indices.loc[i,'Death: Doubling rate']=doubling_rate plt.plot(x, y, '--', color=colormap(color_i), linewidth=1) plt.plot(time, cell_density,'o',alpha=0.3, color=colormap(color_i), label=output_data_indices.loc[i]['Replicate']) color_i+=1 previous_condition=output_data_indices.loc[i]['Condition'] plt.yscale('log') plt.ylim(10**np.floor(np.log10(np.min(file_data_frame['Cell density']))-1), 10**np.ceil(np.log10(np.max(file_data_frame['Cell density']))+1)) #plt.xlim(np.floor(np.min(file_data_frame['Time'])), # np.ceil(np.max(file_data_frame['Time']))) plt.legend() plt.tight_layout() output_file_string=(output_folder+ '/'+input_file+ '/1_Data_fit_visualization_'+ dt_string+'.pdf') plt.savefig(output_file_string) print('output file saved:'+output_file_string) return output_data_indices
9,949
def stringify(li,delimiter): """ Converts list entries to strings and joins with delimiter.""" string_list = map(str,li) return delimiter.join(string_list)
9,950
def norm_w(x, w): """ Compute sum_i( w[i] * |x[i]| ). See p. 7. """ return (w * abs(x)).sum()
9,951
def WordSyllables(outf, words, segments, IFflag=False, Adaptflag=True): """writes out a grammar defining Word* in terms of Syllables. words should be a sequence of Word symbols""" if IFflag=='Segments': for word in words: if Adaptflag: ruleprefix = word else: ruleprefix = "1 1 "+word outf.write("%s --> Segments\n"%ruleprefix) outf.write("1 1 Segments --> Segment\n") outf.write("1 1 Segments --> Segment Segments\n") for segment in segments: outf.write("1 1 Segment --> %s\n"%segment) return for word in words: if Adaptflag: ruleprefix = word else: ruleprefix = "1 1 "+word if IFflag: outf.write("%s --> SyllableIF\n"%ruleprefix) outf.write("%s --> SyllableI SyllableF\n"%ruleprefix) outf.write("%s --> SyllableI Syllable SyllableF\n"%ruleprefix) outf.write("%s --> SyllableI Syllable Syllable SyllableF\n"%ruleprefix) else: outf.write("%s --> Syllable\n"%ruleprefix) outf.write("%s --> Syllable Syllable\n"%ruleprefix) outf.write("%s --> Syllable Syllable Syllable\n"%ruleprefix) outf.write("%s --> Syllable Syllable Syllable Syllable\n"%ruleprefix) outf.write("1 1 Syllable --> Onset Rhyme\n") outf.write("1 1 Syllable --> Rhyme\n") outf.write("1 1 Rhyme --> Nucleus Coda\n") outf.write("1 1 Rhyme --> Nucleus\n") outf.write("Onset --> Consonants\n") outf.write("Nucleus --> Vowels\n") outf.write("Nucleus --> SyllabicConsonants\n") outf.write("Coda --> Consonants\n") if IFflag: outf.write("1 1 SyllableIF --> OnsetI RhymeF\n") outf.write("1 1 SyllableIF --> RhymeF\n") outf.write("1 1 SyllableI --> OnsetI Rhyme\n") outf.write("1 1 SyllableI --> Rhyme\n") outf.write("1 1 SyllableF --> Onset RhymeF\n") outf.write("1 1 SyllableF --> RhymeF\n") outf.write("OnsetI --> Consonants\n") outf.write("1 1 RhymeF --> Nucleus CodaF\n") outf.write("1 1 RhymeF --> Nucleus\n") outf.write("1 1 CodaF --> Consonants\n") CVs(outf, segments)
9,952
def _tensor_run_opt_ext(opt, momentum, learning_rate, gradient, weight, moment): """Apply momentum optimizer to the weight parameter using Tensor.""" success = True success = F.depend(success, opt(weight, moment, learning_rate, gradient, momentum)) return success
9,953
def get_config_settings(env: str = "dev") -> Dict: """ Retrieves configuration from YAML file """ config_fh = construct_config_path(env) with open(config_fh, "r") as f: data = yaml.safe_load(f) return data
9,954
def get_all(connection: ApiConnection, config: str, section: str = None) -> dict: """Get all sections of a config or all values of a section. :param connection: :param config:UCI config name :param section:[optional] UCI section name :return: JSON RPC response result """ return request(connection, 'uci', 'get_all', config, section)
9,955
def load_module(module, app): """Load an object from a Python module In: - ``module`` -- name of the module - ``app`` -- name of the object to load Return: - (the object, None) """ r = __import__(module, fromlist=('',)) if app is not None: r = getattr(r, app) return r, None
9,956
def get_routes(app: web.Application) -> list: """ Get the full list of defined routes """ return get_standard_routes(app) + get_custom_routes(app)
9,957
def cb_round(series: pd.Series, base: Number = 5, sig_dec: int = 0): """ Returns the pandas series (or column) with values rounded per the custom base value Args: series (pd.Series): data to be rounded base (float): base value to which data should be rounded (may be decimal) sig_dec (int): number of significant decimals for the custom-rounded value Returns: pd.Series """ valid.validate_array(series, "series", expected_len=None) if not base >= 0.01: err = f"cannot round with base {base}." + "cb_round designed for base >= 0.01." raise ValueError(err) result = series.apply(lambda x: round(base * round(float(x) / base), sig_dec)) return result
9,958
def makeGaussian(size, sigma=3, center=None): """ Make a square gaussian kernel. size is the length of a side of the square fwhm is full-width-half-maximum, which can be thought of as an effective radius. """ x = np.arange(0, size, 1, float) y = x[:, np.newaxis] if center is None: x0 = y0 = size // 2 else: x0 = center[0] y0 = center[1] return np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2.0 * sigma ** 2)) # return np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / sigma ** 2)
9,959
def cmd_list(txn, path, args): """List raw keys/values from database.""" recurse = (8 if args['-R'] else 0) keys = txn.raw.list_keys(path, recurse=recurse) if args['--quiet']: if args['values']: values = [txn.raw.get(key) for key in keys] print(" ".join(values)) else: print(" ".join(keys)) else: if args['values']: print("Keys with {} prefix:".format(path)) for key in keys: value = txn.raw.get(key) print("{} = {}".format(key, value)) else: print("Keys with {} prefix: {}".format(path, ", ".join(keys)))
9,960
def average_relative_error(y_pred, y_true): """Calculate Average Relative Error Args: y_true (array-like): np.ndarray or torch.Tensor of dimension N x d with actual values y_pred (array-like): np.ndarray or torch.Tensor of dimension N x d with predicted values Returns: float: Average Relative Mean Squared Error Raises: ValueError : If Parameters are not both of type np.ndarray or torch.Tensor """ if isinstance(y_true, np.ndarray) and isinstance(y_pred, np.ndarray): return sum(sum(abs(y_true - y_pred) / y_true) / len(y_true)) / len(y_true[0, :]) elif isinstance(y_true, torch.Tensor) and isinstance(y_pred, torch.Tensor): return torch.sum(torch.sum(torch.abs(y_true - y_pred) / y_true, dim=0) / len(y_true)) / len(y_true[0, :]) else: raise ValueError( 'y_true and y_pred must be both of type numpy.ndarray or torch.Tensor')
9,961
def do_device_shutdown(cs, args): """Shutdown a specific device in the pool.""" device = _find_device(cs, args.mac) cs.devices.shutdown(device)
9,962
def validate_and_upload(region, conf): """Validate and upload CloudFormation templates to S3.""" templates = collect_templates(conf) error = False for t in templates: if not t.validate(): error = True if error: exit(1) for t in templates: t.upload() process_assets(region, conf)
9,963
def write_package_file(type, output_dir, pkg_name, pkg_ver, additional_args=None):
    """Reads the template file for the given type and writes out the package file
    (setup.py, package.json, etc)"""
    if additional_args is None:
        additional_args = []
    if type == 'python':
        template_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "setup.py.template")
        setup_py_file = os.path.join(os.path.abspath(output_dir), "setup.py")
    elif type == 'node':
        template_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "package.json.template")
        setup_py_file = os.path.join(os.path.abspath(output_dir), "package.json")
    else:
        raise ValueError("Type must be one of 'python' or 'node'")

    with open(template_file, 'r') as inf:
        template = inf.read()

    with open(setup_py_file, 'w+') as outf:
        outf.write(template.format(PACKAGE_NAME=pkg_name, PACKAGE_VERSION=pkg_ver,
                                   PACKAGE_ADDITIONAL_SETUP_ARGS=',\n                           '.join(additional_args)))
9,964
def main(): """ Main function. Start translation of relation triplets based on command line arguments. """ optparser = init_optparser() (options, args) = optparser.parse_args() fetch_relation_triples_of_file(options.input, options.output, options.log, options.lang)
9,965
def make_subprocess_hook_mock(exit_code: int, output: str) -> Mock: """Mock a SubprocessHook factory object for use in testing. This mock allows us to validate that the RenvOperator is executing subprocess commands as expected without running them for real. """ result_mock = Mock() result_mock.exit_code = exit_code result_mock.output = output hook_instance_mock = Mock() hook_instance_mock.run_command = Mock(return_value=result_mock) hook_factory_mock = Mock(return_value=hook_instance_mock) return hook_factory_mock
9,966
def binstr2int(bin_str: str) -> int:
    """Convert a binary string to a decimal integer (the inverse of int2binstr).

    Args:
        bin_str: binary string, e.g. '0b0011' or '0011'

    Returns:
        The converted decimal integer
    """
    return int(bin_str, 2)
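# Usage sketch (added illustration, not from the original source).
assert binstr2int('0b0011') == 3
assert binstr2int('1010') == 10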
9,967
def _nodeset_compare(compare, a, b, relational=False): """ Applies a comparison function to node-sets a and b in order to evaluate equality (=, !=) and relational (<, <=, >=, >) expressions in which both objects to be compared are node-sets. Returns an XPath boolean indicating the result of the comparison. """ if isinstance(a, Types.NodesetType) and isinstance(b, Types.NodesetType): # From XPath 1.0 Section 3.4: # If both objects to be compared are node-sets, then the comparison # will be true if and only if there is a node in the first node-set # and a node in the second node-set such that the result of # performing the comparison on the string-values of the two nodes # is true. if not (a and b): # One of the two node-sets is empty. In this case, according to # section 3.4 of the XPath rec, no node exists in one of the two # sets to compare, so *any* comparison must be false. return boolean.false # If it is a relational comparison, the actual comparison is done on # the string value of each of the nodes. This means that the values # are then converted to numbers for comparison. if relational: # NumberValue internally coerces a node to a string before # converting it to a number, so the "convert to string" clause # is handled. coerce = Conversions.NumberValue else: coerce = Conversions.StringValue # Convert the nodesets into lists of the converted values. a = map(coerce, a) b = map(coerce, b) # Now compare the items; if any compare True, we're done. for left in a: for right in b: if compare(left, right): return boolean.true return boolean.false # From XPath 1.0 Section 3.4: # If one object to be compared is a node-set and the other is a number, # then the comparison will be true if and only if there is a node in the # node-set such that the result of performing the comparison on the # number to be compared and on the result of converting the string-value # of that node to a number using the number function is true. If one # object to be compared is a node-set and the other is a string, then the # comparison will be true if and only if there is a node in the node-set # such that the result of performing the comparison on the string-value # of the node and the other string is true. If one object to be compared # is a node-set and the other is a boolean, then the comparison will be # true if and only if the result of performing the comparison on the # boolean and on the result of converting the node-set to a boolean using # the boolean function is true. # # (In other words, coerce each node to the same type as the other operand, # then compare them. Note, however, that relational comparisons convert # their operands to numbers.) 
if isinstance(a, Types.NodesetType): # a is nodeset if isinstance(b, Types.BooleanType): a = Conversions.BooleanValue(a) return compare(a, b) and boolean.true or boolean.false elif relational: b = Conversions.NumberValue(b) coerce = Conversions.NumberValue elif isinstance(b, Types.NumberType): coerce = Conversions.NumberValue else: b = Conversions.StringValue(b) coerce = Conversions.StringValue for node in a: if compare(coerce(node), b): return boolean.true else: # b is nodeset if isinstance(a, Types.BooleanType): b = Conversions.BooleanValue(b) return compare(a, b) and boolean.true or boolean.false elif relational: a = Conversions.NumberValue(a) coerce = Conversions.NumberValue elif isinstance(a, Types.NumberType): coerce = Conversions.NumberValue else: a = Conversions.StringValue(a) coerce = Conversions.StringValue for node in b: if compare(a, coerce(node)): return boolean.true return boolean.false
9,968
def test_cl_shift(options): """ Create tests for centerline shifts 8 out of 8 points are on one side of the mean >= 10 out of 11 points are on one side of the mean >= 12 out of 14 points are on one side of the mean >= 14 out of 17 points are on one side of the mean >= 16 out of 20 points are on one side of the mean """ windows = [ (8, Window(8, init=options.m)), (10, Window(11, init=options.m)), (12, Window(14, init=options.m)), (14, Window(17, init=options.m)), (16, Window(20, init=options.m)), ] cl = options.m def test(x): for n, w in windows: w.append(x) if np.sum(w.data > cl) >= n: err_out("%s is %g/%g points > centerline" % (w.data, n, w.n)) elif np.sum(w.data < cl) >= n: err_out("%s is %g/%g points < centerline" % (w.data, n, w.n)) return test
9,969
def _scale(tensor): """Scale a tensor based on min and max of each example and channel Resulting tensor has range (-1, 1). Parameters ---------- tensor : torch.Tensor or torch.autograd.Variable Tensor to scale of shape BxCxHxW Returns ------- Tuple (scaled_tensor, min, max), where min and max are tensors containing the values used for normalizing the tensor """ b, c, h, w = tensor.shape out = tensor.view(b, c, h * w) minimum, _ = out.min(dim=2, keepdim=True) out = out - minimum maximum, _ = out.max(dim=2, keepdim=True) out = out / maximum # out has range (0, 1) out = out * 2 - 1 # out has range (-1, 1) return out.view(b, c, h, w), minimum, maximum
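# Usage sketch (added illustration, not from the original source): assumes torch is imported.
x = torch.rand(2, 3, 8, 8) * 10 + 5               # values in (5, 15)
scaled, mins, maxs = _scale(x)
assert scaled.min().item() == -1.0 and scaled.max().item() == 1.0   # each example/channel spans (-1, 1)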
9,970
def get_moscow_oh(opening_hours): """ returns an OpeningHourBlock from a fake json corresponding to a POI located in moscow city for different opening_hours formats. """ return get_oh_block(opening_hours, lat=55.748, lon=37.588, country_code="RU")
9,971
def get_firebase_credential_errors(credentials: str): """ Wrapper to get error strings for test_firebase_credential_errors because otherwise the code is gross. Returns None if no errors occurred. """ try: test_firebase_credential_errors(credentials) return None except Exception as e: return str(e)
9,972
def url(endpoint, path): """append the provided path to the endpoint to build an url""" return f"{endpoint.rstrip('/')}/{path}"
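# Usage sketch (added illustration, not from the original source).
assert url("https://api.example.com/", "v1/items") == "https://api.example.com/v1/items"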
9,973
def is_collision(line_seg1, line_seg2):
    """
    Checks for a collision between line segments p1(x1, y1) -> q1(x2, y2)
    and p2(x3, y3) -> q2(x4, y4)
    """
    def on_segment(p1, p2, p3):
        # True if p2 lies within the bounding box of segment p1 -> p3
        if (p2[0] <= max(p1[0], p3[0])) and (p2[0] >= min(p1[0], p3[0])) and \
                (p2[1] <= max(p1[1], p3[1])) and (p2[1] >= min(p1[1], p3[1])):
            return True
        return False

    def orientation(p1, p2, p3):
        val = ((p2[1] - p1[1]) * (p3[0] - p2[0])) - ((p2[0] - p1[0]) * (p3[1] - p2[1]))
        if val == 0:
            return 0
        elif val > 0:
            return 1
        elif val < 0:
            return 2

    p1, q1 = line_seg1[0], line_seg1[1]
    p2, q2 = line_seg2[0], line_seg2[1]

    o1 = orientation(p1, q1, p2)
    o2 = orientation(p1, q1, q2)
    o3 = orientation(p2, q2, p1)
    o4 = orientation(p2, q2, q1)

    # General case: the segments straddle each other
    if (o1 != o2) and (o3 != o4):
        return True

    # Special cases: a collinear endpoint lying on the other segment.
    # Note: the original wrote `o1 == 0 & on_segment(...)`; `&` binds tighter than
    # `==`, which silently dropped the on_segment check, so `and` is used instead.
    if o1 == 0 and on_segment(p1, p2, q1):
        return True
    if o2 == 0 and on_segment(p1, q2, q1):
        return True
    if o3 == 0 and on_segment(p2, p1, q2):
        return True
    if o4 == 0 and on_segment(p2, q1, q2):
        return True
    return False
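# Usage sketch (added illustration, not from the original source);
# segments are ((x1, y1), (x2, y2)) pairs.
assert is_collision(((0, 0), (4, 4)), ((0, 4), (4, 0))) is True    # crossing diagonals
assert is_collision(((0, 0), (1, 1)), ((2, 2), (3, 3))) is False   # disjoint collinear segments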
9,974
def plot_bar_graph_one_time( example_table_xarray, time_index, predictor_indices, info_string=None, figure_object=None, axes_object=None): """Plots predictors at one time as bar graph. :param example_table_xarray: xarray table in format returned by `example_io.read_file`. :param time_index: Index of valid time to plot. :param predictor_indices: 1-D numpy array with indices of predictors to plot. :param info_string: Info string (to be appended to title). :param figure_object: Will plot on this figure (instance of `matplotlib.figure.Figure`). If None, will create new figure. :param axes_object: Will plot on these axes (instance of `matplotlib.axes._subplots.AxesSubplot`). If None, will create new axes. :return: figure_object: See input doc. :return: axes_object: See input doc. :return: pathless_output_file_name: Pathless name for output file. """ error_checking.assert_is_integer(time_index) error_checking.assert_is_geq(time_index, 0) error_checking.assert_is_integer_numpy_array(predictor_indices) error_checking.assert_is_geq_numpy_array(predictor_indices, 0) if info_string is not None: error_checking.assert_is_string(info_string) xt = example_table_xarray predictor_values = ( xt[example_utils.SATELLITE_PREDICTORS_UNGRIDDED_KEY].values[ time_index, predictor_indices ] ) num_predictors = len(predictor_values) y_coords = numpy.linspace( 0, num_predictors - 1, num=num_predictors, dtype=float ) if figure_object is None or axes_object is None: figure_object, axes_object = pyplot.subplots( 1, 1, figsize=(FIGURE_WIDTH_INCHES, FIGURE_HEIGHT_INCHES) ) axes_object.barh( y_coords, predictor_values, color=BAR_FACE_COLOUR, edgecolor=BAR_EDGE_COLOUR, linewidth=BAR_EDGE_WIDTH ) pyplot.yticks([], []) axes_object.set_xlim(MIN_NORMALIZED_VALUE, MAX_NORMALIZED_VALUE) predictor_names = xt.coords[ example_utils.SATELLITE_PREDICTOR_UNGRIDDED_DIM ].values[predictor_indices].tolist() for j in range(num_predictors): axes_object.text( 0, y_coords[j], predictor_names[j], color=BAR_FONT_COLOUR, horizontalalignment='center', verticalalignment='center', fontsize=BAR_FONT_SIZE, fontweight='bold' ) valid_time_unix_sec = ( xt.coords[example_utils.SATELLITE_TIME_DIM].values[time_index] ) valid_time_string = time_conversion.unix_sec_to_string( valid_time_unix_sec, TIME_FORMAT_SECONDS ) cyclone_id_string = xt[satellite_utils.CYCLONE_ID_KEY].values[time_index] if not isinstance(cyclone_id_string, str): cyclone_id_string = cyclone_id_string.decode('utf-8') title_string = 'Satellite for {0:s} at {1:s}'.format( cyclone_id_string, valid_time_string ) if info_string is not None: title_string += '; {0:s}'.format(info_string) axes_object.set_title(title_string) pathless_output_file_name = '{0:s}_{1:s}_scalar_satellite.jpg'.format( cyclone_id_string, valid_time_string ) return figure_object, axes_object, pathless_output_file_name
9,975
def reshape_nda_to_2d(arr):
    """Reshape np.array to 2-d, collapsing all leading axes into the first one.

    Note: `arr` is reshaped in place (its .shape attribute is reassigned).
    """
    sh = arr.shape
    if len(sh) < 3:
        return arr
    # integer division so the new shape is a tuple of ints under Python 3
    arr.shape = (arr.size // sh[-1], sh[-1])
    return arr
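# Usage sketch (added illustration, not from the original source): assumes numpy as np.
a = np.zeros((4, 5, 6))
b = reshape_nda_to_2d(a)
assert b.shape == (20, 6) and a.shape == (20, 6)   # `a` itself is reshaped in place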
9,976
def spellchecker(): """Spellcheck the Markdown and ReST files on the site""" if platform.system() == "Darwin": # Mac seems to use ispell as a default, but openbsd and linux # use aspell. No need to maintain exceptions for multiple # dictionaries. raise click.ClickException( "Spellchecker not supported on Mac due to different " "enchant backends" ) spelling_errors_found = False md_posts = glob.glob(os.path.join(SITE_BASE, "posts", "*.md")) md_pages = glob.glob(os.path.join(SITE_BASE, "stories", "*.md")) for file_to_check in md_pages + md_posts: en_spellchecker = enchant.checker.SpellChecker( "en_GB", filters=[enchant.tokenize.EmailFilter, enchant.tokenize.URLFilter] ) with open(file_to_check, 'r', encoding="utf-8") as f: lines = f.readlines() for exc in _get_spellcheck_exceptions(lines): en_spellchecker.add(exc) file_text = " ".join([strip_markdown_directives(l) for l in _non_directive_lines(lines)]) en_spellchecker.set_text(file_text) for err in en_spellchecker: spelling_errors_found = True context = "%s%s%s" % ( en_spellchecker.leading_context(30), en_spellchecker.word, en_spellchecker.trailing_context(30), ) spelling_error = \ "Not in dictionary: %s (file: %s " \ "context: %s). Suggestions: %s" % \ (err.word, os.path.basename(file_to_check), context, ", ".join(en_spellchecker.suggest(err.word)) ) print(spelling_error) if spelling_errors_found: raise click.ClickException("Spelling errors found.")
9,977
async def async_setup(hass, config): """Initialize the DuckDNS component.""" domain = config[DOMAIN][CONF_DOMAIN] token = config[DOMAIN][CONF_ACCESS_TOKEN] session = async_get_clientsession(hass) result = await _update_duckdns(session, domain, token) if not result: return False async def update_domain_interval(now): """Update the DuckDNS entry.""" await _update_duckdns(session, domain, token) async def update_domain_service(call): """Update the DuckDNS entry.""" await _update_duckdns(session, domain, token, txt=call.data[ATTR_TXT]) async_track_time_interval(hass, update_domain_interval, INTERVAL) hass.services.async_register( DOMAIN, SERVICE_SET_TXT, update_domain_service, schema=SERVICE_TXT_SCHEMA ) return result
9,978
def mock_signal(*args): """Mock creation of a binary signal array. :return: binary array :rtype: np.ndarray """ signal = np.array([1, 0, 1]) return signal
9,979
def matmul(a, b): """np.matmul defaults to bfloat16, but this helper function doesn't.""" return np.matmul(a, b, precision=jax.lax.Precision.HIGHEST)
9,980
def find_preard(path, metadata_pattern='*.json'): """ Match pre-ARD metadata with imagery in some location Parameters ---------- path : str or Path Path to a metadata file or directory of files (returning matches inside the directory) metadata_pattern : str, optional If ``path`` is a directory, this value is used as a glob inside ``path`` to locate metadata files Returns ------- dict[str, list[str]] Pairs of metadata filename to image filename(s) """ path = Path(path) if path.is_dir(): metadata = list(path.glob(metadata_pattern)) else: metadata = [path] preard = {} for meta in metadata: images = sorted(meta.parent.glob(meta.stem + '*.tif')) if images: preard[meta] = images else: logger.debug(f'Could not find images for metadata file {meta}') preard[meta] = [] return preard
9,981
def predictions(logit_1, logit_2, logit_3, logit_4, logit_5):
    """Converts per-digit logits into an understandable format.

    For example, a correct prediction for "2" is [2, 10, 10, 10, 10].
    """
    first_digits = np.argmax(logit_1, axis=1)
    second_digits = np.argmax(logit_2, axis=1)
    third_digits = np.argmax(logit_3, axis=1)
    fourth_digits = np.argmax(logit_4, axis=1)
    fifth_digits = np.argmax(logit_5, axis=1)
    stacked_digits = np.vstack((first_digits, second_digits, third_digits, fourth_digits, fifth_digits))
    rotated_digits = np.rot90(stacked_digits)[::-1]
    return rotated_digits
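# Usage sketch (added illustration, not from the original source): assumes numpy as np
# and 11 classes per digit position.
l1 = np.eye(11)[[2, 7]]                    # two samples: first digit is 2, then 7
l2 = l3 = l4 = l5 = np.eye(11)[[10, 10]]   # remaining positions predict class 10
predictions(l1, l2, l3, l4, l5)            # -> [[2, 10, 10, 10, 10], [7, 10, 10, 10, 10]]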
9,982
def main(args): """Main function""" tracker = Tracker(args.host, port=args.port, port_end=args.port_end, silent=args.silent) tracker.proc.join()
9,983
def savecommandline(theargs, thename):
    """Save the invoking command line to a text file.

    Parameters
    ----------
    theargs : list of str
        The command line tokens (typically sys.argv).
    thename : str
        Output file name root; the command line is written to
        thename + "_commandline.txt".

    Returns
    -------
    None

    """
    tide_io.writevec([" ".join(theargs)], thename + "_commandline.txt")
9,984
def plot_temperature_hist(runs): """Plot temperature histograms for all the runs""" num_runs = 0 for run in runs: if len(run.thermal.data_frame): num_runs += 1 if num_runs == 0: return axis = pre_plot_setup(ncols=num_runs) if num_runs == 1: axis = [axis] for ax, run in zip(axis, runs): run.thermal.plot_temperature_hist(ax, run.name)
9,985
def execute_parent(parent_path, child_path, input_tensor_npy, return_full_ctx=False): """Execute parent model containing a single StreamingDataflowPartition by replacing it with the model at child_path and return result.""" parent_model = load_test_checkpoint_or_skip(parent_path) iname = parent_model.graph.input[0].name oname = parent_model.graph.output[0].name sdp_node = parent_model.get_nodes_by_op_type("StreamingDataflowPartition")[0] sdp_node = getCustomOp(sdp_node) sdp_node.set_nodeattr("model", child_path) ret = execute_onnx(parent_model, {iname: input_tensor_npy}, True) if return_full_ctx: return ret else: return ret[oname]
9,986
def find_program(prog, paths):
    """Finds the specified program in env PATH, or tries a set of paths """
    loc = spawn.find_executable(prog)
    if loc is not None:
        return loc
    for loc in paths:
        p = os.path.join(loc, prog)
        if os.path.exists(p):
            return p
    return None
9,987
def fibonacci(position): """ Based on a position returns the number in the Fibonacci sequence on that position """ if position == 0: return 0 elif position == 1: return 1 return fibonacci(position-1)+fibonacci(position-2)
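# Usage sketch (added illustration, not from the original source).
assert [fibonacci(n) for n in range(8)] == [0, 1, 1, 2, 3, 5, 8, 13]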
9,988
def visualize_gebco(source, band, min=None, max=None): """ Specialized function to visualize GEBCO data :param source: String, Google Earth Engine image id :param band: String, band of image to visualize :return: Dictionary """ data_params = deepcopy(DATASETS_VIS[source]) # prevent mutation of global state if min is not None: data_params["bathy_vis_params"]["min"] = min if max is not None: data_params["topo_vis_params"]["max"] = max image = ee.Image(source) gebco = image.select(data_params["bandNames"][band]) land_mask = LANDMASK hillshaded = visualize_elevation( image=gebco, land_mask=land_mask, data_params=data_params, bathy_only=False, hillshade_image=True, ) url = _get_gee_url(hillshaded) info = {} info["dataset"] = "gebco" info["band"] = band linear_gradient = [] palette = ( data_params["bathy_vis_params"]["palette"] + data_params["topo_vis_params"]["palette"] ) n_colors = len(palette) offsets = np.linspace(0, 100, num=n_colors) for color, offset in zip(palette, offsets): linear_gradient.append( {"offset": "{:.3f}%".format(offset), "opacity": 100, "color": color} ) info.update( { "url": url, "linearGradient": linear_gradient, "min": data_params["bathy_vis_params"]["min"], "max": data_params["topo_vis_params"]["max"], "imageId": source, } ) return info
9,989
def show(): """ Send values for turning pixels on """ if not available: return 2 else: lockBus() try: bus.write_byte(arduinoAddress, 0x06) except: errorMsg = sys.exc_info()[0] errorHandler(5, errorMsg) unlockBus()
9,990
def mini_batch_generator(input_data, batch_size=64, shuffle=True): """Generator for training mini-batches Args: input_data (ndarray): Input training data. batch_size (int): Size of the mini batch. shuffle (bool): If the data is shuffled before mini batch generation. """ n_samples = input_data.shape[0] n_batches = n_samples // batch_size if shuffle: shuffled_idx = np.arange(n_samples) np.random.shuffle(shuffled_idx) input_data = input_data[shuffled_idx] for j in range(n_batches): yield input_data[(j * batch_size):((j + 1) * batch_size)]
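# Usage sketch (added illustration, not from the original source): assumes numpy as np.
data = np.arange(200).reshape(100, 2)
for batch in mini_batch_generator(data, batch_size=32):
    assert batch.shape == (32, 2)   # 3 full batches; the trailing 4 rows are dropped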
9,991
def bound_concurrency(size): """Decorator to limit concurrency on coroutine calls""" sem = asyncio.Semaphore(size) def decorator(func): """Actual decorator""" @functools.wraps(func) async def wrapper(*args, **kwargs): """Wrapper""" async with sem: return await func(*args, **kwargs) return wrapper return decorator
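# Usage sketch (added illustration, not from the original source): assumes asyncio and
# functools are imported; on Python 3.10+ the Semaphore binds to the running loop lazily.
@bound_concurrency(5)
async def fetch(i):
    await asyncio.sleep(0.01)   # at most 5 of these awaits run concurrently
    return i

async def gather_all():
    return await asyncio.gather(*(fetch(i) for i in range(20)))

results = asyncio.run(gather_all())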
9,992
def _download_file(url, required_length, STRICT_REQUIRED_LENGTH=True): """ <Purpose> Given the url, hashes and length of the desired file, this function opens a connection to 'url' and downloads the file while ensuring its length and hashes match 'required_hashes' and 'required_length'. tuf.util.TempFile is used instead of regular tempfile object because of additional functionality provided by 'tuf.util.TempFile'. <Arguments> url: A URL string that represents the location of the file. required_length: An integer value representing the length of the file. STRICT_REQUIRED_LENGTH: A Boolean indicator used to signal whether we should perform strict checking of required_length. True by default. We explicitly set this to False when we know that we want to turn this off for downloading the timestamp metadata, which has no signed required_length. <Side Effects> A 'tuf.util.TempFile' object is created on disk to store the contents of 'url'. <Exceptions> tuf.DownloadLengthMismatchError, if there was a mismatch of observed vs expected lengths while downloading the file. tuf.FormatError, if any of the arguments are improperly formatted. Any other unforeseen runtime exception. <Returns> A 'tuf.util.TempFile' file-like object which points to the contents of 'url'. """ # Do all of the arguments have the appropriate format? # Raise 'tuf.FormatError' if there is a mismatch. tuf.formats.URL_SCHEMA.check_match(url) tuf.formats.LENGTH_SCHEMA.check_match(required_length) # 'url.replace()' is for compatibility with Windows-based systems because # they might put back-slashes in place of forward-slashes. This converts it # to the common format. url = url.replace('\\', '/') logger.info('Downloading: '+str(url)) # NOTE: Not thread-safe. # Save current values or functions for restoration later. previous_socket_timeout = socket.getdefaulttimeout() previous_http_response_class = httplib.HTTPConnection.response_class # This is the temporary file that we will return to contain the contents of # the downloaded file. temp_file = tuf.util.TempFile() try: # NOTE: Not thread-safe. # Set timeout to induce non-blocking socket operations. socket.setdefaulttimeout(tuf.conf.SOCKET_TIMEOUT) # Replace the socket file-like object class with our safer version. httplib.HTTPConnection.response_class = SaferHTTPResponse # Open the connection to the remote file. connection = _open_connection(url) # We ask the server about how big it thinks this file should be. reported_length = _get_content_length(connection) # Then, we check whether the required length matches the reported length. _check_content_length(reported_length, required_length) # Download the contents of the URL, up to the required length, to a # temporary file, and get the total number of downloaded bytes. total_downloaded = _download_fixed_amount_of_data(connection, temp_file, required_length) # Does the total number of downloaded bytes match the required length? _check_downloaded_length(total_downloaded, required_length, STRICT_REQUIRED_LENGTH=STRICT_REQUIRED_LENGTH) except: # Close 'temp_file'; any written data is lost. temp_file.close_temp_file() logger.exception('Could not download URL: '+str(url)) raise else: return temp_file finally: # NOTE: Not thread-safe. # Restore previously saved values or functions. httplib.HTTPConnection.response_class = previous_http_response_class socket.setdefaulttimeout(previous_socket_timeout)
9,993
def tune_speed_librosa(src=None, sr=_sr, rate=1., out_type=np.ndarray):
    """Change the speech speed (time-stretch) of the audio.

    :param src: audio input, in any form accepted by anything2wav
    :param rate: speed factor
    :return: time-stretched audio as np.ndarray, or a BytesIO object otherwise
    """
    wav = anything2wav(src, sr=sr)
    spec = librosa.stft(wav)
    spec = zoom(spec.T, rate=1 / rate, is_same=0).T
    out = librosa.istft(spec)
    # out = librosa.griffinlim(spec, n_iter=10)
    if out_type is np.ndarray:
        return out
    else:
        return anything2bytesio(out, sr=sr)
9,994
def fqn_from_file(java_filepath: pathlib.Path) -> str: """Extract the expected fully qualified class name for the given java file. Args: java_filepath: Path to a .java file. """ if not java_filepath.suffix == ".java": raise ValueError("{} not a path to a .java file".format(java_filepath)) package = extract_package(java_filepath) simple_name = java_filepath.name[:-len(java_filepath.suffix)] return fqn(package, simple_name)
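# Usage sketch (added illustration, not from the original source): the path and package
# are hypothetical; pathlib is assumed imported, and extract_package/fqn come from the
# same module as fqn_from_file.
path = pathlib.Path("src/main/java/com/example/Foo.java")
# assuming Foo.java declares `package com.example;`:
#   fqn_from_file(path) == "com.example.Foo"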
9,995
def assert_raises(*args, **kwargs): """Assert an exception is raised as a context manager or by passing in a callable and its arguments. As a context manager: >>> with assert_raises(Exception): ... raise Exception Pass in a callable: >>> def raise_exception(arg, kwarg=None): ... raise Exception >>> assert_raises(Exception, raise_exception, 1, kwarg=234) """ if (len(args) == 1) and not kwargs: return _assert_raises_context_manager(args[0]) else: return _assert_raises(*args, **kwargs)
9,996
def test_tuple_init(): """Check validation at Tuple init.""" with pytest.raises(ValueError): Tuple(0) with pytest.raises(ValueError): Tuple(str) with pytest.raises(ValueError): Tuple({}) with pytest.raises(ValueError): Tuple([str]) with pytest.raises(ValueError): Tuple((str, int)) with pytest.raises(ValueError): Tuple((String(), int))
9,997
def print_log_info1(t0tot, t0_gN):
    """Log central reads per t0 set, in millions.

    Args:
        t0tot: mapping of t0 set name -> read-count totals
        t0_gN: mapping of t0 set name -> array-like of central read counts
               (summed and reported in millions)
    """
    logging.info("Central Reads per t0set, in millions:\n")
    # We iterate over the set names
    for k in t0tot.keys():
        try:
            logging.info(f"{k}: {t0_gN[k].sum()/e6:.2f}")
        except Exception:
            logging.info(f"Couldn't print value for key {k}")
9,998
def proxy(ctx, control, host, port, socket, proxy): """Settings to configure the connection to a Tor node acting as proxy.""" if control == 'port': if host is None or port is None: raise click.BadOptionUsage( option_name='control', message=f"--control mode '{control}' requires --host and --port to be defined as well.") elif control == 'socket': if socket is None: raise click.BadOptionUsage(option_name='control', message="--control mode 'socket' requires --socket to be defined as well.") return {'proxy': { 'control': control, 'host': host, 'port': port, 'socket': socket, 'proxy': proxy }}
9,999