def init_log(logfile=None, file_size=5, debug=False):
    """
    Initializes logging to file and console.

    :param logfile: the name of the file
    :param file_size: the max size of the file in megabytes, before wrapping occurs
    :param debug: Boolean to enable verbose logging
    :return: ``log`` object
    """
    # TODO: move into imported module
    if debug:
        log_lvl = logging.DEBUG
    else:
        log_lvl = logging.INFO

    log_formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d,(%(threadName)-10s),'
                                          '[%(levelname)s],%(funcName)s(%(lineno)d),%(message)s',
                                      datefmt='%Y-%m-%d %H:%M:%S')
    log_formatter.converter = time.gmtime

    if logfile is not None:
        log_object = logging.getLogger(logfile)
        log_handler = RotatingFileHandler(logfile, mode='a', maxBytes=file_size * 1024 * 1024,
                                          backupCount=2, encoding=None, delay=0)
        log_handler.setFormatter(log_formatter)
        log_object.addHandler(log_handler)
    else:
        log_object = logging.getLogger("temp_log")

    log_object.setLevel(log_lvl)

    console = logging.StreamHandler()
    console.setFormatter(log_formatter)
    console.setLevel(log_lvl)
    log_object.addHandler(console)
    return log_object
5,337,200
def symmetric_mean_absolute_percentage_error(a, b):
    """
    Calculates symmetric Mean Absolute Percentage Error (sMAPE).

    Args:
        a (array-like): Actual values.
        b (array-like): Predicted values.

    Returns:
        sMAPE as a float, in %.
    """
    a = np.reshape(a, (-1,))
    b = np.reshape(b, (-1,))
    return 100.0 * np.mean(2.0 * np.abs(a - b) / (np.abs(a) + np.abs(b))).item()
5,337,201
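A minimal usage sketch of symmetric_mean_absolute_percentage_error above, assuming numpy is imported as np and the function is in scope; the expected value is worked out by hand for two short vectors.

import numpy as np

actual = np.array([100.0, 200.0])
predicted = np.array([110.0, 180.0])
# per-element terms: 2*10/210 = 9.52%, 2*20/380 = 10.53%; their mean is ~10.03%
print(symmetric_mean_absolute_percentage_error(actual, predicted))  # ~10.03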
def exec_quiet(handle, *args, **kwargs):
    """
    Like exe.execute but doesn't print the exception.
    """
    try:
        val = handle(*args, **kwargs)
    except Exception:
        pass
    else:
        return val
5,337,202
def link_documents_bundles_with_journals(
    journal_path: str, issue_path: str, output_path: str
):
    """Looks up the relationship between journals and issues from JSON files
    extracted from an MST database. The result is written to a JSON file
    containing an object (dict) with journal identifiers as keys and arrays
    of the ids of the issues that make up each journal."""

    journals_bundles = {}
    extract_isis.create_output_dir(output_path)
    journals_as_json = reading.read_json_file(journal_path)
    issues_as_json = reading.read_json_file(issue_path)
    journals = conversion.conversion_journals_to_kernel(journals_as_json)
    issues = conversion.conversion_issues_to_xylose(issues_as_json)
    issues = filter_issues(issues)

    for journal in journals:
        journals_bundles[journal["id"]] = find_documents_bundles(journal, issues)

    with open(output_path, "w") as output:
        output.write(json.dumps(journals_bundles, indent=4, sort_keys=True))
5,337,203
def make_modified_function_def(original_type, name, original, target):
    """Make the modified function definition.

    :return: the definition for the modified function
    """
    arguments = format_method_arguments(name, original)
    argument_names = set(target.parameters)
    unavailable_arguments = [p for p in original.parameters if p not in argument_names]
    derived_from = format_derived_from(original_type, unavailable_arguments, original)
    raise_error = format_raise_errors(original_type, name, unavailable_arguments, original)
    return ("""
{0}
def {1}({2}):{3}""".format(derived_from, name, arguments, raise_error))
5,337,204
def send_is_typing_event(msisdn):
    """
    Sends IS_TYPING event to the user.

    Args:
        msisdn (str): The msisdn of the user in E.164 format, e.g. '+14155555555'.
    """
    agent_event = {
        'eventType': 'IS_TYPING'
    }

    send_event_with_body(msisdn, agent_event, str(uuid.uuid4().int) + "a")
5,337,205
async def test_non_json_response(aresponses):
    """Test that the proper error is raised when the response text isn't JSON."""
    aresponses.add(
        "www.airvisual.com",
        "/api/v2/node/12345",
        "get",
        aresponses.Response(
            text="This is a valid response, but it isn't JSON.",
            headers={"Content-Type": "application/json"},
            status=200,
        ),
    )

    with pytest.raises(AirVisualError):
        async with ClientSession() as session:
            cloud_api = CloudAPI(TEST_API_KEY, session=session)
            await cloud_api.node.get_by_node_id("12345")
5,337,206
def fetch_tiles(server, tile_def_generator, output=pathlib.Path('.'), force=False):
    """
    fetch and store tiles
    @param server server definition object
    @param tile_def_generator generator of tile definitions consisting of [x, y, z, bbox] tuples
    @param output output folder path
    @param force flag to force to overwrite
    """
    input_queue = multiprocessing.JoinableQueue()
    stop_event = multiprocessing.Event()
    statistic = multiprocessing.Manager().dict()

    workers = []
    for i in range(server["concurrency"]):
        p = multiprocessing.Process(target=fetch_tile_worker,
                                    args=(i, input_queue, stop_event, server, output, force, statistic))
        workers.append(p)
        p.start()

    for [x, y, z, *bbox] in tile_def_generator:
        input_queue.put([x, y, z, bbox])

    input_queue.join()
    stop_event.set()
    for w in workers:
        w.join()

    def collect_result(s1, s2):
        if s1:
            return {
                "counter_total": s1["counter_total"] + s2["counter_total"],
                "counter_attempt": s1["counter_attempt"] + s2["counter_attempt"],
                "counter_ok": s1["counter_ok"] + s2["counter_ok"]
            }
        else:
            return s2

    result = reduce(collect_result, statistic.values(), None)
    print("Total: {}, Ok: {}, Failed: {}, Skipped: {}".format(
        result["counter_total"],
        result["counter_ok"],
        result["counter_attempt"] - result["counter_ok"],
        result["counter_total"] - result["counter_attempt"]))
5,337,207
def _UTMLetterDesignator(lat):
    """
    This routine determines the correct UTM letter designator for the given latitude
    returns 'Z' if latitude is outside the UTM limits of 84N to 80S.
    Written by Chuck Gantz- chuck.gantz@globalstar.com
    """
    if 84 >= lat >= 72: return 'X'
    elif 72 > lat >= 64: return 'W'
    elif 64 > lat >= 56: return 'V'
    elif 56 > lat >= 48: return 'U'
    elif 48 > lat >= 40: return 'T'
    elif 40 > lat >= 32: return 'S'
    elif 32 > lat >= 24: return 'R'
    elif 24 > lat >= 16: return 'Q'
    elif 16 > lat >= 8: return 'P'
    elif 8 > lat >= 0: return 'N'
    elif 0 > lat >= -8: return 'M'
    elif -8 > lat >= -16: return 'L'
    elif -16 > lat >= -24: return 'K'
    elif -24 > lat >= -32: return 'J'
    elif -32 > lat >= -40: return 'H'
    elif -40 > lat >= -48: return 'G'
    elif -48 > lat >= -56: return 'F'
    elif -56 > lat >= -64: return 'E'
    elif -64 > lat >= -72: return 'D'
    elif -72 > lat >= -80: return 'C'
    else: return 'Z'  # if the latitude is outside the UTM limits
5,337,208
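A few hedged spot checks for _UTMLetterDesignator above, picked from the standard UTM latitude bands; they follow directly from the comparison chain in the function.

assert _UTMLetterDesignator(51.5) == 'U'   # 48N <= 51.5 < 56N falls in band U
assert _UTMLetterDesignator(-33.9) == 'H'  # -40S <= -33.9 < -32S falls in band H
assert _UTMLetterDesignator(85.0) == 'Z'   # outside the UTM limits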
def is_ignored(file: str) -> bool:
    """
    Check if the given file is ignored

    :param file: the file
    :return: if the file is ignored or not
    """
    for ignored in config.get('input').get('ignored'):
        ignored_regex = re.compile(ignored)
        if re.match(ignored_regex, file):
            return True
    return False
5,337,209
def plot_learning_curve(classifier, X, y, measurements=[0.1, 0.325, 0.55, 0.775, 1.], metric=None, n_jobs=-1, save_to_folder=os.path.join(os.path.dirname(__file__), "ExperimentResults")): """ Calculates the learning curve for a given model (classifier or regressor). The methods takes the evaluation metric as a parameter. Additionally the method saves a plot of the calculated learning curve to a file. :param classifier: learning model :type classifier: sklearn estimator :param X: training data :type X: DataFrame :param y: training labels :type y: Series :param measurements: number of measurements of classifier/regressor performance (number of point defining the learning curve) :type measurements: int :param metric: evaluation metric :type metric: sklearn scorer :param n_jobs: number of threads :type n_jobs: int :param save_to_folder: determines the folder where the learning curve plot should be saved :type save_to_folder: str :return: plt, file_name """ sns.set_style("whitegrid") plt.figure() plt.title("Learning curves") plt.xlabel("Training examples") plt.ylabel("Score") train_sizes, train_scores, test_scores = skms.learning_curve(classifier, X, y, n_jobs=n_jobs, train_sizes=measurements, scoring=metric) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) plt.grid() plt.fill_between(train_sizes, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.1, color="r") plt.fill_between(train_sizes, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.1, color="g") plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score") plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Cross-validation score") plt.legend(loc="best") file_name = _get_file_name("", classifier, "learning_curve", "png") if not os.path.exists(save_to_folder): os.mkdir(save_to_folder) plt.savefig(os.path.join(save_to_folder, file_name)) plt.close()
5,337,210
def get_powerups_wf(original_wf): """ get user powerups setting. """ idx_list = get_fws_and_tasks(original_wf) for idx_fw, idx_t in idx_list: f0 = original_wf.fws[idx_fw].tasks[idx_t] if not isinstance(f0, Iterable) or isinstance(f0, str) : continue for k0 in f0: if debug: print("level 0", k0, type(f0)) if k0=='powerups' : if debug: print("level 0", f0[k0]) return f0[k0] else: try: f1 = f0[k0] except: f1 = k0 if not isinstance(f1, Iterable) or isinstance(f1, str) : continue for k1 in f1: if debug: print("level 1", k1, type(f1)) if str(k1)=='powerups' : if debug: print("level 1", f1[k1]) return f1[k1] else: try: f2 = f1[k1] except: f2 = k1 if not isinstance(f2, Iterable) or isinstance(f2, str) : continue for k2 in f2: if debug: print("level 2", k2, type(f2)) if str(k2)=='powerups' : if debug: print("level 2", f2[k2]) return f2[k2] else: try: f3 = f2[k2] except: f3=k2 if not isinstance(f3, Iterable) or isinstance(f3, str) : continue for k3 in f3: if debug: print("level 3", k3, type(f3)) if str(k3)=='powerups' : if debug: print(type(f0),type(f1),type(f2),type(f3)) if debug: print("level 3", f3[k3]) return f3[k3] else: try: f4 = f3[k3] except: f4=k3 if not isinstance(f4, Iterable) or isinstance(f4, str) : continue for k4 in f4: if debug: print("level 4", k4, type(f4)) if str(k4)=='powerups' : if debug: print("level 4", f4[k4]) return f4[k4] return {}
5,337,211
def enumerate_phone_column_index_from_row(row):
    """Enumerates the phone column from a given row. Uses Regexs

    Parameters
    ----------
    row : list
        list of cell values from row

    Returns
    -------
    int
        phone column index enumerated from row
    """
    # initial phone_column_index value
    phone_column_index = -1
    # generate cell values from row
    cell_values = get_cell_values_from_row(row)
    # iterate through cell values
    for i in range(len(cell_values)):
        value = cell_values[i]
        # Check if value matches "[Pp]hone || 0000000000"
        if is_value_phone_identifier(value):
            phone_column_index = i
            break
    return phone_column_index
5,337,212
def fetch_words(url):
    """
    Fetch a list of words from URL

    Args:
        url: The URL of a UTF-8 text document

    Returns:
        A list of strings containing the words from the document.
    """
    story = urlopen(url)
    story_words = []
    for line in story:
        line_words = line.decode('utf-8').split()
        for word in line_words:
            story_words.append(word)
    story.close()
    return story_words
5,337,213
def pipe(func):
    """command pipe"""

    @wraps(func)
    def wrapper(*args, **kwargs):
        if PIPE_LOCK.locked():
            LOGGER.debug("process locked")
            return None

        window = sublime.active_window()
        view = window.active_view()
        status_key = "pythontools"

        with PIPE_LOCK:
            try:
                view.set_status(status_key, "BUSY")
                return func(*args, **kwargs)
            finally:
                view.erase_status(status_key)

    return wrapper
5,337,214
def dot_keys_to_nested(data: Dict) -> Dict:
    """old['aaaa.bbbb'] -> d['aaaa']['bbbb']

    Args:
        data (Dict): [description]

    Returns:
        Dict: [description]
    """
    rules = defaultdict(lambda: dict())
    for key, val in data.items():
        if '.' in key:
            key, _, param = key.partition('.')
            rules[key][param] = val
    return rules
5,337,215
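A small hedged usage sketch of dot_keys_to_nested above, assuming the same imports as the snippet; note that keys without a dot are dropped, which follows from the `if '.' in key` guard.

from collections import defaultdict
from typing import Dict

flat = {"model.depth": 12, "model.width": 768, "seed": 0}
print(dict(dot_keys_to_nested(flat)))
# {'model': {'depth': 12, 'width': 768}}  -- 'seed' has no dot and is not kept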
def vec2adjmat(source, target, weight=None, symmetric=True):
    """Convert source and target into adjacency matrix.

    Parameters
    ----------
    source : list
        The source node.
    target : list
        The target node.
    weight : list of int
        The Weights between the source-target values
    symmetric : bool, optional
        Make the adjacency matrix symmetric with the same number of rows as columns. The default is True.

    Returns
    -------
    pd.DataFrame
        adjacency matrix.

    Examples
    --------
    >>> source=['Cloudy','Cloudy','Sprinkler','Rain']
    >>> target=['Sprinkler','Rain','Wet_Grass','Wet_Grass']
    >>> vec2adjmat(source, target)
    >>>
    >>> weight=[1,2,1,3]
    >>> vec2adjmat(source, target, weight=weight)
    """
    if len(source) != len(target):
        raise Exception('[hnet] >Source and Target should have equal elements.')
    if weight is None:
        weight = [1] * len(source)

    df = pd.DataFrame(np.c_[source, target], columns=['source', 'target'])
    # Make adjacency matrix
    adjmat = pd.crosstab(df['source'], df['target'], values=weight, aggfunc='sum').fillna(0)
    # Get all unique nodes
    nodes = np.unique(list(adjmat.columns.values) + list(adjmat.index.values))
    # nodes = np.unique(np.c_[adjmat.columns.values, adjmat.index.values].flatten())

    # Make the adjacency matrix symmetric
    if symmetric:
        # Add missing columns
        node_columns = np.setdiff1d(nodes, adjmat.columns.values)
        for node in node_columns:
            adjmat[node] = 0

        # Add missing rows
        node_rows = np.setdiff1d(nodes, adjmat.index.values)
        adjmat = adjmat.T
        for node in node_rows:
            adjmat[node] = 0
        adjmat = adjmat.T

        # Sort to make ordering of columns and rows similar
        [IA, IB] = ismember(adjmat.columns.values, adjmat.index.values)
        adjmat = adjmat.iloc[IB, :]
        adjmat.index.name = 'source'
        adjmat.columns.name = 'target'

    return(adjmat)
5,337,216
def ll_(msg, t0=None):
    """
    ... ending logging msg giving a time lapse if starting time stamp given
    """
    import time
    import logging
    logging.info(' {}{}'.format(msg, ' <--' if t0 else ''))
    if t0:
        logging.info(' ' + rTime_(time.time() - t0))
    logging.info(' ')
5,337,217
def do_confusion_parks( qsos: QSOLoaderDR16Q, dla_cnn: str = "data/dr16q/distfiles/DR16Q_v4.fits", snr: float = -1.0, dla_confidence: float = 0.98, p_thresh: float = 0.98, lyb: bool = True, ): """ plot the multi-DLA confusion matrix between our MAP predictions and Parks' predictions """ if "dla_catalog_parks" not in dir(qsos): qsos.load_dla_parks( dla_cnn, p_thresh=dla_confidence, multi_dla=False, num_dla=1 ) confusion_matrix, _ = qsos.make_multi_confusion( qsos.dla_catalog_parks, dla_confidence, p_thresh, snr=snr, lyb=lyb ) size, _ = confusion_matrix.shape print("Confusion Matrix Garnett's Multi-DLA versus Parks : ") print("----") for i in range(size): print("{} DLA".format(i), end="\t") for j in range(size): print("{}".format(confusion_matrix[i, j]), end=" ") print("") print("Mutli-DLA disagreements : ") print("----") for i in range(size): num = ( confusion_matrix[(i + 1) :, 0 : (i + 1)].sum() + confusion_matrix[0 : (i + 1), (i + 1) :].sum() ) print( "Error between >= {} DLAs and < {} DLAs: {:.2g}".format( i + 1, i + 1, num / confusion_matrix.sum() ) )
5,337,218
def configure(spec, host, port, user, password, dbname, prompt, attributes, memberships, ownerships, privileges, live, verbose): """ Configure the role attributes, memberships, object ownerships, and/or privileges of a database cluster to match a desired spec. Note that attributes and memberships are database cluster-wide settings, i.e. they are the same across multiple databases within a given Postgres instance. Ownerships and privileges are specific to each individual database within a Postgres instance. Inputs: spec - str; the path for the configuration file host - str; the database server host port - str; the database server port user - str; the database user name password - str; the database user's password dbname - str; the database to connect to and configure prompt - bool; whether to prompt for a password attributes - bool; whether to configure the role attributes for the specified database cluster memberships - bool; whether to configure the role memberships for the specified database cluster ownerships - bool; whether to configure the ownerships for the specified database privileges - bool; whether to configure the privileges for the specified database live - bool; whether to apply the changes (True) or just show what changes would be made without actually appyling them (False) verbose - bool; whether to show all queries that are executed and all debug log messages during execution """ if verbose: root_logger = logging.getLogger('') root_logger.setLevel(logging.DEBUG) if prompt: password = getpass.getpass() db_connection = common.get_db_connection(host, port, dbname, user, password) cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor) spec = load_spec(spec) sql_to_run = [] password_changed = False # Initialize this in case the attributes module isn't run verify_spec(spec) if attributes: sql_to_run.append(create_divider('attributes')) # Password changes happen within the attributes.py module itself so we don't leak # passwords; as a result we need to see if password changes occurred module_sql, all_password_sql_to_run = analyze_attributes(spec, cursor, verbose) run_module_sql(module_sql, cursor, verbose) if all_password_sql_to_run: password_changed = True run_password_sql(cursor, all_password_sql_to_run) sql_to_run.extend(module_sql) if memberships: sql_to_run.append(create_divider('memberships')) module_sql = analyze_memberships(spec, cursor, verbose) run_module_sql(module_sql, cursor, verbose) sql_to_run.extend(module_sql) if ownerships: sql_to_run.append(create_divider('ownerships')) module_sql = analyze_schemas(spec, cursor, verbose) run_module_sql(module_sql, cursor, verbose) sql_to_run.extend(module_sql) if privileges: sql_to_run.append(create_divider('privileges')) module_sql = analyze_privileges(spec, cursor, verbose) run_module_sql(module_sql, cursor, verbose) sql_to_run.extend(module_sql) changed = password_changed or has_changes(sql_to_run) if changed and live: logger.debug('Committing changes') db_connection.commit() else: db_connection.rollback() # Make sure there is at least 1 line with a real change (vs. all headers) if changed: click.secho(HEADER.format('LIVE' if live else 'CHECK'), fg='green') for statement in sql_to_run: click.secho(statement, fg='green') else: click.secho(SUCCESS_MSG, fg='green')
5,337,219
def is_valid(number):
    """Check if the number provided is a valid PAN. This checks the length
    and formatting."""
    try:
        return bool(validate(number))
    except ValidationError:
        return False
5,337,220
def remove_spaces(string: str):
    """Removes all whitespaces from the given string"""
    if string is None:
        return ""
    return "".join(l for l in str(string) if l not in WHITESPACES)
5,337,221
def decode_event_to_internal(abi, log_event): """ Enforce the binary for internal usage. """ # Note: All addresses inside the event_data must be decoded. decoded_event = decode_event(abi, log_event) if not decoded_event: raise UnknownEventType() # copy the attribute dict because that data structure is immutable data = dict(decoded_event) args = dict(data['args']) data['args'] = args # translate from web3's to raiden's name convention data['block_number'] = log_event.pop('blockNumber') data['transaction_hash'] = log_event.pop('transactionHash') assert data['block_number'], 'The event must have the block_number' assert data['transaction_hash'], 'The event must have the transaction hash field' event = data['event'] if event == EVENT_TOKEN_NETWORK_CREATED: args['token_network_address'] = to_canonical_address(args['token_network_address']) args['token_address'] = to_canonical_address(args['token_address']) elif event == ChannelEvent.OPENED: args['participant1'] = to_canonical_address(args['participant1']) args['participant2'] = to_canonical_address(args['participant2']) elif event == ChannelEvent.DEPOSIT: args['participant'] = to_canonical_address(args['participant']) elif event == ChannelEvent.BALANCE_PROOF_UPDATED: args['closing_participant'] = to_canonical_address(args['closing_participant']) elif event == ChannelEvent.CLOSED: args['closing_participant'] = to_canonical_address(args['closing_participant']) elif event == ChannelEvent.UNLOCKED: args['participant'] = to_canonical_address(args['participant']) args['partner'] = to_canonical_address(args['partner']) return Event( originating_contract=to_canonical_address(log_event['address']), event_data=data, )
5,337,222
def plot_msa_info(msa):
    """
    Plot a representation of the MSA coverage.
    Copied from https://github.com/sokrypton/ColabFold/blob/main/beta/colabfold.py
    """
    msa_arr = np.unique(msa, axis=0)
    total_msa_size = len(msa_arr)
    print(f"\n{total_msa_size} Sequences Found in Total\n")

    if total_msa_size > 1:
        plt.figure(figsize=(8, 5), dpi=100)
        plt.title("Sequence coverage")
        seqid = (msa[0] == msa_arr).mean(-1)
        seqid_sort = seqid.argsort()
        non_gaps = (msa_arr != 20).astype(float)
        non_gaps[non_gaps == 0] = np.nan
        plt.imshow(
            non_gaps[seqid_sort] * seqid[seqid_sort, None],
            interpolation="nearest",
            aspect="auto",
            cmap="rainbow_r",
            vmin=0,
            vmax=1,
            origin="lower",
            extent=(0, msa_arr.shape[1], 0, msa_arr.shape[0]),
        )
        plt.plot((msa_arr != 20).sum(0), color="black")
        plt.xlim(0, msa_arr.shape[1])
        plt.ylim(0, msa_arr.shape[0])
        plt.colorbar(
            label="Sequence identity to query",
        )
        plt.xlabel("Positions")
        plt.ylabel("Sequences")
        plt.show()
    else:
        print("Unable to display MSA of length 1")
5,337,223
def run_ode_solver(system, slope_func, **options): """Computes a numerical solution to a differential equation. `system` must contain `init` with initial conditions, `t_0` with the start time, and `t_end` with the end time. It can contain any other parameters required by the slope function. `options` can be any legal options of `scipy.integrate.solve_ivp` system: System object slope_func: function that computes slopes returns: TimeFrame """ # make sure `system` contains `init` if not hasattr(system, 'init'): msg = """It looks like `system` does not contain `init` as a system variable. `init` should be a State object that specifies the initial condition:""" raise ValueError(msg) # make sure `system` contains `t_end` if not hasattr(system, 't_end'): msg = """It looks like `system` does not contain `t_end` as a system variable. `t_end` should be the final time:""" raise ValueError(msg) # make the system parameters available as globals unpack(system) # the default value for t_0 is 0 t_0 = getattr(system, 't_0', 0) # try running the slope function with the initial conditions # try: # slope_func(init, t_0, system) # except Exception as e: # msg = """Before running scipy.integrate.solve_ivp, I tried # running the slope function you provided with the # initial conditions in `system` and `t=t_0` and I got # the following error:""" # logger.error(msg) # raise(e) # wrap the slope function to reverse the arguments and add `system` f = lambda t, y: slope_func(y, t, system) def wrap_event(event): """Wrap the event functions. Make events terminal by default. """ wrapped = lambda t, y: event(y, t, system) wrapped.terminal = getattr(event, 'terminal', True) wrapped.direction = getattr(event, 'direction', 0) return wrapped # wrap the event functions so they take the right arguments events = options.pop('events', []) try: events = [wrap_event(event) for event in events] except TypeError: events = wrap_event(events) # remove dimensions from the initial conditions. # we need this because otherwise `init` gets copied into the # results array along with its units init_no_dim = [getattr(x, 'magnitude', x) for x in init] # if the user did not provide t_eval or events, return # equally spaced points if 't_eval' not in options: if not events: options['t_eval'] = linspace(t_0, t_end, 51) # run the solver with units_off(): bunch = solve_ivp(f, [t_0, t_end], init_no_dim, events=events, **options) # separate the results from the details y = bunch.pop('y') t = bunch.pop('t') details = ModSimSeries(bunch) # pack the results into a TimeFrame results = TimeFrame(np.transpose(y), index=t, columns=init.index) return results, details
5,337,224
def case_configuration(group_id, det_obj, edges):
    """
    Get all the needed information of the detectors for the chosen edges, as well
    as only those trajectories that map onto one of the edges.

    Parameters
    ----------
    group_id
    det_obj
    edges

    Returns
    -------
    """
    ds = det_obj.detector_selection(edges)
    id_ft_pan = list(set(det_obj.features.index.get_level_values(0)) & set(edges))
    id_ft_pan.sort()
    ds_ft = det_obj.features.loc[(id_ft_pan,)]
    ds_ft.attrs = det_obj.features.attrs
    lt = td.get_lt(group_id=group_id, edges=edges, gdf=True)
    return ds, ds_ft, lt
5,337,225
def test_formats(format):
    """
    Testing different formats for StreamGear
    """
    StreamGear(output="output.mpd", format=format, logging=True)
5,337,226
def _remarks(item: str) -> str:
    """Returns the remarks. Reserved for later parsing"""
    return item
5,337,227
def unauthorized():  # TODO: security
    """Redirect unauthorised users to Login page."""
    flash('Please log in to access this page.', 'danger')
    return redirect(url_for('auth.login', next=request.path))
5,337,228
def register_elastic_ip(ElasticIp=None, StackId=None): """ Registers an Elastic IP address with a specified stack. An address can be registered with only one stack at a time. If the address is already registered, you must first deregister it by calling DeregisterElasticIp . For more information, see Resource Management . See also: AWS API Documentation Exceptions :example: response = client.register_elastic_ip( ElasticIp='string', StackId='string' ) :type ElasticIp: string :param ElasticIp: [REQUIRED]\nThe Elastic IP address.\n :type StackId: string :param StackId: [REQUIRED]\nThe stack ID.\n :rtype: dict ReturnsResponse Syntax { 'ElasticIp': 'string' } Response Structure (dict) -- Contains the response to a RegisterElasticIp request. ElasticIp (string) -- The Elastic IP address. Exceptions OpsWorks.Client.exceptions.ValidationException OpsWorks.Client.exceptions.ResourceNotFoundException :return: { 'ElasticIp': 'string' } :returns: OpsWorks.Client.exceptions.ValidationException OpsWorks.Client.exceptions.ResourceNotFoundException """ pass
5,337,229
def delete_streaming_distribution(Id=None, IfMatch=None): """ Delete a streaming distribution. To delete an RTMP distribution using the CloudFront API, perform the following steps. For information about deleting a distribution using the CloudFront console, see Deleting a Distribution in the Amazon CloudFront Developer Guide . See also: AWS API Documentation Exceptions :example: response = client.delete_streaming_distribution( Id='string', IfMatch='string' ) :type Id: string :param Id: [REQUIRED]\nThe distribution ID.\n :type IfMatch: string :param IfMatch: The value of the ETag header that you received when you disabled the streaming distribution. For example: E2QWRUHAPOMQZL . :returns: Id (string) -- [REQUIRED] The distribution ID. IfMatch (string) -- The value of the ETag header that you received when you disabled the streaming distribution. For example: E2QWRUHAPOMQZL . """ pass
5,337,230
async def monitor(obj, state, only, add_date, ignore):
    """Monitor a DistKV subtree"""
    flushing = not state
    seen = False

    async with obj.client.watch(
        obj.path, nchain=obj.meta, fetch=state, max_depth=0 if only else -1
    ) as res:
        async for r in res:
            if add_date and "value" in r:
                add_dates(r.value)
            if any(p == r.path[: len(p)] for p in ignore):
                continue
            if r.get("state", "") == "uptodate":
                if only and not seen:
                    # value doesn't exist
                    return
                flushing = True
            else:
                del r["seq"]
                if only:
                    try:
                        print(r.value, file=obj.stdout)
                        continue
                    except AttributeError:
                        # value has been deleted
                        continue
            if flushing:
                r["time"] = time.time()
                r["_time"] = datetime.datetime.now().isoformat(sep=" ", timespec="milliseconds")
            yprint(r, stream=obj.stdout)
            print("---", file=obj.stdout)
            if flushing:
                obj.stdout.flush()
            seen = True
5,337,231
def _date_to_datetime(value):
    """Convert a date to a datetime for datastore storage.

    Args:
        value: A datetime.date object.

    Returns:
        A datetime object with time set to 0:00.
    """
    assert isinstance(value, datetime.date)
    return datetime.datetime(value.year, value.month, value.day)
5,337,232
def DirType(d):
    """
    given a string path to a directory, D, verify it can be used.
    """
    d = os.path.abspath(d)
    if not os.path.exists(d):
        raise ArgumentTypeError('DirType:%s does not exist' % d)
    if not os.path.isdir(d):
        raise ArgumentTypeError('DirType:%s is not a directory' % d)
    if os.access(d, os.R_OK):
        return d
    else:
        raise ArgumentTypeError('DirType:%s is not a readable dir' % d)
5,337,233
def get_posted_float(key):
    """
    Retrieve a named float value from a POSTed form

    :param key: Value key
    :return: Value or None if not specified
    """
    value = request.form[key]
    return float(value) if value else None
5,337,234
def te_ds(mass, norm_vel, x_ratios, source_distance, te_einstein, gamma, sigma_total,
          xval, val):
    """Returns the probability of a sampled value T_E by weighting from the T_E
    probability distribution of the data
    """
    if min(xval) < te_einstein <= max(xval):
        omegac = gamma*norm_vel*np.sqrt(x_ratios*(1.-x_ratios))*(mass)**(-1./2.)
        pte = np.interp(te_einstein, xval, val)
        # print(pte)
        # print(probulge_ds(omegac,source_distance,mass,norm_vel,x_ratios,sigma_total,var = "unfixed"))
        # print(probdisk_ds(omegac,source_distance,mass,norm_vel,x_ratios,sigma_total,var = "unfixed"))
        # print(Big_phi_source(source_distance,SIGMA_SOURCE_T))
        prob = galfunc.probulge_ds(omegac, source_distance, mass, norm_vel, x_ratios,
                                   sigma_total, var="unfixed") + \
               galfunc.probdisk_ds(omegac, source_distance, mass, norm_vel, x_ratios,
                                   sigma_total, var="unfixed")
        prob2 = galfunc.big_phi_source(source_distance, SIGMA_SOURCE_T)*pte
        # print('interal', prob)
        # print('interal2', prob2)
        return prob*prob2
    else:
        return 0.
5,337,235
def roulette(fitness_values, return_size, elite=0):
    """
    Perform a roulette wheel selection
    Return return_size item indices
    """
    sorted_indices = np.argsort(fitness_values)
    c_sorted = np.sort(fitness_values).cumsum()
    c_sorted /= np.max(c_sorted)
    sampled = [sorted_indices[np.sum(np.random.rand() > c_sorted)] for _ in
               xrange(return_size)]
    elites = sorted_indices[::-1][:elite].tolist()
    return sampled, elites
5,337,236
def working_dir(val, **kwargs):  # pylint: disable=unused-argument
    """
    Must be an absolute path
    """
    try:
        is_abs = os.path.isabs(val)
    except AttributeError:
        is_abs = False
    if not is_abs:
        raise SaltInvocationError("'{0}' is not an absolute path".format(val))
    return val
5,337,237
def convert(file, destination):
    """Convert a user.yaml FILE to the new format.

    If a DESTINATION is provided, saves the result as a file. Otherwise, print
    the result."""
    with open(file, "r") as f:
        user_yaml = f.read()
    convert_old_user_yaml_to_new_user_yaml(user_yaml, destination)
5,337,238
def import_blank_from_ipuz(ipuz, blank):
    """Load a blank grid from an ipuz file into the database."""
    data = json.loads(ipuz.read().decode('latin_1'))
    for y, row in enumerate(data['puzzle']):
        for x, cell in enumerate(row):
            if cell == "#":
                block = Block(blank=blank, x=x, y=y)
                block.save()
5,337,239
def apply_latency_predictor_cli(args): """apply latency predictor to predict model latency according to the command line interface arguments """ if not args.predictor: logging.keyinfo('You must specify a predictor. Use "nn-meter --list-predictors" to see all supporting predictors.') return # specify model type if args.tensorflow: input_model, model_type, model_suffix = args.tensorflow, "pb", ".pb" elif args.onnx: input_model, model_type, model_suffix = args.onnx, "onnx", ".onnx" elif args.nn_meter_ir: input_model, model_type, model_suffix = args.nn_meter_ir, "nnmeter-ir", ".json" elif args.torchvision: # torch model name from torchvision model zoo input_model_list, model_type = args.torchvision, "torch" # load predictor predictor = load_latency_predictor(args.predictor, args.predictor_version) # specify model for prediction if not args.torchvision: # input of tensorflow, onnx, nnmeter-ir and nni-ir is file name, while input of torchvision is string list input_model_list = [] if os.path.isfile(input_model): input_model_list = [input_model] elif os.path.isdir(input_model): input_model_list = glob(os.path.join(input_model, "**" + model_suffix)) input_model_list.sort() logging.info(f'Found {len(input_model_list)} model in {input_model}. Start prediction ...') else: logging.error(f'Cannot find any model satisfying the arguments.') # predict latency result = {} for model in input_model_list: latency = predictor.predict(model, model_type) # in unit of ms result[os.path.basename(model)] = latency logging.result(f'[RESULT] predict latency for {os.path.basename(model)}: {latency} ms') return result
5,337,240
def create_sample_tree():
    """
          1
         / \
        2   3
           / \
          4   5
    """
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.right.left = TreeNode(4)
    root.right.right = TreeNode(5)
    return root
5,337,241
def sp_normalize(adj_def, device='cpu'):
    """
    :param adj: scipy.sparse.coo_matrix
    :param device: default as cpu
    :return: normalized_adj:
    """
    adj_ = sp.coo_matrix(adj_def)
    adj_ = adj_ + sp.coo_matrix(sp.eye(adj_def.shape[0]), dtype=np.float32)
    rowsum = np.array(adj_.sum(axis=1)).reshape(-1)
    norm_unit = np.float_power(rowsum, -0.5).astype(np.float32)
    degree_mat_inv_sqrt = sp.diags(norm_unit)
    degree_mat_sqrt = copy.copy(degree_mat_inv_sqrt)
    # degree_mat_sqrt = degree_mat_inv_sqrt.to_dense()
    support = adj_.__matmul__(degree_mat_sqrt)
    # support = coo_to_csp(support.tocoo())
    # degree_mat_inv_sqrt = coo_to_csp(degree_mat_inv_sqrt.tocoo())
    adj_normalized = degree_mat_inv_sqrt.__matmul__(support)
    adj_normalized = coo_to_csp(adj_normalized.tocoo())
    return adj_normalized, rowsum
    # coo_adj = sp.coo_matrix(adj_normalized.to('cpu').numpy())
    # return coo_to_csp(coo_adj).to(device), rowsum
5,337,242
def isTileEvent(x: int, y: int):
    """
    checks if a given tile is an event or not
    quicker than generateTileAt

    x: the x value of the target tile
    y: the y value of the target tile
    """
    perlRand = getPerlin(x, y, s=2.501)
    if Math.floor(perlRand * 3400) == 421 and deriveTile(x, y) == 'H':
        return True
    elif Math.floor(perlRand * 9000) == 4203 and deriveTile(x, y) == 'C':
        return True
    return False
5,337,243
def args_to_object(args, obj):
    """
    Copy all fields from the argparse table "args" to the object "obj"
    """
    for n, v in inspect.getmembers(args):
        if not n.startswith('_'):
            setattr(obj, n, v)
5,337,244
def to_onehot_sym(ind, dim):
    """Return a matrix with one hot encoding of each element in ind."""
    assert ind.ndim == 1
    return theano.tensor.extra_ops.to_one_hot(ind, dim)
5,337,245
def generate_potential_grasp(object_cloud): """ The object_cloud needs to be in table coordinates. """ # https://www.cs.princeton.edu/~funk/tog02.pdf picking points in triangle nrmls = object_cloud.normals.copy() # if object_cloud.points[:,2].max()<0.11: # nrmls[nrmls[:,2]>0] *= -1 # direction_bias = np.max( np.vstack( [ nrmls @ np.array([0,0,-1]), np.zeros(nrmls.shape[0])] ), axis=0 ) # else: # direction_bias = np.ones(nrmls.shape) area_bias = object_cloud.facet_areas/np.sum(object_cloud.facet_areas) probability = area_bias probability /= np.sum(probability) sample = np.random.choice(np.arange(object_cloud.hull.simplices.shape[0]), p=probability) simplex = object_cloud.hull.simplices[sample] r1,r2 = np.random.uniform(0,1,2) sqrt_r1 = r1**0.5 A,B,C = object_cloud.points[simplex] point = (1-sqrt_r1)*A + sqrt_r1*(1-r2)*B + sqrt_r1*r2*C direction = nrmls[sample] # this is pointing inwards distance = np.random.uniform(0.01, 0.15) # in cm p = point - direction*distance if p[2] < 0.07: n = (point[2] - 0.07)/distance - direction[2] direction[2] = direction[2]+n direction = direction/np.linalg.norm(direction) p = point - direction*distance y_axis = np.random.uniform(-1,1,3) y_axis = y_axis - (y_axis@direction)*direction y_axis /= np.linalg.norm(y_axis) x_axis = np.cross(y_axis, direction) x_axis /= np.linalg.norm(x_axis) R = np.zeros((3,3)) R[:,0] = x_axis R[:,1] = y_axis R[:,2] = direction return R, p[...,np.newaxis]
5,337,246
def ACP_O(model, Xtrain, Ytrain, Xtest, labels = [0,1], out_file = None, seed = 42, damp = 10**-3, batches = 1): """Runs ACP (ordinary) CP-IF to make a prediction for all points in Xtest """ N = len(Xtrain) # Train model on D. model_D = model model_D = model_D.to(device) model_D.fit(Xtrain, Ytrain, seed = seed) torch.cuda.empty_cache() # Estimate influence. gradients = [] for x, y in zip(Xtrain, Ytrain): gradients.append(model_D.grad_z(x, y, flatten = True).cpu().detach()) torch.cuda.empty_cache() gradients.append(None) #for the test point H_D = torch.zeros(model_D.count_params(), model_D.count_params()).to(device) Xtrain_splitted = np.array_split(Xtrain, batches) Ytrain_splitted = np.array_split(Ytrain, batches) for batch_X, batch_Y in zip(Xtrain_splitted, Ytrain_splitted): H_D += model_D.hessian_all_points(batch_X, batch_Y) H_D = H_D/batches H_D += torch.diag(Tensor([damp]*len(H_D))).to(device) torch.cuda.empty_cache() H_inv = torch.inverse(H_D) del H_D #Preliminary scores losses = [np.float64(model_D.compute_loss(x, y).cpu().detach()) for x, y in zip(Xtrain, Ytrain)] losses.append(None) pvals = [] prediction_times = [] for k, xtest in enumerate(Xtest): print("TEST: " +str(k+1)) pvals_xtest = {} scores = {} start = time.perf_counter() for yhat in labels: # Extended dataset Xtmp = np.row_stack((Xtrain, [xtest])) Ytmp = np.concatenate((Ytrain, [yhat])) alphas = np.zeros(len(Xtmp)) # Obtain gradient on test point g_test = model_D.grad_z(Xtmp[-1,:], Ytmp[-1], flatten = True) # Obtain loss on test point loss_test = np.float64(model_D.compute_loss(Xtmp[-1,:], Ytmp[-1]).cpu().detach()) gradients[-1] = g_test losses[-1] = loss_test for j, (x,y) in enumerate(zip(Xtmp, Ytmp)): gradient = gradients[j].to(device) # Compute influence est = - gradient.T@H_inv@g_test/N alphas[j] = losses[j] + np.array(est.cpu().detach()) torch.cuda.empty_cache() pval = sum(alphas >= alphas[-1])/(N+1) print(pval) pvals_xtest[yhat], scores[yhat] = pval, list(alphas) prediction_times.append(time.perf_counter() - start) pvals.append(pvals_xtest) if out_file: log_to_file(out_file, {"N": len(Xtrain), "prediction-times": prediction_times[-1], "p-values": pvals_xtest }) return pvals, prediction_times
5,337,247
def set(ctx, username, password):
    """update user's password.\n
    USERNAME: the user's name \n
    PASSWORD: the new password of the user
    """
    client = ctx.obj['client']
    if not username or not password:
        click.echo('user set must provide username and password.', err=True)
        sys.exit(1)
    valid, response = client.update_password(username, password)
    if valid:
        click.echo("user[%s] update success" % username)
    else:
        click.echo("user[%s] update failed with message[%s]" % (username, response))
        sys.exit(1)
5,337,248
def erase_all_data():
    """ Function to erase all data for a clean start. Use with caution!"""

    JobStreams, Replicates, BaseDirNames, JobBaseNames, Runs, \
        nJobStreams, nReplicates, nBaseNames = check_job_structure()

    cwd = os.getcwd()
    print("\nWe are about to erase all data in this directory, which can be useful")
    print("for making a clean start, but disastrous if this is the wrong folder!")
    print(("{}Proceed with caution!{}".format(RED, DEFAULT)))
    print("This operation will delete all data in the folders:\n")
    print(("/{} ".format(JobStreams, DEFAULT)))
    print("/Setup_and_Config/Benchmarking/ - Benchmarking data.")

    strg = input("\n Press enter to quit or type: '{}erase all my data{}': ".format(YELLOW, DEFAULT))
    print(strg)

    if strg in ['erase all my data']:
        print("Ok, well if you say so....")
        for j in range(0, nJobStreams):
            TargetDir = cwd + "/" + JobStreams[j]
            print((" Erasing all files in:{}".format(TargetDir)))
            if os.path.isdir(TargetDir):
                shutil.rmtree(TargetDir)
            else:
                print((" Couldn't see {}".format(TargetDir)))

        # cleanup benchmark files:
        benchmark_delete = ["current_MD_run_files.*", "slurm*", "bm_config.*",
                            "FFTW_*", "temp_working_errors", "bm_input.*"]
        for j in benchmark_delete:
            filetarget = cwd + "/Setup_and_Config/Benchmarking/" + j
            p = glob(filetarget)
            for m in p:
                os.remove(m)

        print("\nOh the humanity. I sure hope that wasn't anything important.")
    else:
        print("Phew! Nothing erased.")
5,337,249
def func(x):
    """ sinc (x) """
    if x == 0:
        return 1.0
    return math.sin(x) / x
5,337,250
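A short hedged check of the sinc helper above, assuming `math` is imported: sinc(0) is defined as 1, and sinc(pi) should be numerically zero.

import math

print(func(0))             # 1.0 by definition
print(abs(func(math.pi)))  # ~3.9e-17, i.e. zero up to floating-point error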
def grad_norm(model=None, parameters=None):
    """Compute parameter gradient norm."""
    assert parameters is not None or model is not None
    total_norm = 0
    if parameters is None:
        parameters = []
    if model is not None:
        parameters.extend(model.parameters())
    parameters = [p for p in parameters if p.grad is not None and p.requires_grad]
    for p in parameters:
        param_norm = p.grad.detach().data.norm(2)
        total_norm += param_norm.item()**2
    total_norm = total_norm**0.5
    return total_norm
5,337,251
def chain_from_iterable(iterables):
    """Alternate constructor for chain(). Gets chained inputs from a single
    iterable argument that is evaluated lazily.

    :param iterables: an iterable of iterables
    """
    # chain_from_iterable(['ABC', 'DEF']) --> A B C D E F
    for it in iterables:
        for element in it:
            yield element
5,337,252
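A brief usage sketch of the generator above, mirroring the example in its comment; the second part just illustrates that the generator is lazy.

print(list(chain_from_iterable(['ABC', 'DEF'])))
# ['A', 'B', 'C', 'D', 'E', 'F']

gen = chain_from_iterable([[1, 2], [3]])  # nothing is consumed yet
print(next(gen))  # 1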
def db_entry_trim_empty_fields(entry):
    """ Remove empty fields from an internal-format entry dict """
    entry_trim = copy.deepcopy(entry)  # Make a copy to modify as needed
    for field in ['url', 'title', 'extended']:
        if field in entry:
            if (entry[field] is None) or \
               (type(entry[field]) is str and len(entry[field]) == 0):
                del entry_trim[field]
    return entry_trim
5,337,253
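A quick hedged illustration of the trimming helper above, assuming `copy` is imported as in the snippet; only the 'url', 'title', and 'extended' fields are inspected, other keys pass through untouched.

import copy

entry = {'url': '', 'title': 'A title', 'extended': None, 'tags': ['x']}
print(db_entry_trim_empty_fields(entry))
# {'title': 'A title', 'tags': ['x']}  -- empty 'url' and None 'extended' removed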
def test_get_alert_attachments(mocker):
    """
    Given:
        - An app client object
        - Alert-id = 1234
    When:
        - Calling function get_alert_attachments
    Then:
        - Ensure the return data is correct
    """
    mock_client = OpsGenieV3.Client(base_url="")
    mocker.patch.object(mock_client, 'get_alert_attachments',
                        return_value=util_load_json('test_data/get_alert_attachments.json'))
    res = OpsGenieV3.get_alert_attachments(mock_client, {"alert-id": 1234})
    assert (res.readable_output == "### OpsGenie Attachment\n**No entries.**\n")
5,337,254
def loadconfig(PATH):
    """Load Latte's repo configuration from the PATH.
    A dictionary of the config data is returned, otherwise None."""
    try:
        f = open(PATH, "r")
    except FileNotFoundError:
        return None
    else:
        confobj = SWConfig(f.read())
        if confobj is None:
            return None
        else:
            return confobj.data
5,337,255
def _call_token_server(method, request): """Sends an RPC to tokenserver.minter.TokenMinter service. Args: method: name of the method to call. request: dict with request fields. Returns: Dict with response fields. Raises: auth.AuthorizationError on HTTP 403 reply. InternalError if the RPC fails unexpectedly. """ # Double check token server URL looks sane ('https://....'). This is checked # when it's imported from the config. This check should never fail. ts_url = auth.get_request_auth_db().token_server_url try: utils.validate_root_service_url(ts_url) except ValueError as exc: raise InternalError('Invalid token server URL %s: %s' % (ts_url, exc)) # See TokenMinter in # https://chromium.googlesource.com/infra/luci/luci-go/+/master/tokenserver/api/minter/v1/token_minter.proto # But beware that proto JSON serialization uses camelCase, not snake_case. try: return net.json_request( url='%s/prpc/tokenserver.minter.TokenMinter/%s' % (ts_url, method), method='POST', payload=request, headers={'Accept': 'application/json; charset=utf-8'}, scopes=[net.EMAIL_SCOPE]) except net.Error as exc: logging.error( 'Error calling %s (HTTP %s: %s):\n%s', method, exc.status_code, exc.message, exc.response) if exc.status_code == 403: raise auth.AuthorizationError(exc.response) raise InternalError('Failed to call MintOAuthTokenGrant, see server logs')
5,337,256
def assert_array_equal(x: Tuple[int, int, int], y: Tuple[float, float, int]):
    """
    usage.skimage: 3
    """
    ...
5,337,257
def get_keywords(text):
    """Get keywords that relate to this article (from NLP service)

    Args:
        text (string): text to extract keywords from

    Returns:
        [list]: list of extracted keywords
    """
    extracted_keywords = []
    request = {'text': text}
    nlp_output = requests.post(env.get_keywords_endpoint(), json=request)
    nlp_output.raise_for_status()
    json_output = nlp_output.json()
    if 'error' in json_output:
        raise Exception(json_output['error']['message'])
    for keyword in json_output["tokens"]:
        extracted_keywords.append(keyword["lemma"])
    return extracted_keywords
5,337,258
def test_tiproviders_editor(kv_sec, mp_conf_ctrl): """TI Providers item editor.""" edit_comp = CETIProviders(mp_controls=mp_conf_ctrl) edit_comp.select_item.label = "VirusTotal" provider = edit_comp.select_item.label # get the control for this provider ctrl_path = f"TIProviders.{provider}.Args.AuthKey" arg_ctrl = mp_conf_ctrl.get_control(ctrl_path) arg_ctrl.rb_store_type.value = STORE_ENV_VAR arg_ctrl.txt_val.value = "test_var" os.environ["test_var"] = "test_value" arg_ctrl.btn_add_kv_secret.click() check.is_true(arg_ctrl.txt_val.disabled) check.equal(arg_ctrl.txt_val.value, "") set_secret, ss_args, _ = kv_sec.mock_calls[1] check.equal(set_secret, "().set_secret") check.equal(ss_args[0], "TIProviders-VirusTotal-Args-AuthKey") check.equal(ss_args[1], "test_value") check.equal(arg_ctrl.rb_store_type.value, STORE_KEYVAULT) arg_ctrl.rb_store_type.value = STORE_TEXT arg_ctrl.txt_val.value = "test_value2" arg_ctrl.btn_add_kv_secret.click() check.is_true(arg_ctrl.txt_val.disabled) check.equal(arg_ctrl.txt_val.value, "") check.equal(arg_ctrl.rb_store_type.value, STORE_KEYVAULT) set_secret, ss_args, _ = kv_sec.mock_calls[3] check.equal(set_secret, "().set_secret") check.equal(ss_args[0], "TIProviders-VirusTotal-Args-AuthKey") check.equal(ss_args[1], "test_value2") kv_call_count = len(kv_sec.mock_calls) arg_ctrl.rb_store_type.value = STORE_TEXT arg_ctrl.txt_val.value = "" arg_ctrl.btn_add_kv_secret.click() # verify we didn't call KV with blank value check.equal(len(kv_sec.mock_calls), kv_call_count)
5,337,259
def weather_config() -> str:
    """Calls config_handle() and stores the returned contents in the variable
    'config_file'. The contents are then parsed so the weather API key can be
    accessed, and the key is returned."""
    # Accessing Weather Api Key
    config_file = config_handle()
    api_key = config_file["api_keys"]["weather_key"]
    return api_key
5,337,260
def interpret_bit_flags(bit_flags, flip_bits=None, flag_name_map=None): """ Converts input bit flags to a single integer value (bit mask) or `None`. When input is a list of flags (either a Python list of integer flags or a string of comma-, ``'|'``-, or ``'+'``-separated list of flags), the returned bit mask is obtained by summing input flags. .. note:: In order to flip the bits of the returned bit mask, for input of `str` type, prepend '~' to the input string. '~' must be prepended to the *entire string* and not to each bit flag! For input that is already a bit mask or a Python list of bit flags, set ``flip_bits`` for `True` in order to flip the bits of the returned bit mask. Parameters ---------- bit_flags : int, str, list, None An integer bit mask or flag, `None`, a string of comma-, ``'|'``- or ``'+'``-separated list of integer bit flags or mnemonic flag names, or a Python list of integer bit flags. If ``bit_flags`` is a `str` and if it is prepended with '~', then the output bit mask will have its bits flipped (compared to simple sum of input flags). For input ``bit_flags`` that is already a bit mask or a Python list of bit flags, bit-flipping can be controlled through ``flip_bits`` parameter. .. note:: When ``bit_flags`` is a list of flag names, the ``flag_name_map`` parameter must be provided. .. note:: Only one flag separator is supported at a time. ``bit_flags`` string should not mix ``','``, ``'+'``, and ``'|'`` separators. flip_bits : bool, None Indicates whether or not to flip the bits of the returned bit mask obtained from input bit flags. This parameter must be set to `None` when input ``bit_flags`` is either `None` or a Python list of flags. flag_name_map : BitFlagNameMap A `BitFlagNameMap` object that provides mapping from mnemonic bit flag names to integer bit values in order to translate mnemonic flags to numeric values when ``bit_flags`` that are comma- or '+'-separated list of menmonic bit flag names. Returns ------- bitmask : int or None Returns an integer bit mask formed from the input bit value or `None` if input ``bit_flags`` parameter is `None` or an empty string. If input string value was prepended with '~' (or ``flip_bits`` was set to `True`), then returned value will have its bits flipped (inverse mask). Examples -------- >>> from astropy.nddata.bitmask import interpret_bit_flags, extend_bit_flag_map >>> ST_DQ = extend_bit_flag_map('ST_DQ', CR=1, CLOUDY=4, RAINY=8, HOT=16, DEAD=32) >>> "{0:016b}".format(0xFFFF & interpret_bit_flags(28)) '0000000000011100' >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('4,8,16')) '0000000000011100' >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('CLOUDY,RAINY,HOT', flag_name_map=ST_DQ)) '0000000000011100' >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~4,8,16')) '1111111111100011' >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(4+8+16)')) '1111111111100011' >>> "{0:016b}".format(0xFFFF & interpret_bit_flags('~(CLOUDY+RAINY+HOT)', ... flag_name_map=ST_DQ)) '1111111111100011' >>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16])) '0000000000011100' >>> "{0:016b}".format(0xFFFF & interpret_bit_flags([4, 8, 16], flip_bits=True)) '1111111111100011' """ has_flip_bits = flip_bits is not None flip_bits = bool(flip_bits) allow_non_flags = False if _is_int(bit_flags): return (~int(bit_flags) if flip_bits else int(bit_flags)) elif bit_flags is None: if has_flip_bits: raise TypeError( "Keyword argument 'flip_bits' must be set to 'None' when " "input 'bit_flags' is None." 
) return None elif isinstance(bit_flags, str): if has_flip_bits: raise TypeError( "Keyword argument 'flip_bits' is not permitted for " "comma-separated string lists of bit flags. Prepend '~' to " "the string to indicate bit-flipping." ) bit_flags = str(bit_flags).strip() if bit_flags.upper() in ['', 'NONE', 'INDEF']: return None # check whether bitwise-NOT is present and if it is, check that it is # in the first position: bitflip_pos = bit_flags.find('~') if bitflip_pos == 0: flip_bits = True bit_flags = bit_flags[1:].lstrip() else: if bitflip_pos > 0: raise ValueError("Bitwise-NOT must precede bit flag list.") flip_bits = False # basic check for correct use of parenthesis: while True: nlpar = bit_flags.count('(') nrpar = bit_flags.count(')') if nlpar == 0 and nrpar == 0: break if nlpar != nrpar: raise ValueError("Unbalanced parantheses in bit flag list.") lpar_pos = bit_flags.find('(') rpar_pos = bit_flags.rfind(')') if lpar_pos > 0 or rpar_pos < (len(bit_flags) - 1): raise ValueError("Incorrect syntax (incorrect use of " "parenthesis) in bit flag list.") bit_flags = bit_flags[1:-1].strip() if sum(k in bit_flags for k in '+,|') > 1: raise ValueError( "Only one type of bit flag separator may be used in one " "expression. Allowed separators are: '+', '|', or ','." ) if ',' in bit_flags: bit_flags = bit_flags.split(',') elif '+' in bit_flags: bit_flags = bit_flags.split('+') elif '|' in bit_flags: bit_flags = bit_flags.split('|') else: if bit_flags == '': raise ValueError( "Empty bit flag lists not allowed when either bitwise-NOT " "or parenthesis are present." ) bit_flags = [bit_flags] if flag_name_map is not None: try: int(bit_flags[0]) except ValueError: bit_flags = [flag_name_map[f] for f in bit_flags] allow_non_flags = len(bit_flags) == 1 elif hasattr(bit_flags, '__iter__'): if not all([_is_int(flag) for flag in bit_flags]): if (flag_name_map is not None and all([isinstance(flag, str) for flag in bit_flags])): bit_flags = [flag_name_map[f] for f in bit_flags] else: raise TypeError("Every bit flag in a list must be either an " "integer flag value or a 'str' flag name.") else: raise TypeError("Unsupported type for argument 'bit_flags'.") bitset = set(map(int, bit_flags)) if len(bitset) != len(bit_flags): warnings.warn("Duplicate bit flags will be ignored") bitmask = 0 for v in bitset: if not _is_bit_flag(v) and not allow_non_flags: raise ValueError("Input list contains invalid (not powers of two) " "bit flag: {:d}".format(v)) bitmask += v if flip_bits: bitmask = ~bitmask return bitmask
5,337,261
def _check_web_services_response(response, msg=''):
    """
    Log an error message if the specified response's status code is not 200.
    """
    if response.status_code == 404:
        current_app.logger.error("{}".format(msg))
    elif response.status_code != 200:
        current_app.logger.error("{}\n{}".format(msg, dumps(response.json(), indent=2)))
5,337,262
def test_update_post(client):
    """Test that dummy post is properly updated"""
    POST["label"] = "YTA"
    payload = dumps(dict(label="YTA"))
    resp = client.put(f"{settings.api.version}/posts/{POST['id']}/", data=payload)
    assert resp.status_code == HTTP_200_OK
    assert resp.json() == POST
5,337,263
def reparam(mu, std, do_sample=True, cuda=True):
    """Reparametrization for Normal distribution.
    """
    if do_sample:
        eps = torch.FloatTensor(std.size()).normal_()
        if cuda:
            eps = eps.cuda()
        eps = Variable(eps)
        return mu + eps * std
    else:
        return mu
5,337,264
def simu_grid_graph(width, height, rand_weight=False):
    """Generate a grid graph.

    To generate a grid graph. Each node has 4-neighbors. Please see more details
    in https://en.wikipedia.org/wiki/Lattice_graph. For example, we can generate
    a 5x3 (width x height) grid graph

        0---1---2---3---4
        |   |   |   |   |
        5---6---7---8---9
        |   |   |   |   |
        10--11--12--13--14

    by using simu_grid_graph(5, 3)

    We can also generate a 1x5 chain graph

        0---1---2---3---4

    by using simu_grid_graph(5, 1)

    :param width: width of this grid graph.
    :param height: height of this grid graph.
    :param rand_weight: generate weights from U(1., 2.) if it is True.
    :return: edges and corresponding edge costs.
        return two empty [],[] list if there was any error occurring.
    """
    if width < 0 and height < 0:
        print('Error: width and height should be positive.')
        return [], []
    width, height = int(width), int(height)
    edges, weights = [], []
    index = 0
    for i in range(height):
        for j in range(width):
            if (index % width) != (width - 1):
                edges.append((index, index + 1))
                if index + width < int(width * height):
                    edges.append((index, index + width))
            else:
                if index + width < int(width * height):
                    edges.append((index, index + width))
            index += 1
    edges = np.asarray(edges, dtype=int)
    # random generate costs of the graph
    if rand_weight:
        weights = []
        while len(weights) < len(edges):
            weights.append(random.uniform(1., 2.0))
        weights = np.asarray(weights, dtype=np.float64)
    else:
        # set unit weights for edge costs.
        weights = np.ones(len(edges), dtype=np.float64)
    return edges, weights
5,337,265
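A brief hedged usage sketch of simu_grid_graph above (assuming numpy is available), reproducing the 1x5 chain graph from its docstring.

edges, weights = simu_grid_graph(5, 1)
print(edges.tolist())    # [[0, 1], [1, 2], [2, 3], [3, 4]]
print(weights.tolist())  # [1.0, 1.0, 1.0, 1.0] -- unit costs since rand_weight=False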
def solrctl():
    """
    solrctl path
    """
    for dirname in os.environ.get('PATH', '').split(os.path.pathsep):
        path = os.path.join(dirname, 'solrctl')
        if os.path.exists(path):
            return path
    return None
5,337,266
def calculate_sensitivity_to_weighting(jac, weights, moments_cov, params_cov): """calculate the sensitivity to weighting. The sensitivity measure is calculated for each parameter wrt each moment. It answers the following question: How would the precision change if the weight of the kth moment is increased a little? Args: sensitivity_to_bias (np.ndarray or pandas.DataFrame): See ``calculate_sensitivity_to_bias`` for details. weights (np.ndarray or pandas.DataFrame): The weighting matrix used for msm estimation. moments_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix of the empirical moments. params_cov (numpy.ndarray or pandas.DataFrame): The covariance matrix of the parameter estimates. Returns: np.ndarray or pd.DataFrame: Sensitivity measure with shape (n_params, n_moments) """ _jac, _weights, _moments_cov, _params_cov, names = process_pandas_arguments( jac=jac, weights=weights, moments_cov=moments_cov, params_cov=params_cov ) gwg_inverse = _sandwich(_jac, _weights) gwg_inverse = robust_inverse(gwg_inverse, INVALID_SENSITIVITY_MSG) m6 = [] for k in range(len(_weights)): mask_matrix_o = np.zeros(shape=_weights.shape) mask_matrix_o[k, k] = 1 m6k_1 = gwg_inverse @ _sandwich(_jac, mask_matrix_o) @ _params_cov m6k_2 = ( gwg_inverse @ _jac.T @ mask_matrix_o @ _moments_cov @ _weights @ _jac @ gwg_inverse ) m6k_3 = ( gwg_inverse @ _jac.T @ _weights @ _moments_cov @ mask_matrix_o @ _jac @ gwg_inverse ) m6k_4 = _params_cov @ _sandwich(_jac, mask_matrix_o) @ gwg_inverse m6k = -m6k_1 + m6k_2 + m6k_3 - m6k_4 m6k = m6k.diagonal() m6.append(m6k) m6 = np.array(m6).T weights_diagonal = np.diagonal(_weights) params_variances = np.diagonal(_params_cov) e6 = m6 / params_variances.reshape(-1, 1) e6 = e6 * weights_diagonal if names: e6 = pd.DataFrame(e6, index=names.get("params"), columns=names.get("moments")) return e6
5,337,267
def subword(w):
    """
    Function used in the Key Expansion routine that takes a four-byte input word
    and applies an S-box to each of the four bytes to produce an output word.
    """
    w = w.reshape(4, 8)
    return SBOX[w[0]] + SBOX[w[1]] + SBOX[w[2]] + SBOX[w[3]]
5,337,268
def YumInstall(vm):
    """Installs the OpenJDK package on the VM."""
    vm.InstallPackages('java-1.{0}.0-openjdk-devel'.format(FLAGS.openjdk_version))
5,337,269
def deaScranDESeq2(counts, conds, comparisons, alpha, scran_clusters=False): """Makes a call to DESeq2 with SCRAN to perform D.E.A. in the given counts matrix with the given conditions and comparisons. Returns a list of DESeq2 results for each comparison """ results = list() n_cells = len(counts.columns) try: pandas2ri.activate() deseq2 = RimportLibrary("DESeq2") scran = RimportLibrary("scran") multicore = RimportLibrary("BiocParallel") multicore.register(multicore.MulticoreParam(multiprocessing.cpu_count()-1)) as_matrix = r["as.matrix"] # Create the R conditions and counts data r_counts = pandas2ri.py2ri(counts) cond = robjects.StrVector(conds) r_call = """ function(r_counts) { sce = SingleCellExperiment(assays=list(counts=r_counts)) return(sce) } """ r_func = r(r_call) sce = r_func(as_matrix(r_counts)) if scran_clusters: r_clusters = scran.quickCluster(as_matrix(r_counts), max(n_cells/10, 10)) min_cluster_size = min(Counter(r_clusters).values()) sizes = list(set([round((min_cluster_size/2) / i) for i in [5,4,3,2,1]])) sce = scran.computeSumFactors(sce, clusters=r_clusters, sizes=sizes, positive=True) else: sizes = list(set([round((n_cells/2) * i) for i in [0.1,0.2,0.3,0.4,0.5]])) sce = scran.computeSumFactors(sce, sizes=sizes, positive=True) sce = r.normalize(sce) dds = r.convertTo(sce, type="DESeq2") r_call = """ function(dds, conditions){ colData(dds)$conditions = as.factor(conditions) design(dds) = formula(~ conditions) return(dds) } """ r_func = r(r_call) dds = r_func(dds, cond) dds = r.DESeq(dds) # Perform the comparisons and store results in list for A,B in comparisons: result = r.results(dds, contrast=r.c("conditions", A, B), alpha=alpha) result = r['as.data.frame'](result) genes = r['rownames'](result) result = pandas2ri.ri2py_dataframe(result) # There seems to be a problem parsing the rownames from R to pandas # so we do it manually result.index = genes results.append(result) pandas2ri.deactivate() except Exception as e: raise e return results
5,337,270
def stop_gzweb(cloudsim_api, constellation_name):
    """
    Stops the gzweb service and waits until it reports that it has stopped.
    """
    cloudsim_api.stop_gzweb(constellation_name)
    count = 100
    while count > 0:
        print("count %s/100" % count)
        time.sleep(5)
        count -= 1
        r = cloudsim_api.ping_gzweb(constellation_name)
        print("%s/100 gzweb state: %s" % (count, r))
        # An empty ping response means gzweb is no longer running.
        if r == "":
            return
    raise RestException("Can't stop gzweb on %s" % constellation_name)
5,337,271
def cylindrical_to_cartesian(a: ArrayLike) -> NDArray: """ Transform given cylindrical coordinates array :math:`\\rho\\phi z` (radial distance, azimuth and height) to cartesian coordinates array :math:`xyz`. Parameters ---------- a Cylindrical coordinates array :math:`\\rho\\phi z` to transform, :math:`\\rho` is in range [0, +inf], :math:`\\phi` is in range [-pi, pi] radians i.e. [-180, 180] degrees, :math:`z` is in range [0, +inf]. Returns ------- :class:`numpy.ndarray` Cartesian coordinates array :math:`xyz`. References ---------- :cite:`Wikipedia2006`, :cite:`Wikipedia2005a` Examples -------- >>> a = np.array([3.16227766, 0.32175055, 6.00000000]) >>> cylindrical_to_cartesian(a) # doctest: +ELLIPSIS array([ 3. , 0.9999999..., 6. ]) """ a = as_float_array(a) x, y = tsplit(polar_to_cartesian(a[..., 0:2])) return tstack([x, y, a[..., -1]])
5,337,272
def eigenarray_to_array( array ): """Convert Eigen::ArrayXd to numpy array""" return N.frombuffer( array.data(), dtype='d', count=array.size() )
5,337,273
def _ncon_to_adjmat(labels: List[List[int]]): """ Generate an adjacency matrix from the network connections. """ # process inputs N = len(labels) ranks = [len(labels[i]) for i in range(N)] flat_labels = np.hstack([labels[i] for i in range(N)]) tensor_counter = np.hstack( [i * np.ones(ranks[i], dtype=int) for i in range(N)]) index_counter = np.hstack([np.arange(ranks[i]) for i in range(N)]) # build log-adjacency index-by-index adjmat = np.zeros([N, N], dtype=int) unique_labels = np.unique(flat_labels) for ele in unique_labels: # identify tensor/index location of each edge tnr = tensor_counter[flat_labels == ele] ind = index_counter[flat_labels == ele] if len(ind) == 1: # external index adjmat[tnr[0], tnr[0]] += 1 elif len(ind) == 2: # internal index if tnr[0] != tnr[1]: # ignore partial traces adjmat[tnr[0], tnr[1]] += 1 adjmat[tnr[1], tnr[0]] += 1 return adjmat
5,337,274
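A self-contained check of _ncon_to_adjmat on a three-tensor matrix chain A-B-C; the label convention (positive integers for contracted indices, negative integers for open indices) follows the usual ncon notation.

import numpy as np

# A -(1)- B -(2)- C, with one open index on A and one on C.
labels = [[-1, 1], [1, 2], [2, -2]]
adj = _ncon_to_adjmat(labels)
print(adj)
# [[1 1 0]
#  [1 0 1]
#  [0 1 1]]
# Off-diagonal entries count shared (contracted) indices between two tensors;
# diagonal entries count each tensor's open (negative) indices.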
def start_project(name, gcloud_project, database, tasks): """ This command creates a new project structure under the directory <name>. """ path = os.path.join(os.path.dirname(__file__), 'project_template') loader = FileSystemLoader(path) env = Environment(loader=loader) os.mkdir(os.path.join(os.curdir, name)) to_render = ['app.yaml', 'main.py', 'requirements.txt', 'settings.py', 'README.md'] if database != 'none': context = { 'DATABASE_ENGINE': f'secure_scaffold.contrib.db.engine.{database}', 'DATABASE_CHOICE': f'{database}', 'GCLOUD_NAME': gcloud_project, 'PROJECT_NAME': name, 'TASKS': tasks, 'DEPENDENCIES': 1 } to_render.append('models.py') else: context = { 'DATABASE_CHOICE': '', 'TASKS': tasks, 'DEPENDENCIES': 0, } if tasks: to_render.append('tasks.py') context['TASK_QUEUE'] = click.prompt('What is the name of the task queue?') context['TASK_LOCATION'] = click.prompt('What region is the task located in?') context['DEPENDENCIES'] += 1 click.secho(f'Generating project structure for {name}', fg='green') for file in to_render: template = env.get_template(f'{file}.tpl') with open(os.path.join(os.curdir, name, file), 'w') as to_write: to_write.write(template.render(**context)) click.secho(f'Project generated in ./{name}. ' f'Run cd {name} to see the project.', fg='green')
5,337,275
def test_command_line_interface() -> None: """Test the CLI.""" runner = CliRunner() result = runner.invoke(cli.proto_compile) assert result.exit_code == 0 assert "Usage: proto-compile" in result.output help_result = runner.invoke(cli.proto_compile, ["--help"]) assert help_result.exit_code == 0 assert "Show this message and exit." in help_result.output
5,337,276
def search(name="", address="", description=""): """ Returns a list of Plan Objects """ plans = [] name = name.replace(' ','+') address = address.replace(' ','+') description = description.replace(' ','+') Address_Search_URL = f"http://webgeo.kildarecoco.ie/planningenquiry/Public/GetPlanningFileNameAddressResult?name={name}&address={address}&devDesc={description}&startDate=&endDate=" r = requests.get(Address_Search_URL) if r.status_code == 200: try: for item in r.json(): p = Plan(item['FileNumber']) plans.append(p) except json.decoder.JSONDecodeError: content = json.loads(r.text) for item in content: try: p = Plan(item['FileNumber']) plans.append(p) except: print(f"Couldn't load: {item}", file=sys.stderr) return plans
5,337,277
def _is_install_requirement(requirement): """ return True iff setup should install requirement :param requirement: (str) line of requirements.txt file :return: (bool) """ return not (requirement.startswith('-e') or 'git+' in requirement)
5,337,278
def test_amp_pro():
    """Test helper that builds an arbitrary quantum state and displays its
    amplitudes and probabilities as bar charts."""
    n = 3
    state = QuantumState(n)
    #state.set_Haar_random_state()
    show_amplitude(state)
    show_probability(state)
    X(0).update_quantum_state(state)
    show_amplitude(state)
    show_probability(state)
    H(0).update_quantum_state(state)
    show_amplitude(state)
    show_probability(state)
    Z(1).update_quantum_state(state)
    show_amplitude(state)
    show_probability(state)
    CNOT(0, 1).update_quantum_state(state)
    show_amplitude(state)
    show_probability(state)
5,337,279
def confusion_matrices_runs_thresholds(
    y, scores, thresholds, n_obs=None, fill=0.0, obs_axis=0
):
    """Compute confusion matrices over runs and thresholds.

    `conf_mats_runs_thresh` is an alias for this function.

    Parameters
    ----------
    y : np.ndarray[bool, int32, int64, float32, float64]
        the ground truth labels. If different runs have different numbers of
        observations, the n_obs parameter must be set to avoid computing
        metrics on the filled values. If ``y`` is one dimensional and
        ``scores`` is not, the ``y`` values are assumed to be the same for
        each run.
    scores : np.array[float32, float64]
        the classifier scores. If different runs have different numbers of
        observations, the n_obs parameter must be set to avoid computing
        metrics on the filled values.
    thresholds : np.array[float32, float64]
        classification thresholds
    n_obs : np.array[int64], default=None
        the number of observations per run; if None the same number of
        observations is assumed to exist for each run.
    fill : double
        value to fill when a metric is not defined, e.g. divide by zero.
    obs_axis : {0, 1}, default=0
        0 if the observations for a single run are stored in a column
        (e.g. from a pd.DataFrame) and 1 otherwise

    Returns
    -------
    conf_mat : np.ndarray[int64]
        3D array where the rows contain the counts for a threshold,
        the columns the confusion matrix entries and the slices the counts
        for a run
    """
    thresholds = check_array(
        thresholds,
        max_dim=1,
        dtype_check=_convert_to_float,
    )

    scores = check_array(
        scores,
        axis=obs_axis,
        target_axis=obs_axis,
        target_order=1-obs_axis,
        max_dim=2,
        dtype_check=_convert_to_float,
    )
    n_runs = scores.shape[1 - obs_axis]
    max_obs = scores.shape[obs_axis]

    if y.ndim == 1:
        y = np.tile(y[:, None], n_runs)
    elif y.shape[1] == 1 and y.shape[0] >= 2:
        y = np.tile(y, n_runs)

    y = check_array(
        y,
        axis=obs_axis,
        target_axis=obs_axis,
        target_order=1-obs_axis,
        max_dim=2,
        dtype_check=_convert_to_ext_types,
    )

    n_thresholds = thresholds.size

    if n_obs is None:
        n_obs = np.repeat(max_obs, n_runs)

    cm = _core.confusion_matrix_runs_thresholds(
        y, scores, thresholds, n_obs
    )
    # cm and mtr are both flat arrays with order conf_mat, thresholds, runs
    # as this is fastest to create. However, how the cubes will be sliced
    # later doesn't align with this. So we incur a copy such that the cubes
    # have the optimal strides for further processing
    if n_thresholds == 1:
        # create cube from flat array
        cm = cm.reshape(n_runs, 4, order='C')
    else:
        # create cube from flat array
        cm = cm.reshape(n_runs, n_thresholds, 4, order='C')
        # reorder such that with F-order we get from smallest to largest
        # strides: conf_mat, runs, thresholds
        cm = np.swapaxes(np.swapaxes(cm, 0, 2), 1, 2)
        # make values over the confusion matrix and runs contiguous
        cm = np.asarray(cm, order='F')
        # change order s.t. we have thresholds, conf_mat, runs
        cm = np.swapaxes(cm.T, 1, 2)
    return cm
5,337,280
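A hedged usage sketch: the check_array helpers and the compiled _core extension backing the function above are assumed to be importable, so the snippet only illustrates the intended input layout and output shape; the data are random placeholders.

import numpy as np

rng = np.random.default_rng(0)
n_obs, n_runs = 1000, 4

# Observations are stored as columns per run (obs_axis=0); the same labels
# apply to every run, so y can stay one dimensional.
y = rng.integers(0, 2, size=n_obs).astype(np.int64)
scores = rng.random(size=(n_obs, n_runs))
thresholds = np.array([0.25, 0.5, 0.75])

cm = confusion_matrices_runs_thresholds(y, scores, thresholds, obs_axis=0)
# Expected layout: (n_thresholds, 4, n_runs), where the 4 entries per slice
# are the flattened confusion matrix counts for that threshold/run pair.
print(cm.shape)  # (3, 4, 4)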
def new_project(request):
    """
    View that lets a logged-in user upload a new project.
    """
    # Assumes each user has a single related Profile; the original code looped
    # over all profiles, which attached the project to an arbitrary one.
    profile = Profile.objects.filter(user=request.user).first()
    if request.method == 'POST':
        form = ProjectForm(request.POST, request.FILES)
        if form.is_valid():
            pro = form.save(commit=False)
            pro.profile = profile
            pro.user = request.user
            pro.save()
            return redirect('landing')
    else:
        form = ProjectForm()

    return render(request, 'new_pro.html', {"form": form})
5,337,281
def move_to_next_pixel(fdr, row, col): """ Given fdr (flow direction array), row (current row index), col (current col index). return the next downstream neighbor as row, col pair See How Flow Direction works http://desktop.arcgis.com/en/arcmap/latest/tools/spatial-analyst-toolbox/how-flow-direction-works.htm D8 flow direction grid | 32 | 64 | 128 | | 16 | X | 1 | | 8 | 4 | 2 | """ # get the fdr pixel value (x,y) value = fdr[row, col] # Update the row, col based on the flow direction if value == 1: col += 1 elif value == 2: col += 1 row += 1 elif value == 4: row += 1 elif value == 8: row += 1 col -= 1 elif value == 16: col -= 1 elif value == 32: row -= 1 col -= 1 elif value == 64: row -= 1 elif value == 128: row -= 1 col += 1 else: # Indetermine flow direction, sink. Do not move. row = row col = col return (row, col)
5,337,282
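A self-contained example that traces a flow path across a toy 3x3 D8 flow-direction grid with move_to_next_pixel until the path leaves the array; the grid values are made up for illustration.

import numpy as np

# Toy D8 grid: every cell drains east (1) except the last column,
# which drains south-east (2).
fdr = np.array([
    [1, 1, 2],
    [1, 1, 2],
    [1, 1, 2],
])

row, col = 1, 0
path = [(row, col)]
while True:
    row, col = move_to_next_pixel(fdr, row, col)
    if not (0 <= row < fdr.shape[0] and 0 <= col < fdr.shape[1]):
        break  # flowed off the edge of the grid
    path.append((row, col))

print(path)  # [(1, 0), (1, 1), (1, 2)]; the next step would leave the grid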
def AddBetaArgs(parser): """Declare beta flag and positional arguments for this command parser.""" flags.AddExternalMasterGroup(parser) flags.AddInstanceResizeLimit(parser) flags.AddNetwork(parser) flags.AddAllocatedIpRangeName(parser) labels_util.AddCreateLabelsFlags(parser)
5,337,283
def test_timeout_write_property(mqtt_servient): """Timeouts can be defined on Property writes.""" exposed_thing = next(mqtt_servient.exposed_things) prop_name = next(six.iterkeys(exposed_thing.properties)) td = ThingDescription.from_thing(exposed_thing.thing) mqtt_mock = _build_hbmqtt_mock(_effect_raise_timeout) timeout = random.random() @tornado.gen.coroutine def test_coroutine(): with patch('wotpy.protocols.mqtt.client.hbmqtt.client.MQTTClient', new=mqtt_mock): mqtt_client = MQTTClient() with pytest.raises(ClientRequestTimeout): yield mqtt_client.write_property(td, prop_name, Faker().pystr(), timeout=timeout) run_test_coroutine(test_coroutine)
5,337,284
def test_method_delete_not_allowed(flask_app, api_version): """Test Depot ID 96556/3. Verify the TAC API does not support HTTP DELETE and returns HTTP 405 METHOD NOT ALLOWED. """ rv = flask_app.delete(url_for('{0}.tac_api'.format(api_version), tac='35567907')) assert rv.status_code == 405 assert b'Method Not Allowed' in rv.data
5,337,285
def test_negative_make_bucket_invalid_name(  # pylint: disable=invalid-name
        log_entry):
    """Test make_bucket() with invalid bucket name."""

    # Get a unique bucket_name
    bucket_name = _gen_bucket_name()
    # Default location
    log_entry["args"] = {
        "location": "default value ('us-east-1')",
    }
    # Create an array of invalid bucket names to test
    invalid_bucket_name_list = [
        bucket_name + '.',
        '.' + bucket_name,
        bucket_name + '...abcd'
    ]
    for name in invalid_bucket_name_list:
        log_entry["args"]["bucket_name"] = name
        try:
            # Try to create a bucket with the invalid name and default location
            _call(log_entry, _CLIENT.make_bucket, name)
            # Check if bucket was created properly
            _call(log_entry, _CLIENT.bucket_exists, name)
            # Remove bucket
            _call(log_entry, _CLIENT.remove_bucket, name)
        except InvalidBucketError:
            # The invalid name is expected to be rejected
            pass
    # Test passes
    log_entry["method"] = _CLIENT.make_bucket
    log_entry["args"]['bucket_name'] = invalid_bucket_name_list
5,337,286
def test_fetch_industry_headlines_happypath(): """validate expected layout from yahoo raw feeds""" feed = yahoo.news.fetch_finance_headlines_yahoo( 'AAPL', uri=yahoo.news.INDUSTRY_NEWS_URL ) print(feed[0]) print(feed[0].keys()) assert isinstance(feed, list) [ helpers.validate_schema(article, 'yahoo/yahoo_industry_headline.schema') for article in feed ]
5,337,287
def clean_text(post): """ Function to filter basic greetings and clean the input text. :param post: raw post :return: clean_post or None if the string is empty after cleaning """ post = str(post) """ filtering basic greetings """ for template in TEMPLATES: if template in str(post).lower(): post = post.replace(template, '') """ clean text """ raw_text = str(post).replace('\'', ' ') translator = re.compile('[%s]' % re.escape(string.punctuation)) clean_text_sub = translator.sub(' ', raw_text) clean_text = re.sub(' +', ' ', clean_text_sub).strip() if clean_text == 'nan' or clean_text is None: return '' else: return clean_text
5,337,288
def turnIsLegal(speed, unitVelocity, velocity2):
    """
    Checks whether the angle between the current direction of travel and a
    candidate velocity stays within the maximum allowed turn angle.

    Assumes ``unitVelocity`` has unit length and ``velocity2`` has magnitude
    ``speed``, so only the relative angle needs to be checked.

    :param speed: magnitude of velocity2
    :param unitVelocity: unit vector of the current velocity
    :param velocity2: candidate velocity vector
    :return: True if the cosine of the turn angle exceeds MAX_TURN_ANGLE_COS
    """
    cosAngle = np.dot(unitVelocity, velocity2) / speed
    return cosAngle > MAX_TURN_ANGLE_COS
5,337,289
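A short sketch of the check above; the value of MAX_TURN_ANGLE_COS is an assumption made only for this example (the real threshold lives in the surrounding module).

import numpy as np

# Suppose the module defines MAX_TURN_ANGLE_COS = np.cos(np.radians(60)); then:
speed = 5.0
unit_velocity = np.array([1.0, 0.0])
gentle_turn = speed * np.array([np.cos(np.radians(30)), np.sin(np.radians(30))])
sharp_turn = speed * np.array([np.cos(np.radians(120)), np.sin(np.radians(120))])

print(turnIsLegal(speed, unit_velocity, gentle_turn))  # True  (30 degree turn)
print(turnIsLegal(speed, unit_velocity, sharp_turn))   # False (120 degree turn)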
def convert_selection_vars_to_common_effects(G: ADMG) -> nx.DiGraph:
    """Convert all bidirected edges to explicit unobserved common causes.

    Parameters
    ----------
    G : ADMG
        A causal graph with bidirected edges.

    Returns
    -------
    G_copy : ADMG
        A causal graph that is a fully specified DAG with an unobserved
        confounder node added in place of each bidirected edge.
    """
    uc_label = "Unobserved Confounders"
    G_copy = nx.DiGraph(G.dag)

    # for every bidirected edge, add a new node
    for idx, latent_edge in enumerate(G.c_component_graph.edges):
        G_copy.add_node(f"U{idx}", label=uc_label, observed="no")

        # then add edges from the new unobserved confounder to both endpoints
        G_copy.add_edge(f"U{idx}", latent_edge[0])
        G_copy.add_edge(f"U{idx}", latent_edge[1])
    return G_copy
5,337,290
def plot_confusion_matrix(estimator, X, y_true, *, labels=None, sample_weight=None, normalize=None, display_labels=None, include_values=True, xticks_rotation='horizontal', values_format=None, cmap='viridis', ax=None, colorbar=True): """Plot Confusion Matrix. Read more in the :ref:`User Guide <confusion_matrix>`. Parameters ---------- estimator : estimator instance Fitted classifier or a fitted :class:`~sklearn.pipeline.Pipeline` in which the last estimator is a classifier. X : {array-like, sparse matrix} of shape (n_samples, n_features) Input values. y_true : array-like of shape (n_samples,) Target values. labels : array-like of shape (n_classes,), default=None List of labels to index the matrix. This may be used to reorder or select a subset of labels. If `None` is given, those that appear at least once in `y_true` or `y_pred` are used in sorted order. sample_weight : array-like of shape (n_samples,), default=None Sample weights. normalize : {'true', 'pred', 'all'}, default=None Normalizes confusion matrix over the true (rows), predicted (columns) conditions or all the population. If None, confusion matrix will not be normalized. display_labels : array-like of shape (n_classes,), default=None Target names used for plotting include_values : bool, default=True Includes values in confusion matrix. xticks_rotation : {'vertical', 'horizontal'} or float, \ default='horizontal' Rotation of xtick labels. values_format : str, default=None Format specification for values in confusion matrix. If `None`, the format specification is 'd' or '.2g' whichever is shorter. cmap : str or matplotlib Colormap, default='viridis' Colormap recognized by matplotlib. ax : matplotlib Axes, default=None Axes object to plot on. If `None`, a new figure and axes is created. colorbar : bool, default=True Whether or not to add a colorbar to the plot. .. versionadded:: 0.24 Returns ------- display : :class:`~sklearn.metrics.ConfusionMatrixDisplay` See Also -------- confusion_matrix : Compute Confusion Matrix to evaluate the accuracy of a classification. ConfusionMatrixDisplay : Confusion Matrix visualization. Examples -------- >>> import matplotlib.pyplot as plt # doctest: +SKIP >>> from sklearn.datasets import make_classification >>> from sklearn.metrics import plot_confusion_matrix >>> from sklearn.model_selection import train_test_split >>> from sklearn.svm import SVC >>> X, y = make_classification(random_state=0) >>> X_train, X_test, y_train, y_test = train_test_split( ... X, y, random_state=0) >>> clf = SVC(random_state=0) >>> clf.fit(X_train, y_train) SVC(random_state=0) >>> plot_confusion_matrix(clf, X_test, y_test) # doctest: +SKIP >>> plt.show() # doctest: +SKIP """ y_pred = estimator.predict(X) cm = confusion_matrix(y_true, y_pred, sample_weight=sample_weight, labels=labels, normalize=normalize) disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=display_labels) return disp.plot(include_values=include_values, cmap=cmap, ax=ax, xticks_rotation=xticks_rotation, values_format=values_format, colorbar=colorbar)
5,337,291
def mean_binary_proto_to_np_array(caffe, mean_binproto):
    """
    :param caffe: caffe instance from import_caffe() method
    :param mean_binproto: full path to the model's image-mean .binaryproto created from train.lmdb.
    :return: the image mean as a numpy array
    """
    # I don't have my image mean in .npy file but in binaryproto. I'm converting it to a numpy array.
    # Took me some time to figure this out.
    blob = caffe.proto.caffe_pb2.BlobProto()
    with open(mean_binproto, 'rb') as f:
        data = f.read()
    blob.ParseFromString(data)
    mu = np.array(caffe.io.blobproto_to_array(blob))
    mu = mu.squeeze()  # The output array had one redundant dimension.
    return mu
5,337,292
def addnplay(tag):
    """
    looks up a song title or album in MPD, adds the hits to the playlist and
    plays them if the playlist had been empty. adheres to toggle_clr_plist.

    :param tag: string of song or album title, case sensitive
                format: /^(t|a):.+/  (t = title, a = album)
    """
    with connection(client):
        try:
            m = re.match("^(t|a):(.+)", tag)
            if not m:
                raise ValueError("wrong card format")
            tag, value = m.group(1), m.group(2)
            if tag == "t":
                hit = client.find("title", value)
            elif tag == "a":
                hit = client.find("album", value)
            if not hit:
                raise Exception("file not found")
            if pstate["clr_plist"] == True:
                client.clear()
            for i in hit:
                client.add(i["file"])
            if pstate["clr_plist"] == True:
                client.play()
            else:
                # if the playlist was empty before, start playing?
                # TEST this, otherwise handle it like in load_playlist()
                plist = client.playlistinfo()
                if len(plist) == len(hit):
                    client.play(0)
                else:
                    kitt()
                    trigger_idler()
        except ValueError as e:
            print(e)
            kitt(BLUE)
            if not run["sleep_mode"]:
                show_playlist(client)
        except musicpd.CommandError as e:
            # must come before the generic Exception handler, otherwise it can
            # never be reached
            print("error in addnplay(): " + str(e))
        except Exception as e:
            print(e)
            kitt(RED)
            if not run["sleep_mode"]:
                show_playlist(client)
5,337,293
async def get_db() -> sqlalchemy.engine.base.Connection: """Get a SQLAlchemy database connection. Uses this environment variable if it exists: DATABASE_URL=dialect://user:password@host/dbname Otherwise uses a SQLite database for initial local development. """ load_dotenv() database_url = os.getenv('DATABASE_URL', default='sqlite:///temporary.db') engine = sqlalchemy.create_engine(database_url) connection = engine.connect() try: yield connection finally: connection.close()
5,337,294
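A hedged sketch of how a generator dependency like get_db is typically consumed in FastAPI; the router, route path and the SELECT 1 probe are placeholders added for illustration, not part of the original module.

# Hypothetical FastAPI route using the dependency above.
import sqlalchemy
from fastapi import APIRouter, Depends

router = APIRouter()

@router.get('/health')
async def health(connection: sqlalchemy.engine.base.Connection = Depends(get_db)):
    """Run a trivial query to confirm the database connection works."""
    result = connection.execute(sqlalchemy.text('SELECT 1')).scalar()
    return {'db_ok': result == 1}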
async def stop_project(ctx, services: List[str], show_status=True): """ Stops a project by stopping all it's services (or a subset). If show_status is true, shows status after that. """ project = ctx.system_config["project"] engine = ctx.engine if len(services) < 1: return echo("Stopping services...") echo() ctx.progress_bars = _build_progress_bars(services) ctx.start_stop_errors = [] try: async for service_name, status, finished in engine.stop_project(project, services): _handle_progress_bar(service_name, status, finished, ctx.progress_bars, ctx.start_stop_errors) except Exception as err: raise RiptideCliError("Error stopping the services", ctx) from err for bar in reversed(ctx.progress_bars.values()): bar.close() echo() display_errors(ctx.start_stop_errors, ctx) if show_status: status_project(ctx)
5,337,295
def delete_channel(u, p, cid): """ Delete an existing service hook. """ c = Channel.query.filter_by( id=cid, project_id=p.id ).first() if not c: # Project or channel doesn't exist (404 Not Found) return abort(404) if c.project.owner.id != g.user.id or c.project.id != p.id: # Project isn't public and the viewer isn't the project owner. # (403 Forbidden) return abort(403) if request.method == 'POST' and request.form.get('do') == 'd': c.project.channels.remove(c) db.session.delete(c) db.session.commit() return redirect(url_for('.details', p=p.name, u=u.username)) return render_template('delete_channel.html', project=c.project, channel=c )
5,337,296
def ridgeline(data, overlap=0, fill=True, labels=None, n_points=150, dist_scale=0.05):
    """
    Creates a standard ridgeline plot.

    data, list of lists.
    overlap, overlap between distributions. 1 max overlap, 0 no overlap.
    fill, matplotlib color to fill the distributions.
    labels, values to place on the y axis to describe the distributions.
    n_points, number of points to evaluate each distribution function.
    dist_scale, vertical scaling factor applied to each distribution.
    """
    if overlap > 1 or overlap < 0:
        raise ValueError('overlap must be in [0 1]')
    #xx = np.linspace(np.min(np.concatenate(data)),
    #                 np.max(np.concatenate(data)), n_points)
    xx = np.linspace(0.0, 86400.0, n_points)
    curves = []
    ys = []
    for i, d in enumerate(data):
        if d[0] == -1:
            # the case when there is no sample from this activity
            curve = np.zeros(shape=xx.shape)
        else:
            curve = gaussian_kde(d).pdf(xx)
        y = i*(1.0-overlap)
        ys.append(y)
        curve = minmax_scale(curve)*dist_scale
        if fill:
            plt.fill_between(xx, np.ones(n_points)*y, curve+y,
                             zorder=len(data)-i+1, color=fill)
        plt.plot(xx, curve+y, c='k', zorder=len(data)-i+1)
    if labels:
        plt.yticks(ys, labels)
5,337,297
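A hedged usage sketch for ridgeline, assuming the module-level imports the function relies on (numpy, matplotlib.pyplot as plt, scipy's gaussian_kde, sklearn's minmax_scale); the synthetic data are seconds-of-day values to match the hard-coded 0-86400 x range, and the sentinel [-1] marks a series with no samples.

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
from sklearn.preprocessing import minmax_scale

rng = np.random.default_rng(0)

# Three "activities" with event times in seconds of the day, plus one with no samples.
data = [
    rng.normal(8 * 3600, 1800, size=200).tolist(),    # morning activity
    rng.normal(13 * 3600, 3600, size=200).tolist(),   # midday activity
    [-1],                                             # sentinel: no samples
    rng.normal(20 * 3600, 2700, size=200).tolist(),   # evening activity
]

ridgeline(data, overlap=0.4, fill='tab:blue',
          labels=['breakfast', 'lunch', 'none', 'dinner'], dist_scale=0.3)
plt.xlabel('seconds of day')
plt.show()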
def joint_img_freq_loss(output_domain, loss, loss_lambda): """Specifies a function which computes the appropriate loss function. Loss function here is computed on both Fourier and image space data. Args: output_domain(str): Network output domain ('FREQ' or 'IMAGE') loss(str): Loss type ('L1' or 'L2') loss_lambda(float): Weighting of freq loss vs image loss Returns: Function computing loss value from a true and predicted input """ def joint_loss(y_true, y_pred): return(image_loss(output_domain, loss)(y_true, y_pred) + loss_lambda * fourier_loss(output_domain, loss)(y_true, y_pred)) return joint_loss
5,337,298
def verify_auth_token(untrusted_message): """ Verifies a Auth Token. Returns a django.contrib.auth.models.User instance if successful or False. """ # decrypt the message untrusted = URLSafeTimedSerializer(settings.SSO_SECRET).loads( untrusted_message, max_age=300) # do some extra validation if 'auth_token' not in untrusted: return False if 'request_token' not in untrusted: return False # call the SSO server to verify the token params = { 'auth_token': untrusted['auth_token'], 'key': settings.SSO_KEY } message = URLSafeTimedSerializer(settings.SSO_SECRET).dumps(params) url = urljoin(settings.SSO_SERVER_PRIVATE_URL, 'sso/api/verify') + '/' response = requests.get( url, params={ 'key': settings.SSO_KEY, 'message': message }, timeout=10 ) # ensure the response is sane if response.status_code != 200: return False # build a User object from the message data = URLSafeTimedSerializer(settings.SSO_SECRET).loads( response.content, max_age=300) user_data = json.loads(data['user']) user = client.construct_user(user_data) if 'roles' in data: role_data = json.loads(data['roles']) client.synchronize_roles(user, role_data) return user
5,337,299
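A self-contained sketch of the itsdangerous signing round-trip that verify_auth_token builds on; SSO_SECRET and SSO_KEY are placeholder values here, and the real flow additionally verifies the auth token against the SSO server over HTTP.

from itsdangerous import URLSafeTimedSerializer

SSO_SECRET = "not-a-real-secret"
SSO_KEY = "demo-portal"

serializer = URLSafeTimedSerializer(SSO_SECRET)

# What the SSO client sends:
message = serializer.dumps({'auth_token': 'abc123', 'key': SSO_KEY})

# What the receiving side does, rejecting messages older than 300 seconds:
untrusted = serializer.loads(message, max_age=300)
assert untrusted['auth_token'] == 'abc123'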