content
stringlengths
22
815k
id
int64
0
4.91M
def translate_provider_for_icon(sync_server, project, site):
    """Return the provider name used to pick an icon for *site*.

    The default ('studio') site gets its own icon even when its underlying
    provider (e.g. 'local_drive') is the same as a local site's provider.
    """
    return (sync_server.DEFAULT_SITE
            if site == sync_server.DEFAULT_SITE
            else sync_server.get_provider_for_site(site=site))
28,700
def save_subsystem_config(client, fd, indent=2, name=None):
    """Write the current (live) configuration of an SPDK subsystem to *fd*.

    Args:
        client: RPC client used to query the running SPDK application.
        fd: opened file descriptor where data will be saved
        indent: Indent level. Value less than 0 mean compact mode.
            Default is indent level 2.
        name: name of the subsystem whose configuration is fetched.
    """
    live_config = client.call('framework_get_config', {"name": name})
    _json_dump({'subsystem': name, 'config': live_config}, fd, indent)
28,701
def server_delete_ip(body=None):  # noqa: E501
    """delete server IP

    Sent by the server during shutdown. # noqa: E501

    :param body: port of the iperf server. IP and time may be empty.
    :type body: dict | bytes

    :rtype: List[InlineResponse200]
    """
    # Connexion passes the raw payload; re-parse it into the model type.
    if connexion.request.is_json:
        body = ServerAddr.from_dict(connexion.request.get_json())  # noqa: E501
    # NOTE(review): generated stub — the parsed body is currently unused.
    return 'do some magic!'
28,702
def validate_environment_variables():
    # type: () -> None
    """Load required environment variables into module globals.

    Reads the following from the environment:
        - CONFLUENCE_HOST (confluence host name)
        - WORKSPACE (workspace)

    NOTE(review): despite the name, missing variables are not rejected —
    `env.get` simply yields None. Confirm whether that is intended.
    """
    global CONFLUENCE_HOST, WORKSPACE
    CONFLUENCE_HOST = env.get("CONFLUENCE_HOST")
    WORKSPACE = env.get("WORKSPACE")
    print("[STARTUP] env variables exist")
28,703
def _UpdateDatastore(test_key, test_attributes, query_time):
  """Updates a LuciTest's disabled_test_variants, issue_keys, tags in datastore.

  This function modifies LuciTest attributes as follows:
  - Overwrites existing disabled_test_variants.
  - Adds new issue_keys to existing issue_keys,
  - Overwrites existing tags.

  Args:
    test_key (ndb.Key): Key of LuciTest entities.
    test_attributes (dict): Dictionary that contains the new property values
      for a LuciTest of the form
      {'disabled_test_variants': set(), 'issue_keys': set(), 'tags': set()}
    query_time (datetime): The time of the latest query.
  """
  # ndb tasklet style: each 'yield' awaits an asynchronous datastore call.
  test = yield test_key.get_async()
  if not test:
    # First sighting of this test: create a fresh entity with empty lists.
    test = LuciTest(key=test_key)
    test.issue_keys = []
    test.tags = []
  # Variants are replaced wholesale with the newly queried set.
  test.disabled_test_variants = test_attributes.get('disabled_test_variants', set())
  # Only issue keys not already attached to the test trigger issue creation.
  new_issue_keys = test_attributes.get('issue_keys', set()).difference(test.issue_keys)
  for new_issue_key in new_issue_keys:
    _CreateIssue(new_issue_key)
  test.issue_keys.extend(new_issue_keys)
  test.issue_keys.sort()
  test.tags = _GetUpdatedTags(test, test_attributes.get('tags', set()))
  test.tags.sort()
  test.last_updated_time = query_time
  yield test.put_async()
28,704
def extract_key_and_index(field):
    """Parse a CSV header field against the module-level KEY_TYPES regexes.

    Returns a tuple ``(key_type, key_name[, index])`` for the first matching
    key type — the index element is present only when the key is a compound
    list — or None when no regex matches.

    Arguments:
        field: csv header field
    """
    for key_type, spec in KEY_TYPES.items():
        match = re.compile(spec["regex"]).match(field)
        if match:
            return (key_type,) + match.groups()
    return None
28,705
def _normalize_heartrate_of_logfiles():
    """Normalize heartrate of each dataframe/user by dividing by the minimum
    observed during the MOVEMENTTUTORIAL. Saves changes directly to
    globals.df_list.

    Note: This is deliberately not done in the refactoring step, since the
    user might want to make plots that are easier with non-normalized
    heartrate data.
    """
    normalized = []
    for frame in globals()["df_list"]:
        if 'MOVEMENTTUTORIAL' not in frame['Gamemode'].values:
            # Frames without a tutorial are dropped from the list entirely.
            print('ERROR: No Movement tutorial')
            continue
        # End of the tutorial = latest timestamp among tutorial rows.
        tutorial_rows = frame[frame['Gamemode'] == 'MOVEMENTTUTORIAL']
        tutorial_endtime = tutorial_rows['Time'].max()
        pre_end_hr = frame[frame["Time"] < tutorial_endtime]["Heartrate"]
        baseline = pre_end_hr.min()  # use MINIMUM of tutorial
        if baseline == -1:
            print('ERROR: No Heartrate data!!!')
            baseline = 120
        if baseline == 0:
            print('ERROR: Heartrate data corrupted!!!')
            baseline = pre_end_hr.mean()
        frame["Heartrate"] = frame["Heartrate"] / baseline
        normalized.append(frame)
    globals()["df_list"] = normalized
28,706
def as_mask(indexes, length):
    """Convert index positions into a boolean mask.

    Parameters:
        indexes (LongTensor): positive indexes
        length (int): maximal possible value of indexes
    """
    out = torch.zeros(length, dtype=torch.bool, device=indexes.device)
    out[indexes] = True
    return out
28,707
def interpolate_array(x, y, smooth_rate=500):
    """Resample (x, y) onto a dense, evenly spaced grid using monotonic
    (PCHIP) interpolation.

    :param x: increasing sample positions
    :param y: sample values at *x*
    :param smooth_rate: number of points in the resampled grid (default 500)
    :return: tuple (new_x, new_y) of the dense grid and interpolated values
    """
    pchip = interpolate.PchipInterpolator(x, y)
    dense_x = np.linspace(x[0], x[-1], smooth_rate)
    return dense_x, pchip(dense_x)
28,708
def calculate_resource_utilization_for_slaves(
    slaves: Sequence[_SlaveT], tasks: Sequence[MesosTask]
) -> ResourceUtilizationDict:
    """Given a list of slaves and a list of tasks, calculate the total
    resources available in that list of slaves and the resources consumed by
    tasks running on those slaves.

    :param slaves: a list of slaves to calculate resource usage for
    :param tasks: the list of tasks running in the mesos cluster
    :returns: a dict with "free", "total" and "slave_count" keys. "free" and
        "total" are ResourceInfo tuples exposing cpus, disk, mem and gpus.
    """
    # Total = sum of every slave's advertised resources.
    totals: _Counter[str] = Counter()
    for slave in slaves:
        totals.update(Counter(filter_mesos_state_metrics(slave["resources"])))

    # Free = total minus what tasks consume and what maintenance reserves.
    free = copy.deepcopy(totals)
    for task in tasks:
        free.subtract(Counter(filter_mesos_state_metrics(task["resources"])))
    for slave in slaves:
        maintenance = filter_mesos_state_metrics(
            reserved_maintenence_resources(slave["reserved_resources"])
        )
        free.subtract(Counter(maintenance))

    return {
        "free": ResourceInfo(
            cpus=free["cpus"],
            disk=free["disk"],
            mem=free["mem"],
            gpus=free.get("gpus", 0),
        ),
        "total": ResourceInfo(
            cpus=totals["cpus"],
            disk=totals["disk"],
            mem=totals["mem"],
            gpus=totals.get("gpus", 0),
        ),
        "slave_count": len(slaves),
    }
28,709
def check_pass(value):
    """Validator that accepts any value.

    Used for 'checking' things like the workshop address, for which no
    sensible validation is feasible.
    """
    return True
28,710
def state_array_to_int(s):
    """Interpret state *s* as a binary representation and return its
    integer value."""
    binary_repr = state_array_to_string(s)
    return int(binary_repr, 2)
28,711
def async_task(coro, loop=None, error_cb=None):
    """Schedule *coro* as a task, always logging unhandled exceptions.

    asyncio silently drops exceptions of tasks whose result is never
    retrieved; this wrapper attaches a done-callback that logs them and
    forwards them to the loop's exception handler.

    Args:
        coro: the coroutine to schedule.
        loop: event loop to use; defaults to the current event loop.
            (BUGFIX: the default was previously ``asyncio.get_event_loop()``
            evaluated once in the signature at import time, so a loop created
            later was never used and import could raise on newer Pythons.)
        error_cb: optional zero-argument callable invoked when the task fails.

    Returns:
        The scheduled future.
    """
    if loop is None:
        loop = asyncio.get_event_loop()
    future = asyncio.ensure_future(coro, loop=loop)

    def exception_logging_done_cb(future):
        try:
            e = future.exception()
        except asyncio.CancelledError:
            # Cancellation is not an error.
            return
        if e is None:
            return
        log.critical('Unhandled exception in async future: {}: {}\n{}',
                     type(e).__name__, e,
                     ''.join(traceback.format_tb(e.__traceback__)))
        if error_cb is not None:
            error_cb()
        loop.call_exception_handler({
            'message': 'Unhandled exception in async future',
            'future': future,
            'exception': e,
        })

    future.add_done_callback(exception_logging_done_cb)
    return future
28,712
def is_valid_charts_yaml(content):
    """
    Check if 'content' contains mandatory keys

    :param content: parsed YAML file as list of dictionary of key values
    :return: True if dict contains mandatory values, else False
    """
    required = ['chart_name', 'helm_repo_name', 'name_space', 'values_file',
                'private_image']
    string_keys = ['chart_name', 'helm_repo_name', 'name_space', 'values_file']
    # Iterate on each list cell
    for chart_details in content:
        # Every mandatory key must be present with a non-None value.
        # (The original also checked `x in chart_details`, which is implied
        # by `.get(x) is not None`.)
        if any(chart_details.get(key) is None for key in required):
            return False
        # String-valued keys (isinstance instead of `type(...) is str`).
        if not all(isinstance(chart_details[key], str) for key in string_keys):
            return False
        # Boolean-valued keys.
        if not isinstance(chart_details['private_image'], bool):
            return False
        # List-valued keys.
        if not isinstance(chart_details.get('extra_executes'), list):
            return False
    return True
28,713
def run_watcher(pattern: str, command: str, run_on_start: bool = False) -> None:
    """Watch files matching the glob *pattern* and run *command* on change.

    Every SLEEP_TIME_SEC the mtime of each matched file is compared with the
    last observed value; if any file changed, the command runs once for the
    whole batch. With *run_on_start*, the command also runs once before the
    watch loop begins.
    """
    if run_on_start:
        run_bash_command(command)

    mtimes = {
        path: os.path.getmtime(path)
        for path in glob.glob(pattern)
        if os.path.isfile(path)
    }
    while True:
        time.sleep(SLEEP_TIME_SEC)
        # Collect files whose modification time moved since last check.
        changed = {}
        for path, last_seen in mtimes.items():
            current = os.path.getmtime(path)
            if current != last_seen:
                changed[path] = current
        # Batch the runs so that we run only once if any file changed.
        if changed:
            _ = run_bash_command(command)
            mtimes.update(changed)
28,714
def get_trader_fcas_availability_agc_status_condition(params) -> bool:
    """Get FCAS availability AGC status condition.

    AGC must be enabled for regulation FCAS; contingency FCAS offers pass
    this check unconditionally.

    :param params: dict with 'trade_type' and 'agc_status' keys
    :return: True if the AGC status condition is satisfied
    """
    # Regulating FCAS offers require AGC to be active.
    if params['trade_type'] in ('L5RE', 'R5RE'):
        # AGC is active='1', AGC is inactive='0'.
        # (Was `True if ... else False` — the comparison already yields bool.)
        return params['agc_status'] == '1'
    # Contingency FCAS offer: AGC doesn't need to be enabled.
    return True
28,715
def load_class(full_class_string):
    """Dynamically import and return the class named by a dotted-path string."""
    module_path, _, class_name = full_class_string.rpartition(".")
    module = importlib.import_module(module_path)
    # Finally, we retrieve the Class
    return getattr(module, class_name)
28,716
def request_credentials_from_console():
    """
    Interactively prompt for credentials and return them as
    (username, password).
    """
    # NOTE(review): raw_input is Python 2 only — under Python 3 this raises
    # NameError; use input() there. Confirm the project's target version.
    username = raw_input('Username: ')
    password = raw_input('Password: ')
    return username, password
28,717
def prettify_url(url):
    """Return a URL without its schema and without trailing slashes."""
    if not url:
        # Empty string or None: nothing to prettify.
        return url
    _schema, separator, remainder = url.partition('//')
    path = remainder if separator else url
    return path.rstrip('/')
28,718
def CDLMORNINGDOJISTAR(data: xr.DataArray, penetration: float = 0.3) -> xr.DataArray:
    """
    Morning Doji Star (Pattern Recognition)
    Inputs:
        data:['open', 'high', 'low', 'close']
    Parameters:
        penetration: 0.3
    Outputs:
        double series (values are -1, 0 or 1)
    """
    # Delegate to TA-Lib, applied per series along the TIME dimension using
    # the OHLC fields; TA-Lib emits -100/0/100, so result_divider=100
    # rescales the output to -1/0/1.
    return multiple_series_call(talib.CDLMORNINGDOJISTAR, data, ds.TIME, ds.FIELD,
                                [f.OPEN, f.HIGH, f.LOW, f.CLOSE],
                                [penetration], result_divider=100)
28,719
def panerror_to_dict(obj):
    """Serializer function for POCS custom exceptions."""
    match = error_pattern.search(str(obj.__class__))
    if not match:
        raise ValueError(f"Unexpected obj type: {obj}, {obj.__class__}")
    return {
        "__class__": "PanError",
        "exception_name": match.group(1),
        "args": obj.args,
    }
28,720
def build_class_instance(module_path: str, init_params: Optional[dict] = None):
    """
    Create an object instance from absolute module_path string.

    Parameters
    ----------
    module_path: str
        Full module_path that is valid for your project or some external
        package.
    init_params: optional dict
        These parameters will be used as init parameters for the given type.

    Returns
    -------
    Some object instance
    """
    cls = get_type_from_module_path(module_path=module_path)
    return cls(**(init_params or {}))
28,721
def check_line(group, flag, value):
    """Check a single (group, flag, value) line of the initialization file.

    Exits the program via sys.exit with an explanatory message when the
    value fails validation for its group/flag combination.
    """
    # Check for admissible group/flag combinations.
    assert group in STRUCTURE.keys()
    assert flag in STRUCTURE[group]

    # Check the values for the flags.
    try:
        if (group, flag) == ('BASICS', 'periods'):
            assert isinstance(value, int)
            assert value > 0

        for arg in ['OCCUPATION A', 'OCCUPATION B']:
            # BUGFIX: was `(group, flag) in (arg, 'coeff')`, which compares a
            # tuple against two strings and could never be True.
            if (group, flag) == (arg, 'coeff'):
                assert isinstance(value, float)
                assert np.isfinite(value)

        for arg in ['max', 'start']:
            # BUGFIX: was `(group, flag) in ('EDUCATION', arg)` (same issue).
            if (group, flag) == ('EDUCATION', arg):
                assert isinstance(value, int)
                assert np.isfinite(value)
                assert value >= 0

        if (group, flag) == ('HOME', 'coeff'):
            assert isinstance(value, float)
            assert np.isfinite(value)

        if group == 'SIMULATION':
            # (A second, redundant SIMULATION block checking a subset of
            # these flags was removed.)
            if flag in ['agents', 'draws', 'seed']:
                assert isinstance(value, int)
                assert value > 0
            if flag in ['file']:
                assert isinstance(value, str)
            if flag in ['tau']:
                assert isinstance(value, float)
                # BUGFIX: was `np.isfinite(float)` — checked the type object
                # instead of the value.
                assert np.isfinite(value)
                assert value > 0

        if group == 'SOLUTION':
            if flag in ['draws', 'seed']:
                assert isinstance(value, int)
                assert np.isfinite(value)
            if flag in ['store']:
                assert value in [True, False]

        if group == 'PROGRAM':
            if flag in ['debug']:
                assert value in [True, False]
            if flag in ['version']:
                assert value in ['FORTRAN', 'PYTHON']
                if value == 'FORTRAN':
                    # The FORTRAN executable must have been built.
                    fname = EXEC_DIR + '/resfort_scalar'
                    assert os.path.exists(fname)
            if flag in ['procs']:
                assert isinstance(value, int)
                assert value > 0

        if group == 'PARALLELISM':
            if flag in ['flag'] and value:
                # Parallel master executable must exist when enabled.
                fname = EXEC_DIR + '/resfort_parallel_master'
                assert os.path.exists(fname)
            if flag in ['procs']:
                assert isinstance(value, int)
                assert value > 0

        if group == 'DERIVATIVE':
            if flag in ['version']:
                assert value in ['forward-differences']
            if flag in ['eps']:
                assert isinstance(value, float)
                assert value > 0

        if group == 'SCALING':
            if flag in ['flag']:
                assert value in [True, False]
            if flag in ['minimum']:
                assert isinstance(value, float)
                assert value > 0

        if group == 'INTERPOLATION':
            if flag in ['flag']:
                assert value in [True, False]
            if flag in ['points']:
                assert isinstance(value, int)
                assert value > 0

    except AssertionError:
        msg = '\n Misspecified initialization file (group, flag): '
        msg += group + ', ' + flag + '\n'
        sys.exit(msg)
28,722
def generate_experiment_file_from_videos(video_root, experiment_root, video_name):
    """Generate an activity-detection experiment specification for one video.

    :param video_root: Root folder of the videos
    :param experiment_root: Root folder where experiment specification would
        be stored in yml file
    :param video_name: Name of the video
    :return None
    :raises Exception: if the assembled experiment specification is invalid
    """
    base_name = os.path.splitext(video_name)[0]
    experiment_file_name = base_name + "_experiment.yml"

    diva_experiment = du.experiment()
    diva_experiment.set_type(du.experiment_type.activity_detection)

    source = diva_experiment.get_input()
    source.set_dataset_id(video_name)
    source.set_frame_rate_Hz(30)
    source.set_video_file_source(video_root, video_name)

    _generate_dummy_output(diva_experiment)

    if not diva_experiment.is_valid():
        raise Exception("Invalid experiment file")
    diva_experiment.write_experiment(
        os.path.join(experiment_root, experiment_file_name))
28,723
def maintainers_mapper(maintainers, package):
    """
    Update package maintainers and return package.
    https://docs.npmjs.com/files/package.json#people-fields-author-contributors
    npm also sets a top-level "maintainers" field with your npm user info.
    """
    # note this is the same code as contributors_mappers... should be refactored
    # Normalize: a single string/dict becomes a one-element list.
    people = maintainers if isinstance(maintainers, list) else [maintainers]
    parties = []
    for person in people:
        name, email, url = parse_person(person)
        parties.append(models.Party(type=models.party_person, name=name,
                                    email=email, url=url))
    package.maintainers = parties
    return package
28,724
def calc_original_pressure(pressure_ratio):
    """Recover the original pressure value from a ratio built with
    AUDITORY_THRESHOLD.

    The result is only correct if the ratio was built using
    AUDITORY_THRESHOLD.

    :param pressure_ratio: the pressure ratio to convert back
    :return: the pressure value the ratio is based on
    """
    threshold_squared = AUDITORY_THRESHOLD ** 2
    return threshold_squared * pressure_ratio
28,725
def validate(conf):
    """Run every validation check against *conf*."""
    check_branch_commit_exists(conf)
    check_ocaml_version(conf)
    # Keys that must be unique across the configuration.
    for key in ('run_path_tag', 'codespeed_name'):
        check_unique_keys(conf, key)
    check_run_path_tag_length(conf)
28,726
def normalize_field_names(fields):
    """
    Map field names to a normalized form to check for collisions like
    'coveredText' vs 'covered_text'
    """
    normalized = set()
    for name in fields:
        normalized.add(name.replace('_', '').lower())
    return normalized
28,727
def unzip(path_to_zip_file, directory_to_extract_to='.'):
    """Extract all members of a zip archive.

    :param path_to_zip_file: path of the zip archive to extract
    :param directory_to_extract_to: target directory (default: current dir)
    """
    # Context manager guarantees the archive handle is closed even when
    # extraction raises (the original leaked the handle on error).
    with zipfile.ZipFile(path_to_zip_file, 'r') as archive:
        archive.extractall(directory_to_extract_to)
28,728
def which(program):
    # type: (str) -> str
    """Like UNIX which: return the first location of *program* on the PATH.

    If a full location to the program is given, its directory is searched
    first. On platforms that define PATHEXT (Windows), each extension is
    also tried for every candidate path.

    :raises ValueError: when the program cannot be found on the PATH.
    """
    dirname, progname = os.path.split(program)
    search_dirs = [d for d in [dirname] + os.environ['PATH'].split(os.pathsep) if d]
    candidates = [os.path.join(d, progname) for d in search_dirs]
    pathext = os.environ.get('PATHEXT')
    if pathext is not None:
        extensions = [''] + pathext.split(os.pathsep)
        candidates = [base + ext
                      for base, ext in itertools.product(candidates, extensions)]
    for candidate in candidates:
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    raise ValueError("Cannot find program '%s' in your PATH" % program)
28,729
def powspec_disc_n(n, fs, mu, s, kp, km, vr, vt, tr):
    """Return the n'th Lorentzian and its width"""
    # Mean period: deterministic passage time plus refractory time tr.
    # (ifana.LIF().Tdp presumably computes the LIF passage time from the
    # drive mu, noise s and thresholds vr/vt — confirm against ifana.)
    Td = ifana.LIF().Tdp(mu, s, vr, vt) + tr
    # Two-state (kp/km rate) occupation factor over the refractory window.
    Ppp = (kp*exp(-(kp+km)*tr)+km)/(kp+km)
    # Effective rate: sets the half-width of each Lorentzian peak.
    kpbar = (kp*(Td-tr)-log(Ppp))/Td
    # Lorentzian centered at the n'th harmonic f = n/Td, plus its width.
    return 1./Td * 2*kpbar/(kpbar**2 + (2*pi*(fs - n*1./Td))**2), kpbar
28,730
def list_keys(request):
    """
    Tags: keys
    ---
    Lists all added keys.
    READ permission required on key.
    ---
    """
    # NOTE: the docstring above is kept verbatim — it is likely parsed for
    # API documentation.
    return filter_list_keys(auth_context_from_request(request))
28,731
def rounder(money_dist: list, pot: int, to_coin: int = 2) -> list:
    """
    Rounds the money distribution while preserving total sum
    stolen from https://stackoverflow.com/a/44740221
    """

    def snap(amount):
        """Round *amount* to the nearest multiple of to_coin."""
        return int(to_coin * round(amount / to_coin))

    snapped = [snap(amount) for amount in money_dist]
    shortfall = pot - sum(snapped)
    assert shortfall == snap(shortfall)
    # Largest remainders get topped up first to preserve the total.
    remainders = [amount - snap(amount) for amount in money_dist]
    ranked = sorted(((rem, idx) for idx, rem in enumerate(remainders)),
                    reverse=True)
    bump = {idx for order, (_rem, idx) in enumerate(ranked) if order < shortfall}
    return [val + 1 if idx in bump else val for idx, val in enumerate(snapped)]
28,732
def test_check_spoil_contrib():
    """
    Exercise check_spoil_contrib with two mock-star configurations.

    Note that for these mock stars, since we are checking the status of the
    first star, ASPQ1 needs to be nonzero on that star or the
    check_spoil_contrib code will bail out before actually doing the check.
    """
    def run_case(row2, col2, mag2):
        """Build a two-star field and run the spoiler check on it."""
        stars = StarsTable.empty()
        stars.add_fake_star(row=0, col=0, mag=8.0, id=1, ASPQ1=1)
        stars.add_fake_star(row=row2, col=col2, mag=mag2, id=2, ASPQ1=0)
        return check_spoil_contrib(stars, np.array([True, True]), stars, .05, 25)

    # A star spoils the edge of the 8x8 (edge and then background pixel).
    bg_spoil, reg_spoil, rej = run_case(0, -5, 6.0)
    assert reg_spoil[0]

    # A star spoils just a background pixel.
    bg_spoil, reg_spoil, rej = run_case(-5.5, -5.5, 9.5)
    assert bg_spoil[0]
28,733
def test_search_term_found_in_title(admin_client, public_resource_with_metadata):
    """
    Test of direct URL querystring for a search
    Test valid JSON response
    Test index response
    Test title match in search
    """
    term = public_resource_with_metadata.title
    django_response = admin_client.get(
        '/discoverapi/?q={}'.format(term), follow=True)
    payload = json.loads(django_response.content.decode("utf-8"))
    matches = payload['resources']
    assert django_response.status_code == 200
    assert term in json.loads(matches)[0]['title']
28,734
def find_wcscorr_row(wcstab, selections):
    """
    Return an array of indices from the table (NOT HDU) 'wcstab' that
    matches the selections specified by the user.

    The row selection criteria must be specified as a dictionary with
    column name as key and value(s) representing the valid desired row
    values. For example, {'wcs_id':'OPUS','extver':2}.
    """
    mask = None
    for column, wanted in selections.items():
        column_mask = wcstab.field(column) == wanted
        if mask is None:
            mask = column_mask.copy()
        else:
            mask = np.logical_and(mask, column_mask)
    return mask
28,735
def activity_boxplot(DataFrame):
    """
    Plots the distribution of pXC50 according to binary activity labels
    with a boxplot.
    """
    inactive_vals = DataFrame[DataFrame['Activity'] == 0]['pXC50']
    active_vals = DataFrame[DataFrame['Activity'] == 1]['pXC50']

    plt.figure(figsize=(10, 7.5))
    mean_marker = dict(marker='+', markeredgecolor='black')
    plt.boxplot([inactive_vals, active_vals],
                vert=False,
                widths=0.6,
                showmeans=True,
                meanprops=mean_marker,
                labels=['inactives', 'actives'])
    plt.xlabel(r'pXC$_{50}$', size=11)
    plt.title(r'pXC$_{50}$ distribution over class', size=13)
    plt.show()
28,736
def populate_pair_lists(pair, blacklist, blackpairs, badpairs, newpairs, tickerlist):
    """Check pair conditions."""
    if pair not in tickerlist:
        # Unknown ticker symbol.
        badpairs.append(pair)
    elif pair in blacklist:
        # Known ticker, but on the 3Commas blacklist.
        blackpairs.append(pair)
    else:
        newpairs.append(pair)
28,737
def togglePopup(
    id,
    view,
    params,
    title="",
    position=None,
    showCloseIcon=True,
    draggable=True,
    resizable=False,
    modal=False,
    overlayDismiss=False,
    sessionId="current_session",
    pageId="current_page",
    viewPortBound=False,
):
    """Toggle a popup: open it if not yet open, otherwise close it.

    Args:
        id (str): A unique popup string; used to close the popup from other
            popup or script actions.
        view (str): Path to the View to use in the popup.
        params (dict): Key-value input parameters for the View. Optional.
        title (str): Title-bar text; an empty string hides the title bar.
            Defaults to "". Optional.
        position (dict): Position keys (left, top, right, bottom, width,
            height). Defaults to the center of the window. Optional.
        showCloseIcon (bool): Show the close icon. Defaults to True. Optional.
        draggable (bool): Allow dragging. Defaults to True. Optional.
        resizable (bool): Allow resizing. Defaults to False. Optional.
        modal (bool): Make the popup modal (the only interactive view).
            Defaults to False. Optional.
        overlayDismiss (bool): Allow dismissing a modal popup by clicking
            outside it. Defaults to False. Optional.
        sessionId (str): Target Session; defaults to the current Session.
            When targeting another session, pageId must be given. Optional.
        pageId (str): Target Page; defaults to the current Page. Optional.
        viewPortBound (bool): Shift (and if necessary resize) the popup to
            fit within the viewport. Defaults to False. Optional.
    """
    # Mock implementation: echo all arguments in declaration order.
    builtins.print(
        id,
        view,
        params,
        title,
        position,
        showCloseIcon,
        draggable,
        resizable,
        modal,
        overlayDismiss,
        sessionId,
        pageId,
        viewPortBound,
    )
28,738
def test_create_entity_relationships():
    """
    Given
        - indicator domain name
        - related indicators
    When
        - run the fetch incidents command
    Then
        - Validate created relationships
    """
    domain = "test.com"
    related = [{'type': 'IP', 'value': '1.1.1.1'}]

    result = create_entity_relationships(related, domain)

    first = result[0]
    assert first.get('entityA') == 'test.com'
    assert first.get('entityAType') == 'Domain'
    assert first.get('entityB') == '1.1.1.1'
    assert first.get('entityBType') == 'IP'
28,739
def process_image(image_file):
    """Run the image-processing pipeline and return candidate text regions.

    Applies, in order (the order may be rearranged within this function):
    1) gray-scale conversion
    2) morphological gradient
    3) thresholding
    4) long-line removal
    5) morphological close
    6) contour extraction

    :param image_file: image file to apply the processing to
    :return: list of cropped images for the regions presumed to contain text
    """
    image_origin = open_original(image_file)
    # TODO: automatically upscale with cv2.pyrUp() when the input is below a
    # certain size — or make upscaling configurable via configs.yml.
    # image_origin = cv2.pyrUp(image_origin)  # size up (x4); needed when the image is small
    # Grey-Scale
    image_gray = get_gray(image_origin)
    # Morph Gradient
    image_gradient = get_gradient(image_gray)
    # Threshold
    image_threshold = get_threshold(image_gradient)
    # Long line remove
    image_line_removed = remove_long_line(image_threshold)
    # Morph Close
    image_close = get_closing(image_line_removed)
    # Crop regions from the ORIGINAL image using contours of the processed one.
    contours = get_contours(image_close)
    return get_cropped_images(image_origin, contours)
28,740
def query_review_data(category, title_keyword, count, output_uri):
    """
    Query the Amazon review dataset for top reviews from a category that
    contain a keyword in their product titles. The output of the query is
    written as JSON to the specified output URI.

    :param category: The category to query, such as Books or Grocery.
    :param title_keyword: The keyword that must be included in each returned
                          product title.
    :param count: The number of results to return.
    :param output_uri: The URI where the output JSON files are stored,
                       typically an Amazon S3 bucket, such as
                       's3://example-bucket/review-output'.
    """
    with SparkSession.builder.getOrCreate() as spark:
        input_uri = f's3://amazon-reviews-pds/parquet/product_category={category}'
        df = spark.read.parquet(input_uri)
        # Verified purchases whose title contains the keyword
        # (case-insensitive); average star ratings per product, keep products
        # with at least 50 reviews, best-rated first, top `count` only.
        query_agg = df.filter(df.verified_purchase == 'Y') \
            .where(func.lower(func.col('product_title')).like(f'%{title_keyword}%')) \
            .groupBy('product_title') \
            .agg({'star_rating': 'avg', 'review_id': 'count'}) \
            .filter(func.col('count(review_id)') >= 50) \
            .sort(func.desc('avg(star_rating)')) \
            .limit(count) \
            .select(func.col('product_title').alias('product'),
                    func.col('count(review_id)').alias('review_count'),
                    func.col('avg(star_rating)').alias('review_avg_stars'))
        # One JSON dataset, replacing any previous output at the URI.
        query_agg.write.mode('overwrite').json(output_uri)
28,741
def _gftRead(url, step):
    """
    Reads in a gtf file from a specific db given the url.
    Some gtf have a certain number of header lines that are skipped however.

    Input: url where gtf is fetched from
    Input: step, number of lines to skip while reading in the frame
    Output: gtf in a pandas frame
    """
    import os
    import tempfile

    # Download to a unique temporary file instead of a fixed /tmp path:
    # the fixed path was racy between concurrent runs, non-portable, and
    # never cleaned up.
    handle, tmp_path = tempfile.mkstemp(suffix=".gtf.gz")
    os.close(handle)
    try:
        urllib.request.urlretrieve(url, tmp_path)
        gtf = pd.read_csv(tmp_path,
                          compression="gzip",
                          engine="python",
                          delimiter='\t',
                          skiprows=step,
                          header=None)
    finally:
        os.remove(tmp_path)
    return gtf
28,742
def add() -> jsonify:
    """
    Adds a new item in the server and returns the updated list to the front-end
    """
    # Items passed from the front-end.
    name = request.form['name']
    priority = request.form['priority']
    price = request.form['price'].replace(",", "")  # To prevent string to float conversion
    money = request.form['money']

    # Add the item to the server and record whether the insert succeeded.
    is_right = mysqlcommands.add_item(name, priority, price, money)
    message = constants.ADD_ITEM_SUCCESS_MESSAGE if is_right else constants.ADD_ITEM_FAILURE_MESSAGE

    # Re-allocate the budget with the newly added item included.
    my_obj = jsoncommands.get_json(constants.JSON_FILE_PATH)
    utilities.money_allocation(
        mysqlcommands.get_all_items(),
        my_obj['Total'] * my_obj['Percentage'],
        mysqlcommands.priority_count()
    )

    return jsonify({
        "color": is_right,
        "message": message,
        "allItems": mysqlcommands.get_all_items()
    })
28,743
def _get_base(**kwargs):
    """
    If the needed base does not exist, then create it, if it does exist
    create nothing and return the name of the base lxc container so
    it can be cloned.
    """
    # Profile values act as defaults; explicit kwargs override them.
    profile = get_container_profile(copy.deepcopy(kwargs.get("profile")))
    kw_overrides = copy.deepcopy(kwargs)

    def select(key, default=None):
        kw_overrides_match = kw_overrides.pop(key, _marker)
        profile_match = profile.pop(key, default)
        # let kwarg overrides be the preferred choice
        if kw_overrides_match is _marker:
            return profile_match
        return kw_overrides_match

    template = select("template")
    image = select("image")
    vgname = select("vgname")
    path = kwargs.get("path", None)
    # remove the above three variables from kwargs, if they exist, to avoid
    # duplicates if create() is invoked below.
    for param in ("path", "image", "vgname", "template"):
        kwargs.pop(param, None)

    if image:
        # Base name is derived from the image's proto, file name and hash so
        # identical images share one cached base container.
        proto = _urlparse(image).scheme
        img_tar = __salt__["cp.cache_file"](image)
        img_name = os.path.basename(img_tar)
        hash_ = salt.utils.hashutils.get_hash(
            img_tar, __salt__["config.get"]("hash_type")
        )
        name = "__base_{0}_{1}_{2}".format(proto, img_name, hash_)
        if not exists(name, path=path):
            create(
                name, template=template, image=image, path=path, vgname=vgname, **kwargs
            )
            if vgname:
                # LVM-backed container: point lxc.rootfs at the volume.
                rootfs = os.path.join("/dev", vgname, name)
                edit_conf(
                    info(name, path=path)["config"],
                    out_format="commented",
                    **{"lxc.rootfs": rootfs}
                )
        return name
    elif template:
        # Template-based base container (one per template name).
        name = "__base_{0}".format(template)
        if not exists(name, path=path):
            create(
                name, template=template, image=image, path=path, vgname=vgname, **kwargs
            )
            if vgname:
                rootfs = os.path.join("/dev", vgname, name)
                edit_conf(
                    info(name, path=path)["config"],
                    out_format="commented",
                    **{"lxc.rootfs": rootfs}
                )
        return name
    # Neither image nor template given: nothing to base a clone on.
    return ""
28,744
def calculate(formula, **params):
    """
    Calculate formula and return a dictionary of coin and amounts.

    :param formula: formula identifier, resolved via Formula.get
    :param params: forwarded to calculate_expression
    :raises InvalidFormula: when the formula cannot be resolved
    """
    resolved = Formula.get(formula)
    if resolved is None:
        # BUGFIX: the original rebound `formula` to the lookup result before
        # raising, so InvalidFormula always carried None instead of the
        # requested identifier.
        raise InvalidFormula(formula)
    if not resolved.expression:
        return {}
    return calculate_expression(resolved.expression, resolved, **params)
28,745
def plot_learning(train_loss, eval_loss, path_to_save):
    """
    Generates and saves a plot of training and development set loss curves to file.

    :param train_loss: (list) training losses
    :param eval_loss: (list) dev set losses
    :param path_to_save: (str) output directory
    :return: void
    """
    for curve in (train_loss, eval_loss):
        plt.plot(curve)
    plt.legend(['Train loss', 'Dev loss'])
    out_path = os.path.join(path_to_save, 'learning_plot.png')
    plt.savefig(out_path)
    plt.clf()
28,746
def subcommand(args, api):
    """Execute the right airgap subcommand from args."""
    # Dispatch table: action name -> handler module's subcommand().
    handlers = {
        "download-scripts": download_scripts.subcommand,
        "upload": upload.subcommand,
        "download-compliance-scripts": download_compliance_scripts.subcommand,
        "upload-compliance": upload_compliance.subcommand,
    }
    handler = handlers.get(args.action)
    if handler is None:
        print(
            f"'{args.action}' is not a valid subcommand for airgap",
            file=sys.stderr,
        )
        sys.exit(1)
    handler(args, api)
28,747
def run_models(): """ run all models for comparison --- takes a while """ # feature sets and combinations to compare # Non Deep Methods if run_ehr_baselines: run_outcome(outcome = "future_afib", features=features) run_outcome(outcome = "stroke_6m", features=features) run_outcome(outcome = "troponin", features=features) run_outcome(outcome = "mace", features=features) # MLP outcomes if run_beatnet: run_mlp_outcome(outcome='troponin') run_mlp_outcome(outcome='future_afib') run_mlp_outcome(outcome='mace') run_mlp_outcome(outcome='stroke_6m') # full trace Resnet if run_resnet: run_resnet_outcome(outcome='future_afib') run_resnet_outcome(outcome='mace') run_resnet_outcome(outcome='stroke_6m') run_resnet_outcome(outcome='troponin')
28,748
def plot_histogram(x):
    """Show a histogram of all values in *x* to visualise its range.

    :param x: array-like with a ``flatten`` method (e.g. numpy array)
    :return: None (displays a matplotlib window)
    """
    values = x.flatten()
    plt.hist(values)
    plt.show()
28,749
def pack_batch_tensor(inputs):
    """Collate variable-length 1-D LongTensors into a padded batch.

    Sequences are right-padded with id 0 up to the longest sequence in
    the batch; the mask marks real positions with 1 and padding with 0.

    :param inputs: iterable of 1-D integer tensors
    :return: dict with "input_ids" and "input_mask", both [batch, max_len]
    """
    batch_size = len(inputs)
    max_len = max(seq.size(0) for seq in inputs)
    input_ids = torch.zeros(batch_size, max_len, dtype=torch.long)
    input_mask = torch.zeros(batch_size, max_len, dtype=torch.long)
    for row, seq in enumerate(inputs):
        length = seq.size(0)
        input_ids[row, :length] = seq
        input_mask[row, :length] = 1
    return {
        "input_ids": input_ids,
        "input_mask": input_mask,
    }
28,750
def schedule(dev): """ Gets the schedule from the thermostat. """ # TODO: expose setting the schedule somehow? for d in range(7): dev.query_schedule(d) for day in dev.schedule.values(): click.echo("Day %s, base temp: %s" % (day.day, day.base_temp)) current_hour = day.next_change_at for hour in day.hours: if current_hour == 0: continue click.echo("\t[%s-%s] %s" % (current_hour, hour.next_change_at, hour.target_temp)) current_hour = hour.next_change_at
28,751
def build_network(network_class=None, dataset_dirs_args=None, dataset_dirs_class=None, dataset_dirs=None, dataset_spec_args=None, dataset_spec_class=None, dataset_spec=None, network_spec_args=None, network_spec_class=None, network_spec=None, json_spec_path=None, spec_cont=None, class_priority=False): """ build network """ # build network specification network_spec = ivy.default( network_spec, build_network_specification( dataset_dirs_args=dataset_dirs_args, dataset_dirs_class=dataset_dirs_class, dataset_dirs=dataset_dirs, dataset_spec_args=dataset_spec_args, dataset_spec_class=dataset_spec_class, dataset_spec=dataset_spec, network_spec_args=network_spec_args, network_spec_class=network_spec_class, json_spec_path=json_spec_path, spec_cont=spec_cont)) # override network_class if specified in network_spec network_class = ivy.default(ivy.default( _import_arg_specified_class_if_present(network_spec, 'network_class'), network_class, rev=class_priority), None) # verify network_class exists if not ivy.exists(network_class): raise Exception('network_class must either be specified in this build_network() method,' 'or network_class attribute must be specified in the network_spec instance') # network return network_class(network_spec)
28,752
def slt_workflow(slicetiming_txt="alt+z", SinkTag="func_preproc",
                 wf_name="slicetiming_correction", func=None):
    """
    Modified version of porcupine generated slicetiming code:

    `source: -`

    Creates a slice time corrected functional image.

    Workflow inputs:
        :param slicetiming_txt: Slice timing pattern handed to AFNI 3dTshift
            (its ``tpattern`` input), e.g. "alt+z" or a timing file.
        :param SinkTag: The output directory in which the returned images
            (see workflow outputs) could be found in a subdirectory
            directory specific for this workflow.
        :param wf_name: Name of the returned nipype workflow.
        :param func: The reoriented functional file. The original code
            assigned an undefined module-level name ``func`` (a NameError at
            call time); it is now an explicit, optional parameter.

    Workflow outputs:
        :return: slt_workflow - workflow

    Balint Kincses
    kincses.balint@med.u-szeged.hu
    2018
    """
    # This is a Nipype generator. Warning, here be dragons.
    import os
    import nipype
    import nipype.pipeline as pe
    import nipype.interfaces.utility as utility
    import nipype.interfaces.io as io
    import nipype.interfaces.afni as afni
    import PUMI.func_preproc.info.info_get as info_get
    import PUMI.utils.utils_convert as utils_convert
    import PUMI.utils.globals as globals

    # All outputs of this workflow land under <global sink dir>/<SinkTag>.
    SinkDir = os.path.abspath(globals._SinkDir_ + "/" + SinkTag)
    if not os.path.exists(SinkDir):
        os.makedirs(SinkDir)

    # Basic interface class generates identity mappings (workflow inputs).
    inputspec = pe.Node(utility.IdentityInterface(fields=['func', 'slicetiming_txt']),
                        name='inputspec')
    inputspec.inputs.func = func
    inputspec.inputs.slicetiming_txt = slicetiming_txt

    # Custom interface wrapping function TR: reads the repetition time
    # from the functional image header.
    TRvalue = pe.Node(interface=info_get.TR,
                      name='TRvalue')

    # TR is shuttled between string and float representations because
    # 3dTshift expects a string while downstream consumers want a float.
    func_str2float = pe.Node(interface=utils_convert.Str2Float,
                             name='func_str2float')
    func_str2float_2 = pe.Node(interface=utils_convert.Float2Str,
                               name='func_str2float_2')

    # Wraps command **3dTshift**: the actual slice-timing correction.
    sltcor = pe.Node(interface=afni.TShift(),
                     name='sltcor')
    sltcor.inputs.rltplus = True
    sltcor.inputs.outputtype = "NIFTI_GZ"

    # Basic interface class generates identity mappings (workflow outputs).
    outputspec = pe.Node(utility.IdentityInterface(fields=['slicetimed', 'TR']),
                         name='outputspec')

    # Generic datasink module to store structured outputs.
    ds = pe.Node(interface=io.DataSink(),
                 name='ds')
    ds.inputs.base_directory = SinkDir

    # Create a workflow to connect all those nodes
    analysisflow = nipype.Workflow(wf_name)
    analysisflow.connect(inputspec, 'slicetiming_txt', sltcor, 'tpattern')
    analysisflow.connect(func_str2float, 'float', outputspec, 'TR')
    analysisflow.connect(inputspec, 'func', sltcor, 'in_file')
    analysisflow.connect(inputspec, 'func', TRvalue, 'in_file')
    analysisflow.connect(func_str2float_2, 'str', sltcor, 'tr')
    analysisflow.connect(TRvalue, 'TR', func_str2float_2, 'float')
    analysisflow.connect(TRvalue, 'TR', func_str2float, 'str')
    analysisflow.connect(sltcor, 'out_file', ds, 'slicetimed')
    analysisflow.connect(sltcor, 'out_file', outputspec, 'slicetimed')

    return analysisflow
28,753
def lookup_service_root(service_root):
    """Dereference an alias to a service root.

    A recognized server alias such as "staging" gets turned into the
    appropriate URI. A URI gets returned as is. Any other string raises
    a ValueError.
    """
    # The retired "edge" root is mapped to its alias, which resolves to
    # production and triggers a deprecation warning downstream.
    alias = 'edge' if service_root == EDGE_SERVICE_ROOT else service_root
    return _dereference_alias(alias, service_roots)
28,754
def test_list_blocks_259(): """ Test case 259: Here is an empty bullet list item: """ # Arrange source_markdown = """- foo - - bar""" expected_tokens = [ "[ulist(1,1):-::2:]", "[para(1,3):]", "[text(1,3):foo:]", "[end-para:::True]", "[li(2,1):2::]", "[BLANK(2,2):]", "[li(3,1):2::]", "[para(3,3):]", "[text(3,3):bar:]", "[end-para:::True]", "[end-ulist:::True]", ] expected_gfm = """<ul> <li>foo</li> <li></li> <li>bar</li> </ul>""" # Act & Assert act_and_assert(source_markdown, expected_gfm, expected_tokens)
28,755
def main():
    """Entry point: run the CM2 lycopene (100) test scenario."""
    test_cm2_lycopene_100()
28,756
def test_gitlab_fetcher(url, username, repository, git_ref, tmp_path):
    """Test creating a valid fetcher for GitLab URLs.

    Patches WorkflowFetcherGit and checks that _get_gitlab_fetcher builds
    the canonical ``https://<host>/<user>/<repo>.git`` URL and forwards the
    git ref with no workflow spec.
    """
    mock_git_fetcher = Mock()
    with patch("reana_server.fetcher.WorkflowFetcherGit", mock_git_fetcher):
        parsed_url = ParsedUrl(url)
        _get_gitlab_fetcher(ParsedUrl(url), tmp_path)
        mock_git_fetcher.assert_called_once()
        # Expected normalised clone URL for the GitLab project.
        expected_repo_url = f"https://{parsed_url.hostname}/{username}/{repository}.git"
        (
            call_parsed_url,
            call_tmp_path,
            call_git_ref,
            call_spec,
        ) = mock_git_fetcher.call_args.args
        assert call_parsed_url.original_url == expected_repo_url
        assert call_tmp_path == tmp_path
        assert call_git_ref == git_ref
        assert call_spec is None
28,757
def get_initial_configuration():
    """
    Return (pos, type)
    pos: (1, 1) - (9, 9)
    type will be 2-letter strings like CSA format. (e.g. "FU", "HI", etc.)

    The board is built from the top player's pieces and mirrored through the
    board centre for the bottom player, so each cell type appears twice.
    """
    warnings.warn(
        """get_initial_configuration() returns ambiguous cell state.
        Use get_initial_configuration_with_dir() instead.""",
        DeprecationWarning)
    # Top player's back rank, files 1..9.
    back_rank = ["KY", "KE", "GI", "KI", "OU", "KI", "GI", "KE", "KY"]
    top_side = {(file, 1): piece
                for file, piece in enumerate(back_rank, start=1)}
    # Bishop and rook on the second rank.
    top_side[(2, 2)] = "KA"
    top_side[(8, 2)] = "HI"
    # A full rank of pawns on the third rank.
    for file in range(1, 10):
        top_side[(file, 3)] = "FU"

    # Mirror every piece through the centre for the opposing side.
    board = {}
    for (x, y), piece in top_side.items():
        board[(x, y)] = piece
        board[(10 - x, 10 - y)] = piece
    return board
28,758
def request_pull_to_diff_or_patch(
    repo, requestid, username=None, namespace=None, diff=False
):
    """Returns the commits from the specified pull-request as patches.

    :arg repo: the `pagure.lib.model.Project` object of the current pagure
        project browsed
    :type repo: `pagure.lib.model.Project`
    :arg requestid: the identifier of the pull-request to convert to patch
        or diff
    :type requestid: int
    :kwarg username: the username of the user who forked then project when
        the project viewed is a fork
    :type username: str or None
    :kwarg namespace: the namespace of the project if it has one
    :type namespace: str or None
    :kwarg diff: a boolean whether the data returned is a patch or a diff
    :type diff: boolean
    :return: the patch or diff representation of the specified pull-request
    :rtype: str

    """
    repo = flask.g.repo

    if not repo.settings.get("pull_requests", True):
        flask.abort(404, description="No pull-requests found for this project")

    request = pagure.lib.query.search_pull_requests(
        flask.g.session, project_id=repo.id, requestid=requestid
    )

    if not request:
        flask.abort(404, description="Pull-request not found")

    if request.remote:
        # Remote PR: sources live in a locally-cached clone of the remote git.
        repopath = pagure.utils.get_remote_repo_path(
            request.remote_git, request.branch_from
        )
        parentpath = pagure.utils.get_repo_path(request.project)
    else:
        # In-instance PR: source repo is the fork (when one exists),
        # otherwise the project itself.
        repo_from = request.project_from
        parentpath = pagure.utils.get_repo_path(request.project)
        repopath = parentpath
        if repo_from:
            repopath = pagure.utils.get_repo_path(repo_from)

    repo_obj = pygit2.Repository(repopath)
    orig_repo = pygit2.Repository(parentpath)

    branch = repo_obj.lookup_branch(request.branch_from)
    commitid = None
    if branch:
        commitid = branch.peel().hex

    diff_commits = []

    if request.status != "Open":
        # Closed/merged PR: replay the stored commit range rather than
        # recomputing it from the (possibly changed) branches.
        commitid = request.commit_stop
        try:
            for commit in repo_obj.walk(commitid, pygit2.GIT_SORT_NONE):
                diff_commits.append(commit)
                if commit.oid.hex == request.commit_start:
                    break
        except KeyError:
            # This happens when repo.walk() cannot find commitid
            pass
    else:
        try:
            diff_commits = pagure.lib.git.diff_pull_request(
                flask.g.session, request, repo_obj, orig_repo, with_diff=False
            )
        except pagure.exceptions.PagureException as err:
            flask.flash("%s" % err, "error")
            return flask.redirect(
                flask.url_for(
                    "ui_ns.view_repo",
                    username=username,
                    repo=repo.name,
                    namespace=namespace,
                )
            )
        except SQLAlchemyError as err:  # pragma: no cover
            flask.g.session.rollback()
            _log.exception(err)
            flask.flash(
                "Could not update this pull-request in the database", "error"
            )

    # Oldest commit first so the patch series applies in order.
    diff_commits.reverse()
    patch = pagure.lib.git.commit_to_patch(
        repo_obj, diff_commits, diff_view=diff
    )

    return flask.Response(patch, content_type="text/plain;charset=UTF-8")
28,759
def GL(alpha, f_name, domain_start = 0.0, domain_end = 1.0, num_points = 100): """ Computes the GL fractional derivative of a function for an entire array of function values. Parameters ========== alpha : float The order of the differintegral to be computed. f_name : function handle, lambda function, list, or 1d-array of function values This is the function that is to be differintegrated. domain_start : float The left-endpoint of the function domain. Default value is 0. domain_end : float The right-endpoint of the function domain; the point at which the differintegral is being evaluated. Default value is 1. num_points : integer The number of points in the domain. Default value is 100. Examples: >>> DF_poly = GL(-0.5, lambda x: x**2 - 1) >>> DF_sqrt = GL(0.5, lambda x: np.sqrt(x), 0., 1., 100) """ # Flip the domain limits if they are in the wrong order. if domain_start > domain_end: domain_start, domain_end = domain_end, domain_start # Check inputs. checkValues(alpha, domain_start, domain_end, num_points) f_values, step_size = functionCheck(f_name, domain_start, domain_end, num_points) # Get the convolution filter. b_coeffs = GLcoeffs(alpha, num_points-1) # Real Fourier transforms for convolution filter and array of function values. B = np.fft.rfft(b_coeffs) F = np.fft.rfft(f_values) result = np.fft.irfft(F*B)*step_size**-alpha return result
28,760
def check_lint(root_dir, ignore, verbose, dry_run, files_at_a_time,
               max_line_len, continue_on_error):
    """Check for lint.

    Runs pocketlint over every lintable file below *root_dir*, in batches
    of *files_at_a_time*. Unless `continue_on_error` is selected, returns
    `False` on the first batch where lint is found, or where the lint
    checker otherwise returned failure.

    :return: Whether the check found everything OK.
    """
    # Suffixes for types of file that pocketlint can check for us.
    # ('.css' is deliberately left out for now: styles differ too much.)
    lintable_suffixes = C_LIKE_SUFFIXES + PERL_SUFFIXES + [
        '.ini',
        '.js',
        '.md',
        '.cgi',
        '.php',
        '.py',
        '.sh',
    ]
    targets = find_files(
        root_dir, ignore=ignore, suffixes=lintable_suffixes)
    base_command = ['pocketlint', '-m', '%d' % max_line_len, '--']
    all_clean = True
    for batch in chunk_file_list(targets, files_at_a_time):
        try:
            run_command(base_command + batch, verbose=verbose, dry_run=dry_run)
        except CalledProcessError:
            # pocketlint exits nonzero when it finds lint.
            if not continue_on_error:
                return False
            all_clean = False
    return all_clean
28,761
def remove_comments(s):
    """Drop full-line ``#`` comments from a shell-style text block.

    Leading/trailing whitespace of the whole block is stripped first;
    lines whose first non-blank character is ``#`` are removed.

    Examples
    --------
    >>> code = '''
    ... # comment 1
    ... # comment 2
    ... echo foo
    ... '''
    >>> remove_comments(code)
    'echo foo'
    """
    kept = []
    for line in s.strip().split("\n"):
        if line.strip().startswith("#"):
            continue
        kept.append(line)
    return "\n".join(kept)
28,762
def generate_wsl(ws):
    """
    Generates watershed line that correspond to areas of touching objects.

    :param ws: labelled watershed image (0 = background)
    :return: uint8 image with 255 on boundary pixels, 0 elsewhere
    """
    selem = square(3)
    # Erode labels, treating background as "above" every label so that
    # boundaries against background do not bleed into objects.
    eroded = ws.copy()
    eroded[eroded == 0] = eroded.max() + 1
    eroded = erosion(eroded, selem)
    eroded[ws == 0] = 0
    # The morphological gradient is nonzero exactly where labels differ,
    # i.e. where two objects touch.
    lines = dilation(ws, selem) - eroded
    lines[ws == 0] = 0
    lines[lines > 0] = 255
    return lines.astype(np.uint8)
28,763
def train_lstm_model(x, y, epochs=200, patience=10, lstm_dim=48, batch_size=128, lr=1e-3): """ Train an LSTM to predict purchase (1) or abandon (0) :param x: session sequences :param y: target labels :param epochs: num training epochs :param patience: early stopping patience :param lstm_dim: lstm units :param batch_size: batch size :param lr: learning rate :return: """ # Verfiy if GPU/CPU is being used print("Print out system device...") print(device_lib.list_local_devices()) print("Starting training now...") X_train, X_test, y_train, y_test = train_test_split(x,y) # pad sequences for training in batches max_len = max(len(_) for _ in x) X_train = pad_sequences(X_train, padding="post",value=7, maxlen=max_len) X_test = pad_sequences(X_test, padding="post", value=7, maxlen=max_len) # convert to one-hot X_train = tf.one_hot(X_train, depth=7) X_test = tf.one_hot(X_test, depth=7) y_train = np.array(y_train) y_test = np.array(y_test) # Define Model model = keras.Sequential() model.add(keras.layers.InputLayer(input_shape=(None,7))) # Masking layer ignores padded time-steps model.add(keras.layers.Masking()) model.add(keras.layers.LSTM(lstm_dim)) model.add(keras.layers.Dense(1,activation='sigmoid')) model.summary() # Some Hyper Params opt = keras.optimizers.Adam(learning_rate=lr) loss = keras.losses.BinaryCrossentropy() es = keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience, verbose=1, restore_best_weights=True) # Include wandb callback for tracking callbacks = [es, WandbCallback()] model.compile(optimizer=opt, loss=loss, metrics=['accuracy']) # Train Model model.fit(X_train, y_train, validation_data=(X_test,y_test), batch_size=batch_size, epochs=epochs, callbacks=callbacks) # return trained model # NB: to store model as Metaflow Artifact it needs to be pickle-able! return model.to_json(), model.get_weights(), model
28,764
def nash_sutcliffe_efficiency(predicted, observed):
    """Return the Nash-Sutcliffe model efficiency coefficient.

    ``predicted`` holds modelled values, ``observed`` the measurements.
    If either input contains a NaN, ``np.asarray([nan])`` is returned.
    """
    contains_nan = np.isnan(np.min(predicted)) or np.isnan(np.min(observed))
    if contains_nan:
        return np.asarray([np.nan])
    residual_ss = np.sum((predicted - observed) ** 2)
    variance_ss = np.sum((observed - observed.mean()) ** 2)
    return 1 - residual_ss / variance_ss
28,765
def display_duplicates(df: Union[pd.DataFrame, pd.Series]) -> None:
    """Print, per column, how many distinct values occur more than once.

    A Series is treated as a single-column DataFrame. When no column has
    any duplicated value, a "no duplicates" line is printed instead.
    """
    if isinstance(df, pd.core.series.Series):
        df = pd.DataFrame(df)

    total_unique_dups = 0
    print("Number of column-wise duplicates per column:")
    for col in df:
        # Rows whose value in this column occurs more than once.
        dup_rows = df[col].loc[df[[col]].duplicated(keep=False) == 1]
        n_unique = dup_rows.nunique()
        n_rows = len(dup_rows)
        if n_unique > 0:
            print(
                f" - {col}: {n_unique} unique duplicated values "
                f"({n_rows} duplicated rows)"
            )
            total_unique_dups += n_unique
    if total_unique_dups == 0:
        print("... No duplicate values in columns.")
28,766
def tf_Affine_transformer(points, theta):
    """Apply a batch of affine transforms to one shared set of grid points.

    Arguments:
        points: `Matrix` [2, np] of grid points to transform
        theta: `Matrix` [bs, 2, 3] with a batch of transformations
    """
    with tf.name_scope('Affine_transformer'):
        batch_size = tf.shape(theta)[0]
        # Replicate the shared grid once per batch element, then apply
        # A x (x_t, y_t, 1)^T -> (x_s, y_s) in a single batched matmul.
        batched_grid = tf.tile(tf.expand_dims(points, 0), [batch_size, 1, 1])
        return tf.matmul(theta, batched_grid)
28,767
def read_COCO_gt(filename, n_imgs=None, ret_img_sizes=False, ret_classes=False, bbox_gt=False):
    """
    Function for reading COCO ground-truth files and converting them to
    GroundTruthInstances format.
    :param filename: filename of the annotation.json file with all COCO
        ground-truth annotations
    :param n_imgs: number of images ground-truth is being extracted from.
        If None extract all (default None)
    :param ret_img_sizes: Boolean flag dictating if the image sizes should
        be returned
    :param ret_classes: Boolean flag dictating if the class mapping
        dictionary should be returned
    :param bbox_gt: Boolean flag dictating if the GroundTruthInstance should
        ignore the segmentation mask and only use bounding box information
    :return: ground-truth instances as GTLoader and optionally image sizes
        or class mapping dictionary if requested
    """
    coco_obj = COCO(filename)
    gt_instances = GTLoader(coco_obj, n_imgs, bbox_gt=bbox_gt)

    # Image sizes take precedence over the class map when both are asked for,
    # matching the original behaviour.
    if ret_img_sizes:
        sizes = [
            [coco_obj.imgs[img_id]['height'], coco_obj.imgs[img_id]['width']]
            for img_id in sorted(coco_obj.imgs.keys())
        ]
        return gt_instances, sizes

    if ret_classes:
        class_map = {
            coco_obj.cats[cat_id]['name']: idx
            for idx, cat_id in enumerate(sorted(coco_obj.cats.keys()))
        }
        return gt_instances, class_map

    return gt_instances
28,768
def get_metrics_from_file(metric_file):
    """Import *metric_file* and collect the metric functions it defines.

    :param str metric_file: The name of the module to look in
    :return: Tuples containing (function name, function object)
    :rtype: list
    :raises NoMetricFileFound: when the module cannot be imported
    :raises NoMetricFunctionsFound: when the module defines no metrics
    """
    try:
        module = import_module(metric_file)
        metrics = get_sorted_metric_function_tuples(module)
    except ImportError:
        raise NoMetricFileFound
    if not metrics:
        raise NoMetricFunctionsFound
    return metrics
28,769
def mnist_model(inputs, mode): """Takes the MNIST inputs and mode and outputs a tensor of logits.""" # Input Layer # Reshape X to 4-D tensor: [batch_size, width, height, channels] # MNIST images are 28x28 pixels, and have one color channel inputs = tf.reshape(inputs, [-1, 28, 28, 1]) data_format = 'channels_last' if tf.test.is_built_with_cuda(): # When running on GPU, transpose the data from channels_last (NHWC) to # channels_first (NCHW) to improve performance. # See https://www.tensorflow.org/performance/performance_guide#data_formats data_format = 'channels_first' inputs = tf.transpose(inputs, [0, 3, 1, 2]) # Convolutional Layer #1 # Computes 32 features using a 5x5 filter with ReLU activation. # Padding is added to preserve width and height. # Input Tensor Shape: [batch_size, 28, 28, 1] # Output Tensor Shape: [batch_size, 28, 28, 32] conv1 = tf.layers.conv2d( inputs=inputs, filters=32, kernel_size=[5, 5], padding='same', activation=tf.nn.relu, data_format=data_format) # Pooling Layer #1 # First max pooling layer with a 2x2 filter and stride of 2 # Input Tensor Shape: [batch_size, 28, 28, 32] # Output Tensor Shape: [batch_size, 14, 14, 32] pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2, data_format=data_format) # Convolutional Layer #2 # Computes 64 features using a 5x5 filter. # Padding is added to preserve width and height. 
# Input Tensor Shape: [batch_size, 14, 14, 32] # Output Tensor Shape: [batch_size, 14, 14, 64] conv2 = tf.layers.conv2d( inputs=pool1, filters=64, kernel_size=[5, 5], padding='same', activation=tf.nn.relu, data_format=data_format) # Pooling Layer #2 # Second max pooling layer with a 2x2 filter and stride of 2 # Input Tensor Shape: [batch_size, 14, 14, 64] # Output Tensor Shape: [batch_size, 7, 7, 64] pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2, data_format=data_format) # Flatten tensor into a batch of vectors # Input Tensor Shape: [batch_size, 7, 7, 64] # Output Tensor Shape: [batch_size, 7 * 7 * 64] pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64]) # Dense Layer # Densely connected layer with 1024 neurons # Input Tensor Shape: [batch_size, 7 * 7 * 64] # Output Tensor Shape: [batch_size, 1024] dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu) # Add dropout operation; 0.6 probability that element will be kept dropout = tf.layers.dropout( inputs=dense, rate=0.4, training=(mode == tf.estimator.ModeKeys.TRAIN)) # Logits layer # Input Tensor Shape: [batch_size, 1024] # Output Tensor Shape: [batch_size, 10] logits = tf.layers.dense(inputs=dropout, units=10) return logits
28,770
def show_local_mp4_video(file_name, width=640, height=480):
    """Renders a mp4 video on a Jupyter notebook

    Args:
        file_name (str): Path to file.
        width (int): Video width.
        height (int): Video height.

    Returns:
        obj: Video render as HTML object.
    """
    # Close the file handle deterministically; the original left it open.
    with io.open(file_name, 'rb') as video_file:
        video_encoded = base64.b64encode(video_file.read())
    return HTML(data='''<video width="{0}" height="{1}" alt="test" controls>
                        <source src="data:video/mp4;base64,{2}" type="video/mp4" />
                     </video>'''.format(width, height, video_encoded.decode('ascii')))
28,771
def change_status(sid, rev, status, **kwargs):
    """
    [INCOMPLETE]
       - DISABLE OTHER REVISION OF THE SAME SIGNTURE WHEN DEPLOYING ONE

    Change the status of a signature

    Variables:
    sid    =>  ID of the signature
    rev    =>  Revision number of the signature
    status  =>  New state

    Arguments:
    None

    Data Block:
    None

    Result example:
    { "success" : true }      #If saving the rule was a success or not
    """
    # Status taxonomy: deployed-family states, draft states, and stale
    # (service-managed) states each have different transition rules below.
    DEPLOYED_STATUSES = ['DEPLOYED', 'NOISY', 'DISABLED']
    DRAFT_STATUSES = ['STAGING', 'TESTING']
    STALE_STATUSES = ['INVALID']

    user = kwargs['user']
    # INVALID can only be set by the service itself, never via this API.
    if status == 'INVALID':
        return make_api_response("", "INVALID signature status is reserved for service use only.", 403)

    # Only admins may push a signature into a deployed-family state.
    if not user['is_admin'] and status in DEPLOYED_STATUSES:
        return make_api_response("", "Only admins are allowed to change the signature status to a deployed status.", 403)

    key = "%sr.%s" % (sid, rev)
    data = STORAGE.get_signature(key)
    if data:
        if not Classification.is_accessible(user['classification'],
                                            data['meta'].get('classification',
                                                             Classification.UNRESTRICTED)):
            return make_api_response("", "You are not allowed change status on this signature", 403)

        # A stale (INVALID) signature may only move back into a draft state.
        if data['meta']['al_status'] in STALE_STATUSES and status not in DRAFT_STATUSES:
            return make_api_response("",
                                     "Only action available while signature in {} status is to change "
                                     "signature to a DRAFT status"
                                     .format(data['meta']['al_status']), 403)

        # Deployed signatures cannot be demoted back into drafts.
        if data['meta']['al_status'] in DEPLOYED_STATUSES and status in DRAFT_STATUSES:
            return make_api_response("", "You cannot change the status of signature %s r.%s from %s to %s."
                                     % (sid, rev, data['meta']['al_status'], status), 403)

        query = "meta.al_status:{status} AND _yz_rk:{sid}* AND NOT _yz_rk:{key}"
        today = datetime.date.today().isoformat()
        uname = user['uname']

        # When moving into an active state, disable every other revision of
        # this signature that currently holds the same status.
        if status not in ['DISABLED', 'INVALID', 'TESTING']:
            for other in STORAGE.get_signatures(
                    STORAGE.list_filtered_signature_keys(
                        query.format(key=key, sid=sid, status=status)
                    )
            ):
                other['meta']['al_state_change_date'] = today
                other['meta']['al_state_change_user'] = uname
                other['meta']['al_status'] = 'DISABLED'

                other_sid = other['meta']['id']
                other_rev = other['meta']['rule_version']
                other_key = "%sr.%s" % (other_sid, other_rev)
                STORAGE.save_signature(other_key, other)

        # Record who changed the state and when, then persist.
        data['meta']['al_state_change_date'] = today
        data['meta']['al_state_change_user'] = uname
        data['meta']['al_status'] = status

        STORAGE.save_signature(key, data)
        return make_api_response({"success": True})
    else:
        return make_api_response("", "Signature not found. (%s r.%s)" % (sid, rev), 404)
28,772
def trilinear_interpolation(a: np.ndarray, factor: float) -> np.ndarray:
    """Resize an three dimensional array using trilinear
    interpolation.

    :param a: The array to resize. The array is expected to have at
        least three dimensions.
    :param factor: The amount to resize the array. Given how the
        interpolation works, you probably don't get great results
        with factor less than or equal to .5. Consider multiple
        passes of interpolation with larger factors in those cases.
    :return: A :class:ndarray object.
    :rtype: numpy.ndarray

    Usage::

        >>> import numpy as np
        >>>
        >>> a = np.array([
        ...     [
        ...             [0, 1],
        ...             [1, 0],
        ...     ],
        ...     [
        ...             [1, 0],
        ...             [0, 1],
        ...     ],
        ... ])
        >>> trilinear_interpolation(a, 2)
        array([[[0. , 0.5, 1. , 1. ],
                [0.5, 0.5, 0.5, 0.5],
                [1. , 0.5, 0. , 0. ],
                [1. , 0.5, 0. , 0. ]],
        <BLANKLINE>
               [[0.5, 0.5, 0.5, 0.5],
                [0.5, 0.5, 0.5, 0.5],
                [0.5, 0.5, 0.5, 0.5],
                [0.5, 0.5, 0.5, 0.5]],
        <BLANKLINE>
               [[1. , 0.5, 0. , 0. ],
                [0.5, 0.5, 0.5, 0.5],
                [0. , 0.5, 1. , 1. ],
                [0. , 0.5, 1. , 1. ]],
        <BLANKLINE>
               [[1. , 0.5, 0. , 0. ],
                [0.5, 0.5, 0.5, 0.5],
                [0. , 0.5, 1. , 1. ],
                [0. , 0.5, 1. , 1. ]]])
    """
    # Return the array unchanged if the array won't be magnified.
    if factor == 1:
        return a

    # Perform a defensive copy of the original array to avoid
    # unexpected side effects.
    a = a.copy()

    # Since we are magnifying the given array, the new array's shape
    # will increase by the magnification factor.
    mag_size = tuple(int(s * factor) for s in a.shape)

    # Map out the relationship between the old space and the
    # new space.
    indices = np.indices(mag_size)
    if factor > 1:
        whole = (indices // factor).astype(int)
        parts = (indices / factor - whole).astype(float)
    else:
        # Shrinking: rescale per-axis so the first and last samples of
        # the output still line up with the ends of the input.
        new_ends = [s - 1 for s in mag_size]
        old_ends = [s - 1 for s in a.shape]
        true_factors = [n / o for n, o in zip(new_ends, old_ends)]
        for i in range(len(true_factors)):
            if true_factors[i] == 0:
                true_factors[i] = .5
        whole = indices.copy()
        parts = indices.copy()
        for i in Z, Y, X:
            whole[i] = (indices[i] // true_factors[i]).astype(int)
            parts[i] = (indices[i] / true_factors[i] - whole[i]).astype(float)
    del indices

    # Trilinear interpolation determines the value of a new pixel by
    # comparing the values of the eight old pixels that surround it.
    # The hashes are the keys to the dictionary that contains those
    # old pixel values. The key indicates the position of the pixel
    # on each axis, with one meaning the position is ahead of the
    # new pixel, and zero meaning the position is behind it.
    hashes = [f'{n:>03b}'[::-1] for n in range(2 ** 3)]
    hash_table = {}

    # The original array needs to be made one dimensional for the
    # numpy.take operation that will occur as we build the tables.
    raveled = np.ravel(a)

    # Build the table that contains the old pixel values to
    # interpolate.
    for hash in hashes:
        hash_whole = whole.copy()

        # Use the hash key to adjust the which old pixel we are
        # looking at.
        for axis in Z, Y, X:
            if hash[axis] == '1':
                hash_whole[axis] += 1

                # Handle the pixels that were pushed off the far
                # edge of the original array by giving them the
                # value of the last pixel along that axis in the
                # original array.
                m = np.zeros(hash_whole[axis].shape, dtype=bool)
                m[hash_whole[axis] >= a.shape[axis]] = True
                hash_whole[axis][m] = a.shape[axis] - 1

        # Since numpy.take() only works in one dimension, we need to
        # map the three dimensional indices of the original array to
        # the one dimensional indices used by the raveled version of
        # that array.
        raveled_indices = hash_whole[Z] * a.shape[Y] * a.shape[X]
        raveled_indices += hash_whole[Y] * a.shape[X]
        raveled_indices += hash_whole[X]

        # Get the value of the pixel in the original array.
        hash_table[hash] = np.take(raveled, raveled_indices.astype(int))

    # Once the hash table has been built, clean up the working arrays
    # in case we are running short on memory.
    else:
        del hash_whole, raveled_indices, whole

    # Everything before this was to set up the interpolation. Now that
    # it's set up, we perform the interpolation. Since we are doing
    # this across three dimensions, it's a three stage process. Stage
    # one is along the X axis.
    x1 = lerp(hash_table['000'], hash_table['001'], parts[X])
    x2 = lerp(hash_table['010'], hash_table['011'], parts[X])
    x3 = lerp(hash_table['100'], hash_table['101'], parts[X])
    x4 = lerp(hash_table['110'], hash_table['111'], parts[X])

    # Stage two is along the Y axis.
    y1 = lerp(x1, x2, parts[Y])
    y2 = lerp(x3, x4, parts[Y])
    del x1, x2, x3, x4

    # And stage three is along the Z axis. Since this is the last step
    # we can just return the result.
    return lerp(y1, y2, parts[Z])
28,773
def load_data(path):
    """Split each corpus line into a normalised label distribution and tokens.

    Each line is tab-separated: an id field, a label field of the form
    ``<tag> 0:<w0> 1:<w1> ... 7:<w7>``, and a whitespace-tokenised text
    field. The eight label weights are normalised to sum to 1.

    :param path: path of the corpus file
    :return: (label_list, text_list) — two parallel lists
    """
    label_list = []
    text_list = []
    with open(path, 'r') as f:
        for line in f.readlines():
            fields = line.strip().split('\t')
            raw_scores = fields[1].strip().split()
            # Entry 0 of the label field is a tag; weights start at entry 1.
            weights = [float(raw_scores[i + 1].split(':')[1]) for i in range(8)]
            total = sum(weights)
            label_list.append([w / total for w in weights])
            text_list.append(fields[2].strip().split())
    return label_list, text_list
28,774
def invoke(request):
    """Run one prediction: transform the request, predict, transform back.

    Each stage is wrapped in a ``monitor`` context so its latency is
    recorded under the corresponding metric name.
    """
    with monitor(labels=_labels, name="transform_request"):
        transformed_request = _transform_request(request)

    with monitor(labels=_labels, name="invoke"):
        response = _model.predict(transformed_request)

    with monitor(labels=_labels, name="transform_response"):
        transformed_response = _transform_response(response)

    return transformed_response
28,775
def serializers():  # FIXME: could be much smarter
    """Return a tuple of string names of serializers.

    ``None`` (no serialization), 'pickle', 'json' and 'dill' are always
    included; 'cPickle' (Python 2), 'cloudpickle' and 'jsonpickle' are
    appended only when importable. Uses ``importlib.util.find_spec``
    instead of the deprecated ``imp`` module (removed in Python 3.12).
    """
    from importlib.util import find_spec

    def _importable(module_name):
        # find_spec returns None for a missing top-level module; it can
        # raise for malformed names, which we also treat as "not available".
        try:
            return find_spec(module_name) is not None
        except (ImportError, ValueError):
            return False

    if _importable('cPickle'):
        found = (None, 'pickle', 'json', 'cPickle', 'dill')
    else:
        found = (None, 'pickle', 'json', 'dill')
    for optional in ('cloudpickle', 'jsonpickle'):
        if _importable(optional):
            found += (optional,)
    return found
28,776
def check_job(job_name):
    """Poll until the named transcription job has finished.

    Blocks, re-checking every 30 seconds, until the check-transcription
    endpoint reports COMPLETED. Polls with a loop rather than the original
    tail recursion, so long-running jobs cannot exhaust the call stack.
    Exits the process with status 1 on a connection error.

    Arguments:
        job_name (str): the job name of the transcription job in AWS
            Transcribe.
    """
    params = {'job_name': job_name}
    url = ('https://cljehyxc6c.execute-api.us-east-1.amazonaws.com/v10/'
           'check-transcription/')
    while True:
        try:
            response = post(url, dumps(params)).json()
        except RequestException as conn_error:
            print('There was a connection error checking the transcription job')
            print(conn_error)
            exit(1)

        if response['response'] == 'COMPLETED':
            return
        sleep(30)
28,777
async def set_offset(bot, message):
    """```
    Set the offset for the DnD ping cycle.

    Usage:
    * /set_offset N
    ```"""
    # Everything after the "/set_offset " prefix is the requested offset.
    new_offset = message.content[12:].strip()
    try:
        new_offset = int(new_offset)
    except ValueError:
        # NOTE(review): non-numeric input is written to the file verbatim
        # (as a JSON string) instead of being rejected — confirm readers of
        # PING_CYCLE_OFFSET_FILE tolerate a non-integer value.
        pass
    with open(PING_CYCLE_OFFSET_FILE, 'w') as f:
        json.dump(new_offset, f)
28,778
def munge_pocket_response(resp):
    """Munge Pocket Article response.

    Args:
        resp: Raw Pocket API response; ``resp['list']`` maps item ids to
            article dicts.

    Returns:
        pd.DataFrame: One row per unique ``resolved_id`` with a unified
        ``url`` column and epoch timestamps converted to datetimes.
        An empty response yields an empty frame with the same columns.
    """
    articles = resp['list']
    result = pd.DataFrame([articles[item_id] for item_id in articles])

    keep_cols = [
        'item_id', 'resolved_id', 'given_title', 'url', 'resolved_title',
        'time_added', 'time_read', 'time_updated', 'status', 'word_count'
    ]

    # An empty response produces a DataFrame with no columns at all;
    # selecting `keep_cols` from it would raise KeyError (the previous
    # `len != 0` guard did not protect the final selection), so return a
    # well-formed empty frame instead.
    if result.empty:
        return pd.DataFrame(columns=keep_cols)

    # Prefer the resolved URL, falling back to the URL the user saved.
    result['url'] = result['resolved_url'].combine_first(result['given_url'])
    # Pocket timestamps are UNIX epoch seconds.
    for time_col in ['time_added', 'time_updated', 'time_read']:
        result[time_col] = pd.to_datetime(result[time_col], unit='s')

    return result.drop_duplicates(subset=['resolved_id'])[keep_cols]
28,779
def while_t():
    """Print a small letter 't' as a 7x4 grid of asterisks."""
    for row in range(7):
        cells = []
        for col in range(4):
            # A cell is lit on the vertical stroke (col 1), the crossbar
            # (row 3), and the curled foot (rows 5-6).
            lit = (
                col == 1
                or (row == 3 and col != 3)
                or (col == 2 and row in (3, 6))
                or (col == 3 and row == 5)
            )
            cells.append("*" if lit else " ")
        print(" ".join(cells), end=" \n")
28,780
def __abs_path(path):
    """
    Resolve a path to an absolute path, interpreting relative paths
    against the parent directory of the configuration file.

    :param path: A possibly relative path
    :return: The absolute path; relative inputs gain a trailing separator
    """
    if os.path.isabs(path):
        return path
    config_dir = os.path.abspath(os.path.join(config_path, os.pardir))
    resolved = os.path.join(os.path.relpath(config_dir), path)
    return os.path.abspath(resolved) + os.path.sep
28,781
def cgo_library(name, srcs,
                toolchain=None,
                go_tool=None,
                copts=[],
                clinkopts=[],
                cdeps=[],
                **kwargs):
  """Builds a cgo-enabled go library.

  Args:
    name: A unique name for this rule.
    srcs: List of Go, C and C++ files that are processed to build a Go
      library.
      Those Go files must contain `import "C"`.
      C and C++ files can be anything allowed in `srcs` attribute of
      `cc_library`.
    copts: Add these flags to the C++ compiler.
    clinkopts: Add these flags to the C++ linker.
    cdeps: List of C/C++ libraries to be linked into the binary target.
      They must be `cc_library` rules.
    deps: List of other libraries to be linked to this library target.
    data: List of files needed by this rule at runtime.

  NOTE:
    `srcs` cannot contain pure-Go files, which do not have `import "C"`.
    So you need to define another `go_library` when you build a go package
    with both cgo-enabled and pure-Go sources.

    ```
    cgo_library(
        name = "cgo_enabled",
        srcs = ["cgo-enabled.go", "foo.cc", "bar.S", "baz.a"],
    )

    go_library(
        name = "go_default_library",
        srcs = ["pure-go.go"],
        library = ":cgo_enabled",
    )
    ```
  """
  # Process the cgo sources first: this generates Go thunks, the gotypes
  # file, and the _cgo_* outputs consumed by the go_library call below.
  cgogen = _setup_cgo_library(
      name = name,
      srcs = srcs,
      cdeps = cdeps,
      copts = copts,
      clinkopts = clinkopts,
      go_tool = go_tool,
      toolchain = toolchain,
  )

  # Wrap the generated sources and the compiled cgo object into a normal
  # go_library so callers can depend on this target like any other.
  go_library(
      name = name,
      srcs = cgogen.go_thunks + [
          cgogen.gotypes,
          cgogen.outdir + "/_cgo_import.go",
      ],
      cgo_object = cgogen.outdir + "/_cgo_object",
      go_tool = go_tool,
      toolchain = toolchain,
      **kwargs
  )
28,782
async def test_track_template_error(hass, caplog):
    """Test tracking template with error.

    The template uses a non-existent Jinja filter (`lunch`), so rendering
    fails: the tracked callback must never fire and the error must be
    logged. Once rendering is patched to succeed, further state changes
    must not log the error again.
    """
    # `lunch` is not a registered filter, so this template cannot render.
    template_error = Template("{{ (states.switch | lunch) > 0 }}", hass)
    error_calls = []

    @ha.callback
    def error_callback(entity_id, old_state, new_state):
        # Collect invocations; the test asserts this list stays empty.
        error_calls.append((entity_id, old_state, new_state))

    async_track_template(hass, template_error, error_callback)
    await hass.async_block_till_done()

    # Trigger a state change; rendering fails, so the callback is skipped
    # and the failure is logged instead.
    hass.states.async_set("switch.new", "on")
    await hass.async_block_till_done()

    assert not error_calls
    assert "lunch" in caplog.text
    assert "TemplateAssertionError" in caplog.text

    caplog.clear()

    # With rendering patched to succeed, no new template errors are logged.
    with patch.object(Template, "async_render") as render:
        render.return_value = "ok"

        hass.states.async_set("switch.not_exist", "off")
        await hass.async_block_till_done()

    assert "no filter named 'lunch'" not in caplog.text
    assert "TemplateAssertionError" not in caplog.text
28,783
def fake_data_PSBL_phot(outdir='', outroot='psbl',
                        raL=259.5, decL=-29.0,
                        t0=57000.0, u0_amp=0.8, tE=500.0,
                        piE_E=0.02, piE_N=0.02,
                        q=0.5, sep=5.0, phi=75.0,
                        b_sff1=0.5, mag_src1=16.0,
                        parallax=True, target='Unknown', animate=False):
    """
    Simulate a photometric data set for a point-source binary-lens (PSBL)
    microlensing event and plot the result.

    Optional Inputs
    ---------------
    outdir : str
        The output directory where figures and data are saved.
    outroot : str
        The output file name root for a saved figure.
    raL : float (deg)
        The right ascension in degrees. Needed if parallax=True.
    decL : float (deg)
        The declination in degrees. Needed if parallax=False.
    t0: float
        Time of photometric peak, as seen from Earth [MJD]
    u0_amp: float
        Angular distance between the lens and source on the plane of the
        sky at closest approach in units of thetaE. It can be
        positive (u0_hat cross thetaE_hat pointing away from us)
        or negative (u0_hat cross thetaE_hat pointing towards us).
    tE: float
        Einstein crossing time. [MJD]
    piE_E: float
        The microlensing parallax in the East direction in units of thetaE
    piE_N: float
        The microlensing parallax in the North direction in units of thetaE
    q: float
        Mass ratio (low-mass / high-mass)
    sep: float
        Angular separation of the two lenses in units of thetaE where
        thetaE is defined with the total binary mass.
    phi: float
        Angle made between the binary axis and the relative proper motion
        vector, measured in degrees.
    b_sff: array or list
        The ratio of the source flux to the total (source + neighbors + lens)
        b_sff = f_S / (f_S + f_L + f_N). This must be passed in as a list or
        array, with one entry for each photometric filter.
    mag_src: array or list
        Photometric magnitude of the source. This must be passed in as a
        list or array, with one entry for each photometric filter.

    Returns
    -------
    data : dict
        Simulated photometry (times, magnitudes, errors) plus target
        metadata, shaped like the fitter's expected input.
    params : dict
        The input model parameters, keyed by name.
    psbl : model object
        The instantiated PSBL photometric model.
    ani : animation or None
        The animation object when animate=True, else None.
    """
    start = time.time()

    # Choose the parallax or no-parallax flavor of the PSBL photometric
    # model; both share the same core parameters.
    if parallax:
        psbl = model.PSBL_Phot_Par_Param1(t0, u0_amp, tE, piE_E, piE_N,
                                          q, sep, phi, [b_sff1], [mag_src1],
                                          raL=raL, decL=decL,
                                          root_tol=1e-8)
    else:
        psbl = model.PSBL_Phot_noPar_Param1(t0, u0_amp, tE, piE_E, piE_N,
                                            q, sep, phi, [b_sff1], [mag_src1],
                                            root_tol=1e-8)

    # Simulate
    # photometric observations every 1 day and
    # for the bulge observing window. Observations missed
    # for 125 days out of 365 days for photometry.
    t_pho = np.array([], dtype=float)

    for year_start in np.arange(54000, 60000, 365.25):
        phot_win = 240.0
        phot_start = (365.25 - phot_win) / 2.0
        t_pho_new = np.arange(year_start + phot_start,
                              year_start + phot_start + phot_win, 1)
        t_pho = np.concatenate([t_pho, t_pho_new])

    # Dense, uninterrupted time grid for the model curve.
    t_mod = np.arange(t_pho.min(), t_pho.max(), 1)

    i_pho, A_pho = psbl.get_all_arrays(t_pho)
    i_mod, A_mod = psbl.get_all_arrays(t_mod)

    imag_pho = psbl.get_photometry(t_pho, amp_arr=A_pho)
    imag_mod = psbl.get_photometry(t_mod, amp_arr=A_mod)

    # Make the photometric observations.
    # Assume 0.05 mag photometric errors at I=19.
    # This means Signal = 400 e- at I=19.
    flux0 = 400.0
    imag0 = 19.0

    flux_pho = flux0 * 10 ** ((imag_pho - imag0) / -2.5)
    # Poisson-like noise: sigma_flux = sqrt(flux).
    flux_pho_err = flux_pho ** 0.5
    flux_pho += np.random.randn(len(t_pho)) * flux_pho_err
    imag_pho = -2.5 * np.log10(flux_pho / flux0) + imag0
    # Magnitude error ~ 1.087 / SNR with SNR = sqrt(flux).
    imag_pho_err = 1.087 / flux_pho_err

    stop = time.time()

    fmt = 'It took {0:.2f} seconds to evaluate the model at {1:d} time steps'
    print(fmt.format(stop - start, len(t_mod) + len(t_pho)))

    ##########
    # Plot photometry
    ##########
    plt.figure(1)
    plt.clf()
    plt.errorbar(t_pho, imag_pho, yerr=imag_pho_err, fmt='k.', label='Sim Obs',
                 alpha=0.2)
    plt.plot(t_mod, imag_mod, color='red', label='Model')
    plt.gca().invert_yaxis()
    plt.xlabel('Time (MJD)')
    plt.ylabel('I (mag)')
    plt.legend()

    # Package the simulated observations in the dict layout the fitting
    # code expects.
    data = {}
    data['t_phot1'] = t_pho
    data['mag1'] = imag_pho
    data['mag_err1'] = imag_pho_err

    data['phot_files'] = ['fake_data_parallax_phot1']
    data['ast_files'] = ['fake_data_parallax_ast1']

    data['target'] = target
    data['phot_data'] = 'sim'
    data['ast_data'] = 'sim'
    data['raL'] = raL
    data['decL'] = decL

    params = {}
    params['t0'] = t0
    params['u0_amp'] = u0_amp
    params['tE'] = tE
    params['piE_E'] = piE_E
    params['piE_N'] = piE_N
    params['q'] = q
    params['sep'] = sep
    params['phi'] = phi
    params['b_sff'] = b_sff1
    params['mag_src'] = mag_src1

    out_name = outdir + outroot + '_movie.gif'
    if animate:
        ani = plot_models.animate_PSBL(psbl, outfile=out_name)
    else:
        ani = None

    return data, params, psbl, ani
28,784
def is_dicom(path: pathlib.Path) -> bool:
    """Determine whether a path refers to DICOM data.

    A path counts as DICOM when it has a ``.dcm`` suffix (case-insensitive)
    or is a directory containing at least one ``.dcm`` file.

    Args:
        path (pathlib.Path): Path to the file to check.

    Returns:
        bool: True if the file is a DICOM file.
    """
    path = pathlib.Path(path)
    if path.suffix.lower() == ".dcm":
        return True
    return path.is_dir() and any(
        child.suffix.lower() == ".dcm" for child in path.iterdir()
    )
28,785
def _hessian(model: 'BinaryLogReg', data: Dataset,
             data_weights: Optional[jnp.ndarray]) -> jnp.ndarray:
    """Ravelled Hessian matrix of the objective function with respect to
    the model parameters."""
    # Flatten the parameter pytree so jax.hessian works on one flat vector.
    flat_params, unravel = ravel_pytree(model.params)
    rand_params = model.random_params

    def flat_objective(p):
        # Re-inflate the flat vector into the parameter pytree before
        # evaluating the objective.
        return _objective(unravel(p), rand_params, data, model.lamb,
                          model.pos_label, data_weights)

    return jax.hessian(flat_objective)(flat_params)
28,786
def promax2meta(doc, target):
    """
    Return meta information (Line or Area) of csv Promax geometry file.

    Arguments:
    doc -- csv Promax geometry file
    target -- meta information to get (Line or Area)

    Returns the first matching value as a string, or None when no line of
    the form ``<target> = "<value>"`` / ``<target>: <value>`` is found.
    """
    # Compile once instead of re-parsing the pattern for every line.
    pattern = re.compile(re.escape(target) + r'\s*[=:]\s*\"?([\w-]+)\"?',
                         re.I)
    # `with` guarantees the file handle is closed (it was leaked before).
    with open(doc) as lines:
        for line in lines:
            result = pattern.search(line)
            if result:
                return result.group(1)
28,787
def axLabel(value, unit):
    """
    Build an axis label from a value and its unit.

    :param value: Value for axis label
    :type value: int
    :param unit: Unit for axis label
    :type unit: str
    :return: Axis label as \"<value> (<unit>)\"
    :rtype: str
    """
    return f"{value} ({unit})"
28,788
def save_notebook(filename, timeout=10):
    """
    Force-saves a Jupyter notebook by displaying JavaScript.

    Args:
        filename (``str``): path to notebook file being saved
        timeout (``int`` or ``float``): number of seconds to wait for save
            before timing-out

    Returns
        ``bool``: whether the notebook was saved successfully
    """
    # Work in nanoseconds to match time.time_ns() below.
    deadline_ns = timeout * 10 ** 9

    # Outside IPython there is nothing to save; report success.
    if get_ipython() is None:
        return True

    with open(filename, "rb") as f:
        original_hash = hashlib.md5(f.read()).hexdigest()

    started_ns = time.time_ns()
    display(Javascript("Jupyter.notebook.save_checkpoint();"))

    # Poll the file until its hash changes (save landed) or we time out.
    latest_hash = original_hash
    while latest_hash == original_hash and time.time_ns() - started_ns <= deadline_ns:
        time.sleep(1)
        with open(filename, "rb") as f:
            latest_hash = hashlib.md5(f.read()).hexdigest()

    return latest_hash != original_hash
28,789
def test_config_start_with_api(test_microvm_with_api, vm_config_file):
    """
    Test if a microvm configured from file boots successfully.

    @type: functional
    """
    test_microvm = test_microvm_with_api
    # Point the microvm at the JSON config file before spawning so the
    # process boots directly from it rather than via API calls.
    _configure_vm_from_json(test_microvm, vm_config_file)
    test_microvm.spawn()

    # The machine config endpoint must respond and the VM must be running.
    response = test_microvm.machine_cfg.get()
    assert test_microvm.api_session.is_status_ok(response.status_code)
    assert test_microvm.state == "Running"

    # Validate full vm configuration.
    response = test_microvm.full_cfg.get()
    assert test_microvm.api_session.is_status_ok(response.status_code)
    # The live configuration reported by the API must match the file.
    with open(vm_config_file, encoding='utf-8') as json_file:
        assert response.json() == json.load(json_file)
28,790
def test_gen():
    """Return generator SM1 from the sinfactory test grid."""
    grid = PFactoryGrid(project_name="test_grid_sinfactory")
    return grid.gens["SM1"]
28,791
def validate_api_key():
    """Validates an API key submitted via POST."""
    form = ApiKeyForm()
    # Organization choices come from the orgs cached in the session.
    form.organization.choices = session['orgs_list']

    if not form.validate_on_submit():
        # 422 Unprocessable Entity with the per-field validation errors.
        return jsonify(form.errors), 422

    session['org_id'] = form.organization.data
    return jsonify(True)
28,792
def test_suggest_add(entities, capsys):
    """
    The netixlan described in the remote-ixf doesn't exist,
    but there is a relationship btw the network and ix (ie a different
    netixlan).

    The network does not have automatic updates.

    There isn't a local-ixf that matches the remote-ixf.

    We suggest adding the netixlan, create an admin ticket, and send emails
    to the network and IX.
    """
    data = setup_test_data("ixf.member.2")  # asn1001
    network = entities["net"]["UPDATE_DISABLED"]  # asn1001
    ixlan = entities["ixlan"][0]

    # This appears in the remote-ixf data so should not
    # create a IXFMemberData instance
    entities["netixlan"].append(
        NetworkIXLan.objects.create(
            network=network,
            ixlan=ixlan,
            asn=network.asn,
            speed=10000,
            ipaddr4="195.69.150.250",
            ipaddr6="2001:7f8:1::a500:2906:3",
            status="ok",
            is_rs_peer=True,
            operational=True,
        )
    )

    importer = ixf.Importer()
    importer.update(ixlan, data=data)

    # Exactly one suggestion recorded; no netixlan auto-created because the
    # network has automatic updates disabled.
    assert IXFMemberData.objects.count() == 1
    assert NetworkIXLan.objects.count() == 1

    log = importer.log["data"][0]
    assert log["action"] == "suggest-add"

    # Both the network and the IX get notified about the suggested addition.
    stdout = capsys.readouterr().out
    assert_email_sent(
        stdout, (network.asn, "195.69.147.250", "2001:7f8:1::a500:2906:1")
    )

    assert_ticket_exists([(1001, "195.69.147.250", "2001:7f8:1::a500:2906:1")])

    # Test idempotent: re-running the import must not duplicate suggestions.
    importer.update(ixlan, data=data)
    assert IXFMemberData.objects.count() == 1
    assert NetworkIXLan.objects.count() == 1
    assert_ticket_exists([(1001, "195.69.147.250", "2001:7f8:1::a500:2906:1")])
28,793
def plot_clickForPlane():
    """
    Create a Plane at location of one mouse click in the view or
    onto a clicked object or at a pre-selected point location:
    Create a Plane perpendicular to the view at location of one mouse click.
    - Click first on the Button then click once on the View.
    - Click first on the Button then click once on one object of the View
    to attach the plane at the object.
    But you can also select an already existing point first and click the
    button to attach the plane.

    Two modes:
    - If a point is already selected, the plane is created immediately at
      that point, oriented along the current view direction.
    - Otherwise an event callback is registered and the plane is created by
      a later mouse click (handled in getClickedPlane2).
    """
    msg = verbose

    createFolders('WorkPlanes')

    m_actDoc = get_ActiveDocument(info=msg)
    if m_actDoc.Name is None:
        return None

    m_selEx = Gui.Selection.getSelectionEx(m_actDoc.Name)
    if len(m_selEx) >= 1:
        SelectedObjects = get_SelectedObjects(info=1)
        Number_of_Points = SelectedObjects[0]
        if (Number_of_Points == 1):
            Point_List = SelectedObjects[3]
            name = "Plane"
            part = "Part::Feature"
            # return view direction as a vector
            Plane_Normal = Gui.ActiveDocument.ActiveView.getViewDirection()
            # Set the base of the plane at location of mouse click
            Plane_Point = Point_List[-1].Point
            # Create a Plane sized by the module-level length/width settings.
            Plane_User_Name, plane = plot_plane(m_lengthPlane, m_widthPlane,
                                                Plane_Point, Plane_Normal,
                                                part, name)
        else:
            printError_msg("Either select first one Point and Click the button or \n" +
                           "Click the button and one free mouse click in the view or" +
                           "Click the button and one mouse click on an object of the view !")
    else:
        # No selection: arm a one-shot mouse-button callback on the active
        # view; getClickedPlane2 will create the plane at the click location.
        global m_callback
        #view = Gui.ActiveDocument.ActiveView
        view = get_ActiveView()
        # m_callback = view.addEventCallbackPivy(SoMouseButtonEvent.getClassTypeId(),getClickedPlane)
        m_callback = view.addEventCallback("SoMouseButtonEvent", getClickedPlane2)
28,794
def is_logged_in(f):
    """Decorator that redirects to the login page unless the session
    marks the user as logged in."""
    @wraps(f)
    def wrap(*args, **kwargs):
        """Run the wrapped view only for authenticated sessions."""
        # Guard clause: bounce anonymous users to the login page.
        if 'logged_in' not in session:
            flash('Unauthorized, Please login', 'danger')
            return redirect(url_for('login'))
        return f(*args, **kwargs)
    return wrap
28,795
def dataframe_from_mult_files(filenames):
    """@param filenames (List[Str]): list of filenames

    Loads each file and stacks the resulting frames row-wise."""
    frames = [dataframe_from_file(name) for name in filenames]
    return pd.concat(frames, axis=0)
28,796
def batch_euclidean_dist(x, y, min_val):
    """
    Batched pairwise Euclidean distances.

    x and y are batches of matrices with shapes [N, m, d] and [N, n, d];
    the result has shape [N, m, n] with entry [b, i, j] equal to the
    distance between row i of x[b] and row j of y[b].

    Uses the expansion ||u - v||^2 = u^2 - 2uv + v^2, clipped from below
    at `min_val` before the square root to avoid zero distances and keep
    the gradient numerically stable.
    """
    m = x.shape[1]
    n = y.shape[1]

    # Squared norms broadcast out to [N, m, n].
    x_sq = ops.pows(x, 2).sum(-1, keepdims=True).repeat(n, axis=-1)
    y_sq = ops.pows(y, 2).sum(-1, keepdims=True).repeat(m, axis=-1)
    y_sq = y_sq.transpose(0, 2, 1)

    # Cross term x . y^T, batched.
    cross = ops.batch_dot(x, y.transpose(0, 2, 1))
    squared = x_sq + y_sq - 2 * cross

    # Avoiding zeros for numerical stability
    squared = ops.maximum(squared, min_val)
    return ops.sqrt(squared)
28,797
def outgroup_reformat(newick, outgroup):
    """
    Move the location of the outgroup in a newick string to be at the end
    of the string

    Inputs:
    newick --- a newick string to be reformatted
    outgroup --- the outgroup
    Output:
    newick --- the reformatted string
    """
    # Drop the outgroup (with its trailing comma) from its current position.
    without_outgroup = newick.replace(outgroup + ",", "")
    # Strip the closing ");" and re-append the outgroup as the last taxon.
    return without_outgroup[:-2] + "," + outgroup + ");"
28,798
def get_source_token(request):
    """
    Perform token validation for the presqt-source-token header.

    Parameters
    ----------
    request : HTTP request object

    Returns
    -------
    Returns the token if the validation is successful.
    Raises a custom AuthorizationException error if the validation fails.
    """
    headers = request.META
    # Reject requests that omit the required header outright.
    if 'HTTP_PRESQT_SOURCE_TOKEN' not in headers:
        raise PresQTValidationError(
            "PresQT Error: 'presqt-source-token' missing in the request headers.",
            status.HTTP_400_BAD_REQUEST)
    return headers['HTTP_PRESQT_SOURCE_TOKEN']
28,799