content
stringlengths
22
815k
id
int64
0
4.91M
def fake_categorize_file(tmpdir_factory):
    """Create a minimal categorize netCDF file for tests and return its path."""
    file_name = tmpdir_factory.mktemp("data").join("categorize.nc")
    n_points = 7
    bit_values = [0, 1, 2, 4, 8, 16, 32]
    dataset = netCDF4.Dataset(file_name, "w", format="NETCDF4_CLASSIC")
    dataset.createDimension('time', n_points)
    time_var = dataset.createVariable('time', 'f8', 'time')
    time_var[:] = np.arange(n_points)
    # Both bit variables share the same single-bit test values.
    for bits_name in ('category_bits', 'quality_bits'):
        bits_var = dataset.createVariable(bits_name, 'i4', 'time')
        bits_var[:] = bit_values
    dataset.close()
    return file_name
29,000
def safe_unsigned_div(a, b, eps=None):
    """Calculate ``a / b`` safely, assuming ``b >= 0``.

    Args:
      a: A `float` or a tensor of shape `[A1, ..., An]`; the numerator.
      b: A `float` or a tensor of shape `[A1, ..., An]`; the denominator.
      eps: A small `float` added to the denominator to avoid division by
        zero. If `None`, it is selected automatically as ten times the
        smallest positive value representable in the dtype of `b`
        (float64 when `b` is a plain Python float).

    Returns:
      A value of the same shape as the inputs containing ``a / (b + eps)``.
    """
    if eps is None:
        # Plain Python floats have no `.dtype`; fall back to float64 info
        # instead of crashing with AttributeError.
        dtype = getattr(b, 'dtype', np.dtype(np.float64))
        eps = 10.0 * np.finfo(dtype).tiny
    return a / (b + eps)
29,001
def create_helm(ioc_dict: Dict, entity_yaml: str, path: Path):
    """
    Create a boilerplate helm chart named after the IOC inside ``path``.

    Renders the Chart.yaml and values.yaml jinja templates and writes the
    supplied boot script text into the chart's config folder.
    """
    helm_folder = path / ioc_dict["ioc_name"]
    if not path.exists():
        # fail if parent does not exist (usually the iocs folder)
        path.mkdir()
    elif helm_folder.exists():
        # Replace any previously generated chart.
        shutil.rmtree(helm_folder)
    shutil.copytree(HELM_TEMPLATE, helm_folder)

    render_file(
        helm_folder / "Chart.yaml.jinja",
        ioc_name=ioc_dict["ioc_name"],
        description=ioc_dict["description"],
    )
    render_file(
        helm_folder / "values.yaml.jinja",
        base_image=ioc_dict["generic_ioc_image"],
    )

    # Save the pre-rendered boot script into the chart.
    boot_script_path = helm_folder / "config" / "ioc.boot.yaml"
    with open(boot_script_path, "w") as boot_file:
        boot_file.write(entity_yaml)
29,002
def load_unpack_npz(path):
    """
    Load a NumPy zip file and re-key its entries.

    This assumes that the first entry of the zipped archive contains the
    keys (in-order) for the rest of the entries.

    Parameters
    ----------
    path : string
      Path to the NumPy zip file to load

    Returns
    ----------
    data : dict
      Unpacked dictionary with the specified keys inserted
    """
    # Load the NumPy zip file at the path
    raw = dict(np.load(path, allow_pickle=True))
    # The first entry holds the replacement key names (in order)
    first_key = next(iter(raw))
    keys = raw.pop(first_key)
    # Build a fresh dict instead of renaming in place: popping old keys
    # one-by-one corrupted the mapping whenever a new key collided with a
    # not-yet-renamed old key.
    data = {new_key: value for new_key, value in zip(keys, raw.values())}
    return data
29,003
def verify(message, expected, *args, **kwargs):
    """Verify target substitutions."""
    # A list is splatted into the packet; a single item is wrapped first.
    packet_args = message if isinstance(message, list) else (message,)
    result = command_handler._inject(MessagePacket(*packet_args), *args, **kwargs)
    assert result.text == expected
29,004
def is_valid_email(email):
    """
    Check if a string is a valid email. Returns a Boolean.
    """
    try:
        match = re.match(EMAIL_RE, email)
    except TypeError:
        # Non-string input can never be a valid address.
        return False
    return match is not None
29,005
def test_field_upload_resp_fields(datapack_tar, svc_client_with_repo):
    """Check response fields."""
    svc_client, headers, project_id, _ = svc_client_with_repo
    headers.pop("Content-Type")

    response = svc_client.post(
        "/cache.files_upload",
        data=dict(file=(io.BytesIO(datapack_tar.read_bytes()), datapack_tar.name)),
        query_string={"unpack_archive": True, "override_existing": True},
        headers=headers,
    )

    assert response
    assert 200 == response.status_code
    assert {"result"} == set(response.json.keys())

    files = response.json["result"]["files"]
    assert files

    expected_fields = {
        "content_type",
        "file_id",
        "file_name",
        "file_size",
        "is_archive",
        "created_at",
        "is_dir",
        "unpack_archive",
        "relative_path",
    }
    first_file = files[0]
    assert expected_fields == set(first_file.keys())
    assert not first_file["is_archive"]
    assert not first_file["unpack_archive"]

    rel_path = first_file["relative_path"]
    assert rel_path.startswith(datapack_tar.name)
    assert "unpacked" in rel_path
29,006
def _create_matcher(utterance): """Create a regex that matches the utterance.""" # Split utterance into parts that are type: NORMAL, GROUP or OPTIONAL # Pattern matches (GROUP|OPTIONAL): Change light to [the color] {name} parts = re.split(r'({\w+}|\[[\w\s]+\] *)', utterance) # Pattern to extract name from GROUP part. Matches {name} group_matcher = re.compile(r'{(\w+)}') # Pattern to extract text from OPTIONAL part. Matches [the color] optional_matcher = re.compile(r'\[([\w ]+)\] *') pattern = ['^'] for part in parts: group_match = group_matcher.match(part) optional_match = optional_matcher.match(part) # Normal part if group_match is None and optional_match is None: pattern.append(part) continue # Group part if group_match is not None: pattern.append( r'(?P<{}>[\w ]+?)\s*'.format(group_match.groups()[0])) # Optional part elif optional_match is not None: pattern.append(r'(?:{} *)?'.format(optional_match.groups()[0])) pattern.append('$') return re.compile(''.join(pattern), re.I)
29,007
def loadTrainingData(path_to_follow):
    """
    Load driving images and logs, assuming images live in the /IMG/
    subfolder and the output is in the CSV file "steering.csv".

    :param path_to_follow: is the full path where the images are placed.
    :return: a list with (1) a numpy array with the images in RGB color,
        (2) a numpy array with the steering angle, (3) a numpy array with
        the class label, and (4) the data logs.
    """
    csv_files = glob.glob(os.path.join(path_to_follow, "*.csv"))
    data_log = pd.read_csv(csv_files[0])
    # Check special case of relative paths...
    if len(grep(data_log['path'][0], "^\s*IMG.+")) > 10:
        data_log['path'] = path_to_follow + data_log['path']
    images = [mpimg.imread(image_path).astype('uint8')
              for image_path in data_log['path']]
    dataset = np.array(images, dtype="uint8")
    labels = np.array(data_log['label'], dtype="uint8")
    steering = np.array(data_log['steering'], dtype="float32")
    return (dataset, steering, labels, data_log)
29,008
def configure_logging(log_level=None, log_file=None, simplified_console_logs=False):
    """
    This should be called once as early as possible in app startup to configure logging handlers and formatting.

    :param log_level: The level at which to record log messages (DEBUG|INFO|NOTICE|WARNING|ERROR|CRITICAL)
    :type log_level: str
    :param log_file: The file to write logs to, or None to disable logging to a file
    :type log_file: str | None
    :param simplified_console_logs: Whether or not to use the simplified logging format and coloring
    :type simplified_console_logs: bool
    """
    # Set datetimes in log messages to be local timezone instead of UTC
    logbook.set_datetime_format('local')

    # Redirect standard lib logging to capture third-party logs in our log files (e.g., tornado, requests)
    logging.root.setLevel(logging.WARNING)  # don't include DEBUG/INFO/NOTICE-level logs from third parties
    logbook.compat.redirect_logging(set_root_logger_level=False)

    # Add a NullHandler to suppress all log messages lower than our desired log_level. (Otherwise they go to stderr.)
    NullHandler().push_application()

    log_level = log_level or Configuration['log_level']
    format_string, log_colors = _LOG_FORMAT_STRING, _LOG_COLORS
    if simplified_console_logs:
        format_string, log_colors = _SIMPLIFIED_LOG_FORMAT_STRING, _SIMPLIFIED_LOG_COLORS

    # handler for stdout
    log_handler = _ColorizingStreamHandler(
        stream=sys.stdout,
        level=log_level,
        format_string=format_string,
        log_colors=log_colors,
        bubble=True,
    )
    log_handler.push_application()

    # handler for log file
    if log_file:
        fs.create_dir(os.path.dirname(log_file))
        # Remember whether the file existed BEFORE the handler creates it,
        # so we can decide between rollover and a fresh summary below.
        previous_log_file_exists = os.path.exists(log_file)
        event_handler = _ColorizingRotatingFileHandler(
            filename=log_file,
            level=log_level,
            format_string=_LOG_FORMAT_STRING,
            log_colors=_LOG_COLORS,
            bubble=True,
            max_size=Configuration['max_log_file_size'],
            backup_count=Configuration['max_log_file_backups'],
        )
        event_handler.push_application()
        if previous_log_file_exists:
            # Force application to create a new log file on startup.
            event_handler.perform_rollover(increment_logfile_counter=False)
        else:
            event_handler.log_application_summary()
29,009
def top_compartment_air_CO2(setpoints: Setpoints, states: States, weather: Weather):
    """
    Equation 2.13 / 8.13
    cap_CO2_Top * top_CO2 = mass_CO2_flux_AirTop - mass_CO2_flux_TopOut
    """
    # Note: line 46 / setDepParams / GreenLight
    top_capacity = (Coefficients.Construction.greenhouse_height
                    - Coefficients.Construction.air_height)
    flux_air_to_top = greenhouse_air_and_above_thermal_screen_CO2_flux(states, setpoints, weather)
    flux_top_to_out = above_thermal_screen_and_outdoor_CO2_flux(states, setpoints, weather)
    # Net CO2 accumulation of the top compartment per unit capacity.
    return (flux_air_to_top - flux_top_to_out) / top_capacity
29,010
def test_zero_indexed(bucketer, df):
    """Test that bins are zero-indexed."""
    transformed = bucketer(n_bins=3).fit_transform(df.drop(columns=["pet_ownership"]))
    # Every bucketed column must start numbering its bins at zero.
    for column in ("MARRIAGE", "EDUCATION", "LIMIT_BAL", "BILL_AMT1"):
        assert transformed[column].min() == 0
29,011
def coning_sculling(gyro, accel, order=1):
    """Apply coning and sculling corrections to inertial readings.

    The algorithm assumes a polynomial model for the angular velocity and
    the specific force, fitting coefficients by considering previous time
    intervals. The algorithm for a linear approximation is well known and
    described in [1]_ and [2]_.

    The accelerometer readings are also corrected for body frame rotation
    during a sampling period.

    Parameters
    ----------
    gyro : array_like, shape (n_readings, 3)
        Gyro readings.
    accel : array_like, shape (n_readings, 3)
        Accelerometer readings.
    order : {0, 1, 2}, optional
        Angular velocity and specific force polynomial model order.
        Note that 0 means not applying non-commutative corrections at all.
        Default is 1.

    Returns
    -------
    theta : ndarray, shape (n_readings, 3)
        Estimated rotation vectors.
    dv : ndarray, shape (n_readings, 3)
        Estimated velocity increments.

    References
    ----------
    .. [1] P. G. Savage, "Strapdown Inertial Navigation Integration Algorithm
           Design Part 1: Attitude Algorithms", Journal of Guidance, Control,
           and Dynamics 1998, Vol. 21, no. 2.
    .. [2] P. G. Savage, "Strapdown Inertial Navigation Integration Algorithm
           Design Part 2: Velocity and Position Algorithms", Journal of
           Guidance, Control, and Dynamics 1998, Vol. 21, no. 2.
    """
    if order not in [0, 1, 2]:
        # Fixed: the message used to claim valid values were "1, 2 or 3".
        raise ValueError("`order` must be 0, 1 or 2.")

    gyro = np.asarray(gyro)
    accel = np.asarray(accel)

    if order == 0:
        coning = 0
        sculling = 0
    elif order == 1:
        # First rows are zero: no previous interval to correct against.
        coning = np.vstack((np.zeros(3),
                            np.cross(gyro[:-1], gyro[1:]) / 12))
        sculling = np.vstack((np.zeros(3),
                              (np.cross(gyro[:-1], accel[1:]) +
                               np.cross(accel[:-1], gyro[1:])) / 12))
    else:  # order == 2, guaranteed by the check above
        coning = (-121 * np.cross(gyro[2:], gyro[1:-1]) +
                  31 * np.cross(gyro[2:], gyro[:-2]) -
                  np.cross(gyro[1:-1], gyro[:-2])) / 720
        sculling = (-121 * np.cross(gyro[2:], accel[1:-1]) +
                    31 * np.cross(gyro[2:], accel[:-2]) -
                    np.cross(gyro[1:-1], accel[:-2]) -
                    121 * np.cross(accel[2:], gyro[1:-1]) +
                    31 * np.cross(accel[2:], gyro[:-2]) -
                    np.cross(accel[1:-1], gyro[:-2])) / 720
        coning = np.vstack((np.zeros((2, 3)), coning))
        sculling = np.vstack((np.zeros((2, 3)), sculling))

    # Rotation correction of velocity increments for body-frame rotation
    # during the sampling period.
    rc = 0.5 * np.cross(gyro, accel)
    return gyro + coning, accel + sculling + rc
29,012
def add():
    """Add a task.

    :url: /add/
    :returns: job
    """
    # Register (or replace) the interval job on the shared scheduler.
    job_name = scheduler.add_job(
        func=task2,
        trigger="interval",
        seconds=10,
        id="test job 2",
        name="test job 2",
        replace_existing=True,
    ).name
    return "%s added!" % job_name
29,013
def magnitude(x: float, y: float, z: float) -> float:
    """
    Magnitude of x, y, z acceleration: sqrt(x² + y² + z²).

    NOTE(review): the original docstring also documented a pd.DataFrame
    dispatch variant; this implementation handles the scalar case —
    presumably another registered overload handles DataFrames.

    Args:
        x (float): X-axis of acceleration
        y (float): Y-axis of acceleration
        z (float): Z-axis of acceleration

    Returns:
        float: Magnitude of acceleration
    """
    squared_sum = x ** 2 + y ** 2 + z ** 2
    return np.sqrt(squared_sum)
29,014
def save_images(images: List[np.ndarray], path: str, name: str) -> None:
    """
    Save a list of images as PNG files named "$name_$index.png".

    Args:
        images (List[np.ndarray]): Images to write
        path (str): Output directory (created if missing)
        name (str): Base name used for filename generation
    """
    # exist_ok=True already handles a pre-existing directory, so the
    # previous exists() pre-check was redundant (and race-prone).
    os.makedirs(path, exist_ok=True)
    for index, image in enumerate(images):
        cv2.imwrite(os.path.join(path, f"{name}_{index}.png"), image)
29,015
def test_extract_phrase_returns_correctly(test_input_sentences, test_input_merge_inplace, test_expected):
    """Test that the extract_phrase method returns correctly."""
    # Invoke the `extract_phrase` method of `ChunkParser`
    test_output = ChunkParser().extract_phrase(test_input_sentences, test_input_merge_inplace)

    # Each nested output element must be a Chunk whose attributes match the
    # equivalent nested element of `test_expected`.
    for expected_list, output_list in zip(test_expected, test_output):
        for expected, output in zip(expected_list, output_list):
            assert isinstance(output, Chunk)
            assert output.__dict__ == expected.__dict__
29,016
def dauth(bot, input):
    """Toggle whether channel should be auth enabled by default"""
    if not input.admin:
        return False
    if input.origin[0] != ID.HON_SC_CHANNEL_MSG:
        # Must be invoked from inside the target channel.
        bot.reply("Run me from channel intended for the default auth!")
        return
    cname = bot.id2chan[input.origin[2]]
    if cname in bot.config.default_auth:
        bot.config.set_del('default_auth', cname)
        state = "disabled"
    else:
        bot.config.set_add('default_auth', cname)
        state = "enabled"
    bot.reply("Default auth in this channel is now " + state)
29,017
def write_to_disk(func):
    """
    Decorator that writes the decorated function's first positional
    argument to "checkpoint.json" after each call, so an interrupted
    operation can be resumed from the last checkpoint.

    Args:
        func: the function to wrap.

    Returns:
        The wrapping function; it returns whatever ``func`` returns.
    """
    def wrapper(*args, **kwargs):
        result = func(*args, **kwargs)
        # Fixed: the file was opened in "r" mode, so f.write() raised.
        with open("checkpoint.json", "w") as f:
            f.write(json.dumps(args[0]))
        # Fixed: the wrapped function's return value was discarded.
        return result
    return wrapper
29,018
def _get_partition(device, uuid):
    """Find the partition of a given device.

    Scans ``lsblk`` output for a partition (or md member) whose filesystem
    UUID or partition UUID matches, then falls back to ``findfs`` lookups,
    and finally to a software-RAID first-partition heuristic.

    :param device: block device path, e.g. /dev/sda
    :param uuid: filesystem UUID or partition UUID to locate
    :returns: path of the matching partition device
    :raises errors.DeviceNotFound: if no partition matches the UUID
    :raises errors.CommandExecutionError: if the underlying tooling fails
    """
    LOG.debug("Find the partition %(uuid)s on device %(dev)s",
              {'dev': device, 'uuid': uuid})

    try:
        # Make sure the kernel's partition table view is current.
        _rescan_device(device)
        lsblk = utils.execute('lsblk', '-PbioKNAME,UUID,PARTUUID,TYPE', device)
        report = lsblk[0]
        for line in report.split('\n'):
            part = {}
            # Split into KEY=VAL pairs
            vals = shlex.split(line)
            for key, val in (v.split('=', 1) for v in vals):
                part[key] = val.strip()
            # Ignore non partition
            if part.get('TYPE') not in ['md', 'part']:
                # NOTE(TheJulia): This technically creates an edge failure
                # case where a filesystem on a whole block device sans
                # partitioning would behave differently.
                continue
            if part.get('UUID') == uuid:
                LOG.debug("Partition %(uuid)s found on device "
                          "%(dev)s", {'uuid': uuid, 'dev': device})
                return '/dev/' + part.get('KNAME')
            if part.get('PARTUUID') == uuid:
                LOG.debug("Partition %(uuid)s found on device "
                          "%(dev)s", {'uuid': uuid, 'dev': device})
                return '/dev/' + part.get('KNAME')
        else:
            # for/else: reached only when the scan finished without
            # returning a match; try the findfs fallbacks.
            # NOTE(TheJulia): We may want to consider moving towards using
            # findfs in the future, if we're comfortable with the execution
            # and interaction. There is value in either way though.
            # NOTE(rg): alternative: blkid -l -t UUID=/PARTUUID=
            try:
                findfs, stderr = utils.execute('findfs', 'UUID=%s' % uuid)
                return findfs.strip()
            except processutils.ProcessExecutionError as e:
                LOG.debug('First fallback detection attempt for locating '
                          'partition via UUID %(uuid)s failed. '
                          'Error: %(err)s', {'uuid': uuid, 'err': e})
                try:
                    findfs, stderr = utils.execute(
                        'findfs', 'PARTUUID=%s' % uuid)
                    return findfs.strip()
                except processutils.ProcessExecutionError as e:
                    LOG.debug('Secondary fallback detection attempt for '
                              'locating partition via UUID %(uuid)s failed. '
                              'Error: %(err)s', {'uuid': uuid, 'err': e})

                # Last fallback: In case we cannot find the partition by UUID
                # and the deploy device is an md device, we check if the md
                # device has a partition (which we assume to contain the
                # root fs).
                if hardware.is_md_device(device):
                    md_partition = device + 'p1'
                    if (os.path.exists(md_partition)
                            and stat.S_ISBLK(os.stat(md_partition).st_mode)):
                        LOG.debug("Found md device with partition %s",
                                  md_partition)
                        return md_partition
                    else:
                        LOG.debug('Could not find partition %(part)s on md '
                                  'device %(dev)s',
                                  {'part': md_partition, 'dev': device})

        # Partition not found, time to escalate.
        error_msg = ("No partition with UUID %(uuid)s found on "
                     "device %(dev)s" % {'uuid': uuid, 'dev': device})
        LOG.error(error_msg)
        raise errors.DeviceNotFound(error_msg)
    except processutils.ProcessExecutionError as e:
        error_msg = ('Finding the partition with UUID %(uuid)s on '
                     'device %(dev)s failed with %(err)s' %
                     {'uuid': uuid, 'dev': device, 'err': e})
        LOG.error(error_msg)
        raise errors.CommandExecutionError(error_msg)
29,019
def get_hourly_total_exchange_volume_in_period_from_db_trades(tc_db, start_time, end_time):
    """
    Get the exchange volume for this exchange in this period from our saved
    version of the trade history.
    """
    # Watch this query for performance.
    rows = tc_db.query(
        func.hour(EHTrade.timestamp),
        func.sum(EHTrade._volume),
    )\
        .filter(EHTrade.timestamp >= start_time)\
        .filter(EHTrade.timestamp < end_time)\
        .group_by(func.hour(EHTrade.timestamp))\
        .all()

    # Convert each (hour, volume) row into [timestamp, Money] pairs.
    formatted = [
        [
            Delorean(start_time, 'UTC').next_hour(hour).datetime,
            Money(volume, 'BTC'),
        ]
        for hour, volume in rows
    ]
    return sorted(formatted, key=lambda entry: entry[0])
29,020
def test_case_1():
    """
    If we add new choice, which changes the max length, we should have a
    migration, plus the column in postgres should be affected.
    """
    print('Running test case 1')
    database = Database()
    setuper = Setuper(database, 'TEST_1')

    setuper.before_setup()
    assert database.get_column_size() == 3, 'Initial max length should be 3'

    setuper.setup()
    os.chdir(setuper.get_new_project_location())

    makemigrations_output = setuper.call('python manage.py makemigrations')
    assert 'No changes detected' not in makemigrations_output, 'There should be new migrations'
    print(makemigrations_output)

    print(setuper.call('python manage.py migrate'))
    assert database.get_column_size() == 4, 'Migration should increase the max length to 4'

    setuper.after_setup()
29,021
async def action(bot, msg):
    """**!dice** _dice_**d**_sides_[+|-_modifier_] Simulate a dice roll.

    _Dice_ is the number of dice to roll and _sides_ is the number of sides
    each die has. Optionally you can specify _modifier_, prefixed with + or -.
    `!dice 1d20+3`
    """
    match = match_pattern.match(msg.clean_content)
    if not match:
        return
    dice_count = int(match.group(1))
    dice_sides = int(match.group(2))
    modifier = 0
    if match.group(3) and match.group(4):
        modifier = int(match.group(4))
        if match.group(3) == "-":
            modifier = -modifier
    if dice_count < sys.maxsize and dice_sides < sys.maxsize and modifier < sys.maxsize:
        # Fixed: roll each die individually and sum. The previous
        # randint(count, count * sides) was uniform over the whole range,
        # which is not how a sum of dice is distributed.
        result = sum(random.randint(1, dice_sides) for _ in range(dice_count)) + modifier
        await bot.send_message(msg.channel, str(result))
29,022
def descending(sorting_func: typing.Any) -> typing.Any:
    """
    Wrap a sorting function so it sorts in descending order.

    :param sorting_func: the original sorting function
    :return: the modified sorting function
    """
    def modified_sorting_func(current_columns, original_columns, sorting_func=sorting_func):
        ascending_order = sorting_func(current_columns, original_columns)
        return sqlalchemy.sql.desc(ascending_order)
    # Propagate whether the wrapped function needs the original columns.
    modified_sorting_func.require_original_columns = getattr(
        sorting_func, 'require_original_columns', False)
    return modified_sorting_func
29,023
def circleColor(renderer, x, y, rad, color):
    """Draws an unfilled circle to the renderer with a given color.

    If the rendering color has any transparency, blending will be enabled.

    Args:
        renderer (:obj:`SDL_Renderer`): The renderer to draw on.
        x (int): The X coordinate of the center of the circle.
        y (int): The Y coordinate of the center of the circle.
        rad (int): The radius (in pixels) of the circle.
        color (int): The color to draw with as a 32-bit ``0xRRGGBBAA``
            integer (e.g. ``0xFF0000FF`` for solid red).

    Returns:
        int: 0 on success, or -1 on failure.
    """
    # Delegate to the dynamically loaded SDL2_gfx function.
    draw_circle = _funcs["circleColor"]
    return draw_circle(renderer, x, y, rad, color)
29,024
def _get_ref_init_error(dpde, error, **kwargs): """ Function that identifies where the continuous gyro begins, initiates and then carries the static errors during the continuous modes. """ temp = [0.0] for coeff, inc in zip(dpde[1:, 2], error.survey.inc_rad[1:]): if inc > kwargs['header']['XY Static Gyro']['End Inc']: temp.append(temp[-1]) else: temp.append(coeff) dpde[:, 2] = temp return dpde
29,025
def get_free_hugepages(socket=None):
    """Get the free hugepage totals on the system.

    :param socket: optional socket param to get free hugepages on a socket.
        To be passed a string.
    :returns: hugepage amount as int
    """
    free_re = re.compile(r'HugePages_Free:\s+(?P<free_hp>\d+)$')
    if socket:
        meminfo_path = '/sys/devices/system/node/node{}/meminfo'.format(socket)
        if not os.path.exists(meminfo_path):
            _LOGGER.info('No hugepage info found for socket %s', socket)
            return 0
    else:
        meminfo_path = '/proc/meminfo'

    with open(meminfo_path, 'r') as result_file:
        lines = result_file.readlines()
    for line in lines:
        match = free_re.search(line)
        if match:
            _LOGGER.info('Hugepages free: %s %s', match.group('free_hp'),
                         'on socket {}'.format(socket) if socket else '')
            return int(match.group('free_hp'))
    _LOGGER.info('Could not parse for hugepage size')
    return 0
29,026
def setup_logger(
    log_filename: Pathlike, log_level: str = "info", use_console: bool = True
) -> None:
    """Set up logging to a file (and optionally the console).

    Args:
      log_filename:
        Base filename for the log; a timestamp (and, when running under
        torch.distributed, the rank) is appended.
      log_level:
        The log level to use, e.g., "debug", "info", "warning", "critical".
        Any other value falls back to "error".
      use_console:
        If True, also echo log records through a StreamHandler.
    """
    now = datetime.now()
    date_time = now.strftime("%Y-%m-%d-%H-%M-%S")
    if dist.is_available() and dist.is_initialized():
        world_size = dist.get_world_size()
        rank = dist.get_rank()
        formatter = f"%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] ({rank}/{world_size}) %(message)s"  # noqa
        log_filename = f"{log_filename}-{date_time}-{rank}"
    else:
        formatter = (
            "%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
        )
        log_filename = f"{log_filename}-{date_time}"

    # Fixed: a bare filename has an empty dirname, and makedirs("")
    # raises FileNotFoundError.
    log_dir = os.path.dirname(log_filename)
    if log_dir:
        os.makedirs(log_dir, exist_ok=True)

    level = {
        "debug": logging.DEBUG,
        "info": logging.INFO,
        "warning": logging.WARNING,
        "critical": logging.CRITICAL,
    }.get(log_level, logging.ERROR)

    # https://discuss.pytorch.org/t/pytorch-1-8-distributed-mode-will-disable-python-logging-module/113897/7
    # `force=True` (added in Python 3.8) replaces handlers installed by
    # third parties. Fixed: use sys.version_info instead of parsing
    # sys.version[0] as a string.
    if sys.version_info >= (3, 8):
        logging.basicConfig(
            filename=log_filename,
            format=formatter,
            level=level,
            filemode="w",
            force=True,
        )
    else:
        logging.basicConfig(
            filename=log_filename, format=formatter, level=level, filemode="w"
        )
    if use_console:
        console = logging.StreamHandler()
        console.setLevel(level)
        console.setFormatter(logging.Formatter(formatter))
        logging.getLogger("").addHandler(console)
29,027
def get_extrainfo_descriptors(start = None, end = None, cache_to = None, bridge = False, timeout = None, retries = 3):
    """
    Shorthand for
    :func:`~stem.descriptor.collector.CollecTor.get_extrainfo_descriptors`
    on our singleton instance.
    """
    # Delegate straight to the singleton's generator.
    yield from get_instance().get_extrainfo_descriptors(start, end, cache_to, bridge, timeout, retries)
29,028
async def loop(reader: asyncio.StreamReader, writer: asyncio.StreamWriter, my_id: int):
    """The main event loop. Gets the current world and returns the decision made."""
    state = await next_state(reader)
    while state["state"] == "playing":
        # Decide a move for the current world and send it to the server.
        move = decision(my_id, World(state["map"]))
        writer.write(move)
        await writer.drain()
        state = await next_state(reader)
29,029
def umount(path, bg=False):
    """Umount dfuse from a given path"""
    # -z requests a lazy unmount for the background case.
    flag = '-uz' if bg else '-u'
    ret = subprocess.run(['fusermount3', flag, path], check=False)
    print('rc from umount {}'.format(ret.returncode))
    return ret.returncode
29,030
def assert_allclose(actual: List[int], desired: numpy.ndarray):
    """
    usage.scipy: 4
    usage.statsmodels: 1
    """
    # Auto-generated API-usage stub (the docstring records observed call
    # counts per project); the body is intentionally empty.
    ...
29,031
def parse_internal_ballot(line):
    """
    Parse an internal ballot line (with or without a trailing newline).

    This function allows leading and trailing spaces.  ValueError is
    raised if one of the values does not parse to an integer.

    An internal ballot line is a space-delimited string of integers of the
    form--

    "WEIGHT CHOICE1 CHOICE2 CHOICE3 ...".
    """
    values = parse_integer_line(line)
    # First integer is the weight; the remainder are the ranked choices.
    weight = next(values)
    return weight, tuple(values)
29,032
def get_fratio(*args):
    """Return the ratio of ``get_cmtr`` to ``get_cme`` for the given args."""
    return get_cmtr(*args) / get_cme(*args)
29,033
def log_progress(is_first_update, total, current, message):
    """Log progress of the issues or PRs processing.

    Args:
        is_first_update (bool): This is the first update of this repo.
        total (int): Number of issues/PRs.
        current (int): Last processed issue/PR number.
        message (str): String with the processed object instance.
    """
    # Only report for large repos on their first update, every 400 items.
    if not is_first_update or total <= 1600:
        return
    processed = current + 1
    if processed % 400 == 0:
        logging.info(
            "processed {num} of {total} {message}".format(
                num=processed, total=total, message=message
            )
        )
29,034
def _select_by_property(peak_properties, pmin, pmax): """ Evaluate where the generic property of peaks confirms to an interval. Parameters ---------- peak_properties : ndarray An array with properties for each peak. pmin : None or number or ndarray Lower interval boundary for `peak_properties`. ``None`` is interpreted as an open border. pmax : None or number or ndarray Upper interval boundary for `peak_properties`. ``None`` is interpreted as an open border. Returns ------- keep : bool A boolean mask evaluating to true where `peak_properties` confirms to the interval. See Also -------- find_peaks Notes ----- .. versionadded:: 1.1.0 """ keep = np.ones(peak_properties.size, dtype=bool) if pmin is not None: keep &= (pmin <= peak_properties) if pmax is not None: keep &= (peak_properties <= pmax) return keep
29,035
def from_period_type_name(period_type_name: str) -> PeriodType:
    """
    Safely get Period Type from its name.

    :param period_type_name: Name of the period type.
    :return: Period type enum.
    :raises AttributeError: if the name matches no known period type.
    """
    period_type_values = [item.value for item in PeriodType]
    normalized = period_type_name.lower()
    if normalized in period_type_values:
        return PeriodType(normalized)
    raise AttributeError(f"Non-existent period type {period_type_name}, supported types: {period_type_values}")
29,036
def scale_down_all_workloads(wait_time: int):
    """Kill all workloads"""
    # Scale every statefulset to zero replicas, then give the cluster
    # `wait_time` seconds to settle.
    default_shell_run(
        "kubectl scale sts --all --replicas=0 && sleep {wait_time}".format(wait_time=wait_time))
29,037
def randomInt(length=4, seed=None):
    """
    Returns random integer value with provided number of digits

    >>> random.seed(0)
    >>> randomInt(6)
    874254
    """
    if seed is not None:
        # Seeded draws use the per-thread PRNG for reproducibility.
        _ = getCurrentThreadData().random
        _.seed(seed)
        choice = _.choice
    else:
        choice = random.choice
    # The first digit must be non-zero so the result keeps `length` digits.
    digits = []
    for position in xrange(0, length):
        pool = string.digits if position != 0 else string.digits.replace('0', '')
        digits.append(choice(pool))
    return int("".join(digits))
29,038
def matchatleastone(text, regexes):
    """Return all substrings of ``text`` matching at least one regex."""
    combined = "|".join(regexes)
    return re.findall(combined, text)
29,039
def safe_string_equals(a, b):
    """ Near-constant time string comparison.

    Used in order to avoid timing attacks on sensitive information such
    as secret keys during request verification (`rootLabs`_).

    .. _`rootLabs`: http://rdist.root.org/2010/01/07/timing-independent-array-comparison/

    """
    if len(a) != len(b):
        return False

    diff = 0
    for ch_a, ch_b in zip(a, b):
        # Accumulate differences instead of returning early, so the
        # comparison time does not leak where the strings diverge.
        diff |= ord(ch_a) ^ ord(ch_b)
    return diff == 0
29,040
def DFS_complete(g):
    """Perform DFS for entire graph and return forest as a dictionary.

    forest maps each vertex v to the edge that was used to discover it.
    (Vertices that are roots of a DFS tree are mapped to None.)

    :param g: a Graph class object
    :type g: Graph
    :return: A tuple of dicts summarizing the clusters of the input graph.
        The second returned value is a dict where a key is a discovery
        vertex of a cluster and its corresponding value is the list of
        vertices in its cluster.
    :rtype: tuple
    """
    forest = {}
    clusters = {}
    for vertex in g.vertices():
        if vertex in forest:
            continue
        forest[vertex] = None  # vertex is the root of a new DFS tree
        members = [vertex]
        DFS(g, vertex, forest, members)
        clusters[vertex] = members
    return forest, clusters
29,041
def get_keys(mapping, *keys):
    """Yield the values for ``keys`` from ``mapping``, in the given order."""
    for key in keys:
        yield mapping[key]
29,042
def test_initialize_physical_parameters():
    """
    Checks function SolveDiffusion2D.initialize_physical_parameters
    """
    # (Docstring fixed: it previously named initialize_domain, which is
    # only the precondition here, not the function under test.)
    solver = SolveDiffusion2D()
    solver.initialize_domain(22., 3., 0.5, 0.86)

    solver.initialize_physical_parameters(7.0)

    # dt is derived from dx, dy and d; compare at 3 decimal places.
    assert 0.013 == round(solver.dt, 3)
29,043
def accents_dewinize(text):
    """Replace Win1252 symbols with ASCII chars or sequences

    needed when copying code parts from MS Office, like Word...

    From the book "Fluent Python" by Luciano Ramalho (O'Reilly, 2015)

    >>> accents_dewinize('“Stupid word • error inside™ ”')
    '"Stupid word - error inside(TM) "'
    """
    # Thin wrapper around the sanitize helper.
    cleaned = sanitize.dewinize(text)
    return cleaned
29,044
def spiral_trajectory(base_resolution,
                      spiral_arms,
                      field_of_view,
                      max_grad_ampl,
                      min_rise_time,
                      dwell_time,
                      views=1,
                      phases=None,
                      ordering='linear',
                      angle_range='full',
                      tiny_number=7,
                      readout_os=2.0,
                      gradient_delay=0.0,
                      larmor_const=42.577478518,
                      vd_inner_cutoff=1.0,
                      vd_outer_cutoff=1.0,
                      vd_outer_density=1.0,
                      vd_type='linear'):
  """Calculate a spiral trajectory.

  Thin wrapper that forwards all spiral-specific parameters to
  `_kspace_trajectory` with trajectory type `'spiral'`.

  Args:
    base_resolution: An `int`. The base resolution, or number of pixels in
      the readout dimension.
    spiral_arms: An `int`. The number of spiral arms that a fully sampled
      k-space should be divided into.
    field_of_view: A `float`. The field of view, in mm.
    max_grad_ampl: A `float`. The maximum allowed gradient amplitude, in
      mT/m.
    min_rise_time: A `float`. The minimum allowed rise time, in us/(mT/m).
    dwell_time: A `float`. The digitiser's real dwell time, in us. This does
      not include oversampling. The effective dwell time (with oversampling)
      is equal to `dwell_time * readout_os`.
    views: An `int`. The number of radial views per phase.
    phases: An `int`. The number of phases for cine acquisitions. If `None`,
      this is assumed to be a non-cine acquisition with no time dimension.
    ordering: A `string`. The ordering type. Must be one of:
      `{'linear', 'golden', 'tiny', 'sorted'}`.
    angle_range: A `string`. The range of the rotation angle. Must be one of
      `{'full', 'half'}`. If `'full'`, the full circle/sphere is included in
      the range; if `'half'`, only a semicircle/hemisphere.
    tiny_number: An `int`. The tiny golden angle number. Only used if
      `ordering` is `'tiny'` or `'tiny_half'`. Must be >= 2. Defaults to 7.
    readout_os: A `float`. The readout oversampling factor. Defaults to 2.0.
    gradient_delay: A `float`. The system's gradient delay relative to the
      ADC, in us. Defaults to 0.0.
    larmor_const: A `float`. The Larmor constant of the imaging nucleus, in
      MHz/T. Defaults to 42.577478518 (the Larmor constant of the 1H
      nucleus).
    vd_inner_cutoff: Defines the inner, high-density portion of k-space.
      Must be between 0.0 and 1.0, where 0.0 is the center of k-space and
      1.0 is the edge. Between 0.0 and `vd_inner_cutoff`, k-space will be
      sampled at the Nyquist rate.
    vd_outer_cutoff: Defines the outer, low-density portion of k-space.
      Must be between 0.0 and 1.0, where 0.0 is the center of k-space and
      1.0 is the edge. Between `vd_outer_cutoff` and 1.0, k-space will be
      sampled at a rate `vd_outer_density` times the Nyquist rate.
    vd_outer_density: Defines the sampling density in the outer portion of
      k-space. Must be > 0.0. Higher means more densely sampled. Multiplies
      the Nyquist rate: 1.0 means sampling at the Nyquist rate, < 1.0 means
      undersampled and > 1.0 means oversampled.
    vd_type: Defines the rate of variation of the sampling density in the
      variable-density portion of k-space, i.e., between `vd_inner_cutoff`
      and `vd_outer_cutoff`. Must be one of `'linear'`, `'quadratic'` or
      `'hanning'`.

  Returns:
    A `Tensor` of type `float32` and shape `[views, samples, 2]` if `phases`
    is `None`, or of shape `[phases, views, samples, 2]` if `phases` is not
    `None`. `samples` is equal to `base_resolution * readout_os`. The units
    are radians/voxel, ie, values are in the range `[-pi, pi]`.

  References:
    .. [1] Pipe, J.G. and Zwart, N.R. (2014), Spiral trajectory design: A
      flexible numerical algorithm and base analytical equations. Magn.
      Reson. Med, 71: 278-285. https://doi.org/10.1002/mrm.24675
  """
  return _kspace_trajectory('spiral',
                            {'base_resolution': base_resolution,
                             'spiral_arms': spiral_arms,
                             'field_of_view': field_of_view,
                             'max_grad_ampl': max_grad_ampl,
                             'min_rise_time': min_rise_time,
                             'dwell_time': dwell_time,
                             'readout_os': readout_os,
                             'gradient_delay': gradient_delay,
                             'larmor_const': larmor_const,
                             'vd_inner_cutoff': vd_inner_cutoff,
                             'vd_outer_cutoff': vd_outer_cutoff,
                             'vd_outer_density': vd_outer_density,
                             'vd_type': vd_type},
                            views=views,
                            phases=phases,
                            ordering=ordering,
                            angle_range=angle_range,
                            tiny_number=tiny_number)
29,045
def total_examples(X):
    """Counts the total number of examples of a sharded and sliced data object X."""
    # Sum the length of every slice across every shard in a single pass.
    return sum(len(piece) for shard in X for piece in shard)
29,046
def convert_and_save(model,
                     input_shape,
                     weights=True,
                     quiet=True,
                     ignore_tests=False,
                     input_range=None,
                     filename=None,
                     directory=None):
    """
    Conversion between PyTorch and Keras, and automatic save
    (Conversions from Keras to PyTorch aren't implemented)

    Arguments:
    -model:
        A Keras or PyTorch model or layer to convert
    -input_shape:
        Input shape (list, tuple or int), without batchsize.
    -weights (bool):
        Also convert weights. If set to false, only convert model
        architecture
    -quiet (bool):
        If a progress bar and some messages should appear
    -ignore_tests (bool):
        If tests should be ignored. If set to True, converted model will
        still be tested by security. If models are not identical, it will
        only print a warning. If set to False, and models are not identical,
        RuntimeWarning will be raised. If weights is False, tests are
        automatically ignored
    -input_range:
        Optional. A list of 2 elements containing max and min values to give
        as input to the model when performing the tests. If None, models
        will be tested on samples from the "standard normal" distribution.
    -filename:
        Filename to give to model's hdf5 file. If filename is not None and
        save is not False, then save will automatically be set to True
    -directory:
        Where to save model's hdf5 file. If directory is not None and save
        is not False, then save will automatically be set to True

    Returns:
        Name of created hdf5 file
    """
    # Thin wrapper around convert() with saving forced on.
    return convert(model=model,
                   input_shape=input_shape,
                   weights=weights,
                   quiet=quiet,
                   ignore_tests=ignore_tests,
                   input_range=input_range,
                   save=True,
                   filename=filename,
                   directory=directory)
29,047
def no_afni():
    """Return ``True`` when AFNI is not available.

    AFNI is considered present when ``Info.version()`` reports a version;
    ``None`` means the tooling could not be found.

    :return: ``True`` if AFNI is missing, ``False`` otherwise.
    """
    # Collapse the original if/return-True/return-False into one expression.
    return Info.version() is None
29,048
def delta_eta_plot_projection_range_string(inclusive_analysis: "correlations.Correlations") -> str: """ Provides a string that describes the delta phi projection range for delta eta plots. """ # The limit is almost certainly a multiple of pi, so we try to express it more naturally # as a value like pi/2 or 3*pi/2 value = _find_pi_coefficient(value = inclusive_analysis.near_side_phi_region.max) return labels.make_valid_latex_string( fr"$|\Delta\varphi|<{value}$" )
29,049
def select_gpu(gpu_ids=None, gpu_mem_frac=None):
    """
    Find the GPU ID with highest available memory fraction.
    If ID is given as input, set the gpu_mem_frac to maximum available,
    or if a memory fraction is given, make sure the given GPU has the desired
    memory fraction available.
    Currently only supports single GPU runs.

    :param int gpu_ids: Desired GPU ID. If None, find GPU with the most memory
        available.
    :param float gpu_mem_frac: Desired GPU memory fraction [0, 1]. If None,
        use maximum available amount of GPU.
    :return int gpu_ids: GPU ID to use.
    :return float cur_mem_frac: GPU memory fraction to use
    :raises NotImplementedError: If gpu_ids is not int
    :raises AssertionError: If requested memory fraction isn't available
    """
    # Check if user has specified a GPU ID to use
    if not isinstance(gpu_ids, type(None)):
        # Currently only supporting one GPU as input
        if not isinstance(gpu_ids, int):
            raise NotImplementedError
        # -1 is the conventional "run on CPU" sentinel
        if gpu_ids == -1:
            return -1, 0
        cur_mem_frac = check_gpu_availability(gpu_ids)
        if not isinstance(gpu_mem_frac, type(None)):
            if isinstance(gpu_mem_frac, float):
                gpu_mem_frac = [gpu_mem_frac]
            # Requested fraction(s) must all be available on this GPU
            assert np.all(np.array(cur_mem_frac >= gpu_mem_frac)), \
                ("Not enough memory available. Requested/current fractions:",
                 "\n".join([str(c) + " / " + "{0:.4g}".format(m)
                            for c, m in zip(gpu_mem_frac, cur_mem_frac)]))
        return gpu_ids, cur_mem_frac[0]
    # User has not specified GPU ID, find the GPU with most memory available
    sp = subprocess.Popen(['nvidia-smi --query-gpu=index --format=csv'],
                          stdout=subprocess.PIPE,
                          shell=True)
    gpu_ids = sp.communicate()
    gpu_ids = gpu_ids[0].decode('utf8')
    # nvidia-smi output: a CSV header line, one line per GPU, trailing newline
    gpu_ids = gpu_ids.split('\n')
    # If no GPUs are found, run on CPU (debug mode)
    if len(gpu_ids) <= 2:
        print('No GPUs found, run will be slow. \nQuery result: {}'.format(gpu_ids))
        return -1, 0
    # Drop the header line and the trailing empty line before parsing IDs
    gpu_ids = [int(gpu_id) for gpu_id in gpu_ids[1:-1]]
    cur_mem_frac = check_gpu_availability(gpu_ids)
    # Get the GPU with maximum memory fraction
    max_mem = max(cur_mem_frac)
    idx = cur_mem_frac.index(max_mem)
    gpu_id = gpu_ids[idx]
    # Subtract a little margin to be safe
    max_mem = max_mem - np.finfo(np.float32).eps
    print('Using GPU {} with memory fraction {}.'.format(gpu_id, max_mem))
    return gpu_id, max_mem
29,050
def load_classification_dataset(
        fname,
        tokenizer,
        input_field_a,
        input_field_b=None,
        label_field='label',
        label_map=None,
        limit=None
):
    """
    Loads a dataset for classification.

    Parameters
    ==========
    fname : pathlib.Path
        Dataset file; its suffix selects the loader from ``LOADERS``.
    tokenizer : transformers.PretrainedTokenizer
        Maps text to id tensors.
    input_field_a : str
        Name of the (first) text field in each instance.
    input_field_b : str, optional
        Name of the second text field for sentence-pair tasks; ``None`` for
        single-sentence tasks.
    label_field : str
        Name of the field holding the label. Defaults to ``'label'``.
    label_map : dict, optional
        Existing label -> id mapping to extend; a fresh one is created when
        ``None`` (or empty).
    limit : int, optional
        If given, keep only this many randomly sampled instances.

    Returns
    =======
    tuple
        ``(instances, label_map)`` where ``instances`` is a list of
        ``(model_inputs, label_id tensor)`` pairs.
    """
    instances = []
    label_map = label_map or {}
    loader = LOADERS[fname.suffix]
    for instance in loader(fname):
        logger.debug(instance)
        model_inputs = tokenizer.encode_plus(
            instance[input_field_a],
            instance[input_field_b] if input_field_b else None,
            add_special_tokens=True,
            # add_prefix_space=True,
            return_tensors='pt'
        )
        logger.debug(model_inputs)
        label = instance[label_field]
        # Assign label ids in first-seen order, extending any provided map.
        if label not in label_map:
            label_map[label] = len(label_map)
        label_id = label_map[label]
        label_id = torch.tensor([[label_id]])  # To make collator expectation
        logger.debug(f'Label id: {label_id}')
        instances.append((model_inputs, label_id))
    if limit:
        instances = random.sample(instances, limit)
    return instances, label_map
29,051
def test_mnist(args): """ Calculates error rate on MNIST for a previously saved MulticlassClassifier instance. Parameters ---------- args : argparse.Namespace Arguments from command line. Returns ------- error_rate : float The calculated error rate. Raises ------ FileNotFound If unable to locate multicc_mnist.dill.xz """ # Arguments from the command line. process_count = args.process_count fraction_of_mnist = args.fraction_of_mnist # Load trained MulticlassClassifier. print('Loading MulticlassClassifier') load_filepath = 'multicc_mnist.dill.xz' if os.path.isfile(load_filepath): with lzma.open(load_filepath, 'rb') as multicc_file: multicc = dill.load(multicc_file) else: raise FileNotFoundError('Unable to locate ' + load_filepath) # Set other binary classifier predict arguments. # We set option so the voted perceptron predict method returns a real-valued confidence score # as opposed to a label. This is required by MulticlassClassifier. other_bc_predict_args = tuple(['score']) other_bc_predict_kwargs = {} # Get data and labels for testing. print('Loading MNIST data') data, labels = get_data_and_labels('test', fraction_of_mnist) # Calculate error rate on test data. print('Computing error rate') error_rate = multicc.error_rate(data, labels, other_bc_predict_args, other_bc_predict_kwargs, process_count) return error_rate
29,052
def has_content_in(page, language):
    """Filter that returns ``True`` if the page has any content in a
    particular language.

    :param page: the current page (``None`` yields ``False``)
    :param language: the language you want to look at
    :return: bool
    """
    if page is None:
        return False
    # exists() lets the database stop at the first matching row instead of
    # counting every Content record for this page/language pair.
    return Content.objects.filter(page=page, language=language).exists()
29,053
def public_encrypt(key, data, oaep):
    """
    Encrypt *data* with an RSA public key and return it base64-encoded.

    data: the data to be encrypted, bytes
    key: pem-formatted key string or bytes
    oaep: True selects PKCS1-OAEP padding, False selects PKCS1 v1.5
    """
    pem = key.encode("ascii") if isinstance(key, str) else key
    pubkey = load_public_key(pem)
    # Pick the padding scheme, then encrypt with the shared signature.
    encrypt = rsa_oaep_encrypt if oaep else rsa_pkcs1v15_encrypt
    ciphertext = encrypt(pubkey, data)
    return b64encode(ciphertext).decode("ascii")
29,054
def newton_method(f, x_init = 0, epsilon = 1e-10):
    """
    Newton-Raphson root finder.

    Parameters
    ---
    f: Function to calculate root for
    x_init(optional) : initial value of x
    epsilon(optional): Adjustable precision

    Returns
    ---
    x: Value of root
    """
    current = x_init
    previous = current + 2 * epsilon  # guarantees at least one iteration
    steps = 0
    while abs(previous - current) > epsilon:
        previous = current
        slope = derivative(f, current)
        current -= f(current) / slope
        steps += 1
    print(f"Newton Method converged in {steps} iterations")
    return current
29,055
def create_preference_branch(this, args, callee):
    """Creates a preference branch, which can be used for testing composed
    preference names.

    :param this: wrapped object the call was made on; provides the traverser
    :param args: call arguments; args[0] is expected to be the branch-name
        literal
    :param callee: the callee node (unused here)
    :return: a wrapped object exposing the ``nsIPrefBranch`` interface when
        args[0] is a literal; otherwise ``None`` (implicit)
    """
    if args:
        if args[0].is_literal:
            res = this.traverser.wrap().query_interface('nsIPrefBranch')
            # Record the branch prefix so later lookups can compose full
            # preference names against it.
            res.hooks['preference_branch'] = args[0].as_str()
            return res
29,056
def circle(
    gdf,
    radius=10,
    fill=True,
    fill_color=None,
    name="layer",
    width=950,
    height=550,
    location=None,
    color="blue",
    tooltip=None,
    zoom=7,
    tiles="OpenStreetMap",
    attr=None,
    style={},
):
    """
    Convert Geodataframe to geojson and plot it.

    Parameters
    ----------
    gdf : GeoDataframe
    radius: radius of the circle
    fill: fill the circle
    fill_color: fill the circle with this color (column name or color)
    name : name of the geojson layer, optional, default "layer"
    width : width of the map, default 950
    height : height of the map, default 550
    location : center of the map rendered, default centroid of first geometry
    color : color of your geometries, default blue
        use random to randomize the colors (column name or color)
    tooltip : hover box on the map with geometry info, default all columns
        can be a list of column names
    zoom : zoom level of the map, default 7
    tiles : basemap, default openstreetmap,
        options ['google','googlesatellite','googlehybrid'] or custom wms
    attr : Attribution to external basemaps being used, default None
    style : dict, additional style to geometries

    Returns
    -------
    m : folium.map
    """
    gpd_copy = _get_lat_lon(gdf.copy())
    m = _folium_map(
        gpd_copy, width, height, location, tiles=tiles, attr=attr, zoom_start=zoom
    )
    # Decide once whether color/fill_color refer to dataframe columns
    # (per-row values) or are literal colors shared by every marker.
    columns = list(gpd_copy.columns)
    color_is_column = color in columns
    fill_is_column = fill_color in columns
    for index, row in gpd_copy.iterrows():
        # BUGFIX: compute per-row values in locals. The previous version
        # reassigned the ``tooltip``/``color``/``fill_color`` parameters
        # inside the loop, so every row after the first reused the first
        # row's rendered tooltip and colors.
        if tooltip is not None:
            tooltip_dict = {k: v for k, v in dict(row).items() if k in tooltip}
            row_tooltip = "".join(
                [
                    "<p><b>{}</b> {}</p>".format(keyvalue[0], keyvalue[1])
                    for keyvalue in list(tooltip_dict.items())
                ]
            )
        else:
            row_tooltip = _get_tooltip(tooltip, gdf)
        row_fill_color = row[fill_color] if fill_is_column else fill_color
        row_color = row[color] if color_is_column else color
        folium.Circle(
            radius=radius,
            location=[row["latitude"], row["longitude"]],
            tooltip=row_tooltip,
            popup=row_tooltip,
            fill=fill,
            color=row_color,
            fill_color=row_fill_color,
        ).add_to(m)
    return m
29,057
def draw_rectangle(turtle):
    """
    This function draws the Guitar handle

    :param turtle: The name of the turtle
    :return: None
    """
    # RGB fill/pen color; assumes turtle.colormode(255) was set by the
    # caller so integer tuples are accepted -- TODO confirm.
    turtle.color(139,69,19)
    turtle.penup()
    turtle.setpos(30, -45)  # move to the handle's starting corner
    turtle.begin_fill()
    turtle.pendown()
    # Trace a 40 x 300 rectangle: two width/height side pairs.
    for i in range(2):
        turtle.forward(40)
        turtle.left(90)
        turtle.forward(300)
        turtle.left(90)
    turtle.end_fill()
29,058
def no_recurse(f):
    """Wrapper function that forces a function to return True if it recurses.

    If a frame whose function name equals ``f.__name__`` is already on the
    call stack, the wrapper short-circuits and returns ``True``; otherwise
    it calls ``f`` normally.

    Note: detection is by function *name* in the traceback, so an unrelated
    same-named function on the stack also triggers the short-circuit.
    """
    from functools import wraps

    @wraps(f)  # preserve f's __name__/__doc__ on the wrapper
    def func(*args, **kwargs):
        for frame in traceback.extract_stack():
            if frame[2] == f.__name__:
                return True
        return f(*args, **kwargs)
    return func
29,059
def get_tap_number(distSys: SystemClass, names: List[str]) -> pandas.DataFrame:
    """
    Get the tap number of regulators.

    Args:
        distSys : An instance of [SystemClass][dssdata.SystemClass].
        names : Regulators names

    Returns:
        The tap number of regulators, one row per regulator with columns
        ``reg_name`` and ``tap``.
    """
    def get_one(reg_name: str) -> Tuple[str, int]:
        # Select the regulator by name, then read its current tap position.
        distSys.dss.RegControls.Name(reg_name)
        return (reg_name, int(distSys.dss.RegControls.TapNumber()))
    # Fail early if any requested regulator is unknown to the model.
    __check_elements(names, distSys.dss.RegControls.AllNames())
    return pandas.DataFrame(
        data=tuple(map(get_one, names)), columns=["reg_name", "tap"]
    )
29,060
def status(app: str) -> dict:
    """
    Look up the current status of a Heroku app via a Herokron instance.

    :param app: The name of the Heroku app in which you want to change
    :type app: str
    :return: dictionary containing information about the app's status
    """
    return Herokron(app).status()
29,061
def GetIPv4Interfaces():
    """Returns a list of IPv4 interfaces, excluding loopback devices."""
    return [
        iface
        for iface in sorted(netifaces.interfaces())
        if not iface.startswith('lo')
    ]
29,062
def merge_dicts(source, destination):
    """
    Recursively merge *source* into *destination*.

    *source* is only read; *destination* is modified in place (nested dicts
    are merged key by key, any other value is overwritten) and returned.
    """
    for key, value in source.items():
        if not isinstance(value, dict):
            destination[key] = value
            continue
        # Merge into the existing sub-dict, creating one when absent.
        merge_dicts(value, destination.setdefault(key, {}))
    return destination
29,063
def format_code(session): """Run code reformatter""" # session.install("-e", ".[lint]") session.install( "isort==4.3.21", "seed-isort-config==2.1.0", "black==19.10b0" ) session.run("seed-isort-config", success_codes=[0, 1]) session.run("isort", "-rc", *files_to_format) session.run("black", "-l", max_line_length, *files_to_format)
29,064
def create_hive_connection():
    """
    Create a JDBC connection to Hive.

    Connection settings (``hive_jdbc_url``, ``hive_username``,
    ``hive_password``, ``hive_jar_path``) are read from module-level globals.

    :return: an open jaydebeapi connection
    :raises Exception: any driver error is propagated unchanged
    """
    # No try/except wrapper: the previous version re-raised
    # ``Exception(e)``, which discarded the original exception type and
    # traceback; letting the driver's exception propagate is strictly more
    # informative and remains catchable as ``Exception``.
    return jaydebeapi.connect('org.apache.hive.jdbc.HiveDriver',
                              hive_jdbc_url,
                              [hive_username, hive_password],
                              hive_jar_path,
                              '')
29,065
def main():
    """
    Program entry point.

    Returns
    -------
    int
        returns integer 0 for safe executions.
    """
    greeting = 'Hello World'
    print(greeting)
    return 0
29,066
def brightness(image, magnitude, name=None):
    """Adjusts the `magnitude` of brightness of an `image`.

    Args:
        image: An int or float tensor of shape `[height, width, num_channels]`.
        magnitude: A 0-D float tensor or single floating point value above 0.0.
        name: An optional string for name of the operation.

    Returns:
        A tensor with same shape and type as that of `image`.
    """
    _check_image_dtype(image)
    with tf.name_scope(name or "brightness"):
        # Blending the image against all-black scales overall brightness.
        black = tf.zeros_like(image)
        return blend(black, image, magnitude)
29,067
def sitemap_xml():
    """Sitemap XML

    Renders the ``core/sitemap.xml`` template and serves it with the
    ``text/xml`` mimetype expected by crawlers.
    """
    sitemap = render_template("core/sitemap.xml")
    return Response(sitemap, mimetype="text/xml")
29,068
def build_editable(location, expose=None, hide=None):
    """Generate files that can be added to a wheel to expose packages from
    a directory.

    By default, every package (directory with __init__.py) in the supplied
    location will be exposed on sys.path by the generated wheel.

    Optional arguments:

    expose: A list of packages to include in the generated wheel
        (overrides the default behaviour).
    hide: A list of sub-packages of exposed packages that will be
        invisible in the generated wheel.

    Yields:
        (name, content) pairs, specifying files that should be added to
        the generated wheel. Callers are responsible for building a valid
        wheel containing these files.
    """
    location = Path(location)
    if expose is None:
        # Default: every immediate child package of `location`.
        expose = [pkg.parent.name for pkg in location.glob("*/__init__.py")]
    if hide is None:
        hide = []
    for pkg in expose:
        code = _TEMPLATE
        # Substitute the placeholder literals in the module template with
        # the real location and exclude list (repr() keeps them as valid
        # Python source inside the generated file).
        for of, to in {
            '"" # location of replacement': str(location),
            '"" # excludes': hide,
        }.items():
            code = code.replace(of, repr(to))
        yield "{}.py".format(pkg), code
29,069
def get_CM(): """Pertzの係数CMをndarrayとして取得する Args: Returns: CM(ndarray[float]):Pertzの係数CM """ # pythonは0オリジンのため全て-1 CM = [0.385230, 0.385230, 0.385230, 0.462880, 0.317440,#1_1 => 0_0 0.338390, 0.338390, 0.221270, 0.316730, 0.503650, 0.235680, 0.235680, 0.241280, 0.157830, 0.269440, 0.830130, 0.830130, 0.171970, 0.841070, 0.457370, 0.548010, 0.548010, 0.478000, 0.966880, 1.036370, 0.548010, 0.548010, 1.000000, 3.012370, 1.976540, 0.582690, 0.582690, 0.229720, 0.892710, 0.569950, 0.131280, 0.131280, 0.385460, 0.511070, 0.127940,#1_2 => 0_1 0.223710, 0.223710, 0.193560, 0.304560, 0.193940, 0.229970, 0.229970, 0.275020, 0.312730, 0.244610, 0.090100, 0.184580, 0.260500, 0.687480, 0.579440, 0.131530, 0.131530, 0.370190, 1.380350, 1.052270, 1.116250, 1.116250, 0.928030, 3.525490, 2.316920, 0.090100, 0.237000, 0.300040, 0.812470, 0.664970, 0.587510, 0.130000, 0.400000, 0.537210, 0.832490,#1_3 => 0_2 0.306210, 0.129830, 0.204460, 0.500000, 0.681640, 0.224020, 0.260620, 0.334080, 0.501040, 0.350470, 0.421540, 0.753970, 0.750660, 3.706840, 0.983790, 0.706680, 0.373530, 1.245670, 0.864860, 1.992630, 4.864400, 0.117390, 0.265180, 0.359180, 3.310820, 0.392080, 0.493290, 0.651560, 1.932780, 0.898730, 0.126970, 0.126970, 0.126970, 0.126970, 0.126970,#1_4 => 0_3 0.810820, 0.810820, 0.810820, 0.810820, 0.810820, 3.241680, 2.500000, 2.291440, 2.291440, 2.291440, 4.000000, 3.000000, 2.000000, 0.975430, 1.965570, 12.494170, 12.494170, 8.000000, 5.083520, 8.792390, 21.744240, 21.744240, 21.744240, 21.744240, 21.744240, 3.241680, 12.494170, 1.620760, 1.375250, 2.331620, 0.126970, 0.126970, 0.126970, 0.126970, 0.126970,#1_5 => 0_4 0.810820, 0.810820, 0.810820, 0.810820, 0.810820, 3.241680, 2.500000, 2.291440, 2.291440, 2.291440, 4.000000, 3.000000, 2.000000, 0.975430, 1.965570, 12.494170, 12.494170, 8.000000, 5.083520, 8.792390, 21.744240, 21.744240, 21.744240, 21.744240, 21.744240, 3.241680, 12.494170, 1.620760, 1.375250, 2.331620, 0.126970, 0.126970, 0.126970, 0.126970, 0.126970,#1_6 => 
0_5 0.810820, 0.810820, 0.810820, 0.810820, 0.810820, 3.241680, 2.500000, 2.291440, 2.291440, 2.291440, 4.000000, 3.000000, 2.000000, 0.975430, 1.965570, 12.494170, 12.494170, 8.000000, 5.083520, 8.792390, 21.744240, 21.744240, 21.744240, 21.744240, 21.744240, 3.241680, 12.494170, 1.620760, 1.375250, 2.331620, 0.337440, 0.337440, 0.969110, 1.097190, 1.116080,#2_1 => 1_0 0.337440, 0.337440, 0.969110, 1.116030, 0.623900, 0.337440, 0.337440, 1.530590, 1.024420, 0.908480, 0.584040, 0.584040, 0.847250, 0.914940, 1.289300, 0.337440, 0.337440, 0.310240, 1.435020, 1.852830, 0.337440, 0.337440, 1.015010, 1.097190, 2.117230, 0.337440, 0.337440, 0.969110, 1.145730, 1.476400, 0.300000, 0.300000, 0.700000, 1.100000, 0.796940,#2_2 => 1_1 0.219870, 0.219870, 0.526530, 0.809610, 0.649300, 0.386650, 0.386650, 0.119320, 0.576120, 0.685460, 0.746730, 0.399830, 0.470970, 0.986530, 0.785370, 0.575420, 0.936700, 1.649200, 1.495840, 1.335590, 1.319670, 4.002570, 1.276390, 2.644550, 2.518670, 0.665190, 0.678910, 1.012360, 1.199940, 0.986580, 0.378870, 0.974060, 0.500000, 0.491880, 0.665290,#2_3 => 1_2 0.105210, 0.263470, 0.407040, 0.553460, 0.582590, 0.312900, 0.345240, 1.144180, 0.854790, 0.612280, 0.119070, 0.365120, 0.560520, 0.793720, 0.802600, 0.781610, 0.837390, 1.270420, 1.537980, 1.292950, 1.152290, 1.152290, 1.492080, 1.245370, 2.177100, 0.424660, 0.529550, 0.966910, 1.033460, 0.958730, 0.310590, 0.714410, 0.252450, 0.500000, 0.607600,#2_4 => 1_3 0.975190, 0.363420, 0.500000, 0.400000, 0.502800, 0.175580, 0.196250, 0.476360, 1.072470, 0.490510, 0.719280, 0.698620, 0.657770, 1.190840, 0.681110, 0.426240, 1.464840, 0.678550, 1.157730, 0.978430, 2.501120, 1.789130, 1.387090, 2.394180, 2.394180, 0.491640, 0.677610, 0.685610, 1.082400, 0.735410, 0.597000, 0.500000, 0.300000, 0.310050, 0.413510,#2_5 => 1_4 0.314790, 0.336310, 0.400000, 0.400000, 0.442460, 0.166510, 0.460440, 0.552570, 1.000000, 0.461610, 0.401020, 0.559110, 0.403630, 1.016710, 0.671490, 0.400360, 0.750830, 0.842640, 
1.802600, 1.023830, 3.315300, 1.510380, 2.443650, 1.638820, 2.133990, 0.530790, 0.745850, 0.693050, 1.458040, 0.804500, 0.597000, 0.500000, 0.300000, 0.310050, 0.800920,#2_6 => 1_5 0.314790, 0.336310, 0.400000, 0.400000, 0.237040, 0.166510, 0.460440, 0.552570, 1.000000, 0.581990, 0.401020, 0.559110, 0.403630, 1.016710, 0.898570, 0.400360, 0.750830, 0.842640, 1.802600, 3.400390, 3.315300, 1.510380, 2.443650, 1.638820, 2.508780, 0.204340, 1.157740, 2.003080, 2.622080, 1.409380, 1.242210, 1.242210, 1.242210, 1.242210, 1.242210,#3_1 => 2_0 0.056980, 0.056980, 0.656990, 0.656990, 0.925160, 0.089090, 0.089090, 1.040430, 1.232480, 1.205300, 1.053850, 1.053850, 1.399690, 1.084640, 1.233340, 1.151540, 1.151540, 1.118290, 1.531640, 1.411840, 1.494980, 1.494980, 1.700000, 1.800810, 1.671600, 1.018450, 1.018450, 1.153600, 1.321890, 1.294670, 0.700000, 0.700000, 1.023460, 0.700000, 0.945830,#3_2 => 2_1 0.886300, 0.886300, 1.333620, 0.800000, 1.066620, 0.902180, 0.902180, 0.954330, 1.126690, 1.097310, 1.095300, 1.075060, 1.176490, 1.139470, 1.096110, 1.201660, 1.201660, 1.438200, 1.256280, 1.198060, 1.525850, 1.525850, 1.869160, 1.985410, 1.911590, 1.288220, 1.082810, 1.286370, 1.166170, 1.119330, 0.600000, 1.029910, 0.859890, 0.550000, 0.813600,#3_3 => 2_2 0.604450, 1.029910, 0.859890, 0.656700, 0.928840, 0.455850, 0.750580, 0.804930, 0.823000, 0.911000, 0.526580, 0.932310, 0.908620, 0.983520, 0.988090, 1.036110, 1.100690, 0.848380, 1.035270, 1.042380, 1.048440, 1.652720, 0.900000, 2.350410, 1.082950, 0.817410, 0.976160, 0.861300, 0.974780, 1.004580, 0.782110, 0.564280, 0.600000, 0.600000, 0.665740,#3_4 => 2_3 0.894480, 0.680730, 0.541990, 0.800000, 0.669140, 0.487460, 0.818950, 0.841830, 0.872540, 0.709040, 0.709310, 0.872780, 0.908480, 0.953290, 0.844350, 0.863920, 0.947770, 0.876220, 1.078750, 0.936910, 1.280350, 0.866720, 0.769790, 1.078750, 0.975130, 0.725420, 0.869970, 0.868810, 0.951190, 0.829220, 0.791750, 0.654040, 0.483170, 0.409000, 0.597180,#3_5 => 2_4 0.566140, 
0.948990, 0.971820, 0.653570, 0.718550, 0.648710, 0.637730, 0.870510, 0.860600, 0.694300, 0.637630, 0.767610, 0.925670, 0.990310, 0.847670, 0.736380, 0.946060, 1.117590, 1.029340, 0.947020, 1.180970, 0.850000, 1.050000, 0.950000, 0.888580, 0.700560, 0.801440, 0.961970, 0.906140, 0.823880, 0.500000, 0.500000, 0.586770, 0.470550, 0.629790,#3_6 => 2_5 0.500000, 0.500000, 1.056220, 1.260140, 0.658140, 0.500000, 0.500000, 0.631830, 0.842620, 0.582780, 0.554710, 0.734730, 0.985820, 0.915640, 0.898260, 0.712510, 1.205990, 0.909510, 1.078260, 0.885610, 1.899260, 1.559710, 1.000000, 1.150000, 1.120390, 0.653880, 0.793120, 0.903320, 0.944070, 0.796130, 1.000000, 1.000000, 1.050000, 1.170380, 1.178090,#4_1 => 3_0 0.960580, 0.960580, 1.059530, 1.179030, 1.131690, 0.871470, 0.871470, 0.995860, 1.141910, 1.114600, 1.201590, 1.201590, 0.993610, 1.109380, 1.126320, 1.065010, 1.065010, 0.828660, 0.939970, 1.017930, 1.065010, 1.065010, 0.623690, 1.119620, 1.132260, 1.071570, 1.071570, 0.958070, 1.114130, 1.127110, 0.950000, 0.973390, 0.852520, 1.092200, 1.096590,#4_2 => 3_1 0.804120, 0.913870, 0.980990, 1.094580, 1.042420, 0.737540, 0.935970, 0.999940, 1.056490, 1.050060, 1.032980, 1.034540, 0.968460, 1.032080, 1.015780, 0.900000, 0.977210, 0.945960, 1.008840, 0.969960, 0.600000, 0.750000, 0.750000, 0.844710, 0.899100, 0.926800, 0.965030, 0.968520, 1.044910, 1.032310, 0.850000, 1.029710, 0.961100, 1.055670, 1.009700,#4_3 => 3_2 0.818530, 0.960010, 0.996450, 1.081970, 1.036470, 0.765380, 0.953500, 0.948260, 1.052110, 1.000140, 0.775610, 0.909610, 0.927800, 0.987800, 0.952100, 1.000990, 0.881880, 0.875950, 0.949100, 0.893690, 0.902370, 0.875960, 0.807990, 0.942410, 0.917920, 0.856580, 0.928270, 0.946820, 1.032260, 0.972990, 0.750000, 0.857930, 0.983800, 1.056540, 0.980240,#4_4 => 3_3 0.750000, 0.987010, 1.013730, 1.133780, 1.038250, 0.800000, 0.947380, 1.012380, 1.091270, 0.999840, 0.800000, 0.914550, 0.908570, 0.999190, 0.915230, 0.778540, 0.800590, 0.799070, 0.902180, 0.851560, 
0.680190, 0.317410, 0.507680, 0.388910, 0.646710, 0.794920, 0.912780, 0.960830, 1.057110, 0.947950, 0.750000, 0.833890, 0.867530, 1.059890, 0.932840,#4_5 => 3_4 0.979700, 0.971470, 0.995510, 1.068490, 1.030150, 0.858850, 0.987920, 1.043220, 1.108700, 1.044900, 0.802400, 0.955110, 0.911660, 1.045070, 0.944470, 0.884890, 0.766210, 0.885390, 0.859070, 0.818190, 0.615680, 0.700000, 0.850000, 0.624620, 0.669300, 0.835570, 0.946150, 0.977090, 1.049350, 0.979970, 0.689220, 0.809600, 0.900000, 0.789500, 0.853990,#4_6 => 3_5 0.854660, 0.852840, 0.938200, 0.923110, 0.955010, 0.938600, 0.932980, 1.010390, 1.043950, 1.041640, 0.843620, 0.981300, 0.951590, 0.946100, 0.966330, 0.694740, 0.814690, 0.572650, 0.400000, 0.726830, 0.211370, 0.671780, 0.416340, 0.297290, 0.498050, 0.843540, 0.882330, 0.911760, 0.898420, 0.960210, 1.054880, 1.075210, 1.068460, 1.153370, 1.069220,#5_1 => 4_0 1.000000, 1.062220, 1.013470, 1.088170, 1.046200, 0.885090, 0.993530, 0.942590, 1.054990, 1.012740, 0.920000, 0.950000, 0.978720, 1.020280, 0.984440, 0.850000, 0.908500, 0.839940, 0.985570, 0.962180, 0.800000, 0.800000, 0.810080, 0.950000, 0.961550, 1.038590, 1.063200, 1.034440, 1.112780, 1.037800, 1.017610, 1.028360, 1.058960, 1.133180, 1.045620,#5_2 => 4_1 0.920000, 0.998970, 1.033590, 1.089030, 1.022060, 0.912370, 0.949930, 0.979770, 1.020420, 0.981770, 0.847160, 0.935300, 0.930540, 0.955050, 0.946560, 0.880260, 0.867110, 0.874130, 0.972650, 0.883420, 0.627150, 0.627150, 0.700000, 0.774070, 0.845130, 0.973700, 1.006240, 1.026190, 1.071960, 1.017240, 1.028710, 1.017570, 1.025900, 1.081790, 1.024240,#5_3 => 4_2 0.924980, 0.985500, 1.014100, 1.092210, 0.999610, 0.828570, 0.934920, 0.994950, 1.024590, 0.949710, 0.900810, 0.901330, 0.928830, 0.979570, 0.913100, 0.761030, 0.845150, 0.805360, 0.936790, 0.853460, 0.626400, 0.546750, 0.730500, 0.850000, 0.689050, 0.957630, 0.985480, 0.991790, 1.050220, 0.987900, 0.992730, 0.993880, 1.017150, 1.059120, 1.017450,#5_4 => 4_3 0.975610, 0.987160, 1.026820, 
1.075440, 1.007250, 0.871090, 0.933190, 0.974690, 0.979840, 0.952730, 0.828750, 0.868090, 0.834920, 0.905510, 0.871530, 0.781540, 0.782470, 0.767910, 0.764140, 0.795890, 0.743460, 0.693390, 0.514870, 0.630150, 0.715660, 0.934760, 0.957870, 0.959640, 0.972510, 0.981640, 0.965840, 0.941240, 0.987100, 1.022540, 1.011160,#5_5 => 4_4 0.988630, 0.994770, 0.976590, 0.950000, 1.034840, 0.958200, 1.018080, 0.974480, 0.920000, 0.989870, 0.811720, 0.869090, 0.812020, 0.850000, 0.821050, 0.682030, 0.679480, 0.632450, 0.746580, 0.738550, 0.668290, 0.445860, 0.500000, 0.678920, 0.696510, 0.926940, 0.953350, 0.959050, 0.876210, 0.991490, 0.948940, 0.997760, 0.850000, 0.826520, 0.998470,#5_6 => 4_5 1.017860, 0.970000, 0.850000, 0.700000, 0.988560, 1.000000, 0.950000, 0.850000, 0.606240, 0.947260, 1.000000, 0.746140, 0.751740, 0.598390, 0.725230, 0.922210, 0.500000, 0.376800, 0.517110, 0.548630, 0.500000, 0.450000, 0.429970, 0.404490, 0.539940, 0.960430, 0.881630, 0.775640, 0.596350, 0.937680, 1.030000, 1.040000, 1.000000, 1.000000, 1.049510,#6_1 => 5_0 1.050000, 0.990000, 0.990000, 0.950000, 0.996530, 1.050000, 0.990000, 0.990000, 0.820000, 0.971940, 1.050000, 0.790000, 0.880000, 0.820000, 0.951840, 1.000000, 0.530000, 0.440000, 0.710000, 0.928730, 0.540000, 0.470000, 0.500000, 0.550000, 0.773950, 1.038270, 0.920180, 0.910930, 0.821140, 1.034560, 1.041020, 0.997520, 0.961600, 1.000000, 1.035780,#6_2 => 5_1 0.948030, 0.980000, 0.900000, 0.950360, 0.977460, 0.950000, 0.977250, 0.869270, 0.800000, 0.951680, 0.951870, 0.850000, 0.748770, 0.700000, 0.883850, 0.900000, 0.823190, 0.727450, 0.600000, 0.839870, 0.850000, 0.805020, 0.692310, 0.500000, 0.788410, 1.010090, 0.895270, 0.773030, 0.816280, 1.011680, 1.022450, 1.004600, 0.983650, 1.000000, 1.032940,#6_3 => 5_2 0.943960, 0.999240, 0.983920, 0.905990, 0.978150, 0.936240, 0.946480, 0.850000, 0.850000, 0.930320, 0.816420, 0.885000, 0.644950, 0.817650, 0.865310, 0.742960, 0.765690, 0.561520, 0.700000, 0.827140, 0.643870, 0.596710, 
0.474460, 0.600000, 0.651200, 0.971740, 0.940560, 0.714880, 0.864380, 1.001650, 0.995260, 0.977010, 1.000000, 1.000000, 1.035250,#6_4 => 5_3 0.939810, 0.975250, 0.939980, 0.950000, 0.982550, 0.876870, 0.879440, 0.850000, 0.900000, 0.917810, 0.873480, 0.873450, 0.751470, 0.850000, 0.863040, 0.761470, 0.702360, 0.638770, 0.750000, 0.783120, 0.734080, 0.650000, 0.600000, 0.650000, 0.715660, 0.942160, 0.919100, 0.770340, 0.731170, 0.995180, 0.952560, 0.916780, 0.920000, 0.900000, 1.005880,#6_5 => 5_4 0.928620, 0.994420, 0.900000, 0.900000, 0.983720, 0.913070, 0.850000, 0.850000, 0.800000, 0.924280, 0.868090, 0.807170, 0.823550, 0.600000, 0.844520, 0.769570, 0.719870, 0.650000, 0.550000, 0.733500, 0.580250, 0.650000, 0.600000, 0.500000, 0.628850, 0.904770, 0.852650, 0.708370, 0.493730, 0.949030, 0.911970, 0.800000, 0.800000, 0.800000, 0.956320,#6_6 => 5_5 0.912620, 0.682610, 0.750000, 0.700000, 0.950110, 0.653450, 0.659330, 0.700000, 0.600000, 0.856110, 0.648440, 0.600000, 0.641120, 0.500000, 0.695780, 0.570000, 0.550000, 0.598800, 0.40000 , 0.560150, 0.475230, 0.500000, 0.518640, 0.339970, 0.520230, 0.743440, 0.592190, 0.603060, 0.316930, 0.794390 ] return np.array(CM, dtype=float).reshape((6,6,7,5))
29,070
def remove_sleepEDF(mne_raw, CHANNELS):
    """Reduce an MNE recording to only the channels listed in CHANNELS.

    Args:
        mne_raw: MNE data structure of n recordings, t seconds each.
        CHANNELS: list of channel names to keep.

    Returns:
        The MNE data structure restricted to the requested channels.
    """
    return mne_raw.pick_channels(CHANNELS)
29,071
def zip(zipfilename, srcdirs):  # , recursive=True):
    """Zip one or more source directories into *zipfilename*.

    The last path element of each source dir becomes the root entry of
    the archive.  E.g. srcdir="doc/Java" yields a zip whose root element
    is "Java" (srcdir might expand to .../antlr4/doc/Java).
    """
    if isinstance(srcdirs, basestring):
        srcdirs = [srcdirs]
    with zipfile.ZipFile(zipfilename, mode="w", compression=zipfile.ZIP_DEFLATED) as archive:
        for src in srcdirs:
            src = uniformpath(src)
            # "...doc/Java" -> parent is "...doc"; archive names start
            # just past the parent, i.e. at "Java".
            start = len(os.path.dirname(src)) + 1
            for path in allfiles(src):
                archive.write(path, path[start:])
29,072
def save_email_schedule(request, action, schedule_item, op_payload):
    """
    Handle creation and edition of scheduled email items.

    :param request: Http request being processed
    :param action: Action item related to the schedule
    :param schedule_item: Schedule item or None if it is new
    :param op_payload: dictionary to carry over the request to the next step
    :return: HTTP response (form render, redirect to the item filter, or
        the result of finish_scheduling)
    """
    # Create the form to ask for the email subject and other information.
    # Only key columns of the workflow are offered as item columns.
    form = EmailScheduleForm(
        data=request.POST or None,
        action=action,
        instance=schedule_item,
        columns=action.workflow.columns.filter(is_key=True),
        confirm_items=op_payload.get('confirm_items', False))

    # Check if the request is GET, or POST but not valid
    if request.method == 'GET' or not form.is_valid():
        now = datetime.datetime.now(pytz.timezone(settings.TIME_ZONE))
        # Render the form
        return render(request,
                      'scheduler/edit.html',
                      {'action': action,
                       'form': form,
                       'now': now})

    # Processing a valid POST request

    # Save the schedule item object without committing, so the extra
    # fields below can be filled in first.
    s_item = form.save(commit=False)

    # Assign additional fields and save
    s_item.user = request.user
    s_item.action = action
    s_item.status = ScheduledAction.STATUS_CREATING
    # cc/bcc are comma-separated strings; split and drop empty entries.
    s_item.payload = {
        'subject': form.cleaned_data['subject'],
        'cc_email': [x for x in form.cleaned_data['cc_email'].split(',') if x],
        'bcc_email': [x for x in form.cleaned_data['bcc_email'].split(',') if x],
        'send_confirmation': form.cleaned_data['send_confirmation'],
        'track_read': form.cleaned_data['track_read']
    }
    # Verify that that action does comply with the name uniqueness
    # property (only with respec to other actions)
    try:
        s_item.save()
    except IntegrityError as e:
        # There is an action with this name already
        form.add_error('name',
                       _('A scheduled execution of this action with this name '
                         'already exists'))
        return render(request,
                      'scheduler/edit.html',
                      {'action': action,
                       'form': form,
                       'now': datetime.datetime.now(pytz.timezone(
                           settings.TIME_ZONE))})

    # Upload information to the op_payload
    op_payload['schedule_id'] = s_item.id
    op_payload['confirm_items'] = form.cleaned_data['confirm_items']

    if op_payload['confirm_items']:
        # Update information to carry to the filtering stage
        op_payload['exclude_values'] = s_item.exclude_values
        op_payload['item_column'] = s_item.item_column.name
        op_payload['button_label'] = ugettext('Schedule')
        request.session[action_session_dictionary] = op_payload

        return redirect('action:item_filter')
    else:
        # If there is not item_column, the exclude values should be empty.
        s_item.exclude_values = []
        s_item.save()

    # Go straight to the final step
    return finish_scheduling(request, s_item, op_payload)
29,073
def MutipleClouds_LOS_example():
    """Example spectrum from a line of sight through multiple clouds."""
    # Input wavelength grid: 1000-1560 with 7.5 spacing.
    wav = WavelengthArray(1000, 1560, 7.5)
    # One entry per cloud; all three lists share the same length.
    densities = [-4, -4.1, -3.6]       # log n_H
    metallicities = [0, 0, 0]          # log Z
    temperatures = [4.3, 4.2, 4.0]     # log T
    # These can also be arrays of the same length as the cloud lists.
    bNT = 29.0
    z = 0.0
    size = 1.0  # kpc
    # Define model
    ion_model = DefineIonizationModel('photo_collision_thin', z)
    # Compute and plot the combined spectrum.
    spec = ProduceSpec_alongLOS(ion_model, wav, densities, metallicities,
                                temperatures, bNT, z, size)
    pl.step(wav, spec)
    pl.ylim([0, 1.4])
    pl.show()
29,074
def show_colored_canvas(color):
    """Show a transient VisPy canvas cleared to a uniform *color*."""
    from vispy import app, gloo

    canvas = app.Canvas()

    # Register the draw handler; it simply clears to the requested color.
    @canvas.connect
    def on_draw(event):
        gloo.clear(color)

    show_test(canvas)
29,075
def slice(
    _data: DataFrame,
    *rows: NumericOrIter,
    _preserve: bool = False,
    base0_: bool = None,
) -> DataFrame:
    """Select rows of a dataframe by their (integer) locations

    Original API: https://dplyr.tidyverse.org/reference/slice.html

    Args:
        _data: The dataframe
        rows: The indexes. Ranges can be given as `f[1:3]`.
            Negative numbers here are python-style negative indexes, not
            dplyr-style exclusions.  To exclude rows use inversion, e.g.
            `slice(df, ~f[:3])` drops the first 3 rows and
            `slice(df, ~c(f[:3], 6))` drops several sets; a single row is
            dropped with `slice(df, ~c(1))` (since `~1` is just a number).
            Exclusive and inclusive expressions may be mixed, unlike in
            `dplyr`; they are expanded in the order passed.
        _preserve: Relevant when `_data` is grouped.  If False (default)
            the grouping structure is recalculated from the result,
            otherwise it is kept as is.
        base0_: Whether integer row indexes are 0-based.  If not given,
            `datar.base.get_option('index.base.0')` is used.

    Returns:
        The sliced dataframe
    """
    if not rows:
        return _data

    indexes = _sanitize_rows(rows, _data.shape[0], base0_)
    sliced = _data.iloc[indexes, :]
    # A plain RangeIndex carries no information worth keeping, so renumber.
    if isinstance(_data.index, RangeIndex):
        sliced.reset_index(drop=True, inplace=True)
    return sliced
29,076
def get_random_color():
    """Return the next colour in a deterministic walk through RGB space.

    Despite the name, no randomness is used: each call advances the
    module-level ``_start_color`` by ``_color_step`` (mod 256 per channel).

    :return: ``[r, g, b]`` list with each component in 0..255
    """
    global _start_color, _color_step
    _start_color = (_start_color + _color_step) % np.array([256, 256, 256])
    return np.asarray(_start_color, np.uint8).tolist()
29,077
def test_del_list() -> None:
    """Exercise the ways ``del`` removes element(s) from a list."""
    values: List[int] = [1, 2, 3, 4, 5]

    # A single element, by index.
    del values[0]
    assert values == [2, 3, 4, 5]

    # A slice of elements.
    del values[1:3]
    assert values == [2, 5]

    # Every element, leaving an empty list.
    del values[:]
    assert values == []

    # Finally, unbind the variable itself.
    del values
29,078
async def test_delete_start_entry_not_found(
    event_loop: Any,
    mocker: MockFixture,
    start_entry_mock: dict,
) -> None:
    """Should raise StartEntryNotFoundException when the entry does not exist."""
    # Simulate a missing start entry: the adapter lookup returns None.
    mocker.patch(
        "race_service.adapters.start_entries_adapter.StartEntriesAdapter.get_start_entry_by_id",
        return_value=None,
    )
    # The delete itself would succeed, but it must never be reached.
    mocker.patch(
        "race_service.adapters.start_entries_adapter.StartEntriesAdapter.delete_start_entry",
        return_value=True,
    )
    with pytest.raises(StartEntryNotFoundException):
        await StartEntriesService.delete_start_entry(db=None, id=start_entry_mock["id"])
29,079
def test_handler_issue_pr_mentioned(monkeypatch):
    """Feed sample webhook input into handler_issue_pr_mentioned and verify the behaviour."""
    # Canned GitHub webhook header/body captured in the testdata folder.
    mentioned_header_path = SCRIPT_PATH.parent / "testdata/mentioned-header.json"
    mentioned_body_path = SCRIPT_PATH.parent / "testdata/mentioned-body.json"
    header = json.loads(mentioned_header_path.read_text())
    body = json.loads(mentioned_body_path.read_text())

    import github_webhook_lambda

    # Replace the GitHub->Slack user map and the Slack notifier with test doubles.
    mock = MagicMock()
    monkeypatch.setattr(github_webhook_lambda, "GITHUB_TO_SLACK",
                        {"@smatsumt": "@smatsumt", "@smatsumt2": "@smatsumt2"})
    monkeypatch.setattr(github_webhook_lambda, "notify_slack", mock)

    r = github_webhook_lambda.handler_issue_pr_mentioned(header, body)

    # The notifier must have been called with the formatted mention message.
    args, kwargs = mock.call_args
    assert args[0] == ":wave: <@smatsumt2>, *mentioned* by smatsumt in https://github.com/smatsumt/testrepo2/issues/1#issuecomment-619470010"
    assert kwargs["attach_message"] == "@smatsumt and @smatsumt2, you are mentioned."
29,080
def CNN2d(CNN=None, second=10, saveable=True, name='cnn', fig_idx=3119362):
    """Display a group of RGB or Greyscale CNN masks.

    Parameters
    ----------
    CNN : numpy.array
        The filter bank, e.g. 64 5x5 RGB kernels have shape (5, 5, 3, 64).
    second : int
        The display second(s) for the image(s), if saveable is False.
    saveable : boolean
        Save (True) or plot (False) the figure.
    name : a string
        A name to save the image, if saveable is True.
    fig_idx : int
        matplotlib figure index.

    Examples
    --------
    >>> tl.visualize.CNN2d(network.all_params[0].eval(), second=10, saveable=True, name='cnn1_mnist', fig_idx=2012)
    """
    import matplotlib.pyplot as plt
    # Shape convention: (height, width, channels, number of masks),
    # e.g. (5, 5, 3, 64).
    n_mask = CNN.shape[3]
    n_row = CNN.shape[0]
    n_col = CNN.shape[1]
    n_color = CNN.shape[2]
    # Arrange the masks on a near-square grid.
    row = int(np.sqrt(n_mask))
    col = int(np.ceil(n_mask/row))
    plt.ion()  # interactive (active) mode
    fig = plt.figure(fig_idx)
    count = 1
    for ir in range(1, row+1):
        for ic in range(1, col+1):
            if count > n_mask:
                break
            a = fig.add_subplot(col, row, count)
            if n_color == 1:
                # Greyscale kernel: drop the singleton channel axis.
                plt.imshow(
                    np.reshape(CNN[:,:,:,count-1], (n_row, n_col)),
                    cmap='gray', interpolation="nearest")
            elif n_color == 3:
                # RGB kernel: keep the 3 channels.
                plt.imshow(
                    np.reshape(CNN[:,:,:,count-1], (n_row, n_col, n_color)),
                    cmap='gray', interpolation="nearest")
            else:
                raise Exception("Unknown n_color")
            # Hide the tick marks on both axes.
            plt.gca().xaxis.set_major_locator(plt.NullLocator())
            plt.gca().yaxis.set_major_locator(plt.NullLocator())
            count = count + 1
    if saveable:
        plt.savefig(name+'.pdf',format='pdf')
    else:
        plt.draw()
        plt.pause(second)
29,081
def get_client_ip(request):
    """Return the originating client IP address for ``request``.

    Prefers the first entry of the ``X-Forwarded-For`` header (set by
    proxies/load balancers); falls back to the socket-level
    ``REMOTE_ADDR``.

    :param request: request object exposing a ``META`` mapping
    :return: IP address string (or None if neither source is present)
    """
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        # First address in the chain is the original client.
        return forwarded.split(',')[0]  # pylint: disable=invalid-name
    return request.META.get('REMOTE_ADDR')
29,082
def entropy(wair,temp,pres,airf=None,dhum=None,dliq=None,chkvals=False,
    chktol=_CHKTOL,airf0=None,dhum0=None,dliq0=None,chkbnd=False,
    mathargs=None):
    """Calculate the specific entropy of wet air.

    :arg float wair: Total dry air fraction in kg/kg.
    :arg float temp: Temperature in K.
    :arg float pres: Pressure in Pa.
    :arg airf: Dry air fraction in humid air in kg/kg, or None (default)
        to have it calculated.
    :arg dhum: Humid air density in kg/m3, or None (default) to have it
        calculated.
    :arg dliq: Liquid water density in kg/m3, or None (default) to have
        it calculated.
    :arg bool chkvals: If True (default False) and all values are given,
        calculate the disequilibrium and warn if beyond tolerance.
    :arg float chktol: Tolerance to use when checking values (default
        _CHKTOL).
    :arg airf0: Initial guess for the dry fraction in kg/kg; None
        (default) uses `iceair4a._approx_tp`.
    :arg dhum0: Initial guess for the humid air density in kg/m3; None
        (default) uses `liqair4a._approx_tp`.
    :arg dliq0: Initial guess for the liquid water density in kg/m3;
        None (default) uses `liqair4a._approx_tp`.
    :arg bool chkbnd: If True, warn when values are valid but outside
        the recommended bounds (default False).
    :arg mathargs: Keyword arguments for the root-finder
        :func:`_newton <maths3.newton>` (e.g. maxiter, rtol); None
        (default) passes no arguments.
    :returns: Entropy in J/kg/K.
    :raises RuntimeWarning: If the relative disequilibrium is more than
        chktol (with chkvals True and all values given), or if air with
        the given parameters would be unsaturated.

    :Examples:

    >>> entropy(0.5,300.,1e5)
    343.783393872
    """
    # Entropy is minus the temperature derivative of the Gibbs energy,
    # s = -(dg/dT)_p.
    return -liqair_g(0,1,0,wair,temp,pres,airf=airf,dhum=dhum,dliq=dliq,
        chkvals=chkvals,chktol=chktol,airf0=airf0,dhum0=dhum0,dliq0=dliq0,
        chkbnd=chkbnd,mathargs=mathargs)
29,083
def get_list_item(view, index):
    """Fetch the item at ``index`` from a listView through the cached proxy.

    :param view: the listView to read from
    :param index: position of the wanted item
    :return: the item stored at ``index``
    """
    proxy = var_cache['proxy']
    return proxy.get_list_item(view, index)
29,084
def test_update_user_ensures_request_data_id_matches_resource_id(client, auth):
    """If request data contains an (optional) "id" then it has to match the resource id."""
    auth.login()
    # Matching id in the body is accepted.
    assert client.put("/api/users/{}".format(auth.id), headers=auth.headers,
                      json={"id": auth.id, "name": "??", "password": "????"}).status_code == 200
    # Omitting the id entirely is also accepted.
    assert client.put("/api/users/{}".format(auth.id), headers=auth.headers,
                      json={"name": "??", "password": "????"}).status_code == 200
    # A mismatching id must be rejected with 400 and an explanatory message.
    r = client.put("/api/users/{}".format(auth.id), headers=auth.headers,
                   json={"id": auth.id + 1, "name": "??", "password": "????"})
    assert r.status_code == 400
    json = r.get_json()
    assert "message" in json
    assert json["message"] == "Request data id has to match resource id."
29,085
def interquartile_range_checker(train_user: list) -> float:
    """Return the lower outlier limit of *train_user* based on the IQR.

    The limit is ``Q1 - 0.1 * IQR``; values below it can be treated as
    outliers (interquartile-range outlier check).

    :param train_user: list of user totals (floats); the input is NOT
        modified (the previous version sorted it in place, mutating the
        caller's list as a side effect).
    :return: the lower limit as a float.

    >>> interquartile_range_checker([1,2,3,4,5,6,7,8,9,10])
    2.8
    """
    # np.percentile handles unsorted data, so no in-place sort is needed.
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
29,086
def pause(args):
    """Pause all the swift services.

    :param args: object whose ``services`` attribute lists the names of
        the services to stop.
    @raises Exception if any services fail to stop
    """
    for service in args.services:
        stopped = service_pause(service)
        if not stopped:
            raise Exception("{} didn't stop cleanly.".format(service))
    # Record the paused state in the charm's kv store so later hooks
    # know not to restart the services.
    with HookData()():
        kv().set('unit-paused', True)
    # Refresh the reported workload status to reflect the paused state.
    set_os_workload_status(CONFIGS, REQUIRED_INTERFACES,
                           charm_func=assess_status)
29,087
def make_linear_colorscale(colors):
    """Spread *colors* evenly over [0, 1] as a colorscale.

    For documentation regarding the form of the output, see
    https://plot.ly/python/reference/#mesh3d-colorscale
    """
    step = 1.0 / (len(colors) - 1)
    return [[position * step, color] for position, color in enumerate(colors)]
29,088
def background(image_size: int, level: float=0, grad_i: float=0, grad_d: float=0) -> np.array:
    """
    Return an array of shape (image_size, image_size) representing an
    image background at uniform intensity `level`, optionally overlaid
    with a linear illumination gradient.

    :param image_size: side length in pixels.
    :param level: uniform base intensity.
    :param grad_i: gradient intensity, expected to be between 0 and 1.
    :param grad_d: gradient direction in radians.
    """
    h = image_size // 2
    background = np.ones((image_size, image_size)) * level
    # Pixel coordinates centred on the image.  Using arange(image_size) - h
    # keeps the grid exactly image_size wide; the previous
    # arange(-h, h + 1) produced image_size + 1 samples for even sizes,
    # which made the final addition fail with a shape mismatch.
    # (For odd image_size the values are identical: -h .. h.)
    coords = np.arange(image_size) - h
    ix, iy = np.meshgrid(coords, coords)
    illumination_gradient = grad_i * ((ix * np.sin(grad_d)) + (iy * np.cos(grad_d))) / (np.sqrt(2) * image_size)
    return background + illumination_gradient
29,089
def _warn_if_uri_clash_on_same_marshaled_representation(uri_schema_mappings, marshal_uri):
    """
    Verify that every uri present in the definitions is represented by a
    different marshaled uri.  If that is not the case a warning is filed.

    In case of presence of warning please keep us informed about the issue;
    in the meantime you can work around it by calling
    ``flattened_spec(spec, marshal_uri_function)`` directly, passing your
    own marshalling function.
    """
    # Group the original uris by their marshaled representation.
    # Plain dict iteration replaces six.iterkeys/iteritems: it behaves
    # identically on both Python 2 and 3.
    marshaled_uri_mapping = defaultdict(set)
    for uri in uri_schema_mappings:
        marshaled_uri_mapping[marshal_uri(uri)].add(uri)

    if len(marshaled_uri_mapping) != len(uri_schema_mappings):
        # At least two uris clashed to the same marshaled representation
        for marshaled_uri, uris in marshaled_uri_mapping.items():
            if len(uris) > 1:
                warnings.warn(
                    message='{s_uris} clashed to {marshaled}'.format(
                        s_uris=', '.join(sorted(urlunparse(uri) for uri in uris)),
                        marshaled=marshaled_uri,
                    ),
                    category=Warning,
                )
29,090
def mapTypeCategoriesToSubnetName(nodetypecategory, acceptedtypecategory):
    """Return the name of the subnet that accepts ``nodetypecategory`` as a
    child type and can itself be created in a container whose child type is
    ``acceptedtypecategory``.

    Intended to return None when the two categories are the same (no subnet
    needed to accommodate ``nodetypecategory``) or when the mapping has not
    been defined yet.  NOTE(review): the mapping table is not implemented,
    so this currently always returns an empty string.
    """
    # Placeholder until the category -> subnet mapping is defined.
    return ''
29,091
def sorted_unique(series):
    """Return the unique values of *series* as a correctly sorted list."""
    # Going through pd.Series(...).sort_values() copes with Categorical
    # dtypes, which sorted(series.unique()) fails on, and
    # Series(series.unique()) is faster than series.drop_duplicates().
    uniques = pd.Series(series.unique())
    return list(uniques.sort_values())
29,092
def create_query_from_request(p, request):
    """
    Create JSON object representing the query from request received from Dashboard.

    :param p: process wrapper; its ``create_from_json`` ingests the query
        and ``p.process`` is returned.
    :param request: Django request carrying the query form fields
        (count, generate_tags, selected_indexers, selected_detectors,
        image_url).
    :return: the launched query process (``p.process``)
    """
    query_json = {'process_type': DVAPQL.QUERY}
    count = request.POST.get('count')
    generate_tags = request.POST.get('generate_tags')
    selected_indexers = json.loads(request.POST.get('selected_indexers',"[]"))
    selected_detectors = json.loads(request.POST.get('selected_detectors',"[]"))
    # Strip the leading "data:image/png;base64," prefix (22 chars) from the
    # data URL so only the raw base64 payload remains.
    query_json['image_data_b64'] = request.POST.get('image_url')[22:]
    query_json['tasks'] = []
    # Maps indexer pk -> list of retriever pks requested for that indexer.
    indexer_tasks = defaultdict(list)
    if generate_tags and generate_tags != 'false':
        # Run the tagger analyzer over the whole query image.
        query_json['tasks'].append({'operation': 'perform_analysis',
                                    'arguments': {'analyzer': 'tagger','target': 'query',}
                                    })
    if selected_indexers:
        # Entries are encoded as "<indexer_pk>_<retriever_pk>".
        for k in selected_indexers:
            indexer_pk, retriever_pk = k.split('_')
            indexer_tasks[int(indexer_pk)].append(int(retriever_pk))
    for i in indexer_tasks:
        di = TrainedModel.objects.get(pk=i,model_type=TrainedModel.INDEXER)
        rtasks = []
        # Chain one retrieval task per requested retriever after indexing.
        for r in indexer_tasks[i]:
            rtasks.append({'operation': 'perform_retrieval',
                           'arguments': {'count': int(count), 'retriever_pk': r}})
        query_json['tasks'].append(
            {
                'operation': 'perform_indexing',
                'arguments': {
                    'index': di.name,
                    'target': 'query',
                    'map': rtasks
                }
            }
        )
    if selected_detectors:
        for d in selected_detectors:
            dd = TrainedModel.objects.get(pk=int(d),model_type=TrainedModel.DETECTOR)
            if dd.name == 'textbox':
                # Text boxes: run CRNN text recognition on detected regions.
                query_json['tasks'].append({'operation': 'perform_detection',
                                            'arguments': {'detector_pk': int(d),
                                                          'target': 'query',
                                                          'map': [{
                                                              'operation': 'perform_analysis',
                                                              'arguments': {'target': 'query_regions',
                                                                            'analyzer': 'crnn',
                                                                            'filters': {'event_id': '__parent_event__'}
                                                                            }
                                                          }]
                                                          }
                                            })
            elif dd.name == 'face':
                # Faces: index detected regions with facenet and retrieve
                # the 10 nearest matches using the exact facenet retriever.
                dr = Retriever.objects.get(name='facenet',algorithm=Retriever.EXACT)
                query_json['tasks'].append({'operation': 'perform_detection',
                                            'arguments': {'detector_pk': int(d),
                                                          'target': 'query',
                                                          'map': [{
                                                              'operation': 'perform_indexing',
                                                              'arguments': {'target': 'query_regions',
                                                                            'index': 'facenet',
                                                                            'filters': {'event_id': '__parent_event__'},
                                                                            'map': [{
                                                                                'operation': 'perform_retrieval',
                                                                                'arguments': {'retriever_pk': dr.pk,
                                                                                              'filters': {'event_id': '__parent_event__'},
                                                                                              'target': 'query_region_index_vectors',
                                                                                              'count': 10}
                                                                            }]}
                                                          }]
                                                          }
                                            })
            else:
                # Any other detector: detection only, no follow-up tasks.
                query_json['tasks'].append({'operation': 'perform_detection',
                                            'arguments': {'detector_pk': int(d),
                                                          'target': 'query',
                                                          }})
    user = request.user if request.user.is_authenticated else None
    p.create_from_json(query_json, user)
    return p.process
29,093
def create_cxr_transforms_from_config(config: CfgNode,
                                      apply_augmentations: bool) -> ImageTransformationPipeline:
    """
    Defines the image transformations pipeline used in Chest-Xray datasets.
    Can be used for other types of images data; the type of augmentations to
    use and their strength are expected to be defined in the config.

    :param config: config yaml file fixing strength and type of augmentation
        to apply
    :param apply_augmentations: if True return transformation pipeline with
        augmentations. Else, disable augmentations i.e. only resize and
        center crop the image.
    """
    # The pipeline always starts by expanding greyscale images to 3 channels.
    transforms: List[Any] = [ExpandChannels()]
    if apply_augmentations:
        if config.augmentation.use_random_affine:
            transforms.append(RandomAffine(
                degrees=config.augmentation.random_affine.max_angle,
                translate=(config.augmentation.random_affine.max_horizontal_shift,
                           config.augmentation.random_affine.max_vertical_shift),
                shear=config.augmentation.random_affine.max_shear
            ))
        # Random resized crop replaces the plain resize when enabled.
        if config.augmentation.use_random_crop:
            transforms.append(RandomResizedCrop(
                scale=config.augmentation.random_crop.scale,
                size=config.preprocess.resize
            ))
        else:
            transforms.append(Resize(size=config.preprocess.resize))
        if config.augmentation.use_random_horizontal_flip:
            transforms.append(RandomHorizontalFlip(
                p=config.augmentation.random_horizontal_flip.prob))
        if config.augmentation.use_gamma_transform:
            transforms.append(RandomGamma(scale=config.augmentation.gamma.scale))
        if config.augmentation.use_random_color:
            transforms.append(ColorJitter(
                brightness=config.augmentation.random_color.brightness,
                contrast=config.augmentation.random_color.contrast,
                saturation=config.augmentation.random_color.saturation
            ))
        if config.augmentation.use_elastic_transform:
            transforms.append(ElasticTransform(
                alpha=config.augmentation.elastic_transform.alpha,
                sigma=config.augmentation.elastic_transform.sigma,
                p_apply=config.augmentation.elastic_transform.p_apply
            ))
        # Center crop is applied after the geometric/color augmentations;
        # erasing and noise act on the final cropped image.
        transforms.append(CenterCrop(config.preprocess.center_crop_size))
        if config.augmentation.use_random_erasing:
            transforms.append(RandomErasing(
                scale=config.augmentation.random_erasing.scale,
                ratio=config.augmentation.random_erasing.ratio
            ))
        if config.augmentation.add_gaussian_noise:
            transforms.append(AddGaussianNoise(
                p_apply=config.augmentation.gaussian_noise.p_apply,
                std=config.augmentation.gaussian_noise.std
            ))
    else:
        # No augmentations: deterministic resize + center crop only.
        transforms += [Resize(size=config.preprocess.resize),
                       CenterCrop(config.preprocess.center_crop_size)]
    pipeline = ImageTransformationPipeline(transforms)
    return pipeline
29,094
def is_compiled_with_npu():
    """
    Whether paddle was built with WITH_ASCEND_CL=ON to support Ascend NPU.

    Returns (bool): `True` if NPU is supported, otherwise `False`.

    Examples:
        .. code-block:: python

            import paddle
            support_npu = paddle.device.is_compiled_with_npu()
    """
    # Thin delegation to the compiled core's build-time capability flag.
    return core.is_compiled_with_npu()
29,095
def odd_occurrence_parity_set(arr):
    """Return the single value of *arr* that occurs an odd number of times.

    A set-based analogue of the XOR trick: the working set holds exactly
    the values seen an odd number of times so far.  Symmetric-differencing
    each element toggles its membership, so after one pass the set
    contains only the answer.  Probably much faster than the hashmap
    variant in practice, since set lookups beat dict lookups.

    Space complexity: $O(n)$; Time complexity: $O(n)$.

    Parameters
    ----------
    arr : list of integers

    Returns
    -------
    integer
    """
    parity = set()
    for value in arr:
        parity ^= {value}  # toggle membership: in -> out, out -> in
    return list(parity)[0]
29,096
def write_directory_status(directory_status, run_id=None):
    """
    Writes a status to the status file: Overwrites anything that is in the file
    Writes a timestamp to the time of last written

    :param directory_status: DirectoryStatus object containing status to write
        to directory
    :param run_id: optional, when used, the run id will be included in the
        status file, along with the irida instance the run is uploaded to.
    :return: None
    """
    if not os.access(directory_status.directory, os.W_OK):
        # Cannot access upload directory
        raise exceptions.DirectoryError("Cannot access directory",
                                        directory_status.directory)

    uploader_info_file = os.path.join(directory_status.directory, STATUS_FILE_NAME)
    if not os.access(uploader_info_file, os.W_OK):
        # Cannot access the status file itself: fall back to /tmp.
        # NOTE(review): os.access returns False for a file that does not
        # exist yet, so a directory with no status file always falls back
        # to /tmp even though the directory is writable — confirm intended.
        uploader_info_file = os.path.join('/tmp/', STATUS_FILE_NAME)

    # Include run id and IRIDA instance only when a run id is supplied.
    if run_id:
        json_data = {STATUS_FIELD: directory_status.status,
                     DATE_TIME_FIELD: _get_date_time_field(),
                     RUN_ID_FIELD: run_id,
                     IRIDA_INSTANCE_FIELD: config.read_config_option('base_url')}
    else:
        json_data = {STATUS_FIELD: directory_status.status,
                     DATE_TIME_FIELD: _get_date_time_field()}

    with open(uploader_info_file, "w") as json_file:
        json.dump(json_data, json_file, indent=4, sort_keys=True)
        json_file.write("\n")
29,097
def larmor_step_search(step_search_center=cfg.LARMOR_FREQ, steps=200, step_bw_MHz=5e-3, plot=False,
                       shim_x=cfg.SHIM_X, shim_y=cfg.SHIM_Y, shim_z=cfg.SHIM_Z, delay_s=1, gui_test=False):
    """
    Run a stepped search through a range of frequencies to find the highest
    signal response.  Used to find a starting point, not for precision.

    Args:
        step_search_center (float): [MHz] Center for search, defaults to
            config LARMOR_FREQ
        steps (int): Number of search steps
        step_bw_MHz (float): [MHz] Distance in MHz between each step
        plot (bool): Default False, plot final data
        shim_x, shim_y, shim_z (float): Shim value, defaults to config SHIM_
            values, must be less than 1 magnitude
        delay_s (float): Delay between readings in seconds (spin recovery)
        gui_test (bool): Default False, takes dummy data instead of actual
            data for GUI testing away from scanner

    Returns:
        float: Estimated larmor frequency in MHz
        dict: Dictionary of data
    """
    # Pick out the frequencies to run through: `steps` points centred on
    # step_search_center, spaced step_bw_MHz apart.
    swept_freqs = np.linspace(step_search_center - ((steps-1)/2 * step_bw_MHz),
                              step_search_center + ((steps-1)/2 * step_bw_MHz),
                              num=steps)
    larmor_freq = swept_freqs[0]

    # Set the sequence file for a single spin echo
    seq_file = cfg.MGH_PATH + 'cal_seq_files/se_1.seq'

    # Run the experiment once to prep array (also fixes the readout length)
    rxd, rx_t = scr.run_pulseq(seq_file, rf_center=larmor_freq,
                               tx_t=1, grad_t=10, tx_warmup=100,
                               shim_x=shim_x, shim_y=shim_y, shim_z=shim_z,
                               grad_cal=False, save_np=False, save_mat=False,
                               save_msgs=False, gui_test=gui_test)

    # Create array for storing data: one column of complex samples per step.
    rx_arr = np.zeros((rxd.shape[0], steps), dtype=np.cdouble)
    rx_arr[:,0] = rxd

    # Pause for spin recovery
    time.sleep(delay_s)

    # Repeat for each frequency after the first
    for i in range(1, steps):
        print(f'{swept_freqs[i]:.4f} MHz')
        rx_arr[:,i], _ = scr.run_pulseq(seq_file, rf_center=swept_freqs[i],
                                        tx_t=1, grad_t=10, tx_warmup=100,
                                        shim_x=shim_x, shim_y=shim_y, shim_z=shim_z,
                                        grad_cal=False, save_np=False, save_mat=False,
                                        save_msgs=False, gui_test=gui_test)
        time.sleep(delay_s)

    # Find the frequency whose acquisition has the largest maximum
    # absolute sample value.
    max_ind = np.argmax(np.max(np.abs(rx_arr), axis=0, keepdims=False))
    max_freq = swept_freqs[max_ind]
    print(f'Max frequency: {max_freq:.4f} MHz')

    # Plot figure
    if plot:
        fig, axs = plt.subplots(2, 1, constrained_layout=True)
        fig.suptitle(f'{steps}-step search around {step_search_center:.4f} MHz')
        axs[0].plot(np.real(rx_arr))
        axs[0].legend([f'{freq:.4f} MHz' for freq in swept_freqs])
        axs[0].set_title('Concatenated signal -- Real')
        axs[1].plot(np.abs(rx_arr))
        axs[1].set_title('Concatenated signal -- Magnitude')
        plt.show()

    # Output of useful data for visualization
    data_dict = {'rx_arr': rx_arr,
                 'rx_t': rx_t,
                 'larmor_freq': larmor_freq
                 }

    # Return the frequency that worked the best
    return max_freq, data_dict
29,098
def get_role_tempalte(context: Optional[str] = None,
                      name: Optional[str] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRoleTempalteResult:
    """
    Use this data source to access information about an existing resource.

    Deprecated alias (note the intentional "Tempalte" typo kept for backward
    compatibility); use ``rancher2.getRoleTemplate`` instead.
    """
    pulumi.log.warn("""get_role_tempalte is deprecated: rancher2.getRoleTempalte has been deprecated in favor of rancher2.getRoleTemplate""")
    # Build the invoke arguments from the provided filters.
    __args__ = dict()
    __args__['context'] = context
    __args__['name'] = name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Synchronous provider invoke; the awaitable wrapper is built below.
    __ret__ = pulumi.runtime.invoke('rancher2:index/getRoleTempalte:getRoleTempalte', __args__, opts=opts, typ=GetRoleTempalteResult).value

    return AwaitableGetRoleTempalteResult(
        administrative=__ret__.administrative,
        annotations=__ret__.annotations,
        builtin=__ret__.builtin,
        context=__ret__.context,
        default_role=__ret__.default_role,
        description=__ret__.description,
        external=__ret__.external,
        hidden=__ret__.hidden,
        id=__ret__.id,
        labels=__ret__.labels,
        locked=__ret__.locked,
        name=__ret__.name,
        role_template_ids=__ret__.role_template_ids,
        rules=__ret__.rules)
29,099