content: string (length 22 – 815k)
id: int64 (0 – 4.91M)
def target_mask(image, path, num_grid_corners):
    """
    Arguments:
        image: grayscale image of shape (N, M)
        path: pathlib.Path object for the image
        num_grid_corners: number of inner corners of the calibration grid

    Returns: Boolean mask of shape (N, M), which is True for pixels that we
        think are on the calibration target.
    """
    ret, corners = get_cached_corners(
        image_path=path, gray=image, num_grid_corners=num_grid_corners
    )
    if ret:
        # Take the hull to get the outer 2D shape
        hull = ConvexHull(corners.squeeze())
        points2d = hull.points[hull.vertices]
        # Scale the points outward slightly
        scale = 1.3
        center = numpy.average(points2d, axis=0)
        for i in range(len(points2d)):
            points2d[i] = center + scale * (points2d[i] - center)
        # Clip to edges, note corners are (axis1, axis0)
        points2d[:, 0] = numpy.clip(points2d[:, 0], 0, image.shape[1] - 1)
        points2d[:, 1] = numpy.clip(points2d[:, 1], 0, image.shape[0] - 1)
        # Make a boolean mask
        mask = numpy.zeros(image.shape[:2], dtype=numpy.int32)
        mask = cv2.fillPoly(
            mask,
            [points2d.reshape((-1, 1, 2)).astype(numpy.int32)],
            color=1.0,
        )
        mask = mask.astype(bool)
    else:
        mask = numpy.ones(image.shape[:2], dtype=bool)
    return mask
5,338,900
def boddef(name, code): """ Define a body name/ID code pair for later translation via :func:`bodn2c` or :func:`bodc2n`. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/boddef_c.html :param name: Common name of some body. :type name: str :param code: Integer code for that body. :type code: int """ name = stypes.stringToCharP(name) code = ctypes.c_int(code) libspice.boddef_c(name, code)
5,338,901
def write_tar(archive_url, manifest_path, tar_path, strip_prefix=None, progress_bar=False, overwrite=False): """ Write all objects from archive_url to tar_path. Write list of objects to manifest_path. """ if not overwrite: if exists(tar_path): raise IOError("%s already exists." % tar_path) if exists(manifest_path): raise IOError("%s already exists." % manifest_path) # get iterator of items to tar, and check that it includes at least one item objects = list_objects(archive_url) try: _, objects = peek(iter(objects)) except StopIteration: raise IOError("No objects found at %s" % archive_url) # write tar make_parent_dir(tar_path) files_written = [] with open(tar_path, 'wb', ignore_ext=True) as tar_out, \ LoggingTarFile.open(fileobj=tar_out, mode='w|') as tar, \ TemporaryDirectory() as temp_dir: # load object contents in background threads items = threaded_queue(load_object, ((obj, temp_dir) for obj in objects)) # tar each item for obj, response, body in tqdm(items, disable=not progress_bar): body = HashingFile(body) tar_info = TarInfo() tar_info.size = int(response['ContentLength']) tar_info.mtime = response['LastModified'].timestamp() tar_info.name = obj.key if strip_prefix and tar_info.name.startswith(strip_prefix): tar_info.name = tar_info.name[len(strip_prefix):] tar.addfile(tar_info, body) member = tar.members[-1] files_written.append(OrderedDict(( # inventory fields ('Bucket', obj.bucket_name), ('Key', obj.key), ('Size', response['ContentLength']), ('LastModifiedDate', response['LastModified'].isoformat()), ('ETag', response['ETag'].strip('"')), ('StorageClass', response.get('StorageClass', 'STANDARD')), ('VersionId', response.get('VersionId', '')), # ('Owner', obj.owner['DisplayName'] if obj.owner else ''), # tar fields ('TarMD5', body.hexdigest()), ('TarOffset', member.offset), ('TarDataOffset', member.offset_data), ('TarSize', member.size), ) + (( ('TarStrippedPrefix', strip_prefix), ) if strip_prefix else tuple()))) if response['ContentLength'] != member.size: raise ValueError("Object size mismatch: %s" % obj.key) # write csv make_parent_dir(manifest_path) files_written.sort(key=lambda f: f['Key']) write_dicts_to_csv(manifest_path, files_written)
5,338,902
def read_data(spec: dict) -> (dict, DataFrame):
    """Creates Pandas DataFrame by reading file at path.
    Appropriate read_* pandas method will be called based on
    the extension of the input file specified."""
    path = spec['input']['file']
    ext = Path(path).suffix
    kwargs = build_kwargs_read(spec, ext)
    return spec, read_funcs[ext](path, **kwargs)
5,338,903
def bunk_choose(bot, update, user_data): """Removes keyboardMarkup sent in previous handler. Stores the response (for Lectures/Practicals message sent in previous handler) in a ``user_data`` dictionary with the key `"stype"`. ``user_data`` is a user relative dictionary which holds data between different handlers/functions in a ConversationHandler. Selects the appropriate table (Lecture or Practical) based on ``stype`` value. Checks if records exist in the table for a user and sends a warning message or proceeds to list names of all subjects in the table. Passes control to :py:func:`bunk_input` :param bot: Telegram Bot object :type bot: telegram.bot.Bot :param update: Telegram Update object :type update: telegram.update.Update :param user_data: User data dictionary :type user_data: dict :return: ConversationHandler.END if no records else INPUT :rtype: int """ user_data['type'] = update.message.text chat_id = update.message.chat_id stype = user_data['type'] reply_markup = ReplyKeyboardRemove() reply_text = "{}\nChoose `Cancel` to exit.".format(stype) bot.sendMessage(chat_id=chat_id, text=reply_text, reply_markup=reply_markup, parse_mode='markdown') if stype == "Lectures": subject_data = Lecture.query.filter(Lecture.chatID == chat_id).all() else: subject_data = Practical.query.filter(Practical.chatID == chat_id).all() if not subject_data: #If list is empty messageContent = textwrap.dedent(""" No records found! Please use /attendance to pull your attendance from the website first. """) bot.sendMessage(chat_id=chat_id, text=messageContent) return ConversationHandler.END messageContent = "" for digit, subject in enumerate(subject_data): subject_name = subject.name messageContent += "{digit}. {subject_name}\n".format(digit=digit+1, subject_name=subject_name) keyboard = build_menu(subject_data, 3, footer_buttons='Cancel') reply_markup = ReplyKeyboardMarkup(keyboard) user_data['reply_markup'] = reply_markup bot.sendMessage(chat_id=chat_id, text=messageContent, reply_markup=reply_markup) return INPUT
5,338,904
def schema_instance():
    """JSONSchema schema instance."""
    schema_instance = JsonSchema(
        schema=LOADED_SCHEMA_DATA,
        filename="dns.yml",
        root=os.path.join(FIXTURES_DIR, "schema", "schemas"),
    )
    return schema_instance
5,338,905
def module_of(obj):
    """Return the Module given object is contained within."""
    if isinstance(obj, Module):
        return obj
    elif isinstance(obj, (Function, Class)):
        return obj.module
    elif isinstance(obj, Method):
        return module_of(obj.klass)
    elif isinstance(obj, TestCase):
        return module_of(obj.parent)
    else:
        raise TypeError("Don't know how to find the module of %r" % obj)
5,338,906
def day(date, atmos=atmos):
    """
    Returns a dataframe of daily aggregated data

    Parameters
    -------
    date: str
        Format yyyy/mm/dd
    """
    path = f"{get_day_folder_path(date)}{date.replace('/','')}_daily_agg.csv.gz"
    return load_agg(path, atmos)
5,338,907
async def find_deck_position(hcapi: OT3API, mount: OT3Mount) -> float:
    """
    Find the true position of the deck in this mount's frame of reference.

    The deck nominal position in deck coordinates is 0 (that's part of the
    definition of deck coordinates) but if we have not yet calibrated a
    particular tool on a particular mount, then the z deck coordinate that
    will cause a collision is not 0. This routine finds that value.
    """
    z_offset_settings = hcapi.config.calibration.z_offset
    await hcapi.home_z()
    here = await hcapi.gantry_position(mount)
    z_prep_point = Point(*z_offset_settings.point)
    above_point = z_prep_point._replace(z=here.z)
    await hcapi.move_to(mount, above_point)
    deck_z = await hcapi.capacitive_probe(
        mount,
        OT3Axis.by_mount(mount),
        z_prep_point.z,
        z_offset_settings.pass_settings,
    )
    LOG.info(f"autocalibration: found deck at {deck_z}")
    await hcapi.move_to(mount, z_prep_point + Point(0, 0, CAL_TRANSIT_HEIGHT))
    return deck_z
5,338,908
def assert_pipeline_notify_output_is(pipeline_name, expected_notify_output):
    """Assert that the pipeline has the expected output to NOTIFY log.

    Args:
        pipeline_name: str. Name of pipeline to run. Relative to ./tests/
        expected_notify_output: list of str. Entirety of strings expected in
                                the NOTIFY level output during pipeline run.
    """
    with patch_logger('pypyr.steps.echo', logging.NOTIFY) as mock_log:
        pypyr.pipelinerunner.main(pipeline_name=pipeline_name,
                                  pipeline_context_input=None,
                                  working_dir=working_dir)

    assert mock_log.mock_calls == [call(v) for v in expected_notify_output]
5,338,909
def safe_get_request(url, auth):
    """Sends HTTP GET request.

    Safely sends HTTP GET request and handles exceptions.

    Args:
        url (str): The URL to send request to
        auth (tuple): The username and password

    Yields:
        requests.models.Request: The request object

    Raises:
        RequestException: If an error occurs in the request
        SystemExit: If an error occurs, exit system
    """
    get_request = get(url=url, auth=auth)
    try:
        yield get_request
    except RequestException as err:
        print(err)
        sys.exit(0)
    finally:
        get_request.close()
5,338,910
def load_measure_defs(measure_ids=None): """Load measure definitions from JSON files. Since the lpzomnibus measure depends on other LP measures having already been calculated, it is important that the measures are returned in alphabetical order. (This is a bit of a hack...) """ measures = [] errors = [] glob_path = os.path.join(settings.MEASURE_DEFINITIONS_PATH, "*.json") for path in sorted(glob.glob(glob_path)): measure_id = os.path.basename(path).split(".")[0] with open(path) as f: try: measure_def = json.load(f) except ValueError as e: # Add the measure_id to the exception errors.append("* {}: {}".format(measure_id, e.args[0])) continue if measure_ids is None: if "skip" in measure_def: continue else: if measure_id not in measure_ids: continue measure_def["id"] = measure_id measures.append(measure_def) if errors: raise ValueError("Problems parsing JSON:\n" + "\n".join(errors)) return measures
5,338,911
def test_stop_tcp_with_delay(sdc_builder, sdc_executor): """Make sure that the origin can properly be started after stopping it with long batch times.""" builder = sdc_builder.get_pipeline_builder() tcp_server = builder.add_stage('TCP Server') tcp_server.configuration.update({'conf.dataFormat': 'TEXT', 'conf.ports': [str(TCP_PORT)], 'conf.tcpMode': 'DELIMITED_RECORDS', 'conf.recordProcessedAckMessage': 'record_${record:value(\'/text\')}'}) # Make sure that each batch takes at least 5 seconds delay = builder.add_stage('Delay') delay.delay_between_batches = 5 * 1000 trash = builder.add_stage('Trash') tcp_server >> delay >> trash pipeline = builder.build() sdc_executor.add_pipeline(pipeline) # Let's start/stop the pipeline few times, it should always properly wait for graceful shutdown and subsequent # start of pipeline should be immediate. for _ in range(3): # Start the pipeline sdc_executor.start_pipeline(pipeline) # Send exactly one record tcp_client = TCPClient(sdc_executor.server_host, TCP_PORT) tcp_client.send_str_and_ack('Something not important\n') # Wait one second to make sure that the batch is 'processing' (it should take ~5 seconds to process that batch) time.sleep(1) # Stop the pipeline, the pipeline stop command should take time to finish. In 'zero' operation cost world that # would be around 4 seconds, but we don't want to let the test fail for random race conditions and thus we # verify at least 2 seconds. start = time.time() sdc_executor.stop_pipeline(pipeline) assert time.time() - start > 2 # There should be exactly one record and batch processing time should be more than 5 seconds history = sdc_executor.get_pipeline_history(pipeline) assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 1 #TODO: TLKT-167: Add access methods to metric objects assert history.latest.metrics.timer('pipeline.batchProcessing.timer')._data.get('mean') >= 5
5,338,912
def setup_logger(name, warninglevel=logging.WARNING, logfilepath=path_to_log, logformat='%(asctime)s %(levelname)s - %(name)-6s - %(message)s'): """Basic setup function to create a standard logging config. Default output is to file in /tmp/dir.""" logfile=os.path.join(logfilepath,'magpy.log') # Check file permission/existance if not os.path.isfile(logfile): pass else: if os.access(logfile, os.W_OK): pass else: for count in range (1,100): logfile=os.path.join(logfilepath,'magpy{:02}.log'.format(count)) value = os.access(logfile, os.W_OK) if value or not os.path.isfile(logfile): count = 100 break try: logging.basicConfig(filename=logfile, filemode='w', format=logformat, level=logging.INFO) except: logging.basicConfig(format=logformat, level=logging.INFO) logger = logging.getLogger(name) # Define a Handler which writes "setLevel" messages or higher to the sys.stderr console = logging.StreamHandler() console.setLevel(warninglevel) logger.addHandler(console) return logger
5,338,913
def create_symbol_id(path_to_db: str) -> str: """ When creating a new symbol, need to ensure that ID is not already used in the Physics Derivation Graph Args: path_to_db: filename of the SQL database containing a JSON entry that returns a nested dictionary Returns: proposed_symbol_id Raises: >>> create_symbol_id("pdg.db") """ # trace_id = str(random.randint(1000000, 9999999)) # logger.info("[trace start " + trace_id + "]") dat = clib.read_db(path_to_db) symbol_ids_in_use = list(dat["symbols"].keys()) found_valid_id = False loop_count = 0 while not found_valid_id: loop_count += 1 proposed_symbol_id = str(random.randint(1000, 9999)) # 4 digits if proposed_symbol_id not in symbol_ids_in_use: found_valid_id = True if loop_count > 100000: logger.error("too many; this seems unlikely") raise Exception("this seems unlikely") # logger.info("[trace end " + trace_id + "]") return proposed_symbol_id
5,338,914
def zeros_bitonic(mz, i):
    """Cluster based on zeros and bitonic intensities.

    Args:
        mz (iterable): m/z ratios.
        i (iterable): Intensities (contains the zero intensities).

    Yields:
        Tuples of m/z ratios and intensities within clusters.
    """
    M = []
    I = []
    i__ = -2
    _i_ = -1
    # iterate over (m/z, intensity) pairs built from the two arguments
    for __m, __i in zip(mz, i):
        if __i == 0 or i__ > _i_ < __i:
            if len(M) > 0:
                yield M, I
                M = []
                I = []
        if __i != 0:
            M.append(__m)
            I.append(__i)
        i__, _i_ = _i_, __i
    if len(M) > 0:
        yield M, I
5,338,915
def get_file_name(content_disposition: str) -> str:
    """Content-Disposition has the filename between the `"`. get it.

    Args:
        content_disposition: the content disposition from download header

    Returns:
        the file name
    """
    if match := re.search(r'"(.*?)"', content_disposition):
        file_name = match.group(1)
    else:
        file_name = demisto.uniqueFile()

    return file_name
5,338,916
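A tiny usage sketch for `get_file_name` above; only the `re` import is needed on this path, since the `demisto.uniqueFile()` fallback is only reached when the header carries no quoted filename.

import re

header = 'attachment; filename="report-2023.pdf"'
print(get_file_name(header))  # report-2023.pdf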
def authenticate(connect=True):
    """
    Generally you will not need to call this directly; passing in your
    credentials via set_credentials() and set_credential_file() will call
    authenticate() on the identity object by default. But for situations
    where you set your credentials manually or otherwise need finer control
    over the authentication sequence, this method will call the identity
    object's authenticate() method, and an AuthenticationFailed exception
    will be raised if your credentials have not been properly set first.

    Normally after successful authentication, connections to the various
    services will be made. However, passing False to the `connect` parameter
    will skip the service connection step. The 'connect' parameter is
    retained for backwards compatibility. It no longer has any effect.
    """
    identity.authenticate()
5,338,917
def test__data_series__reaction_trunk(): """ test data_series.reaction_trunk """ prefix = os.path.join(PREFIX, 'reaction_trunk') os.mkdir(prefix) # without a root directory ds_ = autofile.schema.data_series.reaction_trunk(prefix) assert not ds_.exists() ds_.create() assert ds_.exists() # with a root directory root_ds = root_data_series(prefix) ds_ = autofile.schema.data_series.reaction_trunk(prefix, root_ds=root_ds) root_locs_lst = [ [1, 'a'], [1, 'b'], [2, 'a'], [2, 'b'], [2, 'c'], ] for root_locs in root_locs_lst: locs = root_locs assert not ds_.exists(locs) ds_.create(locs) assert ds_.exists(locs) assert sorted(root_ds.existing()) == sorted(root_locs_lst)
5,338,918
def get_object(node): """ Parse rebaron AtomTrailers node into Python object (taken from ongoing conversion object) Works for object and local scope """ if len(node) > 1 and (node[0].value == 'self' or node[0].value == 'self_next'): var_t = super_getattr(convert_obj, str(node)) else: # get the SOURCE function (where call is going on) from datamodel def_parent = node.parent while not isinstance(def_parent, DefNode): def_parent = def_parent.parent source_func_name = f'self.{def_parent.name}' source_func_obj = super_getattr(convert_obj, str(source_func_name)) func_locals = source_func_obj.get_local_types() class Struct: def __init__(self, **entries): self.__dict__.update(entries) struct = Struct(**func_locals) var_t = super_getattr(struct, str(node), is_local=True) return var_t
5,338,919
def read_label_from_txt(label_path):
    """Read label from txt file."""
    bounding_box = []
    with open(label_path, "r") as f:
        labels = f.read().split("\n")
        for label in labels:
            if not label:
                continue
            label = label.split(" ")
            if label[0] == "DontCare":
                continue
            # `== ("Car" or "Van")` only ever matched "Car"; use a
            # membership test so "Van" is matched as intended.
            if label[0] in ("Car", "Van"):  # or "Truck"
                bounding_box.append(label[8:15])

    if bounding_box:
        data = np.array(bounding_box, dtype=np.float32)
        return data[:, 3:6], data[:, :3], data[:, 6]
    else:
        return None, None, None
5,338,920
def _load_recipe(module, baked: bool = False) -> Union[BakedRecipe, Recipe]: # load entry-point DAG """Load Queenbee plugin from Python package. Usually you should not be using this function directly. Use ``load`` function instead. args: module: Python module object for a Queenbee Recipe. returns: Recipe - A Queenbee recipe. It will be a baked recipe if baked is set to True. """ qb_info = module.__pollination__ package_name = module.__name__ main_dag_entry = qb_info.get('entry_point', None) assert main_dag_entry, \ f'{package_name} __pollination__ info is missing the enetry_point key.' main_dag = main_dag_entry() # get metadata metadata = _get_meta_data(module, 'recipe') _dependencies = main_dag._dependencies # create a queenbee Recipe object # load dags qb_dag = main_dag.queenbee qb_dag.name = 'main' dags = [qb_dag] + [dag.queenbee for dag in _dependencies['dag']] # add dependencies repo = _init_repo() plugins = [ Dependency( kind=DependencyKind.plugin, name=plugin['name'], tag=plugin['tag'], source=repo.as_uri() ) for plugin in _dependencies['plugin'] ] recipes = [ Dependency( kind=DependencyKind.recipe, name=recipe['name'], tag=recipe['tag'], source=repo.as_uri() ) for recipe in _dependencies['recipe'] ] recipe = Recipe(metadata=metadata, dependencies=plugins + recipes, flow=dags) if baked: package_recipe_dependencies(recipe) rf = RepositoryReference( name='pollination-dsl', path='file:///' + repo.as_posix() ) config = Config(repositories=[rf]) recipe = BakedRecipe.from_recipe(recipe=recipe, config=config) return recipe
5,338,921
def run(editor: str = "", edit_args: str = ""): """ execute execute Custom PHP code by notepad / vi as default or your own editor, edit_args split by space. eg: execute {editor=""} {edit_args=""} execute code '"--wait"' """ file_name = str(uuid4()) + ".php" real_file_path = newfile(file_name) open_editor(real_file_path, editor, edit_args) with open(real_file_path, "r") as f: code = f.read() if (code.startswith("<?php")): code = code[5:] if (code.endswith("?>")): code = code[:-2] print(color.yellow("Execute php code...")) res = send(code) if (not res): return text = res.r_text.strip() status_code = color.green(str(res.status_code)) if res.status_code == 200 else color.yellow(str(res.status_code)) print(f"\n{color.green('Result:')}\n[{status_code}] {color.cyan('length')}: {len(text)} \n{text}\n") remove(real_file_path)
5,338,922
def test_no_threshold_coordinate(probability_cube):
    """Test an exception is raised if no threshold coordinate is found."""
    cube = probability_cube[0]
    threshold = cube.coord(var_name="threshold")
    cube.remove_coord(threshold)
    with pytest.raises(ValueError, match="Cube does not have a threshold coordinate"):
        invert_probabilities(cube)
5,338,923
def compute_coeffs(shape, Aref, alfa):
    """Computes the lift and drag coefficients of the given shape at the
    given angle of attack using the given reference area"""
    alfa_vect = np.array([-np.sin(alfa), 0, -np.cos(alfa)])
    Fvect = np.array([0, 0, 0])  # Force coefficient vector

    for panel in shape:
        panel.alfa = np.arcsin(np.dot(alfa_vect, -panel.N) /
                               (np.linalg.norm(alfa_vect) * np.linalg.norm(panel.N)))
        panel_Cpvect = (panel.A / Aref) * (2 * np.sin(panel.alfa)**2) * (-panel.N / np.linalg.norm(panel.N))
        Fvect = Fvect + panel_Cpvect

    CN = -Fvect[0]  # np.dot(Fvect, np.array([-1, 0, 0]))
    CA = -Fvect[2]  # np.dot(Fvect, np.array([0, 0, -1]))
    CL = CN * np.cos(alfa) - CA * np.sin(alfa)
    CD = CA * np.cos(alfa) + CN * np.sin(alfa)

    # return CA, CN
    return CL, CD
5,338,924
def draw_pitch(axis, rotate=False): """ Plots the lines of a soccer pitch using matplotlib. Arguments --------- axis : matplotlib.axes._subplots.AxesSubplot - matplotlib axis object on which to plot shot freeze frame rotate : bool - if set to True, pitch is horizontal, default to False Returns ------- None """ line_width = 4 alpha = 0.5 r = 10 line_coords = [[[0, 0], [0, 120]], [[0, 80], [120, 120]], [[80, 80], [120, 0]], [[0, 80], [0, 0]], [[0, 80], [60, 60]], [[18, 18], [0, 18]], [[18, 62], [18, 18]], [[62, 62], [0, 18]], [[30, 30], [0, 6]], [[30, 50], [6, 6]], [[50, 50], [0, 6]], [[18, 18], [120, 102]], [[18, 62], [102, 102]], [[62, 62], [102, 120]], [[30, 30], [120, 114]], [[30, 50], [114, 114]], [[50, 50], [120, 114]]] if not rotate: for lines in line_coords: axis.plot(lines[0], lines[1], color='grey', linewidth=line_width, alpha=alpha) theta1 = np.linspace(0, 2*np.pi, 100) theta2 = np.linspace(0.65, 2.47, 100) theta3 = np.linspace(3.8, 5.6, 100) x1 = r*np.cos(theta1) + 40 x2 = r*np.sin(theta1) + 60 x3 = r*np.cos(theta2) + 40 x4 = r*np.sin(theta2) + 12 x5 = r*np.cos(theta3) + 40 x6 = r*np.sin(theta3) + 108 axis.plot(x1, x2, color='grey', linewidth=line_width, alpha=alpha) axis.plot(x3, x4, color='grey', linewidth=line_width, alpha=alpha) axis.plot(x5, x6, color='grey', linewidth=line_width, alpha=alpha) else: for lines in line_coords: axis.plot([-(lines[1][0]-40) + 80, -(lines[1][1]-40) + 80], [lines[0][0], lines[0][1]], color='grey', linewidth=line_width, alpha=alpha) theta1 = np.linspace(0, 2*np.pi, 100) theta2 = np.linspace(5.4, 7.2, 100) theta3 = np.linspace(2.2, 4, 100) x1 = r*np.cos(theta1) + 60 x2 = r*np.sin(theta1) + 40 x3 = r*np.cos(theta2) + 12 x4 = r*np.sin(theta2) + 40 x5 = r*np.cos(theta3) + 108 x6 = r*np.sin(theta3) + 40 axis.plot(x1, x2, color='grey', linewidth=line_width, alpha=alpha) axis.plot(x3, x4, color='grey', linewidth=line_width, alpha=alpha) axis.plot(x5, x6, color='grey', linewidth=line_width, alpha=alpha) return axis
5,338,925
def end_scan_setup(start_time):
    """Process results and deactivate specific buttons on each scan end."""
    process_results(start_time)
    stop_bt.set_sensitive(False)
    cb_display.set_sensitive(True)
5,338,926
def step_impl(context, url):
    """
    :type context: behave.runner.Context
    :type url: str
    """
    context.api_url = 'http://localhost:8080'
    context.resource = url
5,338,927
def test_cache(app, dir_factory):
    """Test cached schema loading."""
    m = mock_open
    with mock.patch('invenio_jsonschemas.ext.open', m):
        ext = InvenioJSONSchemas(app, entry_point_group=None)
        schema_files = build_schemas(1)
        with dir_factory(schema_files) as directory:
            ext.register_schemas_dir(directory)
            assert m.counter == 0
            ext.get_schema('rootschema_1.json')
            assert m.counter == 1
            ext.get_schema('rootschema_1.json')
            ext.get_schema('rootschema_1.json')
            assert m.counter == 1
            ext.get_schema('sub1/subschema_1.json')
            assert m.counter == 2
            ext.get_schema('sub1/subschema_1.json')
            assert m.counter == 2
5,338,928
async def test_setup_config_flow(hass):
    """Test for successfully setting up the IPMA platform."""
    with patch(
        "homeassistant.components.ipma.weather.async_get_location",
        return_value=MockLocation(),
    ):
        entry = MockConfigEntry(domain="ipma", data=TEST_CONFIG)
        await hass.config_entries.async_forward_entry_setup(entry, WEATHER_DOMAIN)
        await hass.async_block_till_done()

    state = hass.states.get("weather.hometown")
    assert state.state == "rainy"

    data = state.attributes
    assert data.get(ATTR_WEATHER_TEMPERATURE) == 18.0
    assert data.get(ATTR_WEATHER_HUMIDITY) == 71
    assert data.get(ATTR_WEATHER_PRESSURE) == 1000.0
    assert data.get(ATTR_WEATHER_WIND_SPEED) == 3.94
    assert data.get(ATTR_WEATHER_WIND_BEARING) == "NW"
    assert state.attributes.get("friendly_name") == "HomeTown"
5,338,929
def test_next_events(hass): """Test retrieving next sun events.""" utc_now = datetime(2016, 11, 1, 8, 0, 0, tzinfo=dt_util.UTC) from astral import Astral astral = Astral() utc_today = utc_now.date() latitude = hass.config.latitude longitude = hass.config.longitude mod = -1 while True: next_dawn = astral.dawn_utc( utc_today + timedelta(days=mod), latitude, longitude ) if next_dawn > utc_now: break mod += 1 mod = -1 while True: next_dusk = astral.dusk_utc( utc_today + timedelta(days=mod), latitude, longitude ) if next_dusk > utc_now: break mod += 1 mod = -1 while True: next_midnight = astral.solar_midnight_utc( utc_today + timedelta(days=mod), longitude ) if next_midnight > utc_now: break mod += 1 mod = -1 while True: next_noon = astral.solar_noon_utc(utc_today + timedelta(days=mod), longitude) if next_noon > utc_now: break mod += 1 mod = -1 while True: next_rising = astral.sunrise_utc( utc_today + timedelta(days=mod), latitude, longitude ) if next_rising > utc_now: break mod += 1 mod = -1 while True: next_setting = astral.sunset_utc( utc_today + timedelta(days=mod), latitude, longitude ) if next_setting > utc_now: break mod += 1 with patch("homeassistant.helpers.condition.dt_util.utcnow", return_value=utc_now): assert next_dawn == sun.get_astral_event_next(hass, "dawn") assert next_dusk == sun.get_astral_event_next(hass, "dusk") assert next_midnight == sun.get_astral_event_next(hass, "solar_midnight") assert next_noon == sun.get_astral_event_next(hass, "solar_noon") assert next_rising == sun.get_astral_event_next(hass, SUN_EVENT_SUNRISE) assert next_setting == sun.get_astral_event_next(hass, SUN_EVENT_SUNSET)
5,338,930
def plot_dist(distances, filename):
    """
    Plot the average distance between a fish and its neighbors over the
    course of a simulation and save this graph

    Arguments:
        distances {float list} -- the average distance at each timestep
        filename {string} -- name of file in which to save graph
    """
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(range(len(distances)), distances)
    ax.scatter(range(len(distances)), distances)
    ax.set_xlabel("Time")
    ax.set_ylabel("Mean neighbor spacing")
    plt.savefig(filename)
5,338,931
def file(filename, searchspec=None, searchtype=None, list_images=False, sort_by=None, fields=None): """Examine images from a local file.""" if not list_images: if searchtype is None or searchspec is None: raise click.BadParameter( 'SEARCHTYPE and SEARCHSPEC must be specified when not listing ' 'images') try: images = sources.read_images_file(filename) except Exception as e: abort(e) if list_images: return _list_images(images) _process_images(searchtype, images, searchspec, sort_by, fields)
5,338,932
def clip_to_norm(array, clip):
    """Clips the examples of a 2-dimensional array to a given maximum norm.

    Parameters
    ----------
    array : np.ndarray
        Array to be clipped. After clipping, all examples have a 2-norm of
        at most `clip`.

    clip : float
        Norm at which to clip each example

    Returns
    -------
    array : np.ndarray
        The clipped array.
    """
    if not isinstance(array, np.ndarray):
        raise TypeError(f"Input array must be a numpy array, got {type(array)}.")
    if array.ndim != 2:
        raise ValueError(f"input array must be 2-dimensional, got {array.ndim} dimensions.")

    if not isinstance(clip, Real):
        raise TypeError(f"Clip value must be numeric, got {type(clip)}.")
    if clip <= 0:
        raise ValueError(f"Clip value must be strictly positive, got {clip}.")

    norms = np.linalg.norm(array, axis=1) / clip
    norms[norms < 1] = 1

    return array / norms[:, np.newaxis]
5,338,933
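A minimal usage sketch for `clip_to_norm` above, assuming the function and its imports (`numpy as np`, `numbers.Real`) are in scope: rows whose 2-norm exceeds the clip value are rescaled onto the clip radius, shorter rows are left unchanged.

import numpy as np

rows = np.array([[3.0, 4.0],    # norm 5.0 -> rescaled to norm 1.0
                 [0.1, 0.2]])   # norm < 1.0 -> unchanged
clipped = clip_to_norm(rows, clip=1.0)
print(np.linalg.norm(clipped, axis=1))  # approx. [1.0, 0.2236]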
def test_bench_day_4(benchmark):
    """pytest-benchmark function"""
    benchmark(main)
5,338,934
def getlineno(frame):
    """Get the line number from a frame object, allowing for optimization."""
    # FrameType.f_lineno is now a descriptor that grovels co_lnotab
    return frame.f_lineno
5,338,935
def _process_sample(sample, fwd, rev, common_args, out): """Constructs assembly command for MEGAHIT and runs the assembly. Args: sample: Name of the sample to be processed. fwd: Location of the forward reads. rev: Location of the reverse reads. If set to None, single-end reads are assumed. common_args: list of arguments that should be passed to the megahit assembly command out: output format """ with tempfile.TemporaryDirectory() as tmp: results_dir = os.path.join(tmp, 'results') cmd = ['megahit'] if rev: cmd.extend(['-1', fwd, '-2', rev]) else: cmd.extend(['-r', fwd]) cmd.extend(['-o', results_dir]) cmd.extend(common_args) try: run_command(cmd, verbose=True) except subprocess.CalledProcessError as e: raise Exception('An error was encountered while running MEGAHIT, ' f'(return code {e.returncode}), please inspect ' 'stdout and stderr to learn more.') shutil.move(os.path.join(results_dir, 'final.contigs.fa'), os.path.join(str(out), f'{sample}_contigs.fa'))
5,338,936
def _batch_sum(F, loss, batch_axis):
    """Return sum on the specified batch axis, not keeping the axis"""
    if is_np_array():
        axes = list(range(loss.ndim))
        del axes[batch_axis]
        return F.np.sum(loss, axis=axes)
    else:
        return F.sum(loss, axis=batch_axis, exclude=True)
5,338,937
def _ExpandSections(section_names, name_to_symbol_infos, offset_to_symbol_infos, section_to_symbols_map, symbol_to_sections_map, suffixed_sections): """Gets an ordered set of section matching rules for a list of sections. Rules will not be repeated. Args: section_names: The sections to expand. name_to_symbol_infos: {name: [symbol_info1], ...}, as returned by _GroupSymbolInfosFromBinary. offset_to_symbol_infos: {offset: [symbol_info1, ...], ...} section_to_symbols_map: The mapping from section to symbol names. symbol_to_sections_map: The mapping from symbol name to names of linker sections containing the symbol. suffixed_sections: A set of sections which can have suffixes. Yields: Section matching rules including at least section_names. """ for profiled_section in section_names: for section in _SectionMatchingRules( profiled_section, name_to_symbol_infos, offset_to_symbol_infos, section_to_symbols_map, symbol_to_sections_map, suffixed_sections): yield section
5,338,938
async def test_device_registry(opp, config_entry, config, soco):
    """Test sonos device registered in the device registry."""
    await setup_platform(opp, config_entry, config)

    device_registry = dr.async_get(opp)

    reg_device = device_registry.async_get_device(
        identifiers={("sonos", "RINCON_test")}
    )
    assert reg_device.model == "Model Name"
    assert reg_device.sw_version == "13.1"
    assert reg_device.connections == {(dr.CONNECTION_NETWORK_MAC, "00:11:22:33:44:55")}
    assert reg_device.manufacturer == "Sonos"
    assert reg_device.suggested_area == "Zone A"
    assert reg_device.name == "Zone A"
5,338,939
def tree_cons(a, tree: Pytree) -> Pytree:
    """
    Prepend ``a`` in all tuples of the given tree.
    """
    return jax.tree_map(
        lambda x: _OpaqueSequence((a,) + tuple(x)),
        tree,
        is_leaf=lambda x: isinstance(x, tuple),
    )
5,338,940
def add(moment: datetime) -> datetime:
    """Add one gigasecond to a given date and time."""
    return moment + GIGASECOND
5,338,941
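A short usage sketch for the `add` helper above; `GIGASECOND` is not shown in the snippet, so the constant below (10**9 seconds as a `timedelta`) is an assumption.

from datetime import datetime, timedelta

GIGASECOND = timedelta(seconds=10**9)  # assumed definition of the constant

moment = datetime(2011, 4, 25)
print(add(moment))  # 2043-01-01 01:46:40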
def get_test_hooks(test_files, cfg, tracer=None): """Returns a list of test hooks from a given list of test modules.""" results = [] dirs = set(map(os.path.dirname, test_files)) for dir in list(dirs): if os.path.basename(dir) == 'ftests': dirs.add(os.path.join(os.path.dirname(dir), 'tests')) dirs = list(dirs) dirs.sort() for dir in dirs: filename = os.path.join(dir, 'checks.py') if os.path.exists(filename): module = import_module(filename, cfg, tracer=tracer) if tracer is not None: hooks = tracer.runfunc(module.test_hooks) else: hooks = module.test_hooks() results.extend(hooks) return results
5,338,942
def convert_to_short_log(log_level, message):
    """Convert a log message to its shorter format.

    :param log_level: enum - 'LogLevel.<level>' e.g. 'LogLevel.Error'
    :param message: str - log message
    :return: enum - 'LogLevelInt.<value>' e.g. 'LogLevelInt.5'
    """
    return f'{LogLevelInt[log_level.name].value}:{message}'
5,338,943
def load_map(path, callback=None, meta_override=None): """Load a set of zipped csv AFM workshop data If you are recording quantitative force-maps (i.e. multiple curves on an x-y-grid) with AFM workshop setups, then you might have realized that you get *multiple* .csv files (one file per indentation) instead of *one* file that contains all the data (as you might be accustomed to from other manufacturers). Since afmformats expects one file per measurement, it would not be straight forward to obtain a properly enumerated quantitative imaging group. This function offers a workaround - it loads a zip archive created from the the .csv files. The files are structured like this:: Force-Distance Curve File Format: 3 Date: Wednesday, August 1, 2018 Time: 1:07:47 PM Mode: Mapping Point: 16 X, um: 27.250000 Y, um: 27.250000 Extend Z-Sense(nm),Extend T-B(V),Retract Z-Sense(nm),Retract T-B(V) 13777.9288,0.6875,14167.9288,1.0917 13778.9288,0.6874,14166.9288,1.0722 13779.9288,0.6876,14165.9288,1.0693 13780.9288,0.6877,14164.9288,1.0824 13781.9288,0.6875,14163.9288,1.0989 ... Please make sure that the ``Point`` is enumerated from 1 onwards (and matches the alphanumerical order of the files in the archive) and that ``Mode`` is ``Mapping``. The ``X`` and ``Y`` coordinates can be used by e.g. PyJibe to display QMap data on a grid. Parameters ---------- path: str or pathlib.Path path to zip file containing AFM workshop .csv files callback: callable function for progress tracking; must accept a float in [0, 1] as an argument. meta_override: dict if specified, contains key-value pairs of metadata that are used when loading the files (see :data:`afmformats.meta.META_FIELDS`) """ datasets = [] with zipfile.ZipFile(path) as arc: names = sorted(arc.namelist()) for ii, name in enumerate(names): with arc.open(name, "r") as fd: tfd = io.TextIOWrapper(fd, encoding="utf-8") dd = load_csv( tfd, # recurse into callback with None as default callback=lambda x: callback((ii + x) / len(names)) if callback is not None else None, meta_override=meta_override, mode="mapping") dd[0]["metadata"]["path"] = pathlib.Path(path) cur_enum = dd[0]["metadata"]["enum"] if cur_enum != ii + 1: warnings.warn("Dataset 'Point' enumeration mismatch for " f"'{name}' in '{path}' (expected {ii + 1}, " f"got {cur_enum})!", AFMWorkshopFormatWarning) datasets += dd # Populate missing grid metadata xvals = list(set([ad["metadata"]["position x"] for ad in datasets])) yvals = list(set([ad["metadata"]["position y"] for ad in datasets])) mdgrid = { "grid center x": np.mean(xvals), "grid center y": np.mean(yvals), "grid shape x": len(xvals), "grid shape y": len(yvals), # grid size in um includes boundaries of pixels "grid size x": np.ptp(xvals)*(1 + 1/(len(xvals)-1)), "grid size y": np.ptp(yvals)*(1 + 1/(len(yvals)-1)), } # Update with new metadata (note that grid index x/y is populated via # MetaData._autocomplete_grid_metadata) [ad["metadata"].update(mdgrid) for ad in datasets] return datasets
5,338,944
def _asthetics(fig, axs, lines):
    """
    Extra formatting tasks

    :param `matplotlib.figure.Figure` fig: mpl figure
    :param list axs: list of `matplotlib.axes.Axes`
    :param `matplotlib.lines.Line2D` lines: ROC lines
    :return:
    """
    # format axes
    axs[0][0].set_yticks([0, 0.5, 1])
    yticks = [-1, 0, 1, 2]
    axs[1][0].set_yticks(yticks)
    axs[1][0].set_yticklabels([str(10 ** float(l)) for l in yticks])
    axs[0][1].set_xlabel("FPR")
    axs[0][0].set_ylabel("TPR")
    axs[1][0].set_ylabel("Time per Molecule (s)")

    # format legend
    fig.legend(lines, [f"{w}" for w in [0, 10, 100]], (.83, .42),
               title="Constraint Weight")

    # format canvas
    plt.subplots_adjust(left=0.1, right=0.8, top=0.86)
5,338,945
def format_len(x):
    """
    >>> format_len('abc')
    3
    >>> format_len(('(', ('(', 'def', ')'), 'yz', ')'))
    11
    """
    if not isinstance(x, (list, tuple)):
        return len(x)
    if len(x) > 3:
        sep_len = 2 * (len(x) - 3)
    else:
        sep_len = 0
    return sum(map(format_len, x)) + sep_len
5,338,946
def build_user_agent():
    """Build the charmcraft's user agent."""
    if any(key.startswith(prefix) for prefix in TESTING_ENV_PREFIXES
           for key in os.environ.keys()):
        testing = " (testing) "
    else:
        testing = " "
    os_platform = "{0.system}/{0.release} ({0.machine})".format(utils.get_os_platform())
    return "charmcraft/{}{}{} python/{}".format(
        __version__, testing, os_platform, platform.python_version()
    )
5,338,947
def test_presun_tam_a_zpet(): """Zkontroluje přesouvání karet tam a zpátky""" from klondike import presun_nekolik_karet zdroj = [ (3, 'Kr', False), (4, 'Sr', False), (5, 'Kr', False), ] cil = [ (11, 'Pi', True), (12, 'Ka', True), (13, 'Pi', True), ] presun_nekolik_karet(zdroj, cil, 1) assert zdroj == [ (3, 'Kr', False), (4, 'Sr', False), ] assert cil == [ (11, 'Pi', True), (12, 'Ka', True), (13, 'Pi', True), (5, 'Kr', False), ] presun_nekolik_karet(cil, zdroj, 2) assert zdroj == [ (3, 'Kr', False), (4, 'Sr', False), (13, 'Pi', True), (5, 'Kr', False), ] assert cil == [ (11, 'Pi', True), (12, 'Ka', True), ] presun_nekolik_karet(zdroj, cil, 3) assert zdroj == [ (3, 'Kr', False), ] assert cil == [ (11, 'Pi', True), (12, 'Ka', True), (4, 'Sr', False), (13, 'Pi', True), (5, 'Kr', False), ] presun_nekolik_karet(cil, zdroj, 4) assert zdroj == [ (3, 'Kr', False), (12, 'Ka', True), (4, 'Sr', False), (13, 'Pi', True), (5, 'Kr', False), ] assert cil == [ (11, 'Pi', True), ] presun_nekolik_karet(zdroj, cil, 5) assert zdroj == [ ] assert cil == [ (11, 'Pi', True), (3, 'Kr', False), (12, 'Ka', True), (4, 'Sr', False), (13, 'Pi', True), (5, 'Kr', False), ]
5,338,948
def set_doi_ark(page_number, records_per_page, sort_on, doi_ark_value): """ Retrieve all metadata records for admin view. Retrieval is done via POST because we must pass a session id so that the user is authenticated. Access control is done here. A user can modify only their own records because their session_id sent with the request. """ username = _authenticate_admin_from_session(request) #pageNumber is 0 based index. Need first page to start at 0 for math for setting arrayLowerBound and arrayUpperBound. try: if username: if request.method == 'POST': #need to do input sanitization on all these values! Separating variables so outside does not have direct access to #database query. sort_by = validate_admin_sort_by(sort_on) record_list = Metadata.objects(__raw__={'published':'pending'}).order_by(sort_by) arrayLowerBound = int(page_number) * int(records_per_page) arrayUpperBound = int(page_number) * int(records_per_page) + int(records_per_page) #Only return array elements between indicies. Don't want to return all possible values #and overload browser with too much data. This is a version of 'pagination.' return jsonify(dict(results=record_list[arrayLowerBound:arrayUpperBound], num_entries=(len(record_list)/int(records_per_page)))) else: return Response('Bad or missing session id.', status=401) except: return Response('Bad request for records', 400)
5,338,949
def check_datetime(value):
    """
    Check and convert "value" to a datetime object.

    Value can have multiple formats, according to the argparse.ArgumentParser
    doc (defined in :func:`parse_cmd_args`)

    Args:
        value (str): The input value

    Returns:
        datetime.datetime: the input value converted to a datetime object

    Raises:
        argparse.ArgumentTypeError
    """
    # Case "now"
    if value == 'now':
        return datetime.datetime.now()

    # Case "+hh" and "-hh"
    if value.startswith('+') or value.startswith('-'):
        if not value[1:].isdigit():
            raise argparse.ArgumentTypeError('"%s": format admitted "[+|-]nn" (e.g +24)' % value)
        hours = int(value)
        return datetime.datetime.now() + datetime.timedelta(0, hours * 3600)

    # Case "%y/%m/%d-%H:%M"
    try:
        return datetime.datetime.strptime(value, '%y/%m/%d-%H:%M')
    except ValueError:
        pass

    # Case "%y/%m/%d"
    try:
        return datetime.datetime.strptime(value, '%y/%m/%d')
    except ValueError:
        pass

    raise argparse.ArgumentTypeError(
        '"%s": not a valid format (admitted: "now", "[+|-]hh" (e.g. "+24" or "-4") or "yy/mm/dd[-HH:MM]")' % value)
5,338,950
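A small usage sketch for `check_datetime` above, assuming the module-level `import argparse` and `import datetime` that the function already relies on; it simply exercises the accepted input formats.

import argparse
import datetime

print(check_datetime('now'))             # current local time
print(check_datetime('+24'))             # 24 hours from now
print(check_datetime('-4'))              # 4 hours ago
print(check_datetime('21/05/01-12:30'))  # 2021-05-01 12:30:00
try:
    check_datetime('tomorrow')
except argparse.ArgumentTypeError as err:
    print(err)                           # lists the admitted formats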
def empiriline(x, p, L):
    """
    Use the line L (which is an EmissionLine object) as a template.  The line
    is shifted, then interpolated, then rescaled, and allowed to float.
    """
    xnew = x - p[1]
    yout = sp.zeros(len(xnew))
    m = (xnew >= L.wv.min()) * (xnew <= L.wv.max())
    ynew, znew = L.interp(xnew[m])
    yout[m] = p[0] * ynew + p[2]
    return yout
5,338,951
def _get_z_slice_fn(z, data_dir): """Get array slice map to be applied to z dimension Args: z: String or 1-based index selector for z indexes constructed as any of the following: - "best": Indicates that z slices should be inferred based on focal quality - "all": Indicates that a slice for all z-planes should be used - str or int: A single value will be interpreted as a single index - tuple: A 2-item or 3-item tuple forming the slice (start, stop[, step]); stop is inclusive - list: A list of integers will be used as is data_dir: Data directory necessary to infer 'best' z planes Returns: A function with signature (region_index, tile_x, tile_y) -> slice_for_array where slice_for_array will either be a slice instance or a list of z-indexes (Note: all indexes are 0-based) """ if not z: raise ValueError('Z slice cannot be defined as empty value (given = {})'.format(z)) # Look for keyword strings if isinstance(z, str) and z == 'best': map = function_data.get_best_focus_coord_map(data_dir) return lambda ri, tx, ty: [map[(ri, tx, ty)]] if isinstance(z, str) and z == 'all': return lambda ri, tx, ty: slice(None) # Parse argument as 1-based index list and then convert to 0-based zi = cli.resolve_index_list_arg(z, zero_based=True) return lambda ri, tx, ty: zi
5,338,952
def test_stem_context_manager(tokenize, benchmark_text):
    """Using the morphological analyzer as a context manager"""
    with stemming.jstem_ctx() as stem:
        jstem = stem
        tokens = tokenize(benchmark_text)
        result = stem.analyze([t[0] for t in tokens])
        assert type(result) is list
        assert 'analysis' in result[0]
        assert jstem._proc
    assert not jstem._proc
5,338,953
def fit_and_print_all(model, model_name): """Fits the model against all data instances Args: model: model to fit to the data sets model_name: identifier for the outcomes """ for data_set, x in data_sets.items(): selector, method = data_set train, test = x key = ','.join([model_name, selector, method]) print("Training Shape: {}".format(train.shape)) if key not in scores: print(key) fitted, score = fit_and_print(model, train, test) scores[key] = score else: score = scores[key] print("{}: {:.3f}".format(key, score)) print() best_score = max(scores.values()) best_key = key_by_value(scores, best_score) print("Best Model So Far: {}, Score={:.2f}".format( best_key, best_score)) with open(Files.future_model_selection, 'wb') as writer: pickle.dump(scores, writer) return
5,338,954
def bar_data_wrapper(func):
    """Standardizes column names for any bar data"""
    def wrapper(*args, **kwargs):
        assert Ticker(args[0])
        res: pd.DataFrame = func(*args, **kwargs)
        return res.rename(columns=COL_NAMES).iterrows()

    return wrapper
5,338,955
def rgb_to_grayscale(
    image: Tensor, rgb_weights: list[float] = [0.299, 0.587, 0.114]
) -> Tensor:
    """Convert an RGB image to grayscale version of image.

    Image data is assumed to be in the range of [0.0, 1.0].

    Args:
        image (Tensor[B, 3, H, W]): RGB image to be converted to grayscale.
        rgb_weights (list[float]): Weights that will be applied on each
            channel (RGB). Sum of the weights should add up to one.

    Returns:
        grayscale (Tensor[B, 1, H, W]): Grayscale version of the image.
    """
    rgb_weights = torch.FloatTensor(rgb_weights)
    if not isinstance(rgb_weights, Tensor):
        raise TypeError(f"`rgb_weights` must be a `Tensor`. "
                        f"But got: {type(rgb_weights)}.")
    if rgb_weights.shape[-1] != 3:
        raise ValueError(f"`rgb_weights` must have a shape of [*, 3]. "
                         f"But got: {rgb_weights.shape}.")

    r = image[..., 0:1, :, :]
    g = image[..., 1:2, :, :]
    b = image[..., 2:3, :, :]

    if not torch.is_floating_point(image) and (image.dtype != rgb_weights.dtype):
        raise ValueError(f"`image` and `rgb_weights` must have the same dtype. "
                         f"But got: {image.dtype} and {rgb_weights.dtype}.")

    w_r, w_g, w_b = rgb_weights.to(image).unbind()
    return w_r * r + w_g * g + w_b * b
5,338,956
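A brief usage sketch for `rgb_to_grayscale` above, assuming PyTorch is installed and the function plus its `torch`/`Tensor` imports are in scope.

import torch

rgb = torch.rand(2, 3, 4, 4)   # batch of 2 random RGB images in [0, 1]
gray = rgb_to_grayscale(rgb)   # default BT.601-style weights
print(gray.shape)              # torch.Size([2, 1, 4, 4])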
def parse(msg: str) -> Dict[str, Union[str, int, float, bool, datetime]]: """Parse message from the feed output by dump1090 on port 30003 A dict is returned withAn SBS-1 message has the following attributes: messageType : string transmissionType : sbs1.TransmissionType sessionID : int aircraftID : int icao24 : string flightID : int generatedDate : datetime loggedDate : datetime callsign : string altitude : int groundSpeed : int track : int lat : float lon : float verticalRate : int squawk : int alert : bool emergency : bool spi : bool onGround : bool None is returned if the message was not valid A field not present in the parsed message will be set to None. For a description of the attributes, please see github.com/wiseman/node-sbs1 """ if msg is None: return None sbs1 = {} parts = msg.lstrip().rstrip().split(',') try: # logging.debug("%s %s %s" % (parts[1], parts[4], ",".join(parts[10:]))) sbs1["messageType"] = __parseString(parts, 0) if sbs1["messageType"] != "MSG": return None sbs1["transmissionType"] = __parseInt(parts, 1) sbs1["sessionID"] = __parseString(parts, 2) sbs1["aircraftID"] = __parseString(parts, 3) sbs1["icao24"] = __parseString(parts, 4) sbs1["flightID"] = __parseString(parts, 5) sbs1["generatedDate"] = __parseDateTime(parts, 6, 7) sbs1["loggedDate"] = __parseDateTime(parts, 8, 9) sbs1["callsign"] = __parseString(parts, 10) if sbs1["callsign"]: sbs1["callsign"] = sbs1["callsign"].rstrip() sbs1["altitude"] = __parseInt(parts, 11) sbs1["groundSpeed"] = __parseFloat(parts, 12) sbs1["track"] = __parseFloat(parts, 13) sbs1["lat"] = __parseFloat(parts, 14) sbs1["lon"] = __parseFloat(parts, 15) sbs1["verticalRate"] = __parseInt(parts, 16) sbs1["squawk"] = __parseInt(parts, 17) sbs1["alert"] = __parseBool(parts, 18) sbs1["emergency"] = __parseBool(parts, 19) sbs1["spi"] = __parseBool(parts, 20) sbs1["onGround"] = __parseBool(parts, 21) except IndexError as e: logging.error("Failed to init sbs1 message from '%s'" % (msg), exc_info=True) return None return sbs1
5,338,957
def test_phrase_tag():
    """
    test for phrase POS tagger
    """
    text = 'Lionel Messi pergi ke pasar di area Jakarta Pusat.'
    expected_result = [
        ('Lionel Messi', 'NP'),
        ('pergi', 'VP'),
        ('ke', 'IN'),
        ('pasar', 'NN'),
        ('di', 'IN'),
        ('area', 'NN'),
        ('Jakarta Pusat', 'NP'),
        ('.', 'SYM')
    ]

    assert postagger.get_phrase_tag(text) == expected_result
5,338,958
def _aware_to_agnostic(fr: NDFrame) -> NDFrame: """Recalculate values in tz-aware series or dataframe, to get a tz-agnostic one. (i.e., A to B).""" if not fr.index.tz: raise ValueError("``fr`` must be tz-aware.") idx_out = _idx_after_conversion(fr, None) # Convert daily or longer. if stamps.freq_shortest(idx_out.freq, "D") == "D": # One-to-one correspondence between the timestamps in input and ouput frames. # --> Simply replace the index. return fr.set_axis(idx_out) # Convert hourly or shorter. # There are timestamps in the output that do not exist in the input. In that case, # repeat the value of the previous hour. partly = fr.tz_localize(None) partly = partly[~partly.index.duplicated()] # remove duplicates def value(ts): # Take value of prev hour if current time not found in the input. try: return partly.loc[ts] except KeyError: return partly.loc[ts - pd.Timedelta(hours=1)] return fr.__class__([value(ts) for ts in idx_out], index=idx_out)
5,338,959
def cumulative_prob_to_value(prob, hp): """Convert a value from [0, 1] to a hyperparameter value.""" if isinstance(hp, Fixed): return hp.value elif isinstance(hp, Boolean): return bool(prob >= 0.5) elif isinstance(hp, Choice): ele_prob = 1 / len(hp.values) index = math.floor(prob / ele_prob) # Can happen when `prob` is very close to 1. if index == len(hp.values): index = index - 1 return hp.values[index] elif isinstance(hp, (Int, Float)): sampling = hp.sampling or 'linear' if sampling == 'linear': value = prob * (hp.max_value - hp.min_value) + hp.min_value elif sampling == 'log': value = hp.min_value * math.pow(hp.max_value / hp.min_value, prob) elif sampling == 'reverse_log': value = (hp.max_value + hp.min_value - hp.min_value * math.pow(hp.max_value / hp.min_value, 1 - prob)) else: raise ValueError('Unrecognized sampling value: {}'.format(sampling)) if hp.step is not None: values = np.arange(hp.min_value, hp.max_value + 1e-7, step=hp.step) closest_index = np.abs(values - value).argmin() value = values[closest_index] if isinstance(hp, Int): return int(value) return value else: raise ValueError('Unrecognized HyperParameter type: {}'.format(hp))
5,338,960
def sms_confirm_payment(user):
    """
    Mark your deposit as paid after you have deposited the money

    :param user: the user whose deposit is being confirmed
    :returns: A message letting you know the status of the deposit
    """
    connections = lookup_connections(backend="telerivet",
                                     identities=[user.userdata.phone])
    send("Congrats! Your {} deposit has been repaid to {}!".format(270, 'test'),
         connections=connections)
5,338,961
def get_ui_node_spec(module=None, category="default"): """ Returns a dictionary describing the specifications for each Node in a module. Parameters ----------- module: module The Python module for which the ui specs should be summarized. Only the top-level classes will be included in the spec. (i.e. no recursive search through submodules) category: str, optional Default is "default". Top-level category name for the group of Nodes. Returns -------- dict Dictionary of {category: {Node1: spec_1, Node2: spec2, ...}} describing the specs for each Node. """ import podpac import podpac.datalib # May not be imported by default spec = {} def get_ui_spec(cls): filter = [] spec = {"help": cls.__doc__, "module": cls.__module__ + "." + cls.__name__, "attrs": {}} for attr in dir(cls): if attr in filter: continue attrt = getattr(cls, attr) if not isinstance(attrt, tl.TraitType): continue if "attr" not in attrt.metadata: continue type_ = attrt.__class__.__name__ type_extra = str(attrt) if type_ == "Union": type_ = [t.__class__.__name__ for t in attrt.trait_types] type_extra = "Union" elif type_ == "Instance": type_ = attrt.klass.__name__ type_extra = attrt.klass default_val = attrt.default() if not isinstance(type_extra, str): type_extra = str(type_extra) try: if np.isnan(default_val): default_val = 'nan' except: pass if default_val == tl.Undefined: default_val = None spec["attrs"][attr] = { "type": type_, "type_str": type_extra, # May remove this if not needed "values": getattr(attrt, "values", None), "default": default_val, "help": attrt.help, } spec.update(getattr(cls, "_ui_spec", {})) return spec if module is None: modcat = zip( [podpac.data, podpac.algorithm, podpac.compositor, podpac.datalib], ["data", "algorithms", "compositors", "datalib"], ) for mod, cat in modcat: spec.update(get_ui_node_spec(mod, cat)) return spec spec[category] = {} for obj in dir(module): ob = getattr(module, obj) if not inspect.isclass(ob): continue if not issubclass(ob, podpac.Node): continue spec[category][obj] = get_ui_spec(ob) return spec
5,338,962
def validate_remote_id(value):
    """make sure the remote_id looks like a url"""
    if not value or not re.match(r"^http.?:\/\/[^\s]+$", value):
        raise ValidationError(
            _("%(value)s is not a valid remote_id"),
            params={"value": value},
        )
5,338,963
def parse_branch_name(branch_name):
    """Split up a branch name of the form 'ocm-X.Y[-mce-M.N]'.

    :param branch_name: A branch name. If of the form [remote/]ocm-X.Y[-mce-M.N]
        we will parse it as noted below; otherwise the first return will be False.
    :return parsed (bool): True if the branch_name was parseable; False otherwise.
    :return remote (str): If parsed and the branch_name contained a remote/ prefix,
        it is returned here; otherwise this is the empty string.
    :return prefix (str): Two-digit semver prefix of the bundle to be generated.
        If the branch name is of the form [remote/]ocm-X.Y, this will be X.Y; if of
        the form [remote/]ocm-X.Y-mce-M.N it will be M.N. If not parseable, it will
        be the empty string.
    :return channel (str): The name of the channel in which we'll include the bundle.
        If the branch name is of the form [remote/]ocm-X.Y, this will be ocm-X.Y; if
        of the form [remote/]ocm-X.Y-mce-M.N it will be mce-M.N. If not parseable,
        it will be the empty string.
    """
    m = MCE_BRANCH_RE.match(branch_name)
    if m:
        return True, m.group(1), m.group(2), m.group(3)
    m = OCM_BRANCH_RE.match(branch_name)
    if m:
        return True, m.group(1), m.group(3), m.group(2)
    return False, '', '', ''
5,338,964
def form_update(form, node_xyz, trail_forces, reaction_forces):
    """
    Update the node and edge attributes of a form after equilibrating it.
    """
    # assign nodes' coordinates
    for node, xyz in node_xyz.items():
        form.node_xyz(node, xyz)
        # form.node_attributes(key=node, names=["x", "y", "z"], values=xyz)

    # assign forces on trail edges
    for edge, tforce in trail_forces.items():
        form.edge_attribute(key=edge, name="force", value=tforce)

    # assign reaction forces
    for node in form.support_nodes():
        rforce = reaction_forces[node]
        form.node_attributes(key=node, names=["rx", "ry", "rz"], values=rforce)

    # assign lengths to deviation edges
    for u, v in form.edges():
        # length = form.edge_length(u, v)
        length = length_vector(node_xyz[u] - node_xyz[v])
        # look up the force on the current edge, not the leftover `edge`
        # variable from the trail-force loop above
        force = form.edge_attribute(key=(u, v), name="force")
        length = np.copysign(length, force)
        form.edge_attribute(key=(u, v), name="length", value=length)
5,338,965
def symmetric_padding( arr, width): """ Pad an array using symmetric values. This is equivalent to `np.pad(mode='symmetric')`, but should be faster. Also, the `width` parameter is interpreted in a more general way. Args: arr (np.ndarray): The input array. width (int|float|Iterable[int|float]): Size of the padding to use. This is used with `flyingcircus.base.multi_scale_to_int()`. The shape of the array is used for the scales. Returns: result (np.ndarray): The padded array. Examples: >>> arr = arange_nd((2, 3)) + 1 >>> print(arr) [[1 2 3] [4 5 6]] >>> new_arr = symmetric_padding(arr, (1, 2)) >>> print(new_arr) [[2 1 1 2 3 3 2] [2 1 1 2 3 3 2] [5 4 4 5 6 6 5] [5 4 4 5 6 6 5]] >>> new_arr = symmetric_padding(arr, ((0, 1), 2)) >>> print(new_arr) [[2 1 1 2 3 3 2] [5 4 4 5 6 6 5] [5 4 4 5 6 6 5]] >>> new_arr = symmetric_padding(arr, ((1, 0), 2)) >>> print(new_arr) [[2 1 1 2 3 3 2] [2 1 1 2 3 3 2] [5 4 4 5 6 6 5]] >>> new_arr = symmetric_padding(arr, ((0, 1.0),)) >>> print(new_arr) [[1 2 3 3 2 1] [4 5 6 6 5 4] [4 5 6 6 5 4] [1 2 3 3 2 1]] >>> arr = arange_nd((5, 7, 11)) + 1 >>> np.all(symmetric_padding(arr, 17) == np.pad(arr, 17, 'symmetric')) True """ width = fc.base.multi_scale_to_int(width, arr.shape) if any(any(size for size in sizes) for sizes in width): shape = tuple( low + dim + up for dim, (low, up) in zip(arr.shape, width)) result = np.zeros(shape, dtype=arr.dtype) target_slices = tuple( tuple( slice( max((i - (1 if low % dim else 0)) * dim + low % dim, 0), min((i + 1 - (1 if low % dim else 0)) * dim + low % dim, low + dim + up)) for i in range( fc.base.div_ceil(low, dim) + fc.base.div_ceil(up, dim) + 1)) for dim, (low, up) in zip(arr.shape, width)) len_target_slices = tuple(len(items) for items in target_slices) parities = tuple( fc.base.div_ceil(low, dim) % 2 for dim, (low, up) in zip(arr.shape, width)) for i, target_slicing in enumerate(itertools.product(*target_slices)): ij = np.unravel_index(i, len_target_slices) source_slicing = [] for idx, target_slice, parity, dim in \ zip(ij, target_slicing, parities, arr.shape): step = 1 if idx % 2 == parity else -1 start = stop = None span = target_slice.stop - target_slice.start if span != dim: if target_slice.start == 0: start = \ (dim - span) if idx % 2 == parity else (span - 1) else: stop = \ span if idx % 2 == parity else (dim - span - 1) source_slicing.append(slice(start, stop, step)) source_slicing = tuple(source_slicing) result[target_slicing] = arr[source_slicing] else: result = arr return result
5,338,966
def main(): """Will strip metadata and optionally randomize filenames from a directory of videos.""" args = docopt(__doc__) vid_paths = get_video_paths(args["INDIR"]) outdir = Path(args["OUTDIR"]) try: outdir.mkdir() except FileExistsError: print(f"OUTDIR {outdir} must not already exist. Aborting.") sys.exit(2) setup_log(args["-l"]) if args["-m"]: vid_map = transpose_paths(vid_paths, outdir) else: vid_map = randomize_paths(vid_paths, outdir, args["-s"]) for orig_path, new_path in vid_map.items(): # strip metadata then save into the csv log file: # orig_path,output (either new_path or "FAILED" if it was not successful) logging.info("%s,%s", orig_path, strip_metadata(orig_path, new_path))
5,338,967
def start(app_component="Main"):
    """
    Starts the script
    """
    global t0
    t0 = int(round(time.time() * 1000))
    log_to_disk('Start', msg=app_name+':'+app_component+' starting invocation', kv=kvalue(t0=t0))
5,338,968
def _uid_or_str(node_or_entity):
    """
    Helper function to support the transition from `Entity` objects to `Node` objects.
    """
    return (
        node_or_entity.uid
        if hasattr(node_or_entity, "uid")
        else str(node_or_entity)
    )
5,338,969
def run(ctx): """ Run the application. """ ctx.run("python manage.py run", echo=True)
5,338,970
def main(src_fn, dst_fn, defect_net_name='minnie_mip2_defect_v03'): """Add defect annotations to a training dataset Args: * src_fn: path to H5 to be processed * dst_fn: path where the new H5 will be written """ defect_net = ModelArchive(defect_net_name) down = downsample(2) print('Processing H5 {0}'.format(src_fn)) with h5py.File(src_fn, 'r') as src, h5py.File(dst_fn, 'w') as dst: for src_k in src.keys(): print('Processing dset {0}'.format(src_k)) n = 0 src_dset = src[src_k] dst_dset = dst.create_dataset( src_k, (src_dset.shape[0], 6, 1536, 1536), dtype='f') for i in range(src_dset.shape[0]): a, b = src_dset[i] a, a_c, a_f = run_net_chunked(defect_net, a) b, b_c, b_f = run_net_chunked(defect_net, b) a = down(a.unsqueeze(0)).squeeze(0).numpy() b = down(b.unsqueeze(0)).squeeze(0).numpy() a_c = down(a_c.unsqueeze(0)).squeeze(0).numpy() b_c = down(b_c.unsqueeze(0)).squeeze(0).numpy() a_f = down(a_f.unsqueeze(0)).squeeze(0).numpy() b_f = down(b_f.unsqueeze(0)).squeeze(0).numpy() dst_dset[n] = a, b, a_c, b_c, a_f, b_f n += 1 print('Progress {}/{}'.format(n, src_dset.shape[0]), end='\r')
5,338,971
def power(maf=0.5,beta=0.1, N=100, cutoff=5e-8):
    """
    estimate power for a given allele frequency, effect size beta and sample size N

    Assumption:
        under the null hypothesis, the z-score beta_ML is distributed as
            p_0   = N(0, 1.0/(2.0*maf*(1.0-maf)*N));
        under the alternative, beta_ML is distributed as
            p_alt = N(beta, 1.0/(2.0*maf*(1.0-maf)*N))

    Arguments:
        maf:    minor allele frequency of the SNP
        beta:   effect size of the SNP
        N:      sample size (number of individuals)
        cutoff: significance threshold (p-value) for declaring a detection

    Returns:
        power:  probability to detect a SNP in that study with the given parameters
        pvals:  p-values over the quantile grid used to estimate the power
    """
    """
    std(snp) = sqrt(2.0*maf*(1-maf))
    power = fraction of the alternative distribution whose p-value falls below the cutoff

    beta_ML = (snp^T*snp)^{-1}*snp^T*Y = cov(snp,Y)/var(snp)
    E[beta_ML]   = (snp^T*snp)^{-1}*snp^T*E[Y] = (snp^T*snp)^{-1}*snp^T*snp * beta = beta
    Var[beta_ML] = (snp^T*snp)^{-1}*(snp^T*snp)*(snp^T*snp)^{-1} = (snp^T*snp)^{-1}
                 = 1.0/(N*var(snp)) = 1.0/(N*2.0*maf*(1.0-maf))
    """
    assert maf>=0.0 and maf<=0.5, "maf needs to be between 0.0 and 0.5, got %f" % maf
    if beta<0.0:
        beta=-beta
    std_beta = 1.0/np.sqrt(N*(2.0 * maf*(1.0-maf)))
    non_centrality = beta
    beta_samples = np.random.normal(loc=non_centrality, scale=std_beta)
    n_grid = 100000
    beta_in = np.arange(0.5/(n_grid+1.0),(n_grid-0.5)/(n_grid+1.0),1.0/(n_grid+1.0))
    beta_theoretical = ((st.norm.isf(beta_in)* std_beta) + non_centrality)
    pvals = st.chi2.sf( (beta_theoretical/std_beta)*(beta_theoretical/std_beta) ,1.0)
    power = (pvals<cutoff).mean()
    return power, pvals
5,338,972
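A hedged cross-check of the grid-based estimate above: the power of a 1-df chi-square test with non-centrality (beta/std_beta)**2 has a closed form, so the two numbers should agree closely. The parameter values are illustrative, and the call assumes the power() function above is importable.

import numpy as np
import scipy.stats as st

maf, beta, N, cutoff = 0.3, 0.05, 50000, 5e-8
std_beta = 1.0 / np.sqrt(N * 2.0 * maf * (1.0 - maf))
ncp = (beta / std_beta) ** 2                      # non-centrality parameter
critical = st.chi2.isf(cutoff, df=1)              # chi-square threshold at the cutoff
closed_form = st.ncx2.sf(critical, df=1, nc=ncp)  # analytic power

grid_estimate, _ = power(maf=maf, beta=beta, N=N, cutoff=cutoff)
print(closed_form, grid_estimate)                 # the two estimates should nearly match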
def test_bheap_pop_twice_push(build_heap_of_ten): """Test popping twice and pushing.""" popped1 = build_heap_of_ten.pop() popped2 = build_heap_of_ten.pop() build_heap_of_ten.push(10) assert popped1 == 1.5 assert popped2 == 2 assert build_heap_of_ten._list == [3, 6, 15, 7, 9, 16, 27, 8, 10]
5,338,973
def get_rotation_matrix(angle: float, direction: np.ndarray, point: np.ndarray = None) -> np.ndarray: """Compute rotation matrix relative to point and direction Args: angle (float): angle of rotation in radian direction (np.ndarray): axis of rotation point (np.ndarray, optional): center of rotation. Defaults to None. Returns: np.ndarray: rotation_matrix """ sina = np.sin(angle) cosa = np.cos(angle) direction = direction[:3] / np.linalg.norm(direction[:3]) M = np.diag([cosa, cosa, cosa, 1.0]) M[:3, :3] += np.outer(direction, direction) * (1.0 - cosa) direction = direction * sina M[:3, :3] += np.array([[0.0, -direction[2], direction[1]], [direction[2], 0.0, -direction[0]], [-direction[1], direction[0], 0.0]]) # if point is specified, rotation is not around origin if point is not None: point = np.array(point[:3], dtype=np.float64, copy=False) M[:3, 3] = point - np.dot(M[:3, :3], point) return M
5,338,974
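A quick sanity check, assuming the function above is importable: rotating the point (1, 0, 0) by 90 degrees about the z-axis around the origin should land on (0, 1, 0).

import numpy as np

M = get_rotation_matrix(np.pi / 2.0, np.array([0.0, 0.0, 1.0]))
p = np.array([1.0, 0.0, 0.0, 1.0])   # homogeneous coordinates
print(np.round(M @ p, 6))            # -> [0. 1. 0. 1.]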
def assert_check(args: dict = None, log_level: str = LOG_LEVEL) -> bool:
    """
    Assert basic sanity checks on the caller's arguments: every key in `args`
    must be non-empty and have the type stored as its corresponding value.
    """
    if args is None:
        logger.critical("Arguments dict is empty or does not exist!")
        return False
    else:
        logging.debug("Args dictionary exists, processing assertion check...")
        try:
            for k, v in args.items():
                assert k is not None
                assert k != ""
                assert k != []
                assert k != {}
                assert k != ()
                assert type(k) == v
            # only report success after every entry has been checked
            return True
        except AssertionError:
            if log_level == "DEBUG":
                _, _, tb = sys.exc_info()
                traceback.print_tb(tb)
                tb_info = traceback.extract_tb(tb)
                _, line, _func, text = tb_info[-1]
                logging.error(
                    f'An error occurred on line {line} in statement "{text}" in function "{inspect.stack()[1].function}".'
                )
                return False
            else:
                logging.critical(
                    f'An assertion error occurred but did not call traceback because log level is: "{log_level}".'
                )
                return False
        except Exception as e:
            logging.error(e)
            raise
        return False
5,338,975
def establish_github_connection(store: dict[str, Any]) -> ValidationStepResult: """ Establishes the connection to GitHub. If the name of the environment variable storing the GitHub PAT is not given, then it will default to searching for one named "GH_TOKEN". If provided, can help rate-limiting be less stringent. See https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting for more details. Uses the repository named in the system environment variable "GITHUB_REPOSITORY" if it exists. If not, default to the hub repository which is named in the configurations (loaded in using the store). Returns: A ValidationStepResult object with * the Github object, * the object of the repository from which the pull request originated * a dictionary of label names to labels that can be applied to the pull request. """ logger.info( "Running validations version %s", store.get( "VALIDATIONS_VERSION", "<missing validation version number>" ) ) logger.info("Current working directory: %s", os.getcwd()) logger.info("GitHub Actions information:") logger.info( "GitHub Actions event name: %s", os.environ.get("GITHUB_EVENT_NAME", "<missing GitHub event name>") ) logger.info("Connecting to GitHub and retrieving repository...") # initial GitHub connection github_PAT: str = os.environ.get(store.get( "GITHUB_TOKEN_ENVIRONMENT_VARIABLE_NAME", "GH_TOKEN" )) github: Github = Github(github_PAT) if github_PAT is not None else Github() # Get specific repository repository_name = os.environ.get( "GITHUB_REPOSITORY", store.get("HUB_REPOSITORY_NAME") ) if repository_name is None: raise RuntimeError("FAILURE: could not find GitHub repository") repository: Repository = github.get_repo(repository_name) # Get list of possible labels to apply to PR possible_labels = {l.name: l for l in repository.get_labels()} logger.info("Repository successfully retrieved") logger.info("Github repository: %s", repository.full_name) return ValidationStepResult( success=True, to_store={ "github": github, "repository": repository, "possible_labels": possible_labels } )
5,338,976
def recreate(): """ Recreate elasticsearch indices and request docs. """ delete_index() create_index() create_docs()
5,338,977
def answer(panel_array):
    """ Returns the maximum product of positive and (odd) negative numbers."""
    print("panel_array=", panel_array)
    # Edge case I: no panels :]
    if (len(panel_array) == 0):
        return str(0)

    # Get zero panels.
    zero_panels = list(filter(lambda x: x == 0, panel_array))
    print("zero_panels=", zero_panels)

    # Edge case II: no positive nor negative panels.
    if (len(zero_panels) == len(panel_array)):
        return str(0)

    # Get positive panels
    positive_panels = list(filter(lambda x: x > 0, panel_array))
    print("positive_panels=", positive_panels)
    positive_product = 1
    for x in positive_panels:
        positive_product *= x

    # Get negative panels.
    negative_panels = sorted(list(filter(lambda x: x < 0, panel_array)))
    print("negative_panels=", negative_panels)

    # Edge case III: there is only one "negative panel".
    if (len(negative_panels) == 1):
        # If this is the only panel.
        if (len(panel_array) == 1):
            return str(negative_panels[0])
        # If there are no positive panels but at least one zero panel,
        # the zero beats the lone negative panel.
        elif (len(positive_panels) == 0) and (len(zero_panels) >= 1):
            return str(0)

    # Check number of negative panels.
    if len(negative_panels) % 2 != 0:
        # Drop the negative panel with the smallest magnitude
        # (the last one after the ascending sort).
        negative_panels.pop()
    print("final negative_panels=", negative_panels)
    negative_product = 1
    for x in negative_panels:
        negative_product *= x

    # Return product of those two.
    return str(negative_product * positive_product)
5,338,978
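A few worked calls, assuming the answer() function above (its in-function debug prints will also appear). For [-2, -3, 4, -5] the odd negative count drops -2, leaving (-3) * (-5) * 4 = 60.

print(answer([2, 0, 2, 2, 0]))   # returns "8"  (product of the positive panels)
print(answer([-2, -3, 4, -5]))   # returns "60"
print(answer([-3, 0]))           # returns "0"  (the zero beats the lone negative panel)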
def load_fromh5(filepath, dir_structure, slice_num, strt_frm=0):
    """
    load_fromh5 will extract the sinogram from the h5 file

    Output: the sinogram

    filepath: where the file is located in the system
    dir_structure: the h5 file directory structure
    slice_num: the slice where the sinogram will be extracted
    strt_frm (optional): the frame where the sinogram should begin
    """
    # Use a context manager so the file handle is closed once the slice has been read.
    with h5py.File(filepath, 'r') as f:  # e.g. ["entry/data/data"]
        print(f[dir_structure].shape)
        end_frm = f[dir_structure].shape[0]
        sino = f[dir_structure][int(strt_frm):int(end_frm), int(slice_num), :]  # For APS 2BM h5 file format
    return sino
5,338,979
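A self-contained sketch with a hypothetical file layout: build a small HDF5 file in the APS 2-BM shape (frames, slices, detector width) and pull one sinogram with the function above.

import h5py
import numpy as np

with h5py.File("demo_scan.h5", "w") as f:
    f.create_dataset("entry/data/data", data=np.random.rand(20, 4, 64))

sino = load_fromh5("demo_scan.h5", "entry/data/data", slice_num=2, strt_frm=5)
print(sino.shape)  # (15, 64): frames 5..19 of slice 2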
def lqr_6_2(time_limit=_DEFAULT_TIME_LIMIT, random=None, environment_kwargs=None): """Returns an LQR environment with 6 bodies of which first 2 are actuated.""" return _make_lqr( n_bodies=6, n_actuators=2, control_cost_coef=_CONTROL_COST_COEF, time_limit=time_limit, random=random, environment_kwargs=environment_kwargs, )
5,338,980
def machine_stop(request, tenant, machine): """ Stop (power off) the specified machine. """ with request.auth.scoped_session(tenant) as session: serializer = serializers.MachineSerializer( session.stop_machine(machine), context = { "request": request, "tenant": tenant } ) return response.Response(serializer.data)
5,338,981
def exec_command_rc(*cmdargs, **kwargs): """ Return the exit code of the command specified by the passed positional arguments, optionally configured by the passed keyword arguments. Parameters ---------- cmdargs : list Variadic list whose: 1. Mandatory first element is the absolute path, relative path, or basename in the current `${PATH}` of the command to run. 2. Optional remaining elements are arguments to pass to this command. All keyword arguments are passed as is to the `subprocess.call()` function. Returns ---------- int This command's exit code as an unsigned byte in the range `[0, 255]`, where 0 signifies success and all other values signal a failure. """ # 'encoding' keyword is not supported for 'subprocess.call'; remove it from kwargs. if 'encoding' in kwargs: kwargs.pop('encoding') return subprocess.call(cmdargs, **kwargs)
5,338,982
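Minimal usage sketch, assuming the function above is importable from its module (which provides the subprocess import): run the current interpreter and report its exit code.

import sys

rc = exec_command_rc(sys.executable, "--version")
print("exit code:", rc)  # 0 on success, non-zero on failure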
def _get_input_value(arg: Tuple[str, GraphQLArgument]) -> Dict[str, Any]: """Compute data for the InputValue fragment of the introspection query for a particular arg.""" return { "name": __InputValue.fields["name"].resolve(arg, None), "description": __InputValue.fields["description"].resolve(arg, None), "type": _get_type_ref(__InputValue.fields["type"].resolve(arg, None)), "defaultValue": __InputValue.fields["defaultValue"].resolve(arg, None), }
5,338,983
def wrapper(X_mixture,X_component):
    """ Takes in two arrays containing the mixture and component data as numpy arrays,
    and returns the estimates of kappa* obtained with the two gradient thresholds
    detailed in the paper as KM1 and KM2"""
    N=X_mixture.shape[0]
    M=X_component.shape[0]
    best_width,kernel=compute_best_rbf_kernel_width(X_mixture,X_component)
    lambda_values=np.array([1.00,1.05])
    dists=get_distance_curve(kernel,lambda_values,N=N,M=M)
    begin_slope=(dists[1]-dists[0])/(lambda_values[1]-lambda_values[0])
    dist_diff = np.concatenate((np.ones((N, 1)) / N, -1 * np.ones((M,1)) / M))
    distribution_RKHS_dist = sqrt(np.dot(dist_diff.T, np.dot(kernel, dist_diff))[0,0])
    thres_par=0.2
    nu1=(1-thres_par)*begin_slope + thres_par*distribution_RKHS_dist
    nu1=nu1/distribution_RKHS_dist
    lambda_star_est_1=mpe(kernel,N,M,nu=nu1)
    kappa_star_est_1=(lambda_star_est_1-1)/lambda_star_est_1
    nu2=1/sqrt(np.min([M,N]))
    nu2=nu2/distribution_RKHS_dist
    if nu2>0.9:
        nu2=nu1
    lambda_star_est_2=mpe(kernel,N,M,nu=nu2)
    kappa_star_est_2=(lambda_star_est_2-1)/lambda_star_est_2
    return (kappa_star_est_2,kappa_star_est_1)
5,338,984
def plot_abnormal_cumulative_return_with_errors(abnormal_volatility, abnormal_returns, events): """ Capturing volatility of abnormal returns """ pyplot.figure(figsize=FIGURE_SIZE) pyplot.errorbar( abnormal_returns.index, abnormal_returns, xerr=0, yerr=abnormal_volatility, label="events=%s" % events, color=COLOR_1 ) pyplot.axvline(x=0, color='black', alpha=.3) pyplot.grid(b=None, which=u'major', axis=u'y') pyplot.title("Abnormal Cumulative Return from Events with error") pyplot.xlabel("Window Length (t)") pyplot.ylabel("Cumulative Return (r)") pyplot.legend() pyplot.show()
5,338,985
def iou( outputs: torch.Tensor, targets: torch.Tensor, eps: float = 1e-7, threshold: float = 0.5, activation: str = "sigmoid" ): """ Args: outputs (torch.Tensor): A list of predicted elements targets (torch.Tensor): A list of elements that are to be predicted eps (float): epsilon to avoid zero division threshold (float): threshold for outputs binarization activation (str): An torch.nn activation applied to the outputs. Must be one of ['none', 'sigmoid', 'softmax2d'] Returns: float: IoU (Jaccard) score """ activation_fn = get_activation_by_name(activation) outputs = activation_fn(outputs) if threshold is not None: outputs = (outputs > threshold).float() intersection = torch.sum(targets * outputs) union = torch.sum(targets) + torch.sum(outputs) - intersection + eps return (intersection + eps) / union
5,338,986
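A standalone check of the Jaccard formula above on already-binarized masks (the get_activation_by_name helper is external, so the score is recomputed directly here following the same expression).

import torch

outputs = torch.tensor([[1.0, 1.0, 0.0, 0.0]])
targets = torch.tensor([[1.0, 0.0, 1.0, 0.0]])
eps = 1e-7

intersection = torch.sum(targets * outputs)                            # 1
union = torch.sum(targets) + torch.sum(outputs) - intersection + eps   # 3
print(((intersection + eps) / union).item())                           # ~0.3333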
def readCoords(f): """Read XYZ file and return as MRChem JSON friendly string.""" with open(f) as file: return '\n'.join([line.strip() for line in file.readlines()[2:]])
5,338,987
def fetch_hillstrom(target_col='visit', data_home=None, dest_subdir=None, download_if_missing=True, return_X_y_t=False, as_frame=True):
    """Load and return Kevin Hillstrom Dataset MineThatData (classification or regression).

    This dataset contains 64,000 customers who last purchased within twelve months.
    The customers were involved in an e-mail test.

    Major columns:

    * ``Visit`` (binary): target. 1/0 indicator, 1 = Customer visited website in the following two weeks.
    * ``Conversion`` (binary): target. 1/0 indicator, 1 = Customer purchased merchandise in the following two weeks.
    * ``Spend`` (float): target. Actual dollars spent in the following two weeks.
    * ``Segment`` (str): treatment. The e-mail campaign the customer received

    Read more in the :ref:`docs <Hillstrom>`.

    Args:
        target_col (string, 'visit' or 'conversion' or 'spend', default='visit'): Selects which column from dataset will be target
        data_home (str): The path to the folder where datasets are stored.
        dest_subdir (str): The name of the folder in which the dataset is stored.
        download_if_missing (bool): Download the data if not present. Raises an IOError if False and data is missing.
        return_X_y_t (bool, default=False): If True, returns (data, target, treatment) instead of a Bunch object.
        as_frame (bool): If True, returns a pandas Dataframe for the data, target and treatment objects
            in the Bunch returned object; Bunch return object will also have a frame member.

    Returns:
        Bunch or tuple: dataset.

        Bunch:
            By default dictionary-like object, with the following attributes:

                * ``data`` (ndarray or DataFrame object): Dataset without target and treatment.
                * ``target`` (Series object): Column target by values.
                * ``treatment`` (Series object): Column treatment by values.
                * ``DESCR`` (str): Description of the Hillstrom dataset.
                * ``feature_names`` (list): Names of the features.
                * ``target_name`` (str): Name of the target.
                * ``treatment_name`` (str): Name of the treatment.

        Tuple:
            tuple (data, target, treatment) if `return_X_y_t` is True

    References:
        https://blog.minethatdata.com/2008/03/minethatdata-e-mail-analytics-and-data.html
    """
    url = 'https://hillstorm1.s3.us-east-2.amazonaws.com/hillstorm_no_indices.csv.gz'
    csv_path = _get_data(data_home=data_home, url=url, dest_subdir=dest_subdir,
                         dest_filename='hillstorm_no_indices.csv.gz',
                         download_if_missing=download_if_missing)

    if target_col not in ('visit', 'conversion', 'spend'):
        raise ValueError(f"target_col value must be from {['visit', 'conversion', 'spend']}. "
                         f"Got value {target_col}.")

    data = pd.read_csv(csv_path, usecols=[i for i in range(8)])
    feature_names = list(data.columns)

    treatment = pd.read_csv(csv_path, usecols=['segment'])
    target = pd.read_csv(csv_path, usecols=[target_col])

    if as_frame:
        target = target[target_col]
        treatment = treatment['segment']
    else:
        data = data.to_numpy()
        target = target.to_numpy()
        treatment = treatment.to_numpy()

    module_path = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(module_path, 'descr', 'hillstrom.rst')) as rst_file:
        fdescr = rst_file.read()

    if return_X_y_t:
        return data, target, treatment
    else:
        target_name = target_col
        return Bunch(data=data, target=target, treatment=treatment, DESCR=fdescr,
                     feature_names=feature_names, target_name=target_name,
                     treatment_name='segment')
5,338,988
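A hedged usage sketch for the loader above: it downloads the CSV from the hard-coded URL on first use, so it needs network access and a writable cache directory.

data, target, treatment = fetch_hillstrom(target_col='conversion', return_X_y_t=True)
print(data.shape)                 # (64000, 8) customer features
print(target.value_counts())      # conversion indicator
print(treatment.value_counts())   # e-mail segment received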
def test_format(name, value): """Format test results.""" RESULTS[name] = value value = 'OK' if value else 'FAIL' print(name.ljust(40, '.'), value)
5,338,989
def api_update_note(note_id: int):
    """Update a note"""
    db = get_db()
    title = request.form.get("title")
    content = request.form.get("content")
    note = db.update_note(note_id, title, content)
    return jsonify(note.__dict__)
5,338,990
def padding_oracle(decrypt, cipher, *, bs, unknown=b"\x00", iv=None):
    """Padding Oracle Attack

    Given a ciphersystem such that:
    - The padding follows the format of PKCS7
    - The mode of the block cipher is CBC
    - We can check if the padding of a given cipher is correct
    - We can try to decrypt ciphertexts without limit
    we can break the ciphertext with Padding Oracle Attack.

    Usage:
        plain = padding_oracle(decrypt, cipher, bs=16)

    The function decrypt must receive ciphertext and return True or False:
        True when the given cipher could successfully be decrypted (No padding error)
        False when the given cipher cannot be decrypted (Padding error detected)
    """
    if len(cipher) % bs != 0:
        raise ValueError("The length of `cipher` must be a multiple of `bs`")

    # Split ciphertext into blocks
    cipher_blocks = []
    for i in range(0, len(cipher), bs):
        cipher_blocks.append(cipher[i : i + bs])

    plain_blocks = [None for i in range(len(cipher_blocks))]

    # Break the cipher
    for k in range(len(cipher_blocks) - 1, 0, -1):
        plain_blocks[k] = padding_oracle_block(
            decrypt, cipher_blocks[k - 1], cipher_blocks[k], bs
        )
        logger.info(
            "decrypted a block {}/{}: {}".format(
                len(cipher_blocks) - k + 1, len(cipher_blocks), plain_blocks[k]
            )
        )

    if isinstance(unknown, str):
        unknown = str2bytes(unknown)

    if iv:
        plain_blocks[0] = padding_oracle_block(decrypt, iv, cipher_blocks[0], bs)
        logger.info("decrypted an iv block: {}".format(plain_blocks[0]))
    else:
        plain_blocks[0] = unknown * bs

    return b"".join(plain_blocks)
5,338,991
def pixels(): """ Raspberry Pi pixels """ return render_template("pixels.html")
5,338,992
def env_break_shooter(pack: PackList, ent: Entity): """Special behaviour on the 'model' KV.""" if conv_int(ent['modeltype']) == 1: # MODELTYPE_MODEL pack.pack_file(ent['model'], FileType.MODEL) # Otherwise, a template name or a regular gib.
5,338,993
async def get_song_info(id: str):
    """
    Get song details
    """
    params = {'ids': id}
    return get_json(base_url + '/song/detail', params=params)
5,338,994
def test_dicom_sender_cli(test_dataset): """Test the command line interface to the DicomSender""" scp_ae_title = "PYMEDPHYSTEST" with tempfile.TemporaryDirectory() as tmp_directory: test_directory = pathlib.Path(tmp_directory) send_directory = test_directory.joinpath("send") send_directory.mkdir() send_file = send_directory.joinpath("test.dcm") test_dataset.save_as(send_file, write_like_original=False) receive_directory = test_directory.joinpath("receive") receive_directory.mkdir() sender_command = prepare_send_command(TEST_PORT, scp_ae_title, send_file) with listener_process(TEST_PORT, receive_directory, scp_ae_title) as lp: subprocess.call(sender_command) stream_output = b"" for b in iter(lambda: lp.stdout.read(1), b""): stream_output += b if b"DICOM object received" in stream_output: break dcm_files = receive_directory.glob("**/*.dcm") dcm_file = next(dcm_files) ds = pydicom.read_file(dcm_file) check_dicom_agrees(ds, test_dataset)
5,338,995
def save_data( discord_id, bga_userid="", username="", password="", purge_data=False, bga_global_options=[], tfm_global_options=[], bga_game_options={}, ): """save data.""" user_json = get_all_logins() if purge_data: # Keep username. User can rename themselves if they want. if "username" in user_json[str(discord_id)]: username = user_json[str(discord_id)]["username"] user_json[str(discord_id)] = {"username": username} else: user_json[str(discord_id)] = {} write_data(user_json) return if str(discord_id) not in user_json: user_json[str(discord_id)] = {} if bga_userid: user_json[str(discord_id)]["bga_userid"] = bga_userid if username: user_json[str(discord_id)]["username"] = username if password: user_json[str(discord_id)]["password"] = password if bga_global_options: if "bga options" not in user_json[str(discord_id)]: user_json[str(discord_id)]["bga options"] = {} user_json[str(discord_id)]["bga options"].update(bga_global_options) if tfm_global_options: if "tfm options" not in user_json[str(discord_id)]: user_json[str(discord_id)]["tfm options"] = {} user_json[str(discord_id)]["tfm options"].update(tfm_global_options) if bga_game_options: if "bga game options" not in user_json[str(discord_id)]: user_json[str(discord_id)]["bga game options"] = {} game_name = list(bga_game_options.keys())[0] if game_name not in user_json[str(discord_id)]["bga game options"]: user_json[str(discord_id)]["bga game options"][game_name] = {} user_json[str(discord_id)]["bga game options"][game_name].update(bga_game_options[game_name]) write_data(user_json)
5,338,996
def _find_odf_idx(map, position): """Find odf_idx in the map from the position (col or row). """ odf_idx = bisect_left(map, position) if odf_idx < len(map): return odf_idx return None
5,338,997
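A tiny illustration, assuming the helper above: the map holds sorted positions, and the helper returns the first index whose position is not below the query, or None past the end.

print(_find_odf_idx([0, 5, 10], 7))    # 2    (first map entry >= 7 is at index 2)
print(_find_odf_idx([0, 5, 10], 42))   # None (past the end of the map)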
def get_or_create(session, model, **kwargs):
    """
    Creates and returns an instance of the model with given kwargs, if it does not yet exist.
    Otherwise, get instance and return.

    Parameters:
        session: Current database session
        model: The Class of the database model
        **kwargs: The attributes for the desired instance

    Returns:
        (object): An object instance of the model with given kwargs
    """
    instance = session.query(model).filter_by(**kwargs).first()
    if instance:
        return instance
    else:
        instance = model(**kwargs)
        session.add(instance)
        return instance
5,338,998
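A hedged SQLAlchemy sketch of the pattern above (1.4+ import paths; the Tag model and in-memory engine are illustrative, not part of the original module).

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Tag(Base):
    __tablename__ = "tags"
    id = Column(Integer, primary_key=True)
    name = Column(String, unique=True)

engine = create_engine("sqlite:///:memory:")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

a = get_or_create(session, Tag, name="python")  # not found -> created and added
session.commit()
b = get_or_create(session, Tag, name="python")  # found -> existing row returned
print(a is b)  # True (same identity within this session)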
def pack_asn1(tag_class, constructed, tag_number, b_data): """Pack the value into an ASN.1 data structure. The structure for an ASN.1 element is | Identifier Octet(s) | Length Octet(s) | Data Octet(s) | """ b_asn1_data = bytearray() if tag_class < 0 or tag_class > 3: raise ValueError("tag_class must be between 0 and 3 not %s" % tag_class) # Bit 8 and 7 denotes the class. identifier_octets = tag_class << 6 # Bit 6 denotes whether the value is primitive or constructed. identifier_octets |= ((1 if constructed else 0) << 5) # Bits 5-1 contain the tag number, if it cannot be encoded in these 5 bits # then they are set and another octet(s) is used to denote the tag number. if tag_number < 31: identifier_octets |= tag_number b_asn1_data.append(identifier_octets) else: identifier_octets |= 31 b_asn1_data.append(identifier_octets) b_asn1_data.extend(_pack_octet_integer(tag_number)) length = len(b_data) # If the length can be encoded in 7 bits only 1 octet is required. if length < 128: b_asn1_data.append(length) else: # Otherwise the length must be encoded across multiple octets length_octets = bytearray() while length: length_octets.append(length & 0b11111111) length >>= 8 length_octets.reverse() # Reverse to make the higher octets first. # The first length octet must have the MSB set alongside the number of # octets the length was encoded in. b_asn1_data.append(len(length_octets) | 0b10000000) b_asn1_data.extend(length_octets) return bytes(b_asn1_data) + b_data
5,338,999
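A quick check against a known DER encoding, assuming the function above is importable: a universal, primitive OCTET STRING (tag class 0, tag number 4) wrapping b"hi" serializes as 04 02 68 69.

encoded = pack_asn1(tag_class=0, constructed=False, tag_number=4, b_data=b"hi")
print(encoded.hex())  # "04026869"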