content
stringlengths
22
815k
id
int64
0
4.91M
def run():
    """Default run method: solve Project Euler problem 51 with family size 8."""
    # problem51 is defined elsewhere in this project.
    return problem51(8)
5,325,800
def calc_c(e, a, b, u=1):  # Check units
    """
    Calculate the z components of the 4 partial waves in a medium.

    e: dielectric tensor (3x3, indexable as e[row][col])
    a, b: components of the wavevector in the x and y directions
    u: scalar factor multiplying ``e`` (default 1) — presumably the
       permeability; TODO confirm units with the caller.

    return a list containing 4 roots for the z components of the partial
    waves (the two roots are duplicated when the quartic degenerates to a
    quadratic, e.g. when coef4 == 0).
    """
    # assign names
    x = e * u
    x11, x12, x13 = x[0]
    x21, x22, x23 = x[1]
    x31, x32, x33 = x[2]
    # calculate the coefficients based on a symbolic expression:
    # quartic in c, coef4*c**4 + coef3*c**3 + coef2*c**2 + coef1*c + coef0 = 0
    coef4 = x33
    coef3 = a * x13 + a * x31 + b * x23 + b * x32
    coef2 = a**2*x11 + a**2*x33 + a*b*x12 + a*b*x21 + b**2*x22 + b**2*x33 - \
        x11*x33 + x13*x31 - x22*x33 + x23*x32
    coef1 = a**3*x13 + a**3*x31 + a**2*b*x23 + a**2*b*x32 + a*b**2*x13 + \
        a*b**2*x31 + a*x12*x23 - a*x13*x22 + a*x21*x32 - a*x22*x31 + b**3*x23 \
        + b**3*x32 - b*x11*x23 - b*x11*x32 + b*x12*x31 + b*x13*x21
    coef0 = a**4*x11 + a**3*b*x12 + a**3*b*x21 + a**2*b**2*x11 + a**2*b**2*x22 \
        - a**2*x11*x22 - a**2*x11*x33 + a**2*x12*x21 + a**2*x13*x31 + a*b**3*x12 + \
        a*b**3*x21 - a*b*x12*x33 + a*b*x13*x32 - a*b*x21*x33 + a*b*x23*x31 + \
        b**4*x22 - b**2*x11*x22 + b**2*x12*x21 - b**2*x22*x33 + b**2*x23*x32 + \
        x11*x22*x33 - x11*x23*x32 - x12*x21*x33 + x12*x23*x31 + x13*x21*x32 - \
        x13*x22*x31
    # calculate the roots of the quartic equation
    c = np.roots([coef4, coef3, coef2, coef1, coef0])
    # degenerate (quadratic) case: pad the two roots out to four
    if len(c) == 2:
        return np.append(c, c)
    return c
5,325,801
def printWithReplace(fileDict, opt_searchStr = '', opt_replaceStr = ''):
    """
    Prints the dictionary, applying an optional search/replace to each file name.

    @param fileDict: The file dictionary (values are iterables of file names).
    @type fileDict: dict
    @param opt_searchStr: The optional search string.
    @type opt_searchStr: string
    @param opt_replaceStr: The optional replace string.
    @type opt_replaceStr: string
    """
    printStr = '';
    # Python 2 API: dict.iteritems() and the bare print statement below.
    for key, val in fileDict.iteritems():
        printStr += '\n\n[\n';
        for fileName in val:
            printStr += '\'' + \
                fileName.replace(opt_searchStr, opt_replaceStr) + "\',\n"
        printStr += ']';
    print printStr
5,325,802
def _jax_decode(
    compressed_message: ndarray,
    tail_limit: int,
    message_len: int,
    message_shape: Sequence[int],
    codec: CrayCodec,
    cdf_state: Sequence[ndarray],
) -> Tuple[Tuple[ndarray, int], ndarray, Sequence[ndarray]]:
    """
    JAX rANS decoding function.

    At a high level, this function takes a stack of information
    (``compressed_message``) and peeks at the top of the stack to see what the
    current symbol is. After identifying the symbol, this function pops a
    number of bits from the top of the stack approximately equal to the
    information content of the symbol (i.e. ``-log(symbol probability)``).
    This is done ``message_len`` times until the full message is retrieved.

    Args:
        compressed_message: The input stack containing the compressed message.
        tail_limit: A pointer to the current end of the tail.
        message_len: The size of the message to be decoded.
        message_shape: The message shape containing the interleaved dimension
            size.
        codec: A named tuple object containing functions for push and pop
            operations, as well as an initial state of the CDF functions (for
            context-adaptive coding) and a data type specification for the
            message.
        cdf_state: The initialization state of the inverse CDF function
            (contains CDF array or can be used for conditional probabilites).

    Returns:
        A 3-tuple containing:
            The decoded messages of size ``(message_len, *message_shape)``.
            A byte array of compressed data after removing the target message.
            The final CDF state.
    """
    # Pre-allocate the output buffer; each loop iteration fills one row.
    message = jnp.zeros((message_len, *message_shape), dtype=codec.message_dtype)

    def pop_one_symbol(msg_index, vals):
        # vals is the (craymessage, message, cdf_state) carry tuple.
        return codec.pop(msg_index, *vals)

    # fori_loop keeps the decode jittable; it threads the carry through
    # message_len pop operations.
    result = lax.fori_loop(
        0,
        message_len,
        pop_one_symbol,
        (
            array_to_craymessage(compressed_message, message_shape, tail_limit),
            message,
            cdf_state,
        ),
    )
    return craymessage_to_array(result[0]), result[1], result[2]
5,325,803
def test_extract_features(waveform, flat_waveform):
    """Test the extract_features function with and without statistics from
    the all_features.yaml example config.

    Args:
        waveform (Waveform): The waveform PyTest fixture returning an example
            audio file.
        flat_waveform (Waveform): The flat_waveform PyTest fixture returning a
            flat wave of ones.
    """
    config_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        '../../example_configs/all_features.yaml'
    )
    # Use a context manager so the config file handle is closed
    # deterministically (the original leaked the handle from open()).
    with open(config_path, 'r') as config_file:
        config = yaml.full_load(config_file)
    components_list = list(config['components'])
    statistics_list = list(config['statistics'])

    output_without_statistics = extract_features(
        [waveform, flat_waveform], components_list
    )
    # Check correct return type.
    assert isinstance(
        output_without_statistics,
        pd.DataFrame
    )
    # Check that not all values are NaNs.
    assert not output_without_statistics.isnull().values.all()

    output_with_statistics = extract_features(
        [waveform, flat_waveform], components_list, statistics_list
    )
    # Check correct return type.
    assert isinstance(output_with_statistics, pd.DataFrame)
    # Check that not all values are NaNs.
    assert not output_with_statistics.isnull().values.all()
5,325,804
def subtask1_eval(_answers, _ref):
    """
    Score subtask 1 (binary classification) against a gold reference.

    :param _answers: submitted answers, mapping item id -> label ('0'/'1').
    :param _ref: reference (gold) answers, same mapping shape.
    :return: dict with TP/TN/FP/FN counts plus overall 'Accuracy'.
    """
    # Map the "<gold><predicted>" digit pair onto a confusion-matrix cell.
    outcome_of = {
        '11': 'TP',
        '00': 'TN',
        '10': 'FN',
        '01': 'FP',
    }
    stats = dict.fromkeys(('TP', 'TN', 'FN', 'FP'), 0)
    for item_id, gold in _ref.items():
        gold_label = int(gold)
        # A missing answer is scored as a negative prediction (0).
        predicted = int(_answers[item_id]) if item_id in _answers else 0
        stats[outcome_of['%d%d' % (gold_label, predicted)]] += 1
    total = stats['TP'] + stats['FP'] + stats['FN'] + stats['TN']
    stats['Accuracy'] = (stats['TP'] + stats['TN']) / total
    return stats
5,325,805
def plot_degree_histogram(G, log_yscale=True):
    """
    Plot a histogram of the node degrees of ``G``.

    :param G: nx.Graph
    :param log_yscale: if True, pyplot's y scale is logarithmic
    """
    degrees = sorted((deg for _, deg in G.degree()), reverse=True)
    plt.hist(degrees)
    if log_yscale:
        plt.yscale('log')
    plt.title("Degree Histogram")
    plt.ylabel("Count")
    plt.xlabel("Degree")
    plt.show()
5,325,806
def render_horizontal_fields(*fields_to_render, **kwargs):
    """Render given fields with optional labels.

    Splits the bound fields into hidden and visible groups based on each
    field widget's ``is_hidden`` flag.

    Keyword args:
        labels: whether labels should be rendered (default True).
        media: optional media object passed through to the template context.
    """
    hidden_fields = [bf for bf in fields_to_render if bf.field.widget.is_hidden]
    visible_fields = [bf for bf in fields_to_render if not bf.field.widget.is_hidden]
    return {
        'fields_to_render': fields_to_render,
        'hidden_fields': hidden_fields,
        'visible_fields': visible_fields,
        'labels': kwargs.get('labels', True),
        'media': kwargs.get('media'),
    }
5,325,807
def get_darwin_memory():
    """Use system-call to extract total memory on macOS (returned as float bytes)."""
    # "sysctl hw.memsize" prints e.g. "hw.memsize: 17179869184";
    # split()[1] picks out the numeric value.
    system_output = sabnzbd.newsunpack.run_simple(["sysctl", "hw.memsize"])
    return float(system_output.split()[1])
5,325,808
def parseTextModeTimeStr(timeStr):
    """ Parses the specified SMS text mode time string

    The time stamp format is "yy/MM/dd,hh:mm:ss±zz"
    (yy = year, MM = month, dd = day, hh = hour, mm = minute, ss = second,
    zz = time zone [Note: the unit of time zone is a quarter of an hour])

    @param timeStr: The time string to parse
    @type timeStr: str

    @return: datetime object representing the specified time string
    @rtype: datetime.datetime
    """
    # Everything except the 3-char zone suffix is the local timestamp.
    msgTime = timeStr[:-3]
    # Zone is in quarter-hours; int() truncates toward zero, so non-whole-hour
    # offsets (e.g. "+22" -> 5.5h) lose the fractional part.
    # NOTE(review): confirm SimpleOffsetTzInfo cannot accept fractional hours.
    tzOffsetHours = int(int(timeStr[-3:]) * 0.25)
    return datetime.strptime(msgTime, '%y/%m/%d,%H:%M:%S').replace(tzinfo=SimpleOffsetTzInfo(tzOffsetHours))
5,325,809
def main():
    """Recording the inbound traffic for a database."""
    db_config = config.DB_CONFIG
    recorder = MongoQueryRecorder(db_config)

    def signal_handler(sig, dummy):
        """Handle the Ctrl+C signal"""
        # Python 2 print statement; ask the recorder to stop all workers
        # so record() can return cleanly.
        print 'Trying to gracefully exit the program...'
        recorder.force_quit_all()

    signal.signal(signal.SIGINT, signal_handler)
    # Blocks until recording finishes or is interrupted.
    recorder.record()
5,325,810
def angle_to_rotation_matrix(angle) -> Tensor:
    """
    Create 2x2 rotation matrices from angles given in degrees.

    Args:
        angle: (Tensor): tensor of angles in degrees, any shape.

    Returns:
        Tensor: tensor of *x2x2 rotation matrices.

    Shape:
        - Input: :math:`(*)`
        - Output: :math:`(*, 2, 2)`

    Examples:
        >>> input = torch.rand(1, 3)  # Nx3
        >>> output = angle_to_rotation_matrix(input)  # Nx3x2x2
    """
    radians = angle * np.pi / 180
    cos_val = torch.cos(radians)
    sin_val = torch.sin(radians)
    # Row-major layout [[cos, sin], [-sin, cos]] after the final reshape.
    flat = torch.stack([cos_val, sin_val, -sin_val, cos_val], dim=-1)
    return flat.view(*angle.shape, 2, 2)
5,325,811
def main():
    """Find every Python file recursively inside the given directory and call
    posixlines on it."""
    parser = optparse.OptionParser("fix_endl.py directory")
    _options, args = parser.parse_args()
    if len(args) != 1:
        parser.print_help()
        parser.error("incorrect number of arguments")
    destination = args[0]
    for candidate in glob.glob(destination):
        if not os.path.isdir(candidate):
            print(candidate + " is not a valid directory.")
            continue
        print("processing: " + candidate)
        # Walk the tree and normalise line endings of every .py file.
        for root, _dirs, filenames in os.walk(candidate):
            for filename in filenames:
                if filename.endswith(".py"):
                    posixlines(os.path.join(root, filename))
5,325,812
def main(): """run-time code""" # consolidate the above steps into a single line using the index_col and parse_dates parameters of the read_csv() function opsd_daily = pd.read_csv('netTraffic.csv', index_col=0, parse_dates=True) # add some additional columns to our data # Add columns with year, month, and weekday name opsd_daily['Year'] = opsd_daily.index.year opsd_daily['Month'] = opsd_daily.index.month # required to 'pull' the day name (ex. Monday, Tuesday, happy days...) opsd_daily['Weekday Name'] = opsd_daily.index.day_name() # select data for a single day using a string such as '2017-08-10' input("\nPress ENTER to see the data for 2017-08-10") print(opsd_daily.loc['2017-08-10']) # select a slice of days, '2014-01-20':'2014-01-22' # Note that the slice is inclusive of both endpoints input("\nPress ENTER to see the data slice from 2014-01-20 to 2014-01-22") print(opsd_daily.loc['2014-01-20':'2014-01-22']) # partial-string indexing select all date/times which partially match a given string # select the entire year 2006 with opsd_daily.loc['2006'] # select the entire month of February 2012 with opsd_daily.loc['2012-02'] input("\nPress ENTER to see the data slice for 2012-02") print(opsd_daily.loc['2012-02'])
5,325,813
def extract_text():
    """Extracts text from an HTML document posted in the ``html`` form field.

    Returns:
        The parsed article text, or an empty string when parsing fails.
    """
    html = request.form['html']
    article = Article(html)
    try:
        return article.text
    except AttributeError as e:
        # NOTE: When a parsing error occurs, an AttributeError is raised.
        # We'll deal with this exception later.
        # Logger.warn() is deprecated in the stdlib logging API; warning()
        # is the supported spelling.
        log.warning(e)
        return ''
5,325,814
def get_new_account_id(event):
    """Return account id for new account events.

    Polls AWS Organizations until the CreateAccount request reaches a
    terminal state.

    Args:
        event: event dict whose detail.responseElements carries the
            createAccountStatus id.

    Raises:
        AccountCreationFailedException: when the request ends in FAILED state.
    """
    create_account_status_id = (
        event["detail"]
        .get("responseElements", {})
        .get("createAccountStatus", {})["id"]  # fmt: skip
    )
    log.info("createAccountStatus = %s", create_account_status_id)

    org = boto3.client("organizations")
    # Account creation is asynchronous; poll until SUCCEEDED or FAILED.
    while True:
        account_status = org.describe_create_account_status(
            CreateAccountRequestId=create_account_status_id
        )
        state = account_status["CreateAccountStatus"]["State"].upper()
        if state == "SUCCEEDED":
            return account_status["CreateAccountStatus"]["AccountId"]
        elif state == "FAILED":
            log.error("Account creation failed:\n%s", json.dumps(account_status))
            raise AccountCreationFailedException
        else:
            log.info(
                "Account state: %s. Sleeping 5 seconds and will try again...", state
            )
            time.sleep(5)
5,325,815
def lag_indexes(tf_stat) -> List[pd.Series]:
    """
    Calculate backward-lag index Series over the date range in ``tf_stat``.

    One Series is built per lag offset (currently 1 and 2 months). For each
    Series, the index is every date between ``tf_stat['days'][0]`` and
    ``tf_stat['days'][-1]`` and the value is the positional index of the
    lagged (earlier) date within the same range, or -1 when the lagged date
    falls outside the range.

    :param tf_stat: mapping with a 'days' sequence of datetimes; only the
        first and last entries are used as the range bounds.
    :return: list of pd.Series, one per lag offset.
    """
    date_range = pd.date_range(tf_stat['days'][0], tf_stat['days'][-1])
    # key is date, value is day index
    base_index = pd.Series(np.arange(0, len(date_range)), index=date_range)

    def lag(offset):
        dates = date_range - offset
        # Use reindex rather than fancy indexing: dates outside the range
        # must map to NaN (-> -1), whereas base_index[dates] raises KeyError
        # for missing labels on pandas >= 1.0.
        values = base_index.reindex(dates).fillna(-1).astype(np.int16).values
        return pd.Series(data=values, index=date_range)

    return [lag(pd.DateOffset(months=m)) for m in (1, 2)]
5,325,816
def weather_outfit(req): """Returns a string containing text with a response to the user with a indication if the outfit provided is appropriate for the current weather or a prompt for more information Takes a city, outfit and (optional) dates uses the template responses found in weather_responses.py as templates and the outfits listed in weather_entities.py """ # validate request parameters, return an error if there are issues error, forecast_params = validate_params(req['queryResult']['parameters']) if error: return error # Validate that there are the required parameters to retrieve a forecast if not forecast_params['outfit']: return 'What are you planning on wearing?' # create a forecast object which retrieves the forecast from a external API try: forecast = Forecast(forecast_params) # return an error if there is an error getting the forecast except (ValueError, IOError) as error: return error return forecast.get_outfit_response()
5,325,817
def get_GEOS5_data_cubes4campaign(ds=None, year=2018, region='Cape_Verde',
                                  doys2use=None, vars2use=None,
                                  collection='inst3_3d_aer_Nv',
                                  limit_lvls=False, limit_lons=False,
                                  folder=None):
    """
    Extract cubes of data for region of interest during campaign period

    Parameters
    ----------
    ds: optional pre-opened xarray Dataset (fetched via OPeNDAP when None)
    year: campaign year used to select the dates
    region: 'Cape_Verde' (spatially subset) or 'global'
    doys2use: optional list of days-of-year to save (all days in ds if None)
    vars2use: optional variable names (defaults to dust bins du001..du005)
    collection: GEOS-5 collection name
    limit_lvls, limit_lons: flags reported in the progress message
    folder: output directory for the netCDF files (defaults to './').
        Bug fix: this name was previously read but never defined, which
        raised NameError at runtime; it is now a keyword argument, keeping
        the call signature backward compatible.
    """
    # Which variables to use
    if isinstance(vars2use, type(None)):
        vars2use = ['du{:0>3}'.format(i) for i in range(1, 6)]
    # Setup lists of days to use
    dates2use = get_dates4campaigns(year=year)
    print('Using dates:', dates2use)
    # - Get the data as dataset via OPeNDAP
    if isinstance(ds, type(None)):
        ds = AC.get_GEOS5_as_ds_via_OPeNDAP(collection=collection)
    # - Subset the dataset by date - Only consider dates in dates2use list.
    # Use the datetime components via pandas as not yet fully in xarray.
    df = pd.DataFrame(index=ds['time'].values)
    df['date'] = df.index.date
    date_bool = df['date'].isin(dates2use).values
    ds = ds.isel(time=date_bool)
    # - Subset the dataset by region
    if region == 'Cape_Verde':
        # only consider the region the plane will fly through +2 days transport
        # 20W-55E, 40S-40N,
        bool1 = ((ds.lon >= -35) & (ds.lon <= 10)).values
        bool2 = ((ds.lat >= 0) & (ds.lat <= 60)).values
        # Cut by lon, then lat
        ds = ds.isel(lon=bool1)
        ds = ds.isel(lat=bool2)
    elif region == 'global':
        pass
    else:
        print('WARNING: exiting as no region set')
        sys.exit()
    # State that all data (cuboids) are being extracted
    extr_str = 'ALL_lvls'
    # - Save the data by variable and day of year to reduce packet size of transfers
    # Where to save?
    if isinstance(folder, type(None)):
        folder = './'
    # Save format for data (region, year, doy, variable)
    filestr = 'ARNA_GEOS5_{}_{}_{}_{:0>3}_{}_{}.nc'
    if isinstance(doys2use, type(None)):
        doys2use = list(set(ds['time.dayofyear'].values))
    # Make an extra string to print what slicing the data has had
    if limit_lons and limit_lvls:
        slice_str = 'sliced by lvls+lons'
    elif limit_lvls:
        slice_str = 'sliced by lvls'
    elif limit_lons:
        slice_str = 'sliced by lons'
    else:
        slice_str = ''
    # Loop by var and save
    for var in vars2use:
        # Now loop and save files in single day units
        print("Getting data via OPeNDAP for var '{}' {}".format(var, slice_str))
        for doy in doys2use:
            doy_int = int(float(doy))
            # Separate name for the per-doy template so the slicing message
            # above is not clobbered between variables (was reusing 'pstr').
            doy_pstr = "Getting data via OPeNDAP for doy '{}' ({})"
            print(doy_pstr.format(doy_int, var))
            # Subset for a single day
            ds_tmp = ds[var].sel(time=ds['time.dayofyear'] == doy)
            # Get the year for the day in the model output
            year = list(set(ds_tmp['time.year'].values))[0]
            # now save this single file
            savename = filestr.format(collection, region, year, doy_int, var,
                                      extr_str)
            print(folder+savename)
            ds_tmp.to_netcdf(folder+savename)
            # Do some memory management
            del ds_tmp
            gc.collect()
5,325,818
def WriteTo(linelist, outfile, SoP):
    """Writes the sorted list to a CSV file with name outfile.

    :param linelist: list of record objects carrying the address attributes
    :param outfile: output file name
    :param SoP: 'P' writes the full primary record fields,
        'S' writes the short (numbers/spaced_numbers/letters) form
    """
    # ``with`` guarantees the handle is closed on every path — the original
    # leaked it when SoP was neither 'P' nor 'S'.
    with open(str(outfile), 'w') as out:
        if SoP == 'P':
            for record in linelist:
                fields = (record.borough, record.block, record.lot,
                          record.numbers, record.spaced_numbers,
                          record.letters, record.BBL, record.x, record.y,
                          record.E_des)
                # NOTE(review): no newline between records, matching the
                # original output byte-for-byte — confirm whether rows
                # should instead be newline-separated for a real CSV.
                out.write(','.join(fields))
        elif SoP == 'S':
            # Bug fix: the original iterated an undefined name ``lineList``
            # (NameError); iterate the actual parameter instead.
            for record in linelist:
                out.write(record.numbers + ',')
                out.write(record.spaced_numbers + ',')
                out.write(record.letters + ',')
5,325,819
def _poll_scheduler_status(config, asg_name, scheduler_module, instance_properties):
    """
    Verify scheduler status and ask the ASG new nodes, if required.

    Runs forever, re-evaluating once per minute.

    :param config: JobwatcherConfig object
    :param asg_name: ASG name
    :param scheduler_module: scheduler module
    :param instance_properties: instance properties
    """
    while True:
        # Get number of nodes requested
        pending = scheduler_module.get_required_nodes(instance_properties)

        if pending < 0:
            # Negative count signals a detection failure, not "no work".
            log.critical("Error detecting number of required nodes. The cluster will not scale up.")
        elif pending == 0:
            log.info("There are no pending jobs. Noop.")
        else:
            # Get current number of nodes
            running = scheduler_module.get_busy_nodes(instance_properties)
            log.info("%d nodes requested, %d nodes running", pending, running)
            # get current limits
            _, current_desired, max_size = get_asg_settings(config.region, config.proxy_config, asg_name, log)

            # Check to make sure requested number of instances is within ASG limits
            required = running + pending
            if required <= current_desired:
                log.info("%d nodes required, %d nodes in asg. Noop" % (required, current_desired))
            else:
                if required > max_size:
                    log.info(
                        "The number of required nodes %d is greater than max %d. Requesting max %d."
                        % (required, max_size, max_size)
                    )
                else:
                    log.info(
                        "Setting desired to %d nodes, requesting %d more nodes from asg."
                        % (required, required - current_desired)
                    )
                # Never ask for more than the ASG's configured maximum.
                requested = min(required, max_size)

                # update ASG
                asg_client = boto3.client('autoscaling', region_name=config.region, config=config.proxy_config)
                asg_client.update_auto_scaling_group(AutoScalingGroupName=asg_name, DesiredCapacity=requested)

        time.sleep(60)
5,325,820
def parse_paragraphs(record):
    """
    Split a paragraphs record into per-sentence records.

    :param record: dict with 'key' ("paragraphs:<article_id>") and
        'value' (the paragraph text)
    :return: list of dicts with 'key', 'idx' and 'value' per sentence
    """
    # Imported lazily, mirroring the original, so the module loads even when
    # sentence_splitter is absent.
    from sentence_splitter import SentenceSplitter

    splitter = SentenceSplitter(language='en')
    sentences = splitter.split(record['value'])
    article_id = remove_prefix(record['key'], 'paragraphs:')
    sentence_key = 'sentence:' + article_id
    return [
        {'key': sentence_key, 'idx': str(position), 'value': sentence}
        for position, sentence in enumerate(sentences)
    ]
5,325,821
def get_stage_environment() -> str:
    """
    Indicates whether the source is running as PRD or DEV.
    Accounts for the user preference via TEST_WORKING_STAGE.

    :return: One of the STAGE_* constants.
    """
    # TEST_WORKING_STAGE is a module-level override set elsewhere in this file.
    return TEST_WORKING_STAGE
5,325,822
def get_all_table_acls(conn, schema=None):
    """Get privileges for all tables, views, materialized views, and foreign
    tables.

    Specify `schema` to limit the results to that schema.

    :param conn: open database connection with an ``execute`` method.
    :param schema: optional schema name to filter on.

    Returns:
        List of :class:`~.types.SchemaRelationInfo` objects.
    """
    # Build the privilege query once, then map each row onto the info type.
    stmt = _table_stmt(schema=schema)
    return [SchemaRelationInfo(**row) for row in conn.execute(stmt)]
5,325,823
async def test_effect_template(hass, expected_effect, start_ha):
    """Test the template for the effect."""
    # start_ha fixture boots Home Assistant with the template light config;
    # expected_effect is the parametrized expectation.
    state = hass.states.get("light.test_template_light")
    assert state is not None
    assert state.attributes.get("effect") == expected_effect
5,325,824
def add_atom_map(molecule, **kwargs):
    """
    Add canonical ordered atom map to molecule

    Parameters
    ----------
    molecule : `oechem.OEMol` or `rdkit.Chem.Mol`

    Returns
    -------
    molecule with map indices
    """
    # Dispatch to the OpenEye or RDKit backend based on the molecule's type.
    toolkit = _set_toolkit(molecule)
    return toolkit.add_atom_map(molecule, **kwargs)
5,325,825
def build_put_dictionary_request(*, json: Any = None, content: Any = None, **kwargs: Any) -> HttpRequest:
    """Put External Resource as a Dictionary.

    See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this
    request builder into your code flow.

    :keyword json: Pass in a JSON-serializable object (usually a dictionary). See the template in
     our example to find the input shape. External Resource as a Dictionary to put.
    :paramtype json: any
    :keyword content: Pass in binary content you want in the body of the request (typically bytes,
     a byte iterator, or stream input). External Resource as a Dictionary to put.
    :paramtype content: any
    :return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
     `send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
     incorporate this response into your code flow.
    :rtype: ~azure.core.rest.HttpRequest

    Example:
        .. code-block:: python

            # JSON input template you can fill out and use as your body input.
            json = {
                "str": {
                    "id": "str",  # Optional. Resource Id.
                    "location": "str",  # Optional. Resource Location.
                    "name": "str",  # Optional. Resource Name.
                    "properties": {
                        "p.name": "str",  # Optional.
                        "provisioningState": "str",  # Optional.
                        "provisioningStateValues": "str",  # Optional. Possible values
                          include: "Succeeded", "Failed", "canceled", "Accepted", "Creating",
                          "Created", "Updating", "Updated", "Deleting", "Deleted", "OK".
                        "type": "str"  # Optional.
                    },
                    "tags": {
                        "str": "str"  # Optional. A set of tags. Dictionary of
                          :code:`<string>`.
                    },
                    "type": "str"  # Optional. Resource Type.
                }
            }
    """
    # Caller may force a Content-Type; otherwise none is sent.
    content_type = kwargs.pop("content_type", None)  # type: Optional[str]
    accept = "application/json"

    # Construct URL
    url = kwargs.pop("template_url", "/model-flatten/dictionary")

    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        header_parameters["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")

    # Remaining kwargs flow straight through to the HttpRequest constructor.
    return HttpRequest(method="PUT", url=url, headers=header_parameters, json=json, content=content, **kwargs)
5,325,826
def midpoint(rooms):
    """
    Helper function to help find the midpoint between the two rooms.

    Args:
        rooms: sequence of four room coordinates (indexed 0..3).

    Returns:
        tuple[int, int]: the (x, y) midpoint.
    """
    # NOTE(review): formula kept exactly as the original computed it —
    # confirm the intended coordinate convention with the caller.
    mid_x = rooms[0] + (rooms[0] + rooms[2]) // 2
    mid_y = rooms[1] + (rooms[1] + rooms[3]) // 2
    return mid_x, mid_y
5,325,827
def secondSolution( fixed, c1, c2, c3 ):
    """
    If given four tangent circles, calculate the other one that is tangent
    to the last three.

    Uses Descartes' circle theorem: the two curvatures tangent to the same
    three circles satisfy k + k' = 2*(k1 + k2 + k3); the complex form of the
    theorem gives the new center via curvature-weighted centers.

    @param fixed: The fixed circle touches the other three, but not the one
        to be calculated.
    @param c1, c2, c3: Three circles to which the other tangent circle is to
        be calculated.
    @type fixed: L{Circle}
    @type c1: L{Circle}
    @type c2: L{Circle}
    @type c3: L{Circle}
    @return: The circle.
    @rtype: L{Circle}
    """
    curf = fixed.curvature()
    cur1 = c1.curvature()
    cur2 = c2.curvature()
    cur3 = c3.curvature()

    # Second-solution curvature from Descartes' theorem.
    curn = 2 * (cur1 + cur2 + cur3) - curf
    # Complex Descartes theorem: same linear relation on curvature*center
    # (c.m is the circle center as a complex number).
    mn = (2 * (cur1*c1.m + cur2*c2.m + cur3*c3.m) - curf*fixed.m ) / curn
    return Circle( mn.real, mn.imag, 1/curn )
5,325,828
def generate_concept_chain(concept_desc, sequential):
    """
    Given a dict of available concepts, build the ordered list of concept ids
    making up the stream, together with the total sample count.

    Concepts are laid out in appearance rounds: in round r, every concept
    whose ``appearences`` exceeds r contributes one slot (in dict order).

    Parameters
    ----------
    concept_desc: dict
        Maps concept id to a description object exposing ``id``,
        ``appearences`` and ``examples_per_appearence``.
    sequential: bool
        If true, concept transitions are determined by ID without randomness;
        otherwise the chain is shuffled.

    Returns
    -------
    tuple
        (list of concept ids, total number of samples)
    """
    concept_chain = []
    num_samples = 0
    round_index = 0
    while True:
        # Concepts that still have an appearance left for this round.
        remaining = [c for c in concept_desc.values()
                     if c.appearences > round_index]
        if not remaining:
            break
        for concept in remaining:
            concept_chain.append(concept.id)
            num_samples += concept.examples_per_appearence
        round_index += 1
    if not sequential:
        random.shuffle(concept_chain)
    return concept_chain, num_samples
5,325,829
def get_inputs(input_queue,
               num_classes,
               merge_multiple_label_boxes=False,
               use_multiclass_scores=False):
    """Dequeues batch and constructs inputs to object detection model.

    Args:
      input_queue: BatchQueue object holding enqueued tensor_dicts.
      num_classes: Number of classes.
      merge_multiple_label_boxes: Whether to merge boxes with multiple labels
        or not. Defaults to false. Merged boxes are represented with a single
        box and a k-hot encoding of the multiple labels associated with the
        boxes.
      use_multiclass_scores: Whether to use multiclass scores instead of
        groundtruth_classes.

    Returns:
      images: a list of 3-D float tensor of images.
      image_keys: a list of string keys for the images.
      locations_list: a list of tensors of shape [num_boxes, 4] containing
        the corners of the groundtruth boxes.
      classes_list: a list of padded one-hot (or K-hot) float32 tensors
        containing target classes.
      masks_list: a list of 3-D float tensors of shape
        [num_boxes, image_height, image_width] containing instance masks for
        objects if present in the input_queue. Else returns None.
      keypoints_list: a list of 3-D float tensors of shape
        [num_boxes, num_keypoints, 2] containing keypoints for objects if
        present in the input queue. Else returns None.
      weights_lists: a list of 1-D float32 tensors of shape [num_boxes]
        containing groundtruth weight for each box.
    """
    read_data_list = input_queue.dequeue()
    # Labels arrive 1-indexed; shift to 0-indexed before one-hot encoding.
    label_id_offset = 1

    def extract_images_and_targets(read_data):
        """Extract images and targets from the input dict."""
        image = read_data[fields.InputDataFields.image]
        key = ''
        if fields.InputDataFields.source_id in read_data:
            key = read_data[fields.InputDataFields.source_id]
        location_gt = read_data[fields.InputDataFields.groundtruth_boxes]
        classes_gt = tf.cast(read_data[fields.InputDataFields.groundtruth_classes],
                             tf.int32)
        classes_gt -= label_id_offset
        if merge_multiple_label_boxes and use_multiclass_scores:
            # NOTE(review): the two adjacent literals concatenate without a
            # space ("...scores isnot supported").
            raise ValueError(
                'Using both merge_multiple_label_boxes and use_multiclass_scores is'
                'not supported'
            )
        if merge_multiple_label_boxes:
            location_gt, classes_gt, _ = util_ops.merge_boxes_with_multiple_labels(
                location_gt, classes_gt, num_classes)
            classes_gt = tf.cast(classes_gt, tf.float32)
        elif use_multiclass_scores:
            classes_gt = tf.cast(read_data[fields.InputDataFields.multiclass_scores],
                                 tf.float32)
        else:
            classes_gt = util_ops.padded_one_hot_encoding(
                indices=classes_gt, depth=num_classes, left_pad=0)
        masks_gt = read_data.get(fields.InputDataFields.groundtruth_instance_masks)
        keypoints_gt = read_data.get(fields.InputDataFields.groundtruth_keypoints)
        if (merge_multiple_label_boxes and (
                masks_gt is not None or keypoints_gt is not None)):
            raise NotImplementedError('Multi-label support is only for boxes.')
        weights_gt = read_data.get(
            fields.InputDataFields.groundtruth_weights)
        return (image, key, location_gt, classes_gt, masks_gt, keypoints_gt,
                weights_gt)

    # Transpose per-example tuples into the per-field lists described above.
    return zip(*map(extract_images_and_targets, read_data_list))
5,325,830
def merge_metadata(m1, m2):
    """Merge mapping ``m2`` (str -> set) into ``m1`` in place.

    After the call, ``m1[key]`` holds the union of both wheels' values for
    every key. The two mappings must have exactly the same key set.

    Raises:
        BuildError: if the key sets differ.
    """
    keys1, keys2 = set(m1), set(m2)
    if keys1 != keys2:
        raise BuildError(
            f'Wheels have conflicting metadata and cannot be merged. '
            f'One wheel has keys {keys1!r} while another has keys {keys2!r}.'
        )
    for key, extra in m2.items():
        m1[key] |= extra
5,325,831
def doFilter(pTable, proxyService):
    """
    filter candidates by column header

    candidates
    - column headers are kept; for each target column with a selected header
      candidate, fetch that candidate's type hierarchy and remove all CEA
      (cell) candidates whose types do not intersect the supported types
      (the header candidate's own type plus all of its ancestors)
    - only filter for columns that are part of the targets (if activated)

    :param pTable: table object exposing getCols/getCells/isTarget
    :param proxyService: service exposing get_hierarchy_for_lst
    :return: True when at least one candidate was removed, else False
    """
    # keep track, if this changed anything (the original always returned
    # False even after removing candidates — fixed here)
    changed = False

    # process each column separately
    for col in pTable.getCols(unsolved=False):
        if not col['sel_cand']:
            continue
        # check, if we have to process this column at all
        if not pTable.isTarget(col_id=col['col_id']):
            continue

        # supported types = selected candidate's type and all its ancestors
        sel_uri = col['sel_cand']['uri']
        hierarchy = proxyService.get_hierarchy_for_lst.send([sel_uri])
        typesSupported = {sel_uri}
        for parentList in hierarchy.values():
            typesSupported.update(item['parent'] for item in parentList)

        # remove all CEA candidates that are not associated with any
        # remaining type. Rebuild the list instead of calling remove()
        # inside "for cand in cell['cand']" — the original mutated the list
        # while iterating it, silently skipping the element after every
        # removal. Candidates without a 'types' key are removed, matching
        # the original KeyError branch.
        for cell in pTable.getCells(col_id=col['col_id']):
            kept = [
                cand for cand in cell['cand']
                if any(t in typesSupported for t in cand.get('types', []))
            ]
            if len(kept) != len(cell['cand']):
                # slice-assign to preserve the list object's identity
                cell['cand'][:] = kept
                changed = True

    # done
    return changed
5,325,832
def int_or_float(x):
    """Convert `x` to either `int` or `float`, preferring `int`.

    Raises:
        ValueError : If `x` is not convertible to either `int` or `float`
    """
    try:
        return int(x)
    except ValueError:
        # Fall through; a second failure propagates float's ValueError.
        pass
    return float(x)
5,325,833
def show(scheme, hexa=cube_points([0.0, 1.0], [0.0, 1.0], [0.0, 1.0]), backend="mpl"):
    """Shows the quadrature points on a given hexahedron. The size of the
    balls around the points coincides with their weights.

    scheme: quadrature scheme exposing ``points`` and ``weights``
    hexa: 2x2x2 array of hexahedron corner points (unit cube by default;
        note the default is evaluated once at import time)
    backend: plotting backend key into helpers.backend_to_function
    """
    # The 12 edges of the hexahedron: bottom face, top face, then verticals.
    edges = numpy.array(
        [
            [hexa[0, 0, 0], hexa[1, 0, 0]],
            [hexa[1, 0, 0], hexa[1, 1, 0]],
            [hexa[1, 1, 0], hexa[0, 1, 0]],
            [hexa[0, 1, 0], hexa[0, 0, 0]],
            #
            [hexa[0, 0, 1], hexa[1, 0, 1]],
            [hexa[1, 0, 1], hexa[1, 1, 1]],
            [hexa[1, 1, 1], hexa[0, 1, 1]],
            [hexa[0, 1, 1], hexa[0, 0, 1]],
            #
            [hexa[0, 0, 0], hexa[0, 0, 1]],
            [hexa[1, 0, 0], hexa[1, 0, 1]],
            [hexa[1, 1, 0], hexa[1, 1, 1]],
            [hexa[0, 1, 0], hexa[0, 1, 1]],
        ]
    )
    edges = numpy.moveaxis(edges, 1, 2)
    # Delegate drawing to the selected backend; the integral of 1 gives the
    # reference volume used for scaling.
    helpers.backend_to_function[backend](
        transform(scheme.points.T, hexa),
        scheme.weights,
        integrate(lambda x: 1.0, hexa, scheme),
        edges,
    )
    return
5,325,834
def setup_torch_process_group(
    backend: str,
    world_rank: int,
    world_size: int,
    init_method: str,
    timeout_s: int = 1800,
):
    """Connects the distributed PyTorch backend.

    Args:
        backend (str): The backend (nccl, gloo, etc.) to use for training.
        world_rank (int): Rank of the current worker.
        world_size (int): Number of workers participating in the job.
        init_method (str): URL specifying how to initialize the process group.
        timeout_s (int): Seconds for process group operations to timeout.
    """
    logger.info(
        f"Setting up process group for: {init_method} [rank={world_rank}, "
        f"world_size={world_size}]"
    )
    logger.debug(f"using {backend}")
    # NCCL hangs silently on peer failure unless blocking wait is enabled;
    # only set it when the user has not chosen a value themselves.
    if backend == "nccl" and "NCCL_BLOCKING_WAIT" not in os.environ:
        logger.debug(
            "Setting NCCL_BLOCKING_WAIT for detecting node failure. "
            "To override this behavior, you can set NCCL_BLOCKING_WAIT=0."
        )
        os.environ["NCCL_BLOCKING_WAIT"] = "1"
    dist.init_process_group(
        backend=backend,
        init_method=init_method,
        rank=world_rank,
        world_size=world_size,
        timeout=timedelta(seconds=timeout_s),
    )
5,325,835
def request_factory():
    """Pytest fixture setup: provide a fresh Django RequestFactory instance."""
    return RequestFactory()
5,325,836
def send_tls(text, subject="Testing", address=None):
    """
    Send encrypted via TLS.

    May raise (from smtplib):
    SMTPHeloError          The server didn't reply properly to the HELO greeting.
    SMTPNotSupportedError  The server does not support the STARTTLS extension.
    RuntimeError           SSL/TLS support is not available to your Python
                           interpreter.
    """
    msg = get_message( text, subject, address)
    with smtplib.SMTP(
            config['email']['smtp_host'],
            smtp_port ) as s:
        try:
            # NOTE(review): protocol debugging and the password print below
            # are left enabled — presumably development leftovers; confirm
            # before production use.
            s.set_debuglevel(2)
            # keyfile, certfile, context
            s.starttls()
            s.ehlo()
            if 'username' in config['email']:
                print("login '{}' '{}' ".format(
                    config['email']['username'],
                    config['email']['password'])
                )
                s.login(user=config['email']['username'],
                        password=config['email']['password'])
            s.send_message(msg)
        except SMTPRecipientsRefused as err:
            print("ERROR: send failed {}".format(err))
5,325,837
def vegaGraphics( cmdTag, id1, id2, parameters, sql, transformedData, verbose,): """Create interactive charts for specified data""" # making function more explicit cmdTag = cmdTag id1 = id1 id2 = id2 parameters = parameters sql = sql transformedData = transformedData verbose = verbose if verbose >= 1: print( "Creating Vega Graphics" ) transformedData = transformedData.rename( columns = { id1 : "id1", id2 : "id2", sql : "sql", cmdTag : "cmdTag", parameters : "parameters"}) dataInfo = transformedData.copy() data = transformedData[["total_duration", "cmdTag", "id1", "id2", "sql", "parameters"]].copy() data = data.sort_values(by = ["total_duration"], ascending = True, inplace = False).dropna().reset_index(drop = True) data["length"] = data["sql"].str.len() + data["parameters"].str.len() alt.data_transformers.disable_max_rows() brush = alt.selection_interval() # -----> create the scatter plot graph line = alt.Chart(data.reset_index()).mark_point().encode( x = alt.X( "length:Q", axis = alt.Axis(title = "Query Length")), y=alt.Y( "total_duration:Q", axis = alt.Axis(title = "Latency (ms)")), color = alt.condition( brush, "cmdTag:N", alt.value("lightgray")), shape = "cmdTag:N", tooltip = ["index:O", "total_duration:Q", "length:Q", "log_time_with_tz:N", "sql:N", "parameters:N", "cmdTag:N", "id1:N", "id2:N"] ).properties( width = 500, height = 500, title = "Einherjar Queries" ).add_selection( brush ).interactive() # -----> display the mean via a line across our chart rule = alt.Chart(data).mark_rule(color = "red").encode( y = "median(total_duration):Q", size = alt.value(2) ) alt.Chart(data).configure_title( fontSize = 30 ) # -----> display number of interations per table insert dog = dataInfo[["inserted_data", "cmdTag"]].dropna() bars1 = alt.Chart(dog).mark_bar().encode( y = "inserted_into:N", color = "cmdTag:N", x = "count(inserted_into):Q" ).transform_filter( brush ) # -----> display number of interations per table select cat = dataInfo[["selected_from", "cmdTag"]].dropna() 
bars2 = alt.Chart(cat).mark_bar().encode( y = "selected_from:N", color = "cmdTag:N", x = "count(selected_from):Q" ).transform_filter( brush ) # -----> add the line and rule charts to the base chart chart = line + rule chart = chart & bars1 & bars2 chart.save("results/data.json") chart.save("results/data.html") if verbose >= 1: print( "Vega Graphics have been completed" )
5,325,838
def is_onehotencoded(x):
    """If input is a one-hot encoded representation of some set of values.

    A one-hot encoding is a 2-D array whose entries are all exactly 0 or 1,
    with exactly one 1 per row.

    Parameters
    ----------
    x : array-like
        Candidate array; must expose ``ndim`` (i.e. be a numpy array).

    Returns
    -------
    bool
        Whether `x` is a one-hot encoded / categorical representation.
    """
    if x.ndim != 2:
        return False
    fractional, integral = np.modf(x)
    # Any nonzero fractional part disqualifies the array. (The previous
    # ``fractional.sum() != 0`` check wrongly accepted arrays whose
    # fractional parts cancel, e.g. rows containing 1.5 and -0.5.)
    if np.any(fractional != 0):
        return False
    # Every entry must be 0 or 1.
    if not np.array_equal(integral, integral.astype(bool)):
        return False
    # Exactly one hot entry per row.
    return bool(np.all(integral.sum(axis=1) == 1))
5,325,839
def test_empty_project(project_test, qtbot):
    """Check that creating an empty project writes every expected config file."""
    project_dir, project = project_test
    assert project.root_path == str(project_dir)

    # Give the project a moment to flush its configuration to disk.
    qtbot.wait(3000)

    config_path = os.path.join(project.root_path, '.spyproject', 'config')
    for name in (WORKSPACE, CODESTYLE, ENCODING, VCS):
        files = os.listdir(config_path)
        assert name + '.ini' in files
5,325,840
def sequence_of_words(fname_doc, dictionary):
    """Compute the Sequence-of-Words of a document against a dictionary.

    Parameters
    ----------
    fname_doc :
        Path of the text document passed to ``loadtxt``.
    dictionary : dict
        Mapping word -> metadata dict; each entry must contain an "id" key.
        Words missing from the dictionary are skipped.

    Returns
    -------
    array
        Dictionary ids of the recognized words, in document order.
    """
    txtdata = loadtxt(fname_doc)
    words = extract_keyword(txtdata, "all")
    # Keep only words present in the dictionary, preserving document order.
    # (Replaces a loop with an unused enumerate index, a `.keys()` membership
    # test, and a leftover per-word debug print.)
    sow = [dictionary[word]["id"] for word in words if word in dictionary]
    return sp.array(sow)
5,325,841
def shd(B_est, B_true):
    """Compute various accuracy metrics for B_est.

    true positive = predicted association exists in condition in correct direction
    reverse = predicted association exists in condition in opposite direction
    false positive = predicted association does not exist in condition

    Args:
        B_true (np.ndarray): [d, d] ground truth graph, {0, 1}
        B_est (np.ndarray): [d, d] estimate, {0, 1, -1}, -1 is undirected edge in CPDAG

    Returns:
        dict with keys:
            fdr: (reverse + false positive) / prediction positive
            tpr: (true positive) / condition positive
            fpr: (reverse + false positive) / condition negative
            prc: precision = true positive / (true positive + reverse + false positive)
            rec: recall (same value as tpr)
            shd: undirected extra + undirected missing + reverse
            shd_wc: shd counting every undirected predicted edge as an error
            nnz: prediction positive

    Raises:
        ValueError: if B_est contains values outside the allowed set, or an
            undirected edge is encoded twice (both triangle halves are -1).
    """
    # Validate the estimate's encoding before computing anything.
    if (B_est == -1).any():  # cpdag
        if not ((B_est == 0) | (B_est == 1) | (B_est == -1)).all():
            raise ValueError('B_est should take value in {0,1,-1}')
        # An undirected edge must be stored in only one triangle half.
        if ((B_est == -1) & (B_est.T == -1)).any():
            raise ValueError('undirected edge should only appear once')
    else:  # dag
        if not ((B_est == 0) | (B_est == 1)).all():
            raise ValueError('B_est should take value in {0,1}')
        #if not is_dag(B_est):
        #    raise ValueError('B_est should be a DAG')
    d = B_true.shape[0]
    # linear index of nonzeros
    pred_und = np.flatnonzero(B_est == -1)
    pred = np.flatnonzero(B_est == 1)
    cond = np.flatnonzero(B_true)
    cond_reversed = np.flatnonzero(B_true.T)
    # The skeleton ignores direction: an edge and its reverse both count.
    cond_skeleton = np.concatenate([cond, cond_reversed])
    # true pos
    true_pos = np.intersect1d(pred, cond, assume_unique=True)
    # treat undirected edge favorably
    true_pos_und = np.intersect1d(pred_und, cond_skeleton, assume_unique=True)
    true_pos = np.concatenate([true_pos, true_pos_und])
    # false pos
    false_pos = np.setdiff1d(pred, cond_skeleton, assume_unique=True)
    false_pos_und = np.setdiff1d(pred_und, cond_skeleton, assume_unique=True)
    false_pos = np.concatenate([false_pos, false_pos_und])
    # reverse: predicted edges absent from the truth but present reversed
    extra = np.setdiff1d(pred, cond, assume_unique=True)
    reverse = np.intersect1d(extra, cond_reversed, assume_unique=True)
    # compute ratio (max(.., 1) guards against division by zero)
    pred_size = len(pred) + len(pred_und)
    cond_neg_size = 0.5 * d * (d - 1) - len(cond)
    fdr = float(len(reverse) + len(false_pos)) / max(pred_size, 1)
    tpr = float(len(true_pos)) / max(len(cond), 1)
    fpr = float(len(reverse) + len(false_pos)) / max(cond_neg_size, 1)
    # structural hamming distance, computed on the undirected skeleton:
    # B + B^T symmetrizes, tril keeps one triangle per edge.
    pred_lower = np.flatnonzero(np.tril(B_est + B_est.T))
    cond_lower = np.flatnonzero(np.tril(B_true + B_true.T))
    extra_lower = np.setdiff1d(pred_lower, cond_lower, assume_unique=True)
    missing_lower = np.setdiff1d(cond_lower, pred_lower, assume_unique=True)
    shd = len(extra_lower) + len(missing_lower) + len(reverse)
    # worst-case variant: every undirected prediction counted as an error
    shd_wc = shd + len(pred_und)
    prc = float(len(true_pos)) / max(float(len(true_pos)+len(reverse) + len(false_pos)), 1.)
    rec = tpr
    return {'fdr': fdr, 'tpr': tpr, 'fpr': fpr, 'prc': prc, 'rec' : rec, 'shd': shd, 'shd_wc': shd_wc, 'nnz': pred_size}
5,325,842
def save_adv_examples(data, **kwargs):
    """
    Serialize generated adversarial examples to an npy file.
    :param data: the adversarial examples to store.
    :param kwargs: information needed to construct the file name
    :return: na
    """
    prefix = kwargs.get('prefix', 'test')
    dataset = kwargs.get('dataset', 'cifar10')
    architect = kwargs.get('architect', 'cnn')
    transformation = kwargs.get('transformation', 'clean')
    attack_method = kwargs.get('attack_method', 'fgsm')
    attack_params = kwargs.get('attack_params', None)
    bs_samples = kwargs.get('bs_samples', None)

    # File name layout: <prefix>_AE-<dataset>-<arch>-<transform>-<attack>_<params>.npy
    attack_info = f'{attack_method}_{attack_params}'
    model_info = f'{dataset}-{architect}-{transformation}'
    file_name = f'{prefix}_AE-{model_info}-{attack_info}.npy'
    np.save(f'{PATH.ADVERSARIAL_FILE}/{file_name}', data)

    if MODE.DEBUG:
        title = f'{model_info}-{attack_info}'
        if bs_samples is not None:
            # Plot the benign samples next to their adversarial counterparts.
            plot_comparisons(bs_samples[:10], data[:10], title)
        else:
            print('Print AEs only')
            plot_comparisons(data[:10], data[10:20], title)

    print(f'Adversarial examples saved to {PATH.ADVERSARIAL_FILE}/{file_name}.')
5,325,843
def test_separate_networks(
        configs: dict[str, Any],
        make_plots: bool = True,
        **kwargs,
) -> TestOutputs:
    """Test training on separate networks."""
    start = time.time()
    logger.info(f'Testing separate networks')

    # Work on a deep copy so the caller's configs are never mutated.
    cfgs = dict(copy.deepcopy(configs))
    cfgs['dynamics_config']['separate_networks'] = True

    train_out = train(cfgs, make_plots=make_plots,
                      verbose=False, num_chains=4, **kwargs)

    # Inference only runs on the rank-0 process.
    run_out = None
    if RANK == 0:
        run_out = run(train_out.dynamics, cfgs,
                      x=train_out.x,
                      runs_dir=os.path.join(train_out.logdir, 'inference'),
                      make_plots=make_plots)

    logger.info(f'Passed! Took: {time.time() - start:.4f} seconds')
    return TestOutputs(train_out, run_out)
5,325,844
async def test_already_configured(hass):
    """Test we reject already configured devices."""
    entry = MockConfigEntry(
        domain=DOMAIN, data=FIXTURE_USER_INPUT, title="Already configured"
    )
    entry.add_to_hass(hass)

    # Tweak URL a bit to check that doesn't fail duplicate detection
    user_input = {
        **FIXTURE_USER_INPUT,
        CONF_URL: FIXTURE_USER_INPUT[CONF_URL].replace("http", "HTTP"),
    }
    result = await hass.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_USER},
        data=user_input,
    )

    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
5,325,845
def main(models, datasets, *args, **kwargs):
    """
    Entry point to the toolkit

    Expands the "all" sentinel in either list to the full supported set and
    dispatches to a ModelEvaluator.

    Returns:
    """
    from .model_evaluator import ModelEvaluator

    selected_models = supported_models if "all" in models else models
    selected_datasets = supported_datasets if "all" in datasets else datasets

    ModelEvaluator()(selected_models, selected_datasets, *args, **kwargs)
5,325,846
def _render_html_page(gallery: Gallery, image: Image) -> None:
    """Create an HTML document for a single image."""
    _render_html_to_file(
        'view',
        {'gallery': gallery, 'image': image},
        gallery.destination_path,
        image.page_name,
    )
5,325,847
def test_jsarray_output():
    """Test that the output function for JSArray doesn't bork."""
    arr = JSArray(traverser=traverser)
    arr.elements = [None, None]
    # repr() used to throw tracebacks on null elements.
    repr(arr)
    arr.as_primitive()
5,325,848
def nohighlight(nick):
    """Add a ZWNJ to nick to prevent highlight.

    Inserts a zero-width non-joiner (U+200C) after the first character so
    chat clients do not notify the named user when the nick is echoed back.

    Args:
        nick (str): The nickname; may be empty.

    Returns:
        str: The nick with a ZWNJ inserted, or unchanged if empty.
    """
    # Guard: the original raised IndexError on an empty nick.
    if not nick:
        return nick
    return nick[0] + "\u200c" + nick[1:]
5,325,849
def _get_r_val(z, omega_m, omega_l):
    """Returns the comoving distance at for one z value.

    Parameters
    ----------
    z : float
        Redshift.
    omega_m : float
        Present matter density.
    omega_l : float
        Present dark energy density.
    """
    result, _err = integrate.quad(
        _get_r_integrand, 0., z, args=(omega_m, omega_l))
    # Scale factor of 3000 — presumably the Hubble distance c/H0 in Mpc/h;
    # confirm against the definition of _get_r_integrand.
    return 3000. * result
5,325,850
def user_response_controller(bank_request, user_response):
    """
    processes user's response for bank's request sent

    : bank_request --> what is user currently requesting for
    : user_response --> what a user wants to actually do amongst the
      options in the above bank_requests
    """
    choice = validate_user_input_to_int(user_response)

    # Not an integer at all: ask the bank to repeat the same request.
    if choice == "error":
        return ['resend_same_bank_request', 'No valid option choosen']

    # Valid selections are 1..available_options inclusive.
    if 1 <= choice <= bank_request.get('available_options'):
        return choice

    return ['resend_same_bank_request', 'Selected option not found']
5,325,851
def create_features(datestrs):
    """
    Find the features associated with a set of dates.

    These will include:
        Saturday / Sunday flags (weekend indicator)
        season of year (one-hot: winter, spring, summer, autumn)

    Parameters
    ----------
    datestrs: list of strings
        Date strings of the format YYYY-MM-DD.

    Returns
    -------
    features: DataFrame
        Each row corresponds to one date. The datestring is the index.
    """
    feature_data = []
    for datestr in datestrs:
        current_date = datetime.datetime.strptime(datestr, '%Y-%m-%d').date()
        weekday = current_date.weekday()  # Monday == 0 ... Sunday == 6
        month = current_date.month

        # Season one-hot: 0 = winter, 1 = spring, 2 = summer, 3 = autumn.
        # December wraps back to winter.
        season = np.zeros(4)
        if month <= 2 or month == 12:
            season[0] = 1
        elif month <= 5:
            season[1] = 1
        elif month <= 8:
            season[2] = 1
        else:  # September - November
            season[3] = 1

        # (The original also built full day-of-week and month-of-year
        # one-hot arrays that were never emitted; those dead computations
        # have been removed. Output columns are unchanged.)
        feature_data.append({
            'Saturday': 1.0 if weekday == 5 else 0.0,
            'Sunday': 1.0 if weekday == 6 else 0.0,
            'winter': season[0],
            'spring': season[1],
            'summer': season[2],
            'autumn': season[3],
        })

    return pd.DataFrame(data=feature_data, index=datestrs)
5,325,852
def reset_print():
    """Fixture generator that restores the builtin ``print`` after use.

    Captures whatever ``print`` currently resolves to before yielding, then
    writes it back to ``builtins`` afterwards, undoing any monkeypatching
    performed in between.
    """
    saved_print = print
    yield
    builtins.print = saved_print
5,325,853
def obj_src(py_obj, escape_docstring=True):
    """Get the source for the python object that gets passed in

    Parameters
    ----------
    py_obj : callable
        Any python object
    escape_docstring : bool
        If true, prepend the escape character to the docstring triple quotes

    Returns
    -------
    str
        Source code of the object, optionally with triple quotes escaped.
        (The previous docstring advertised a list of lines; the function has
        always returned the full source as a single string.)

    Raises
    ------
    IOError
        Raised if the source code cannot be retrieved
    """
    src = inspect.getsource(py_obj)
    if escape_docstring:
        # str.replace returns a new string — the result must be reassigned.
        # (The original discarded it, so no escaping ever happened.)
        src = src.replace("'''", "\\'''")
        src = src.replace('"""', '\\"""')
    return src
5,325,854
def get_back_button_handler(current_panel: "GenericPanel") -> CallbackQueryHandler:
    """
    returns a Handler for BACK_PATTERN that returns the user to current_panel

    :param GenericPanel current_panel: the destination panel
    :return: a CallbackQueryHandler bound to the panel's prompt callback
    """
    handler = CallbackQueryHandler(
        current_panel.prompt, pattern=Globals.BACK_PATTERN
    )
    return handler
5,325,855
def main():
    """Script entry point: configure motor GPIO pins and start autonomous control.

    An optional model path may be supplied as the first CLI argument.
    """
    model = sys.argv[1] if len(sys.argv) > 1 else None
    motor_driver_helper.set_gpio_pins()
    autonomous_control(model)
5,325,856
def _build_import_library_x86():
    """ Build the import libraries for Mingw32-gcc on Windows

    Locates the MSVC pythonXY.lib (in the virtualenv or the base install),
    derives a .def file from its exported symbols, and runs dlltool to
    produce the GNU-style libpythonXY.a that mingw32-gcc can link against.
    Silently returns early when the output already exists or when the input
    library cannot be found; logs (but does not raise) on dlltool failure.
    """
    lib_name = "python%d%d.lib" % tuple(sys.version_info[:2])
    lib_file = os.path.join(sys.prefix, 'libs', lib_name)
    out_name = "libpython%d%d.a" % tuple(sys.version_info[:2])
    out_file = os.path.join(sys.prefix, 'libs', out_name)
    if not os.path.isfile(lib_file):
        # didn't find library file in virtualenv, try base distribution, too,
        # and use that instead if found there
        base_lib = os.path.join(sys.base_prefix, 'libs', lib_name)
        if os.path.isfile(base_lib):
            lib_file = base_lib
        else:
            log.warn('Cannot build import library: "%s" not found', lib_file)
            return
    if os.path.isfile(out_file):
        log.debug('Skip building import library: "%s" exists', out_file)
        return
    # didn't find in virtualenv, try base distribution, too
    base_file = os.path.join(sys.base_prefix, 'libs', out_name)
    if os.path.isfile(base_file):
        log.debug('Skip building import library: "%s" exists', out_file)
        return
    log.info('Building import library (ARCH=x86): "%s"', out_file)
    from numpy.distutils import lib2def
    # Dump the exported symbols with nm, parse them, and write a .def file
    # that dlltool can consume.
    def_name = "python%d%d.def" % tuple(sys.version_info[:2])
    def_file = os.path.join(sys.prefix, 'libs', def_name)
    nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file)
    nm_output = lib2def.getnm(nm_cmd)
    dlist, flist = lib2def.parse_nm(nm_output)
    lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w'))
    dll_name = "python%d%d.dll" % tuple(sys.version_info[:2])
    args = (dll_name, def_file, out_file)
    cmd = 'dlltool --dllname %s --def %s --output-lib %s' % args
    status = os.system(cmd)
    # for now, fail silently
    if status:
        log.warn('Failed to build import library for gcc. Linking will fail.')
    return
5,325,857
def save_image(filename: str, pixels: list, width: int, height: int) -> None:
    """Create a width by height image containing pixels and save it to a
    file called filename.

    Args:
        filename: Output path; the image format is inferred by PIL from
            the extension.
        pixels: Flat, row-major list of RGB tuples; pixel (x, y) lives at
            index ``x + y * width``.
        width: Image width in pixels.
        height: Image height in pixels.
    """
    image = Image.new('RGB', (width, height))
    # putdata fills the image row-major (index y * width + x), matching the
    # original per-pixel putpixel loop — but in one C-level call instead of
    # a list comprehension executed purely for its side effects.
    image.putdata(pixels)
    image.save(filename)
5,325,858
def openocd_prog_path(request: FixtureRequest) -> Optional[str]:
    """Fixture: resolve ``openocd_prog_path`` from the CLI param, then the
    config option, defaulting to None. Enables parametrization for the same
    cli option."""
    default = None
    return _request_param_or_config_option_or_default(
        request, 'openocd_prog_path', default
    )
5,325,859
def reindexMatrix(iss, jss, A):
    """Relabel the rows/columns of a square matrix.

    ``iss`` and ``jss`` are lists of indices of equal size, representing a
    permutation: ``iss[k]`` is replaced with ``jss[k]``; all other indices
    not in the lists are left unchanged.

    Parameters
    ----------
    iss, jss : sequence of int
        Source and replacement indices.
    A : np.ndarray
        Square matrix of shape (n, n).

    Returns
    -------
    np.ndarray
        New matrix B with ``B[i, j] == A[t[i], t[j]]`` for the relabeling t.
    """
    n = len(A)
    mapping = list(range(n))
    for src, dst in zip(iss, jss):
        mapping[src] = dst
    # Fancy indexing builds the reindexed matrix in one vectorized step,
    # replacing the original O(n^2) Python loop and a leftover debug print.
    return A[np.ix_(mapping, mapping)]
5,325,860
def createInfoMatix(character_id):
    """Initialize the formation (matrix) row for a newly created character.

    Inserts the character's first three pets into slots eyes_4..eyes_6 of
    ``tb_character_matrix``. (Docstring translated from Chinese.)

    Args:
        character_id (int): Primary key of the character.

    Returns:
        bool: True when at least one row was inserted, else False.
    """
    petlist = getCharacterPetList(character_id)
    # All substituted values are integers, so %d formatting is injection-safe.
    sql = "INSERT INTO `tb_character_matrix`(`characterId`,`eyes_4`,`eyes_5`,`eyes_6`) \
        VALUES(%d,%d,%d,%d);"%(character_id,petlist[0],petlist[1],petlist[2])
    conn = dbpool.connection()
    try:
        cursor = conn.cursor()
        try:
            count = cursor.execute(sql)
            conn.commit()
        finally:
            # Previously the cursor/connection leaked if execute() raised.
            cursor.close()
    finally:
        conn.close()
    return count >= 1
5,325,861
def help():
    """<b>Print available functions as json.<br>"""
    # Map every non-static route to the docstring of its view function.
    # (NOTE: the docstring above is itself served by this endpoint.)
    func_list = {
        rule.rule: app.view_functions[rule.endpoint].__doc__
        for rule in app.url_map.iter_rules()
        if rule.endpoint != 'static'
    }
    return jsonify(func_list)
5,325,862
async def grid_train(params: Grid, background_tasks: BackgroundTasks, credentials: HTTPBasicCredentials = Depends(validate_access)):
    """
    Choose an estimator, and hyper-parameters to optimize for a GridSearchCV.
    Results can be recorded in Neptune.ai.

    Flow: load and split the data, validate the requested estimator, fill in
    or namespace the hyper-parameter grid, optionally open a Neptune run, and
    schedule the actual grid search as a background task. Returns immediately.

    Raises (as HTTP responses):
        400 -- data loading failed, unknown estimator, or invalid Neptune token.
    """
    # Load and split the dataset; surface any loading problem as a 400.
    try:
        X_train, X_test, y_train, y_test = load_data(params.data_path, params.comment_col, params.target_col)
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))

    # Estimator names are registered upper-case in MODELS.
    params.estimator = params.estimator.upper()
    if params.estimator not in MODELS.keys():
        raise HTTPException(status_code=400, detail=f"The model isn't registered in the API. You can choose between {','.join(list(MODELS.keys()))}")

    if params.parameters is None:
        # No grid supplied: fall back to the model's default search space.
        params.parameters = MODELS[params.estimator]["default_hyperparams"]
    else:
        # hyper-parameter validation is still needed here (translated from French)
        # Namespace user-supplied keys for the pipeline's "clf" step.
        params.parameters = {f"clf__{param}": liste for param, liste in params.parameters.items()}

    if params.estimator == "SVC":
        # SVC needs probability=True so predict_proba is available downstream.
        params.parameters["clf__probability"] = [True]

    # start logging
    run = None
    if params.neptune_log:
        try:
            run = activate_monitoring(os.getenv('NEPTUNE_USER'), os.getenv('NEPTUNE_PROJECT'))
            params.tags.extend([params.estimator, "grid"])
            create_exp(params.parameters, params.tags, run)
        except neptune.exceptions.NeptuneInvalidApiTokenException as e:
            raise HTTPException(status_code=400, detail="Not currently connected to NEPTUNE.ai. Ask the developer to provide its user access.")

    # run modeling in the background
    background_tasks.add_task(grid_run_model, params, run, X_train, X_test, y_train, y_test)
    return {'res' : "The model is running. You will receive a mail if you provided your email address."}
5,325,863
def version():
    """
    Returns the name, version and api_version of the application when a
    HTTP GET request is made.
    """
    payload = {
        'name': 'openshift-python-flask-sample',
        'version': VERSION,
    }
    return jsonify(**payload)
5,325,864
def test_get_text_anchors(anchor_type, ndisplay, expected_coords):
    """Round trip tests for getting anchor coordinates."""
    # One square shape spanning (0,0)-(10,10).
    shapes = [np.array([[0, 0], [10, 0], [0, 10], [10, 10]])]
    anchor_coords, _, _ = get_text_anchors(
        shapes, anchor=anchor_type, ndisplay=ndisplay
    )
    np.testing.assert_equal(anchor_coords, expected_coords)
5,325,865
def search_quotes(request, currency):
    """
    Query the API for quotes containing ``currency`` in their name.

    Returns an empty result when the search bar is empty. The optional
    ``onlyB3`` GET parameter ('1') restricts results to the B3 (São Paulo)
    exchange; otherwise the search is global.
    """
    # Empty search bar: nothing to look up.
    if not currency:
        return { 'quotes': {} }

    conn.request("GET", "/auto-complete?q="+currency+"&region=BR", headers=headers)
    res = conn.getresponse()
    api_quotes = json.loads(res.read().decode("utf-8"))['quotes']

    # B3-only filter, driven by the page's selector.
    if request.GET.get('onlyB3') == '1':
        sa_quotes = [
            dict(quote)
            for quote in api_quotes
            if quote['exchange'] == 'SAO' or quote['symbol'].endswith('.SA')
        ]
        return { 'quotes': sa_quotes }

    return { 'quotes': api_quotes }
5,325,866
def destroy(N, dtype=tf.complex64):
    """Returns a destruction (lowering) operator in the Fock basis.

    Args:
        N (int): Dimension of Hilbert space
        dtype (tf.dtypes.DType, optional): Returned dtype. Defaults to c64.

    Returns:
        Tensor([N, N], dtype): NxN annihilation (lowering) operator
        (the previous doc incorrectly said "creation operator")
    """
    # sqrt(n) on the superdiagonal (k=1) implements  a|n> = sqrt(n)|n-1>.
    a = diag(tf.sqrt(tf.range(1, N, dtype=tf.float64)), k=1)
    return tf.cast(a, dtype=dtype)
5,325,867
def get_nearest_operation(
    db: Redis[bytes], address: hash_t, subdag: Optional[str] = None
) -> Optional[Operation]:
    """Return the operation at address or the operation generating address."""
    # Fast path: the address is itself an operation.
    try:
        return Operation.grab(db, address)
    except RuntimeError:
        pass

    # Otherwise it may be an artefact; resolve its generating operation.
    try:
        art = Artefact[Any].grab(db, address)
    except RuntimeError:
        raise RuntimeError(
            f"address {address} neither a valid operation nor a valid artefact."
        )

    if art.parent == "root":
        # We have basically just a single artefact as the network...
        return None
    return Operation.grab(db, art.parent)
5,325,868
def load_CIFAR_batch(file_path):
    """ load single batch of cifar

    Args:
        file_path: path to one pickled CIFAR batch file containing a dict
            with 'data' (N x 3072 array) and 'labels' (length-N sequence).

    Returns:
        tuple: (data, labels) where data has shape (N, 3, 32, 32) as float
        and labels is an integer numpy array of length N.
    """
    data_dict = load_pickle(file_path)
    data = data_dict['data']
    labels = data_dict['labels']
    # Infer the batch size from the array instead of hard-coding 10000,
    # so partial or custom-sized batches also load correctly.
    data = data.reshape(-1, 3, 32, 32).astype("float")
    labels = np.array(labels)
    return data, labels
5,325,869
def _disposable_and_async_gen_from_obs(obs: Observable):
    """
    Compatability layer for legacy Observable to async generator

    This should be removed and subscription resolvers changed to return
    async generators after removal of flask & gevent based dagit.

    Returns a (disposable, async_generator) pair: the Observable pushes
    items into an asyncio queue, and the generator yields them as they
    arrive. The caller is responsible for disposing the subscription;
    the generator itself loops forever.
    """
    queue: Queue = Queue()
    # put_nowait is safe here because asyncio.Queue is unbounded by default.
    disposable = obs.subscribe(on_next=queue.put_nowait)

    async def async_gen():
        while True:
            i = await queue.get()
            yield i

    return disposable, async_gen()
5,325,870
def interesting_pattern(x: float, y: float) -> float:
    """This function is interesting in x and y in range -10..10,
    returning a float value in range 0..1
    """
    spike = np.sin(x) ** 10
    wave = np.cos(10 + y * x) * np.cos(x)
    return 0.5 + (spike + wave) / 2
5,325,871
def cut(st, sec_before_split=None):
    """
    Cut/trim the record.

    This method minimally requires that the windows.signal_end method has
    been run, in which case the record is trimmed to the end of the signal
    that was estimated by that method.

    To trim the beginning of the record, the sec_before_split must be
    specified, which uses the noise/signal split time that was estiamted by
    the windows.signal_split mehtod.

    Args:
        st (StationStream):
            Stream of data.
        sec_before_split (float):
            Seconds to trim before split. If None, then the beginning of
            the record will be unchanged.

    Returns:
        stream: cut streams.
    """
    # Streams that already failed earlier processing are passed through.
    if not st.passed:
        return st
    for tr in st:
        logging.debug('Before cut end time: %s ' % tr.stats.endtime)
        # Trim the tail to the estimated signal end.
        etime = tr.getParameter('signal_end')['end_time']
        tr.trim(endtime=etime)
        logging.debug('After cut end time: %s ' % tr.stats.endtime)
        if sec_before_split is not None:
            # Trim the head to sec_before_split before the noise/signal split.
            split_time = tr.getParameter('signal_split')['split_time']
            stime = split_time - sec_before_split
            logging.debug('Before cut start time: %s ' % tr.stats.starttime)
            if stime < etime:
                tr.trim(starttime=stime)
            else:
                # Requested start is at/after the end -- mark the trace failed
                # instead of producing an empty/inverted window.
                tr.fail('The \'cut\' processing step resulting in '
                        'incompatible start and end times.')
            logging.debug('After cut start time: %s ' % tr.stats.starttime)
        # Record the new window in the trace's provenance.
        tr.setProvenance(
            'cut',
            {
                'new_start_time': tr.stats.starttime,
                'new_end_time': tr.stats.endtime
            }
        )
    return st
5,325,872
def interp_road(d,croad,roads,intersections,normD = False):
    """ Get the position of a point along a road

    Args:
        d: distance along the road, in road-length units, or a normalized
            0..1 fraction when ``normD`` is True.
        croad: key of the current road in ``roads``.
        roads: mapping of road key -> road dict (start_int/end_int, and for
            arcs: center, turn_direction, optionally type).
        intersections: mapping of intersection key -> dict with 'position'.
        normD: when True, ``d`` is interpreted as a 0..1 fraction of the
            road length instead of an absolute distance.

    Returns:
        2-vector position: linear interpolation for straight roads, or a
        point on the circular arc for curved roads.
    """
    start_int = roads[croad]['start_int']
    start_pos = intersections[start_int]['position']
    end_int = roads[croad]['end_int']
    end_pos = intersections[end_int]['position']
    # Absolute distances need the road length to normalize.
    if not normD:
        length = road_length(croad,roads,intersections)
    if 'type' not in roads[croad] or roads[croad]['type'] == 'line':
        # Straight segment: simple linear interpolation between endpoints.
        if normD:
            alpha = d
        else:
            alpha = d/length
        return (1.0-alpha)*start_pos + alpha*end_pos
    else:
        # Circular arc: interpolate the angle about the arc center.
        C = roads[croad]['center'].reshape(2)
        # Use the mean of the two endpoint radii (they may differ slightly).
        startR = np.sqrt(np.sum(np.power(C - start_pos,2.0)))
        endR = np.sqrt(np.sum(np.power(C - end_pos,2.0)))
        r = 0.5*(startR + endR)
        # Angles normalized to [0, 2*pi).
        startTheta = np.arctan2(start_pos[1] - C[1],start_pos[0] - C[0])
        if startTheta < 0:
            startTheta += 2.0*np.pi
        endTheta = np.arctan2(end_pos[1] - C[1],end_pos[0] - C[0])
        if endTheta < 0:
            endTheta += 2.0*np.pi
        # Unwrap the angles so the sweep direction matches turn_direction
        # (negative: clockwise, positive: counter-clockwise) -- presumably;
        # confirm the sign convention against road_length.
        if roads[croad]['turn_direction'] < 0 and endTheta > startTheta:
            startTheta += 2.0*np.pi
        elif roads[croad]['turn_direction'] > 0 and endTheta < startTheta:
            endTheta += 2.0*np.pi
        # startTheta = roads[croad]['start_theta']
        # endTheta = roads[croad]['end_theta']
        if normD:
            curr_theta = (1.0-d)*startTheta + d*endTheta
        else:
            curr_theta = startTheta + (endTheta - startTheta)*(d/length)
        return C + r*np.array([np.cos(curr_theta),np.sin(curr_theta)]).reshape(2)
5,325,873
def to_ufo_font_attributes(self, family_name):
    """Generate a list of UFOs with metadata loaded from .glyphs data.

    Modifies the list of UFOs in the UFOBuilder (self) in-place: one UFO is
    created per Glyphs master, keyed by the master id in ``self._ufos``.
    Font-wide attributes are copied to every UFO; master-specific metrics,
    stems and custom names/values are copied per master.
    """
    font = self.font

    # "date" can be missing; Glyphs.app removes it on saving if it's empty:
    # https://github.com/googlei18n/glyphsLib/issues/134
    date_created = getattr(font, 'date', None)
    if date_created is not None:
        date_created = to_ufo_time(date_created)

    # Font-wide values shared by all masters.
    units_per_em = font.upm
    version_major = font.versionMajor
    version_minor = font.versionMinor
    copyright = font.copyright
    designer = font.designer
    designer_url = font.designerURL
    manufacturer = font.manufacturer
    manufacturer_url = font.manufacturerURL

    for master in font.masters:
        ufo = self.ufo_module.Font()

        if date_created is not None:
            ufo.info.openTypeHeadCreated = date_created
        ufo.info.unitsPerEm = units_per_em
        ufo.info.versionMajor = version_major
        ufo.info.versionMinor = version_minor

        # Optional name-table fields: only set when non-empty.
        if copyright:
            ufo.info.copyright = copyright
        if designer:
            ufo.info.openTypeNameDesigner = designer
        if designer_url:
            ufo.info.openTypeNameDesignerURL = designer_url
        if manufacturer:
            ufo.info.openTypeNameManufacturer = manufacturer
        if manufacturer_url:
            ufo.info.openTypeNameManufacturerURL = manufacturer_url

        # Per-master vertical metrics.
        ufo.info.ascender = master.ascender
        ufo.info.capHeight = master.capHeight
        ufo.info.descender = master.descender
        ufo.info.xHeight = master.xHeight

        horizontal_stems = master.horizontalStems
        vertical_stems = master.verticalStems
        # Glyphs and UFO use opposite italic-angle sign conventions.
        italic_angle = -master.italicAngle
        if horizontal_stems:
            ufo.info.postscriptStemSnapH = horizontal_stems
        if vertical_stems:
            ufo.info.postscriptStemSnapV = vertical_stems
        if italic_angle:
            ufo.info.italicAngle = italic_angle

        # Round-trip data that UFO has no native slot for goes into the lib
        # under the glyphsLib prefix.
        width = master.width
        weight = master.weight
        if weight:
            ufo.lib[GLYPHS_PREFIX + 'weight'] = weight
        if width:
            ufo.lib[GLYPHS_PREFIX + 'width'] = width
        for number in ('', '1', '2', '3'):
            custom_name = getattr(master, 'customName' + number)
            if custom_name:
                ufo.lib[GLYPHS_PREFIX + 'customName' + number] = custom_name
            custom_value = getattr(master, 'customValue' + number)
            if custom_value:
                ufo.lib[GLYPHS_PREFIX + 'customValue' + number] = custom_value

        # Delegate the remaining conversions to specialized helpers.
        self.to_ufo_names(ufo, master, family_name)
        self.to_ufo_blue_values(ufo, master)
        self.to_ufo_family_user_data(ufo)
        self.to_ufo_master_user_data(ufo, master)
        self.to_ufo_guidelines(ufo, master)
        self.to_ufo_custom_params(ufo, master)

        master_id = master.id
        ufo.lib[GLYPHS_PREFIX + 'fontMasterID'] = master_id
        # FIXME: (jany) in the future, yield this UFO (for memory, laze iter)
        self._ufos[master_id] = ufo
5,325,874
def zero_pad2d(inputs, padding=0, output_dtype="float32", requires_grad=False):
    """
    Zero padding for 2d tensor
    Args:
    -----------------------------
    inputs : Tensor
        shape [batch, channel, height, width]
    padding: (optional:0) int or tuple
        expected: (h_pad_up, h_pad_down, w_pad_up, w_pad_down)
        a single int pads all four sides equally; a 2-tuple is expanded
        to (h, h, w, w)
    output_dtype : str
    requires_grad : bool
    -----------------------------
    Returns:
    -----------------------------
    Tensor
        shape [batch, channel, padded_height, padded_width]
    -----------------------------
    """
    # Normalize padding to a 4-tuple (h_up, h_down, w_up, w_down).
    padding = (padding, padding, padding, padding) if isinstance(padding, (int, tvm.tir.IntImm)) else padding
    assert isinstance(padding, tuple), "type(padding)={}".format(type(padding))
    if len(padding) == 2:
        padding = (padding[0], padding[0], padding[1], padding[1])
    assert (len(padding) == 4)
    # Fast path: nothing to pad.
    if all([padding[i] == 0 for i in range(len(padding))]):
        return inputs
    batch_size, in_channel, height, width = inputs.shape
    padded_shape = (batch_size, in_channel, height + padding[0] + padding[1], width + padding[2] + padding[3])
    padding_value = tvm.tir.expr.const(0, output_dtype)

    def _inner_zero_pad2d(inputs):
        # Compute hint for Compute: per output element, select the original
        # value when (h, w) falls inside the unpadded region, else zero.
        def _for_spatial(b, c, h, w):
            def _for_reduce():
                return tvm.te.if_then_else(
                    tvm.te.all(h >= padding[0], h < height + padding[0],
                               w >= padding[2], w < width + padding[2]),
                    inputs[b, c, h - padding[0], w - padding[2]],
                    padding_value
                )
            # No reduction axes for a pure element-wise pad.
            return _for_reduce, [], "none"
        return _for_spatial
    return Compute(padded_shape, output_dtype , inputs, fhint=_inner_zero_pad2d, name="zero_pad2d", requires_grad=requires_grad)
5,325,875
def test_inflight_requests(broker, req):
    """Not all simultaneous requests to a cached service are executed.
    """
    request = dict(req)
    request["delay"] = 2

    responses = [
        broker.execute("mock_preload_long_ttl", request, cache=True)
        for _ in range(10)
    ]
    statuses = [cache_status(r.metadata) for r in responses]

    # With a populated cache most requests should be served as hits.
    hits = statuses.count("hit")
    misses = statuses.count("miss")
    assert hits > misses
5,325,876
def my_plot(my_folder='/mnt/closet/ldr2/catalogues'):
    """Make some plots.

    Reads a BZCAT/LDR2 crossmatch CSV, splits sources into compact
    ('Compact' == True) and extended subsets, and saves a series of
    two-panel (scatter + histogram) figures into ``my_folder``.

    Parameters
    ----------
    my_folder : string
        Working directory where the output PNGs are written.

    Notes
    -----
    ``sys.exit()`` is called partway through, so the final combined
    scatter/histogram figure below it is currently dead code.
    """
    # NOTE(review): hard-coded local input path — only runs on the author's machine.
    df = pd.read_csv('/home/sean/Downloads/ldr2/LDR2 and BZCAT 10_ crossmatch -'
                     ' BL Lacs.csv')
    compact = df[df['Compact'] == True]  # noqa
    extended = df[df['Compact'] != True]  # noqa
    plt.figure(figsize=(12, 12)).patch.set_facecolor('white')
    plt.rcParams['font.family'] = 'serif'
    plt.rcParams['mathtext.fontset'] = 'dejavuserif'
    mpl.rcParams['xtick.major.size'] = 10
    mpl.rcParams['xtick.major.width'] = 2
    mpl.rcParams['xtick.minor.size'] = 5
    mpl.rcParams['xtick.minor.width'] = 2
    mpl.rcParams['ytick.major.size'] = 10
    mpl.rcParams['ytick.major.width'] = 2
    mpl.rcParams['ytick.minor.size'] = 5
    mpl.rcParams['ytick.minor.width'] = 2
    mpl.rcParams['axes.linewidth'] = 2

    # luminosity against redshift
    gs = gridspec.GridSpec(2, 1, width_ratios=[1], height_ratios=[2, 1],
                           hspace=0.02)
    ax = plt.subplot(gs[0, 0])
    ax.errorbar(compact['redshift'],
                compact['Luminosity with FIRST index (W/Hz)'],
                markersize=15, mec='k', label='Unresolved', elinewidth=2,
                yerr=compact['Luminosity error with FIRST index (W/Hz)'],
                marker='s', ls='none', mfc='#302f2c', color='k', mew=2)
    ax.errorbar(extended['redshift'],
                extended['Luminosity with FIRST index (W/Hz)'],
                markersize=15, label='Extended', elinewidth=2, mec='k',
                yerr=extended['Luminosity error with FIRST index (W/Hz)'],
                marker='s', ls='none', mfc='#faf3dd', color='k', mew=2)
    ax.set_xticks([])
    ax.set_yscale('log')
    ax.set_ylabel(r'$L_{144} \,\,\,\,(\mathrm{W\,\,Hz}^{-1})$', fontsize=30)
    ax.set_xlim(0, 0.8)
    plt.setp(ax.get_yticklabels(), fontsize=30)
    handles, labels = ax.get_legend_handles_labels()
    # strip the errorbar caps/lines so only the marker shows in the legend
    handles = [h[0] for h in handles]
    ax.legend(handles, labels, ncol=2, loc='upper center', numpoints=1,
              fontsize=30, mode='expand',
              bbox_to_anchor=(0.5, 1.2, -0.1, 0), frameon=False)
    axx = plt.subplot(gs[1, 0])
    bins = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
    axx.hist([compact['redshift'], extended['redshift']],
             histtype='stepfilled', color=['#302f2c', '#faf3dd'],
             edgecolor='k', lw=2, bins=bins, stacked=True)
    axx.set_yticks([0, 3, 6, 9])
    axx.set_xlabel(r'$z$', fontsize=30)
    axx.set_xlim(0, 0.8)
    plt.setp(axx.get_xticklabels(), fontsize=30)
    plt.setp(axx.get_yticklabels(), fontsize=30)
    gs.update(left=0.15)
    save_name = f'{my_folder}/luminosity-against-redshift-hist.png'
    plt.savefig(f'{save_name}')
    os.system(f'convert {save_name} -trim {save_name}')  # removes whitespace
    print(f'gpicview {save_name}')
    plt.clf()

    # redshift against spectral index
    gs = gridspec.GridSpec(2, 1, width_ratios=[1], height_ratios=[2, 1],
                           hspace=0.02)
    ax = plt.subplot(gs[0, 0])
    ax.errorbar(compact['LDR2-to-NVSS index'], compact['redshift'],
                markersize=15, xerr=compact['LDR2-to-NVSS index error'],
                mec='k', marker='s', ls='none', mfc='#302f2c', color='k',
                mew=2, label='Unresolved', elinewidth=2)
    # off-scale point drawn as an arrow at the axis edge
    ax.plot([0.78], [0.496], markersize=15, mec='k', mew=2, marker='>',
            ls='none', mfc='#302f2c', color='k')
    ax.text(0.58, 0.48359, r'$\alpha = 1.9$', rotation=0, fontsize=20)
    ax.errorbar(extended['LDR2-to-NVSS index'], extended['redshift'],
                markersize=15, xerr=extended['LDR2-to-NVSS index error'],
                marker='s', ls='none', mfc='#faf3dd', color='k', mew=2,
                label='Extended', elinewidth=2, mec='k')
    ax.set_xticks([])
    ax.set_ylabel(r'$z$', fontsize=30)
    ax.set_xlim(-0.8, 0.8)
    plt.setp(ax.get_yticklabels(), fontsize=30)
    handles, labels = ax.get_legend_handles_labels()
    handles = [h[0] for h in handles]
    ax.legend(handles, labels, ncol=2, loc='upper center', numpoints=1,
              fontsize=30, mode='expand',
              bbox_to_anchor=(0.5, 1.2, -0.1, 0), frameon=False)
    axx = plt.subplot(gs[1, 0])
    bins = np.linspace(-0.8, 0.8, 9)
    axx.hist([compact['LDR2-to-NVSS index'], extended['LDR2-to-NVSS index']],
             histtype='stepfilled', color=['#302f2c', '#faf3dd'],
             edgecolor='k', lw=2, bins=bins, stacked=True)
    axx.set_xticks(bins)
    axx.set_yticks([0, 3, 6, 9, 12])
    axx.set_xlabel(r'$\alpha$', fontsize=30)
    axx.set_xlim(-0.8, 0.8)
    plt.setp(axx.get_xticklabels(), fontsize=30)
    plt.setp(axx.get_yticklabels(), fontsize=30)
    gs.update(left=0.15)
    plt.savefig(f'{my_folder}/redshift-against-spectral-index-hist.png')
    print(f'{my_folder}/redshift-against-spectral-index-hist.png')
    plt.clf()

    # spectral index against luminosity
    gs = gridspec.GridSpec(2, 1, width_ratios=[1], height_ratios=[2, 1],
                           hspace=0.02)
    ax = plt.subplot(gs[0, 0])
    ax.errorbar(compact['Luminosity with NVSS index (W/Hz)'],
                compact['LDR2-to-NVSS index'], markersize=15,
                xerr=compact['Luminosity error with NVSS index (W/Hz)'],
                yerr=compact['LDR2-to-NVSS index error'], mec='k',
                marker='s', ls='none', mfc='#302f2c', color='k', mew=2,
                label='Unresolved', elinewidth=2)
    ax.plot([1.31e24], [0.76], markersize=15, mec='k', mew=2, marker='^',
            ls='none', mfc='#302f2c', color='k')
    ax.text(0.9e24, 0.66, r'$\alpha = 1.9$', rotation=00, fontsize=20)
    ax.errorbar(extended['Luminosity with NVSS index (W/Hz)'],
                extended['LDR2-to-NVSS index'], markersize=15,
                xerr=extended['Luminosity error with NVSS index (W/Hz)'],
                yerr=extended['LDR2-to-NVSS index error'],
                marker='s', ls='none', mfc='#faf3dd', color='k', mew=2,
                label='Extended', elinewidth=2, mec='k')
    bins = np.logspace(np.log10(1e23), np.log10(1e27), 9)
    ax.set_ylabel(r'$\rho_{144}$', fontsize=30)
    ax.set_xlim(bins[1], bins[-1])
    ax.set_xscale('log')
    ax.set_xticks([])
    ax.set_ylim(-0.8, 0.8)
    ax.tick_params(axis='x', which='both', bottom=False, top=False,
                   labelbottom=False)
    plt.setp(ax.get_yticklabels(), fontsize=30)
    handles, labels = ax.get_legend_handles_labels()
    handles = [h[0] for h in handles]
    ax.legend(handles, labels, ncol=2, loc='upper center', numpoints=1,
              fontsize=30, mode='expand',
              bbox_to_anchor=(0.5, 1.2, -0.1, 0), frameon=False)
    axx = plt.subplot(gs[1, 0])
    axx.hist([compact['Luminosity with NVSS index (W/Hz)'],
              extended['Luminosity with NVSS index (W/Hz)']],
             histtype='stepfilled', color=['#302f2c', '#faf3dd'],
             edgecolor='k', lw=2, stacked=True, bins=bins)
    axx.set_yticks([0, 4, 8, 12])
    # NOTE(review): x data is luminosity here but the label says $D$ (kpc) —
    # looks like a copy-paste slip; confirm intended label.
    axx.set_xlabel(r'$D$ (kpc)', fontsize=30)
    axx.set_xlim(bins[1], bins[-1])
    axx.set_xscale('log')
    plt.setp(axx.get_xticklabels(), fontsize=30)
    plt.setp(axx.get_yticklabels(), fontsize=30)
    gs.update(left=0.15)
    save_name = f'{my_folder}/spectral-index-against-luminosity-hist.png'
    plt.savefig(f'{save_name}')
    os.system(f'convert {save_name} -trim {save_name}')  # removes whitespace
    print(f'gpicview {save_name}')
    plt.clf()

    # core-dominance-against-extent
    gs = gridspec.GridSpec(2, 1, width_ratios=[1], height_ratios=[2, 1],
                           hspace=0.02)
    ax = plt.subplot(gs[0, 0])
    # ax.errorbar(compact['Luminosity with NVSS index (W/Hz)'],
    #             compact['Extent (kpc)'],
    #             markersize=15,
    #             xerr=compact['Luminosity error with NVSS index (W/Hz)'],
    #             yerr=compact['LDR2-to-NVSS index error'],
    #             mec='k',
    #             marker='s', ls='none', mfc='#302f2c', color='k', mew=2,
    #             label='Unresolved', elinewidth=2)
    # ax.plot([1.31e24],
    #         [0.76],
    #         markersize=15,
    #         mec='k', mew=2,
    #         marker='^', ls='none', mfc='#302f2c', color='k')
    # ax.text(0.9e24, 0.66, r'$\alpha = 1.9$', rotation=00, fontsize=20)
    ax.errorbar(extended['Extent (kpc)'],
                extended['Core dominance'].astype('float'),
                markersize=15,
                xerr=extended['Extent error (kpc)'],
                yerr=extended['Core dominance error'].astype('float'),
                marker='s', ls='none', mfc='#faf3dd', color='k', mew=2,
                label='Extended', elinewidth=2, mec='k')
    bins = np.linspace(0, 600, 13)
    ax.set_ylabel(r'$\rho_{144}$', fontsize=30)
    ax.set_xlim(0, 600)
    ax.set_yscale('log')
    ax.set_xticks([])
    ax.set_ylim(0.02, 10)
    ax.tick_params(axis='x', which='both', bottom=False, top=False,
                   labelbottom=False)
    plt.setp(ax.get_yticklabels(), fontsize=30)
    handles, labels = ax.get_legend_handles_labels()
    handles = [h[0] for h in handles]
    # ax.legend(handles, labels, ncol=2, loc='upper center', numpoints=1,
    #           fontsize=30, mode='expand',
    #           bbox_to_anchor=(0.5, 1.2, -0.1, 0), frameon=False)
    axx = plt.subplot(gs[1, 0])
    print('asdfasdfasdf')  # NOTE(review): leftover debug print
    print(extended['Extent (kpc)'])
    axx.hist(  # [compact['Luminosity with NVSS index (W/Hz)'],
             extended['Extent (kpc)'],
             histtype='stepfilled', color=['#faf3dd'], edgecolor='k', lw=2,
             stacked=True, bins=bins)
    axx.set_yticks([0, 2, 4, 6])
    axx.set_xlabel(r'$D$ (kpc)', fontsize=30)
    axx.set_xlim(bins[1], bins[-1])
    # axx.set_xscale('log')
    plt.setp(axx.get_xticklabels(), fontsize=30)
    plt.setp(axx.get_yticklabels(), fontsize=30)
    gs.update(left=0.15)
    save_name = f'{my_folder}/core-dominance-against-extent-hist.png'
    plt.savefig(f'{save_name}')
    os.system(f'convert {save_name} -trim {save_name}')  # removes whitespace
    print(f'gpicview {save_name} # !!!!!!!!!!!!')
    plt.clf()

    # extent-against-spectral-index-extent
    # redshift against spectral index
    gs = gridspec.GridSpec(2, 1, width_ratios=[1], height_ratios=[2, 1],
                           hspace=0.02)
    ax = plt.subplot(gs[0, 0])
    ax.errorbar(compact['LDR2-to-NVSS index'], compact['Extent (kpc)'],
                markersize=15, xerr=compact['LDR2-to-NVSS index error'],
                # yerr=compact['Extent error (kpc)'],
                mec='k', marker=r'$\downarrow$', ls='none', mfc='#302f2c',
                color='k', mew=2, label='Unresolved', elinewidth=2)
    ax.plot([0.775], [79.5363], markersize=15, mec='k', mew=2,
            marker=r'$\rightarrow$', ls='none', mfc='#302f2c', color='k')
    # ax.text(0.58, 0.48359, r'$\alpha_{144}^{} = 1.9$', rotation=0, fontsize=20)
    ax.errorbar(extended['LDR2-to-NVSS index'], extended['Extent (kpc)'],
                markersize=15, xerr=extended['LDR2-to-NVSS index error'],
                yerr=extended['Extent error (kpc)'],
                marker='s', ls='none', mfc='#faf3dd', color='k', mew=2,
                label='Extended', elinewidth=2, mec='k')
    ax.set_xticks([])
    ax.set_ylabel(r'$D$ (kpc)', fontsize=30)
    ax.set_xlim(-0.8, 0.8)
    ax.set_ylim(0, 600)
    plt.setp(ax.get_yticklabels(), fontsize=30)
    handles, labels = ax.get_legend_handles_labels()
    handles = [h[0] for h in handles]
    ax.legend(handles, labels, ncol=2, loc='upper center', numpoints=1,
              fontsize=30, mode='expand',
              bbox_to_anchor=(0.5, 1.2, -0.1, 0), frameon=False)
    axx = plt.subplot(gs[1, 0])
    bins = np.linspace(-0.8, 0.8, 9)
    axx.hist([compact['LDR2-to-NVSS index'], extended['LDR2-to-NVSS index']],
             histtype='stepfilled', color=['#302f2c', '#faf3dd'],
             edgecolor='k', lw=2, bins=bins, stacked=True)
    axx.set_xticks(bins)
    axx.set_yticks([0, 3, 6, 9, 12])
    axx.set_xlabel(r'$\alpha_{144}^{1400}$', fontsize=30)
    axx.set_xlim(-0.8, 0.8)
    plt.setp(axx.get_xticklabels(), fontsize=30)
    plt.setp(axx.get_yticklabels(), fontsize=30)
    gs.update(left=0.15)
    save_name = f'{my_folder}/extent-against-spectral-index-hist.png'
    plt.savefig(f'{save_name}')
    os.system(f'convert {save_name} -trim {save_name}')  # removes whitespace
    print(f'gpicview {save_name} # asdflkjasdj!')
    plt.clf()

    '''---------------------------------------------------------------------'''
    # spectral index against core dominance
    gs = gridspec.GridSpec(2, 1, width_ratios=[1], height_ratios=[2, 1],
                           hspace=0.02)
    ax = plt.subplot(gs[0, 0])
    # ax.errorbar(compact['Core dominance'],
    #             compact['Extent (kpc)'],
    #             markersize=15,
    #             xerr=compact['Core dominance error'],
    #             yerr=compact['Extent error (kpc)'],
    #             mec='k',
    #             marker='s', ls='none', mfc='#302f2c', color='k', mew=2,
    #             label='Unresolved', elinewidth=2)
    # ax.plot([1.31e24],
    #         [0.46],
    #         markersize=15,
    #         mec='k', mew=2,
    #         marker='^', ls='none', mfc='#302f2c', color='k')
    # ax.text(0.9e24, 0.37, r'$\alpha = 1.95$', rotation=00, fontsize=20)
    ax.errorbar(extended['Core dominance'].astype('float'),
                extended['LDR2-to-NVSS index'].astype('float'),
                markersize=15,
                yerr=extended['LDR2-to-NVSS index error'].astype('float'),
                xerr=extended['Core dominance error'].astype('float'),
                marker='s', ls='none', mfc='#faf3dd', color='k', mew=2,
                label='Extended', elinewidth=2, mec='k')
    bins = np.logspace(np.log10(0.01), np.log10(10), 7)
    print(bins)
    ax.set_ylabel(r'$\alpha_{144}^{1400}$', fontsize=30)
    ax.set_xlim(0.02, 10)
    ax.set_xscale('log')
    ax.set_xticks([])
    ax.set_ylim(-0.8, 0.3)
    # ax.set_yticks([])
    print(extended['LDR2-to-NVSS index'].astype('float'))
    ax.tick_params(axis='x', which='both', bottom=False, top=False,
                   labelbottom=False)
    plt.setp(ax.get_yticklabels(), fontsize=30)
    handles, labels = ax.get_legend_handles_labels()
    handles = [h[0] for h in handles]
    # ax.legend(handles, labels, ncol=2, loc='upper center', numpoints=1,
    #           fontsize=30, mode='expand',
    #           bbox_to_anchor=(0.5, 1.2, -0.1, 0), frameon=False)
    axx = plt.subplot(gs[1, 0])
    axx.hist(  # [compact['Core dominance'],
             extended['Core dominance'].astype('float'),  # ],
             histtype='stepfilled', color='#faf3dd',  # '#302f2c'
             edgecolor='k', lw=2, stacked=True, bins=bins)
    axx.set_yticks([0, 4, 8])
    axx.set_xlabel(r'$\rho_{144}$', fontsize=30)
    axx.set_xlim(0.02, 10)
    axx.set_xscale('log')
    plt.setp(axx.get_xticklabels(), fontsize=30)
    plt.setp(axx.get_yticklabels(), fontsize=30)
    gs.update(left=0.15)
    save_name = f'{my_folder}/spectral-index-against-core-dominance-hist.png'
    plt.savefig(f'{save_name}')
    os.system(f'convert {save_name} -trim {save_name}')  # removes whitespace
    print(f'gpicview {save_name} # for real!')
    plt.clf()

    # NOTE(review): everything below is unreachable because of this exit.
    import sys
    sys.exit()

    '''---------------------------------------------------------------------'''
    plt.figure(figsize=(20, 12)).patch.set_facecolor('white')
    plt.rcParams['font.family'] = 'serif'
    plt.rcParams['mathtext.fontset'] = 'dejavuserif'
    mpl.rcParams['xtick.major.size'] = 10
    mpl.rcParams['xtick.major.width'] = 2
    mpl.rcParams['xtick.minor.size'] = 5
    mpl.rcParams['xtick.minor.width'] = 2
    mpl.rcParams['ytick.major.size'] = 10
    mpl.rcParams['ytick.major.width'] = 2
    mpl.rcParams['ytick.minor.size'] = 5
    mpl.rcParams['ytick.minor.width'] = 2
    mpl.rcParams['axes.linewidth'] = 2
    gs = gridspec.GridSpec(2, 3, width_ratios=[4, 18, 0.4],
                           height_ratios=[18, 6])
    gs.update(wspace=0.05, hspace=0.05)
    ax = plt.subplot(gs[0, 1])
    extended.sort_values('Luminosity with NVSS index (W/Hz)', inplace=True,
                         ascending=False)
    x = np.log10(extended['Luminosity with NVSS index (W/Hz)'])
    # marker area scaled linearly with log-luminosity, then squared
    s = (((60 - 10) / (np.max(x) - np.min(x))) * (x - np.max(x)) + 60) ** 2
    hb = ax.scatter(extended['Extent (kpc)'].astype('float'),
                    extended['Core dominance'].astype('float'), s=s,
                    cmap='plasma', linewidths=2, edgecolors='k',
                    c=extended['LDR2-to-NVSS index'])
    # ax.set_yscale('log')
    ax.set_xlim(0, 700)
    ax.set_ylim(0, 1)  # 1e-2, 1e1)
    ax.set_xticks([])
    ax.tick_params(axis='y', which='both', left=False, right=False,
                   labelleft=False)
    # ax.plot()
    # ax.text(0.42, 0.44, r'$R = 1726$', rotation=0, fontsize=20)
    custom_lines = [Line2D([0], [0], mfc='w', mec='k', marker='o', lw=0,
                           markersize=np.sqrt(s.iloc[0]), mew=2),
                    Line2D([0], [0], marker='o', linestyle='None', mew=2,
                           markersize=np.sqrt(s.iloc[-1]), mec='k', mfc='w')]
    labels = [f'{extended["Luminosity with NVSS index (W/Hz)"].iloc[0]}',
              f'{extended["Luminosity with NVSS index (W/Hz)"].iloc[-1]}']
    # overwritten with hand-formatted legend labels
    labels = [r'$1.9 \times 10^{24}$ W Hz', r'$4.5 \times 10^{26}$ W Hz']
    plt.legend(custom_lines, labels, ncol=2, loc='upper center',
               bbox_to_anchor=(0.55, 1.2, -0.15, 0), mode='expand',
               numpoints=1, fontsize=30, frameon=False)
    cax = plt.subplot(gs[0, 2])
    cbar = plt.colorbar(hb, cax=cax, format='%.1f')
    hb.set_clim(vmin=-1, vmax=0)
    cbar.set_label(r'$\alpha$', fontsize=30)
    cbar.ax.tick_params(labelsize=30)
    axx = plt.subplot(gs[1, 1])
    axx.hist(extended['Extent (kpc)'].astype('float'), histtype='step',
             fill=True, color='#faf3dd', edgecolor='k', lw=2,
             bins=[0, 100, 200, 300, 400, 500, 600, 700])
    axx.axvline(np.average(extended['Extent (kpc)']), color='k', linewidth=2,
                linestyle='dashed')
    axx.set_xlim(0, 700)
    axx.yaxis.tick_right()
    axx.set_xlabel(r'$D$ (kpc)', fontsize=30)
    axx.set_yticks([0, 3, 6])
    plt.setp(axx.get_xticklabels(), fontsize=30)
    plt.setp(axx.get_yticklabels(), fontsize=30)
    axy = plt.subplot(gs[0, 0])
    logbins = np.logspace(np.log10(1e-2), np.log10(1e4), 13)
    axy.hist(extended['Core dominance'].astype('float'),
             orientation='horizontal', histtype='step', fill=True,
             color='#faf3dd', lw=2, ec='k', bins=logbins)
    axy.axhline(np.median(extended['Core dominance'].astype('float')),
                color='k', linestyle='dashed', linewidth=2)
    axy.set_yscale('log')
    axy.set_xticks([0, 3, 6, 9])
    axy.set_ylabel(r'$R$', fontsize=30)
    axy.set_ylim(1e-2, 1e1)
    plt.setp(axy.get_xticklabels(), fontsize=30)
    plt.setp(axy.get_yticklabels(), fontsize=30)
    # gs.update(bottom=0)
    save_name = f'{my_folder}/core-dominance-against-extent-hist.png'
    plt.savefig(f'{save_name}')
    os.system(f'convert {save_name} -trim {save_name}')  # removes whitespace
    print(f'gpicview {save_name}')
    plt.clf()
5,325,877
def gtfs_admin(request):
    """Admin page for adding new review categories (and potentially other
    features down the road)."""
    template_name = 'admin/gtfs_admin.html'
    return render(request, template_name)
5,325,878
def get_file_path():
    """
    Get current file's directory. Return `None` if there is no file path
    available.
    """
    # extract_variables() omits 'file_path' when the active view has no
    # backing file; dict.get mirrors the original KeyError -> None handling.
    variables = sublime.active_window().extract_variables()
    return variables.get('file_path')
5,325,879
def mutate_word(word):
    """Introduce a random change into the word: delete, swap, repeat, and
    add stray character. This may raise a ValueError (e.g. for words too
    short for the chosen mutation).
    """
    chars = list(word)
    mutation = random.randrange(4)
    if mutation == 0:
        # Remove one randomly chosen character.
        chars.pop(random.randrange(len(chars)))
    elif mutation == 1:
        # Swap a random adjacent pair of characters.
        pos = random.randrange(0, len(chars) - 1)
        chars[pos], chars[pos + 1] = chars[pos + 1], chars[pos]
    elif mutation == 2:
        # Duplicate a randomly chosen character in place.
        pos = random.randrange(0, len(chars))
        chars.insert(pos, chars[pos])
    else:
        # Insert a random lowercase letter at a random position.
        stray = chr(random.randint(ord('a'), ord('z')))
        chars.insert(random.randint(0, len(chars)), stray)
    return ''.join(chars)
5,325,880
def data_layer_property_from_dict(data_layer_property_dictionary: dict,
                                  client: cl.Client = None):
    """
    The method converts a dictionary of DataLayerProperty to a
    DataLayerProperty object.

    :param data_layer_property_dictionary: A dictionary that contains the
        keys of a DataLayerProperty.
    :type data_layer_property_dictionary: dict
    :param client: An IBM PAIRS client.
    :type client: ibmpairs.client.Client
    :rtype: ibmpairs.catalog.DataLayerProperty
    :raises Exception: If not a dict.
    """
    data_layer_property = DataLayerProperty.from_dict(data_layer_property_dictionary)
    # Fall back to the global PAIRS client when none is supplied explicitly.
    cli = common.set_client(input_client = client,
                            global_client = cl.GLOBAL_PAIRS_CLIENT)
    data_layer_property.client = cli
    return data_layer_property
5,325,881
def pcc_vector(v1, v2):
    """Pearson Correlation Coefficient for 2 vectors.

    Args:
        v1, v2: equal-length sequences of numbers.

    Returns:
        The PCC as a float in [-1, 1], or ``None`` when the vectors differ
        in length (kept for backward compatibility with existing callers).

    Raises:
        ZeroDivisionError: if either vector is empty or has zero variance
            (same behaviour as the original implementation).
    """
    # Guard clause replaces the original if/else nesting.
    if len(v1) != len(v2):
        return None
    n = len(v1)
    avg1 = 1.0 * sum(v1) / n
    avg2 = 1.0 * sum(v2) / n
    # Single pass over the paired values instead of three index loops.
    dxy = 0.0
    dx2 = 0.0
    dy2 = 0.0
    for a, b in zip(v1, v2):
        da = a - avg1
        db = b - avg2
        dxy += da * db
        dx2 += da * da
        dy2 += db * db
    return dxy / (dx2 * dy2) ** 0.5
5,325,882
def cypher_repr(obj):
    """ Generate the Cypher representation of an object.
    """
    # Write into an in-memory buffer and hand back its contents.
    buffer = StringIO()
    CypherWriter(buffer).write(obj)
    return buffer.getvalue()
5,325,883
def silence():
    """Silence the screen reader (if currently speaking)."""
    # Delegates directly to the Tolk screen-reader abstraction library.
    tolk.silence()
5,325,884
def firstlastmile_pipeline(**kwargs):
    """The first and last mile pipeline attaches any unattached elements to
    ensure a fully-connected graph.

    Builds a kedro-style Pipeline from four groups of nodes:
      * first-mile edges from production assets (oilfields, oilwells,
        coalmines) to the nearest transport infrastructure;
      * last-mile edges into demand points (powerstations, cities) and
        shipping-route endpoints (ports, LNG terminals);
      * edges stitching shipping routes across the International Date Line;
      * pass-through ("null") nodes that forward every sjoin_* dataset to
        its flmile_* counterpart unchanged.
    """
    tags = ['flmile']

    firstmile_nodes = [
        node(
            firstmile_edge,
            ['sjoin_oilfields_data','sjoin_edges_pipelines_oilfields','sjoin_ports_data','sjoin_cities_data','sjoin_pipelines_data'],
            'flmile_edges_oilfields',
            tags=tags+['firstmile','firstmile_oilfields']
        ), # assets, existing_edges, closest port, city, [pipeline/railway]
        node(
            firstmile_edge,
            ['sjoin_oilwells_data','sjoin_edges_pipelines_oilwells','sjoin_ports_data','sjoin_cities_data','sjoin_pipelines_data'],
            'flmile_edges_oilwells',
            tags=tags+['firstmile','firstmile_oilwells']
        ), # assets, existing_edges, closest port, city, [pipeline/railway]
        node(
            firstmile_edge,
            ['sjoin_coalmines_data','sjoin_edges_railways_coalmines','sjoin_ports_data','sjoin_cities_data','sjoin_railways_data'],
            'flmile_edges_coalmines',
            tags=tags+['firstmile','firstmile_coalmines']
        ), # assets, existing_edges, closest port, city, [pipeline/railway]
    ]

    lastmile_nodes = [
        node(
            powerstations_lastmile,
            ['sjoin_powerstations_data','sjoin_edges_pipelines_powerstations','sjoin_edges_railways_powerstations','sjoin_railways_data','sjoin_pipelines_data','sjoin_ports_data','sjoin_cities_data'],
            # powerstations, ps_edges_pipelines, ps_edges_railways, railways, pipelines, ports, cities
            'flmile_edges_powerstations',
            tags=tags+['lastmile','lastmile_powerstations']
        )
    ]

    lastmile_nodes += [
        node(
            cities_delauney,
            ['sjoin_cities_data','ne'],
            'flmile_edges_cities',
            tags = tags+['lastmile','lastmile_cities']
        ),
        node(
            shippingroutes_lastmile,
            ['sjoin_edges_shippingroutes_ports','sjoin_shippingroutes_data','sjoin_ports_data'],
            'flmile_edges_shippingroutes_ports',
            tags= tags+['lastmile','lastmile_shippingroutes','lastmile_shippingroutes_ports']
        ),
        node(
            shippingroutes_lastmile,
            ['sjoin_edges_shippingroutes_lngterminals','sjoin_shippingroutes_data','sjoin_lngterminals_data'],
            'flmile_edges_shippingroutes_lngterminals',
            tags= tags+['lastmile','lastmile_shippingroutes','lastmile_shippingroutes_lng']
        ),
    ]

    IDL_nodes = [
        node(
            connect_IDL,
            'sjoin_shippingroutes_data',
            'flmile_idl_edges',
            tags=tags+['flmile_idl']
        )
    ]

    # Forward every per-sector dataset unchanged; shippingroutes edge pairs
    # are excluded because they are rebuilt by the lastmile nodes above.
    null_nodes = [node(null_forward, f'sjoin_{sector}_data', f'flmile_{sector}_data', tags = tags+['flm_null',f'flm_null_{sector}']) for sector in ALL_SECTORS]
    null_nodes += [node(null_forward, f'sjoin_edges_{sector1}_{sector2}', f'flmile_edges_{sector1}_{sector2}', tags=tags+['flm_null',f'flm_null_{sector1}_{sector2}']) for sector1, sector2 in SJOIN_PAIRS if sector1!='shippingroutes']

    return Pipeline(firstmile_nodes + lastmile_nodes + IDL_nodes + null_nodes)
5,325,885
def photo_new(request, cast: Cast):
    """
    Add a new Photo to a cast.

    GET renders an empty upload form; POST validates it, saves the photo
    against the cast, and redirects to the photo's detail page.
    """
    if request.method != 'POST':
        form = CastPhotoForm()
    else:
        form = CastPhotoForm(request.POST, request.FILES)
        if form.is_valid():
            photo = form.save(commit=False)
            photo.cast = cast
            photo.save()
            messages.success(request, f'Photo has been added')
            return redirect('cast_photo_detail', slug=cast.slug, pk=photo.pk)
    # Falls through with a bound-but-invalid form on failed POSTs.
    return render(request, 'castadmin/photo_edit.html', {
        'cast': cast,
        'form': form,
    })
5,325,886
def decode(argument: str) -> tuple[list[int], ...]:
    """Decode argument string from command line

    :param argument: argument string, groups of digits separated by '-'
    :return: tuple with one list of digits per '-'-separated group
    """
    # Split on '-' and convert every character of each group to its digit.
    return tuple([int(digit) for digit in group]
                 for group in argument.split('-'))
5,325,887
def initialize_weights(*models):
    """
    Initialize Model Weights.

    Conv2d/Linear layers receive Kaiming-normal weights with zeroed biases;
    BatchNorm2d layers receive unit weights and zero biases.
    """
    for net in models:
        for layer in net.modules():
            if isinstance(layer, (nn.Conv2d, nn.Linear)):
                nn.init.kaiming_normal_(layer.weight)
                if layer.bias is not None:
                    layer.bias.data.zero_()
            elif isinstance(layer, nn.BatchNorm2d):
                layer.weight.data.fill_(1)
                layer.bias.data.zero_()
5,325,888
def zip_files(files, zipfilename, mode='a', compressed=True, fullpath=True):
    """
    Compress/archive a given list of files into a zip file. The mode can be
    either write "w" or append "a". Duplicates will be ommited in append mode.
    """
    import zipfile
    compression = zipfile.ZIP_DEFLATED if compressed else zipfile.ZIP_STORED
    with zipfile.ZipFile(zipfilename, mode=mode, compression=compression) as archive:
        for entry in files:
            if fullpath:
                # Resolve to an absolute path; the archive member name drops
                # the leading character (the path separator).
                entry = get_path(entry)
                member_name = entry[1:]
            else:
                member_name = entry
            if member_name not in archive.namelist():
                archive.write(entry)
    return
5,325,889
def main(duration=0, rate=3300, gain=16, verbose=False, **kwargs):
    """
    Take data from the ADS1015 for duration seconds (or until stdin receives
    EOF, if duration is 0) at rate and gain. Default argument values may not
    be the same as for the command line arguments. See the ArgumentParser
    instance below.

    Kwargs:
        lc_channel = the channel of the ads1015 from which to read load cell
            data.
        comparator = Whether to use the ALERT/RDY pin instead of time.sleep().
            This is highly recommended, provided the hardware is setup.
        comparator_pin = the GPIO pin from which to yoink the comparator.
            This is specified as a BCM number.
        battery_check_freq = frequency to check the battery level. Not exact!
            1 Hz by default... but setting 2 Hz results in ~ 1 Hz measurements.
        battery_channel = obvious.

    Sends the data to stdout.

    We're using common mode, not differential mode. At 3300 samples per
    second, this generates 2.2 MB of ascii per minute, which is probably fine.
    """
    if verbose:
        print("Debugging information:", file=stderr)
        for key in kwargs:
            print(key + ": {0}".format(kwargs[key]), file=stderr)
    adc = Adafruit_ADS1x15.ADS1015()
    # Pull optional settings out of kwargs, with defaults.
    lc_channel = kwargs['lc_channel'] if 'lc_channel' in kwargs else 0
    use_comp = kwargs['comparator'] if 'comparator' in kwargs else False
    differential = kwargs['differential'] if 'differential' in kwargs else False
    battery_check_freq = (kwargs['battery_check_freq']
                          if 'battery_check_freq' in kwargs else 1)
    battery_channel = (kwargs['battery_channel']
                       if 'battery_channel' in kwargs else 1)
    # Number of load-cell samples between battery checks.
    battery_counter_limit = rate / battery_check_freq
    assert battery_counter_limit > 0
    pretty_time = SimpleTimer()
    print(TITLE_LINE, file=stdout)
    if duration == 0:
        # The test will run until ctrl-D is sent
        end_condition = EndCondition()
        end_condition.start()
        keep_going = lambda: not end_condition.finished
    else:
        # The test will run for the specified number of seconds.
        time_stop = time.time() + duration
        keep_going = lambda: time.time() < time_stop

    # Setup the ADC. See ADS1015 datasheet, pages 12 and 17.
    # A couple things are different depending on whether we're measuring
    # between lc_channel and ground, or between lc_channel and some other
    # analogue input (i.e. kwargs['diff_channel']).
    if differential:
        print("Also differential mode", file=stderr)
        # lc_channel, until this next part executes, has represented a single
        # analog input pin on the ADS1015. To measure the difference between
        # two analog inputs, we need to change it into a special value that's
        # meaningful only to the ads_1015 hardware. See the ADS1015 data
        # sheet, page 16.
        diff_channel = kwargs['diff_channel'] if 'diff_channel' in kwargs else None
        assert 0 <= diff_channel <= 3 and diff_channel != lc_channel
        if lc_channel == 0 and diff_channel == 1:
            lc_channel = 0
        elif lc_channel == 0 and diff_channel == 3:
            lc_channel = 1
        elif lc_channel == 1 and diff_channel == 3:
            lc_channel = 2
        elif lc_channel == 2 and diff_channel == 3:
            lc_channel = 3
        else:
            raise ValueError()
        if use_comp:
            # if we're using the comparator
            lc_start_adc_function = adc.start_adc_difference_comparator
        else:
            lc_start_adc_function = adc.start_adc_difference
    else:
        # Single ended mode is the opposite of differential mode, of course.
        print("Also single-ended mode", file=stderr)
        if use_comp:
            lc_start_adc_function = adc.start_adc_comparator
        else:
            lc_start_adc_function = adc.start_adc

    if use_comp:
        # Set up the test, using the ALERT/RDY pin in "conversion-ready" mode.
        print("Comparator mode", file=stderr)
        ready_pin = kwargs['comparator_pin']
        # Setup and fish for debilitating GPIO exceptions:
        GPIO.setup(ready_pin, Adafruit_GPIO.IN,
                   pull_up_down=Adafruit_GPIO.PUD_OFF)
        wait_function = lambda: GPIO.wait_for_edge(ready_pin,
                                                   Adafruit_GPIO.FALLING)
        # Set up the test, using the ALERT/RDY pin in "conversion-ready" mode.
        # We store these functions because we will need to switch modes
        # repeatedly in order to read both the
        start_lc = lambda: lc_start_adc_function(
            lc_channel,
            -1, 1,  # Hi_thresh < 0 < Lo_thresh --> conversion ready mode
            data_rate=rate,
            gain=gain,
            latching=False,  # latching is ignored
            num_readings=1,
            traditional=False,
            active_low=True,
            wait_function=wait_function)
        start_battery = lambda: adc.start_adc_comparator(
            battery_channel,
            -1, 1,  # Same idea.
            data_rate=BATTERY_RATE,
            gain=BATTERY_GAIN,
            latching=False,  # latching is ignored
            num_readings=1,
            traditional=False,
            active_low=True,
            wait_function=wait_function)
        do_cleanup = lambda: GPIO.cleanup(pin=ready_pin)
    else:
        # We have no ALERT/RDY pin, so set up the test to use
        # time.sleep() instead of GPIO interrupts.
        # TODO - test whether the logic to assign lc_start_adc_function
        # earlier is enough to make this code work with both differential
        # and non-differential measurements.
        sleeper = Sleeper(rate, lambda: (time.time(), adc.get_last_result()))
        wait_function = sleeper.sleep  # sleeps roughly the right amount
        start_lc = lambda: lc_start_adc_function(
            lc_channel, gain=gain, data_rate=rate)
        start_battery = lambda: adc.start_adc(
            battery_channel, gain=gain, data_rate=BATTERY_RATE)
        do_cleanup = lambda: None

    # This is the main loop. All the lambdas and complicated stuff above
    # happened so that this loop could be clear and readable.
    start_lc()
    battery_check_counter = 0
    try:
        while keep_going():
            try:
                if battery_check_counter > battery_counter_limit:
                    # Periodically interleave a battery reading, then switch
                    # the ADC back to the load cell via start_lc.
                    read_battery(adc, BATTERY_SAMPLE_SIZE, start_lc=start_lc,
                                 start_battery=start_battery, file=stdout,
                                 wait_function=wait_function,
                                 get_time=pretty_time)
                    battery_check_counter = 0
                battery_check_counter += 1
                wait_function()
                print(LC_DATA_FORMAT.format(pretty_time(),
                                            adc.get_last_result()),
                      file=stdout)
            except IOError:
                # I2C hiccup: optionally log it and restart the conversion.
                if verbose:
                    traceback.print_exc()
                start_lc()
                continue
    finally:
        do_cleanup()
        adc.stop_adc()
        print("Cleaned up.", file=stderr)
5,325,890
def generate_data(p=11, n=400):
    """
    Generates non-linear multivariate data of dimension 'p'.
    The data is linear in parameters of the type:
        y = b0 + b1 * x + b2*x^2 + ... + bp * x^p

    Args:
        :param p: int dimensions
        :param n: int number of samples
    Returns:
        (X, y, true_params, x, y_true, e) where X is the (n, p+1) design
        matrix of powers of x and e is Gaussian noise.
    """
    true_params = np.random.uniform(low=0.0, high=1.0, size=p + 1)
    x = np.sort(np.random.uniform(low=-1.0, high=1.0, size=n))
    # Design matrix with columns [1, x, x**2, ..., x**p].
    X = np.ones((n, p + 1), dtype=float)
    for power in range(1, p + 1):
        X[:, power] = x ** power
    e = np.random.normal(loc=0.0, scale=0.2, size=n)
    y_true = X.dot(true_params)
    y = y_true + e
    return X, y, true_params, x, y_true, e
5,325,891
def get_search_apps():
    """Gets all registered search apps."""
    # _load_search_apps() returns a mapping; expose its values as an
    # immutable tuple so callers cannot mutate the registry.
    return tuple(_load_search_apps().values())
5,325,892
def volume_tetrahedron(
    point_a: array_like, point_b: array_like, point_c: array_like, point_d: array_like
) -> np.float64:
    """
    Return the volume of a tetrahedron defined by four points.

    The points are the vertices of the tetrahedron. They must be 3D or less.

    Parameters
    ----------
    point_a, point_b, point_c, point_d : array_like
        The four vertices of the tetrahedron.

    Returns
    -------
    np.float64
        The volume of the tetrahedron.

    References
    ----------
    http://mathworld.wolfram.com/Tetrahedron.html

    Examples
    --------
    >>> from skspatial.measurement import volume_tetrahedron

    >>> volume_tetrahedron([0, 0], [3, 2], [-3, 5], [1, 8])
    0.0

    >>> volume_tetrahedron([0, 0, 0], [2, 0, 0], [1, 1, 0], [0, 0, 1]).round(3)
    0.333

    >>> volume_tetrahedron([0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]).round(3)
    0.167

    """
    # Edges from vertex A; AB is lifted to 3D so the scalar triple product
    # |AB . (AC x AD)| is well defined.
    edge_ab = Vector.from_points(point_a, point_b).set_dimension(3)
    edge_ac = Vector.from_points(point_a, point_c)
    edge_ad = Vector.from_points(point_a, point_d)

    normal = edge_ac.cross(edge_ad)

    # The triple product equals six times the signed volume.
    return 1 / 6 * abs(edge_ab.dot(normal))
5,325,893
def set_time_mode(time_exe = False):
    """Enable/Disable code timing

    Parameters
    ----------
    time_exe : boolean
        Internal mode setting

    Returns
    -------
    None
    """
    # Forward the flag to the global GLContainer state holder.
    GLContainer.set_time_mode(time_exe)
5,325,894
def lemmatizer(word):
    """Return the lemmatized word.

    Words shorter than 4 characters are returned unchanged; longer words
    are lemmatized first as a noun ("n") and then as a verb ("v").
    (The original docstring said "length >= 5", which contradicted the
    ``len(word) < 4`` check below.)
    """
    if len(word)<4:
        return word
    return wnl.lemmatize(wnl.lemmatize(word, "n"), "v")
5,325,895
def kolmogn(n, x, cdf=True):
    """Computes the CDF for the two-sided Kolmogorov-Smirnov distribution.

    The two-sided Kolmogorov-Smirnov distribution has as its CDF Pr(D_n <= x),
    for a sample of size n drawn from a distribution with CDF F(t), where
    D_n &= sup_t |F_n(t) - F(t)|, and
    F_n(t) is the Empirical Cumulative Distribution Function of the sample.

    Parameters
    ----------
    n : integer, array_like
        the number of samples
    x : float, array_like
        The K-S statistic, float between 0 and 1
    cdf : bool, optional
        whether to compute the CDF(default=true) or the SF.

    Returns
    -------
    cdf : ndarray
        CDF (or SF if cdf is False) at the specified locations.

        The return value has shape the result of numpy broadcasting n and x.

    Raises
    ------
    ValueError
        If an element of n is not an integral value.
    """
    # NOTE: np.float / np.bool were deprecated aliases of the builtins and
    # were removed in NumPy 1.24; use the concrete scalar types instead.
    it = np.nditer([n, x, cdf, None],
                   op_dtypes=[None, np.float64, np.bool_, np.float64])
    for _n, _x, _cdf, z in it:
        if np.isnan(_n):
            # Propagate NaN sample sizes straight through to the output.
            z[...] = _n
            continue
        if int(_n) != _n:
            raise ValueError(f'n is not integral: {_n}')
        z[...] = _kolmogn(int(_n), _x, cdf=_cdf)
    result = it.operands[-1]
    return result
5,325,896
def join_legacy_read_path(sample_path: str, suffix: int) -> str:
    """
    Create a path string for a sample read file using the old file name
    convention (eg. reads_1.fastq).

    :param sample_path: the path to the sample directory
    :param suffix: the read file suffix
    :return: the read path

    """
    filename = f"reads_{suffix}.fastq"
    return os.path.join(sample_path, filename)
5,325,897
def fixture_audio_playback(fixture_signal_generator):
    """
    Makes sure that there is some audio being played to our null sink.

    Attaches the signal generator, waits one second so audio is actually
    flowing, then yields the generator to the test body.
    """
    sg = fixture_signal_generator

    with sg.attach_generator():
        # Give the generator a moment to start producing audio before the
        # dependent test runs.
        time.sleep(1.0)  # TODO: replace the fixed sleep with a readiness check
        yield sg
5,325,898
def watermark_pdf(input_file: str, wm_text: str, pages: Tuple = None):
    """
    Adds watermark to a pdf file.

    :param input_file: path to the source PDF file
    :param wm_text: text to stamp onto the pages
    :param pages: optional collection of page numbers (as strings) to
        watermark; when falsy, every page is watermarked
    :return: (success, pdf_reader, pdf_writer); on failure the tuple is
        (False, None, None)
    """
    import io  # local import so the file-level import block is untouched

    result, wm_buffer = create_watermark(wm_text)
    if not result:
        # The original fell through here and implicitly returned None,
        # which crashed callers unpacking three values; keep the failure
        # path consistent with the except branch below.
        return False, None, None

    wm_reader = PdfFileReader(wm_buffer)

    # Read the whole file up front so the handle can be closed right away
    # (the original leaked the open file object for the reader's lifetime).
    with open(input_file, 'rb') as f:
        pdf_buffer = io.BytesIO(f.read())
    pdf_reader = PdfFileReader(pdf_buffer, strict=False)
    pdf_writer = PdfFileWriter()

    try:
        for page_num in range(pdf_reader.getNumPages()):
            # If required to watermark specific pages not all the document pages
            if pages and str(page_num) not in pages:
                continue
            page = pdf_reader.getPage(page_num)
            page.mergePage(wm_reader.getPage(0))
            pdf_writer.addPage(page)
    except Exception as e:
        print("Exception = ", e)
        return False, None, None

    return True, pdf_reader, pdf_writer
5,325,899