content (string, lengths 22 to 815k)
id (int64, 0 to 4.91M)
def ft32m3(ft3):
    """ft^3 -> m^3"""
    return 0.028316847*ft3
5,335,800
def get_audience(request):
    """
    Uses Django settings to format the audience.

    To figure out the audience to use, it does this:

        1. If settings.DEBUG is True and settings.SITE_URL is not set or
           empty, then the domain on the request will be used. This is
           *not* secure!
        2. Otherwise, settings.SITE_URL is checked for the request domain
           and an ImproperlyConfigured error is raised if it is not found.

    Examples of settings.SITE_URL::

        SITE_URL = 'http://127.0.0.1:8001'
        SITE_URL = 'https://example.com'
        SITE_URL = 'http://example.com'
        SITE_URL = (
            'http://127.0.0.1:8001',
            'https://example.com',
            'http://example.com'
        )
    """
    req_proto = 'https://' if request.is_secure() else 'http://'
    req_domain = request.get_host()
    req_url = '%s%s' % (req_proto, req_domain)

    site_url = getattr(settings, 'SITE_URL', None)
    if not site_url:
        if settings.DEBUG:
            return req_url
        else:
            raise ImproperlyConfigured('`SITE_URL` must be set. See '
                                       'documentation for django-browserid')

    if isinstance(site_url, basestring):
        site_url = [site_url]

    try:
        url_iterator = iter(site_url)
    except TypeError:
        raise ImproperlyConfigured('`SITE_URL` is not a string or an iterable')

    if req_url not in url_iterator:
        raise ImproperlyConfigured('request `{0}`, was not found in SITE_URL `{1}`'
                                   .format(req_url, site_url))

    return req_url
5,335,801
def visit(obj, visitor: BooleanExpressionVisitor[T]) -> T:
    """A generic function for applying a boolean expression visitor to any point within an expression

    The function traverses the expression in post-order fashion

    Args:
        obj (BooleanExpression): An instance of a BooleanExpression
        visitor (BooleanExpressionVisitor[T]): An instance of an implementation of the generic
            BooleanExpressionVisitor base class

    Raises:
        NotImplementedError: If attempting to visit an unsupported expression
    """
    raise NotImplementedError(f"Cannot visit unsupported expression: {obj}")
5,335,802
def max_shading_elevation(total_collector_geometry, tracker_distance,
                          relative_slope):
    """Calculate the maximum elevation angle for which shading can occur.

    Parameters
    ----------
    total_collector_geometry: :py:class:`Shapely Polygon <Polygon>`
        Polygon corresponding to the total collector area.
    tracker_distance: array-like
        Distances between neighboring trackers and the reference tracker.
    relative_slope: array-like
        Slope between neighboring trackers and reference tracker. A positive
        slope means neighboring collector is higher than reference collector.

    Returns
    -------
    max_shading_elevation: float
        The highest solar elevation angle for which shading can occur for a
        given field layout and collector geometry [degrees]

    Note
    ----
    The maximum shading elevation angle is calculated for all neighboring
    trackers using the bounding box geometry and the bounding circle. For
    rectangular collectors (as approximated when using the bounding box),
    the maximum shading elevation occurs when one of the upper corners of the
    projected shading geometry and the lower corner of the reference collector
    intersects. For circular collectors (as approximated by the bounding
    circle), the maximum elevation occurs when the projected shadow is
    directly below the reference collector and the two circles are tangent to
    each other. The maximum elevation is calculated using both the bounding
    box and the bounding circle, and the minimum of these two elevations is
    returned. For rectangular and circular collectors, the maximum elevation
    is exact, whereas for other geometries, the returned elevation is a
    conservative estimate.
    """
    # Calculate extent of box bounding the total collector geometry
    x_min, y_min, x_max, y_max = total_collector_geometry.bounds
    # Collector dimensions
    x_dim = x_max - x_min
    y_dim = y_max - y_min
    delta_gamma_rad = np.arcsin(x_dim / tracker_distance)
    # Calculate max elevation based on the bounding box (rectangular)
    max_elevations_rectangular = np.rad2deg(np.arcsin(
        y_dim * np.cos(np.deg2rad(relative_slope)) /
        (tracker_distance * np.cos(delta_gamma_rad)))) + relative_slope
    # Calculate max elevations using the minimum bounding diameter (circular)
    D_min = _calculate_min_tracker_spacing(total_collector_geometry)
    max_elevations_circular = np.rad2deg(np.arcsin(
        (D_min * np.cos(np.deg2rad(relative_slope))) / tracker_distance)) \
        + relative_slope
    # Compute max elevation
    max_elevation = np.nanmin([np.nanmax(max_elevations_rectangular),
                               np.nanmax(max_elevations_circular)])
    return max_elevation
5,335,803
def tensor_scatter_add(input_x, indices, updates): """ Creates a new tensor by adding the values from the positions in `input_x` indicated by `indices`, with values from `updates`. When multiple values are given for the same index, the updated result will be the sum of all values. This operation is almost equivalent to using ScatterNdAdd, except that the updates are applied on output `Tensor` instead of input `Parameter`. The last axis of `indices` is the depth of each index vectors. For each index vector, there must be a corresponding value in `updates`. The shape of `updates` should be equal to the shape of `input_x[indices]`. For more details, see use cases. Note: If some values of the `indices` are out of bound, instead of raising an index error, the corresponding `updates` will not be updated to `input_x`. Args: - **input_x** (Tensor) - The target tensor. The dimension of input_x must be no less than indices.shape[-1]. - **indices** (Tensor) - The index of input tensor whose data type is int32 or int64. The rank must be at least 2. - **updates** (Tensor) - The tensor to update the input tensor, has the same type as input, and updates. Shape should be equal to indices.shape[:-1] + input_x.shape[indices.shape[-1]:]. Returns: Tensor, has the same shape and type as `input_x`. Raises: TypeError: If dtype of `indices` is neither int32 nor int64. ValueError: If length of shape of `input_x` is less than the last dimension of shape of `indices`. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore >>> import numpy as np >>> from mindspore import Tensor, nn >>> from mindspore import ops >>> input_x = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mindspore.float32) >>> indices = Tensor(np.array([[0, 0], [0, 0]]), mindspore.int32) >>> updates = Tensor(np.array([1.0, 2.2]), mindspore.float32) >>> output = ops.tensor_scatter_add(input_x, indices, updates) >>> print(output) [[ 3.1 0.3 3.6] [ 0.4 0.5 -3.2]] """ return tensor_scatter_add_(input_x, indices, updates)
5,335,804
def test100():
    """
    CIFAR-100 test set creator.

    It returns a reader creator, each sample in the reader is image pixels in
    [0, 1] and label in [0, 99].

    :return: Test reader creator.
    :rtype: callable
    """
    return reader_creator(
        paddle.v2.dataset.common.download(CIFAR100_URL, 'cifar', CIFAR100_MD5),
        'test')
5,335,805
def test_s3_16_2v03_s3_16_2v03i(mode, save_output, output_format):
    """
    tests restriction facet in intervening union
    """
    assert_bindings(
        schema="ibmData/valid/S3_16_2/s3_16_2v03.xsd",
        instance="ibmData/valid/S3_16_2/s3_16_2v03.xml",
        class_name="Root",
        version="1.1",
        mode=mode,
        save_output=save_output,
        output_format=output_format,
        structure_style="filenames",
    )
5,335,806
def setupradiusserver(step):
    """ This function is to setup radius server in the ops-host image """
    h1 = hosts[0]
    h2 = hosts[1]
    switchip = get_switch_ip(step)
    print("SwitchIP:" + switchip)

    out = h2("sed -i \"76s/steve/steve/\" /etc/freeradius/users")
    out = h2("sed -i \"76s/#steve/steve/\" /etc/freeradius/users")
    out = h2("sed -i \"77s/Framed-User/Nas-Prompt-User/\" /etc/freeradius/users")
    out = h2("sed -i \"77s/#//\" /etc/freeradius/users")
    out = h2("sed -i \"196s/192.168.0.0/" + switchip + "/\" "
             "/etc/freeradius/clients.conf")
    out = h2("sed -i \"196,199s/#//\" /etc/freeradius/clients.conf")
    h2("service freeradius stop")
    sleep(2)
    out = h2("service freeradius start")
    assert "fail" not in out, "Failed to start freeradius on host"

    print("SwitchIP:" + switchip)
    out = h1("sed -i \"76s/steve/steve/\" /etc/freeradius/users")
    out = h1("sed -i \"76s/#steve/steve/\" /etc/freeradius/users")
    out = h1("sed -i \"77s/Framed-User/Nas-Prompt-User/\" /etc/freeradius/users")
    out = h1("sed -i \"77s/#//\" /etc/freeradius/users")
    out = h1("sed -i \"196s/192.168.0.0/0.0.0.0/\" "
             "/etc/freeradius/clients.conf")
    out = h1("sed -i \"196s/24/0/\" "
             "/etc/freeradius/clients.conf")
    out = h1("sed -i \"196,199s/#//\" /etc/freeradius/clients.conf")
    h1("service freeradius stop")
    sleep(2)
    out = h1("service freeradius start")
    assert "fail" not in out, "Failed to start freeradius on host"
    step("Configured radius server on host\n")
5,335,807
def extract_mapped_read_records(fastq_fwd, fastq_rev, read_ids, paired_out, unpaired_out, unaligned_out):
    """
    Split FASTQ files into a FASTQ with the aligned and one with the unaligned reads.

    :param fastq_fwd: Forward FASTQ file used for creating the SAM file.
    :param fastq_rev: Reverse FASTQ file used for creating the SAM file.
    :param read_ids: Read IDs that passed the MAPQ threshold.
    :param paired_out: FASTQ file to write paired aligned reads to.
    :param unpaired_out: FASTQ file to write singleton aligned reads to.
    :param unaligned_out: FASTQ file to write unaligned reads to.
    """
    logging.info("Getting FASTQ records that have mapped fragments ...")
    fq_format = "fastq"
    mappable = 0
    records = 0
    with gzip.open(fastq_fwd, "rt") as fwd_handle, gzip.open(fastq_rev, "rt") as rev_handle:
        for (fwd_record, rev_record) in zip(SeqIO.parse(fwd_handle, fq_format),
                                            SeqIO.parse(rev_handle, fq_format)):
            records += 2
            if fwd_record.id in read_ids["fwd"] and rev_record.id in read_ids["rev"]:
                mappable += 2
                # write interleaved
                SeqIO.write(fwd_record, paired_out, fq_format)
                SeqIO.write(rev_record, paired_out, fq_format)
                continue
            if fwd_record.id in read_ids["fwd"]:
                mappable += 1
                SeqIO.write(fwd_record, unpaired_out, fq_format)
            else:
                SeqIO.write(fwd_record, unaligned_out, fq_format)
            if rev_record.id in read_ids["rev"]:
                mappable += 1
                SeqIO.write(rev_record, unpaired_out, fq_format)
            else:
                SeqIO.write(rev_record, unaligned_out, fq_format)

    logging.info(f"{mappable:d} records extracted from {records:d} total ({mappable / records * 100:.1f}%).")
5,335,808
def clear():
    """Cleanup Moler's configuration"""
    global loaded_config
    loaded_config = ["NOT_LOADED_YET"]
    conn_cfg.clear()
    dev_cfg.clear()
5,335,809
def _calculate_target_matrix_dimension(m, kernel, paddings, strides):
    """
    Calculate the target matrix dimension.

    Parameters
    ----------
    m: ndarray
        2d Matrix
    kernel: ndarray
        2d Convolution kernel
    paddings: tuple
        Number of padding in (row, height) on one side.
        If you put 2 padding on the left and 2 padding on the right, specify 2.
    strides: tuple
        Step size in (row, height)

    Returns
    -------
    out: tuple
        Tuple containing (number of rows, number of columns)

    Raises
    ------
    ValueError
        If kernel size is greater than m in any axis after padding
    """
    source_height = m.shape[0]
    source_width = m.shape[1]

    padding_row = paddings[0]
    padding_column = paddings[1]

    kernel_height = kernel.shape[0]
    kernel_width = kernel.shape[1]

    if kernel_height > (source_height + padding_row) or kernel_width > (source_width + padding_column):
        raise ValueError("Kernel size is larger than the matrix")

    row_stride = strides[0]
    col_stride = strides[1]

    # (source_height - kernel_height)/strides[0] is how many steps you can go down.
    # + 1 to include the start position.
    target_height = int((source_height + padding_row - kernel_height) / row_stride) + 1
    target_width = int((source_width + padding_column - kernel_width) / col_stride) + 1

    return (target_height, target_width)
5,335,810
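A minimal usage sketch of the dimension helper above, assuming NumPy is imported as np and the function is in scope; the hypothetical 5x5 input with a 3x3 kernel, one unit of padding and stride 2 gives floor((5 + 1 - 3) / 2) + 1 = 2 along each axis.

import numpy as np

m = np.zeros((5, 5))          # input matrix
kernel = np.ones((3, 3))      # convolution kernel
print(_calculate_target_matrix_dimension(m, kernel, paddings=(1, 1), strides=(2, 2)))
# (2, 2)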
def any_email():
    """
    Return random email

    >>> import re
    >>> result = any_email()
    >>> type(result)
    <type 'str'>
    >>> re.match(r"(?:^|\s)[-a-z0-9_.]+@(?:[-a-z0-9]+\.)+[a-z]{2,6}(?:\s|$)", result, re.IGNORECASE) is not None
    True
    """
    return "%s@%s.%s" % (any_string(max_length=10),
                         any_string(max_length=10),
                         any_string(min_length=2, max_length=3))
5,335,811
def transform_pts_base_to_stitched_im(pts):
    """Project 3D points in base frame to the stitched image

    Args:
        pts (np.array[3, N]): points (x, y, z)

    Returns:
        pts_im (np.array[2, N])
        inbound_mask (np.array[N])
    """
    im_size = (480, 3760)

    # to image coordinate
    pts_rect = pts[[1, 2, 0], :]
    pts_rect[:2, :] *= -1

    # to pixel
    horizontal_theta = np.arctan2(pts_rect[0], pts_rect[2])
    horizontal_percent = horizontal_theta / (2 * np.pi) + 0.5
    x = im_size[1] * horizontal_percent
    y = (
        485.78 * pts_rect[1] / pts_rect[2] * np.cos(horizontal_theta)
        + 0.4375 * im_size[0]
    )

    # horizontal_theta = np.arctan(pts_rect[0, :] / pts_rect[2, :])
    # horizontal_theta += (pts_rect[2, :] < 0) * np.pi
    # horizontal_percent = horizontal_theta / (2 * np.pi)
    # x = ((horizontal_percent * im_size[1]) + 1880) % im_size[1]
    # y = (
    #     485.78 * (pts_rect[1, :] / ((1 / np.cos(horizontal_theta)) * pts_rect[2, :]))
    # ) + (0.4375 * im_size[0])

    # x is always in bound by cylindrical parametrization
    # y is always at the lower half of the image, since laser is lower than the camera
    # thus only one boundary needs to be checked
    inbound_mask = y < im_size[0]

    return np.stack((x, y), axis=0).astype(np.int32), inbound_mask
5,335,812
def get_equal_static_values(*args):
    """get_equal_static_values(FileConstHandle input, FileConstHandle out) -> bool"""
    return _RMF.get_equal_static_values(*args)
5,335,813
def quaternion_to_rotation_matrix(quaternion):
    """
    This converts a quaternion representation of an orientation to
    a rotation matrix. The input is a 4-component numpy array in the
    order [w, x, y, z], and the output is a 3x3 matrix stored as a
    2D numpy array. We follow the approach in "3D Math Primer for
    Graphics and Game Development" by Dunn and Parberry.
    """
    w = quaternion[0]
    x = quaternion[1]
    y = quaternion[2]
    z = quaternion[3]

    R = np.empty((3, 3), dtype=np.float64)

    R[0][0] = 1.0 - 2.0*y**2 - 2.0*z**2
    R[0][1] = 2.0*x*y + 2.0*w*z
    R[0][2] = 2.0*x*z - 2.0*w*y
    R[1][0] = 2.0*x*y - 2.0*w*z
    R[1][1] = 1.0 - 2.0*x**2 - 2.0*z**2
    R[1][2] = 2.0*y*z + 2.0*w*x
    R[2][0] = 2.0*x*z + 2.0*w*y
    R[2][1] = 2.0*y*z - 2.0*w*x
    R[2][2] = 1.0 - 2.0*x**2 - 2.0*y**2

    return R
5,335,814
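A small sanity-check sketch for the converter above, assuming NumPy as np: the identity quaternion [1, 0, 0, 0] maps to the identity matrix, and any unit quaternion yields an orthonormal matrix with determinant +1.

import numpy as np

q_identity = np.array([1.0, 0.0, 0.0, 0.0])
print(quaternion_to_rotation_matrix(q_identity))     # 3x3 identity matrix

q = np.array([0.5, 0.5, 0.5, 0.5])                   # a unit quaternion
R = quaternion_to_rotation_matrix(q)
print(np.allclose(R @ R.T, np.eye(3)),               # True: orthonormal
      np.isclose(np.linalg.det(R), 1.0))             # True: proper rotation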
def load_subject(filename: str, mask_niimg):
    """
    Load a subject saved in .mat format with the version 7.3 flag.
    Return the subject niimg, using a mask niimg as a template for nifti headers.

    Args:
        filename    <str>            the .mat filename for the subject data
        mask_niimg  <niimg object>   the mask niimg object used for nifti headers
    """
    subject_data = None
    with h5py.File(filename, 'r') as f:
        subject_data = f['SM_feature'][()]

    # It's necessary to reorient the axes, since h5py flips axis order
    subject_data = np.moveaxis(subject_data, [0, 1, 2, 3], [3, 2, 1, 0])
    subject_niimg = nl.image.new_img_like(
        mask_niimg, subject_data, affine=mask_niimg.affine, copy_header=True)

    return subject_niimg
5,335,815
def get_data_monash(directory): """ Get the monash data in a dictionary """ # Generate the wildcard for the models wildcard = os.path.join(directory, "*") model_files = glob.glob(wildcard) all_models = {} for model_file in model_files: # First extract the filename file_name = os.path.split(model_file)[-1].split(".")[0] # Now for each line in the model file, retrieve the abundances with open(model_file, "r") as fread: # There are several models in each file, so just # look for each and save it for line in fread: # Find initial mass if "Initial mass" in line: # Get label lnlst = line.split() # Initial mass ini_mass = lnlst[4][0:4] label = "monash_m" + ini_mass + "_" # Mix mix = lnlst[13][0:8] label += "mix_" + mix + "_" # Ov if "_ov" in line: ov = lnlst[16] label += "N_ov_" + ov + "_" # Rest of the label label += file_name.split("_")[-1] # Now model if "Final abundances" in line: fread.readline() # Skip header # Save elements elems = {} for line in fread: if "#" in line: break # Add element to the list lnlst = line.split() name = lnlst[0].capitalize() if name == "Fe": feH = float(lnlst[3]) else: name += "/Fe" elems[name] = float(lnlst[4]) # Store all_models[label] = elems all_models[label]["Fe/H"] = feH return all_models
5,335,816
def pretty_duration(seconds):
    """Return a human-readable string for the specified duration"""
    if seconds < 2:
        return '%d second' % seconds
    elif seconds < 120:
        return '%d seconds' % seconds
    elif seconds < 7200:
        return '%d minutes' % (seconds // 60)
    elif seconds < 48 * 3600:
        return '%d hours' % (seconds // 3600)
    else:
        return '%d days' % (seconds // (24 * 3600))
5,335,817
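Sample calls showing which branch each of the thresholds above (2 s, 120 s, 7200 s, 48 h) selects:

print(pretty_duration(1))        # '1 second'
print(pretty_duration(90))       # '90 seconds'
print(pretty_duration(3600))     # '60 minutes'
print(pretty_duration(90000))    # '25 hours'
print(pretty_duration(200000))   # '2 days'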
def delete_bucket(zkclient, bucket_id):
    """Deletes bucket definition from Zookeeper."""
    zkutils.ensure_deleted(zkclient, z.path.bucket(bucket_id))
5,335,818
def plot_ecdf(tidy_data, cats, val, title, width=550, conf_int=False):
    """
    Plots an ECDF of tidy data.

    tidy_data: Set of tidy data.
    cats: Categories to plot
    val: The value to plot
    title: Title of plot
    width: width of plot
    conf_int: Whether or not to bootstrap a CI.
    """
    p = bokeh_catplot.ecdf(
        data=tidy_data,
        cats=cats,
        val=val,
        title=title,
        width=width,
        conf_int=conf_int,
    )

    return p
5,335,819
def upsert(left, right, inclusion=None, exclusion=None):
    """Upserts the specified left collection with the specified right collection
    by overriding the left values with the right values that have the same
    indices and concatenating the right values to the left values that have
    different indices, on the common keys that are in the specified inclusive
    list and are not in the specified exclusive list."""
    right = collection_to_common_type(right, left, inclusion=inclusion, exclusion=exclusion)
    left = update(left, include_index(right, left))
    return concat(left, exclude_index(right, left))
5,335,820
def train(training_data):
    """Trains the model on a given data set.

    Parameters
    ----------
    training_data : iterable of (word, tag) pairs

    Returns
    -------
    dict mapping each word to the tag it most frequently appears with
    """
    counts = Counter(training_data)
    model = {}

    # sort counts by lowest occurrences, up to most frequent.
    # this allows higher frequencies to overwrite related
    # values in the model
    for pair, _ in counts.most_common()[:-len(counts)-1:-1]:
        word, tag = pair
        model[word] = tag

    return model
5,335,821
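A tiny worked example for the tagger above, assuming Counter is imported from collections in the defining module: 'bank' appears twice as NOUN and once as VERB, so NOUN wins.

data = [('bank', 'NOUN'), ('bank', 'VERB'), ('bank', 'NOUN'), ('run', 'VERB')]
print(train(data))   # {'run': 'VERB', 'bank': 'NOUN'}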
def _exit():
    """Exit the program."""
    exit()
5,335,822
def get_description(): """ Return a dict describing how to call this plotter """ desc = dict() desc['data'] = True desc['cache'] = 86400 desc['description'] = """This chart totals the number of distinct calendar days per month that a given present weather condition is reported within the METAR data feed. The calendar day is computed for the local time zone of the reporting station. <p>The reporting of present weather codes within METARs has changed over the years and there is some non-standard nomenclature used by some sites. The thunder (TS) reports are delineated into three categories here to hopefully allow more accurate statistics. <ul> <li><strong>All Thunder Reports (TS)</strong> includes any <code>TS</code> mention in any present weather code</li> <li><strong>Thunder in Vicinity (VCTS)</strong> includes any <code>VCTS</code> mention in any present weather code, for example, <code>VCTSRA</code> would match.</li> <li><strong>Thunder Reports (excluding VCTS)</strong> includes most <code>TS</code> mentions, but not any including <code>VC</code></li> </ul> """ desc['arguments'] = [ dict(type='zstation', name='zstation', default='DSM', label='Select Station:', network='IA_ASOS'), dict(type='year', name="year", label='Year to Highlight:', default=datetime.date.today().year, min=1973), dict(type='select', name='var', default='FG', label='Present Weather Option:', options=PDICT), ] return desc
5,335,823
def check_cortex(ioc, ioc_type, object_id, is_mail=False, cortex_expiration_days=30): """Run all available analyzer for ioc. arguments: - ioc: value/path of item we need to check on cortex - ioc_type: type of the ioc (generic_relation and cortex datatype) - object_id: item to attach report to - is_mail: ioc is a mail [mail datatype is for addresses and file is for mail] """ _, _, cortex_api = get_info(mail=False) # Mail object is file in cortex # need to save mail object analyzer as mail_obj to discriminate them filter_type = ioc_type if not is_mail else "mail_obj" analyzers = Analyzer.objects.filter( disabled=False, supported_types__contains=[filter_type] ).order_by("-priority") # Full mail only on premise if ioc_type == "file": analyzers = analyzers.filter(onpremise=True) if is_mail is True: content_type = Mail else: content_type = Attachment elif ioc_type == "mail": content_type = Address elif ioc_type == "url": content_type = Url elif ioc_type == "domain": content_type = Domain elif ioc_type == "ip": content_type = Ip elif ioc_type == "hash": content_type = Attachment else: logging.error("Wrong ioc_type type {}".format(ioc_type)) return old_reports = Report.objects.filter( content_type=ContentType.objects.get_for_model(content_type), object_id=object_id, success=True, date__gte=datetime.datetime.today() - datetime.timedelta(days=cortex_expiration_days), ) try: db_object = content_type.objects.get(pk=object_id) except Exception: logging.error("CORTEX {} {} {} {}".format(ioc, ioc_type, object_id, is_mail)) return for analyzer in analyzers: # Check if item was already been processed for report in old_reports: if report.analyzer == analyzer: if "malicious" in report.taxonomies: db_object.tags.add( "{}: malicious".format(analyzer.name), tag_kwargs={"color": "#FF0000"}, ) db_object.taxonomy = 4 db_object.save() elif "suspicious" in report.taxonomies: db_object.tags.add( "{}: suspicious".format(analyzer.name), tag_kwargs={"color": "#C15808"}, ) db_object.taxonomy = max(3, db_object.taxonomy) db_object.save() elif "safe" in report.taxonomies: db_object.tags.add( "{}: safe".format(analyzer.name), tag_kwargs={"color": "#00FF00"}, ) db_object.taxonomy = max(2, db_object.taxonomy) db_object.save() elif "info" in report.taxonomies: db_object.tags.add( "{}: info".format(analyzer.name), tag_kwargs={"color": "#00B0FF"}, ) db_object.taxonomy = max(1, db_object.taxonomy) db_object.save() continue # If not rerun the analyzer try: job = cortex_api.analyzers.run_by_name( analyzer.name, {"data": ioc, "dataType": ioc_type, "tlp": 1}, force=1, ) while job.status not in ["Success", "Failure"]: time.sleep(10) job = cortex_api.jobs.get_report(job.id) if job.status == "Success": response = job.json() try: taxonomies = glom( response, ("report.summary.taxonomies", ["level"]) ) except PathAccessError: taxonomies = None report = Report( response=response, content_object=db_object, analyzer=analyzer, taxonomies=taxonomies, success=True, ) report.save() if "malicious" in taxonomies: db_object.tags.add( "{}: malicious".format(analyzer.name), tag_kwargs={"color": "#FF0000"}, ) db_object.taxonomy = 4 db_object.save() elif "suspicious" in taxonomies: db_object.tags.add( "{}: suspicious".format(analyzer.name), tag_kwargs={"color": "#C15808"}, ) db_object.taxonomy = max(3, db_object.taxonomy) db_object.save() elif "safe" in taxonomies: db_object.tags.add( "{}: safe".format(analyzer.name), tag_kwargs={"color": "#00FF00"}, ) db_object.taxonomy = max(2, db_object.taxonomy) db_object.save() elif "info" in taxonomies: 
db_object.tags.add( "{}: info".format(analyzer.name), tag_kwargs={"color": "#00B0FF"}, ) db_object.taxonomy = max(1, db_object.taxonomy) db_object.save() elif job.status == "Failure": report = Report( content_object=db_object, analyzer=analyzer, success=False, ) report.save() except Exception as excp: logging.error( "ERROR running analyzer {} for {}: {}".format(analyzer.name, ioc, excp) ) return True
5,335,824
def test_assert_callback_not_called_when_not_called(
    callable_group: MockCallableGroup,
    schedule_call: Callable,
) -> None:
    """
    Test that assert_not_called succeeds when the callback has not been called:
    the call is only scheduled to fire too late to be seen.

    :param callable_group: the callable group under test
    :param schedule_call: a callable used to schedule a callback call.
    """
    schedule_call(1.2, callable_group["a"], "foo", bah="bah")
    callable_group["a"].assert_not_called()
5,335,825
def _env_corr_same(wxy, Xa, Ya, sign=-1, log=True, x_ind=None, y_ind=None): """ The cSPoC objective function with same filters for both data sets: the correlation of amplitude envelopes Additionally, it returns the gradients of the objective function with respect to each of the filter coefficients. Notes: ------ The input datasets Xa and Ya are the analytic representations of the original datasets X and Y, hence they must be complex arrays. Xa and Ya can be either 2d numpy arrays of shape (channels x datapoints) or 3d array of shape (channels x datapoints x trials). For 3d arrays the average envelope in each trial is calculated if x_ind (or y_ind, respectively) is None. If they are set, the difference of the instantaneous amplitude envelope at x_ind/y_ind and the average envelope is calculated for each trial. If log == True, then the log transform is taken before the average inside the trial Input: ------ -- wxy is the array of shared filter coefficients for x and y -- Xa - numpy array - complex analytic representation of X Xa is the first Hilbert-transformed dataset of shape px x N (x tr), where px is the number of sensors, N the number of datapoints, tr the number of trials -- Ya is the second Hilbert-transformed dataset of shape py x N (x tr) -- sign {-1, 1} - the correlation coefficient is multiplied with this number. If the result of this function is minimized -1 should be used to find maximum correlation, 1 should be used to find maximal anti-correlation, defaults to -1 -- log {True, False} - compute the correlation between the log- transformed envelopes, if datasets come in epochs, then the log is taken before averaging inside the epochs -- x_ind int - the time index (-Xa.shape[1] <= x_ind < Xa.shape[1]), where the difference of the instantaneous envelope and the average envelope is determined for Xa -- y_ind int - the time index (-Ya.shape[1] <= y_ind < Ya.shape[1]), where the difference of the instantaneous envelope and the average envelope is determined for Ya Output: ------- -- c - float - the correlation coefficient of the amplitude envelopes of X and Y multiplied by the value of \"sign\" -- c_der - numpy array - the gradient of c with respect to each of the coefficients in wxy """ assert isinstance(Xa, _np.ndarray), "Xa must be numpy array" assert _np.iscomplexobj(Xa), "Xa must be a complex-type numpy array" +\ ", i.e. the analytic representaion of X" assert (Xa.ndim ==2 or Xa.ndim==3), "Xa must be 2D or 3D numpy array" assert isinstance(Ya, _np.ndarray), "Ya must be numpy array" assert _np.iscomplexobj(Ya), "Ya must be a complex-type numpy array" +\ ", i.e. the analytic representation of Y" assert (Ya.ndim ==2 or Ya.ndim==3), "Ya must be 2D or 3D numpy array" assert Xa.shape[-1] == Ya.shape[-1], "Size of last dimension in Xa " +\ "Ya must agree" p1 = Xa.shape[0] p2 = Ya.shape[0] assert p1 == p2, 'Dimensionality of Xa and Ya must agree for cSPoc' +\ ' with same filters' assert len(wxy) == p1, "Length of wxy must equal the" + \ " number of variables in Xa and Ya" assert isinstance(log, bool), "\"log\" must be a boolean (True or False)" assert sign in [-1, 1, 0], "\"sign\" must be -1, 1, or 0" if x_ind != None: assert Xa.ndim == 3, "If x_ind is set, Xa must be 3d array!" assert isinstance(x_ind, int), "x_ind must be integer!" assert ((x_ind >= -Xa.shape[1]) and (x_ind < Xa.shape[1])), "x_ind must match the range of " +\ "Xa.shape[1]" if y_ind != None: assert Ya.ndim == 3, "If y_ind is set, Ya must be 3d array!" assert isinstance(y_ind, int), "y_ind must be integer!" 
assert ((y_ind >= -Ya.shape[1]) and (y_ind < Ya.shape[1])), "y_ind must match the range of " +\ "Ya.shape[1]" # filter signal spatially Xa_filt = _np.tensordot(wxy, Xa, axes=(0,0)) Ya_filt = _np.tensordot(wxy, Ya, axes=(0,0)) # get envelope of filtered signal x_env = _np.abs(Xa_filt) y_env = _np.abs(Ya_filt) # get derivatives of envelopes envx_derwx = ((Xa_filt.real * Xa.real + Xa_filt.imag * Xa.imag) / x_env) envy_derwy = ((Ya_filt.real * Ya.real + Ya_filt.imag * Ya.imag) / y_env) if log: envx_derwx = envx_derwx / x_env envy_derwy = envy_derwy / y_env x_env = _np.log(x_env) y_env = _np.log(y_env) if ((Xa.ndim == 3) and (x_ind != None)): envx_derwx = envx_derwx[:,x_ind] - envx_derwx.mean(1) x_env = x_env[x_ind] - x_env.mean(0) elif Xa.ndim == 3: envx_derwx = envx_derwx.mean(1) x_env = x_env.mean(0) if ((Ya.ndim == 3) and (y_ind != None)): envy_derwy = envy_derwy[:,y_ind] - envy_derwy.mean(1) y_env = y_env[y_ind] - y_env.mean(0) elif Ya.ndim == 3: envy_derwy = envy_derwy.mean(1) y_env = y_env.mean(0) # remove mean of envelopes and derivatives x_env = x_env - x_env.mean() y_env = y_env - y_env.mean() envx_derwx = envx_derwx - envx_derwx.mean(1)[:,_np.newaxis] envy_derwy = envy_derwy - envy_derwy.mean(1)[:,_np.newaxis] # numerator of correlation num = _np.mean(x_env * y_env) # derivative of numerator num_d = _np.mean(envx_derwx*y_env + x_env*envy_derwy,1) # denominator of correlation denom = _np.sqrt(_np.mean(x_env**2) * _np.mean(y_env**2)) # derivative of denominator denom_d = ( (_np.mean(x_env*envx_derwx,1)*_np.mean(y_env**2) + _np.mean(x_env**2)*_np.mean(y_env*envy_derwy,1) ) / _np.sqrt(_np.mean(x_env**2) * _np.mean(y_env**2))) #final correlation corr = num / denom #final derivative corr_d = (num_d*denom - num*denom_d) / denom**2 if sign == 0: return _np.sign(corr)*corr, _np.sign(corr)*corr_d else: return sign*corr, sign*corr_d
5,335,826
def create_resource():
    """Hosts resource factory method"""
    deserializer = HostDeserializer()
    serializer = HostSerializer()
    return wsgi.Resource(Controller(), deserializer, serializer)
5,335,827
def test_write_two_regions_starting_from_position_that_is_bigger_than_file_length(chunk_size, local_file, mount_file):
    """TC-PIPE-FUSE-67"""
    actual_size = os.path.getsize(local_file)
    write_regions(local_file,
                  {'offset': actual_size + 5, 'amount': 10},
                  {'offset': actual_size + 20, 'amount': 10})
    write_regions(mount_file,
                  {'offset': actual_size + 5, 'amount': 10},
                  {'offset': actual_size + 20, 'amount': 10})
    assert_content(local_file, mount_file)
5,335,828
def resolve_media_files(document, resource):
    """ Embed media files into the response document.

    :param document: the document eventually containing the media files.
    :param resource: the resource being consumed by the request.

    .. versionadded:: 0.4
    """
    for field in resource_media_fields(document, resource):
        file_id = document[field]
        _file = app.media.get(file_id, resource)

        if _file:
            # otherwise we have a valid file and should send extended response
            # start with the basic file object
            if config.RETURN_MEDIA_AS_BASE64_STRING:
                ret_file = base64.encodestring(_file.read())
            elif config.RETURN_MEDIA_AS_URL:
                prefix = config.MEDIA_BASE_URL if config.MEDIA_BASE_URL \
                    is not None else app.api_prefix
                ret_file = '%s/%s/%s' % (prefix, config.MEDIA_ENDPOINT, file_id)
            else:
                ret_file = None

            if config.EXTENDED_MEDIA_INFO:
                document[field] = {
                    'file': ret_file,
                }

                # check if we should return any special fields
                for attribute in config.EXTENDED_MEDIA_INFO:
                    if hasattr(_file, attribute):
                        # add extended field if found in the file object
                        document[field].update({
                            attribute: getattr(_file, attribute)
                        })
                    else:
                        # tried to select an invalid attribute
                        abort(500, description=debug_error_message(
                            'Invalid extended media attribute requested'
                        ))
            else:
                document[field] = ret_file
        else:
            document[field] = None
5,335,829
def reformat(args):
    """Reformats a database to comply with diamond

    diamond makedb has some requirements when adding taxonomic info

    1. protein seqids cannot be longer than 14 characters
    2. nodes.dmp and names.dmp must be supplied
    3. a prot.accession2taxid.gz file mapping protein ids to taxonomy ids
       must be supplied
    """
    prepare.format_fasta(args.fastafile, args.reformatted, args.tmpdir,
                         args.force, args.taxidmap, args.forceidmap,
                         args.maxidlen)
5,335,830
def get_ref_kmer(ref_seq, ref_name, k_len):
    """ Load reference kmers. """
    ref_mer = []
    ref_set = set()
    for i in range(len(ref_seq) - k_len + 1):
        kmer = ref_seq[i:(i + k_len)]
        if kmer in ref_set:
            raise ValueError(
                "%s found multiple times in reference %s, at pos. %d" % (
                    kmer, ref_name, i)
            )

        ref_mer.append(kmer)
        ref_set.add(kmer)

    return ref_mer
5,335,831
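A short illustration with a made-up sequence: every 4-mer of "ACGTAC" is unique, so the call succeeds, whereas a repetitive sequence such as "ACACAC" would raise ValueError because "ACAC" occurs twice.

print(get_ref_kmer("ACGTAC", "demo_ref", 4))   # ['ACGT', 'CGTA', 'GTAC']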
def get_distutils_build_or_install_option(option):
    """
    Returns the value of the given distutils build or install option.

    Parameters
    ----------
    option : str
        The name of the option

    Returns
    -------
    val : str or None
        The value of the given distutils build or install option. If the
        option is not set, returns None.
    """
    return get_distutils_option(option, ['build', 'build_ext', 'build_clib',
                                         'install'])
5,335,832
def boot(num):
    """ Show boot sequence on screen """
    _boot(num)
5,335,833
def test_hist_bbox(basic_image):
    """Test that the bbox dimensions are customizable."""
    f, ax = ep.hist(basic_image, figsize=(50, 3))
    bbox = str(f.__dict__.get("bbox_inches"))
    assert bbox == "Bbox(x0=0.0, y0=0.0, x1=50.0, y1=3.0)"
5,335,834
def CMDset_close(parser, args):
    """Closes the issue."""
    auth.add_auth_options(parser)
    options, args = parser.parse_args(args)
    auth_config = auth.extract_auth_config_from_options(options)
    if args:
        parser.error('Unrecognized args: %s' % ' '.join(args))
    cl = Changelist(auth_config=auth_config)
    # Ensure there actually is an issue to close.
    cl.GetDescription()
    cl.CloseIssue()
    return 0
5,335,835
def gchip(k_k, b_b, c_c):
    """gchip(k_k, b_b, c_c)"""
    yout = b_b*c_c*nu_f(1, b_b, k_k)**((c_c+1)/2) * \
        cos((c_c+1)*atan(b_b*k_k))
    return yout
5,335,836
def register(args: argparse.Namespace) -> None:
    """
    Handler for `lambada register`, which registers a new lambada function
    against a simiotics registry

    Args:
        args
            `argparse.Namespace` object containing parameters to the `register` command

    Returns: None, prints key of registered function
    """
    simiotics = client_from_env()

    tags = {
        'runtime': args.runtime,
        'handler': args.handler,
        'requirements': args.requirements,
        'iam_policy': args.iam_policy,
        'timeout': str(args.timeout),
        'env': args.env,
        LambadaManagerKey: LambadaManager,
    }

    simiotics.register_function(args.key, args.code, tags, args.overwrite)

    print(args.key)
5,335,837
def epochs_sim_agg_returns_cov_market_plot(agg_ret: pd.Series, epochs_len: int) -> None:
    """Plots the aggregated distribution of simulated returns for a market.

    :param agg_ret: simulated rotated and aggregated returns from a simulated market.
    :type agg_ret: pd.Series
    :param epochs_len: length of the epochs.
    :type epochs_len: int
    """

    function_name: str = epochs_sim_agg_returns_cov_market_plot.__name__
    epochs_sim_tools.function_header_print_plot(
        function_name, [""], "", "", "", sim=True
    )

    agg_ret = agg_ret.rename("Agg. returns")

    x_values: np.ndarray = np.arange(-6, 6, 0.001)
    gaussian: np.ndarray = epochs_sim_tools.gaussian_distribution(0, 1, x_values)

    plot_lin = agg_ret.plot(
        kind="density", style="-", logy=True, figsize=(16, 9), legend=True, lw=5
    )

    plt.semilogy(x_values, gaussian, "-", lw=3, label="Gaussian")

    plt.legend(fontsize=20)
    plt.title("Simulation", fontsize=30)
    plt.xlabel(f"Aggregated simulated returns - epochs {epochs_len}", fontsize=25)
    plt.ylabel("PDF", fontsize=25)
    plt.xticks(fontsize=15)
    plt.yticks(fontsize=15)
    plt.xlim(-6, 6)
    plt.ylim(10 ** -5, 1)
    plt.grid(True)
    plt.tight_layout()

    figure_log = plot_lin.get_figure()

    # Plotting
    epochs_sim_tools.save_plot(
        figure_log, function_name, [""], "", str(epochs_len), "", sim=True
    )

    plt.close()
    del agg_ret
    del figure_log
    del plot_lin
    gc.collect()
5,335,838
def create_component(ctx: NVPContext):
    """Create an instance of the component"""
    return ProcessUtils(ctx)
5,335,839
def is_valid_ip(ip: str) -> bool:
    """
    Args:
        ip: IP address

    Returns:
        True if the string represents an IPv4 or an IPv6 address, false otherwise.
    """
    try:
        ipaddress.IPv4Address(ip)
        return True
    except ValueError:
        try:
            ipaddress.IPv6Address(ip)
            return True
        except ValueError:
            return False
5,335,840
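Quick checks against the standard-library ipaddress module used above:

print(is_valid_ip("192.168.0.1"))   # True  (IPv4)
print(is_valid_ip("::1"))           # True  (IPv6)
print(is_valid_ip("999.1.1.1"))     # False (octet out of range)
print(is_valid_ip("not-an-ip"))     # False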
def complete_tree(leaves):
    """
    Complete a tree defined by its leaves.

    Parameters
    ----------
    leaves : np.array(dtype=np.int64)

    Returns
    -------
    np.array(dtype=np.int64)
    """
    tree_set = _complete_tree(leaves)

    return np.fromiter(tree_set, dtype=np.int64)
5,335,841
def pick_best_batch_size_for_gpu():
    """
    Tries to pick a batch size that will fit in your GPU. These sizes aren't
    guaranteed to work, but they should give you a good shot.
    """
    # torch.cuda.mem_get_info() returns (free, total) bytes for the current device.
    free, available = torch.cuda.mem_get_info()
    availableGb = available / (1024 ** 3)
    if availableGb > 14:
        return 16
    elif availableGb > 10:
        return 8
    elif availableGb > 7:
        return 4
    return 1
5,335,842
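A guarded call sketch, assuming a CUDA-capable PyTorch build; torch.cuda.mem_get_info() needs a CUDA device, so the check keeps this from failing on CPU-only machines.

import torch

if torch.cuda.is_available():
    print(pick_best_batch_size_for_gpu())   # e.g. 16 on a GPU with more than 14 GB
else:
    print("No CUDA device available; falling back to a batch size of 1.")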
def getTensorRelativError(tA, pA):
    """Get the relative error between two tensors."""
    pA_shape = np.shape(pA)
    tA_shape = np.shape(tA)
    assert (pA_shape == tA_shape), "Arrays must be same shape"

    err = np.max(np.abs(np.array(pA) - np.array(tA)))
    return err
5,335,843
def eliminations(rct_gras, prd_gras): """ find eliminations consistent with these reactants and products :param rct_gras: reactant graphs (must have non-overlapping keys) :param prd_gras: product graphs (must have non-overlapping keys) Eliminations are identified by forming a bond between an attacking heavy atom and another atom not initially bonded to it, forming a ring. The bond adjacent to the attacked atom is then broken, along with a second bond in the ring, downstream of the attacking heavy atom, away from the attacked atom. """ _assert_is_valid_reagent_graph_list(rct_gras) _assert_is_valid_reagent_graph_list(prd_gras) rxns = [] if len(rct_gras) == 1 and len(prd_gras) == 2: rct_gra, = rct_gras prds_gra = union_from_sequence(prd_gras) ngb_keys_dct = atoms_neighbor_atom_keys(rct_gra) frm1_keys = atom_keys(rct_gra, excl_syms=('H',)) frm2_keys = atom_keys(rct_gra) bnd_keys = bond_keys(rct_gra) frm_bnd_keys = [(frm1_key, frm2_key) for frm1_key, frm2_key in itertools.product(frm1_keys, frm2_keys) if frm1_key != frm2_key and not frozenset({frm1_key, frm2_key}) in bnd_keys] for frm1_key, frm2_key in frm_bnd_keys: # Bond the radical atom to the hydrogen atom gra_ = add_bonds(rct_gra, [(frm2_key, frm1_key)]) # Get keys to the ring formed by this extra bond rng_keys = next((ks for ks in rings_atom_keys(gra_) if frm2_key in ks and frm1_key in ks), None) # Eliminations (as far as I can tell) only happen through TSs with # 3- or 4-membered rings if rng_keys is not None and len(rng_keys) < 5: frm1_ngb_key, = ngb_keys_dct[frm1_key] & set(rng_keys) frm2_ngb_key, = ngb_keys_dct[frm2_key] & set(rng_keys) # Break the bonds on either side of the newly formed bond gra_ = remove_bonds(gra_, [(frm1_key, frm1_ngb_key)]) gra_ = remove_bonds(gra_, [(frm2_key, frm2_ngb_key)]) inv_dct = isomorphism(gra_, prds_gra) if inv_dct: f_frm_bnd_key = (frm1_key, frm2_key) f_brk_bnd_key1 = (frm1_key, frm1_ngb_key) f_brk_bnd_key2 = (frm2_key, frm2_ngb_key) inv_ = inv_dct.__getitem__ b_frm_bnd_key1 = tuple(map(inv_, f_brk_bnd_key1)) b_frm_bnd_key2 = tuple(map(inv_, f_brk_bnd_key2)) b_brk_bnd_key = tuple(map(inv_, f_frm_bnd_key)) forw_tsg = ts.graph(rct_gra, frm_bnd_keys=[f_frm_bnd_key], brk_bnd_keys=[f_brk_bnd_key1, f_brk_bnd_key2]) back_tsg = ts.graph(prds_gra, frm_bnd_keys=[b_frm_bnd_key1, b_frm_bnd_key2], brk_bnd_keys=[b_brk_bnd_key]) rcts_atm_keys = list(map(atom_keys, rct_gras)) prds_atm_keys = list(map(atom_keys, prd_gras)) if inv_dct[frm2_key] not in prds_atm_keys[1]: prds_atm_keys = list(reversed(prds_atm_keys)) # Create the reaction object rxns.append(Reaction( rxn_cls=par.ReactionClass.ELIMINATION, forw_tsg=forw_tsg, back_tsg=back_tsg, rcts_keys=rcts_atm_keys, prds_keys=prds_atm_keys, )) return ts_unique(rxns)
5,335,844
def ParseAndCreateModel(inputFilePath, jsonFilePath=None, outputFilePath=None, className="SBMLmodel"):
    """
    This function parses an SBML model file and generates a Python file implementing the model.

    Parameters
    ----------
    inputFilePath : str
        Location of the SBML model to be parsed
    jsonFilePath : str, optional
        If provided, a json file containing the data for each of the model components will be
        created. If a file exists at the provided location, the file will be overwritten.
        Otherwise, this functionality is skipped and just a Python file will be generated.
    outputFilePath : str, optional
        Location where the Python file will be created. If a file exists at the provided
        location, the file will be overwritten. If a location is not provided, the file path
        of the generated python file will be based on the inputFilePath
    className : str, optional
        Name of the class that implements the SBML model.

    Returns
    -------
    None

    Warnings
    --------
    This function will overwrite files located at jsonFilePath and outputFilePath

    Notes
    -----
    ParseAndCreateModel is intended to combine most of the functionality provided by this
    package. The intent is for this function to be suitable for most applications.
    """
    # if jsonFileName == None:
    #     jsonFileName = inputFileName.split('.')[0] + '.json'

    if outputFilePath == None:
        outputFilePath = inputFilePath.split('.')[0] + '.py'

    modelData = ParseSBMLFile(inputFilePath)
    if not jsonFilePath == None:
        modelData.DumpToJSON(jsonFilePath)
        # reloadedModelData = ModelData.LoadFromJSON(textFileName)

    GenerateModel(modelData, outputFilePath, objectName=className)
5,335,845
def sigmoid(*columns):
    """Fit a Sigmoid through the data of the last scan.

    The return value is a pair of tuples::

        ((a, b, x0, c), (d_a, d_b, d_x0, d_c))

    where the elements of the second tuple are the estimated standard errors
    of the fit parameters. The fit parameters are:

    * a - amplitude of the Sigmoid
    * b - steepness of the curve
    * x0 - center
    * c - background

    If the fit failed, the result is ``(None, None)``.

    Example::

        cscan(...)
        values, stderr = sigmoid('h', 'adet')
    """
    xs, ys, dys, _, ds = _getData(columns)
    fit = SigmoidFit()
    res = fit.run(xs, ys, dys)
    if res._failed:
        return None, None
    session.notifyFitCurve(ds, 'sigmoid', res.curve_x, res.curve_y)
    descrs = ['amplitude', 'steepness', 'center', 'background']
    vals = []
    for par, err, descr in zip(res._pars[1], res._pars[2], descrs):
        vals.append((descr, '%.4f' % par, '%.4f' % err))
    printTable(('parameter', 'value', 'error'), vals, session.log.info)
    return CommandLineFitResult((tuple(res._pars[1]), tuple(res._pars[2])))
5,335,846
def update_dictionary_count(dictionary: dict, key: str):
    """
    Add 1 to the specified dictionary key or create a dictionary entry for that key.

    @param dictionary: The dictionary to be updated
    @param key: String holding the dictionary key
    @return: None (Dictionary is updated)
    """
    dictionary[key] = dictionary[key] + 1 if key in dictionary.keys() else 1
    return
5,335,847
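A small usage sketch; dict.get(key, 0) + 1 or collections.Counter would achieve the same, but this mirrors the helper above.

counts = {}
for token in ["a", "b", "a"]:
    update_dictionary_count(counts, token)
print(counts)   # {'a': 2, 'b': 1}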
def _install( bz2, recipe_name, debug=False, meta_recipe=False, env_var_dir="", env_var_file="", parent_name="", commands_file="", ): """Method to install a local pre-built package to ensure package installs correctly _install ======== This method is used to install a pre-built ggd package. conda build was used to turn the ggd recipe into a ggd package. This script will take the locally built ggd package and install it. This method is used to ensure the package installs correctly. Parameters: ----------- 1) bz2: (str) The bz2 tarball package file created from the conda build 2) recipe_name: (str) The name of the ggd recipe/package 3) debug: (bool) Whether or not to set logging level to debug 4) meta_recipe: (bool) Whether or not the recipe is a meta recipe 5) env_var_dir: (str) The path to the meta-recipe tmp env var 6) env_var_file: (str) The file path to the meta-recipe tmp env var json file 7) parent_name: (str) If a meta-recipe, the name of the parent meta-recipe 8) commands_file: (str) The path to the subsetted commands used for the specific Meta-Recipe ID if meta-recipe Returns: +++++++ 1) True if the installation was successful and the package was not already installed on the system 2) False if the package has already been installed on the system 3) If the installation fails program exits. ggd data handling is initiated to remove any new/updated files from the installation process """ import traceback from .utils import ( get_conda_package_list, get_required_conda_version, update_installed_pkg_metadata, update_metarecipe_metadata, ) conda_version, equals = get_required_conda_version() conda_install = "{}conda{}{}{}".format('"', equals, conda_version, '"') ## See if it is already installed if recipe_name in get_conda_package_list(conda_root(), include_local=True).keys(): return False ## Set CONDA_SOURCE_PREFIX environment variable os.environ["CONDA_SOURCE_PREFIX"] = conda_root() ## base install command install_command = ["conda", "install", "-v", "--use-local", "-y", recipe_name] ## check if debug flag needs to be added if debug: install_command.append("-v") ## Check if specific conda version needs to be added if conda_version != -1: install_command.append(conda_install) ## Install the new recipe try: sp.check_call(install_command, stderr=sys.stderr, stdout=sys.stdout) ## Update meta-recipe bz2 if meta-recipe if meta_recipe: import json import shutil ## Check for meta-recipe environment variables if op.exists(env_var_file): print( "\n:ggd:check-recipe: Loading Meta-Recipe ID specific environment variables" ) ## Load environment variables from json file meta_env_vars = json.load(open(env_var_file)) commands_str = ( "\n".join([x.strip() for x in open(commands_file, "r")]) if op.exists(commands_file) else "" ) ## Remove temp dir with json file shutil.rmtree(env_var_dir) ## Update bz2 file success, new_bz2_path = update_metarecipe_metadata( pkg_name=recipe_name, env_var_dict=meta_env_vars, parent_name=parent_name, final_file_list=[], final_file_size_dict={}, commands_str=commands_str, ) assert ( success ), ":ggd:check-recipe: !!ERROR!! There was a problem updating the meta-recipe metadata" except Exception as e: print(e) print( "\n:ggd:check-recipe: %s did not install properly. 
\n\n\t->Error message:\n" % recipe_name ) print(traceback.format_exc()) ## Remove ggd files recipe_dict = get_recipe_from_bz2(bz2) species = recipe_dict["about"]["identifiers"]["species"] genome_build = recipe_dict["about"]["identifiers"]["genome-build"] version = recipe_dict["package"]["version"] name = recipe_dict["package"]["name"] ggd_jdict = { "packages": { name: { "identifiers": {"species": species, "genome-build": genome_build}, "version": version, } } } try: check_for_installation( [recipe_name], ggd_jdict ) ## .uninstall method to remove extra ggd files except Exception as e: print(e) print( "\n:ggd:check-recipe: Review the STDOUT and STDERR, correct the errors, and re-run $ggd check-recipe\n" ) ## Exit sys.exit(1) ## Update installed metadata print("\n:ggd:check-recipe: Updating installed package list") update_installed_pkg_metadata( remove_old=False, add_packages=[recipe_name], include_local=True ) return True
5,335,848
def MakeSamplesFromOutput(metadata, output): """Create samples containing metrics. Args: metadata: dict contains all the metadata that reports. output: string, command output Example output: perfkitbenchmarker/tests/linux_benchmarks/nccl_benchmark_test.py Returns: Samples containing training metrics, and the bandwidth """ samples = [] metadata.update(_SAMPLE_LINE_RE.match(output).groupdict()) results = regex_util.ExtractAllMatches( r'(Rank\s+\d+) (.*)', output) for rank, device in results: metadata[rank] = device results = regex_util.ExtractAllMatches( r'^\s*' r'(\d+)\s+' r'(\d+)\s+' r'(\w+)\s+' r'(\w+)\s+' r'(\d+(?:\.\d+)?)\s+' r'(\d+(?:\.\d+)?)\s+' r'(\d+(?:\.\d+)?)\s+' r'(\S+)\s+' r'(\d+(?:\.\d+)?)\s+' r'(\d+(?:\.\d+)?)\s+' r'(\d+(?:\.\d+)?)\s+' r'(\S+)', output, re.MULTILINE) max_out_of_place_algbw = 0 for row in results: metadata_copy = metadata.copy() metadata_copy.update(zip(_METADATA_COLUMNS, row)) for metric, metadata_key in sorted(_SAMPLE_NAMES.items()): samples.append(sample.Sample(metric, float(metadata_copy[metadata_key]), 'GB/s', metadata_copy)) # Gbps is gigaBIT per second and GB/s is gigaBYTE per second max_out_of_place_algbw = max(max_out_of_place_algbw, float(metadata_copy['out_of_place_algbw'])) avg_bus_bandwidth = regex_util.ExtractExactlyOneMatch( r'Avg bus bandwidth\s+: ([0-9\.]+)', output) samples.append(sample.Sample('avg_busbw', float(avg_bus_bandwidth), 'GB/s', metadata)) samples.append(sample.Sample('max_out_of_place_algbw', max_out_of_place_algbw * 8, 'Gbps', metadata)) return samples, max_out_of_place_algbw
5,335,849
def _write_bytes_to_temporary_file(local_path):
    """if `local_path` is a file-like object, write the contents to an *actual*
    file and return a pair of new local filename and a function that removes
    the temporary file when called."""
    if hasattr(local_path, "read"):
        # `local_path` is a file-like object
        local_bytes = local_path
        local_bytes.seek(0)  # reset internal pointer
        temp_file, local_path = tempfile.mkstemp(suffix="-threadbare")
        with os.fdopen(temp_file, "wb") as fh:
            data = local_bytes.getvalue()
            # data may be a string or it may be bytes.
            # if it's a string we assume it's a UTF-8 string.
            # skip entirely if we're on Python2
            if isinstance(data, str) and common.PY3:
                data = bytes(data, "utf-8")
            fh.write(data)
        cleanup = lambda: os.unlink(local_path)
        return local_path, cleanup
    return local_path, None
5,335,850
def tree_to_newick_rec(cur_node):
    """
    This recursive function is a helper function to generate the Newick string of a tree.
    """
    items = []
    num_children = len(cur_node.descendants)
    for child_idx in range(num_children):
        s = ''
        sub_tree = tree_to_newick_rec(cur_node.descendants[child_idx])
        if sub_tree != '':
            s += '(' + sub_tree + ')'
        s += cur_node.descendants[child_idx].name
        items.append(s)
    return ','.join(items)
5,335,851
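A minimal illustration of the helper above with a stand-in node type (hypothetical; any object exposing .name and .descendants works): a root with leaf A and an internal node D holding leaves B and C yields the Newick body "A,(B,C)D".

from dataclasses import dataclass, field
from typing import List

@dataclass
class Node:
    name: str
    descendants: List["Node"] = field(default_factory=list)

root = Node("root", [Node("A"), Node("D", [Node("B"), Node("C")])])
print(tree_to_newick_rec(root))   # A,(B,C)D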
def clean(layers, skiplayers, db_url): """Clean and index the data after load """ db = fwa.util.connect(db_url) # parse the input layers in_layers = parse_layers(layers, skiplayers) for layer in settings.source_tables: table = fwa.tables[layer['table']] if layer['table'] in in_layers and table in db.tables: click.echo(layer['table']+': cleaning') # drop ogr and esri columns for column in settings.drop_columns: if column in db[table].columns: db[table].drop_column(column) # ensure _id keys are int - ogr maps them to double for column in db[table].columns: if column[-3:] == '_id': if column == 'linear_feature_id': column_type = 'bigint' else: column_type = 'integer' sql = '''ALTER TABLE {t} ALTER COLUMN {col} TYPE {type} '''.format(t=table, col=column, type=column_type) db.execute(sql) # make sure there are no '<Null>' strings in codes for column in ['fwa_watershed_code', 'local_watershed_code']: if column in db[table].columns: sql = """UPDATE {t} SET {c} = NULL WHERE {c} = '<Null>' """.format(t=table, c=column) db.execute(sql) # add ltree columns to tables with watershed codes if 'fwa_watershed_code' in db[table].columns: click.echo(layer['table']+': adding ltree types') fwa.add_ltree(table, db=db) # add primary key constraint db[table].add_primary_key(layer['id']) click.echo(layer['table']+': indexing') # create indexes on columns noted in parameters for column in layer['index_fields']: db[table].create_index([column]) # create geometry index for tables loaded by group if layer['grouped']: db[table].create_index_geom() # index watershed codes for col in ['fwa_watershed_code', 'local_watershed_code']: if col in db[table].columns: sql = """CREATE INDEX IF NOT EXISTS ix_{n}_{c}_tpo ON {t} ({c} text_pattern_ops) """.format(n=layer['table'], t=table, c=col) db.execute(sql) # create additional functions, convenience tables, lookups # (run queries with 'create_' prefix if required sources are present) # create general upstream / downstream functions based on watershed codes db.execute(fwa.queries['fwa_upstreamwsc']) #db.execute(fwa.queries['fwa_downstream']) # for streams, create length upstream/downstream functions and invalid code lookup if 'whse_basemapping.fwa_stream_networks_sp' in db.tables: db.execute(fwa.queries['create_invalid_codes']) for f in ['fwa_lengthdownstream', 'fwa_lengthupstream', 'fwa_lengthinstream']: db.execute(fwa.queries[f]) # create named streams table if ('whse_basemapping.fwa_stream_networks_sp' in db.tables and 'whse_basemapping.fwa_lakes_poly' in db.tables and 'whse_basemapping.fwa_manmade_waterbodies_poly' in db.tables): db.execute(fwa.queries['create_fwa_named_streams']) # subdivide watershed group polys if 'whse_basemapping.fwa_watershed_groups_poly' in db.tables: db.execute(fwa.queries['create_fwa_watershed_groups_subdivided']) # simplify the 20k-50k stream lookup if 'whse_basemapping.fwa_streams_20k_50k' in db.tables: db.execute(fwa.queries['create_lut_50k_20k_wsc']) # create a simple waterbody lookup table if ('whse_basemapping.fwa_wetlands_poly' in db.tables and 'whse_basemapping.fwa_lakes_poly' in db.tables and 'whse_basemapping.fwa_manmade_waterbodies_poly' in db.tables and 'whse_basemapping.fwa_rivers_poly' in db.tables): db.execute(fwa.queries['create_fwa_waterbodies']) # create text_pattern_pos indexes on watershed codes # (these aren't included in sources.json indexes as index type is required) # add CDB_MakeHexagon function db.execute(fwa.queries['CDB_MakeHexagon'])
5,335,852
def create_data_bucket():
    """
    Creates the main GCS data bucket; enables logging and versioning for the bucket
    and sets/overwrites access rights according to best practices.

    :return: None
    """
    run_command('gsutil mb -p {} -c {} -l {} gs://{}'
                .format(PROJECT_ID, DATA_STORAGE_CLASS, DATA_BUCKET_LOCATION, DATA_BUCKET_ID),
                'already exists')
    run_command('gsutil logging set on -b gs://{} gs://{}'.format(LOGS_BUCKET_ID, DATA_BUCKET_ID))
    run_command('gsutil versioning set on gs://{}'.format(DATA_BUCKET_ID))
    __set_data_bucket_access()
5,335,853
def stop_at_chimera_removal(inFolder, outFolder, rdb, trimq, joining_method, qcq, maxloose, fastq_p):
    """Run the pipeline through trimming, merging, quality control and chimera removal."""
    global PR
    trimmed = asfolder(outFolder + PR['Ftrimmed'])
    merged = asfolder(outFolder + PR['Fmerged'])
    qc = asfolder(outFolder + PR['Fqc'])
    chi = asfolder(outFolder + PR['Fchi'])

    trimfolder(inFolder, trimmed, trimq)
    if joining_method == "fastq-join":
        mergefolderfastq(trimmed, merged, fastq_p)
    elif joining_method == "bbmerge":
        mergefolderbb(trimmed, merged, maxloose=maxloose)
    else:
        raise ValueError("%s: unknown merging method" % joining_method)
    qualitycontrol(merged, qc, qcq)
    removechimera(qc, chi, rdb)
5,335,854
def mdetr_efficientnetB3(pretrained=False, return_postprocessor=False):
    """
    MDETR ENB3 with 6 encoder and 6 decoder layers.

    Pretrained on our combined aligned dataset of 1.3 million images paired with text.
    """
    model = _make_detr("timm_tf_efficientnet_b3_ns")
    if pretrained:
        checkpoint = torch.hub.load_state_dict_from_url(
            url="https://zenodo.org/record/4721981/files/pretrained_EB3_checkpoint.pth",
            map_location="cpu",
            check_hash=True,
        )
        model.load_state_dict(checkpoint["model"])
    if return_postprocessor:
        return model, PostProcess()
    return model
5,335,855
def goods_insert():
    """Goods Insert"""
    # Sample products: '제품N' means 'Product N'; the last field is a placeholder
    # description ("A description of product N goes here").
    products = (
        ('제품1', 30000, 'wenyang-x700.jpg', 10, 3, '제품1에 대한 설명이 여기에 들어옵니다'),
        ('제품2', 33000, 'tamara-bellis-x700.jpg', 10, 5, '제품2에 대한 설명이 여기에 들어옵니다'),
        ('제품3', 35000, 'roland-denes-x700.jpg', 10, 1, '제품3에 대한 설명이 여기에 들어옵니다'),
        ('제품4', 33000, 'raamin-ka-x700.jpg', 10, 2, '제품4에 대한 설명이 여기에 들어옵니다'),
        ('제품5', 31000, 'oliver-johnson-x700.jpg', 10, 5, '제품5에 대한 설명이 여기에 들어옵니다'),
        ('제품6', 36000, 'taisiia-stupak-x700.jpg', 10, 4, '제품6에 대한 설명이 여기에 들어옵니다')
    )

    for item in products:
        goods = Goods()
        goods.goods_name = item[0]
        goods.price = item[1]
        goods.goods_photo = item[2]
        goods.goods_cnt = item[3]
        goods.goods_ranking = item[4]
        goods.goods_description = item[5]
        db_session.add(goods)

    db_session.commit()
5,335,856
def _check_EJR_brute_force(profile, committee): """ Test using brute-force whether a committee satisfies EJR. Parameters ---------- profile : abcvoting.preferences.Profile A profile. committee : iterable of int A committee. Returns ------- bool """ # should check for ell from 1 until committee size ell_upper_bound = len(committee) + 1 # loop through all possible ell for ell in range(1, ell_upper_bound): # list of candidates with less than ell approved candidates in committee voters_less_than_ell_approved_candidates = [] # compute minimum group size for this ell group_size = math.ceil(ell * (len(profile) / len(committee))) # compute list of candidates to consider for i, voter in enumerate(profile): if len(voter.approved & committee) < ell: voters_less_than_ell_approved_candidates.append(i) # check if an ell-cohesive group can be formed with considered candidates if len(voters_less_than_ell_approved_candidates) < group_size: # if not possible then simply continue with next ell continue # check all possible combinations of considered voters, # taken (possible group size) at a time for combination in itertools.combinations( voters_less_than_ell_approved_candidates, group_size ): # to calculate the cut of approved candidates for the considered voters # initialize the cut to be the approval set of the first candidate in current # combination cut = set(profile[combination[0]].approved) # calculate the cut over all voters for current combination # (also can skip first voter in combination, but inexpensive enough...) for j in combination: cut = cut & profile[j].approved # if size of cut is >= ell, then combination is an ell-cohesive group if len(cut) >= ell: # we have found combination to be an ell-cohesive set, with no voter having # at least ell approved candidates in committee. Thus EJR fails detailed_information = { "cohesive_group": voters_less_than_ell_approved_candidates, "ell": ell, "joint_candidates": cut, } return False, detailed_information # if function has not returned by now, then it means that for all ell, # no ell-cohesive group was found among candidates with less than ell # approved candidates in committee. Thus committee satisfies EJR detailed_information = {} return True, detailed_information
5,335,857
def read_stanford_labels(): """Read Stanford HARDI data and label map""" # First get the HARDI data fetch_stanford_hardi() hardi_img, gtab = read_stanford_hardi() # Fetch and load the label map files, folder = fetch_stanford_labels() labels_file = pjoin(folder, "aparc-reduced.nii.gz") labels_img = nib.load(labels_file) return hardi_img, gtab, labels_img
5,335,858
def generate_generators(n_vals, replicas, output_directory): """ Generate generators and save them in `output_directory`. Will read an array of n values from `n_vals`, generating `replicas` generators for each n value. Creates with deterministic state machine generators. """ filename_format = os.path.abspath(output_directory) + '/%d-%d.gen' for n in n_vals: for i in range(replicas): generator = DeterministicStateMachineGenerator(n) generator.transition_matrix.tofile(filename_format % (n, i))
5,335,859
def _WrapRequestForUserAgentAndTracing(http_client, trace_token, trace_email, trace_log, gcloud_ua): """Wrap request with user-agent and trace reporting. Args: http_client: The original http object. trace_token: str, Token to be used to route service request traces. trace_email: str, username to which service request traces should be sent. trace_log: bool, Enable/disable server side logging of service requests. gcloud_ua: str, User agent string to be included in the request. Returns: http, The same http object but with the request method wrapped. """ orig_request = http_client.request def RequestWithUserAgentAndTracing(*args, **kwargs): """Wrap request with user-agent and trace reporting. Args: *args: Positional arguments. **kwargs: Keyword arguments. Returns: The result of the wrapped request, issued with the user-agent and trace settings applied. """ modified_args = list(args) # Use gcloud specific user-agent with command path and invocation-id. # Pass in the user-agent through kwargs or args. def UserAgent(current=''): user_agent = '{0} {1}'.format(current, gcloud_ua) return user_agent.strip() cur_ua = RequestArgsGetHeader(modified_args, kwargs, 'user-agent', '') RequestArgsSetHeader(modified_args, kwargs, 'user-agent', UserAgent(cur_ua)) # Modify request url to enable requested tracing. url_parts = urlparse.urlsplit(args[0]) query_params = urlparse.parse_qs(url_parts.query) if trace_token: query_params['trace'] = 'token:{0}'.format(trace_token) elif trace_email: query_params['trace'] = 'email:{0}'.format(trace_email) elif trace_log: query_params['trace'] = 'log' # Replace the request url in the args modified_url_parts = list(url_parts) modified_url_parts[3] = urllib.urlencode(query_params, doseq=True) modified_args[0] = urlparse.urlunsplit(modified_url_parts) return orig_request(*modified_args, **kwargs) http_client.request = RequestWithUserAgentAndTracing # apitools needs this attribute to do credential refreshes during batch API # requests. if hasattr(orig_request, 'credentials'): setattr(http_client.request, 'credentials', orig_request.credentials) return http_client
5,335,860
def indexData_x(x, ukn_words): """ Map each word in the given data to a unique integer. A special index will be kept for "out-of-vocabulary" words. :param x: The data :return: Two dictionaries: one where words are keys and indexes values, another one "reversed" (keys->index, values->words) """ # Retrieve all words used in the data (with duplicates) all_text = [w for e in x for w in e] # Create a DETERMINISTIC set of all words used = set() words = [x for x in all_text if x not in used and (used.add(x) or True)] print("Number of entries: ",len(all_text)) print("Individual entries: ",len(words)) # Assign an integer index for each individual word word2ind = {word: index for index, word in enumerate(words, 2)} ind2word = {index: word for index, word in enumerate(words, 2)} # To deal with out-of-vocabulary words word2ind.update({ukn_words:1}) ind2word.update({1:ukn_words}) # The index '0' is kept free in both dictionaries return word2ind, ind2word
5,335,861
def BFS_TreeSearch(problem): """ Tree Search BFS Args->problem: OpenAI Gym environment Returns->(path, time_cost, space_cost): solution as a path and stats. """ node = Node(problem.startstate, None) time_cost = 0 space_cost = 1 if node.state == problem.goalstate: return build_path(node), time_cost, space_cost frontier = NodeQueue() frontier.add(node) while not frontier.is_empty(): current = frontier.remove() for action in range(problem.action_space.n): time_cost += 1 child = Node(problem.sample(current.state, action), current) if(child.state == problem.goalstate): return build_path(child), time_cost, space_cost # solution frontier.add(child) space_cost = max(space_cost,len(frontier)) return None, time_cost, space_cost
5,335,862
def remove_metaRotation(gA_rot: GeoArray, rspAlg='cubic') -> GeoArray: """Remove any metadata rotation (a rotation that only exists in the map info).""" gA = GeoArray(*warp_ndarray(gA_rot[:], gA_rot.gt, gA_rot.prj, rspAlg=rspAlg, # out_gsd=(gA_rot.xgsd, gA_rot.ygsd) ), nodata=gA_rot.nodata) # carry the basename and metadata over from the input array gA.basename = os.path.basename(gA_rot.basename) gA.meta = gA_rot.meta return gA
5,335,863
def _objective_function(extra_features: jnp.ndarray, media_mix_model: lightweight_mmm.LightweightMMM, media_input_shape: Tuple[int, int], media_gap: Optional[int], target_scaler: Optional[preprocessing.CustomScaler], media_scaler: preprocessing.CustomScaler, geo_ratio: jnp.array, seed: Optional[int], media_values: jnp.ndarray) -> jnp.float64: """Objective function to calculate the sum of all predictions of the model. Args: extra_features: Extra features the model requires for prediction. media_mix_model: Media mix model to use. Must have a predict method to be used. media_input_shape: Input shape of the data required by the model to get predictions. This is needed since optimization might flatten some arrays and they need to be reshaped before running new predictions. media_gap: Media data gap between the end of training data and the start of the out of sample media given. Eg. if 100 weeks of data were used for training and prediction starts 2 months after training data finished we need to provide the 8 weeks missing between the training data and the prediction data so data transformations (adstock, carryover, ...) can take place correctly. target_scaler: Scaler that was used to scale the target before training. media_scaler: Scaler that was used to scale the media data before training. geo_ratio: The ratio to split channel media across geo. Should sum up to 1 for each channel and should have shape (c, g). seed: Seed to use for PRNGKey during sampling. For replicability run this function and any other function that gets predictions with the same seed. media_values: Media values required by the model to run predictions. Returns: The negative value of the sum of all predictions. """ if hasattr(media_mix_model, "n_geos") and media_mix_model.n_geos > 1: media_values = geo_ratio * jnp.expand_dims(media_values, axis=-1) media_values = jnp.tile( media_values / media_input_shape[0], reps=media_input_shape[0]) # Distribute budget of each channels across time. media_values = jnp.reshape(a=media_values, newshape=media_input_shape) media_values = media_scaler.transform(media_values) return -jnp.sum( media_mix_model.predict( media=media_values.reshape(media_input_shape), extra_features=extra_features, media_gap=media_gap, target_scaler=target_scaler, seed=seed).mean(axis=0))
5,335,864
def decode_block(block: np.ndarray) -> Union[np.ndarray, bool]: """ Decode a data block with hamming parity bits. :param block: The data block to be decoded :return the decoded data bits, False if the block is invalid """ if not block.size & block.size - 1 and block.size & 0x5555_5555: _block = np.array(block.flat) flip = reduce(lambda x, y: x ^ y, [i for i, bit in enumerate(_block) if bit] + [1, 1]) if flip: if reduce(lambda x, y: x ^ y, _block): warn('Two or more bit-flips occur, self-correction failed.') warn("Single bit-flip at index {} corrected".format(flip)) _block[flip] = not _block[flip] return np.array([bit for i, bit in enumerate(_block) if i and i & i - 1]) warn('Invalid block size.') return False
5,335,865
def clone(): """ Clone the student repositories for the assignment and (optionally) copy notebook files into the course_materials 'submitted' directory. Clones into the clone_dir directory, as specified in config.yml. Requires that the filename of the student roster is defined in config.yml and that the roster file exists. By default, if a local directory with the name of the repo already exists, pulls from GitHub to update. Use the --skip-existing flag if you don't want to update existing repos. """ parser = argparse.ArgumentParser(description=clone.__doc__) parser.add_argument( "assignment", help="""Name of assignment. Must match assignment name in course_materials directories""", ) parser.add_argument( "--skip-existing", action="store_true", help="""Do not attempt to update repositories that have already been cloned.""", ) parser.add_argument( "--no-submitted", action="store_false", help="""Skip moving files from cloned repo to submitted.""", ) args = parser.parse_args() clone_student_repos(args)
5,335,866
def kt_android_library(name, exports = [], visibility = None, **kwargs): """Creates an Android sandwich library. `srcs`, `deps`, `plugins` are routed to `kt_jvm_library` the other android related attributes are handled by the native `android_library` rule. """ # TODO(bazelbuild/rules_kotlin/issues/556): replace with starlark # buildifier: disable=native-android native.android_library( name = name, exports = exports + _kt_android_artifact(name, **kwargs), visibility = visibility, tags = kwargs.get("tags", default = None), testonly = kwargs.get("testonly", default = 0), )
5,335,867
def occupancy(meta, ax=None): """ Show channel occupancy over time. """ if ax is None: f, ax = plt.subplots() f.set_figwidth(14) f.suptitle("Occupancy over time") start_time = meta.read_start_time.min() / 10000 / 60 end_time = meta.read_end_time.max() / 10000 / 60 total_minutes = end_time - start_time num_channels = meta.channel_number.max()+1 X = np.zeros((num_channels, int(np.ceil(total_minutes)))) for channel, group in meta.groupby("channel_number"): for index, read in group.iterrows(): a,b = read.read_start_time/10000/60, read.read_end_time / 10000 / 60 X[channel, round(a):round(b)] = 1 ax.imshow(X, aspect= total_minutes/1800, cmap="Greys") ax.xaxis.set_label_text("Time (in minutes)") ax.yaxis.set_label_text("Channel number") return ax.get_figure(), ax
5,335,868
def reg1_r_characteristic(r, s, alpha, beta, c, h): """ evaluate x - ((4/3)r - (2/3)s)t in region 1, equation 19 """ # when s < 0 the expression can be factored and you avoid the # difference of nearly equal numbers and dividing by a small number # equation 74 rr = r/c ss = s/c poly1 = 2.0*rr - ss poly2 = 3.0*rr*rr - 2.0*rr*ss + ss*ss poly3 = 4.0*rr**3 - 3.0*rr*rr*ss + 2.0*rr*ss*ss - ss**3 value = np.where(s <= 0., h*(1.0 - (2./3.)*poly2 + (32./135.)*poly3 + (4./9.)*alpha*(poly1 - poly2 + (4./15.)*poly3) - (2./9.)*beta*(poly2 - (8./15.)*poly3)), evalPhip(r,alpha,beta,c,h)/(r + s + 1e-20) - (evalPhi(r,alpha,beta,c,h) - evalPhi(s,alpha,beta,c,h))/(r + s + 1e-20)**2 ) return value
5,335,869
def olf_gd_offline_in_z(X: np.ndarray, k: int, rtol: float = 1e-6, max_iter: int = 100000, rectY: bool = False, rectZ: bool = False, init: str = 'random', Y0=None, Z0=None, verbose: bool = False, alpha=1, cycle=500, rho=1, beta=0.1) \ -> Tuple[np.ndarray, np.ndarray, np.ndarray]: """ Gradient descent to calculate the solution of the olfactory cost function Parameters ---------- X: input data matrix k: number of LNs alpha: the initial learning rate rtol: relative tolerance init: initialization for the Y and Z matrices Returns ------- Y and Z Matrices minimizing the olf cost function """ D, N = X.shape # N is the number of samples if init != 'given': Y = get_init_matrix(D, N, method=init, A=X, scale=100, rect=rectY) # Y = X # i wonder if you should do the simulation with the random matrix, # so that is it not "cheating" by already taking a solution which resembles # what you think the solution should be. if rectY: Y = rectify(Y) # Y = X Z = get_init_matrix(k, N, method=init, A=X, scale=100, rect=rectZ) # Y = np.array(Y, dtype=FLOAT_TYPE) # A = np.array(A, dtype=FLOAT_TYPE) # X = np.array(X, dtype=FLOAT_TYPE, order='F') # Y = np.array(Y, dtype=FLOAT_TYPE, order='F') # Z = np.array(Z, dtype=FLOAT_TYPE, order='F') else: Y = Y0 Z = Z0 sigma = 0.1 cost0 = olf_cost(X, Y, Z, rho=rho) cost2 = cost0.copy() print(f'initial cost: {cost0}') for j in range(max_iter): Y_old = Y.copy() cost_old = cost2 Y, cost2, successY, a1 = gd_step_Y(X, Y, Z, cost_old, sigma, beta, alpha, rectY, rho=rho) if not successY: break conv1 = np.amax(np.abs(Y - Y_old) / np.abs(Y_old + 1e-2)) d_cost1 = np.abs(cost_old - cost2) / np.abs(cost_old) if d_cost1 < rtol and conv1 < rtol: print(f'stopped y iteration because cost and Y stopped changing, {j}') break if j % cycle == 0 and j > 0: alpha *= beta cost0 = olf_cost(X, Y, Z, rho=rho) cost2 = cost0.copy() cost1 = cost0.copy() print(f'cost after fixing Y: {cost0}') # eye = np.eye(N)/N costs = np.zeros(max_iter) # eye = np.array(eye, dtype=FLOAT_TYPE, order='F') if rectZ: funcz = rectify else: funcz = lambda x: x for i in range(max_iter): # print(i, cost2) costs[i] = cost2 Y_old = Y.copy() Z_old = Z.copy() cost_old2 = cost2.copy() grad_Z = get_grad_Z(Y, Z, rho=rho) # grad_Z = -get_grad_Z2(X, Z, rho=rho) alpha_z = alpha while alpha_z > EPSILON * 0.000001: successZ = False Z_new = funcz(Z_old + alpha_z * grad_Z) # expected_cost_increase = sigma * np.sum(grad_Z * (Z_new - Z_old)) alpha_y = alpha Y = Y_old.copy() cost1 = olf_cost(X, Y, Z_new, rho=rho) # print(alpha_z, cost1) for j in range(max_iter): # print(j, cost1) Y_old2 = Y.copy() cost_old1 = cost1 Y, cost1, successY, a1 = gd_step_Y(X, Y_old2, Z_new, cost_old1, sigma, beta, alpha_y, rectY, rho=rho) if not successY: # print('y iteration not successful') break conv1 = np.amax(np.abs(Y - Y_old2) / np.abs(Y_old2 + 1e-2)) d_cost1 = np.abs(cost_old1 - cost1) / np.abs(cost_old1) # print(conv1, d_cost1) if d_cost1 < rtol and conv1 < rtol: # print(f'stopped y iteration because cost and Y' # f'stopped changing, {j}, {alpha_y}') break # print(f'i, j: {i}, {j}, after y iteration: costs: {cost2}, {cost_old1}, {cost1}') # cost_new = olf_cost(X, Y, Z_new, rho=rho) # print(expected_cost_increase, cost_new - cost_old) cost_new = cost1 # if cost_new - cost_old2 > expected_cost_increase: if cost_new - cost_old2 > 0: # print(f'z iteration successful, {cost_old2}, {cost_new}') successZ = True break alpha_z *= beta if successZ: Z = Z_new cost2 = cost_new else: print('stopped because Z gd step was unsuccessful') break convz = np.amax(np.abs(Z-Z_old) / np.abs(Z_old + 1e-2)) d_cost2 = np.abs(cost_old2 - cost2) / np.abs(cost_old2) if d_cost2 < rtol and convz < rtol: print('stopped because costs and Z stopped changing') break if i % cycle == 0 and i > 0: alpha *= beta print(i, 'costs:', cost_old, cost1, cost2) # print('costs:', cost_old, cost1, cost2) # break print(f'i: {i}, costs: {cost0}, {cost2}') return Y, Z, costs[:i+1]
5,335,870
def get_features(model_description_features: List[Dict[str, Any]]): """Get features from a list of dictionaries Parameters ---------- model_description_features : List[Dict[str, Any]] Examples -------- >>> l = [{'StrokeCount': None}, \ {'ConstantPointCoordinates': \ [{'strokes': 4}, \ {'points_per_stroke': 81}, \ {'fill_empty_with': 0}, \ {'pen_down': False}] \ } \ ] >>> get_features(l) [StrokeCount, ConstantPointCoordinates - strokes: 4 - points per stroke: 81 - fill empty with: 0 - pen down feature: False - pixel_env: 0 ] """ return utils.get_objectlist( model_description_features, config_key="features", module=sys.modules[__name__] )
5,335,871
def get_cluster_codes(cluster: pd.Categorical) -> pd.Series: """Get the X location for plotting p-value string.""" categories = cluster.cat.categories.rename("cluster") return pd.Series(range(len(categories)), index=categories, name="x")
5,335,872
def _expand_configurations_from_chain(chain, *, pragma: str = 'pytmc', allow_no_pragma=False): """ Wrapped by ``expand_configurations_from_chain``, usable for callers that don't want the full product of all configurations. """ def handle_scalar(item, pvname, config): """Handler for scalar simple or structured items.""" yield item, config def handle_array_complex(item, pvname, config): """Handler for arrays of structured items (or enums).""" low, high = item.array_info.bounds expand_digits = math.floor(math.log10(high)) + 2 array_element_pragma = config.get('array', '') for idx in parse_array_settings(array_element_pragma, (low, high)): # shallow-copy; only touching the top level "pv" key idx_config = copy.copy(config) idx_config['pv'] += get_array_suffix( config, idx, default=f':%.{expand_digits}d') yield parser._ArrayItemProxy(item, idx), idx_config def get_all_options(subitems, handler, pragmas): split_pragma = split_pytmc_pragma('\n'.join(pragmas)) for pvname, separated_cfg in separate_configs_by_pv(split_pragma): config = dictify_config(separated_cfg) # config will have the SUBITEM key, applicable to its level # in the hierarchy. If it exists, merge it with our current set. if SUBITEM in config: _merge_subitems(subitems, config[SUBITEM]) for key, value in subitems.get(PRAGMA, []): config[key] = value yield from handler(item, pvname, config) # `subitems` keeps track of forward references with pragmas of members # and sub-members (and so on) subitems = {} for item in chain: subitems = subitems.get(item.name, {}) pragmas = list(pragma for pragma in get_pragma(item, name=pragma) if pragma is not None) if not pragmas: if allow_no_pragma: pragmas = [None] yield [(item, None)] continue # If any pragma in the chain is unset, escape early return [] if item.array_info and (item.data_type.is_complex_type or item.data_type.is_enum): options = get_all_options(subitems, handle_array_complex, pragmas) else: options = get_all_options(subitems, handle_scalar, pragmas) yield list(options)
5,335,873
def encrypt(data=None, key=None): """ Encrypts data :param data: Data to encrypt :param key: Encryption key (salt) """ k = _get_padded_key(key) e = AES.new(k, AES.MODE_CFB, k[::-1]) enc = e.encrypt(data) return base64.b64encode(enc)
5,335,874
def get_logger(logdir_path=None): """logging.Logger Args ---- logdir_path: str path of the directory where the log files will be output Returns ------- logger (logging.Logger): instance of logging.Logger """ log_format = ( '%(levelname)-8s - %(asctime)s - ' '[%(real_filename)s %(real_funcName)s %(real_lineno)d] %(message)s' ) sh = logging.StreamHandler() sh.addFilter(CustomFilter()) # sh.setLevel(logging.INFO) if logdir_path is None: logging.basicConfig(handlers=[sh], format=log_format, level=logging.INFO) else: if not os.path.exists(logdir_path): os.makedirs(logdir_path) logfile_path = logdir_path + str(datetime.date.today()) + '.log' fh = logging.FileHandler(logfile_path) fh.addFilter(CustomFilter()) logging.basicConfig( handlers=[sh, fh], format=log_format, level=logging.INFO ) logger = logging.getLogger(__name__) return logger
5,335,875
def set_is_dirty_for_all_views(is_dirty: bool) -> None: """ @brief Set is_dirty for all views. @param is_dirty Indicate if views are dirty """ for w in sublime.windows(): for v in w.views(): if is_view_normal_ready(v): view_is_dirty_val(v, is_dirty)
5,335,876
def combine_results(input_files, output_files): """ Concatenate all input files into a single output file, then touch an empty success-flag file """ (output_filename, success_flag_filename) = output_files with open(output_filename, "w") as out: for inp, flag in input_files: with open(inp) as ii: out.write(ii.read()) with open(success_flag_filename, "w") as oo: pass
5,335,877
def add_ROCR100(self, timeperiod=10, type="line", color="tertiary", **kwargs): """Rate of Change (Ratio * 100).""" if not self.has_close: raise Exception() utils.kwargs_check(kwargs, VALID_TA_KWARGS) if "kind" in kwargs: type = kwargs["kind"] name = "ROCR100({})".format(str(timeperiod)) self.sec[name] = dict(type=type, color=color) self.ind[name] = talib.ROCR100(self.df[self.cl].values, timeperiod)
5,335,878
def box_in_k_largest(boxes, box, k): """Returns True if `box` is one of `k` largest boxes in `boxes`. If there are ties that extend beyond k, they are included.""" if len(boxes) == 0: return False boxes = sorted(boxes, reverse=True, key=box_volume) n = len(boxes) prev = box_volume(boxes[0]) for i in range(n): vol = box_volume(boxes[i]) if i >= k: if vol < prev: break prev = vol if np.array_equal(boxes[i], box): return True return False
5,335,879
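A small usage sketch for box_in_k_largest above; box_volume is not defined in the snippet, so a hypothetical implementation (product of edge lengths for boxes stored as [min_corner, max_corner] arrays) is assumed here:

import numpy as np

def box_volume(box):  # hypothetical helper: product of edge lengths
    return float(np.prod(box[1] - box[0]))

boxes = [np.array([[0, 0, 0], [1, 1, 1]]),   # volume 1
         np.array([[0, 0, 0], [2, 2, 2]]),   # volume 8
         np.array([[0, 0, 0], [3, 1, 1]])]   # volume 3
print(box_in_k_largest(boxes, boxes[0], k=2))  # False: volume 1 is not among the 2 largest
print(box_in_k_largest(boxes, boxes[2], k=2))  # True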
def _config_cron_workflow( schedule, concurrency_policy='"Allow"', # Default to "Allow" successful_jobs_history_limit=3, # Default 3 failed_jobs_history_limit=1, # Default 1 starting_deadline_seconds=10, suspend="false", timezone="Asia/Shanghai", # Default to Beijing time ): """ Config the CronWorkflow, see example https://github.com/argoproj/argo/blob/master/docs/cron-workflows.md """ cron_config = { "schedule": schedule, "concurrencyPolicy": concurrency_policy, "successfulJobsHistoryLimit": successful_jobs_history_limit, "failedJobsHistoryLimit": failed_jobs_history_limit, "startingDeadlineSeconds": starting_deadline_seconds, "suspend": suspend, "timezone": timezone, } states.workflow.config_cron_workflow(cron_config)
5,335,880
def print_report_row(col1, col2, col3, col4): """Prints memory usage report row""" print("{: >20} {: >10} {: >10} {: >10}".format(col1, col2, col3, col4))
5,335,881
def diagonal(a, offset=0, axis1=0, axis2=1): """ Returns specified diagonals. If `a` is 2-D, returns the diagonal of a with the given offset, i.e., the collection of elements of the form a[i, i+offset]. If `a` has more than two dimensions, then the axes specified by axis1 and axis2 are used to determine the 2-D sub-array whose diagonal is returned. The shape of the resulting array can be determined by removing axis1 and axis2 and appending an index to the right equal to the size of the resulting diagonals. Args: a (Tensor): Array from which the diagonals are taken. offset (int): optional. Offset of the diagonal from the main diagonal. Can be positive or negative. Defaults to main diagonal (0). axis1 (int): optional. Axis to be used as the first axis of the 2-D sub-arrays from which the diagonals should be taken. Defaults to first axis (0). axis2 (int): optional. Axis to be used as the second axis of the 2-D sub-arrays from which the diagonals should be taken. Defaults to second axis (1). Returns: Tensor, if `a` is 2-D, then a 1-D array containing the diagonal. If a.ndim > 2, then the dimensions specified by axis1 and axis2 are removed, and a new axis inserted at the end corresponding to the diagonal. Raises: ValueError: if the input tensor has less than two dimensions. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> a = np.arange(4).reshape(2,2) >>> print(a) [[0 1] [2 3]] >>> output = np.diagonal(a) >>> print(output) [0 3] >>> output = np.diagonal(a, 1) >>> print(output) [1] >>> a = np.arange(8).reshape(2, 2, 2) >>> print(a) [[[0 1] [2 3]] [[4 5] [6 7]]] >>> output = np.diagonal(a, 0, 0, 1) >>> print(output) [[0 6] [1 7]] """ ndim = F.rank(a) if ndim < 2: return _raise_value_error('diagonal requires an array of at least two dimensions') dtype = F.dtype(a) if _is_empty(F.shape(a)): return _empty(dtype, (0,)) cast_type = dtype if not isinstance(dtype, Float): # reduce_sum only supports float types cast_type = mstype.float32 a = F.cast(a, cast_type) axes = _check_axis_valid((axis1, axis2), ndim) perm = () for i in range(ndim): if i not in axes: perm += (i,) perm += axes a = transpose(a, perm) shape = F.shape(a) n, m = shape[-2:] e = _eye(n, m, offset, cast_type) e = _expand(e, ndim) e = _broadcast_to(e, F.shape(e), F.shape(a), ndim) prod = F.tensor_mul(a, e) res = F.reduce_sum(prod, -1) begin = () for i in range(ndim-2): begin += (0,) last_dim_begin = _max(0, -offset) begin += (last_dim_begin,) size = F.shape(res)[:-1] last_dim_end = _min( shape[-2], _max(0, shape[-1] - offset)) - last_dim_begin if last_dim_end <= 0: return _empty(dtype, size + (0,)) size += (last_dim_end,) res = F.tensor_slice(res, begin, size) if not _check_same_type(cast_type, dtype): res = F.cast(res, dtype) return res
5,335,882
def available_number_of_windows_in_array(n_samples_array, n_samples_window, n_advance): """ Parameters ---------- n_samples_array : int total number of samples in the array n_samples_window : int number of samples per window n_advance : int number of samples the window advances between consecutive windows Returns ------- int number of complete windows that fit in the array """ stridable_samples = n_samples_array - n_samples_window if stridable_samples < 0: raise ValueError("Window is longer than the time series") available_number_of_strides = int(np.floor(stridable_samples / n_advance)) return available_number_of_strides + 1
5,335,883
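A worked example for available_number_of_windows_in_array above: with 100 samples, a 10-sample window and an advance of 5, the stridable length is 100 - 10 = 90, floor(90 / 5) = 18, so 18 + 1 = 19 windows fit.

print(available_number_of_windows_in_array(100, 10, 5))   # 19
print(available_number_of_windows_in_array(100, 100, 5))  # 1 -- a single window exactly fits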
def tensor_to_image(tensor: torch.tensor) -> ndarray: """ Convert a torch tensor to a numpy array :param tensor: torch tensor :return: numpy array """ image = TENSOR_TO_PIL(tensor.cpu().clone().squeeze(0)) return image
5,335,884
def translate_fields(translations, proto_set, fields): """In-place replace text with translations for one proto_set.""" for item in proto_set.data_array: for field in fields: val = getattr(item, field) if val: setattr(item, field, translations[val])
5,335,885
def shimizu_mirioka(XYZ, t, a=0.75, b=0.45): """ The Shimizu-Morioka attractor. Suggested initial condition: x0 = (0.1, 0, 0) """ x, y, z = XYZ x_dt = y y_dt = (1 - z) * x - a * y z_dt = x**2 - b * z return x_dt, y_dt, z_dt
5,335,886
def mock_client_fixture(): """Prevent setup.""" with patch("custom_components.porscheconnect.Client") as mock: instance = mock.return_value instance.getVehicles.return_value = async_return([]) instance.getAllTokens.return_value = async_return([]) instance.isTokenRefreshed.return_value = False yield
5,335,887
def draw_cap_peaks_rh_coord(img_bgr, rafts_loc, rafts_ori, raft_sym, cap_offset, rafts_radii, num_of_rafts): """ draw lines to indicate the capillary peak positions in right-handed coordinate :param numpy array img_bgr: the image in bgr format :param numpy array rafts_loc: the locations of rafts :param numpy array rafts_ori: the orientation of rafts, in deg :param int raft_sym: the symmetry of raft :param int cap_offset: the angle between the dipole direction and the first capillary peak, in deg :param numpy array rafts_radii: radii of the rafts :param int num_of_rafts: num of rafts :return: bgr image file """ line_thickness = int(2) line_color2 = (0, 255, 0) cap_gap = 360 / raft_sym # cap_offset = 45 # the angle between the dipole direction and the first capillary peak output_img = img_bgr height, width, _ = img_bgr.shape for raft_id in np.arange(num_of_rafts): for capID in np.arange(raft_sym): # note that the sign in front of the sine term is "+" line_start = (rafts_loc[raft_id, 0], height - rafts_loc[raft_id, 1]) line_end = (int(rafts_loc[raft_id, 0] + np.cos((rafts_ori[raft_id] + cap_offset + capID * cap_gap) * np.pi / 180) * rafts_radii[raft_id]), height - int(rafts_loc[raft_id, 1] + np.sin((rafts_ori[raft_id] + cap_offset + capID * cap_gap) * np.pi / 180) * rafts_radii[raft_id])) output_img = cv.line(output_img, line_start, line_end, line_color2, line_thickness) return output_img
5,335,888
def plot_calibration_results( run_dict_list, fig_scale=1, aggregation="median", legend=True, legend_kwargs=None, ax=None, ): """ Plot calibration results Parameters ---------- run_dict_list : list[dict] List of dicts, each with keys id: Run id label: Legend label plot_opts: Opts for plotting criterion: Calibration criterion (e.g., neg_entropy) checkpoint_step: Checkpoint step """ if ax is None: fig, ax = plt.subplots( nrows=1, ncols=1, figsize=(5.4 * 0.5 * fig_scale, 1.5 * fig_scale) ) for run in run_dict_list: eval_df = load_calibration_data(run) mean_arr, min_arr, max_arr = aggregate_calibration_data(eval_df, aggregation) x = np.arange(1, mean_arr.shape[0] + 1) if run["label"] != "": label = run["label"] + ", " else: label = "" if "has_minmax" not in run or run["has_minmax"] is True: run["has_minmax"] = ["rand", "ol", "mpc"] if not run["has_minmax"]: run["has_minmax"] = [] if run.get("has_rand", True): l = ax.plot(x, mean_arr[:, 0], label=label + "Random") if "rand" in run["has_minmax"]: ax.fill_between( x, min_arr[:, 0], max_arr[:, 0], alpha=0.15, color=l[0].get_color() ) if run.get("has_ol", True): l = ax.plot(x, mean_arr[:, 2], label=label + "OL") if "ol" in run["has_minmax"]: ax.fill_between( x, min_arr[:, 2], max_arr[:, 2], alpha=0.15, color=l[0].get_color() ) if run.get("has_mpc", True): l = ax.plot(x, mean_arr[:, 1], label=label + "MPC") if "mpc" in run["has_minmax"]: ax.fill_between( x, min_arr[:, 1], max_arr[:, 1], alpha=0.15, color=l[0].get_color() ) ax.set_xlabel("Prediction horizon") ax.set_ylabel("Mean squared error") lower_bar = mean_arr - min_arr upper_bar = max_arr - mean_arr ax.set(yscale="log") ax.set_xticks([1, 10, 20, 30, 40, 50]) if legend: if legend_kwargs is None: legend_kwargs = {} plt.legend(handlelength=1, columnspacing=0.5, labelspacing=0.1, **legend_kwargs)
5,335,889
def invoke(command): """Invoke sub-process.""" try: output = subprocess.check_output(command, stderr=subprocess.STDOUT) status = 0 except subprocess.CalledProcessError as error: # pragma: no cover output = error.output status = error.returncode return status, output
5,335,890
def pack_bidirectional_lstm_state(state, num_layers): """ Pack the hidden state of a BiLSTM s.t. the first dimension equals to the number of layers. """ assert (len(state) == 2 * num_layers) _, batch_size, hidden_dim = state.size() layers = state.view(num_layers, 2, batch_size, hidden_dim).transpose(1, 2).contiguous() state = layers.view(num_layers, batch_size, -1) return state
5,335,891
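A quick shape check for pack_bidirectional_lstm_state above (assumes PyTorch); the input mimics the h_n returned by a bidirectional nn.LSTM:

import torch

num_layers, batch_size, hidden_dim = 2, 4, 8
h = torch.zeros(2 * num_layers, batch_size, hidden_dim)
packed = pack_bidirectional_lstm_state(h, num_layers)
print(packed.shape)  # torch.Size([2, 4, 16]) -- forward/backward states concatenated per layer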
def _create_record_from_template(template, start, end, fasta_reader): """Returns a copy of the template variant with the new start and end. Updates to the start position cause a different reference base to be set. Args: template: third_party.nucleus.protos.Variant. The template variant whose non-location and reference base information to use. start: int. The desired new start location. end: int. The desired new end location. fasta_reader: GenomeReferenceFai object. The reader used to determine the correct start base to use for the updated variant. Returns: An updated third_party.nucleus.protos.Variant with the proper start, end, and reference base set and all other fields inherited from the template. """ retval = copy.deepcopy(template) retval.start = start retval.end = end if start != template.start: retval.reference_bases = fasta_reader.query( ranges.make_range(retval.reference_name, start, start + 1)) return retval
5,335,892
def run_W_E(cfg=None, eta_path=None, pris_path=None, etrm_daily_path=None, output_folder=None, is_ssebop=False, is_jpl=False, shape=None, start_date=None, end_date=None, time_step='monthly', eta_output=None, precip_output=None, ratiomod=True): """ :return: """ # over 15 years we normalize every monthly map by the total precip. Do the same for SSEB ETa. # path to monthly ETa if eta_path is None: eta_path = "yourpath" # path to monthly Prism if pris_path is None: pris_path = "yourpath" # output path if output_folder is None: output_folder = "yourpath" # function to calculate the depletion at a monthly timestep # within that function, output the running depletion raster that results from each month's F_in-F_out if is_ssebop: start_date = datetime.strptime("2000-2", "%Y-%m") end_date = datetime.strptime("2013-12", "%Y-%m") if cfg != None: start_date, end_date = cfg.date_range print 'start and end dates: {}, {}'.format(start_date, end_date) if time_step == 'monthly': values_in_timeseries = ((end_date.year - start_date.year) * 12) + (end_date.month - start_date.month) print 'months in analysis series: {}'.format(values_in_timeseries) precip_name = "tot_precip_{}_{}.tif" if is_ssebop: eta_name = "ssebop_{}_{}_warped.tif" else: eta_name = "tot_eta_{}_{}.tif" elif time_step == 'daily': date_delta = end_date - start_date values_in_timeseries = date_delta.days # year and julian date all together no spaces precip_name = "PRISMD2_NMHW2mi_{}{:02d}{:02d}.tif" if is_ssebop: eta_name = "ssebop_{}_{}_warped.tif" elif is_jpl: # year.zeropadmonth.zeropadday.PTJPL.ET_daily_kg.MODISsin1km_etrm.tif if ratiomod: eta_name = "{}.{:02d}.{:02d}.PTJPL.ET_daily_kg.MODISsin1km_etrm_ratiomod.tif" else: eta_name = "{}.{:02d}.{:02d}.PTJPL.ET_daily_kg.MODISsin1km_etrm.tif" # TODO - GENERATE ETRM DATASET to GAPFILL ETRM_eta_name = "" else: eta_name = "tot_eta_{}_{}.tif" # at each timestep, keep track of how negative the depletion has gotten; initialize at zero if shape is None: shape = 2525, 2272 depletion_ledger = np.zeros(shape, dtype=float) # keep track of the maximum depletion map max_depletion = np.zeros(shape, dtype=float) # keep track of the minimum depletion map min_depletion = np.zeros(shape, dtype=float) # track total ETa total_eta = np.zeros(shape, dtype=float) # track cumulative prism precip total_precip = np.zeros(shape, dtype=float) # track NANs nan_counter = np.zeros(shape, dtype=float) # instantiate a yesterday array for gapfilling yesterday_eta_arr = np.zeros(shape, dtype=float) for i in range(values_in_timeseries + 1): print 'run we iteration {}'.format(i) if time_step == 'monthly': # count up from the start date by months... date = start_date + relativedelta(months=+i) precip = os.path.join(pris_path, precip_name.format(date.year, date.month)) eta = os.path.join(eta_path, eta_name.format(date.year, date.month)) elif time_step == 'daily': date = start_date + relativedelta(days=+i) if i > 0: yesterday = start_date + relativedelta(days=(+i-1)) precip = os.path.join(pris_path, precip_name.format(date.year, date.month, date.day)) if is_jpl: eta = os.path.join(eta_path, eta_name.format(date.year, date.month, date.day)) # if i > 0: # yesterday_eta = os.path.join(eta_path, eta_name.format(yesterday.year, yesterday.month, yesterday.day)) # print 'yesterday eta', yesterday_eta else: print 'you need to set the script up to run jpl data or specify another daily dataset' \ ' and fix the script' if not os.path.isfile(eta): print 'cannot find eta file: \n {}'.format(eta) print 'we will skip this day and continue accumulating' continue # array, transform, dimensions, projection, data type precip_arr, transform, dim, proj, dt = raster_extract(precip) eta_arr, transform, dim, proj, dt = raster_extract(eta) eta_nan_bool = np.isnan(eta_arr) precip_arr[eta_nan_bool] = 0 eta_arr[eta_nan_bool] = 0 # count how many NAN's occur for every pixel nan_counter[eta_nan_bool] += 1 total_eta += eta_arr total_precip += precip_arr if time_step == 'daily' and eta_output != None: # output the current timestep cumulative eta and precip eta_outname = 'cumulative_eta_{}_{}_{}.tif'.format(date.year, date.month, date.day) write_raster(total_eta, transform, eta_output, eta_outname, dim, proj, dt) if time_step == 'daily' and precip_output != None: # output the current timestep cumulative eta and precip precip_outname = 'cumulative_prism_{}_{}_{}.tif'.format(date.year, date.month, date.day) write_raster(total_precip, transform, precip_output, precip_outname, dim, proj, dt) # depletion for the current timestep depletion_delta = depletion_calc(eta_arr, precip_arr) # add to the running depletion tally depletion_ledger += depletion_delta # for any values that become negative, make them zero. Assume runoff...Wang-Erlandsson (2016) # todo - comment out to allow negative depletions (i.e., accumulation of water beyond storage) depletion_ledger[depletion_ledger < 0.0] = 0.0 max_depletion = np.maximum(depletion_ledger, max_depletion) if cfg != None: # reset minimum two years into the analysis if i is 24: min_depletion = depletion_ledger min_depletion = np.minimum(depletion_ledger, min_depletion) if time_step == 'monthly': # for each monthly timestep, take the cumulative depletion condition and output it as a raster depletion_name = "cumulative_depletion_{}_{}.tif".format(date.year, date.month) write_raster(depletion_ledger, transform, output_folder, depletion_name, dim, proj, dt) elif time_step == 'daily': depletion_name = "cumulative_depletion_{}_{}_{}.tif".format(date.year, date.month, date.day) write_raster(depletion_ledger, transform, output_folder, depletion_name, dim, proj, dt) print 'iterations finished' # output the maximum depletion max_depletion_name = 'max_depletion_{}_{}.tif'.format(start_date.year, end_date.year) write_raster(max_depletion, transform, output_folder, max_depletion_name, dim, proj, dt) if cfg != None: # output the minimum depletion min_depletion_name = 'min_depletion_{}_{}.tif'.format(start_date.year+2, end_date.year) write_raster(min_depletion, transform, output_folder, min_depletion_name, dim, proj, dt) # output the depletion range range_depletion = np.zeros(shape, dtype=float) range_depletion = max_depletion - min_depletion range_depletion_name = 'range_depletion_{}_{}.tif'.format(start_date.year + 2, end_date.year) write_raster(range_depletion, transform, output_folder, range_depletion_name, dim, proj, dt) # output total ETa (i.e., SSEBop) to test whether it looks like the netcdf file if is_ssebop: total_eta_name = "cum_total_ssebop_{}_{}.tif".format(start_date.year, end_date.year) elif is_jpl: total_eta_name = "cum_total_jpl_eta_{}_{}.tif".format(start_date.year, end_date.year) else: total_eta_name = 'cum_total_generic_eta_{}_{}.tif'.format(start_date.year, end_date.year) write_raster(total_eta, transform, output_folder, total_eta_name, dim, proj, dt) # output the total PRISM precip total_precip_name = 'cum_total_prism_precip_{}_{}.tif'.format(start_date.year, end_date.year) write_raster(total_precip, transform, output_folder, total_precip_name, dim, proj, dt) # output the nan raster nan_name = 'cumulative_nan_occurences_{}_{}.tif'.format(start_date.year, end_date.year) write_raster(nan_counter, transform, output_folder, nan_name, dim, proj, dt)
5,335,893
def create_outputfile(prxdoc,inputfiles_element,inputfilehref,nominal_outputfilehref,outputfilehref,outputdict,ignore_locking): """Create the output XML file from the raw input by running any filters, etc. It will be presumed that the output XML file will eventually be referred to by nominal_outputfilehref, but the actual file written will be outputfilehref""" # print("inputfilehref=%s" % (inputfilehref.humanurl())) if inputfilehref.get_bare_unquoted_filename().lower().endswith(".xls") or inputfilehref.get_bare_unquoted_filename().lower().endswith(".xlsx"): try: import xlrd import xlrd.sheet inputfileelement=outputdict[inputfilehref].inputfileelement # Any dc: namespace elements within the inputfileelement # will get placed in a dc:summary tag timestamp=datetime.datetime.fromtimestamp(os.path.getmtime(inputfilehref.getpath()),lm_timestamp.UTC()).isoformat() spreadsheet=xlrd.open_workbook(inputfilehref.getpath()) sheetname=prxdoc.getattr(inputfileelement,"sheetname",spreadsheet.sheet_names()[0]) sheet=spreadsheet.sheet_by_name(sheetname) titlerow=int(prxdoc.getattr(inputfileelement,"titlerow","1"))-1 # titlerow=sheet.row(titlerownum) nrows=sheet.nrows ncols=sheet.ncols rawtitles = [ str(sheet.cell(titlerow,col).value).strip() for col in range(ncols) ] tagnames = [ convert_to_tagname(splitunits(rawtitle)[0]) if rawtitle is not None and len(rawtitle) > 0 else "blank" for rawtitle in rawtitles ] unitnames = [ convert_to_tagname(splitunits(rawtitle)[1]) if rawtitle is not None and len(rawtitle) > 0 else None for rawtitle in rawtitles ] nsmap=copy.deepcopy(prx_nsmap) nsmap["ls"] = "http://limatix.org/spreadsheet" outdoc=xmldoc.xmldoc.newdoc("ls:sheet",nsmap=nsmap,contexthref=outputfilehref) # Copy dc: namespace elements within inputfileelement # into a dc:summary tag inputfileel_children=prxdoc.children(inputfileelement) summarydoc=None for inputfileel_child in inputfileel_children: if prxdoc.gettag(inputfileel_child).startswith("dc:"): if summarydoc is None: summarydoc=xmldoc.xmldoc.newdoc("dc:summary",nsmap=nsmap,contexthref=prxdoc.getcontexthref()) pass # place in document with same context as where it came from summarydoc.getroot().append(copy.deepcopy(inputfileel_child)) pass pass if summarydoc is not None: # shift summary context and then copy it into outdoc summarydoc.setcontexthref(outdoc.getcontexthref()) outdoc.getroot().append(copy.deepcopy(summarydoc.getroot())) pass # Copy spreadsheet table for row in range(titlerow+1,nrows): rowel=outdoc.addelement(outdoc.getroot(),"ls:row") rownumel=outdoc.addelement(rowel,"ls:rownum") outdoc.settext(rownumel,str(row)) for col in range(ncols): cell=sheet.cell(row,col) cell_type=xlrd.sheet.ctype_text.get(cell.ctype,'unknown') if cell_type=="empty": continue cellel=outdoc.addelement(rowel,"ls:"+tagnames[col]) outdoc.setattr(cellel,"ls:celltype",cell_type) hyperlink=sheet.hyperlink_map.get((row,col)) if cell_type=="text" and hyperlink is None: outdoc.settext(cellel,cell.value) pass elif cell_type=="text" and hyperlink is not None: # Do we need to do some kind of conversion on # hyperlink.url_or_path() outdoc.settext(cellel,cell.value) hyperlink_href=dcv.hrefvalue(hyperlink.url_or_path,contexthref=inputfilehref) hyperlink_href.xmlrepr(outdoc,cellel) pass elif cell_type=="number": if unitnames[col] is not None: outdoc.setattr(cellel,"dcv:units",unitnames[col]) pass outdoc.settext(cellel,str(cell.value)) pass elif cell_type=="xldate": outdoc.settext(cellel,datetime.datetime(*xlrd.xldate_as_tuple(cell.value,spreadsheet.datemode)).isoformat()) pass elif cell_type=="bool": outdoc.settext(cellel,str(bool(cell.value))) pass elif cell_type=="error": outdoc.settext(cellel,"ERROR %d" % (cell.value)) pass else: raise ValueError("Unknown cell type %s" %(cell_type)) pass pass # Did the user provide a prx:xslt href indicating # a transformation to apply? xslttag=prxdoc.xpathsinglecontext(outputdict[inputfilehref].inputfileelement,"prx:xslt",default=None) if xslttag is not None: # Replace outdoc with transformed copy outdoc = create_outputfile_process_xslt(prxdoc,xslttag,inputfiles_element,outputdict[inputfilehref].inputfileelement,outdoc) pass # Write out under new file name outputfilehref assert(outputfilehref != inputfilehref) outdoc.set_href(outputfilehref,readonly=False) outdoc.close() canonhash=None # could hash entire input file... pass except ImportError: raise(ImportError("Need to install xlrd package in order to import .xls or .xlsx files")) pass elif inputfilehref.has_fragment(): # input file url has a fragment... we're only supposed # to extract a portion of the file timestamp=datetime.datetime.fromtimestamp(os.path.getmtime(inputfilehref.getpath()),lm_timestamp.UTC()).isoformat() if inputfilehref.fragless()==prxdoc.get_filehref(): inputfilecontent=prxdoc # special case where input file is .prx file pass else: inputfilecontent=xmldoc.xmldoc.loadfile(inputfilehref.getpath()) pass inputfileportion=inputfilehref.evaluate_fragment(inputfilecontent) if len(inputfileportion)==0: raise ValueError("Input URL %s fragment reference failed to resolve" % (inputfilehref.humanurl())) elif len(inputfileportion) > 1: raise ValueError("Input URL %s fragment reference resolved to multiple elements" % (inputfilehref.humanurl())) #print("inputfilehref=%s" % (inputfilehref.humanurl())) #print("inputfileportion=%s" % (etree.tostring(inputfileportion[0]))) #import pdb as pythondb #pythondb.set_trace() outdoc=xmldoc.xmldoc.copy_from_element(inputfilecontent,inputfileportion[0],nsmap=prx_nsmap) # NOTE: prx_nsmap doesn't make much difference here because the nsmap of the element is copied in. prx_nsmap just makes our prefixes available through xmldoc # Create canonicalization from unmodified outdoc so that we can hash it outdoc_canon=BytesIO() outdoc.doc.write_c14n(outdoc_canon,exclusive=False,with_comments=True) canonhash=hashlib.sha256(outdoc_canon.getvalue()).hexdigest() if inputfileportion[0] is outputdict[inputfilehref].inputfileelement: # special case where this input file href with fragment # points to its very tag -- the <inputfiles> tag in the prxfile # auto-populate corresponding <outputfile> tags # i.e. modify outdoc to make sure there is an <outputfile> tag with an xlink:href # for each inputfile assert(inputfilecontent.gettag(inputfileportion[0])=="prx:inputfiles") outdoc_inputfiletags=[ outdoc.getroot() ] # treat the root <inputfiles> tag as an inputfile outdoc_inputfiletags.extend(outdoc.xpath("prx:inputfile")) for outdoc_inputfiletag in outdoc_inputfiletags: if outdoc_inputfiletag is outdoc.getroot() and not outdoc.hasattr(outdoc_inputfiletag,"xlink:href"): # root prx:inputfiles tag has no xlink:href assert(outdoc.gettag(outdoc_inputfiletag)=="prx:inputfiles") outdoc_inputfilehref = inputfilehref # subsegment of input file pass elif outdoc.hasattr(outdoc_inputfiletag,"xlink:href") and outdoc_inputfiletag is not outdoc.getroot(): outdoc_inputfilehref = dcv.hrefvalue.fromxml(outdoc,outdoc_inputfiletag) # specified input file pass else: raise ValueError("Bad <prx:inputfiles> or <prx:inputfile> tag at %s" % (dcv.hrefvalue.fromelement(outdoc,outdoc_inputfiletag).humanurl())) #print("outdoc_inputfilehref:") #print(outdoc_inputfilehref) #print("outputdict keys:") #print(outputdict.keys()) assert(outdoc_inputfilehref in outputdict) # all of these input file references should be keys to the output dict because outputdict was made from the originals! # Find or create prx:outputfile tag outdoc_outputfiletag = outdoc.child(outdoc_inputfiletag,"prx:outputfile") if outdoc_outputfiletag is None: outdoc_outputfiletag=outdoc.addelement(outdoc_inputfiletag,"prx:outputfile") pass # Ensure prx:outputfile tag has a hyperlink if not outdoc.hasattr(outdoc_outputfiletag,"xlink:href"): outputdict[outdoc_inputfilehref].outputfilehref.xmlrepr(outdoc,outdoc_outputfiletag) pass pass pass # Did the user provide a prx:xslt href indicating # a transformation to apply? xslttag=prxdoc.xpathsinglecontext(outputdict[inputfilehref].inputfileelement,"prx:xslt",default=None) if xslttag is not None: outdoc = create_outputfile_process_xslt(prxdoc,xslttag,inputfiles_element,outputdict[inputfilehref].inputfileelement,outdoc) pass # Write out selected portion under new file name outputfilehref assert(outputfilehref != inputfilehref) outdoc.set_href(outputfilehref,readonly=False) outdoc.close() pass else: # input file url has no fragment, not .xls or .xlsx: treat it as XML # extract the whole thing! # Do we have an input filter? ... stored as xlink:href in <inputfilter> tag canonhash=None # (we could hash the entire inputfile!) inputfilters=prxdoc.xpathcontext(outputdict[inputfilehref].inputfileelement,"prx:inputfilter") if len(inputfilters) > 1: raise ValueError("Maximum of one <inputfilter> element permitted in .prx file") timestamp=datetime.datetime.fromtimestamp(os.path.getmtime(inputfilehref.getpath()),lm_timestamp.UTC()).isoformat() xslttag=prxdoc.xpathsinglecontext(outputdict[inputfilehref].inputfileelement,"prx:xslt",default=None) if len(inputfilters) > 0: # have an input filter inputfilter=inputfilters[0] # run input filter # Get path from xlink:href #inputfilterpath=prxdoc.get_href_fullpath(inputfilter) inputfilterhref=dcv.hrefvalue.fromxml(prxdoc,inputfilter) inputfilterpath=inputfilterhref.getpath() # build arguments inputfilterargs=[inputfilterpath] # pull attributes named param1, param2, etc. from inputfilter tag cnt=1 while "param"+str(cnt) in inputfilter.attrib: inputfilterargs.append(inputfilter.attrib["param"+str(cnt)]) cnt+=1 pass # add input and output filenames as params to filter inputfilterargs.append(inputfilehref.getpath()) inputfilterargs.append(outputfilehref.getpath()) # Call input filter... will raise # exception if input filter fails. subprocess.check_call(inputfilterargs) pass elif xslttag is not None: indoc=xmldoc.xmldoc.loadhref(inputfilehref,nsmap=prx_nsmap,readonly=True) outdoc = create_outputfile_process_xslt(prxdoc,xslttag,inputfiles_element,outputdict[inputfilehref].inputfileelement,indoc) # Write out under new file name outputfilehref assert(outputfilehref != inputfilehref) outdoc.set_href(outputfilehref,readonly=False) outdoc.close() pass else: # use shutil to copy input to output shutil.copyfile(inputfilehref.getpath(),outputfilehref.getpath()) pass pass return (canonhash,timestamp)
5,335,894
def test_resolver(): """Simple test of the DNS resolver """ hosts = ['www', 'mail', 'maps'] dns_servers = ['8.8.8.8', '4.2.2.2'] r = resolver.Resolver( hostnames=hosts, domain='google.com', nameservers=dns_servers, tries=1 ) cache = r.resolve() assert len(cache[hosts[0]]) >= 1 # Clear the resolver and try again r.clear() assert(len(r.cache) == 0) r.hostnames = hosts r.resolve() assert len(cache[hosts[1]]) >= 1
5,335,895
def section3(): """ ## Convert Mask Annotations to Polygon More about from_segmentation() function on <a href="https://console.dataloop.ai/sdk-docs/dtlpy.entities.annotation_definitions.html#dtlpy.entities.annotation_definitions.polygon.Polygon.from_segmentation" target="_blank">here</a>. """
5,335,896
def get_example_data(filepath: str, is_gzip: bool = True, make_bytes: bool = False) -> BytesIO: """ Fetch example data: open the file (downloading it if necessary) and decompress it. :param filepath: path of the data file. :param is_gzip: whether the content is gzip-compressed. :param make_bytes: wrap the content in a BytesIO object. :return: the file content. """ # Load from the local copy if it exists local_path = os.path.join(EXAMPLES_FOLDER, "data", filepath) if os.path.exists(local_path): with io.open(local_path, "rb") as f: content = f.read() else: content = request.urlopen(f"{BASE_URL}{filepath}?raw=true").read() if is_gzip: content = zlib.decompress(content, zlib.MAX_WBITS | 16) if make_bytes: content = BytesIO(content) return content
5,335,897
def test_get_InSar_flight_comment(data_name, expected): """ Test that we can formulate a useful comment for the UAVSAR annotation file and a data name """ blank = '{} time of acquisition for pass {}' desc = {blank.format('start', '1'): {'value': pd.to_datetime('2020-01-01 10:00:00 UTC')}, blank.format('stop', '1'): {'value': pd.to_datetime('2020-01-01 12:00:00 UTC')}, blank.format('start', '2'): {'value': pd.to_datetime('2020-02-01 10:00:00 UTC')}, blank.format('stop', '2'): {'value': pd.to_datetime('2020-02-01 12:00:00 UTC')}} comment = get_InSar_flight_comment(data_name, desc) assert comment == expected
5,335,898
def convert_time(time_string): """ Parse a time given in HH:MM form and return a time.struct_time representing it """ return time.strptime(time_string, "%H:%M")
5,335,899
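A short usage sketch for convert_time above (standard library only):

import time

t = convert_time("13:45")
print(t.tm_hour, t.tm_min)  # 13 45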