content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
import pyarrow as pa


def ST_IsValid(geos):
    """
    Test each input geometry for validity.

    :type geos: Series(dtype: object)
    :param geos: Geometries in WKB form.

    :rtype: Series(dtype: bool)
    :return: True for every geometry that is valid.

    :example:
      >>> import pandas
      >>> import arctern
      >>> data = pandas.Series(["POINT (1.3 2.6)", "POINT (2.6 4.7)"])
      >>> rst = arctern.ST_IsValid(arctern.ST_GeomFromText(data))
      >>> print(rst)
      0    true
      1    true
      dtype: bool
    """
    # Wrap the WKB payloads in an Arrow binary array before handing off to the core.
    wkb_array = pa.array(geos, type='binary')
    return arctern_caller(arctern_core_.ST_IsValid, wkb_array)
466f29367dbdc7c09581f7bedda72fe729bdd73d
32,000
import copy
import json
import logging


def import_email(assessment, campaign_number, template_smtp):
    """Import an email template from a JSON file on disk.

    Prompts the user (in a retry loop) for a JSON file name, validates the
    loaded payload, and builds a GoPhish template/SMTP-profile pair named
    after the assessment and campaign.

    :param assessment: assessment object; only ``assessment.id`` is read here
    :param campaign_number: number used in the generated template/profile names
    :param template_smtp: SMTP profile to clone (never mutated — deep-copied)
    :return: tuple ``(temp_smtp, temp_template)``
    """
    temp_template = Template(name=f"{assessment.id}-T{str(campaign_number)}")
    # Deep copy so the caller's SMTP template object is never mutated.
    temp_smtp = copy.deepcopy(template_smtp)
    temp_smtp.name = f"{assessment.id}-SP-{campaign_number}"

    # Receives the file name and checks if it exists.
    while True:
        try:
            import_file_name = get_input(" Import File name?")
            # Drops .json if included so it can always be added as fail safe.
            import_file_name = import_file_name.split(".", 1)[0]
            with open(import_file_name + ".json") as importFile:
                import_temp = json.load(importFile)
            # Validates that all fields are present or raise MissingKey Error.
            email_import_validation(import_temp)
            break
        except EnvironmentError:
            # File missing or unreadable — report and re-prompt.
            logging.critical("Import File not found: {}.json".format(import_file_name))
            print("Please try again...")
        except MissingKey as e:
            # Logs and indicates the user should correct before clicking ok which will re-run the import.
            logging.critical("Missing Field from import: {}".format(e.key))
            message_dialog(
                title="Missing Field",
                text=f'Email import is missing the "{e.key}" field, please correct before clicking Ok.\n {e.key}: {e.description}',
            )
            continue

    # Finalize SMTP profile, push to GoPhish for check.
    # TODO Need to valid this formatting.
    temp_smtp.from_address = import_temp["from_address"]

    # Load
    temp_template.subject = import_temp["subject"]
    temp_template.html = import_temp["html"]
    temp_template.text = import_temp["text"]
    # Template name gains the imported payload's id as a suffix.
    temp_template.name = f"{assessment.id}-T{str(campaign_number)}-{import_temp['id']}"

    return temp_smtp, temp_template
7359ff21d159b0f5d7fce90fe58ae17ef785e271
32,001
import time


def get_framerate(has_already_started, start_time, frame_counter, frame_rate,
                  frame_num=5, decimal_round_num=2):
    """Track the framerate of a video loop; call once per frame.

    The rate is recomputed every ``frame_num`` frames from the total elapsed
    time. Bug fix: the refresh interval previously used a hard-coded ``5``
    instead of the ``frame_num`` parameter.

    :param has_already_started: False on the very first call; True afterwards
    :param start_time: epoch seconds captured on the first call
    :param frame_counter: frames processed so far
    :param frame_rate: last computed rate (carried between calls)
    :param frame_num: recompute the rate every this many frames
    :param decimal_round_num: decimal places to round the rate to
    :return: updated ``(has_already_started, start_time, frame_counter, frame_rate)``
    """
    if not has_already_started:
        # First call: initialise the timing state; no rate is known yet.
        return True, time.time(), 0, 0
    if frame_counter % frame_num == 0:
        # Average rate since the loop started.
        elapsed = time.time() - start_time
        frame_rate = round(frame_counter / elapsed, decimal_round_num)
    return has_already_started, start_time, frame_counter + 1, frame_rate
61db421be9e8d5a0e810a79875eac2b776be99ca
32,002
def check_for_solve(grid):
    """Return True when the 9x9 grid has no empty (zero) cells left."""
    return all(grid[row][col] != 0 for row in range(9) for col in range(9))
5fc4a8e7a2efaa016065fc0736aa5bdb7d4c92f8
32,003
def Sn(i, length):
    """Encode a non-negative int as a little-endian string of ``length`` chars.

    Least-significant byte first, zero-padded up to ``length``.

    :param i: non-negative integer to encode
    :param length: exact length of the returned string
    :raises ValueError: if ``i`` is negative or does not fit in ``length`` bytes
        (previously a bare ``Exception``; a negative ``i`` previously hung in an
        infinite loop because right-shifting a negative int never reaches 0)
    """
    if i < 0:
        raise ValueError("Cannot encode a negative integer")
    s = ''
    while i != 0:
        s += chr(i & 0xff)
        i >>= 8
        if len(s) > length:
            raise ValueError("Integer too big to fit")
    # Pad with NUL chars up to the requested length.
    return s + chr(0) * (length - len(s))
607c2b8e82379db091505d7422edc17ea121bc3f
32,004
import os


def _get_config_file_schema():
    """Return the path of the ``pystepsrc_schema.json`` shipped alongside this module."""
    module_dir = os.path.dirname(_decode_filesystem_path(__file__))
    return os.path.join(module_dir, "pystepsrc_schema.json")
7a67cf8c5496d21dcff1b9e40c054e5961adab7f
32,005
def parse_args():
    """Parse command line arguments"""
    parser = ArgumentParser(
        description='Print contents of a parsed config file',
        formatter_class=ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('pipeline', metavar='CONFIGFILE', nargs='?',
                        default='settings/pipeline/example.cfg',
                        help='Pipeline config file to parse')
    parser.add_argument('-v', action='count', default=Levels.WARN,
                        help='Set verbosity level')
    # Apply the verbosity immediately; everything else is handed back to the caller.
    parsed = vars(parser.parse_args())
    set_verbosity(parsed.pop('v'))
    return parsed
67462f2078ede7dfb9d9e0956b32f1eb09b2a747
32,006
import copy def _normalize_annotation(annotation, tag_index): """ Normalize the annotation anchorStart and anchorEnd, in the sense that we start to count the position from the beginning of the sentence and not from the beginning of the disambiguated page. :param annotation: Annotation object :param tag_index: start index (int) :return: a new Annotation object """ # norm_annotation = copy.deepcopy(annotation) norm_annotation = annotation norm_annotation.anchorStart = int(annotation.anchorStart) - tag_index norm_annotation.anchorEnd = int(annotation.anchorEnd) - tag_index return copy.copy(norm_annotation)
a7da5810711ada97a2ddcc308be244233fe813be
32,007
def read_lc(csvfile, comment='|'):
    """
    Read a gAperture light-curve CSV file into a DataFrame.

    :param csvfile: The name of the csv file to read.

    :type csvfile: str

    :param comment: The character used to denote a comment row.

    :type comment: str

    :returns: pandas DataFrame -- The contents of the csv file.
    """
    frame = pd.io.parsers.read_csv(csvfile, comment=comment)
    return frame
74e53cbfe902e9d23567569ec0f4c5ef5ef75baa
32,008
def check_cutoffs(cutoffs):
    """Validate the cutoffs.

    Parameters
    ----------
    cutoffs : np.ndarray or pd.Index
        Integer cutoff points.

    Returns
    -------
    cutoffs (sorted array)

    Raises
    ------
    ValueError
        If cutoffs is not an instance of np.array or pd.Index,
        if the dtype is not integer, or if the array is empty.
        (The dtype check previously used ``assert``, which is silently
        stripped under ``python -O``; it now raises ValueError like the
        other validation failures.)
    """
    if not isinstance(cutoffs, (np.ndarray, pd.Index)):
        raise ValueError(
            f"`cutoffs` must be a np.array or pd.Index, "
            f"but found: {type(cutoffs)}"
        )
    if not np.issubdtype(cutoffs.dtype, np.integer):
        raise ValueError(
            f"`cutoffs` must have an integer dtype, but found: {cutoffs.dtype}"
        )
    if len(cutoffs) == 0:
        raise ValueError("Found empty `cutoff` array")
    return np.sort(cutoffs)
db0a3a477b27883aa1d29486083cf3e6c993e021
32,009
def hook(images, augmenter, parents, default):
    """Decide whether ``augmenter`` should also be applied to masks.

    Only augmenters whose class name appears in MASK_AUGMENTERS are geometric
    and therefore safe to replay on masks.
    """
    name = type(augmenter).__name__
    return name in MASK_AUGMENTERS
0e1e6589bc37d90b0ac8249e11dee54140efd86a
32,010
def _get_videos(course, pagination_conf=None):
    """
    Retrieves the list of videos from VAL corresponding to this course.

    Returns a tuple ``(videos, pagination_context)`` where each video dict has
    its ``transcripts``, ``transcription_status`` and ``status`` fields filled
    in with studio-facing values.
    """
    videos, pagination_context = get_videos_for_course(
        str(course.id),
        VideoSortField.created,
        SortDirection.desc,
        pagination_conf
    )
    videos = list(videos)

    # This is required to see if edx video pipeline is enabled while converting the video status.
    course_video_upload_token = course.video_upload_pipeline.get('course_video_upload_token')

    # VAL statuses that imply transcription work has begun (and hence encodes are done).
    transcription_statuses = ['transcription_in_progress', 'transcript_ready',
                              'partial_failure', 'transcript_failed']

    # convert VAL's status to studio's Video Upload feature status.
    for video in videos:
        # If we are using "new video workflow" and status is in `transcription_statuses` then video encodes are ready.
        # This is because Transcription starts once all the encodes are complete except for YT, but according to
        # "new video workflow" YT is disabled as well as deprecated. So, Its precise to say that the Transcription
        # starts once all the encodings are complete *for the new video workflow*.
        is_video_encodes_ready = not course_video_upload_token and (video['status'] in transcription_statuses)
        # Update with transcript languages
        video['transcripts'] = get_available_transcript_languages(video_id=video['edx_video_id'])
        video['transcription_status'] = (
            StatusDisplayStrings.get(video['status']) if is_video_encodes_ready else ''
        )
        # Convert the video status.
        video['status'] = convert_video_status(video, is_video_encodes_ready)

    return videos, pagination_context
21774a476424b8f68f67c368fb72343fb9cfd552
32,011
import torch


def sample_stacking_program(num_primitives, device, address_suffix="", fixed_num_blocks=False):
    """Samples blocks to stack from a set [0, ..., num_primitives - 1]
    *without* replacement. The number of blocks is stochastic and
    can be < num_primitives.

    Args
        num_primitives (int)
        device
        address_suffix: appended to every pyro sample-site name so the
            addresses stay unique across repeated calls
        fixed_num_blocks (bool): if True, always stack all num_primitives blocks

    Returns [num_blocks] tensor of primitive ids
        (where num_blocks is stochastic and between 1 and num_primitives
        (inclusive))
    """

    # Init
    stacking_program = []
    available_primitive_ids = list(range(num_primitives))

    if fixed_num_blocks:
        num_blocks = num_primitives
    else:
        # Sample num_blocks uniformly from [1, ..., num_primitives] (inclusive)
        raw_num_blocks_logits = torch.ones((num_primitives,), device=device)
        raw_num_blocks = pyro.sample(
            f"raw_num_blocks{address_suffix}",
            pyro.distributions.Categorical(logits=raw_num_blocks_logits),
        )
        # Categorical yields 0-based index; shift to get a count in [1, num_primitives].
        num_blocks = raw_num_blocks + 1

    # Sample primitive ids
    for block_id in range(num_blocks):
        # Sample primitive uniformly from the ids not yet used (without replacement).
        raw_primitive_id_logits = torch.ones((len(available_primitive_ids),), device=device)
        raw_primitive_id = pyro.sample(
            f"raw_primitive_id_{block_id}{address_suffix}",
            pyro.distributions.Categorical(logits=raw_primitive_id_logits),
        )
        # pop() removes the chosen id so it cannot be drawn again.
        primitive_id = available_primitive_ids.pop(raw_primitive_id)

        # Add to the stacking program based on previous action
        stacking_program.append(primitive_id)

    return torch.tensor(stacking_program, device=device)
f5927edc11b2e20fcfb4b19b6ecddd92ba911841
32,012
def get_mmr_address(rn, m0m1):
    """Return address of an memory-mapped register and its size in bits.

    The two-bit m0m1 field selects one of four register banks; rn indexes a
    4-byte-wide register within the selected bank.
    """
    base_by_m0m1 = {
        0b00: 0xf0400,
        0b01: 0xf0500,
        0b10: 0xf0600,
        0b11: 0xf0700,
    }
    address = base_by_m0m1[m0m1] + 4 * rn
    return address, get_register_size_by_address(address)
53b92051d7ac64f5e288a121dae7e764166c9d2e
32,013
import sys


def with_translations(**columns):
    """Decorator that creates a translations table for the decorated model.

    Creates a table mapped to a ``ModelTranslations`` class (given a decorated
    model class ``Model``) containing the provided `**columns`, with references
    to the :class:`.Language` and the decorated model.

    On the decorated model, creates an association proxy for each translated
    field that returns a dict of translations mapped by
    :attr:`~.Language.subtag`.

    Parameters
    ----------
    **columns : :obj:`dict` of :obj:`str`: :class:`sqlalchemy.schema.Column`
        Translatable columns to create in the translations table, typically of
        the :class:`sqlalchemy.types.Unicode` type.
    """
    def decorator(cls):
        # Dynamically declare the <Model>Translation mapped class with the
        # requested translatable columns.
        Translations = type('{}Translation'.format(cls.__name__), (Base, ), columns)
        # Relationship to the decorated model, keyed by language subtag.
        Translations = belongs_to(
            cls,
            backref_name='translations',
            collection_class=attribute_mapped_collection('subtag'))(
                Translations)
        # Relationship to the Language the translation is written in.
        Translations = belongs_to(
            Language, name='local_language', backref_name=False)(Translations)
        # At most one translation per (model row, language) pair.
        Translations.__table__.append_constraint(
            sa.UniqueConstraint('{}_id'.format(snake_case(cls.__name__)),
                                'local_language_id'))
        setattr(Translations, 'subtag',
                association_proxy('local_language', 'subtag'))
        # Expose each translated column on the model as subtag -> value proxy.
        for attr in columns:
            setattr(cls, attr, association_proxy('translations', attr))
        # Make the generated class importable from the model's module.
        setattr(sys.modules[cls.__module__], Translations.__name__, Translations)
        return cls
    return decorator
22928f878de1480bcd483b7313642969483c3dd8
32,014
def thread_map(func, data):
    """Apply ``func`` to every item of ``data`` and return the results as a list.

    The multithreaded implementation from
    http://code.activestate.com/recipes/577360-a-multithreaded-concurrent-version-of-map/
    was explicitly disabled (the function returned before reaching it), leaving
    unreachable Python-2 dead code (``xrange``, an undefined ``Thread``); that
    dead code has been removed.
    """
    # TODO: restore a genuinely concurrent implementation if needed,
    # e.g. concurrent.futures.ThreadPoolExecutor.map.
    return [func(item) for item in data]
eec6efd698a71c63b0c1a7c0c0c4df1f21e3b62f
32,015
import re


def number_of_a_char(element: Element):
    """
    Count the non-whitespace characters inside ``<a>`` descendants of ``element``,
    for example, result of `<a href="#">hello</a>world` = 5.

    :param element: lxml element (or None)
    :return: length
    """
    if element is None:
        return 0
    linked_text = ''.join(element.xpath('.//a//text()'))
    stripped = re.sub(r'\s*', '', linked_text, flags=re.S)
    return len(stripped)
9d1394552b740844aadacc3fe3b2f802b698c18a
32,016
def preprocessing_fn(batch):
    """
    Standardize, then normalize sound clips

    Each clip is peak-normalized and reduced to a pseudorandom fixed-length
    window of WINDOW_LENGTH samples; the window choice is deterministic per
    clip length.  Assumes every clip is longer than WINDOW_LENGTH and is not
    all-zero — TODO confirm with callers.
    """
    processed_batch = []
    for clip in batch:
        signal = clip.astype(np.float64)
        # Signal normalization
        signal = signal / np.max(np.abs(signal))

        # get pseudorandom chunk of fixed length (from SincNet's create_batches_rnd)
        signal_length = len(signal)
        # NOTE(review): seeding the *global* NumPy RNG with the clip length makes
        # the chunk choice reproducible but clobbers global RNG state for any
        # other consumer — presumably intentional; confirm.
        np.random.seed(signal_length)
        signal_start = (
            np.random.randint(signal_length / WINDOW_LENGTH - 1)
            * WINDOW_LENGTH
            % signal_length
        )
        signal_stop = signal_start + WINDOW_LENGTH
        signal = signal[signal_start:signal_stop]
        processed_batch.append(signal)

    return np.array(processed_batch)
25ce4a077027239126d02b62e93ca0a3bcb15b5e
32,017
def elast_quad9(coord, params):
    """
    Quadrilateral element with 9 nodes for classic elasticity
    under plane-strain

    Parameters
    ----------
    coord : coord
        Coordinates of the element.
    params : list
        List with material parameters in the following order:
        [Young modulus, Poisson coefficient, density].
        Density is optional; it defaults to 1 when only two
        parameters are given.

    Returns
    -------
    stiff_mat : ndarray (float)
        Local stifness matrix.
    mass_mat : ndarray (float)
        Local mass matrix.
    """
    # 9 nodes x 2 dofs each -> 18x18 element matrices.
    stiff_mat = np.zeros((18, 18))
    mass_mat = np.zeros((18, 18))
    # Constitutive matrix from [Young modulus, Poisson coefficient].
    C = fem.umat(params[:2])
    if len(params) == 2:
        dens = 1
    else:
        dens = params[-1]
    # 3x3 Gauss quadrature in 2D.
    gpts, gwts = gau.gauss_nd(3, ndim=2)
    for cont in range(gpts.shape[0]):  # pylint: disable=E1136  # pylint/issues/3139
        r, s = gpts[cont, :]
        # H: shape functions, B: strain-displacement matrix, det: Jacobian determinant.
        H, B, det = fem.elast_diff_2d(r, s, coord, fem.shape_quad9)
        factor = det * gwts[cont]
        stiff_mat += factor * (B.T @ C @ B)
        mass_mat += dens * factor * (H.T @ H)
    return stiff_mat, mass_mat
c27bae77ca54a3a370ccdac5b5550f73cb121d9a
32,018
from typing import Union
from pathlib import Path


def peek(audio_file_path: Union[str, Path], output: str = "np"):
    """
    Returns a tuple of audio data and its sampling rate

    The audio data can be a numpy array (``output="np"``) or a nested list
    (``output="list"``); channels-first after the transpose.

    :raises ValueError: for an unsupported ``output`` value (the original
        silently fell through and returned ``None``).
    """
    data, sr = sf.read(audio_file_path, dtype="float32")
    data = data.transpose()
    if output == "list":
        return data.tolist(), sr
    if output == "np":
        return data, sr
    raise ValueError(f"Unsupported output format: {output!r} (expected 'np' or 'list')")
30b47c77ab92cf0a544d84605204261c899f9e9a
32,019
import click


def parse_rangelist(rli):
    """Parse a range list into a list of integers

    Accepts comma-separated tokens, each either a single integer or a
    ``lo-hi`` range (inclusive; descending ranges produce descending output).
    Integers may be in any base accepted by ``int(x, 0)``.
    """
    values = []
    try:
        for token in rli.split(","):
            lo_str, dash, hi_str = token.partition("-")
            lo = int(lo_str, 0)
            if not dash:
                values.append(lo)
                continue
            hi = int(hi_str, 0)
            step = 1 if hi >= lo else -1
            values.extend(range(lo, hi + step, step))
    except ValueError:
        # pylint: disable=raise-missing-from
        raise click.ClickException("Invalid range list %s" % rli)
    return values
321496a1170b81d02b8378d687d8ce6d6295bff6
32,020
import os


def intersect_by_tf(emotion_dict):
    """
    Takes multiple emotion word lists and intersects them by their term
    frequency, leading to the emotional difference between sets of any size.

    Args:
        emotion_dict: dict of summed up emotional scores based on the tf scores
            of the underlying words

    Returns:
        tf_intersection_dict: The comparison of each two sets in a dict.
            Of the keys: Second element is subtracted by the first one.
    """
    names = list(emotion_dict)
    diffs = {}
    for i, first in enumerate(names):
        for second in names[i + 1:]:
            label = ("intersect: "
                     + os.path.basename(os.path.normpath(first))
                     + " and "
                     + os.path.basename(os.path.normpath(second)))
            diffs[label] = {
                k: emotion_dict[first][k] - emotion_dict[second][k]
                for k in emotion_dict[first]
            }
    return diffs
f46a02b736a76bd3adcc1efaf7638452241567ce
32,021
def transcript_segments(location_descriptors, gene_descriptors):
    """Provide possible transcript_segment input."""
    seg_full_offsets = {
        "transcript": "refseq:NM_152263.3",
        "exon_start": 1,
        "exon_start_offset": -9,
        "exon_end": 8,
        "exon_end_offset": 7,
        "gene_descriptor": gene_descriptors[0],
        "component_genomic_start": location_descriptors[2],
        "component_genomic_end": location_descriptors[3]
    }
    seg_no_offsets = {
        "component_type": "transcript_segment",
        "transcript": "refseq:NM_034348.3",
        "exon_start": 1,
        "exon_end": 8,
        "gene_descriptor": gene_descriptors[3],
        "component_genomic_start": location_descriptors[0],
        "component_genomic_end": location_descriptors[1]
    }
    seg_end_offset = {
        "component_type": "transcript_segment",
        "transcript": "refseq:NM_938439.4",
        "exon_start": 7,
        "exon_end": 14,
        "exon_end_offset": -5,
        "gene_descriptor": gene_descriptors[4],
        "component_genomic_start": location_descriptors[0],
        "component_genomic_end": location_descriptors[1]
    }
    seg_start_only = {
        "component_type": "transcript_segment",
        "transcript": "refseq:NM_938439.4",
        "exon_start": 7,
        "gene_descriptor": gene_descriptors[4],
        "component_genomic_start": location_descriptors[0]
    }
    return [seg_full_offsets, seg_no_offsets, seg_end_offset, seg_start_only]
3ca9041ff278dcd19432b6d314b9c01de6be1983
32,022
def perform_data_filtering_q2(data):
    """
    Takes the original DataFrame.
    Returns the altered DataFrame necessary for Q2.

    Recodes ``num`` (0 -> 'healthy', anything else -> 'diseased') and
    ``sex`` (1 -> 'male', otherwise 'female').

    Fix: the original aliased the input (``df = data``) and mutated the
    caller's DataFrame in place; we now work on a copy, matching the
    "Returns the altered DataFrame" contract.
    """
    df = data.copy()
    df['num'] = np.where(df['num'] != 0, 'diseased', 'healthy')
    df['sex'] = np.where(df['sex'] == 1, 'male', 'female')
    return df
2c8943bda66722b70a5dd25cb5a7c7473e40e67c
32,023
def player_with_name_and_value(source):
    """
    source: pn.widgets.DiscretePlayer()
    target: consists of source player's name, value and player itself

    DiscretePlayer does not render its name/value as text, so this wraps the
    player with a Markdown pane that mirrors the current value continuously.
    """
    value_display = pn.pane.Markdown(f'{source.value}')

    def _sync(*events):
        # Mirror every 'value' event into the Markdown pane.
        for event in events:
            if event.name == 'value':
                value_display.object = str(event.new)

    source.param.watch(_sync, ['value'], onlychanged=False)
    return pn.Column(pn.Row(source.name, value_display), source)
e7d415d798f9c6aefb203f861b08c9477dde32d7
32,024
def _cached_diff(expression, var):
    """
    Derive expression with respect to a single variable.

    :param expression: an expression to derive
    :type expression: :class:`~sympy.Expr`
    :param var: a variable
    :type var: :class:`~sympy.Symbol`
    :return: the derived expression
    :type: :class:`~sympy.Expr`
    """
    derivative = sp.Derivative(expression, var, evaluate=True)
    return derivative
4a4206d327bee6f0c8168893e2bffbeaa23b9c14
32,025
def _get_mult_op_ ( klass1 , klass2 ) : """Get the proper multiplication operator """ t = klass1 , klass2 ops = _mult_ops_.get( t , None ) if ops : return ops ## RETURN ## try to load the operators try : ops = Ostap.Math.MultiplyOp ( klass1 , klass2 ) _mult_ops_ [ t ] = ops return ops ## RETURN except TypeError: return None ## RETURN return None ## RETURN
f7a000f697d4739894e1671e9cfeb23099e1ce4f
32,026
def datetime_into_columns(df, column, weekday=False, hour_minutes=False, from_type='object'):
    """
    Convert a date column (int64 or object dtype) into separate
    day / month / year columns; optionally also weekday and hour/minutes.

    Keyword arguments:
    df (Pandas DataFrame type) -- is the given dataframe
    column (string) -- the chosen column to create new columns from
    weekday (boolean) -- True if user wants new column with weekday value (default False)
    hour_minutes (boolean) -- True if user wants two new columns with hour and minutes values (default False)
    from_type (string) -- 'object' by default if original column type is object
        and 'int64' if original column type is int64

    return: the resulting dataframe with the new columns (the original column
        is replaced by its datetime-typed version, as before).

    Idiom fixes only: ``weekday == True`` / ``hour_minutes == True`` replaced
    with plain truthiness checks; behavior is unchanged.
    """
    if from_type == 'int64':
        # Int dates must go through str before to_datetime can parse them.
        series = pd.to_datetime(df[column].astype(str))
    else:
        series = pd.to_datetime(df[column])

    parts = pd.DataFrame(series)
    parts['day'] = series.dt.day
    parts['month'] = series.dt.month
    parts['year'] = series.dt.year
    if weekday:
        parts['weekday'] = series.dt.weekday
    if hour_minutes:
        parts['hour'] = series.dt.hour
        parts['minutes'] = series.dt.minute

    df = pd.concat([df, parts], axis=1)
    # Drop the original (duplicated) date column, keeping the datetime version.
    df = df.loc[:, ~df.columns.duplicated(keep='last')]
    return df
e6178b33f113ef1430d0d8df3fe9b6c53d200e1e
32,027
def cis_codif_h1_moms(probe, starttime, endtime, sensitivity='high', try_download=True):
    """
    Load H+ moments from CIS instrument.

    See https://caa.estec.esa.int/documents/UG/CAA_EST_UG_CIS_v35.pdf for more
    information on the CIS data.

    Parameters
    ----------
    probe : string
        Probe number. Must be '1', '2', '3', or '4'.
    starttime : datetime
        Interval start.
    endtime : datetime
        Interval end.
    sensitivity : string, 'high' or 'low', default: 'high'
        Load high or low sensitivity.
        (Docstring previously said the default was 'low'; the code default
        is and was 'high'.)
    try_download : bool, default: True
        Whether to try downloading data that is not available locally.

    Returns
    -------
    data : DataFrame
        Requested data.
    """
    sensitivitydict = {'high': 'HS', 'low': 'LS'}
    sensitivity = sensitivitydict[sensitivity]
    endstr = '_CP_CIS-CODIF_' + sensitivity + '_H1_MOMENTS'
    # NOTE(review): the instrument argument is 'peace' although this loader is
    # for CIS-CODIF data — looks like a copy/paste slip; confirm against
    # _load's directory layout before changing.
    return _load(probe, starttime, endtime, 'peace', endstr[1:],
                 try_download=try_download)
e8f014196c2a634d406aaeb86a3130e6db59049f
32,028
def readlines(file_path):
    """Read lines from fname; prefer the loaded vim buffer when one exists."""
    buf = getbuffer(file_path)
    if buf and int(vim.eval('bufloaded(%d)' % buf.number)):
        return buf
    try:
        with open(file_path, 'r') as handle:
            # we are not decoding: since we have to assume that files are in
            # &encoding and vim stores buffers, variables, ... in &encoding.
            return handle.read().splitlines()
    except IOError:
        return []
a4f367a00f90095a17f9eaf29eb150e7a5176045
32,029
import random
from sys import path


def random_avg_subdir(myinput, dims="", n=20):
    """
    Get the average of up to ``n`` faces chosen from different subdirectories
    of ``myinput``.  You can also pass a list of directories into myinput
    instead of a string representing the target directory.

    Idiom fixes only (behavior unchanged): ``isinstance(...) == False`` and
    bitwise ``&`` on booleans replaced with ``not`` / short-circuiting checks;
    the ``goahead`` flag became an explicit ``break`` (still at most one
    successfully-read image per subdirectory).
    """
    faces_found = []
    if isinstance(myinput, list):
        subdirs = myinput
    else:
        subdirs = dirtools.all_subdirs(myinput)
    # Visit subdirectories in random order until n faces are collected.
    for subdir in random.sample(subdirs, len(subdirs)):
        if len(faces_found) >= n:
            break
        files = path(subdir).files()
        for candidate in random.sample(files, len(files)):
            if len(faces_found) >= n:
                break
            try:
                img = efr.EasyImageFile(candidate)
                faces_found += img.detect_faces()
                break  # one readable image per subdirectory, as before
            except efr.NotAnImage:
                continue
    return average(faces_found[0:min(n, len(faces_found))], dims)
83313fb7d31b0d07600dfc1424a2f94a37eca29c
32,030
def convert_range_image_to_point_cloud(
    frame, range_images, camera_projections, range_image_top_pose, ri_indexes=(0, 1)
):
    """Convert range images to point cloud. modified from
    https://github.com/waymo-research/waymo-open-dataset/blob/master/waymo_open_dataset/utils/range_image_utils.py#L612

    Args:
        frame: open dataset frame
        range_images: A dict of {laser_name, [range_image_first_return,
            range_image_second_return]}.
        camera_projections: A dict of {laser_name,
            [camera_projection_from_first_return,
            camera_projection_from_second_return]}.
        range_image_top_pose: range image pixel pose for top lidar.
        ri_indexes: 0 for the first return, 1 for the second return.

    Returns:
        points: {[N, 3]} list of 3d lidar points of length 5 (number of lidars).
        cp_points: {[N, 6]} list of camera projections of length 5 (number of
            lidars).  (Results are keyed "points_<laser>_<return>" /
            "intensity_<laser>_<return>" in the returned defaultdict.)
    """
    tf = tensorflow
    calibrations = sorted(frame.context.laser_calibrations, key=lambda c: c.name)
    ret_dict = defaultdict(list)
    # Vehicle pose of this frame as a 4x4 transform.
    frame_pose = tf.convert_to_tensor(value=np.reshape(np.array(frame.pose.transform), [4, 4]))
    # [H, W, 6]
    range_image_top_pose_tensor = tf.reshape(
        tf.convert_to_tensor(value=range_image_top_pose.data), range_image_top_pose.shape.dims
    )
    # [H, W, 3, 3]
    range_image_top_pose_tensor_rotation = transform_utils.get_rotation_matrix(
        range_image_top_pose_tensor[..., 0],
        range_image_top_pose_tensor[..., 1],
        range_image_top_pose_tensor[..., 2],
    )
    range_image_top_pose_tensor_translation = range_image_top_pose_tensor[..., 3:]
    range_image_top_pose_tensor = transform_utils.get_transform(
        range_image_top_pose_tensor_rotation, range_image_top_pose_tensor_translation
    )
    for c in calibrations:
        for ri_index in ri_indexes:
            range_image = range_images[c.name][ri_index]
            # Beam inclinations: computed from min/max when not provided.
            if len(c.beam_inclinations) == 0:
                beam_inclinations = range_image_utils.compute_inclination(
                    tf.constant([c.beam_inclination_min, c.beam_inclination_max]),
                    height=range_image.shape.dims[0],
                )
            else:
                beam_inclinations = tf.constant(c.beam_inclinations)
            beam_inclinations = tf.reverse(beam_inclinations, axis=[-1])
            extrinsic = np.reshape(np.array(c.extrinsic.transform), [4, 4])
            range_image_tensor = tf.reshape(
                tf.convert_to_tensor(value=range_image.data), range_image.shape.dims
            )
            pixel_pose_local = None
            frame_pose_local = None
            # Only the TOP lidar has per-pixel poses.
            if c.name == dataset_pb2.LaserName.TOP:
                pixel_pose_local = range_image_top_pose_tensor
                pixel_pose_local = tf.expand_dims(pixel_pose_local, axis=0)
                frame_pose_local = tf.expand_dims(frame_pose, axis=0)
            # Channel 0 is range; keep only valid (positive-range) pixels.
            range_image_mask = range_image_tensor[..., 0] > 0

            # No Label Zone
            if FILTER_NO_LABEL_ZONE_POINTS:
                nlz_mask = range_image_tensor[..., 3] != 1.0  # 1.0: in NLZ
                range_image_mask = range_image_mask & nlz_mask

            range_image_cartesian = range_image_utils.extract_point_cloud_from_range_image(
                tf.expand_dims(range_image_tensor[..., 0], axis=0),
                tf.expand_dims(extrinsic, axis=0),
                tf.expand_dims(tf.convert_to_tensor(value=beam_inclinations), axis=0),
                pixel_pose=pixel_pose_local,
                frame_pose=frame_pose_local,
            )
            range_image_cartesian = tf.squeeze(range_image_cartesian, axis=0)
            points_tensor = tf.gather_nd(
                range_image_cartesian, tf.compat.v1.where(range_image_mask)
            )
            ret_dict["points_{}_{}".format(c.name, ri_index)].append(points_tensor.numpy())

            # Note: channel 1 is intensity
            # https://github.com/waymo-research/waymo-open-dataset/blob/master/waymo_open_dataset/dataset.proto#L176
            intensity_tensor = tf.gather_nd(range_image_tensor[..., 1], tf.where(range_image_mask))
            ret_dict["intensity_{}_{}".format(c.name, ri_index)].append(intensity_tensor.numpy())

    return ret_dict
af6a6af4cfcde6f3b600ffe0add8a3a0b0870067
32,031
from typing import Dict
from typing import Tuple


def get_counts(circ: MyCircuit, n_shots: int, seed: int) -> Dict[Tuple[int, ...], int]:
    """Helper method for tests to summarise the shot table from the simulator

    :param circ: The circuit to simulate
    :type circ: MyCircuit
    :param n_shots: The number of samples to take
    :type n_shots: int
    :param seed: Seed for the random sampling
    :type seed: int
    :return: Map from readout array to the number of instances observed in the
        shot table
    :rtype: Dict[Tuple[int, ...], int]
    """
    shots = MySimulator(circ).sample(n_shots=n_shots, seed=seed)
    unique_rows, counts = np.unique(shots, axis=0, return_counts=True)
    return dict(zip(map(tuple, unique_rows), counts))
e681fdc02e4cf7a637eac6f1e2096d9276b39cc6
32,032
def length(
    inputs: tf.Tensor, axis: int = -1, keepdims: bool = False, epsilon: float = 1e-10,
    name: str = None
) -> tf.Tensor:
    """
    Computes the vector length (2-norm) along specified ´axis´ of given Tensor
    ´inputs´.  Optionally an epsilon can be added to the squared norm before
    the square root is computed (pass ``epsilon=None`` to disable).
    """
    with tf.name_scope(name, default_name="norm"):
        squared_sum = tf.reduce_sum(tf.square(inputs), axis=axis, keepdims=keepdims)
        if epsilon is not None:
            # Guard against a zero gradient at exactly zero norm.
            squared_sum = tf.add(squared_sum, epsilon)
        return tf.sqrt(squared_sum)
a548110ec2d8e3512e73805aea690f22c7d50fe3
32,033
def class_label_matrix(labels, img_sizes, num_classes): """ Computes the class label matrix of the training data. """ # Class label matrix Y = list() # Modeling the object detection problem as a binary classification problem (none, detection) if num_classes == 2: print('Modeling as a binary problem') for sample in range(len(labels)): # None if len(labels[sample]) == 0: Y.append(0) # Detection (smoke or fire or both) else: Y.append(1) # Modeling the object detection problem as a multiclass classification problem (none, fire, smoke) if num_classes > 2: print('Modeling as a multiclass problem') # Pixels area per image area = {'fire': 0, 'smoke': 0} for sample in range(len(labels)): # None if len(labels[sample]) == 0: Y.append(0) # Detection else: # For each bounding box for label in range(labels[sample].shape[0]): # Class identifier class_id = labels[sample][label][0] # Normalized coordinates xmin = labels[sample][label][1] ymin = labels[sample][label][2] xmax = labels[sample][label][3] ymax = labels[sample][label][4] # Image dimensions height = img_sizes[sample][0] width = img_sizes[sample][1] # Coordinates without normalization xmin, ymin, xmax, ymax = deconvert((width, height), (xmin, ymin, xmax, ymax)) # Sum the pixel areas according to the class if class_id == 0: area['smoke'] += (xmax - xmin) * (ymax - ymin) else: area['fire'] += (xmax - xmin) * (ymax - ymin) # If the smoke pixel area is larger than the fire pixel area if area['smoke'] > area['fire']: Y.append(1) # Otherwise else: Y.append(2) # Resetting counters for the next image area = area.fromkeys(area, 0) # Convert a class vector (integers) to binary class matrix Y = np.eye(num_classes, dtype = 'int')[Y] # List to numpy array Y = np.array(Y) return Y
47efe5f8f76cac58d3afeaddb04d766ebdebf377
32,034
def default_lambda_consumer(env_id):
    """Create a default lambda consumer for the snapshot restore test."""
    # Measurement metadata baselined per test environment.
    metadata = DictMetadataProvider(
        CONFIG_DICT["measurements"],
        SnapRestoreBaselinesProvider(env_id),
    )
    return st.consumer.LambdaConsumer(
        metadata_provider=metadata,
        func=consume_output,
        func_kwargs={},
    )
ec7ec2ae6df4aaa7caf034537a5259b897755700
32,035
def thresholdPolyData(poly, attr, threshold, mode):
    """
    Get the polydata after thresholding based on the input attribute

    Args:
        poly: vtk PolyData to apply threshold
        attr: name of the data array to threshold on
        threshold: (min, max) range, inclusive
        mode: 'cell' to threshold a cell-data array; anything else uses
            point data
    Returns:
        output: resulted vtk PolyData
    """
    thresholder = vtk.vtkThreshold()
    thresholder.SetInputData(poly)
    thresholder.ThresholdBetween(*threshold)
    if mode == 'cell':
        association = vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS
    else:
        association = vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS
    thresholder.SetInputArrayToProcess(0, 0, 0, association, attr)
    thresholder.Update()
    # Convert the unstructured-grid result back to a PolyData surface.
    to_surface = vtk.vtkDataSetSurfaceFilter()
    to_surface.SetInputData(thresholder.GetOutput())
    to_surface.Update()
    return to_surface.GetOutput()
e4717b971c238d9c3a63a902db7eb91e2c630340
32,036
def TCh_GetNum(*args):
    """
    TCh_GetNum(char const & Ch) -> int

    Parameters:
      Ch: char const &

    """
    # SWIG-generated wrapper: delegates directly to the native _snap extension.
    return _snap.TCh_GetNum(*args)
6caf9bcf71868604a6aedeceaba299a36a6bc62a
32,037
def writeFGSSPostageStampRequestById(outfile, requestName, results, xsize, ysize, psRequestType = 'byid', optionMask = 2049, imageType = 'warp', psJobType = 'stamp', skycell = 'null', email = 'qub2@qub.ac.uk', camera = 'gpc1', coordMask = 2):
    """writeFGSSPostageStampRequestById.

    Build a Pan-STARRS Postage Stamp Server request as a FITS binary table
    and write it to ``outfile``.

    Args:
        outfile: path of the FITS request file to write
        requestName: request name stored in the REQ_NAME header keyword
        results: iterable of candidate dicts; each must provide "warp_id",
            "ra_psf", "dec_psf", "id", "tdate", "imageid" and "ipp_idet",
            and may provide "comment"
        xsize: stamp width (pixels)
        ysize: stamp height (pixels)
        psRequestType: request type written to REQ_TYPE (default 'byid')
        optionMask: option bit mask (2049 = unconvolved stacks)
        imageType: image type (hard wired to 'warp' for FGSS 3pi data)
        psJobType: job type — NOTE(review): unused; 'stamp' is hard coded
            in the row loop below. Confirm whether it should be honored.
        skycell: skycell component name
        email: submitter email stored in the EMAIL header keyword
        camera: camera/project name (default 'gpc1')
        coordMask: coordinate mask written to COORD_MASK

    Returns:
        True once the file has been written.
    """
    # "results" is the data set returned from the database of all the candidates. Need
    # to construct a suitable query that contains the appropriate columns.
    fileSuccessfullyWritten = False

    # Minimal primary HDU; the actual request lives in the table extension.
    hdu = pf.PrimaryHDU()
    hdulist = pf.HDUList()
    prihdr = hdu.header
    prihdr.set('SIMPLE', True, 'file does conform to FITS standard')
    prihdr.set('BITPIX', 16, comment='number of bits per data pixel')
    prihdr.set('NAXIS', 0, comment='number of data axes')
    prihdr.set('EXTEND', True, 'FITS dataset may contain extensions')
    prihdr.add_comment(" FITS (Flexible Image Transport System) format is defined in 'Astronomy")
    prihdr.add_comment(" and Astrophysics', volume 376, page 359; bibcode: 2001A&A...376..359H")
    hdulist.append(hdu)

    # One parallel list per FITS column; filled row-by-row below.
    rownum = []
    project = []
    survey_name = []
    ipp_release = []
    job_type = []
    option_mask = []
    req_type = []
    img_type = []
    id = []
    tess_id = []
    component = []
    coord_mask = []
    center_x = []  # RA
    center_y = []  # DEC
    width = []
    height = []
    #label = []
    data_group = []
    reqfilt = []
    mjd_min = []
    mjd_max = []
    run_type = []
    fwhm_min = []
    fwhm_max = []
    comment = []

    row = 1

    # 2012-09-21 KWS Discovered that PyFITS3 doesn't allow implicit creation of
    # double arrays from integer lists. Need to cast integers
    # as floats.
    for result in results:
        rownum.append(row)
        project.append(camera)
        survey_name.append('null')
        ipp_release.append('null')
        job_type.append('stamp')
        option_mask.append(optionMask) # Changed to 2049 for unconvolved stacks
        req_type.append(psRequestType)
        img_type.append(imageType) # Hard wired to warp for FGSS 3pi data
        id.append(result["warp_id"]) # This should contain the warp ID as extracted from the GPC1 database
        tess_id.append('RINGS.V3')
        component.append(skycell)
        coord_mask.append(coordMask)
        center_x.append(float(result["ra_psf"]))
        center_y.append(float(result["dec_psf"]))
        width.append(float(xsize))
        height.append(float(ysize))
        #label.append('null')
        data_group.append('null')
        reqfilt.append('null')
        mjd_min.append(0)
        mjd_max.append(0)
        run_type.append('null')
        fwhm_min.append(0)
        fwhm_max.append(0)

        # Added IPP_IDET to list of columns selected
        # Use the candidate's own comment when present; otherwise build one
        # from its identifiers. The except branch covers rows without a
        # "comment" key at all.
        try:
            if result["comment"]:
                comment.append(result["comment"])
            else:
                comment.append('%s_%s_%s_%d_%s' % (str(result["id"]), result["tdate"], result["imageid"], result["ipp_idet"], "target")) # Hard wired "target" as image type
        except KeyError as e:
            comment.append('%s_%s_%s_%d_%s' % (str(result["id"]), result["tdate"], result["imageid"], result["ipp_idet"], "target")) # Hard wired "target" as image type

        row = row + 1

    # Create the FITS columns.
    rownum_col = pf.Column(name='ROWNUM', format='J', array=rownum)
    project_col = pf.Column(name='PROJECT', format='16A', array=project)
    survey_name_col = pf.Column(name='SURVEY_NAME', format='16A', array=survey_name)
    ipp_release_col = pf.Column(name='IPP_RELEASE', format='16A', array=ipp_release)
    job_type_col = pf.Column(name='JOB_TYPE', format='16A', array=job_type)
    option_mask_col = pf.Column(name='OPTION_MASK', format='J', array=option_mask)
    req_type_col = pf.Column(name='REQ_TYPE', format='16A', array=req_type)
    img_type_col = pf.Column(name='IMG_TYPE', format='16A', array=img_type)
    id_col = pf.Column(name='ID', format='16A', array=id)
    tess_id_col = pf.Column(name='TESS_ID', format='64A', array=tess_id)
    component_col = pf.Column(name='COMPONENT', format='64A', array=component)
    coord_mask_col = pf.Column(name='COORD_MASK', format='J', array=coord_mask)
    center_x_col = pf.Column(name='CENTER_X', format='D', array=center_x)
    center_y_col = pf.Column(name='CENTER_Y', format='D', array=center_y)
    width_col = pf.Column(name='WIDTH', format='D', array=width)
    height_col = pf.Column(name='HEIGHT', format='D', array=height)
    #label_col = pf.Column(name='LABEL', format='64A', array=label)
    data_group_col = pf.Column(name='DATA_GROUP', format='64A', array=data_group)
    reqfilt_col = pf.Column(name='REQFILT', format='16A', array=reqfilt)
    mjd_min_col = pf.Column(name='MJD_MIN', format='D', array=mjd_min)
    mjd_max_col = pf.Column(name='MJD_MAX', format='D', array=mjd_max)
    run_type_col = pf.Column(name='RUN_TYPE', format='16A', array=run_type)
    fwhm_min_col = pf.Column(name='FWHM_MIN', format='D', array=fwhm_min)
    fwhm_max_col = pf.Column(name='FWHM_MAX', format='D', array=fwhm_max)
    comment_col = pf.Column(name='COMMENT', format='64A', array=comment)

    cols=pf.ColDefs([rownum_col, project_col, survey_name_col, ipp_release_col, job_type_col, option_mask_col, req_type_col, img_type_col, id_col, tess_id_col, component_col, coord_mask_col, center_x_col, center_y_col, width_col, height_col,
                     data_group_col, reqfilt_col, mjd_min_col, mjd_max_col, run_type_col, fwhm_min_col, fwhm_max_col, comment_col])

    tbhdu=pf.BinTableHDU.from_columns(cols)
    # The from_columns method only available from PyFITS 3.3 onwards.
    #tbhdu=pf.BinTableHDU.from_columns(cols)

    hdulist.append(tbhdu)

    exthdr = hdulist[1].header
    exthdr.set('EXTNAME','PS1_PS_REQUEST','name of this binary table extension')
    exthdr.set('REQ_NAME',requestName,'Postage Stamp request name')
    # 2015-08-26 KWS Updated contents of the header for version 2
    exthdr.set('EXTVER','2','Extension version')
    exthdr.set('ACTION','PROCESS')
    exthdr.set('EMAIL',email,'Email address of submitter')

    # NOTE(review): `clobber` is deprecated/removed in modern astropy
    # (renamed `overwrite`); fine for legacy PyFITS — confirm target library.
    hdulist.writeto(outfile, clobber=True)
    fileSuccessfullyWritten = True

    return fileSuccessfullyWritten
636284d46cbaced8d0609afe988148cbc3111d32
32,038
def reduce_sequence(sequence, desired_length):
    """Reduce a sequence to ``desired_length`` elements removed uniformly.

    Keeps ``desired_length`` elements taken at evenly spaced integer
    indices (the first element is always kept).

    Parameters
    ----------
    sequence : sequence
        Any object supporting ``len()`` and integer indexing.
    desired_length : int
        Number of elements to keep; must not exceed ``len(sequence)``.

    Returns
    -------
    list
        The uniformly sampled elements.

    Raises
    ------
    RuntimeError
        If ``sequence`` is shorter than ``desired_length``.
    """
    if len(sequence) < desired_length:
        raise RuntimeError('Cannot reduce sequence to longer length.')
    # Integer arithmetic replaces the previous numpy computation
    # (arange * len / desired_length), which produced float indices —
    # floats are not valid sequence indices.
    return [sequence[i * len(sequence) // desired_length]
            for i in range(desired_length)]
25f104abc666e26821436a42f5cb71b99b41a86c
32,039
def encode_label(text):
    """Escape ampersands for wx static-text and button labels.

    wx.StaticText and wx.Button interpret a single ``&`` as an
    accelerator marker, so every ampersand must be doubled to be
    displayed literally.
    """
    return "&&".join(text.split("&"))
b4402604f87f19dab9dbda4273798374ee1a38d8
32,040
def dash_table_from_data_frame(df: pd.DataFrame, *, id, **kwargs):
    """Returns a dash_table.DataTable that will render `df` in a simple HTML table.

    The frame's index is reset first so index levels show up as ordinary
    columns. The table is read-only (no editing or cell selection) with
    native pagination; extra keyword arguments are forwarded to DataTable.
    """
    df_all_columns = df.reset_index()
    return dash_table.DataTable(
        id=id,
        columns=[{"name": i, "id": i} for i in df_all_columns.columns],
        cell_selectable=False,
        data=df_all_columns.to_dict("records"),
        editable=False,
        page_action="native",
        **kwargs,
    )
813bf054f33a4dc15dfcff300414f60fd9cf2973
32,041
from sys import exc_info


def _triple():
    """Return a (type, value, tb) triple from a provoked IndexError."""
    try:
        one()
    except IndexError:
        # Capture the in-flight exception triple.
        return exc_info()
    # one() returning normally is a test-setup failure.
    raise AssertionError('We should have had an IndexError.')
455826d91dfcb5e6f76b46e4f2631c03f4e55474
32,042
from warnings import filterwarnings def calcRSI(df): """ Calculates RSI indicator Read about RSI: https://www.investopedia.com/terms/r/rsi.asp Args: df : pandas.DataFrame() dataframe of historical ticker data Returns: pandas.DataFrame() dataframe of calculated RSI indicators + original data """ filterwarnings("ignore") df["price_change"] = df["adjclose"].pct_change() df["Upmove"] = df["price_change"].apply(lambda x: x if x > 0 else 0) df["Downmove"] = df["price_change"].apply(lambda x: abs(x) if x < 0 else 0) df["avg_Up"] = df["Upmove"].ewm(span=19).mean() df["avg_Down"] = df["Downmove"].ewm(span=19).mean() df = df.dropna() df["RS"] = df["avg_Up"] / df["avg_Down"] df["RSI"] = df["RS"].apply(lambda x: 100 - (100 / (x + 1))) return df
4c2c76159473bf8b23e24cb02af00841977c7cd3
32,043
import os


def sample_images2(imgs, model, path, idx=None,save=False):
    """Saves a generated sample from the test set

    Writes the ground-truth map (imgs['B']) to <path>/test_results/gt/<idx>.jpg,
    runs `model` on the input batch (imgs['A']) and, when `save` is True,
    also writes the prediction to <path>/test_results/pred_map/<idx>.jpg.
    Returns the predicted tensor.

    Relies on module-level `Tensor`, `Variable` and `save_image`;
    assumes `imgs` is a dict-like batch with 'A' and 'B' entries.
    """
    true_map = Variable(imgs['B'].type(Tensor))
    save_image(true_map, os.path.join(path, 'test_results/gt', str(idx)) +'.jpg', normalize=True)
    fake_B = model(Variable(imgs['A'].type(Tensor)))
    if save:
        save_image(fake_B, os.path.join(path, 'test_results/pred_map', str(idx)) +'.jpg', normalize=True)
    return fake_B
2d03f7756aeed6cb4bb7106c7393a42bf7dc2ee4
32,044
def grep_annotations_multiple_files(files_list, regex, base_path, grep_type):
    """Collect annotations from every file in ``files_list``.

    ``grep_type`` selects the extractor: "code" uses
    ``grep_code_annotations`` and "tests" uses ``grep_test_annotations``;
    any other value yields no annotations for that file.

    :todo Refactor and remove the ugly type option
    """
    collected = []
    for path in files_list:
        if grep_type == "code":
            found = grep_code_annotations(path, regex, base_path)
        elif grep_type == "tests":
            found = grep_test_annotations(path, regex)
        else:
            found = []
        if found:
            collected.extend(found)
    return collected
434ed6f72026a62cd410d8b7d62b35a5e1fe5440
32,045
def test_c_py_compose_transforms_module():
    """ Test combining Python and C++ transforms """
    ds.config.set_seed(0)

    def test_config(arr, input_columns, output_cols, op_list):
        # Run the op pipeline over a NumpySlicesDataset and collect the
        # resulting column values as plain Python lists.
        data = ds.NumpySlicesDataset(arr, column_names=input_columns, shuffle=False)
        data = data.map(operations=op_list, input_columns=input_columns, output_columns=output_cols,
                        column_order=output_cols)
        res = []
        for i in data.create_dict_iterator(output_numpy=True):
            for col_name in output_cols:
                res.append(i[col_name].tolist())
        return res

    arr = [1, 0]
    # Python one-hot followed by a C++ equality mask.
    assert test_config(arr, ["cols"], ["cols"],
                       [py_transforms.OneHotOp(2), c_transforms.Mask(c_transforms.Relational.EQ, 1)]) == \
        [[[False, True]], [[True, False]]]
    # Python ops mixed with a lambda and a C++ Fill.
    assert test_config(arr, ["cols"], ["cols"],
                       [py_transforms.OneHotOp(2), (lambda x: x + x), c_transforms.Fill(1)]) \
        == [[[1, 1]], [[1, 1]]]
    assert test_config(arr, ["cols"], ["cols"],
                       [py_transforms.OneHotOp(2), (lambda x: x + x), c_transforms.Fill(1), (lambda x: x + x)]) \
        == [[[2, 2]], [[2, 2]]]
    # C++ PadEnd then a Python lambda.
    assert test_config([[1, 3]], ["cols"], ["cols"],
                       [c_transforms.PadEnd([3], -1), (lambda x: x + x)]) \
        == [[2, 6, -2]]

    # Multiple input columns combined into one output column.
    arr = ([[1]], [[3]])
    assert test_config(arr, ["col0", "col1"], ["a"], [(lambda x, y: x + y), c_transforms.PadEnd([2], -1)]) == [[4, -1]]
e51191a48cc79bcac8cfe41508dd9e539da4645c
32,046
import struct


def add_header(input_array, codec, length, param):
    """Prepend a 12-byte big-endian header to an encoded byte array.

    The header consists of three consecutive big-endian signed 32-bit
    integers: the codec id, the decoded length, and a codec parameter.

    :param input_array: the encoded array to add the header to
    :param codec: the codec being used
    :param length: the length of the decoded array
    :param param: the parameter to add to the header
    :return: the header followed by ``input_array``
    """
    header = struct.pack(">iii", codec, length, param)
    return header + input_array
228db86bb6eb9e3c7cc59cc48b67e443d46cc36d
32,047
import torch


def jaccard_loss(logits, true, eps=1e-7):
    """Computes the Jaccard loss, a.k.a the IoU loss.

    Note that PyTorch optimizers minimize a loss. In this case, we would like to
    maximize the jaccard loss so we return the negated jaccard loss.

    Args:
        true: a tensor of shape [B, H, W] or [B, 1, H, W].
        logits: a tensor of shape [B, C, H, W]. Corresponds to
            the raw output or logits of the model.
        eps: added to the denominator for numerical stability.

    Returns:
        jacc_loss: the Jaccard loss.
    """
    num_classes = logits.shape[1]
    if num_classes == 1:
        # Binary case: build a two-channel one-hot target and reorder its
        # channels to match the (positive, negative) probability stack below.
        true_1_hot = torch.eye(num_classes + 1)[true.squeeze(1)]
        true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
        true_1_hot_f = true_1_hot[:, 0:1, :, :]
        true_1_hot_s = true_1_hot[:, 1:2, :, :]
        true_1_hot = torch.cat([true_1_hot_s, true_1_hot_f], dim=1)
        pos_prob = torch.sigmoid(logits)
        neg_prob = 1 - pos_prob
        probas = torch.cat([pos_prob, neg_prob], dim=1)
    else:
        true_1_hot = torch.eye(num_classes)[true.squeeze(1)]
        true_1_hot = true_1_hot.permute(0, 3, 1, 2).float()
        # NOTE(review): relies on a module-level `F` (torch.nn.functional)
        # imported elsewhere in this file — confirm.
        probas = F.softmax(logits, dim=1)
    true_1_hot = true_1_hot.type(logits.type())
    # Reduce over batch and spatial dimensions, keeping the class dimension.
    dims = (0,) + tuple(range(2, true.ndimension()))
    intersection = torch.sum(probas * true_1_hot, dims)
    cardinality = torch.sum(probas + true_1_hot, dims)
    union = cardinality - intersection
    jacc_loss = (intersection / (union + eps)).mean()
    return (1 - jacc_loss)
10e113294f67cbe88b61e90af51c6c6659ead805
32,048
def _get_positional_body(*args, **kwargs): """Verify args and kwargs are valid, and then return the positional body, if users passed it in.""" if len(args) > 1: raise TypeError("There can only be one positional argument, which is the POST body of this request.") if "options" in kwargs: raise TypeError("The 'options' parameter is positional only.") return args[0] if args else None
c777296ab9c0e95d0f4d7f88dfd4ae292bfc558f
32,049
def resize_image_with_padding(im, new_dims, interp_order=1):
    """
    Resize an image array with interpolation, preserving the aspect ratio
    and zero-padding the unused area.

    Parameters
    ----------
    im : (H x W x K) ndarray
    new_dims : (height, width) tuple of new dimensions.
    interp_order : interpolation order, default is linear.
        NOTE(review): currently unused — cv2.resize is called with its
        default interpolation; confirm whether this should be honored.

    Returns
    -------
    im : resized ndarray with shape (new_dims[0], new_dims[1], K)
    """
    # Zero-filled float32 output canvas.
    ret = np.empty((new_dims[0], new_dims[1], im.shape[-1]), dtype=np.float32)
    ret.fill(0)
    target_as = new_dims[1] / float(new_dims[0])
    aspect_ratio = im.shape[1] / float(im.shape[0])
    if target_as < aspect_ratio:
        # Source is wider than target: fit to width, pad at the bottom.
        scale = new_dims[1] / float(im.shape[1])
        scaled_width = int(new_dims[1])
        scaled_height = min(int(new_dims[0]), int(scale* im.shape[0]))
        resized_img = cv2.resize(im, (scaled_width, scaled_height))
        start_x = 0
        start_y = 0
        ret[start_x: start_x + scaled_height, start_y: start_y + scaled_width, :] = resized_img
    else:
        # Source is taller than target: fit to height, center horizontally.
        scale = new_dims[0] / float(im.shape[0])
        scaled_width = min(int(new_dims[1]), int(scale* im.shape[1]))
        scaled_height = int(new_dims[0])
        resized_img = cv2.resize(im, (scaled_width, scaled_height))
        start_x = 0
        start_y = int((new_dims[1] - scaled_width) / 2)
        ret[start_x: start_x + scaled_height, start_y: start_y + scaled_width, :] = resized_img
    return ret.astype(np.float32)
d39195cdd20db2a7cd6750c7a81b419a62f820f9
32,050
from typing import List
from typing import Dict


def _get_run_stats(calc_docs: List[Calculation]) -> Dict[str, RunStatistics]:
    """Get summary of runtime statistics for each calculation in this task.

    Returns a mapping from each calculation's task name to its run stats,
    plus an "overall" entry: memory/core metrics take the maximum across
    calculations while the time metrics are accumulated.
    """
    totals = {
        "average_memory": 0.0,
        "max_memory": 0.0,
        "elapsed_time": 0.0,
        "system_time": 0.0,
        "user_time": 0.0,
        "total_time": 0.0,
        "cores": 0,
    }
    summary = {}
    for doc in calc_docs:
        stats = doc.output.run_stats
        summary[doc.task_name] = stats
        # Peak-style metrics: keep the maximum seen so far.
        totals["average_memory"] = max(totals["average_memory"], stats.average_memory)
        totals["max_memory"] = max(totals["max_memory"], stats.max_memory)
        totals["cores"] = max(totals["cores"], stats.cores)
        # Time metrics accumulate across calculations.
        for key in ("elapsed_time", "system_time", "user_time", "total_time"):
            totals[key] += getattr(stats, key)
    summary["overall"] = RunStatistics(**totals)
    return summary
fb83c559ced3ca44eaee767d6dabf8c183779f7f
32,051
import os


def naming_convention(file_dir, file_name):
    """Rename a file with the first 8 characters of its SHA-1 hash.

    Parameters
    ----------
    file_dir : str
        Directory containing the file.
    file_name : str
        File name including an extension (must contain at least one dot).

    Returns
    -------
    tuple(str, str)
        (new file name "<stem>-<hash8>.<ext>", full SHA-1 hash).
    """
    long_hash = sha1sum(os.path.join(file_dir, file_name))
    # rsplit keeps additional dots in the stem, so names like
    # "archive.tar.gz" no longer raise ValueError on unpacking.
    file_prefix, file_sufix = file_name.rsplit('.', 1)
    new_name = '{file_prefix}-{short_hash}.{file_sufix}'.format(
        file_prefix=file_prefix,
        short_hash=long_hash[:8],
        file_sufix=file_sufix)
    return new_name, long_hash
c150ce78ddfa6ac35e74421eb89979d3e069a585
32,052
def mvn_log_pdf(x, mean, covariance): """ This function calculates the log-likelihood of x for a multivariate normal distribution parameterised by the provided mean and covariance. :param x: The location(s) to evaluate the log-likelihood. Must be [B x D], where B is the batch size and D is the dimensionality of the multivariate normal. B can be 1. :param mean: The mean of the multivariate normal distribution. Must be [1 x D]. :param covariance: The covariance of the multivariate normal distribution. Must be [D x D]. :return: The log-likelihood values evaluated at x. This is a B-length vector. """ # Determine number of dimensions of the multivariate normal distribution. num_dims = tf.shape(covariance, out_type=TF_DTYPE)[-1] # num_dims = covariance.get_shape().as_list()[-1] # Calculate log-likelihood. diff = tf.transpose(x - mean) # [D x B]. chol_covar = tf.cholesky(tf.squeeze(covariance)) # [D x D]. alpha = tf.transpose(tf.matrix_triangular_solve(chol_covar, diff, lower=True)) # [B x D]. beta = tf.reduce_sum(tf.log(tf.diag_part(chol_covar))) return -0.5 * (tf.reduce_sum(tf.square(alpha), axis=-1) + num_dims * np.log(2.0 * np.pi)) - beta
dc2b019ace6760a040d97045b50b552e937706fd
32,053
def question_input(user_decision=None):
    """Obtains input from user on whether they want to scan barcodes or not.

    Parameters
    ----------
    user_decision: default is None, if passed in, will not ask user for
        input. string type.

    Returns
    -------
    True if the (given or prompted) answer was exactly 'yes',
    False otherwise.
    """
    # Ask user if they would like to scan a barcode, and obtain their input.
    # `is None` (not `== None`) is the correct identity check here.
    if user_decision is None:
        decision = input("Would you like to scan a barcode? Type 'yes' to begin. ")
    else:
        decision = user_decision

    # Return boolean value based on user response
    return decision == 'yes'
afb7f3d4eef0795ad8c4ff7878e1469e07ec1875
32,054
def depth(d):
    """Return the nesting depth of a dictionary.

    Non-dict values have depth 0; an empty dict has depth 1; otherwise the
    depth is 1 plus the deepest depth among the values.
    """
    if not isinstance(d, dict):
        return 0
    if not d:
        return 1
    return 1 + max(depth(value) for value in d.values())
6fd72b255a5fba193612cfa249bf4d242b315be1
32,055
import logging


def validate_analysis_possible(f):
    """
    Decorator that validates that the amount of information is sufficient
    for attractor analysis.

    The wrapped function is only called when the database connection
    (first positional argument) reports more than one aggregated
    attractor and a total frequency above 2; otherwise ``None`` is
    returned and an informational message is logged.

    :param f: function
    :return: decorated function
    """
    def f_decorated(*args, **kwargs):
        db_conn, *_ = args
        root = db_conn.root
        insufficient = (root.n_aggregated_attractors() == 1
                        or root.total_frequency() <= 2)
        if insufficient:
            logging.getLogger().info('Not enough attractors to infer node correlations.')
            return None
        return f(*args, **kwargs)

    return f_decorated
9de0cbf2e18e47d14912ae3ebdff526a73f2c25d
32,056
import struct


def get_short_chan_id(source: hex, dest: hex) -> bytes:
    """Return a short channel id (bytes) based on source and destination
    provided.

    Looks up the channel from ``source`` to ``dest`` via the RPC
    ``listchannels`` call, then packs its "<height>x<tx>x<output>" id as
    3 + 3 + 2 big-endian bytes. (Note: the value returned is a bytearray.)
    """
    channel = [
        channel
        for channel in config.rpc.listchannels(source=source)["channels"]
        if channel["destination"] == dest
    ][0]["short_channel_id"]

    block_height, tx_index, output_index = channel.split("x")
    # Parenthesize the whole conjunction: the previous form
    # `not block_height and tx_index and output_index` only negated
    # block_height, so a missing tx_index/output_index went undetected.
    if not (block_height and tx_index and output_index):
        raise ValueError(
            f"Could not find block_height, tx_index and output_index in "
            f"channels"
        )
    block_height = int(block_height)
    tx_index = int(tx_index)
    output_index = int(output_index)
    logger.debug(f"Got short channel ID: {block_height}x{tx_index}x{output_index}")
    _id = bytearray()
    # 3 bytes for block height and tx_index
    _id += struct.pack(config.be_u32, block_height)[-3:]
    _id += struct.pack(config.be_u32, tx_index)[-3:]
    _id += struct.pack(config.be_u16, output_index)
    return _id
11e3bccf4fb120b05c9da118f3e05dfc02394a9e
32,057
import logging
import os


def register_logging(app):
    """Register log handlers on the Flask app: a rotating file handler for
    INFO-level records and an SMTP handler that mails ERROR-level records
    (both attached only outside debug mode)."""
    class RequestFormatter(logging.Formatter):
        # Inject the current request's URL and client address into records.
        def format(self, record):
            record.url = request.url
            record.remote_addr = request.remote_addr
            return super(RequestFormatter, self).format(record)

    request_formatter = RequestFormatter(
        '[%(asctime)s] %(remote_addr)s requested %(url)s\n'
        '%(levelname)s in %(module)s: %(message)s'
    )

    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    logs_dir = os.path.join(basedir, 'logs')
    try:
        os.makedirs(logs_dir)
    except OSError:
        # Directory already exists.
        pass
    file_handler = RotatingFileHandler(os.path.join(logs_dir, 'bluelog.log'),
                                       maxBytes=10 * 1024 * 1024, backupCount=10)
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging.INFO)

    mail_handler = SMTPHandler(
        mailhost=app.config['MAIL_SERVER'],
        fromaddr=app.config['MAIL_USERNAME'],
        # NOTE(review): 'ADMIN_EMAIL' is a literal string, probably meant
        # app.config['ADMIN_EMAIL'] — confirm.
        toaddrs=['ADMIN_EMAIL'],
        subject=_l('Bluelog Application Error'),
        credentials=(app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD']))
    mail_handler.setLevel(logging.ERROR)
    mail_handler.setFormatter(request_formatter)

    if not app.debug:
        app.logger.addHandler(mail_handler)
        app.logger.addHandler(file_handler)
d657df03bffa1bcd3a0bcbee20389e654b9f863f
32,058
def get_rel(href, method, rule):
    """Returns the `rel` of an endpoint (see `Returns` below).

    If the rule is a common rule as specified in the utils.py file, then that
    rel is returned. If the current url is the same as the href for the
    current route, `self` is returned.

    Args:
        href (str): the full endpoint url (e.g. https://alegna-api.nerevu.com/v1/data)
        method (str): an HTTP method (e.g. 'GET' or 'DELETE')
        rule (str): the endpoint path (e.g. '/v1/data/<int:id>')

    Returns:
        rel (str): a string representing what the endpoint does

    Examples:
        >>> href = 'https://alegna-api.nerevu.com/v1/data'
        >>> method = 'GET'
        >>> rule = '/v1/data'
        >>> get_rel(href, method, rule)
        'data'

        >>> method = 'DELETE'
        >>> get_rel(href, method, rule)
        'data_delete'

        >>> method = 'GET'
        >>> href = 'https://alegna-api.nerevu.com/v1'
        >>> rule = '/v1'
        >>> get_rel(href, method, rule)
        'home'
    """
    if href == request.url and method == request.method:
        rel = "self"
    else:
        # check if route is a common route
        resourceName = get_resource_name(rule)
        rel = get_common_rel(resourceName, method)

        # add the method if not common or GET
        if not rel:
            rel = resourceName

            if method != "GET":
                rel = f"{rel}_{method.lower()}"

        # get params and add to rel
        params = get_params(rule)
        joined_params = "_".join(params)

        if joined_params:
            rel = f"{rel}_{joined_params}"

    return rel
e1e5af2baabec766f07460275d7525569439b40c
32,059
def is_exist(self, connectivity): """Check the existence of a cell defined by a connectivity (vector of points indices). The order of points indices does not matter. Parameters ---------- self : CellMat an CellMat object connectivity : ndarray an array of node tags Returns ------- bool True if the element already exist """ # Check the existence of the element e = np.array([], dtype=int) for nd_tag in connectivity: e = np.concatenate((e, self.get_point2cell(nd_tag))) unique, unique_counts = np.unique(e, return_counts=True) for ie in range(len(unique)): if unique_counts[ie] == self.nb_pt_per_cell and unique_counts[ie] == len( connectivity ): # If this condition is valid, the element already exist return True return False
59f111040ba158fa03e82400c1d3cb9dc1444601
32,060
from typing import Optional
from typing import cast


def get_current_identity_arn(boto3_session: Optional[boto3.Session] = None) -> str:
    """Get current user/role ARN.

    Parameters
    ----------
    boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receive None.

    Returns
    -------
    str
        User/role ARN.

    Examples
    --------
    >>> import awswrangler as wr
    >>> arn = wr.sts.get_current_identity_arn()
    """
    session: boto3.Session = _utils.ensure_session(session=boto3_session)
    # STS GetCallerIdentity returns the caller's Arn; cast for static typing.
    return cast(str, _utils.client(service_name="sts", session=session).get_caller_identity().get("Arn"))
892ffdd35d8a31849f4a53b0048a5bf87be624ce
32,061
import hmac


def proxy_signature_is_valid(request, secret):
    """
    Return true if the calculated signature matches that present in the
    query string of the given request.

    :param request: request object exposing a mutable GET query dict
    :param secret: shared secret used to compute the proxy signature
    """
    # Allow skipping of validation with an explicit setting.
    # If setting not present, skip if in debug mode by default.
    skip_validation = getattr(settings, 'SKIP_APP_PROXY_VALIDATION', settings.DEBUG)
    if skip_validation:
        return True

    # Create a mutable version of the GET parameters.
    query_dict = request.GET.copy()

    # Extract the signature we're going to verify. If no signature's present, the request is invalid.
    try:
        signature_to_verify = query_dict.pop('signature')[0]
    except KeyError:
        return False

    calculated_signature = get_proxy_signature(query_dict, secret)

    # Try to use compare_digest() to reduce vulnerability to timing attacks.
    # If it's not available, just fall back to regular string comparison.
    try:
        return hmac.compare_digest(calculated_signature.encode('utf-8'),
                                   signature_to_verify.encode('utf-8'))
    except AttributeError:
        return calculated_signature == signature_to_verify
4050c736188b53e274a16d26c4f45d8ac1983785
32,062
def remove_whitespace(sentences):
    """
    Strip trailing whitespace (spaces, tabs, newlines) from every token.

    Arguments:
    ----------
        sentences : list<list<str>>

    Returns:
    --------
        list<list<str>> : same strings as input, without trailing
        spaces or newlines.
    """
    cleaned = []
    for sentence in sentences:
        cleaned.append([token.rstrip() for token in sentence])
    return cleaned
ed50124aec20feba037ea775490ede14457d6943
32,063
def generate_jwt(payload, expiry, secret=None):
    """
    Generate a signed JWT (HS256).

    :param payload: dict of claims to embed (keys may override ``exp``)
    :param expiry: datetime expiration, stored as the ``exp`` claim
    :param secret: signing key; falls back to the app's JWT_SECRET config
    :return: encoded jwt
    """
    # payload entries take precedence over the default exp claim,
    # matching the original dict-update order.
    claims = {'exp': expiry, **payload}

    signing_key = secret if secret else current_app.config['JWT_SECRET']

    return jwt.encode(claims, signing_key, algorithm='HS256')
aa4727b7d26a7f00b015cbaed9a977c5865eedca
32,064
import json
import secrets


def authentication(uuid):
    """Allow a client to request/receive an authentication key.

    POST: register `uuid` and issue a fresh URL-safe auth key
          (201 on success, 409 if the UUID already exists).
    GET:  return the stored auth key for `uuid` (404 if unknown).
    """
    if request.method == "POST":
        with peewee_db.atomic():
            if RestClient.get_or_none(RestClient.uuid == uuid):
                return json.jsonify({"msg": "UUID already exits."}), 409
            else:
                # Cryptographically strong, URL-safe token.
                authk = secrets.token_urlsafe()
                x = RestClient(uuid=uuid, authkey=authk)
                x.save()
                return json.jsonify({"msg": "RestClient saved.",
                                     "url": "/authentication/" + uuid}), 201
    elif request.method == "GET":
        with peewee_db.atomic():
            query = RestClient.select().where(RestClient.uuid == uuid)
            if len(query) > 0:
                return json.jsonify({"authkey": query[0].authkey})
            else:
                return json.jsonify({"msg": "UUID not registered."}), 404
ec984b03c17917b12eb7ed6034c28b40f42aac63
32,065
def one_sided_ema(xolds, yolds, low=None, high=None, n=512,
                  decay_steps=1., low_counts_threshold=1e-8):
    """From openai.baselines.common.plot_util.py

    Causal (one-sided) EMA (exponential moving average) smoothing and
    resampling onto an even grid of ``n`` points. No extrapolation is
    performed, so ``xolds[0] <= low`` and ``high <= xolds[-1]`` must hold.

    Arguments
    ---------
    xolds : array or list
        x values of data. Needs to be sorted in ascending order
    yolds : array of list
        y values of data. Has to have the same length as xolds
    low : float
        min value of the new x grid. By default equals to xolds[0]
    high : float
        max value of the new x grid. By default equals to xolds[-1]
    n : int
        number of points in new x grid
    decay_steps : float
        EMA decay factor, expressed in new x grid steps.
    low_counts_threshold: float or int
        y values with counts less than this value will be set to NaN

    Returns
    -------
    xs : array with new x grid
    ys : array of EMA of y at each point of the new x grid
    count_ys : array of EMA of y counts at each point of the new x grid
    """
    if low is None:
        low = xolds[0]
    if high is None:
        high = xolds[-1]

    assert xolds[0] <= low, \
        f'low={low} < xolds[0]={xolds[0]} - extrapolation not permitted!'
    assert xolds[-1] >= high, \
        f'high={high} > xolds[-1]={xolds[-1]} - extrapolation not permitted!'
    assert len(xolds) == len(yolds), \
        f'len of xolds ({len(xolds)}) and yolds ({len(yolds)}) do not match!'

    xolds = xolds.astype('float64')
    yolds = yolds.astype('float64')

    xnews = np.linspace(low, high, n)
    decay_period = (high - low) / (n - 1) * decay_steps
    interstep_decay = np.exp(- 1. / decay_steps)

    sum_ys = np.zeros_like(xnews)
    count_ys = np.zeros_like(xnews)

    sum_y = 0.
    count_y = 0.
    next_old = 0  # index of the first not-yet-consumed old sample

    for i, xnew in enumerate(xnews):
        # Decay the running EMA by one grid step.
        sum_y *= interstep_decay
        count_y *= interstep_decay
        # Fold in every old sample at or before the current grid point,
        # weighted by its distance-based decay.
        while next_old < len(xolds) and xolds[next_old] <= xnew:
            decay = np.exp(- (xnew - xolds[next_old]) / decay_period)
            sum_y += decay * yolds[next_old]
            count_y += decay
            next_old += 1
        sum_ys[i] = sum_y
        count_ys[i] = count_y

    ys = sum_ys / count_ys
    # Grid points with (almost) no support are undefined.
    ys[count_ys < low_counts_threshold] = np.nan
    return xnews, ys, count_ys
17f14cd7a775c347366f375dfecef6285dc55af7
32,066
def settings_value(setting_name):
    """Return value for a given setting variable.

    Template-tag usage: {% settings_value "LANGUAGE_CODE" %}

    Returns the empty string when the named setting is not defined.
    """
    return getattr(settings, setting_name, "")
aab0b2f16f0fa66a1c4066382b0b96c6c2a15215
32,067
def mongo_stat(server, args_array, **kwargs):
    """Method: mongo_stat

    Description: Function stub holder for mongo_perf.mongo_stat.
    Always reports success; it only touches the arguments the real
    implementation would use.

    Arguments:
        (input) server
        (input) args_array
        (input) **kwargs
            class_cfg
    """
    # Evaluate (and discard) the condition the real function would act on.
    _ = server and args_array and kwargs.get("class_cfg", True)
    return True
45ae8fd66a1d0cae976959644837fae585d68e65
32,068
import pickle


def train_new_TFIDF(docs, save_as=None):
    """
    Trains a new TFIDF model.\n
    If a user abstract is given, it is used for the training.

    Parameters
    ----------
    docs : `[String]`. Documents to train on\n
    save_as : `String`. Name to save model as (pickled under
        custom_logic/src/models/<save_as>.sav when given).

    Returns
    -------
    `TfidfVectorizer : The newly trained TFIDF model
    """
    print("Started training TFIDF")
    objectives = prepare_documents_for_tfidf(docs)

    # creating tfidf model with given parameters (not trained yet)
    if len(docs) == 1:
        # With a single document the default max_df would prune every term.
        tfidf = init_tfidf_model(max_df=1.0)
    else:
        tfidf = init_tfidf_model()

    # Fit the TfIdf model. Learn vocab and IDF
    tfidf.fit(objectives)
    print("Finished training TFIDF")

    if (save_as):
        pickle.dump(tfidf, open(
            "custom_logic/src/models/" + save_as + ".sav", 'wb')
        )
    return tfidf
be520b62fa6f718eeb185e59fed5fe3edf8d7ea7
32,069
import math


def generate_sphere_points(n):
    """
    Returns list of coordinates on a sphere using the Golden-
    Section Spiral algorithm.

    Points are spaced evenly along the y axis and rotated by the golden
    angle per step, giving a near-uniform distribution on the unit sphere.
    """
    golden_angle = math.pi * (3 - math.sqrt(5))
    step = 2 / float(n)
    points = []
    for k in range(int(n)):
        y = k * step - 1 + (step / 2)
        radius = math.sqrt(1 - y * y)
        theta = k * golden_angle
        points.append(v3.vector(math.cos(theta) * radius, y,
                                math.sin(theta) * radius))
    return points
6349f001709c2d2958cc2bcdd9bfe9c70a79b8f9
32,070
def get_options():
    """
    Purpose:
        Parse CLI arguments for script
    Args:
        N/A
    Return:
        Parsed argparse namespace with topic_replication, topic_partitions,
        kafka_brokers and topic_name.
    """
    arg_parser = ArgumentParser(description="Produce to Kafka Topic")
    required_group = arg_parser.add_argument_group("Required Arguments")
    optional_group = arg_parser.add_argument_group("Optional Arguments")

    # Optional Arguments
    optional_group.add_argument(
        "-R",
        "--replication",
        "--topic-replication",
        type=int,
        default=1,
        required=False,
        dest="topic_replication",
        help="Replication factor of the topic to create",
    )
    optional_group.add_argument(
        "-P",
        "--partitions",
        "--topic-partitions",
        type=int,
        default=1,
        required=False,
        dest="topic_partitions",
        help="Number of partitions of the topic to create",
    )

    # Required Arguments
    required_group.add_argument(
        "-B",
        "--broker",
        "--brokers",
        "--kafka-broker",
        "--kafka-brokers",
        action="append",
        type=str,
        required=True,
        dest="kafka_brokers",
        help="Kafka Brokers",
    )
    required_group.add_argument(
        "-T",
        "--topic",
        "--kafka-topic",
        "--topic-name",
        type=str,
        required=True,
        dest="topic_name",
        help="Topic name to create",
    )

    return arg_parser.parse_args()
a6168fb549fff9b2634f4c55cc625b7e3e387fa7
32,071
def _estimate_log_gaussian_prob(X, means, precisions_chol, covariance_type):
    """Estimate the log Gaussian probability.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)

    means : array-like of shape (n_components, n_features)

    precisions_chol : array-like
        Cholesky decompositions of the precision matrices.
        'full' : shape of (n_components, n_features, n_features)
        'tied' : shape of (n_features, n_features)
        'diag' : shape of (n_components, n_features)
        'spherical' : shape of (n_components,)

    covariance_type : {'full', 'tied', 'diag', 'spherical'}

    Returns
    -------
    log_prob : array, shape (n_samples, n_components)
    """
    n_samples, n_features = X.shape
    n_components, _ = means.shape
    # det(precision_chol) is half of det(precision)
    log_det = _compute_log_det_cholesky(
        precisions_chol, covariance_type, n_features)

    if covariance_type == 'full':
        log_prob = np.empty((n_samples, n_components))
        # Removed a dead pure-Python inner loop that recomputed the same
        # quantity element by element and discarded it (debug leftover).
        for i, x in enumerate(X):
            for k, (mu, prec_chol) in enumerate(zip(means, precisions_chol)):
                # Squared Mahalanobis distance via the precision Cholesky:
                # ||(x - mu) @ prec_chol||^2
                y = np.dot(x, prec_chol) - np.dot(mu, prec_chol)
                log_prob[i, k] = np.sum(np.square(y), axis=0)

    elif covariance_type == 'tied':
        log_prob = np.empty((n_samples, n_components))
        for k, mu in enumerate(means):
            y = np.dot(X, precisions_chol) - np.dot(mu, precisions_chol)
            log_prob[:, k] = np.sum(np.square(y), axis=1)

    elif covariance_type == 'diag':
        precisions = precisions_chol ** 2
        log_prob = (np.sum((means ** 2 * precisions), 1) -
                    2. * np.dot(X, (means * precisions).T) +
                    np.dot(X ** 2, precisions.T))

    elif covariance_type == 'spherical':
        precisions = precisions_chol ** 2
        log_prob = (np.sum(means ** 2, 1) * precisions -
                    2 * np.dot(X, means.T * precisions) +
                    np.outer(row_norms(X, squared=True), precisions))

    # Gaussian log-density: -0.5 * (d*log(2*pi) + mahalanobis^2) + log|prec_chol|
    return -.5 * (n_features * np.log(2 * np.pi) + log_prob) + log_det
c8d50ca609dfd877463ae20c572ec1ab60960b21
32,072
def descendants(region_id, allowed_ids, rm: RegionMeta):
    """Get all filtered descendant IDs of a given region ID.

    A descendant is only accepted if it's in ``allowed_ids`` or is a leaf
    region. This is mimicking Dimitri's algorithm, I'm not sure about why
    this must be that way.
    """
    all_descendants = set()
    for child_id in rm.children(region_id):
        if child_id in allowed_ids or rm.is_leaf(child_id):
            all_descendants.add(child_id)
            # NOTE(review): recursing only under accepted children prunes the
            # subtrees of rejected non-leaf nodes — confirm this is intended
            # rather than unconditional recursion over every child.
            all_descendants |= descendants(child_id, allowed_ids, rm)
    return all_descendants
b0d1a5b57c00335343e52fbfe6e73b47bbccda42
32,073
import json

def search_classification(request):
    """
    Filters the classification by name.

    Reads the JSON-encoded ``filters`` GET parameter (keys: ``fields``,
    ``name``, ``method``) and returns the matching classifications,
    annotated with their rank counts, as a REST response.
    """
    # The default must be a JSON *string*: json.loads({}) raises TypeError
    # whenever the 'filters' parameter is absent from the request.
    filters = json.loads(request.GET.get('filters', '{}'))
    fields = filters.get('fields', [])
    page = int_arg(request.GET.get('page', 1))

    classifications = Classification.objects.get_queryset()

    if 'name' in fields:
        # 'ieq' = case-insensitive exact match (default),
        # 'icontains' = case-insensitive substring match.
        method = filters.get('method', 'ieq')
        if method == 'ieq':
            classifications = classifications.filter(name__iexact=filters['name'])
        elif method == 'icontains':
            classifications = classifications.filter(name__icontains=filters['name'])

    # Exposes the per-classification rank count as `ranks__count`.
    classifications = classifications.annotate(Count('ranks'))

    classifications_list = []

    if classifications:
        for classification in classifications:
            classifications_list.append({
                "id": classification.id,
                "name": classification.name,
                'can_modify': classification.can_modify,
                'can_delete': classification.can_delete,
                'label': classification.get_label(),
                'description': classification.description,
                'num_classification_ranks': classification.ranks__count
            })

    response = {
        'items': classifications_list,
        'page': page
    }

    return HttpResponseRest(request, response)
fd1b78a9169c3496ee0b5d286d03d81cf75473c9
32,074
import click

def get_zone_id(ctx, param, zone_name):
    """Look up a CloudFlare zone by name and return its (id, name) pair.

    Raises a ClickException unless exactly one zone matches.
    """
    del ctx    #unused
    del param  #unused
    matches = CloudFlare.CloudFlare().zones.get(params={'name': zone_name})
    if len(matches) != 1:
        raise click.ClickException('Invalid zone name: {}'.format(zone_name))
    zone = matches[0]
    return (zone['id'], zone['name'])
07ebf939fe08b9f146ddb871af97e59c88e9484d
32,075
from typing import List

def solve(letters: List[str], dictionary: trie.Node) -> List[str]:
    """Find every word buildable from ``letters`` that contains the centre
    letter (``letters[0]``), via breadth-first search over the prefix trie.
    """
    center = letters[0]
    found = set()
    frontier = deque(letters)
    while frontier:
        prefix = frontier.popleft()
        is_word, is_prefix = trie.has_word_has_prefix(dictionary, prefix)
        if is_word and center in prefix:
            found.add(prefix)
        if is_prefix:
            # Extend the candidate by each available letter.
            for letter in letters:
                frontier.append(prefix + letter)
    return list(found)
8a1773c6c388b88cc4b208ca24d5c0b8249722d0
32,076
import datetime

def datetime_from_milliseconds_since_epoch(ms_since_epoch: int, timezone: datetime.timezone = None) -> datetime.datetime:
    """Converts milliseconds since epoch to a datetime object.

    Note: the original ``from datetime import datetime`` import made every
    ``datetime.datetime`` / ``datetime.timezone`` reference an
    AttributeError; the module itself must be imported.

    Arguments:
    ----------
    ms_since_epoch {int} -- Number of milliseconds since epoch.

    Keyword Arguments:
    --------
    timezone {datetime.timezone} -- The timezone of the new datetime object. (default: {None})

    Returns:
    --------
    datetime.datetime -- A python datetime object.
    """
    # fromtimestamp expects seconds, hence the division by 1000.
    return datetime.datetime.fromtimestamp((ms_since_epoch / 1000), tz=timezone)
95528da79c78ca9956d656067b5be623058b12e6
32,077
def input_file(path):
    """
    Read common text file as a stream of (k, v) pairs where k is line number
    and v is line text
    :param path: path to the file to read
    :return: lazy seq of pairs
    """
    # enumerate is the idiomatic equivalent of zip(count(), ...): a lazy
    # iterator of (index, line) pairs starting at 0.
    return enumerate(__input_file(path))
d1699863f790181bdbd5ea1abc08446e32909ffc
32,078
def lik_constant(vec, rho, t, root=1, survival=1, p1=p1):
    """
    Calculates the likelihood of a constant-rate birth-death process,
    conditioned on the waiting times of a phylogenetic tree and degree
    of incomplete sampling.

    Based off of the R function `TreePar::LikConstant` written by Tanja
    Stadler.

    T. Stadler. On incomplete sampling under birth-death models and
    connections to the sampling-based coalescent. Jour. Theo. Biol.
    261: 58-66, 2009.

    Args:
        vec (float, float): two element tuple of birth and death
        rho (float): sampling fraction
        t (list): vector of waiting times
        root (bool): include the root or not? (default: 1)
        survival (bool): assume survival of the process? (default: 1)

    Returns:
        float: a likelihood (negated log-likelihood, for minimisers)
    """
    l = vec[0]  # noqa: E741
    m = vec[1]
    # Sort a copy so the caller's list is not mutated as a side effect
    # (the original sorted `t` in place).
    times = sorted(t, reverse=True)
    lik = (root + 1) * log(p1(times[0], l, m, rho))
    for tt in times[1:]:
        lik += log(l) + log(p1(tt, l, m, rho))
    if survival == 1:
        # Condition on survival of the process.
        lik -= (root + 1) * log(1 - p0(times[0], l, m, rho))
    return -lik
bfb74866eec3c6eedbd6536522403f08340d39d7
32,079
def window_bounds(
    window: Window,
    affine: Affine,
    offset: str = 'center'
) -> tuple[float, float, float, float]:
    """Create bounds coordinates from a rasterio window

    Parameters:
        window: Window
        affine: Affine
        offset: str

    Returns:
        coordinate bounds (w, s, e, n)
    """
    (first_row, first_col), (last_row, last_col) = window
    # Bottom-left corner comes from the last row / first column,
    # top-right from the first row / last column.
    west, south = xy(affine, last_row, first_col, offset=offset)
    east, north = xy(affine, first_row, last_col, offset=offset)
    return (west, south, east, north)
6d6dca039213b4f5ea85d9168172b5cb32ff1a1e
32,080
import copy

def get_k8s_model(model_type, model_dict):
    """
    Returns an instance of type specified model_type from an model instance
    or represantative dictionary.
    """
    # Deep-copy so neither the caller's dict nor model is mutated.
    model_dict = copy.deepcopy(model_dict)

    if isinstance(model_dict, model_type):
        return model_dict
    if not isinstance(model_dict, dict):
        raise AttributeError("Expected object of type 'dict' (or '{}') but got '{}'.".format(model_type.__name__, type(model_dict).__name__))

    # Translate camelCase keys into the snake_case attribute names the
    # model constructor expects, then build the model from them.
    snake_cased = _map_dict_keys_to_model_attributes(model_type, model_dict)
    return model_type(**snake_cased)
217c517b53acb596eec51773f856ceaf15a93597
32,081
def merge_specs(specs_):
    """Merge TensorSpecs.

    Args:
        specs_: List of TensorSpecs to be merged.

    Returns:
        a TensorSpec: a merged TensorSpec.
    """
    first = specs_[0]
    shape = first.shape
    dtype = first.dtype
    name = first.name
    for other in specs_[1:]:
        # All specs must agree on every axis except the leading one,
        # along which they are concatenated.
        assert shape[1:] == other.shape[1:], "incompatible shapes: %s, %s" % (
            shape, other.shape)
        assert dtype == other.dtype, "incompatible dtypes: %s, %s" % (
            dtype, other.dtype)
        shape = merge_shapes((shape, other.shape), axis=0)
    return specs.TensorSpec(
        shape=shape,
        dtype=dtype,
        name=name,
    )
fb0c895847c477cc90eb3b495505fe436667fe1e
32,082
def makeMapItem(pl_id):
    """
    Retrieve a player's items on the map (stand or pub).

    Uses the makeMapItemStand and makeMapItemPub helper functions.

    :param arg1: player id
    :type arg1: int
    :return: collection of the objects belonging to the player, with their position
    :rtype: collection of Json objects
    """
    mapItem = []
    mapItem.append(makeMapItemStand(pl_id))
    # NOTE(review): the query selects p.p_id but the rows are read via
    # row['pub_id'] below — confirm the db layer aliases the column this way,
    # otherwise this is a KeyError waiting to happen.
    pub_id = db.select("SELECT p.p_id FROM Pub p WHERE p.pl_id = "+ str(pl_id))
    if len(pub_id) != 0:
        for row in pub_id:
            mapItem.append(makeMapItemPub(row['pub_id']))
    return (mapItem)
5bb5745dc161d74b2bd832a213c55906bc5f358c
32,083
def get_test_result_records(page_number, per_page, filters):
    """Get page with applied filters for uploaded test records.

    Thin delegate to the configured backend implementation (``IMPL``).

    :param page_number: The number of page.
    :param per_page: The number of results for one page.
    :param filters: (Dict) Filters that will be applied for records.
    :return: whatever the backend returns for the requested page —
        presumably a list of test-result records; confirm against ``IMPL``.
    """
    return IMPL.get_test_result_records(page_number, per_page, filters)
89fc8b6d441ec8830cdb16fccfa142eed39f6c6a
32,084
def new_graph(**kwargs) -> Plot:
    """Build a ``GraphPlot`` from the given keyword arguments.

    :return: the new graph plot
    :rtype: Plot
    """
    # NOTE(review): kwargs is passed as a single positional dict rather than
    # expanded with **kwargs — confirm GraphPlot's constructor expects a dict.
    return GraphPlot(kwargs)
0bdbe97f6d86ba7dcfe802b5d41cb2e61fbb0ea7
32,085
def _average_path_length(n_samples_leaf): """ Taken from sklearn implementation of isolation forest: https://github.com/scikit-learn/scikit-learn/blob/fd237278e/sklearn/ensemble/_iforest.py#L480 For each given number of samples in the array n_samples_leaf, this calculates average path length of unsucceesful BST search. Args: n_samples_leaf: array of number of samples (in leaf) Returns: array of average path lengths """ n_samples_leaf_shape = n_samples_leaf.shape n_samples_leaf = n_samples_leaf.reshape((1, -1)) average_path_length = np.zeros(n_samples_leaf.shape) mask_1 = n_samples_leaf <= 1 mask_2 = n_samples_leaf == 2 not_mask = ~np.logical_or(mask_1, mask_2) average_path_length[mask_1] = 0.0 average_path_length[mask_2] = 1.0 average_path_length[not_mask] = ( 2.0 * (np.log(n_samples_leaf[not_mask] - 1.0) + np.euler_gamma) - 2.0 * (n_samples_leaf[not_mask] - 1.0) / n_samples_leaf[not_mask] ) return average_path_length.reshape(n_samples_leaf_shape)
d6434c4ed437e0f8bff9e5d9f25bfdc84ce8f82d
32,086
import pkg_resources

def get_pkg_license(pkgname):
    """
    Given a package reference (as from requirements.txt),
    return license listed in package metadata, or None when no
    License field is present.
    """
    pkg = pkg_resources.require(pkgname)[0]
    for line in pkg.get_metadata_lines('PKG-INFO'):
        # Metadata bodies can contain lines without a "key: value"
        # separator; the original unpacking raised ValueError on them.
        key, sep, value = line.partition(': ')
        if sep and key == "License":
            return value
    return None
238f2b3d33de6bf8ebfcca8f61609a58357e6da1
32,087
def parse(date):
    """
    Convert a date from different input formats.

    Parameters
    ----------
    date : str, float/int (unix timestamp in seconds),
        python native datetime.date object, or pandas datetime object.

    Returns
    -------
    datetime.date, or None when the input cannot be parsed.
    """
    try:
        if isinstance(date, str):
            return pd.to_datetime(date).date()
        if isinstance(date, (int, float)):
            # Numbers are interpreted as unix timestamps (seconds).
            return pd.to_datetime(date, unit='s').date()
        # Anything else (date/datetime/Timestamp) goes straight through pandas.
        return pd.to_datetime(date).date()
    except Exception:
        # Best-effort contract (as before): unparseable input yields None.
        # Narrowed from a bare `except:` so KeyboardInterrupt etc. propagate.
        return None
e7de2f8198c177630dfd75980b25fe5e9a70e0a2
32,088
def check_table_exist(conn):
    """Check if a table exists.

    We do not use IF EXISTS in creating the table so as to we will not create
    hyper table twice when the table already exists.

    Args:
        conn (psycopg2.extensions.connection): The connection to PostgreSQL
            database.

    Returns:
        True if table exists. False if table does not exist.
    """
    # Let the driver bind the table name instead of %-formatting it into
    # the SQL text (safer quoting, same result for a constant name).
    check_sql = """
    SELECT EXISTS (
        SELECT FROM
            pg_tables
        WHERE
            tablename  = %s
        );
    """
    # Context manager guarantees the cursor is closed even on error.
    with conn.cursor() as cur:
        cur.execute(check_sql, (TABLENAME,))
        return cur.fetchall()[0][0]
0d9199464c0323f5258e4ca6638b5a5c6759b434
32,089
import traceback

def get_err_str(exception, message, trace=True):
    """Return an error string containing a message and exception details.

    Args:
        exception (obj): the exception object caught.
        message (str): the base error message.
        trace (bool): whether the traceback is included (default=True).
    """
    if not trace:
        return "{}\nTYPE: {}\nDETAILS: {}\n".format(message, type(exception), exception)
    # Render the stored traceback and strip trailing whitespace.
    tb_text = "".join(traceback.format_tb(exception.__traceback__)).strip()
    return "{}\nTYPE: {}\nDETAILS: {}\nTRACEBACK:\n\n{}\n".format(
        message, type(exception), exception, tb_text)
0ede3de80fb1097b0537f90337cf11ffa1edecf7
32,090
def epoch_data(data, window_length=2, overlap=0.5):
    """
    Separates the data into equal sized windows

    Input:
        - data: data to seperate into windows
        - window_length: length of the window in seconds
        - overlap: overlap, float in [0,1), in percentage overlap of windows

    Output: an array of windows
    """
    sample_rate = 250  # Hz
    window_samples = int(window_length * sample_rate)
    overlap_samples = int(overlap * window_length * sample_rate)
    step = window_samples - overlap_samples

    windows = []
    start = 0
    # Slide a full window at a time; a trailing partial window is dropped.
    while start <= len(data) - window_samples:
        windows.append(data[start:start + window_samples])
        start += step
    return np.array(windows)
ab15ea4927118ed36ccd9d161772de7457239374
32,091
def dailyUsagePer15minn(index_list, tank_data_list):
    """Process tank temperatures series to water usage series divided into 96
    15 minutes intervals, where each interval has value in liters equal to
    used normalized hot water(37deg of c).

    :param index_list: list of indexes (pairs: before-use, after-use)
    :param tank_data_list: list of tank temperatures; each entry is a dict
        with at least 'time' (ISO 'date T hh:mm...' string) and 'value'
    :return: list of numbers, length = 96
    """
    first_use = tank_data_list[index_list[1]]
    hh_mm = first_use['time'].split('T')[1]
    zero_intervals = int(hh_mm[0:2]) * 4  # from midnight till first use hours
    zero_intervals += int(hh_mm[3:5]) // 15  # floor division for minutes
    usage = [0] * zero_intervals  # fill intervals till first use of day
    # index_list holds (before, after) pairs, hence the step of 2.
    for i in range(0, len(index_list) - 1, 2):
        tank_before = float(tank_data_list[index_list[i]]['value'])
        tank_after = float(tank_data_list[index_list[i + 1]]['value'])
        water_before, water_after = wrapTempToWaterTemp(temp_before=tank_before, temp_after=tank_after)
        used = calculateUsedWater(temp_tank_before=water_before, temp_tank_after=water_after)
        # add new interval with usage or extend last, depending on time
        if i > 0:
            t = tank_data_list[index_list[i]]['time'].split('T')[1]
            past = tank_data_list[index_list[i - 2]]['time'].split('T')[1]
            # Same hour and same 15-minute slot as the previous use:
            # accumulate into the last interval instead of appending.
            if (int(t[0:2]) - int(past[0:2])) == 0 and ((int(t[3:5]) // 15) == (int(past[3:5]) // 15)):
                usage[-1] = float(usage[-1]) + round(abs(normalizeUsedWater( temp_tank_before=water_before, temp_tank_after=water_after, used_volume=used)), 2)
            else:
                usage.append(round(abs(normalizeUsedWater(temp_tank_before=water_before, temp_tank_after=water_after, used_volume=used)), 2))
        else:
            usage.append(round(abs(normalizeUsedWater(temp_tank_before=water_before, temp_tank_after=water_after, used_volume=used)), 2))
        # fill gaps between intervals with usage by intervals with 0
        if i + 2 <= len(index_list) - 1:
            last_time = tank_data_list[index_list[i]]['time'].split('T')[1]
            next_time = tank_data_list[index_list[i + 2]]['time'].split('T')[1]
            hour_difference = (int(next_time[0:2]) - int(last_time[0:2]))
            if hour_difference == 0:
                # Same hour: count the 15-minute slots skipped in between.
                gap = ((int(next_time[3:5]) // 15) - (int(last_time[3:5]) // 15)) - 1
                if gap > 0:
                    usage.extend([0] * gap)
            elif hour_difference == 1:
                # Adjacent hours: slots left in the current hour plus slots
                # elapsed in the next hour.
                gap = (4 - ((int(last_time[3:5]) // 15) + 1)) + (int(next_time[3:5]) // 15)
                if gap > 0:
                    usage.extend([0] * gap)
            else:
                # Multiple hours apart: as above, plus 4 slots per whole
                # hour skipped.
                gap = (4 - ((int(last_time[3:5]) // 15) + 1)) + (int(next_time[3:5]) // 15)
                gap += (hour_difference - 1) * 4
                if gap > 0:
                    usage.extend([0] * gap)
    # fill intervals from last use of day till midnight
    last_use = tank_data_list[index_list[-2]]
    hh_mm = last_use['time'].split('T')[1]
    gap = ((24 - int(hh_mm[0:2])) - 1) * 4
    gap += 4 - ((int(hh_mm[3:5]) // 15) + 1)
    if gap > 0:
        usage.extend([0] * gap)
    return usage
7699b4455376675312b2800896db4f75b37a1ada
32,092
def is_valid(number):
    """Check if the number provided is a valid CAS RN."""
    try:
        result = validate(number)
    except ValidationError:
        return False
    return bool(result)
7e05c8e05f779c6f06150d90ab34b18585dd4803
32,093
import torch

def get_kernel(kernel_type, input_dim, on_gpu=True, **kwargs):
    """
    Initializes one of the following gpytorch kernels: RBF, Matern

    Args:
        kernel_type (str):
            Kernel type ('RBF', 'Matern52', 'Spectral')
        input_dim (int):
            Number of input dimensions (translates into number of kernel
            dimensions unless isotropic=True)
        on_gpu (bool):
            Sets default tensor type to torch.cuda.DoubleTensor
        **lengthscale (list of two lists):
            Determines lower (1st list) and upper (2nd list) bounds for
            kernel lengthscale(s); number of elements in each list is equal
            to the input dimensions
        **isotropic (bool):
            one kernel lengthscale in all dimensions
        **n_mixtures (int):
            number of mixtures for spectral mixture kernel
        **precision (str):
            Choose between single ('single') and double ('double') precision

    Returns:
        kernel object
    """
    precision = kwargs.get("precision", "double")
    if precision == 'single':
        tensor_type = torch.FloatTensor
        tensor_type_gpu = torch.cuda.FloatTensor
    else:
        tensor_type = torch.DoubleTensor
        tensor_type_gpu = torch.cuda.DoubleTensor
    # NOTE(review): this mutates torch's *global* default tensor type as a
    # side effect, which affects all subsequent tensor creation in the
    # process — confirm callers expect that.
    if on_gpu and torch.cuda.is_available():
        torch.set_default_tensor_type(tensor_type_gpu)
    else:
        torch.set_default_tensor_type(tensor_type)
    lscale = kwargs.get('lengthscale')
    isotropic = kwargs.get("isotropic")
    nmix = kwargs.get("n_mixtures")
    if nmix is None:
        nmix = 4
    if lscale is not None:
        # Convert [lower_bounds, upper_bounds] into a gpytorch Interval
        # constraint on the lengthscale(s).
        lscale = gpytorch.constraints.Interval(torch.tensor(lscale[0]),
                                               torch.tensor(lscale[1]))
    # Isotropic kernels share one lengthscale across all dimensions.
    input_dim = 1 if isotropic else input_dim
    # NOTE(review): calling this lambda eagerly instantiates all three
    # kernels (not just the requested one) on every call — wasteful but
    # preserved as-is here.
    kernel_book = lambda input_dim, lscale, **kwargs: {
        'RBF': gpytorch.kernels.RBFKernel(
            ard_num_dims=input_dim,
            lengthscale_constraint=lscale
        ),
        'Matern52': gpytorch.kernels.MaternKernel(
            ard_num_dims=input_dim,
            lengthscale_constraint=lscale
        ),
        'Spectral': gpytorch.kernels.SpectralMixtureKernel(
            ard_num_dims=input_dim,
            num_mixtures=kwargs.get("nmix")
        )
    }
    try:
        kernel = kernel_book(input_dim, lscale, nmix=nmix)[kernel_type]
    except KeyError:
        print('Select one of the currently available kernels:',\
              '"RBF", "Matern52", "Spectral"')
        raise
    return kernel
f9660ba9ef16a816a2377ac90531e6d2705a0659
32,094
def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
685ff34e14c26fcc408e5d4f9219483118bfd3c0
32,095
def down_sample(source, freq_vocab, replacement='', threshold=1e-3, min_freq=0, seed=None, name=None):
    """Randomly down-sample high frequency tokens in `source` with `replacement` value.

    Args:
        source: string `Tensor` or `RaggedTensor` or `SparseTensor` of any shape, items to be sampled.
        freq_vocab: `Counter` with frequencies vocabulary.
        replacement: `string`, value to set instead of downsampled ones
        threshold: `float`, items occurrence threshold.
        min_freq: `int`, items below that frequency will be treated as unique.
        seed: `int`, used to create a random seed (optional).
            See @{tf.random.set_seed} for behavior.
        name: `string`, a name for the operation (optional).

    Returns:
        A boolean `Tensor` of same shape as source: "keep" flags.
    """
    with tf.name_scope(name or 'down_sample'):
        # Normalise the input into a dense / sparse / ragged string tensor.
        if isinstance(source, sparse_tensor.SparseTensorValue) or isinstance(source, sparse_tensor.SparseTensor):
            source = sparse_tensor.convert_to_tensor_or_sparse_tensor(source, dtype=tf.string, name=name)
        else:
            source = ragged_tensor.convert_to_tensor_or_ragged_tensor(source, dtype=tf.string, name=name)
        if not tf.string.is_compatible_with(source.dtype):
            raise RuntimeError('"Source" must have dtype compatible with "string". '
                               'Actual: {}'.format(source.dtype))

        # Composite tensors: recurse on the flat values and rebuild the same
        # structure around the result.
        if isinstance(source, tf.SparseTensor):
            return tf.SparseTensor(
                values=down_sample(source.values, freq_vocab, replacement, threshold, min_freq, seed),
                indices=source.indices,
                dense_shape=source.dense_shape
            )
        elif isinstance(source, tf.RaggedTensor):
            return source.with_flat_values(
                down_sample(source.flat_values, freq_vocab, replacement, threshold, min_freq, seed)
            )

        # Dense case: compute per-item "keep" flags, then substitute
        # `replacement` for the dropped items.
        # NOTE(review): despite the docstring, this returns the down-sampled
        # *string* tensor, not the boolean flags — confirm which is intended.
        keep = sample_mask(
            source=source,
            freq_vocab=freq_vocab,
            threshold=threshold,
            min_freq=min_freq,
            seed=seed,
        )

        return tf.where(keep, source, replacement)
2209bcc48356f4d11c9151a80ab85069c8b6ad5b
32,096
import platform
import pickle

def load_pickle(f):
    """Load a pickle stream, handling Python 2/3 differences."""
    version = platform.python_version_tuple()  # interpreter version tuple
    if version[0] == '2':
        # Python 2: plain load, deserialises to native python types.
        return pickle.load(f)
    if version[0] == '3':
        # Python 3: latin1 lets py2-era byte-string pickles decode cleanly.
        return pickle.load(f, encoding='latin1')
    raise ValueError("invalid python version: {}".format(version))
33db0ba6dbd8b1d2b3eba57e63d4069d91fbcb0b
32,097
def extract_hashtags(text_list):
    """Return a summary dictionary about hashtags in :attr:`text_list`

    Get a summary of the number of hashtags, their frequency, the top
    ones, and more.

    :param list text_list: A list of text strings.
    :returns summary: A dictionary with various stats about hashtags

    >>> posts = ['i like #blue', 'i like #green and #blue', 'i like all']
    >>> hashtag_summary = extract_hashtags(posts)
    >>> hashtag_summary.keys()
    dict_keys(['hashtags', 'hashtags_flat', 'hashtag_counts', 'hashtag_freq',
    'top_hashtags', 'overview'])

    >>> hashtag_summary['hashtags']
    [['#blue'], ['#green', '#blue'], []]

    A simple extract of hashtags from each of the posts. An empty list if
    none exist

    >>> hashtag_summary['hashtags_flat']
    ['#blue', '#green', '#blue']

    All hashtags in one flat list.

    >>> hashtag_summary['hashtag_counts']
    [1, 2, 0]

    The count of hashtags per post.

    >>> hashtag_summary['hashtag_freq']
    [(0, 1), (1, 1), (2, 1)]

    Shows how many posts had 0, 1, 2, 3, etc. hashtags
    (number_of_hashtags, count)

    >>> hashtag_summary['top_hashtags']
    [('#blue', 2), ('#green', 1)]

    >>> hashtag_summary['overview']
    {'num_posts': 3,
     'num_hashtags': 3,
     'hashtags_per_post': 1.0,
     'unique_hashtags': 2}
    """
    # Thin delegate: the generic `extract` helper does the work, driven by
    # the HASHTAG regex and the 'hashtag' key prefix for the summary dict.
    return extract(text_list, HASHTAG, 'hashtag')
688824fbef72c961b48a6bdb001983c25b1e0cee
32,098
def skipif_32bit(param):
    """
    Skip parameters in a parametrize on 32bit systems.

    Specifically used here to skip leaf_size parameters related
    to GH 23440.
    """
    skip_mark = pytest.mark.skipif(
        compat.is_platform_32bit(), reason="GH 23440: int type mismatch on 32bit"
    )
    return pytest.param(param, marks=skip_mark)
575226d01867ae1f898fc821fe1d51de5eb630fb
32,099