content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
def save_str(self='', filename='output.txt', permissions='w'):
    """Save a given string to disk using a given file name.

    Args:
        self (str): String to save to disk. Defaults to ''.
        filename (str): File name to use when saving to disk.
            Defaults to 'output.txt'.
        permissions (str): Mode passed to open(). Defaults to 'w'.

    Returns:
        dict: The parameters used, each wrapped in a one-element list.
    """
    # Coerce inputs to str; a None argument falls back to the documented
    # default instead of raising NameError at write time as before.
    string = '' if self is None else str(self)
    filename = 'output.txt' if filename is None else str(filename)
    permissions = 'w' if permissions is None else str(permissions)
    # Context manager guarantees the handle is closed even if write fails.
    with open(filename, permissions) as file_on_disk:
        file_on_disk.write(string)
    # Return the parameters actually used.
    return {'string': [string], 'filename': [filename],
            'permissions': [permissions]}
87df6f654834409397d63e0313d26c70d1944a11
3,638,100
def balanced(banked_chemicals):
    """Return True if every non-ore chemical has a non-negative amount."""
    for chemical in banked_chemicals:
        if chemical != "ORE" and banked_chemicals[chemical] < 0:
            return False
    return True
c42d492bfc67664040095260c24bbff155e98d5e
3,638,101
import bz2
import json


def dict2json(thedict, json_it=False, compress_it=False):
    """Optionally serialize a dict.

    If ``compress_it`` is set, return the bzip2-compressed JSON bytes.
    Otherwise, if ``json_it`` is set, return the JSON string.
    Otherwise return ``thedict`` unchanged.
    """
    if compress_it:
        payload = json.dumps(thedict).encode()
        return bz2.compress(payload)
    if json_it:
        return json.dumps(thedict)
    return thedict
b6158427c653a00cc6953ce9f0b0a0fb4881bd7a
3,638,102
import argparse


def parse_arguments():
    """Parse command line arguments for the unit-cell minimization script.

    Two sub-commands: ``yaml`` (yaml input only) and ``xml`` (xml/pdb input
    plus MD run options).
    """
    parser = argparse.ArgumentParser(
        description="Python script for minimizing unit cell."
    )
    subparser = parser.add_subparsers(dest='command')
    subparser.required = True

    yaml_parse = subparser.add_parser("yaml")
    xml_parse = subparser.add_parser("xml")

    yaml_parse.add_argument('--input', "-i", type=str,
                            help="Input yaml file", required=True)

    xml_parse.add_argument('--nvt', dest='nvt', action='store_true',
                           default=False, required=False,
                           help="Perform md in nvt only.")
    xml_parse.add_argument('--input', "-i", type=str,
                           help="Input xml file", required=True)
    xml_parse.add_argument('--pdb', "-p", type=str,
                           help="Input pdb file", required=True)
    xml_parse.add_argument('--prefix', "-pre", type=str,
                           help="Output prefix for csv and dcd files.",
                           default="xtal_md", required=False)
    xml_parse.add_argument('--nanoseconds', "-ns", type=int,
                           help="Production length in nanoseconds.",
                           required=False, default=100)
    xml_parse.add_argument('--replicates', "-r", type=int,
                           help="Number of replicates to generate.",
                           required=False, default=10)
    xml_parse.add_argument('--temperature', "-t", type=float,
                           help="Target temperature in md run.",
                           required=False, default=298.15)
    return parser.parse_args()
11e59e4c3f35a042794d07ea6294c4e015245798
3,638,103
import os
import random


def get_image(roidb, config):
    """
    preprocess image and return processed roidb
    :param roidb: a list of roidb
    :return: list of img as in mxnet format
    roidb add new item['im_info']
    0 --- x (width, second dim of im)
    |
    y (height, first dim of im)
    """
    num_images = len(roidb)
    processed_ims = []
    processed_roidb = []
    for i in range(num_images):
        roi_rec = roidb[i]
        # Bug fix: the original mixed %-style and str.format
        # ('%s does not exist'.format(...)), so the assert message was
        # always the literal '%s does not exist'.
        assert os.path.exists(roi_rec['image']), \
            '{} does not exist'.format(roi_rec['image'])
        im = cv2.imread(roi_rec['image'],
                        cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
        if roidb[i]['flipped']:
            # Horizontal flip (reverse the width axis).
            im = im[:, ::-1, :]
        new_rec = roi_rec.copy()
        # Pick a random target scale from the configured list.
        scale_ind = random.randrange(len(config.SCALES))
        target_size = config.SCALES[scale_ind][0]
        max_size = config.SCALES[scale_ind][1]
        im, im_scale = resize(im, target_size, max_size,
                              stride=config.network.IMAGE_STRIDE)
        im_tensor = transform(im, config.network.PIXEL_MEANS)
        processed_ims.append(im_tensor)
        # im_info = [height, width, scale] taken from the transformed tensor.
        im_info = [im_tensor.shape[2], im_tensor.shape[3], im_scale]
        new_rec['boxes'] = clip_boxes(
            np.round(roi_rec['boxes'].copy() * im_scale), im_info[:2])
        new_rec['im_info'] = im_info
        processed_roidb.append(new_rec)
    return processed_ims, processed_roidb
a6c9c2fb79f8158ee286437a7ac47047ded3c71f
3,638,104
def compoundedInterest(fv, p):
    """Compounded interest.

    Returns:
        Interest value (future value minus principal).

    Input values:
        fv : Future value
        p : Principal
    """
    return fv - p
00f7fd1f141293afe595393eca23c308d3fdd7d0
3,638,105
def get_volumetric_scene(self, data_key="total", isolvl=0.5, step_size=3, **kwargs):
    """Get the Scene object which contains a structure and an isosurface component.

    Args:
        data_key (str, optional): Use the volumetric data from
            self.data[data_key]. Defaults to 'total'.
        isolvl (float, optional): The cutoff for the isosurface; uses the
            same units as VESTA (e/bohr) and is kept grid-size independent.
        step_size (int, optional): step_size parameter for
            marching_cubes_lewiner. Defaults to 3.
        **kwargs: kwargs for the Structure.get_scene function.

    Returns:
        The structure scene with the isosurface scene appended to its contents.
    """
    structure_scene = self.structure.get_scene(**kwargs)
    isosurface_scene = self.get_isosurface_scene(
        data_key=data_key,
        isolvl=isolvl,
        step_size=step_size,
        origin=structure_scene.origin,
    )
    # Attach the isosurface to the structure scene so both render together.
    structure_scene.contents.append(isosurface_scene)
    return structure_scene
836fb5f3158ed5fe55a2975ce05eb21636584a95
3,638,106
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OrdinalEncoder


def encode_labels(x, features):
    """Map string-valued columns of ``x`` (selected by ``features``) to
    integer codes, in place, and return ``x``."""
    transformer = ColumnTransformer([("", OrdinalEncoder(), features)], n_jobs=-1)
    encoded = transformer.fit_transform(x)
    x[:, features] = encoded
    return x
8f318020f663a88733dea9389627cb62d483892c
3,638,107
def get_jogframe(
    conx: Connection, idx: int, group: int = 1, include_comment: bool = False
) -> t.Tuple[Position_t, t.Optional[str]]:
    """Return the jog frame at index 'idx'.

    :param idx: Numeric ID of the jog frame (1..5).
    :type idx: int
    :param group: Numeric ID of the motion group (1..8) the jog frame is
        associated with.
    :type group: int
    :returns: A tuple of the jog frame and its comment (None unless
        ``include_comment`` is set).
    :rtype: tuple(Position_t, str)
    """
    # Validate both IDs before touching the controller.
    if not 1 <= group <= 8:
        raise ValueError(
            f"Requested group id invalid (must be between 1 and 8, got: {group})"
        )
    if not 1 <= idx <= 5:
        raise ValueError(
            f"Requested jog frame idx invalid (must be between 1 and 5, got: {idx})"
        )
    varname = f'[TPFDEF]JOGFRAMES[{group},{idx}]'
    frame = _get_frame_var(conx, varname)
    comment = None
    if include_comment:
        JOGFRAME = 2  # frame-type discriminator used by the comment lookup
        comment = _get_frame_comment(conx, frame_type=JOGFRAME, group=group, idx=idx)
    return (frame, comment)
9a1a215311d111c262ab9810c5933a07c39d4def
3,638,108
def rotate(arr, bins):
    """Return an array rotated by 'bins' places to the left.

    :param list arr: Input data
    :param int bins: Number of bins to rotate by
    """
    shift = bins % len(arr)
    if shift == 0:
        # No-op rotation: hand back the input unchanged.
        return arr
    return np.concatenate((arr[shift:], arr[:shift]))
0ac038377ed173130d83ef5231bd9678058de28a
3,638,109
from collections import OrderedDict
from typing import Iterable, List, Optional, TypeVar

# Fix: `Typed` was previously undefined, so evaluating the annotations at
# definition time raised NameError; `OrderedDict` was also imported from
# `typing`, which is only meant for annotations, not runtime construction.
Typed = TypeVar("Typed")


def clear_list(items: Iterable[Optional[Typed]]) -> List[Typed]:
    """Return unique non-None items in order of first occurrence."""
    return list(OrderedDict.fromkeys(i for i in items if i is not None))
511bbfb6b567d494a143fb1e1ee06c39f54c47e8
3,638,110
def chords(labels):
    """
    Transform a list of chord labels into an array of internal numeric
    representations.

    Parameters
    ----------
    labels : list
        List of chord labels (str).

    Returns
    -------
    chords : numpy.array
        Structured array with columns 'root', 'bass', and 'intervals',
        containing a numeric representation of chords (`CHORD_DTYPE`).
    """
    result = np.zeros(len(labels), dtype=CHORD_DTYPE)
    # Memoize parsed labels: repeated labels are parsed only once.
    seen = {}
    for pos, label in enumerate(labels):
        if label not in seen:
            seen[label] = chord(label)
        result[pos] = seen[label]
    return result
53d14ab6318dfaeeb77d0dd5f815c8c2f3359918
3,638,111
def reduce_aet_if_dry(aet, wat_lev, fc):
    """
    Reduce actual evapotranspiration if the soil is dry.

    Where the water level in a cell is below 0.7*fc, AET is scaled by a
    factor of wat_lev/(0.7*fc); the factor is 1 at wat_lev = 0.7*fc and
    falls linearly to 0 at wat_lev = 0. Cells at or above the threshold
    are left unchanged.

    Args:
        aet: "Raw" actual evapotranspiration grid.
        wat_lev: Water level grid.
        fc: Soil field capacity grid.

    Returns:
        Array (modified AET grid with AET reduced where necessary).
    """
    threshold = 0.7 * fc
    # Boolean mask of cells that are dry enough to need correcting.
    needs_correction = wat_lev < threshold
    # (factor - 1) everywhere; multiplying by the mask zeroes wet cells,
    # and adding 1 restores a factor of exactly 1 for them.
    factor_minus_one = (wat_lev / threshold) - 1
    factors = needs_correction * factor_minus_one + 1
    return aet * factors
170462a23c3903a390b89963aa6ce21839e5d44b
3,638,112
def merge_sort(array):
    """
    Sort array via merge sort algorithm

    Args:
        array: list of elements to be sorted

    Returns:
        Sorted list of elements (always a new list; input is not mutated)

    Examples:
        >>> merge_sort([1, -10, 21, 3, 5])
        [-10, 1, 3, 5, 21]
    """
    # Base case covers the empty list too: the original `len(array) == 1`
    # check recursed forever on [] (mid == 0 splits into [] and []).
    if len(array) <= 1:
        return array[:]
    mid = len(array) // 2
    left = merge_sort(array[:mid])
    right = merge_sort(array[mid:])
    return _merge(left, right)
9730f88be4a54334bac801dc4713bf20b683f824
3,638,113
def create_monitored_session(target: tf.train.Server,
                             task_index: int,
                             checkpoint_dir: str,
                             save_checkpoint_secs: int,
                             config: tf.ConfigProto=None) -> tf.Session:
    """
    Create a monitored session for the worker.

    :param target: the target string for the tf.Session
    :param task_index: the task index of the worker
    :param checkpoint_dir: a directory path where the checkpoints will be stored
    :param save_checkpoint_secs: number of seconds between checkpoints storing
    :param config: the tensorflow configuration (optional)
    :return: the session to use for the run
    """
    # By convention, task 0 acts as the chief.
    is_chief = (task_index == 0)
    return tf.train.MonitoredTrainingSession(
        master=target,
        is_chief=is_chief,
        hooks=[],
        checkpoint_dir=checkpoint_dir,
        save_checkpoint_secs=save_checkpoint_secs,
        config=config
    )
f963e61cf57aa602a9a9397104a935b5a17a6dc1
3,638,114
def sun_rise_set_times(datetime_index, coords):
    """
    Return sunrise and set times for the given datetime_index and coords,
    as a Series indexed by date (days, resampled from the datetime_index).
    """
    observer = ephem.Observer()
    observer.lat = str(coords[0])
    observer.lon = str(coords[1])
    # Collapse the (possibly sub-daily) index to its unique calendar dates.
    unique_dates = datetime_index.to_series().map(pd.Timestamp.date).unique()
    daily_index = pd.DatetimeIndex(unique_dates)
    return pd.Series(_sun_rise_set(daily_index, observer), index=daily_index)
9c2dfb3c7c86c98144b23788b0b399899724c8ff
3,638,115
def get_n1_event_format(): """ Define the format for the events in a neurone recording. Arguments: None. Returns: - A Struct (from the construct library) describing the event format. """ # Define the data format of the events # noinspection PyUnresolvedReferences return Struct( "Revision" / Int32sl, "RFU1" / Int32sl, "Type" / Int32sl, "SourcePort" / Int32sl, "ChannelNumber" / Int32sl, "Code" / Int32sl, "StartSampleIndex" / Int64ul, "StopSampleIndex" / Int64ul, "DescriptionLength" / Int64ul, "DescriptionOffset" / Int64ul, "DataLength" / Int64ul, "DataOffset" / Int64ul, "RFU2" / Int32sl, "RFU3" / Int32sl, "RFU4" / Int32sl, "RFU5" / Int32sl )
8663c4b8ba8d83e10ed6dc03a35589c74cd23420
3,638,116
def idzp_rid(eps, m, n, matveca):
    """
    Compute ID of a complex matrix to a specified relative precision using
    random matrix-vector multiplication.

    :param eps: Relative precision.
    :type eps: float
    :param m: Matrix row dimension.
    :type m: int
    :param n: Matrix column dimension.
    :type n: int
    :param matveca: Function to apply the matrix adjoint to a vector, with
        call signature `y = matveca(x)`, where `x` and `y` are the input and
        output vectors, respectively.
    :type matveca: function

    :return: Rank of ID.
    :rtype: int
    :return: Column index array.
    :rtype: :class:`numpy.ndarray`
    :return: Interpolation coefficients.
    :rtype: :class:`numpy.ndarray`
    """
    # Fortran-ordered complex workspace sized for the backend routine.
    workspace = np.empty(
        m + 1 + 2*n*(min(m, n) + 1), dtype=np.complex128, order='F')
    k, idx, workspace, ier = _id.idzp_rid(eps, m, n, matveca, workspace)
    if ier:
        raise _RETCODE_ERROR
    # The first k*(n-k) entries of the workspace hold the coefficients.
    coefs = workspace[:k*(n-k)].reshape((k, n-k), order='F')
    return k, idx, coefs
7655afb9b5c29f395ca6c5c83baaa8592f68d124
3,638,117
def update_position(request, space_url):
    """
    This view saves the new note position in the debate board. Instead of
    reloading the whole note form, it uses the partial form
    "UpdateNotePosition" which only handles the column and row of the note.
    """
    place = get_object_or_404(Space, url=space_url)
    # Only AJAX POSTs are accepted.
    if request.method != "POST" or not request.is_ajax():
        return HttpResponseBadRequest(_("The petition was not POST."))
    note = get_object_or_404(Note, pk=request.POST['noteid'])
    debate = get_object_or_404(Debate, pk=note.debate.id)
    position_form = UpdateNotePosition(request.POST or None, instance=note)
    # Space/debate admins and mods, or the note's author, may move notes.
    allowed = (
        request.user.has_perm('admin_space', place)
        or request.user.has_perm('mod_space', place)
        or request.user.has_perm('admin_debate', debate)
        or request.user.has_perm('mod_debate', debate)
        or request.user == note.author
    )
    if not allowed:
        raise PermissionDenied
    if not position_form.is_valid():
        return HttpResponseBadRequest(
            _("There has been an error validating the form."))
    updated_note = position_form.save(commit=False)
    updated_note.column = get_object_or_404(Column, pk=request.POST['column'])
    updated_note.row = get_object_or_404(Row, pk=request.POST['row'])
    updated_note.save()
    return HttpResponse(_("Note updated"))
29cc55ad30c8cdf6a326381a9b267aaf515059b6
3,638,118
def tan(x):
    """Return the tangent of *x* radians.

    NOTE(review): stub implementation — always returns 0.0 regardless of
    *x*; presumably a placeholder for a real math.tan binding. Confirm
    before relying on the result.
    """
    return 0.0
51a8f497b2cc81cfd0066a7f8f5b1afef362941e
3,638,119
def entropy(p):
    """
    Calculates the Shannon entropy for a marginal distribution.

    Args:
        p (np.ndarray): the marginal distribution.

    Returns:
        (float): the entropy of p
    """
    # Zero-probability entries contribute nothing by definition
    # (0 * log 0 := 0); drop them to avoid log-of-zero warnings.
    nonzero = p[p != 0]
    raw = -np.dot(nonzero, np.log(nonzero))
    # Filter against machine epsilon
    return _eps_filter(raw)
d9deb56211069e70ee688ec7cf9cea4cb6507d2a
3,638,120
def format_to_str(*a, **kwargs):
    """Format the given objects into a single space-separated string.

    Keyword Args:
        keepNewlines (bool): when False, replace newlines in the result
            with the literal '*nl*'. Defaults to True.

    Returns:
        str: the joined representation of all positional arguments.
    """
    # Bug fix: the original only defaulted keepNewlines when kwargs was
    # completely empty, so any other keyword argument caused a KeyError.
    keep_newlines = kwargs.get('keepNewlines', True)
    parts = []
    for item in a:
        if isinstance(item, str):
            parts.append(item)
        elif hasattr(item, "itemType"):
            # Project objects render as "<type:pointer>".
            parts.append("<" + item.itemType + ":" + item.itemModelPointer + ">")
        else:
            # Lists/dicts/tuples and everything else fall back to str().
            parts.append(str(item))
    result = " ".join(parts)
    if not keep_newlines:
        result = result.replace("\n", "*nl*")
    return result
066262f6059a7f146026b1bc638b9119e2c34718
3,638,121
from typing import List
import re
import os


def get_all_files_in_tree_with_regex(basedir: str, regex_str: str) -> List[str]:
    """
    Return paths of all files under ``basedir`` whose file name matches
    ``regex_str`` (matched with ``re.match``, i.e. anchored at the start).
    Walks the entire tree of basedir.
    """
    pattern = re.compile(regex_str)
    data_files = []
    for root, _dirs, files in os.walk(basedir):
        for fname in files:
            if pattern.match(fname):
                # Lazy %-style args: the message is only formatted when the
                # logger actually emits the record (was eagerly formatted).
                logger.info("f: %s -- matches regex: %s", fname, regex_str)
                data_files.append(os.path.join(root, fname))
    return data_files
20467b7217e02cd2feec88c77e98e731bf393b3d
3,638,122
def zero_corrected_countless(data): """ Vectorized implementation of downsampling a 2D image by 2 on each side using the COUNTLESS algorithm. data is a 2D numpy array with even dimensions. """ # allows us to prevent losing 1/2 a bit of information # at the top end by using a bigger type. Without this 255 is handled incorrectly. data = data + 1 # don't use +=, it will affect the original data. sections = [] # This loop splits the 2D array apart into four arrays that are # all the result of striding by 2 and offset by (0,0), (0,1), (1,0), # and (1,1) representing the A, B, C, and D positions from Figure 1. factor = (2, 2) for offset in np.ndindex(factor): part = data[tuple(np.s_[o::f] for o, f in zip(offset, factor))] sections.append(part) a, b, c, d = sections ab = a * (a == b) # PICK(A,B) ac = a * (a == c) # PICK(A,C) bc = b * (b == c) # PICK(B,C) a = ab | ac | bc # Bitwise OR, safe b/c non-matches are zeroed result = a + (a == 0) * d - 1 # a or d - 1 return result
7b0dc08f0233f929b572d555ad611c8bf795bbfd
3,638,123
from typing import Union


def mnn_synthetic_data(
    n_samples: int = 1000,
    n_features: int = 100,
    n_batches: int = 2,
    n_latent: int = 2,
    n_classes: int = 3,
    proportions: np.ndarray = None,
    sparsity: float = 1.0,
    scale: Union[int, float] = 5,
    batch_scale: float = 0.1,
    bio_batch_angle: Union[float, None] = None,
    seed: int = 2018,
):
    """
    :param n_samples: number of samples (cells) per batch
    :param n_features: number of features (genes)
    :param n_batches: number of batches
    :param n_latent: size of the latent space used to generate data
    :param n_classes: number of classes shared across batches
    :param proportions: proportion of cells from each class in each batch.
        If shape is (n_classes,) same proportions used each time. If shape
        is (n_batches, n_classes) then each row is a different batch.
        Default is equal representation
    :param sparsity: sparsity of class weightings
    :param scale: scaling factor for generating data
    :param batch_scale: batch effect relative to data
    :param bio_batch_angle: angle of batch effect w/ bio subspace
    :param seed: seed for random number generator
    :return: real-valued expression data with batch effect and metadata
    """
    # Default: equal class representation in every batch; otherwise
    # broadcast the given proportions up to one row per batch.
    if proportions is None:
        proportions = np.ones((n_batches, n_classes)) / n_classes
    else:
        proportions = np.broadcast_to(proportions, (n_batches, n_classes))
    # NOTE(review): seed=0 is treated as "do not seed" by this truthiness
    # test — confirm that is intended.
    if seed:
        np.random.seed(seed)
    # Class centres in latent space, shared by all batches.
    class_centers = latent.gen_classes(n_latent, n_classes, sparsity, scale)
    # Batch label for every sample (n_samples per batch).
    batches = np.repeat(np.arange(n_batches), n_samples)
    latent_exp = []
    classes = []
    for b in range(n_batches):
        b_latent, b_classes = latent.sample_classes(
            n_samples, class_centers, proportions[b, :]
        )
        latent_exp.append(b_latent)
        classes.append(b_classes)
    latent_exp = np.vstack(latent_exp)
    classes = np.hstack(classes)
    # Map latent samples into feature (gene) space.
    programs = latent.gen_programs(n_latent, n_features, 1.0, 1.0)
    expression = np.dot(latent_exp, programs)
    # Projector onto the biological subspace spanned by the programs,
    # used to orient the batch-effect vectors.
    projection_to_bio = np.dot(np.linalg.pinv(programs), programs)
    expression_w_batch = batch.add_batch_vectors(
        expression, batches, batch_scale, bio_batch_angle,
        projection_to_bio, copy=True
    )
    # Bundle everything into an AnnData object with metadata layers.
    adata = util.arrays_to_anndata(
        expression_w_batch, batches, classes, X_latent=latent_exp,
        X_gt=expression
    )
    return adata
e3e696cfb1e207db2159cdc512bc5b4d8f24e47b
3,638,124
def create_labeled_pair(img, gt_center, prop_center, gt_radius, scale):
    """
    Given a crater proposal and ground truth label, create a labeled pair.

    Returns X, Y, where X is an image and Y is a set of ground truths.

    img: an array
    gt_center: the known ground-truth center point (x, y) (floats)
    prop_center: the crater proposal center (x, y) (ints)
    gt_radius: the known ground-truth crater radius in pixels (float)
    scale: one of [32, 64, 128, 256] (very rough size of crater)
        follows the scheme:
        0<r<8   --> scale=32
        8<r<16  --> scale=64
        16<r<32 --> scale=128
        32<r    --> scale=256
        (craters bigger than r=64 not supported)
    """
    permitted_scales = [32, 64, 128, 256]
    if scale not in permitted_scales:
        msg = f"scale {scale} not permitted. Please use one of: "
        msg += str(permitted_scales)
        raise Exception(msg)
    # Offsets are expressed relative to the 32-pixel base scale.
    scale_factor = scale//32
    dx = (gt_center[0] - prop_center[0])/scale_factor
    dy = (gt_center[1] - prop_center[1])/scale_factor
    Y = (dx, dy, gt_radius/scale)
    X = extract_proposal(img, prop_center, scale)
    return X, Y
b65bc6234183794c432377f8475833dc1f8b72c7
3,638,125
def loader_to_dask(loader_array):
    """
    Map a call to `dask.array.from_array` onto all the elements in
    ``loader_array``.

    An explicit ``meta=`` argument is provided so dask does not load data
    from disk just to infer array properties.
    """
    if len(loader_array.shape) != 1:
        raise ValueError("Can only be used on one dimensional arrays")
    # Empty array of the right dtype tells dask the element properties
    # without touching the real data on disk.
    meta = np.zeros((0,), dtype=loader_array[0].dtype)
    return map(partial(da.from_array, meta=meta), loader_array)
75039dac3f5ed21e6c1a0b8b5445af30757267cf
3,638,126
import re


def list_to_sentences(string):
    """
    Split text at newlines and join the fragments back together with
    periods, stripping newlines and enumeration symbols along the way.
    """
    if string is None:
        return None

    fragments = []
    current = ''

    def _flush():
        # Collect the current fragment, dropping any trailing period.
        fragments.append(re.sub(r'\.\s*$', '', current))

    for raw_line in string.splitlines():
        line = raw_line.strip()
        if not line:
            # Blank line: terminate the current fragment.
            if current:
                _flush()
            current = ''
        elif not current:
            # Start a new fragment, stripping leading enumeration symbols.
            current = re.sub(r'^[-\d\.\(\)]+\s*', '', line)
        elif (re.match(r'^-\s+', line)
                or re.match(r'^\d+\.\s+', line)
                or re.match(r'^(-\s*)?\d+\)\s+', line)):
            # New list item ("-", "1." or "1)" with optional dash):
            # close the previous fragment and start a fresh one.
            if current:
                _flush()
            current = re.sub(r'^(-|(\d+\.)|((-\s*)?\d+\)))\s*', '', line)
        else:
            # Continuation of the previous fragment.
            current = '%s %s' % (current, line)

    if current:
        _flush()

    sentences = '. '.join(fragments) if fragments else ''
    if sentences:
        sentences += '.'
    return sentences
3f155bf501d78cb9263a9cbb0b6d7e4102daeb53
3,638,127
def decode_locations_one_layer(anchors_one_layer, offset_bboxes):
    """decode the offset bboxes into center bboxes

    Args:
        anchors_one_layer: ndarray represents all anchors coordinate in one
            layer, encode by [y,x,h,w]
        offset_bboxes: A tensor with any shape, the shape of lowest axis
            must be 4, means the offset val in [y,x,h,w]
    Return:
        the locations of bboxes encode by [y,x,h,w]
    """
    shape = offset_bboxes.get_shape().as_list()
    # Replace a single unknown (None) dimension with -1 so the final
    # tf.reshape back to the original shape works.
    try:
        i = shape.index(None)
        shape[i] = -1
    except ValueError:
        pass
    # Flatten to (batch, num_anchors, 4) for vectorized decoding.
    offset_bboxes = tf.reshape(offset_bboxes,shape=tf.stack([shape[0], -1, shape[-1]]))
    # Anchors arrive as corner references plus sizes; rebuild corners first.
    yref, xref, href, wref = anchors_one_layer
    ymin = yref - href / 2.
    xmin = xref - wref / 2.
    ymax = yref + href / 2.
    xmax = xref + wref / 2.
    anchor_ymin = np.float32(ymin)
    anchor_xmin = np.float32(xmin)
    anchor_ymax = np.float32(ymax)
    anchor_xmax = np.float32(xmax)
    # Transform to center / size.
    anchor_cy = (anchor_ymax + anchor_ymin) / 2.
    anchor_cx = (anchor_xmax + anchor_xmin) / 2.
    anchor_h = anchor_ymax - anchor_ymin
    anchor_w = anchor_xmax - anchor_xmin
    ## reshape to -1 ##
    anchor_cy = np.reshape(anchor_cy,[-1])
    anchor_cx = np.reshape(anchor_cx, [-1])
    anchor_h = np.reshape(anchor_h, [-1])
    anchor_w = np.reshape(anchor_w, [-1])
    # Standard SSD-style decode: offsets scale/shift the anchor centers,
    # exp() turns log-size offsets back into sizes.
    bboxes_cy = offset_bboxes[:, :, 0] * anchor_h + anchor_cy
    bboxes_cx = offset_bboxes[:, :, 1] * anchor_w + anchor_cx
    bboxes_h = tf.exp(offset_bboxes[:, :, 2]) * anchor_h
    bboxes_w = tf.exp(offset_bboxes[:, :, 3]) * anchor_w
    cbboxes_out = tf.stack([bboxes_cy, bboxes_cx, bboxes_h, bboxes_w], axis=-1)
    # Restore the caller's original tensor shape.
    cbboxes_out = tf.reshape(cbboxes_out, shape=shape)
    return cbboxes_out
96df6f6b9756cda991e47aea226ea64d5de8648c
3,638,128
def FormatReserved(enum_or_msg_proto):
    """Format reserved values/names in a [Enum]DescriptorProto.

    Args:
      enum_or_msg_proto: [Enum]DescriptorProto message.

    Returns:
      Formatted enum_or_msg_proto as a string.
    """
    out = ''
    if enum_or_msg_proto.reserved_range:
        # Expand each reserved range [start, end) into individual numbers.
        numbers = []
        for rr in enum_or_msg_proto.reserved_range:
            numbers.extend(range(rr.start, rr.end))
        out = FormatBlock('reserved %s;\n' % ','.join(map(str, numbers)))
    if enum_or_msg_proto.reserved_name:
        quoted = ', '.join('"%s"' % n for n in enum_or_msg_proto.reserved_name)
        out += FormatBlock('reserved %s;\n' % quoted)
    return out
56b3ad5c2d31a901847c50ad05bd324ca366f101
3,638,129
def download_pepper(load=True):  # pragma: no cover
    """Download scan of a pepper (capsicum).

    Originally obtained from Laser Design.

    Parameters
    ----------
    load : bool, optional
        Load the dataset after downloading it when ``True``.  Set this
        to ``False`` and only the filename will be returned.

    Returns
    -------
    pyvista.PolyData or str
        DataSet or filename depending on ``load``.

    Examples
    --------
    >>> from pyvista import examples
    >>> dataset = examples.download_pepper()
    >>> dataset.plot()
    """
    # Delegates download + optional read to the shared example-data helper.
    return _download_and_read('pepper.ply', load=load)
3d35c19a5eb36d8a393076b212c5a9789ff61625
3,638,130
def getAllSerial():
    """Return serials of all devices reported by ``adb devices``.

    Serials are sorted by length (shortest first).
    """
    _, msgs = shell_command("adb devices")
    # Only lines ending in "\tdevice\n" describe connected devices.
    device_lines = (line for line in msgs if "\tdevice\n" in line)
    serials = [line.split()[0] for line in device_lines]
    return sorted(serials, key=len)
a6a6fd93c4bd27babbca2a15999e19985677e0a1
3,638,131
import os
from pathlib import Path


def create_directory(list_path_proj: list, dir_name: str):
    """Create ``dir_name`` under every path in ``list_path_proj``.

    Each base path is concatenated (not joined) with ``dir_name``, so base
    paths are expected to end with a separator.

    :return: list of the directory paths created.
    """
    created = []
    print('\nDirectory created at')
    for base in list_path_proj:
        # Plain concatenation, matching the original single-argument
        # os.path.join (which returns its argument unchanged).
        target = base + dir_name
        Path(target).mkdir(parents=True, exist_ok=True)
        created.append(target)
        print(target)
    return created
99719b6c0f64c474d67b6ec896b281088e58ef3f
3,638,132
import io


def plot_points(points):
    """Generate a plot with a varying number of randomly generated points.

    Args:
        points (int): a number of points to plot

    Returns:
        An svg plot with <points> data points
    """
    # Random (x, y) data for plotting. (The original had a dead
    # `data = np.random` assignment here, immediately overwritten.)
    data = np.random.rand(points, 2)
    fig = Figure()
    FigureCanvas(fig)
    ax = fig.add_subplot(111)
    ax.scatter(data[:, 0], data[:, 1])
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_title(f'There are {points} data points!')
    ax.grid(True)
    img = io.StringIO()
    fig.savefig(img, format='svg')
    # clip off the xml headers from the image
    svg_img = '<svg' + img.getvalue().split('<svg')[1]
    return svg_img
1b3bba3e48ef252e80ad7895ce596e9deb9a0c86
3,638,133
def bisect_status():
    """Reproduce the status line git-bisect prints after each step."""
    revisions_left = ceil((bisect_revisions() - 1) / 2)
    steps_left = bisect_steps_remaining() - 1
    return ("Bisecting: {} revisions left to test after this "
            "(roughly {} steps).".format(revisions_left, steps_left))
7ddca3f9de9de3775a52aac1b03a3383ebc487df
3,638,134
def echo_view():
    """Call echo() with the Flask request.

    NOTE(review): relies on an active Flask request context; ``flask`` and
    ``echo`` are imported elsewhere in this module.
    """
    return echo(flask.request)
f51f9f6b2f1f58abcc6f1a0ed9a28056c092f289
3,638,135
def read_bytes_offset_file(f, n_bytes, v=0):
    """Read and discard ``n_bytes`` bytes from a binary file.

    Used to skip some offset when reading a binary file.

    Parameters
    ----------
    f : file handler (sys.stdin).
    n_bytes : int
        Number of bytes to skip. (Docstring previously documented a
        nonexistent ``n_words`` parameter.)
    v : int [0 by default]
        verbose mode if 1.

    Returns
    -------
    list
        Always an empty list; the bytes read are discarded.
    """
    try:
        # Read (and discard) the requested number of bytes; the unused
        # words_array binding from the original is removed.
        np.fromfile(file=f, dtype=np.uint8, count=n_bytes)
        if v == 1:
            print("vdif - Read " + str(n_bytes))
    except EOFError:
        if v == 1:
            print("vdif - Tried to read " + str(n_bytes))
    return []
3f68b4ff22dba97e89cc7b036f63655f3d6b756d
3,638,136
# Bug fix: the original did `from datetime import datetime` and then called
# `datetime.date.today()`, which raises AttributeError (the datetime *class*
# has no `date.today`). Importing the module itself makes that call valid.
import datetime


def person_relationship_dates(node):
    """Find the nearest start/end dates related to a node's person.

    Returns a dict with 'start_date' and 'end_date' keys (values may be
    None when no dates can be determined).
    """
    person = node.people.single()
    rel = node.people.relationship(person)
    if rel.start_date is not None:
        return {'start_date': rel.start_date, 'end_date': rel.end_date}
    # No direct dates: for a Project, derive the range from the dates of
    # all roles associated with it.
    if isinstance(node, Project):
        start_dates = []
        end_dates = []
        for role in node.roles:
            rel = role.people.relationship(person)
            if rel.start_date is not None:
                start_dates.append(rel.start_date)
            if rel.end_date is not None:
                end_dates.append(rel.end_date)
        dates = {
            'start_date': min(start_dates) if start_dates else None,
            'end_date': max(end_dates) if end_dates else None,
        }
        # Roles that started but never ended are treated as ongoing.
        if start_dates and not end_dates:
            dates['end_date'] = datetime.date.today()
        return dates
    return {'start_date': None, 'end_date': None}
cd16b3f69979f0d516cb28313ac0dabda9416f30
3,638,137
def oops():
    """Lazy way to return an oops response."""
    # 400 Bad Request with a constant 'oops' body.
    return make_response('oops', 400)
e52fb6621ff9fdba48d2d5b81a8cbbb5241eeac8
3,638,138
def get_lowest_energy_conformer(
    name,
    mol,
    gfn_exec=None,
    settings=None,
):
    """
    Get lowest energy conformer of molecule.

    Method:
        1) ETKDG conformer search on molecule
        2) xTB `normal` optimisation of each conformer
        3) xTB `opt_level` optimisation of lowest energy conformer
        4) save file

    Raises:
        MissingSettingError: if a provided ``settings`` dict lacks any of
            the required keys.
    """
    # Defaults match the documented method; only used when the caller
    # passes no settings at all.
    if settings is None:
        settings = {
            'conf_opt_level': 'normal',
            'final_opt_level': 'extreme',
            'charge': 0,
            'no_unpaired_e': 0,
            'max_runs': 1,
            'calc_hessian': False,
            'solvent': None,
            'N': 100
        }
    # Check for missing settings.
    req_settings = [
        'N', 'final_opt_level', 'charge', 'no_unpaired_e', 'max_runs',
        'calc_hessian', 'solvent', 'conf_opt_level'
    ]
    for i in req_settings:
        if i not in settings:
            raise MissingSettingError(
                f'Settings missing {i}. Has {settings.keys()}.'
            )
    # Run ETKDG on molecule.
    print(f'....running ETKDG on {name}')
    cids, confs = build_conformers(mol, N=settings['N'])
    # Optimize all conformers at normal level with xTB, tracking the
    # lowest-energy conformer seen so far.
    low_e_conf_id = -100
    low_e = 10E20
    for cid in cids:
        name_ = f'{name}_confs/c_{cid}'
        ey_file = f'{name}_confs/c_{cid}_eyout'
        mol = update_from_rdkit_conf(
            mol, confs, conf_id=cid
        )
        mol.write(f'{name}_confs/c_{cid}.mol')
        # Optimize.
        opt_mol = optimize_conformer(
            name=name_,
            mol=mol,
            gfn_exec=gfn_exec,
            opt_level=settings['conf_opt_level'],
            charge=settings['charge'],
            no_unpaired_e=settings['no_unpaired_e'],
            max_runs=settings['max_runs'],
            calc_hessian=settings['calc_hessian'],
            solvent=settings['solvent']
        )
        opt_mol.write(f'{name}_confs/c_{cid}_opt.mol')
        # Get energy.
        calculate_energy(
            name=name_,
            mol=opt_mol,
            gfn_exec=gfn_exec,
            ey_file=ey_file,
            charge=settings['charge'],
            no_unpaired_e=settings['no_unpaired_e'],
            solvent=settings['solvent']
        )
        ey = read_gfnx2xtb_eyfile(ey_file)
        if ey < low_e:
            print(
                'lowest energy conformer updated with energy: '
                f'{ey}, id: {cid}'
            )
            low_e_conf_id = cid
            low_e = ey
    # Get lowest energy conformer back from the file written above.
    low_e_conf = stk.BuildingBlock.init_from_file(
        f'{name}_confs/c_{low_e_conf_id}_opt.mol'
    )
    low_e_conf.write(f'{name}_confs/low_e_unopt.mol')
    # Optimize lowest energy conformer at (tighter) final opt_level.
    low_e_conf = optimize_conformer(
        name=name+'low_e_opt',
        mol=low_e_conf,
        gfn_exec=gfn_exec,
        opt_level=settings['final_opt_level'],
        charge=settings['charge'],
        no_unpaired_e=settings['no_unpaired_e'],
        max_runs=settings['max_runs'],
        calc_hessian=settings['calc_hessian'],
        solvent=settings['solvent']
    )
    low_e_conf.write(f'{name}_confs/low_e_opt.mol')
    # Return molecule.
    return low_e_conf
e09cb7ed755e2a94075c2206ebd5d7600d633657
3,638,139
def dictize_params(params):
    """Copy ``params`` into a plain ``dict``.

    Fix: ``iteritems()`` is Python 2 only; ``items()`` works on modern
    dicts and dict-like objects (e.g. Django QueryDicts).
    """
    return {key: value for key, value in params.items()}
4847815622b0855b1056361bff7f7ee02fe6d97a
3,638,140
def dot(x, y, sparse=False):
    """Wrapper for tf.matmul (sparse vs dense)."""
    if sparse:
        # Sparse x requires the dedicated sparse-dense kernel.
        return tf.sparse_tensor_dense_matmul(x, y)
    return tf.matmul(x, y)
fd904bbbaf09ea3207ed8dfa47a17f30ed640ff7
3,638,141
def get_python3_status(classifiers):
    """Return True if any classifier declares Python 3 support."""
    # find(...) == 0 in the original is equivalent to startswith().
    return any(
        classifier.startswith('Programming Language :: Python :: 3')
        for classifier in classifiers
    )
b4bf347dc0bbf3e9a198baa8237f7820cbb86e0b
3,638,142
def drawBeta(s, w, size=1):
    """Draw beta from its distribution (Eq.9 Rasmussen 2000) using ARS.

    Made robust by expanding the initial abscissa range and retrying when
    ARS construction fails.

    Args:
        s: statistic passed through to fbeta/fbetaprima.
        w: weights passed through to fbeta/fbetaprima.
        size: number of samples to draw (default 1).

    Returns:
        Samples drawn from the ARS sampler.
    """
    # nd = w.shape[0]  # for multi-dimensional data
    lb = 0.0
    cnt = 0
    while True:
        # Widen the abscissa range on each failed attempt.
        xi = lb + np.logspace(-3 - cnt, 1 + cnt, 200)
        try:
            ars = ARS(fbeta, fbetaprima, xi=xi, lb=0.0, ub=np.inf,
                      s=s, w=w)
            break
        # Fix: the original bare `except:` also swallowed
        # KeyboardInterrupt/SystemExit; catch Exception and retry instead.
        except Exception:
            cnt += 1
    # draw beta
    return ars.draw(size)
f0a7826a8411dfc224b2b161d1372984630647e8
3,638,143
def get_unique_map_to_pullback(p, p_a, p_b, z_a, z_b):
    """Find a unique map to pullback."""
    z_p = dict()
    for value in p:
        # Keys of z that map (through a) onto the same image as value.
        candidates_a = set()
        if value in p_a.keys():
            candidates_a = set(keys_by_value(z_a, p_a[value]))

        # Keys of z that map (through b) onto the same image as value.
        candidates_b = set()
        if value in p_b.keys():
            candidates_b.update(keys_by_value(z_b, p_b[value]))

        # Only keys consistent with both legs are mapped to value.
        for z_key in candidates_a & candidates_b:
            z_p[z_key] = value
    return z_p
6dfcef9e2fa531ae84641be3dd1b4b8836baa958
3,638,144
import torch
from typing import Optional


def f1_score(
    pred: torch.Tensor,
    target: torch.Tensor,
    num_classes: Optional[int] = None,
    class_reduction: str = 'micro',
) -> torch.Tensor:
    """
    Computes the F1-score (a.k.a F-measure), which is the harmonic mean of the
    precision and recall. It ranges between 1 and 0, where 1 is perfect and
    the worst value is 0.

    Args:
        pred: estimated probabilities
        target: ground-truth labels
        num_classes: number of classes
        class_reduction: method to reduce metric score over labels

            - ``'micro'``: calculate metrics globally (default)
            - ``'macro'``: calculate metrics for each label, and find their
              unweighted mean.
            - ``'weighted'``: calculate metrics for each label, and find their
              weighted mean.
            - ``'none'``: returns calculated metric per class

    Return:
        Tensor containing F1-score

    Example:

        >>> x = torch.tensor([0, 1, 2, 3])
        >>> y = torch.tensor([0, 1, 2, 2])
        >>> f1_score(x, y)
        tensor(0.7500)

    """
    # F1 is simply F-beta with beta fixed at 1 (precision and recall
    # weighted equally); delegate everything else to fbeta_score.
    return fbeta_score(
        pred=pred,
        target=target,
        beta=1.,
        num_classes=num_classes,
        class_reduction=class_reduction,
    )
3a5e8bb2915da7aedb16266575bb099f64554c8f
3,638,145
def PyramidPoolingModule(inputs, feature_map_shape):
    """
    Build the Pyramid Pooling Module.
    """
    # Pool the feature map at four pyramid levels.
    levels = [
        InterpBlock(inputs, level, feature_map_shape)
        for level in (1, 2, 3, 6)
    ]
    # Concatenate along channels: input first, then levels 6, 3, 2, 1
    # (same ordering as the original implementation).
    return tf.concat(
        [inputs, levels[3], levels[2], levels[1], levels[0]], axis=-1
    )
4182291ff038ef89620412f2087c9a8bc23e0cd9
3,638,146
def order(ord):
    """
    `order` is decorator to order the pipeline classes.
    This decorator specifies a property named "order" to the member function so that
    we can use the property to order the member functions.
    This `order` function can be combined with the decorator `with_transforms` which
    orders the member functions.

    ```python
    class AGoodClass:
        def __init__(self):
            self.size = 0

        @order(1)
        def first_good_member(self, new):
            return "first good member"

        @order(2)
        def second_good_member(self, new):
            return "second good member"
    ```

    :param ord: ordering key attached to the decorated function as its
        ``order`` attribute (lower values sort first).
    :return: a decorator that tags the wrapped function.
    """
    # NOTE(review): the parameter name ``ord`` shadows the builtin ord();
    # left unchanged because renaming it would break keyword callers.
    return attributes(order=ord)
7cba4dda9a844733e45698257f206aeb22e2e6b2
3,638,147
import math


def collisionIndicator(egoPose, egoPoly, objPose, objPoly):
    """
    Indicator function for collision between ego vehicle and moving object
    Param:
        egoPose: pose of the ego vehicle (position, yaw, UTM covariance)
        egoPoly: polygon (footprint) of the ego vehicle
        objPose: pose of object
        objPoly: polygon (footprint) of the object
    Return:
        col_indicator: (float) collision indicator between two object
    """
    # Relative position mean and combined position covariance of the pair.
    dMean = np.array([egoPose.x_m-objPose.x_m,
                      egoPose.y_m-objPose.y_m])
    dCov = egoPose.covUtm + objPose.covUtm
    diff_yaw = abs(egoPose.yaw_rad-objPose.yaw_rad)
    col_indicator = 0

    # handle parallel and orthogonal case
    # (yaw difference within threshold of a multiple of 90 degrees lets us
    #  use the cheaper axis-aligned Minkowski sum)
    if abs(math.remainder(diff_yaw, np.pi/2)) < param._COLLISION_ORTHO_THRES:
        poly, bound = gaussian.minkowskiSumOrthogonal(egoPoly, objPoly)
        col_indicator = collisionIndicatorComputeSimple(bound, dMean, dCov)
    # handle general case
    else:
        poly, bound = gaussian.minkowskiSum(egoPoly, objPoly)
        col_indicator = collisionIndicatorCompute(
            poly=poly,
            bound=bound,
            dMean=dMean,
            dCov=dCov)
    return col_indicator
074852f5a12cd18c1b201ba4d72e2f710c21417c
3,638,148
def lookup_listener(param):
    """
    Flags a method as a @lookup_listener. This method will be updated on
    the changes to the lookup. The lookup changes when values are registered
    in the lookup or during service activation.
    @param param: function being attached to
    @return:
    """
    def decor(func):
        # Accumulate every listened-to lookup on the function object itself;
        # the first decoration creates the list, later ones extend it.
        existing = getattr(func, "lookup_decor", None)
        if existing is None:
            func.lookup_decor = [param]
        else:
            existing.append(param)
        return func
    return decor
5d053e20ca8c2316aa46f27809b8e0ae59077d32
3,638,149
async def read_multi_analog_inputs(app, addr):
    """
    Execute a single request using `ReadPropertyMultipleRequest`.
    This will read the present values of the first 10 analog input
    objects from the remote device.
    :param app: An app instance
    :param addr: The network address of the remote device
    :return: response of the executed read-property-multiple request
    """
    # One ReadAccessSpecification per analog-input instance (0..9),
    # each asking only for presentValue.
    read_access_specs = []
    for i in range(10):
        read_access_specs.append(
            ReadAccessSpecification(
                objectIdentifier=('analogInput', i),
                listOfPropertyReferences=[PropertyReference(propertyIdentifier='presentValue')],
            )
        )
    return await app.execute_request(
        ReadPropertyMultipleRequest(
            listOfReadAccessSpecs=read_access_specs,
            destination=Address(addr)
        ),
    )
9b2d6f820bcb6fc0ee9ef4ad65c7c7654e4a47b1
3,638,150
def lookupBlock(blockName):
    """
    Look up block name string in name list.

    A data value (e.g. color) override may be appended to the end,
    e.g. ``stained_hardened_clay_10``.

    Note: block name lookup is case insensitive.

    :param blockName: block name, optionally suffixed with ``_<data>``.
    :return: matching Block; exits the process on an unknown name.
    """
    blockName = blockName.upper()
    try:
        try:
            name, data = blockName.rsplit('_', 1)
        except ValueError:
            # No underscore at all: plain name lookup.
            return Blocks[blockName]
        else:
            try:
                data = int(data)
            except ValueError:
                # Suffix is not numeric, so it is part of the name itself.
                return Blocks[blockName]
            return Block(Blocks[name].id, data)
    except KeyError:
        # Python 3 print function (the original Python 2 print statement
        # is a SyntaxError under Python 3).
        print('Invalid block name:', blockName)
        sys.exit()
f1cd8a43751df8bf710a1d0379887725c5a5e400
3,638,151
def factorize(values, sort=False, na_sentinel=-1, size_hint=None):
    """Encode the input values as integer labels

    Parameters
    ----------
    values: Series, Index, or CuPy array
        The data to be factorized.
    sort : bool, default False
        Not supported; raises NotImplementedError when True.
    na_sentinel : number, default -1
        Value to indicate missing category.
    size_hint : int, optional
        Ignored; a warning is emitted if supplied.

    Returns
    -------
    (labels, cats) : (cupy.ndarray, cupy.ndarray or Index)
        - *labels* contains the encoded values
        - *cats* contains the categories in order that the N-th
          item corresponds to the (N-1) code.

    Examples
    --------
    >>> import cudf
    >>> data = cudf.Series(['a', 'c', 'c'])
    >>> codes, uniques = cudf.factorize(data)
    >>> codes
    array([0, 1, 1], dtype=int8)
    >>> uniques
    StringIndex(['a' 'c'], dtype='object')

    See Also
    --------
    cudf.Series.factorize : Encode the input values of Series.

    """
    if sort:
        raise NotImplementedError(
            "Sorting not yet supported during factorization."
        )
    if na_sentinel is None:
        raise NotImplementedError("na_sentinel can not be None.")

    if size_hint:
        warn("size_hint is not applicable for cudf.factorize")

    # Remember whether to hand back a raw cupy array or an Index.
    return_cupy_array = isinstance(values, cp.ndarray)

    values = Series(values)

    # Unique non-null values become the categories, cast to the input dtype.
    cats = values._column.dropna().unique().astype(values.dtype)

    name = values.name  # label_encoding mutates self.name
    labels = values._label_encoding(cats=cats, na_sentinel=na_sentinel).values
    values.name = name  # restore the name clobbered by label_encoding

    return labels, cats.values if return_cupy_array else Index(cats)
be4001dc9873ace10dce9100f7375718db783b24
3,638,152
def make_proc(code, variables, path, *, use_async=False):  # pylint: disable=redefined-builtin
    """Compile this code block to a procedure.

    Args:
        code: the code block to execute. Text, will be indented.
        variables: variable names to pass into the code
        path: the location where the code is stored (used as the
            filename reported in tracebacks)
        use_async: False if sync code, True if async, None if in thread

    Returns:
        the procedure to call. All keyval arguments will be in the local
        dict.
    """
    # Wrap the user code in a function taking exactly the given variables.
    hdr = f"""\
def _proc({ ",".join(variables) }):
"""
    if use_async:
        # Note: only a truthy use_async produces an async def; None
        # (thread mode) compiles as a plain sync function here.
        hdr = "async " + hdr
    # Indent the user code one level so it becomes the function body.
    code = hdr + code.replace("\n", "\n ")
    code = compile(code, str(path), "exec")
    return partial(_call_proc, code, variables)
7f4e6f279ede515f71dbee6c3a5a796ee76815d2
3,638,153
def ratings_std(df):
    """Compute the standard deviation of the 'ratings' column.

    parameters
    ----------
    df (pandas dataframe): a dataframe containing all ratings

    Returns
    -------
    standard deviation (float): std of ratings, rounded to 4 decimals
    """
    # pandas uses the sample standard deviation (ddof=1) by default.
    return round(df['ratings'].std(), 4)
b1bf00d25c0cee91632eef8248d5e53236dd4526
3,638,154
def save(objct, fileoutput, binary=True):
    """
    Save 3D object to file. (same as `write()`).

    Possible extensions are:
        - vtk, vti, ply, obj, stl, byu, vtp, xyz, tif, vti, mhd, png, bmp.

    :param objct: the 3D object to be written out.
    :param fileoutput: output file name, including one of the extensions above.
    :param binary: write in binary format when True (default), ASCII otherwise.
    :return: whatever `write()` returns for the saved object.
    """
    # Thin alias kept for API compatibility: delegates directly to write().
    return write(objct, fileoutput, binary)
c310e5be80bcfb56f2c6bd9f9734f171a0fbd2c6
3,638,155
def read_pfm(fname):
    """
    Load a pfm file as a numpy array

    Args:
        fname: path to the file to be loaded

    Returns:
        tuple of (image data as a numpy array, absolute scale factor)

    Raises:
        ValueError: if the header is not a PFM header or is malformed.
    """
    # 'with' guarantees the handle is closed even on a parse error
    # (the original leaked the file object on every raise).
    with open(fname, 'rb') as file:
        header = file.readline().rstrip()
        if b'PF' == header:
            color = True
        elif b'Pf' == header:
            color = False
        else:
            # Decode before concatenating: 'str' + bytes raises TypeError,
            # masking the intended error message.
            raise ValueError(
                'Not a PFM file! header: ' + header.decode('ascii', 'replace')
            )

        dims = file.readline()
        try:
            width, height = map(int, dims.split())
        except ValueError:
            # Narrowed from a bare except: only a parse failure is expected.
            raise ValueError('Malformed PFM header.')

        scale = float(file.readline().rstrip())
        if scale < 0:  # a negative scale flags little-endian sample data
            endian = '<'
            scale = -scale
        else:
            endian = '>'  # big-endian

        data = np.fromfile(file, endian + 'f')

    shape = (height, width, 3) if color else (height, width)
    data = np.reshape(data, shape)
    # PFM stores rows bottom-to-top; flip to conventional top-to-bottom.
    data = np.flipud(data)
    return data, scale
3c1f90965479cd1fdaaecbd2c740d807d952f687
3,638,156
import math


def broaden_spectrum(spect, sigma):
    """
    Broadens a peak defined in spect by the sigma factor and returns
    the x and y data to plot.

    Args:
    ----
        spect (np.ndarray) -- input array containing the peak info
        (position, intensity) for the individual peak to be broadened.

        sigma (float) -- gaussian broadening term for the peaks given.

    Returns:
    --------
        plot_vals (list) -- a 2D array containing the x and y values for
        plotting (x is an ndarray, y a list of floats, as before).
    """
    # **FUTURE FEATURE**: support arrays of peaks, i.e.
    # min_x = min(spect[0]) - 50., max_x = max(spect[0]) + 50., and a sum
    # of Gaussians over all peak positions spect[0][j].

    # Window of +/- 50 around the (single) peak position.
    min_x = spect[0] - 50
    max_x = spect[0] + 50

    x = np.linspace(start=min_x, stop=max_x, num=10000)

    # Vectorised Gaussian: one numpy expression replaces the original
    # Python loop over all 10000 grid points. .tolist() preserves the
    # original return type of y (a plain list of floats).
    y = (spect[1] * np.exp(-0.5 * ((x - spect[0]) ** 2) / sigma ** 2)).tolist()

    plot_vals = [x, y]
    return plot_vals
9c177dfaf282ad16b9742f96bb70e971a2fce6c6
3,638,157
import requests


def mv_audio(serial_id, audio_setting):
    """
    This function will change the audio recording settings to {audio_setting} in the meraki dashboard
    for the mv camera with the {serial_id}

    :param: serial_id: the serial id for the meraki mv camera
    :param: audio_setting: 'true' to turn on audio recording, 'false' to turn off audio recording
    :return: api response status code
    """
    url = f"https://api.meraki.com/api/v1/devices/{serial_id}/camera/qualityAndRetention"
    # Raw JSON body; audio_setting must be the literal strings 'true'/'false'.
    payload = f'''{{
        "audioRecordingEnabled": {audio_setting}
    }}'''
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/json",
        "X-Cisco-Meraki-API-Key": API_KEY  # module-level API key
    }
    response = requests.request('PUT', url, headers=headers, data=payload)
    # NOTE(review): 403 is "Forbidden"; printing a success message on 403
    # looks inverted -- Meraki returns 200 on success. Confirm intent.
    if response.status_code == 403:
        print(f'Camera-serial id:{serial_id} audio recording have been changed to: {audio_setting}')
    return response.status_code
cb6f2415d8950513760afdcfbe11993999071b73
3,638,158
from typing import List


def lsp_text_edits(changed_file: ChangedFile) -> List[TextEdit]:
    """Take a jedi `ChangedFile` and convert to list of text edits.

    Handles inserts, replaces, and deletions within a text file
    by diffing the old and new code and emitting one LSP TextEdit
    per changed opcode span.
    """
    old_code = (
        changed_file._module_node.get_code()  # pylint: disable=protected-access
    )
    new_code = changed_file.get_new_code()
    # Maps absolute character offsets in the old code to line/column info.
    opcode_position_lookup_old = get_opcode_position_lookup(old_code)
    text_edits = []
    for opcode in get_opcodes(old_code, new_code):
        if opcode.op in _OPCODES_CHANGE:
            start = opcode_position_lookup_old[opcode.old_start]
            end = opcode_position_lookup_old[opcode.old_end]
            # Column = absolute offset minus the start offset of the line.
            start_char = opcode.old_start - start.range_start
            end_char = opcode.old_end - end.range_start
            new_text = new_code[opcode.new_start : opcode.new_end]
            text_edits.append(
                TextEdit(
                    range=Range(
                        start=Position(line=start.line, character=start_char),
                        end=Position(line=end.line, character=end_char),
                    ),
                    new_text=new_text,
                )
            )
    return text_edits
9069db3931606874e0cf8a796ffe6acb88f4ad8a
3,638,159
import hashlib def _hash(file_name, hash_function=hashlib.sha256): """compute hash of file `file_name`""" with open(file_name, 'rb') as file_: return hash_function(file_.read()).hexdigest()
463d692116fbb85db9f1a537cbcaa5d2d019ba05
3,638,160
def prepare_cmf(observer='1931_2deg'):
    """Safely returns the color matching function dictionary for the
    specified observer.

    Parameters
    ----------
    observer : `str`, {'1931_2deg', '1964_10deg'}
        the observer to return

    Returns
    -------
    `dict`
        cmf dict

    Raises
    ------
    ValueError
        observer not 1931 2 degree or 1964 10 degree

    """
    # Normalise case once, then dispatch.
    key = observer.lower()
    if key == '1931_2deg':
        return prepare_cie_1931_2deg_observer()
    if key == '1964_10deg':
        return prepare_cie_1964_10deg_observer()
    raise ValueError('observer must be 1931_2deg or 1964_10deg')
82422c521fc9b55acbad6ab0fbb31a67ec5f71b9
3,638,161
import re
import numpy
import os


def plotAssemblyTypes(
    blueprints,
    coreName,
    assems=None,
    plotNumber=1,
    maxAssems=None,
    showBlockAxMesh=True,
):
    """
    Generate a plot showing the axial block and enrichment distributions of
    each assembly type in the core.

    Parameters
    ----------
    blueprints: Blueprints
        The blueprints to plot assembly types of.

    coreName: str
        Name of the core, used in the plot title and output file name.

    assems: list
        list of assembly objects to be plotted.

    plotNumber: integer
        number of uniquely identify the assembly plot from others and to
        prevent plots from being overwritten.

    maxAssems: integer
        maximum number of assemblies to plot in the assems list.

    showBlockAxMesh: bool
        if true, the axial mesh information will be displayed on the right
        side of the assembly plot.

    Returns
    -------
    str
        Name of the PNG file written to the current working directory.
    """
    # Default to every assembly design defined in the blueprints.
    if assems is None:
        assems = list(blueprints.assemblies.values())
    if not isinstance(assems, (list, set, tuple)):
        assems = [assems]
    if not isinstance(plotNumber, int):
        raise TypeError("Plot number should be an integer")
    if maxAssems is not None and not isinstance(maxAssems, int):
        raise TypeError("Maximum assemblies should be an integer")

    numAssems = len(assems)
    if maxAssems is None:
        maxAssems = numAssems

    # Set assembly/block size constants
    yBlockHeights = []
    yBlockAxMesh = OrderedSet()
    assemWidth = 5.0
    assemSeparation = 0.3
    xAssemLoc = 0.5
    xAssemEndLoc = numAssems * (assemWidth + assemSeparation) + assemSeparation

    # Setup figure
    fig, ax = plt.subplots(figsize=(15, 15), dpi=300)
    for index, assem in enumerate(assems):
        isLastAssem = True if index == (numAssems - 1) else False
        (xBlockLoc, yBlockHeights, yBlockAxMesh) = _plotBlocksInAssembly(
            ax,
            assem,
            isLastAssem,
            yBlockHeights,
            yBlockAxMesh,
            xAssemLoc,
            xAssemEndLoc,
            showBlockAxMesh,
        )
        # Wrap multi-word assembly types onto separate lines under the plot.
        xAxisLabel = re.sub(" ", "\n", assem.getType().upper())
        ax.text(
            xBlockLoc + assemWidth / 2.0,
            -5,
            xAxisLabel,
            fontsize=13,
            ha="center",
            va="top",
        )
        xAssemLoc += assemWidth + assemSeparation

    # Set up plot layout
    ax.spines["right"].set_visible(False)
    ax.spines["top"].set_visible(False)
    ax.spines["bottom"].set_visible(False)
    ax.yaxis.set_ticks_position("left")
    yBlockHeights.insert(0, 0.0)
    yBlockHeights.sort()
    yBlockHeightDiffs = numpy.diff(
        yBlockHeights
    )  # Compute differential heights between each block
    ax.set_yticks([0.0] + list(set(numpy.cumsum(yBlockHeightDiffs))))
    ax.xaxis.set_visible(False)

    ax.set_title("Assembly Designs for {}".format(coreName), y=1.03)
    ax.set_ylabel("Thermally Expanded Axial Heights (cm)".upper(), labelpad=20)
    ax.set_xlim([0.0, 0.5 + maxAssems * (assemWidth + assemSeparation)])

    # Plot and save figure
    ax.plot()
    figName = coreName + "AssemblyTypes{}.png".format(plotNumber)
    runLog.debug("Writing assem layout {} in {}".format(figName, os.getcwd()))
    fig.savefig(figName)
    plt.close(fig)
    return figName
296bce7dca6f938ed7d37b3f37d766b5124ac930
3,638,162
def list_project_milestones(request):
    """list project specific milestones

    Renders a template with the project's open milestones plus counts
    of milestones in each status (Open/Onhold/Terminated/Completed).

    :param request: HTTP request carrying ``project_id`` as a GET param.
    :return: rendered HttpResponse.
    """
    project_id = request.GET.get('project_id')
    project = Project.objects.get(id=project_id)
    template = loader.get_template('project_management/list_project_milestones.html')

    # NOTE(review): each exists()+filter/count pair issues two queries;
    # a single count() (0 when empty) would halve the query count.
    open_status = Status.objects.get(name="Open")
    if Milestone.objects.filter(project_id=project.id, status=open_status).exists():
        open_milestones = Milestone.objects.filter(project_id=project.id, status=open_status)
        open_count = Milestone.objects.filter(project_id=project.id, status=open_status).count()
    else:
        open_milestones = ""
        open_count = 0

    onhold_status = Status.objects.get(name="Onhold")
    if Milestone.objects.filter(project_id=project.id, status=onhold_status).exists():
        onhold_count = Milestone.objects.filter(project_id=project.id, status=onhold_status).count()
    else:
        onhold_count = 0

    terminated_status = Status.objects.get(name="Terminated")
    if Milestone.objects.filter(project_id=project.id, status=terminated_status).exists():
        terminated_count = Milestone.objects.filter(project_id=project.id, status=terminated_status).count()
    else:
        terminated_count = 0

    completed_status = Status.objects.get(name="Completed")
    if Milestone.objects.filter(project_id=project.id, status=completed_status).exists():
        completed_count = Milestone.objects.filter(project_id=project.id, status=completed_status).count()
    else:
        completed_count = 0

    context = {
        'project_id': project.id,
        'project_name': project.name,
        'open_milestones': open_milestones,
        'completed_count': completed_count,
        'onhold_count': onhold_count,
        'terminated_count': terminated_count,
        'open_count': open_count
    }
    return HttpResponse(template.render(context, request))
b7a8cc729f17f9e8640ae83d5f70ede6ce9c1ece
3,638,163
from opax import apply_updates, transform_gradients
from re import T
from typing import Tuple
from typing import Any
from typing import Union


def build_update_fn(loss_fn, *, scan_mode: bool = False):
    """Build a simple update function.

    *Note*: The output of ``loss_fn`` must be ``(loss, (aux, model))``.

    Arguments:
        loss_fn: The loss function.
        scan_mode: If true, use `(model, optimizer)` as a single argument.

    Example:

    >>> def mse_loss(model, x, y):
    ...     y_hat = model(x)
    ...     loss = jnp.mean(jnp.square(y - y_hat))
    ...     return loss, (loss, model)
    ...
    >>> update_fn = pax.utils.build_update_fn(mse_loss)
    >>> net = pax.Linear(2, 2)
    >>> optimizer = opax.adam(1e-4)(net.parameters())
    >>> x = jnp.ones((32, 2))
    >>> y = jnp.zeros((32, 2))
    >>> net, optimizer, loss = update_fn(net, optimizer, x, y)
    """

    # pylint: disable=import-outside-toplevel

    def _update_fn(model: T, optimizer: O, *inputs, **kwinputs) -> Tuple[T, O, Any]:
        """An update function.

        Note that: ``model`` and ``optimizer`` have internal states.
        We have to return them in the output as jax transformations
        (e.g., ``jax.grad`` and ``jax.jit``) requires pure functions.


        Arguments:
            model_and_optimizer: (a callable pax.Module, an optimizer),
            inputs: input batch.

        Returns:
            model_and_optimizer: updated (model, optimizer),
            aux: the aux info.
        """

        assert isinstance(model, Module)
        assert isinstance(optimizer, Module)
        # The updated model returned from loss_fn must have the same tree
        # structure as the input model; otherwise gradients don't line up.
        model_treedef = jax.tree_structure(model)
        grads, (aux, model) = grad(loss_fn, has_aux=True)(model, *inputs, **kwinputs)
        if jax.tree_structure(model) != model_treedef:
            raise ValueError("Expecting an updated model in the auxiliary output.")

        # Standard optax-style step: transform grads, apply updates,
        # write the new parameters back into the model.
        params = select_parameters(model)
        updates, optimizer = transform_gradients(grads, optimizer, params=params)
        params = apply_updates(params, updates=updates)
        model = update_parameters(model, params=params)
        return model, optimizer, aux

    def _update_fn_scan(
        model_and_optimizer: Union[C, Tuple[T, O]], *inputs, **kwinputs
    ) -> Tuple[C, Any]:
        # scan_mode packs (model, optimizer) into one carry value so the
        # function can be used directly as a jax.lax.scan body.
        model, optimizer = model_and_optimizer
        model, optimizer, aux = _update_fn(model, optimizer, *inputs, **kwinputs)
        return (model, optimizer), aux

    return _update_fn_scan if scan_mode else _update_fn
28d6ac1b38ae4cf4e9c7e1e50ffe89d87437e01c
3,638,164
def _get_perf_hint(hint, index: int, _default=None): """ Extracts a "performance hint" value -- specified as either a scalar or 2-tuple -- for either the left or right Dataset in a merge. Parameters ---------- hint : scalar or 2-tuple of scalars, optional index : int Indicates whether the hint value is being extracted for the left or right Dataset. 0 = left, 1 = right. _default : optional Optional default value, returned if `hint` is None. Returns ------- Any The extracted performance hint value. """ if hint is None: return _default elif isinstance(hint, tuple): return hint[index] else: return hint
d67a70d526934dedaa9f571970e27695404350f2
3,638,165
def synchronized_limit(lock):
    """
    Synchronization decorator; provide thread-safe locking on a function
    http://code.activestate.com/recipes/465057/

    ``lock`` is a two-element list: [threading.Lock, waiter count].
    At most 10 callers may be queued on the lock at once.
    """
    def wrap(f):
        def synchronize(*args, **kw):
            # Reject immediately when too many callers are already waiting.
            if lock[1] >= 10:
                raise Exception('Too busy')
            lock[1] += 1
            lock[0].acquire()
            try:
                return f(*args, **kw)
            finally:
                lock[1] -= 1
                lock[0].release()
        return synchronize
    return wrap
a28adfca434b7feaa5aa33c2ba4d1ed2e48cf916
3,638,166
from scipy.ndimage import affine_transform


def fast_warp(img, tf, output_shape=(50, 50), mode='constant', order=1):
    """
    This wrapper function is faster than skimage.transform.warp
    """
    m = tf._matrix
    trans, offset = m[:2, :2], (m[0, 2], m[1, 2])
    res = np.zeros(shape=(output_shape[0], output_shape[1], 3), dtype=floatX)
    # Warp each of the three colour channels independently with the same
    # affine map (equivalent to the original three unrolled calls).
    for channel in range(3):
        res[:, :, channel] = affine_transform(
            img[:, :, channel].T, trans, offset=offset,
            output_shape=output_shape, mode=mode, order=order)
    return res
989b6edc370b7ab92685741f70d8346717d60505
3,638,167
def _get_detections(generator, model, score_threshold=0.05, max_detections=400, save_path=None):
    """ Get the detections from the model using the generator.

    The result is a list of lists such that the size is:
        all_detections[num_images][num_classes] = detections[num_detections, 4 + num_classes]

    # Arguments
        generator       : The generator used to run images through the model.
        model           : The model to run on the images.
        score_threshold : The score confidence threshold to use.
        max_detections  : The maximum number of detections to use per image.
        save_path       : The path to save the images with visualized detections to.
    # Returns
        A list of lists containing the detections for each image in the generator.
    """
    all_detections = [[None for i in range(generator.num_classes()) if generator.has_label(i)] for j in range(generator.size())]

    for i in progressbar.progressbar(range(generator.size()), prefix='Running network: '):
        raw_image = generator.load_image(i)
        # Preprocess/resize exactly as the model expects; keep the scale to
        # map predicted boxes back to original image coordinates.
        image = generator.preprocess_image(raw_image.copy())
        image, scale = generator.resize_image(image)

        if keras.backend.image_data_format() == 'channels_first':
            image = image.transpose((2, 0, 1))

        # run network
        boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))[:3]

        # correct boxes for image scale
        boxes /= scale

        # select indices which have a score above the threshold
        indices = np.where(scores[0, :] > score_threshold)[0]

        # select those scores
        scores = scores[0][indices]

        # find the order with which to sort the scores
        # (descending, capped at max_detections)
        scores_sort = np.argsort(-scores)[:max_detections]

        # select detections
        image_boxes = boxes[0, indices[scores_sort], :]
        image_scores = scores[scores_sort]
        image_labels = labels[0, indices[scores_sort]]
        # Rows are [x1, y1, x2, y2, score, label].
        image_detections = np.concatenate([image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1)

        # if save_path is not None:
        #     draw_annotations(raw_image, generator.load_annotations(i), label_to_name=generator.label_to_name)
        #     draw_detections(raw_image, image_boxes, image_scores, image_labels, label_to_name=generator.label_to_name)
        #
        #     cv2.imwrite(os.path.join(save_path, '{}.png'.format(i)), raw_image)

        # copy detections to all_detections
        # (per class, drop the trailing label column)
        for label in range(generator.num_classes()):
            if not generator.has_label(label):
                continue

            all_detections[i][label] = image_detections[image_detections[:, -1] == label, :-1]

    return all_detections
1800bba86a21c07356fd0075d8d911b9ec55540b
3,638,168
def BF (mu, s2, noise_var=None, pps=None):
    """
    Buzzi-Ferraris et al.'s design criterion.

    - Buzzi-Ferraris and Forzatti (1983)
        Sequential experimental design for model discrimination
        in the case of multiple responses.
        Chem. Eng. Sci. 39(1):81-85
    - Buzzi-Ferraris et al. (1984)
        Sequential experimental design for model discrimination
        in the case of multiple responses.
        Chem. Eng. Sci. 39(1):81-85
    - Buzzi-Ferraris et al. (1990)
        An improved version of sequential design criterion for
        discrimination among rival multiresponse models.
        Chem. Eng. Sci. 45(2):477-481

    :param mu: model-mean predictions per design point and model.
    :param s2: predictive covariances per design point and model.
    :param noise_var: measurement-noise covariance (added to s2).
    :param pps: unused here; accepted for a uniform criterion signature.
    :return: criterion value for each of the n design points.
    """
    mu, s2, noise_var, _, n, M, _ = _reshape(mu, s2, noise_var, None)
    s2 += noise_var

    dc = np.zeros(n)
    # Sum the pairwise discrimination terms over all model pairs (i, j).
    for i in range(M-1):
        for j in range(i+1,M):
            iSij = np.linalg.inv(s2[:,i] + s2[:,j])
            # trace term of the criterion
            t1 = np.trace( np.matmul(noise_var, iSij), axis1=1, axis2=2 )
            # quadratic term in the mean difference
            r1 = np.expand_dims(mu[:,i] - mu[:,j],2)
            t2 = np.sum( r1 * np.matmul(iSij, r1), axis=(1,2) )
            dc += t1 + t2
    return dc
f51e7c2a6827e1a1283afc8d0b8af5e3fe66a034
3,638,169
def deploy(usr, pwd, path=None, venv=None):
    """release on `pypi.org`

    :param usr: pypi.org user name.
    :param pwd: pypi.org password.
    :param path: project directory containing ``dist/``; defaults to the
        current working directory *at call time*.
    :param venv: virtual environment to run the twine modules in.
    :return: result of the ``twine upload`` invocation.
    """
    # Resolve the default lazily: ``path=getcwd()`` in the signature would
    # freeze the working directory at import time, not at call time.
    if path is None:
        path = getcwd()
    log(INFO, ICONS["deploy"] + 'deploy release on `pypi.org`')
    # check dist
    module('twine', 'check --strict dist/*', path=path, venv=venv)
    # push to pypi.org
    return module("twine", "upload -u %s -p %s dist/*" % (usr, pwd),
                  level=LEVEL, path=path, venv=venv)
cfb13ac61addfc783a0f6e5963fb44838142f77d
3,638,170
def most_interval_scheduling(interval_list):
    """
    Maximum interval scheduling: greedily prefer intervals with the
    smallest 'end' value.

    Args:
        interval_list(list): list of interval dicts with 'start' and
            'end' keys.

    Returns:
        scheduling_list(list): maximal subset of pairwise non-overlapping
        intervals (empty list for empty input).
    """
    # Guard: the original unconditionally indexed element 0 and raised
    # IndexError on an empty input.
    if not interval_list:
        return []
    scheduling_list = []
    # Classic greedy argument: taking the earliest-finishing compatible
    # interval is always optimal.
    for interval in sorted(interval_list, key=lambda x: x['end']):
        if not scheduling_list or scheduling_list[-1]['end'] <= interval['start']:
            scheduling_list.append(interval)
    return scheduling_list
82b1d051221043025497c95d9657245b5b507bde
3,638,171
def kernel_program(inputfile, dimData, Materials, dict_nset_data, \
                   dict_elset_matID={}, dict_elset_dload={}):
    """The kernel_program should be called by the job script (e.g., Job-1.py)
    where the user defines:
    - inputfile: the name of the input file
    - dimData: the dimensional data (see class dimension_data)
    - Materials: the list of materials used in the analysis
      (see package Elements)
    - dict_nset_data: the dictionary of nset_data (for bcds and concentrated
      loads) where the keys are nset names read from inputfile and values
      are nset_data as defined in the class nset_data
    and optionally:
    - dict_elset_matID: a dictionary where each key is an elset name defined
      in inputfile, and its value is the corresponding index of material in
      the Materials list for elements in this elset. This dictionary needs
      to be defined when multiple materials/material sections are present
      in the model
    - dict_elset_dload: a dictionary where each key is an elset name defined
      in inputfile, and its value is the corresponding dload_data (see class
      dload_data) for all elements in this elset, meaning that these
      elements are subjected to the distributed loading defined by this
      dload_data. This is needed when distributed loading is present in
      the model

    Returns [parts, nodes, elem_lists, f, a, RF]: the parsed parts, node
    list, element lists, external force vector, solved dof vector and
    reaction force vector.

    NOTE(review): the mutable default arguments ({}) are shared across
    calls; safe only while they are never mutated inside this function.
    """
    ###########################################################################
    # Preprocessing
    ###########################################################################
    # Read data from Abaqus input file and form abaqus parts
    parts = read_abaqus.read_parts_from_inputfile(inputfile)

    # check if there is only one part
    # in the future, consider making a loop over all parts
    if(not len(parts)==1):
        raise ValueError('Only a single part is supported!')

    # verification of dimensional parameters before proceeding
    verify_dimensional_parameters(parts[0], dimData)

    # form lists of nodes and elem_lists (eltype and elem indices of this type)
    nodes = form_nodes(parts[0])
    elem_lists = form_elem_lists(parts[0], dimData.NDOF_NODE, dimData.ELEM_TYPES,\
                                 dict_elset_matID)

    # form lists of bcds and cloads
    [bcd_dofs, bcd_values, cload_dofs, cload_values] = \
        form_bcds_cloads(parts[0], dict_nset_data, dimData.NDOF_NODE)

    # form lists of elset for distributed loads
    list_dload_data = form_list_dload_data(parts[0], dict_elset_dload)

    ###########################################################################
    # Assembler
    # obtain the full stiffness matrix K and external distributed force vector f
    ###########################################################################
    # form the list of all the elems for assembly
    elems = []
    for elist in elem_lists:
        # verify material type before assembly
        for elem in elist.elems:
            elem.verify_material(Materials[elem.matID])
        elems.extend(elist.elems)

    # call assembler
    [K, f] = assembler(nodes, elems, dimData.NDOF_NODE, Materials, list_dload_data)

    ###########################################################################
    # Solver
    # modify the stiffness matrix and force vector based on applied bcds and loads
    # obtain dof vector a and reaction force vector RF, both size ndof by 1
    ###########################################################################
    [a, RF] = solver(K, f, bcd_dofs, bcd_values, cload_dofs, cload_values)

    return [parts, nodes, elem_lists, f, a, RF]
ac65f5c1355ed4097018ded03a9484cf77e7bf17
3,638,172
import os
import re


def filter_fasta(infa, outfa, regex=".*", v=False, force=False):
    """Filter fasta file based on regex.

    Parameters
    ----------
    infa : str
        Filename of input fasta file.

    outfa : str
        Filename of output fasta file. Cannot be the same as infa.

    regex : str, optional
        Regular expression used for selecting sequences.

    v : bool, optional
        If set to True, select all sequence *not* matching regex.

    force : bool, optional
        If set to True, overwrite outfa if it already exists.

    Returns
    -------
        fasta : Fasta instance
            pyfaidx Fasta instance of newly created file

    Raises
    ------
    ValueError
        If infa == outfa, if outfa exists without force, or if no
        sequences remain after filtering.
    """
    if infa == outfa:
        raise ValueError("Input and output FASTA are the same file.")

    if os.path.exists(outfa):
        if force:
            # Also remove the stale pyfaidx index, or it would be reused.
            os.unlink(outfa)
            if os.path.exists(outfa + ".fai"):
                os.unlink(outfa + ".fai")
        else:
            raise ValueError(
                "{} already exists, set force to True to overwrite".format(outfa))

    # pyfaidx applies filt_function to sequence names while indexing.
    filt_function = re.compile(regex).search
    fa = Fasta(infa, filt_function=filt_function)
    seqs = fa.keys()
    if v:
        # Inverted match: keep names NOT selected by the regex.
        original_fa = Fasta(infa)
        seqs = [s for s in original_fa.keys() if s not in seqs]
        fa = original_fa

    if len(seqs) == 0:
        raise ValueError("No sequences left after filtering!")

    with open(outfa, "w") as out:
        for chrom in seqs:
            out.write(">{}\n".format(fa[chrom].name))
            out.write("{}\n".format(fa[chrom][:].seq))

    return Fasta(outfa)
c41b195d6d400da0fea266f835f6ec3cf06b8c78
3,638,173
from typing import Collection


def rmse_metric(predicted: Collection, actual: Collection) -> float:
    """
    Root-mean-square error metric.

    Args:
        predicted (list): prediction values.
        actual (list): reference values.

    Returns:
        root-mean-square-error metric.
    """
    # sqrt of the mean squared elementwise difference.
    squared_errors = np.subtract(predicted, actual) ** 2
    return np.sqrt(squared_errors.mean())
87b14ae0c99db10ffaa8a352d7deb6007ba1f00e
3,638,174
import json


def set_user_data(session_id, user_data):
    """ this function temporarily stores data transmitted by user, when POSTed by the user device;
        the data is then picked up by the page ajax polling mechanism """
    # Store under the session key with a 30-second TTL; report the session
    # id back to the caller only when redis confirms the write.
    key = K_USER_DATA.format(session_id)
    stored = red.set(key, json.dumps(user_data), ex=30)
    return session_id if stored else None
c3230dd751d2b34f8abb142be5dc170c639258b5
3,638,175
def find_keys(info: dict) -> dict:
    """Determines all the keys and their parent keys.

    Recursively walks the nested dict/list structure and maps every key
    to its immediate parent's path fragment ('' for top-level keys).
    Only the first parent encountered for a given key is recorded.

    Args:
        info: arbitrarily nested dictionary.

    Returns:
        dict: mapping of key -> parent prefix (e.g. ``'parent[].'``).
    """
    avail_keys: dict = {}

    def _walk(dct: dict, prev_key: str) -> None:
        for key, value in dct.items():
            # setdefault keeps the first parent seen for each key.
            avail_keys.setdefault(key, prev_key)
            # isinstance (instead of type(...) == ...) also handles
            # dict/list subclasses such as OrderedDict.
            if isinstance(value, dict):
                _walk(value, key + '[].')
            elif isinstance(value, list):
                for item in value:
                    if isinstance(item, dict):
                        _walk(item, key + '[].')

    _walk(info, '')
    return avail_keys
8d0bed361767d62bbc3544efdfe47e8e1065f462
3,638,176
def valid(exc, cur1, cur2=None, exclude=None, exclude_cur=None):
    """
    Find if the given exc satisfies currency 1 (currency 2)
    (and is not exclude) (and currency is not exclude)
    """
    # Excluded exchange object never matches.
    if exclude is not None and exc == exclude:
        return False
    currencies = [exc.to_currency, exc.from_currency]
    # Exchanges touching the excluded currency never match.
    if exclude_cur is not None and exclude_cur in currencies:
        return False
    # With a single currency, it just has to appear on either side;
    # with two, both must appear.
    if cur2 is None:
        return cur1 in currencies
    return cur1 in currencies and cur2 in currencies
84a37e669fee120aed8fbc57ab13d5f70f583cf4
3,638,177
import math


def arc(
    x: float,
    y: float,
    radius: float,
    start: float,
    stop: float,
    quantization: float = 0.1,
) -> np.ndarray:
    """Build a circular arc path.

    Zero angles refer to east of unit circle and positive values extend
    counter-clockwise.

    Args:
        x: center X coordinate
        y: center Y coordinate
        radius: circle radius
        start: start angle (degree)
        stop: stop angle (degree)
        quantization: maximum length of linear segment

    Returns:
        arc path

    Raises:
        ValueError: if start and stop normalize to the same angle
    """

    def wrap_degrees(value):
        # Fold the angle into the [0, 360] interval.
        while value > 360:
            value -= 360
        while value < 0:
            value += 360
        return value

    start = wrap_degrees(start)
    stop = wrap_degrees(stop)
    if stop < start:
        stop += 360
    elif start == stop:
        raise ValueError("start and stop angles must have different values")

    # Choose the sample count so each linear segment stays below the
    # quantization limit along the arc length.
    sample_count = math.ceil((stop - start) / 180 * math.pi * radius / quantization)
    angle = np.linspace(start, stop, sample_count)
    angle[angle == 360] = 0
    angle *= math.pi / 180
    return radius * (np.cos(-angle) + 1j * np.sin(-angle)) + complex(x, y)
99ce50042e4199c38fdb0a6e79134fab0cd30196
3,638,178
def build_feature_columns(schema): """Build feature columns as input to the model.""" # non-numeric columns exclude = ['customer_id', 'brand', 'promo_sensitive', 'weight', 'label'] # numeric feature columns numeric_column_names = [col for col in schema.names if col not in exclude] numeric_columns = [ tf.feature_column.numeric_column(col) for col in numeric_column_names ] # identity column identity_column = tf.feature_column.categorical_column_with_identity( key='promo_sensitive', num_buckets=2) # DNNClassifier only accepts dense columns indicator_column = tf.feature_column.indicator_column(identity_column) # numeric weight column weight_column = tf.feature_column.numeric_column('weight') feature_columns = numeric_columns + [indicator_column] return feature_columns, weight_column
048ebe72e291db6a92ee9d2d903baf6e10df9bf2
3,638,179
def get_mol_func(smiles_type):
    """
    Returns a function pointer that converts a given SMILES type to a mol object.
    :param smiles_type: The SMILES type to convert VALUES=(deepsmiles.*, smiles, scaffold).
    :return : A function pointer.
    """
    if not smiles_type.startswith("deepsmiles"):
        # Plain SMILES (or scaffold) strings convert directly.
        return to_mol

    _, deepsmiles_type = smiles_type.split(".")

    def _deepsmiles_to_mol(deepsmi):
        # Decode DeepSMILES first, then build the mol object.
        return to_mol(from_deepsmiles(deepsmi, converter=deepsmiles_type))

    return _deepsmiles_to_mol
7af4260cb79c21e763ee2ad4a64c38b5e3fd84fe
3,638,180
def index():
    """Render and return the homepage template (``index.html``)."""
    return render_template("index.html")
6b3a8595173d8919478ae4a0f4dc7f8a3958af56
3,638,181
def species_thermo_value(spc_dct):
    """Look up a species' enthalpy at 298 K.

    :param spc_dct: species data dict containing an ``'H298'`` entry
    :return: the value stored under ``'H298'``
    :raises KeyError: if the dict has no ``'H298'`` entry
    """
    enthalpy_298 = spc_dct['H298']
    return enthalpy_298
7684b0ace0fa9717cb1cc3ea83bb6be8099c4bf6
3,638,182
import inspect
import importlib
import pprint


def invest_validator(validate_func):
    """Decorator to enforce characteristics of validation inputs and outputs.

    Attributes of inputs and outputs that are enforced are:

        * ``args`` parameter to ``validate`` must be a ``dict``
        * ``limit_to`` parameter to ``validate`` must be either ``None`` or a
          string (``str`` or ``unicode``) that exists in the ``args`` dict.
        *  All keys in ``args`` must be strings
        * Decorated ``validate`` func must return a list of 2-tuples, where
          each 2-tuple conforms to these rules:

            * The first element of the 2-tuple is an iterable of strings.
              It is an error for the first element to be a string.
            * The second element of the 2-tuple is a string error message.

    In addition, this validates the ``n_workers`` argument if it's included.

    Raises:
        AssertionError when an invalid format is found.

    Example::

        from natcap.invest import validation
        @validation.invest_validator
        def validate(args, limit_to=None):
            # do your validation here
    """
    def _wrapped_validate_func(args, limit_to=None):
        # Enforce the expected validate(args, limit_to=None) signature.
        validate_func_args = inspect.getfullargspec(validate_func)
        assert validate_func_args.args == ['args', 'limit_to'], (
            'validate has invalid parameters: parameters are: %s.' % (
                validate_func_args.args))

        assert isinstance(args, dict), 'args parameter must be a dictionary.'
        assert (isinstance(limit_to, type(None)) or
                isinstance(limit_to, str)), (
            'limit_to parameter must be either a string key or None.')
        if limit_to is not None:
            assert limit_to in args, ('limit_to key "%s" must exist in args.'
                                      % limit_to)

        # Only the keys are checked; iterate keys directly.
        for key in args:
            assert isinstance(key, str), (
                'All args keys must be strings.')

        # Pytest in importlib mode makes it impossible for test modules to
        # import one another. This causes a problem in test_validation.py,
        # which gets imported into itself here and fails.
        # Since this decorator might not be needed in the future,
        # just ignore failed imports; assume they have no ARGS_SPEC.
        try:
            model_module = importlib.import_module(validate_func.__module__)
        except Exception:
            # Narrowed from a bare except so SystemExit/KeyboardInterrupt
            # still propagate.
            LOGGER.warning(
                'Unable to import module %s: assuming no ARGS_SPEC.',
                validate_func.__module__)
            model_module = None

        # If the module has an ARGS_SPEC defined, validate against that.
        if hasattr(model_module, 'ARGS_SPEC'):
            LOGGER.debug('Using ARG_SPEC for validation')
            args_spec = getattr(model_module, 'ARGS_SPEC')['args']

            if limit_to is None:
                LOGGER.info('Starting whole-model validation with ARGS_SPEC')
                warnings_ = validate_func(args)
            else:
                LOGGER.info('Starting single-input validation with ARGS_SPEC')
                args_key_spec = args_spec[limit_to]

                args_value = args[limit_to]
                error_msg = None

                # We're only validating a single input.  This is not
                # officially supported in the validation function, but we can
                # make it work within this decorator.
                try:
                    if args_key_spec['required'] is True:
                        if args_value in ('', None):
                            error_msg = "Value is required"
                except KeyError:
                    # If required is not defined in the args_spec, we default
                    # to False.  If 'required' is an expression, we can't
                    # validate that outside of whole-model validation.
                    pass

                # If the input is not required and does not have a value, no
                # need to validate it.
                if args_value not in ('', None):
                    input_type = args_key_spec['type']
                    validator_func = _VALIDATION_FUNCS[input_type]

                    try:
                        validation_options = (
                            args_key_spec['validation_options'])
                    except KeyError:
                        validation_options = {}

                    error_msg = (
                        validator_func(args_value, **validation_options))

                if error_msg is None:
                    warnings_ = []
                else:
                    warnings_ = [([limit_to], error_msg)]
        else:  # args_spec is not defined for this function.
            LOGGER.warning('ARGS_SPEC not defined for this model')
            warnings_ = validate_func(args, limit_to)

        LOGGER.debug('Validation warnings: %s',
                     pprint.pformat(warnings_))

        return warnings_

    return _wrapped_validate_func
80043d1c1263fec28469fb168fa0466f3dd38be5
3,638,183
async def async_setup(hass: HomeAssistant, config: dict) -> bool:
    """Set up the Flood integration.

    Creates the integration's shared storage dict under ``hass.data`` if it
    does not exist yet, and reports success.
    """
    hass.data.setdefault(DOMAIN, {})

    return True
5d404d856346e26f8c7f32102c6286f05ac91f8c
3,638,184
def typehint(x, typedict):
    """Replace the dtypes in `x` keyed by `typedict` with the dtypes in
    `typedict`.

    Parameters
    ----------
    x : np.ndarray
        Structured array whose field dtypes should be (partially) replaced.
    typedict : dict
        Mapping of field name -> replacement dtype.

    Returns
    -------
    np.ndarray
        Copy of ``x`` cast to the updated structured dtype.
    """
    dtype = x.dtype
    # dtype.fields maps name -> (dtype, offset[, title]); `first` extracts
    # the dtype component of each tuple.
    lhs = dict(zip(dtype.fields.keys(), map(first, dtype.fields.values())))
    # Combine current dtypes with the overrides; assumes a toolz-style merge
    # where later mappings (typedict) take precedence — TODO confirm.
    dtype_list = list(merge(lhs, typedict).items())
    # Restore the original field order before casting.
    return x.astype(np.dtype(sort_dtype_items(dtype_list, dtype.names)))
a5e8ab9e94f3d622467a3e486bc21ffed1336878
3,638,185
import random


def balance(samples, labels, balance_factor, adjust_func):
    """create a balanced dataset by subsampling classes or generating new samples"""
    grouped = group_by_label(samples, labels)

    # balance_factor <= 1 is a fraction of the largest class; otherwise it is
    # an absolute per-class target size.
    if balance_factor <= 1.0:
        biggest = max(len(members) for _, members in grouped)
        target_size = int(biggest * balance_factor)
    else:
        target_size = int(balance_factor)

    pairs = []
    for label, members in grouped:
        if len(members) > target_size:
            # Subsample oversized classes.
            resized = random.sample(members, target_size)
        else:
            # Pad undersized classes with adjusted copies of random members.
            resized = [member for member in members]
            while len(resized) < target_size:
                resized.append(adjust_func(random.choice(members)))
        pairs.extend((sample, label) for sample in resized)

    return zip(*pairs)
128aaec1b3c60348190394c5ceab6b561eba6f51
3,638,186
import random


def generate_sha1(string, salt=None):
    """
    Generates a sha1 hash for supplied string. Doesn't need to be very secure
    because it's not used for password checking. We got Django for that.

    :param string: The string that needs to be encrypted.
    :param salt: Optionally define your own salt. If none is supplied, will
        use a random string of 5 characters.

    :return: Tuple containing the salt and hash.
    """
    if not salt:
        # random (not secrets) is acceptable here: per the docstring this
        # hash is not used for authentication.
        salt = sha_constructor(str(random.random())).hexdigest()[:5]
    # Renamed from `hash` to avoid shadowing the builtin of the same name.
    digest = sha_constructor(salt + str(string)).hexdigest()
    return (salt, digest)
9ff8cbfd987972fea6712f90752c9d94aeb78b44
3,638,187
def sentences_from_doc(ttree_doc, language, selector):
    """Given a Treex document, return a list of sentences in the given
    language and selector."""
    sentences = []
    for bundle in ttree_doc.bundles:
        zone = bundle.get_zone(language, selector)
        sentences.append(zone.sentence)
    return sentences
d9c09249171d5d778981fb98a8a7f53765518479
3,638,188
import math


def demo_func(par):
    """Test function to optimize."""
    # Dispatch table keeps a KeyError for unknown function names.
    trig_by_name = {
        'sin': math.sin,
        'cos': math.cos,
    }
    trig_value = trig_by_name[par['str']](par['p'])
    numerator = par['x'] - par['y'] * par['z']
    return numerator / (trig_value ** 2 + 1)
5899be5709c4a6ecf09cf9852c1b7569d85616b3
3,638,189
def file_to_list(filename):
    """
    Read in a one-column txt file to a list
    :param filename:
    :return: A list where each line is an element
    """
    lines = []
    with open(filename, 'r') as fin:
        for raw_line in fin:
            # Strip surrounding whitespace (including the newline).
            lines.append(raw_line.strip())
    return lines
33bee263b98c4ff85d10191fa2f5a0f095c6ae4b
3,638,190
def regular_ticket_price(distance_in_km: int) -> float:
    """
    calculate the regular ticket price based on the given distance
    :int distance_in_km:
    """
    # source --> Tarife 601 Chapter 10.1.3 on
    # https://www.allianceswisspass.ch/de/Themen/TarifeVorschriften
    rate_per_km_by_band = {
        range(1, 5): 44.51,
        range(5, 15): 42.30,
        range(15, 49): 37.24,
        range(49, 151): 26.46,
        range(151, 201): 25.71,
        range(201, 251): 22.85,
        range(251, 301): 20.63,
        range(301, 481): 20.09,
        range(481, 1501): 19.85,
    }
    computed = calculate_price(distance_in_km, rate_per_km_by_band)
    # Never undercut the system-wide minimum fare.
    return max(computed, MINDESTPRICE_IN_CHF)
220b4eb9362b9a48b5c3d2e2ad17e23755098785
3,638,191
def find_cal_indices(datetimes):
    """Find the start of each calibration event in a datetime series.

    Cal events are any time a standard is injected and being quantified by
    the system. Here, they're separated as though any calibration data
    that's more than 60s away from the previous cal data is a new event.

    :param datetimes: pandas Series of datetimes for all of the supplied data
        (docstring previously referred to a nonexistent ``epoch_time`` param)
    :return: list of cal event indices, where each index is the beginning of
        a new cal event
    """
    diff = datetimes.diff()
    # A gap longer than 60s marks the first point of a new cal event.
    # (A stale "subtract one from all indices" comment was removed: no
    # subtraction is performed.)
    indices = diff.loc[diff > pd.Timedelta(seconds=60)].index.values.tolist()
    return indices
2e823e5ffc5fb509639a2d5746bd26af77a650ae
3,638,192
def rename(isamAppliance, id, new_name, check_mode=False, force=False):
    """
    Rename a Password Strength
    """
    # Only act when forced or when the named entry actually exists.
    should_update = force is True or _check(isamAppliance, id) is True
    if not should_update:
        return isamAppliance.create_return_object()
    if check_mode is True:
        # Report the change without performing it.
        return isamAppliance.create_return_object(changed=True)
    return isamAppliance.invoke_put(
        "Rename a Password Strength",
        "/wga/pwd_strength/{0}".format(id),
        {
            'id': id,
            'new_name': new_name
        })
b140e618370cff1086b27fef13e0ff91b22cf075
3,638,193
import asyncio
import traceback


def reinvoke_on_edit(ctx, *additional_messages: discord.Message, timeout: float = 600) -> None:
    # noinspection PyUnresolvedReferences
    """
    Watches a given context for a given period of time. If the message
    that invoked the context is edited within the time period, then
    the invoking message plus any additional messages are deleted. The
    context's command is then reinvoked with the new message body.

    Parameters:
        ctx:
            A :class:`discord.ext.commands.Context` to listen to. Create
            one with `bot.get_context` if you are in an event instead.
        additional_messages:
            Any additional messages to also destroy on close.
        timeout:
            The timeout to wait for before the call terminates, in seconds.
            Defaults to ``600``.
            # NOTE(review): the original docstring claimed this defaults to
            # None with BaseNavigator-specific behaviour; the signature says
            # 600 — confirm intended behaviour.

    Note:
        To invoke this on a response that is being paginated using the
        `libneko.pagination` module, you should attempt to invoke it
        like so::

            >>> factory = ...
            >>> nav = factory.build()
            >>> nav.start(ctx)
            >>> reinvoke_on_edit(ctx, *nav.all_messages)
            >>>
            >>> # or if you just have a nav
            >>>
            >>> nav = StringNavigator(...)
            >>> nav.start()
            >>> reinvoke_on_edit(ctx, *nav.all_messages)
    """
    if ctx.command is None:
        raise ValueError("Cannot reinvoke a non-valid command or non-command invocation")

    async def handle_wait_for_edit_or_close():
        try:
            # Triggered when we should kill our events.
            event = asyncio.Event()

            # Decorator: run the wrapped coroutine, then set `event` so the
            # sibling watcher task gets cancelled too.
            def set_on_exit(f):
                @neko3.functional.wraps(f)
                async def wrapper():
                    r = await f()
                    event.set()
                    return r

                return wrapper

            @set_on_exit
            async def wait_for_close():
                # Resolve when the invoking message is deleted, or on timeout.
                try:
                    await ctx.bot.wait_for("message_delete", check=lambda m: m.id == ctx.message.id, timeout=timeout)
                except (asyncio.CancelledError, asyncio.TimeoutError):
                    pass

            @set_on_exit
            async def wait_for_edit():
                try:

                    def predicate(before, after):
                        try:
                            # Only respond to this message
                            if after.id != ctx.message.id:
                                return False
                            elif before.content == after.content:
                                # Again, something went weird.
                                return False
                            elif not after.content.startswith(ctx.prefix):
                                return False
                            else:
                                # Ensure same command.
                                invoked = ctx.message.content[len(ctx.prefix) :].lstrip()
                                return invoked.startswith(ctx.invoked_with)
                        except Exception:
                            traceback.print_exc()

                    _, after = await ctx.bot.wait_for("message_edit", check=predicate)

                    new_ctx = await ctx.bot.get_context(after)

                    # Delete the extra messages in the background, then
                    # reinvoke the command against the edited message.
                    asyncio.ensure_future(asyncio.gather(*[m.delete() for m in additional_messages]), loop=ctx.bot.loop)

                    ctx.bot.loop.create_task(ctx.command.reinvoke(new_ctx))
                except asyncio.CancelledError:
                    pass
                except Exception:
                    traceback.print_exc()

            tasks = [ctx.bot.loop.create_task(wait_for_close()), ctx.bot.loop.create_task(wait_for_edit())]

            # On either of these events triggering, we kill the lot.
            await event.wait()

            for task in tasks:
                try:
                    task.cancel()
                    task.result()
                except Exception:
                    pass
        except Exception:
            traceback.print_exc()

    # Fire-and-forget: the watcher runs in the background on the bot loop.
    ctx.bot.loop.create_task(handle_wait_for_edit_or_close())
523048caadac3efc0f8065623a6448077dcb05fd
3,638,194
def gen_model_forms(form, model):
    """Create a dict of forms keyed by primary key.

    ``model_forms[0]`` is a blank form used for adding new model objects;
    ``model_forms[m.pk]`` is an editing form pre-populated with the fields
    of ``m``."""
    forms_by_pk = {0: form()}
    forms_by_pk.update(
        (obj.pk, form(instance=obj)) for obj in model.objects.all())
    return forms_by_pk
28bf3f007a7f8f971c18980c84a7841fd116898f
3,638,195
def _state_size_with_prefix(state_size, prefix=None):
    """Helper function that enables int or TensorShape shape specification.

    This function takes a size specification, which can be an integer or a
    TensorShape, and converts it into a list of integers. One may specify
    any additional dimensions that precede the final state size
    specification.

    Args:
      state_size: TensorShape or int that specifies the size of a tensor.
      prefix: optional additional list of dimensions to prepend.

    Returns:
      result_state_size: list of dimensions the resulting tensor size.

    Raises:
      TypeError: if prefix is provided but is not a list.
    """
    # as_shape accepts both ints and TensorShape, normalizing to TensorShape.
    result_state_size = tensor_shape.as_shape(state_size).as_list()
    if prefix is not None:
        if not isinstance(prefix, list):
            raise TypeError("prefix of _state_size_with_prefix should be a list.")
        result_state_size = prefix + result_state_size
    return result_state_size
7f8aaab1dd42b6470dce08f9a13a59d8cdc66f4f
3,638,196
def as_pandas(cursor, coerce_float=False):
    """Return a pandas `DataFrame` out of an impyla cursor.

    This will pull the entire result set into memory.  For richer
    pandas-like functionality on distributed data sets, see the Ibis
    project.

    Parameters
    ----------
    cursor : `HiveServer2Cursor`
        The cursor object that has a result set waiting to be fetched.
    coerce_float : bool, optional
        Attempt to convert values of non-string, non-numeric objects
        to floating point.

    Returns
    -------
    DataFrame
    """
    from pandas import DataFrame  # pylint: disable=import-error
    # The first element of each description tuple is the column name.
    column_names = [col_meta[0] for col_meta in cursor.description]
    rows = cursor.fetchall()
    return DataFrame.from_records(rows, columns=column_names,
                                  coerce_float=coerce_float)
e1a9f5ba9b589a9c94f6df1a379833d8d7176d2b
3,638,197
def check_ip(ip, network_range):
    """
    Test if the IP is in range

    Range is expected to be in CIDR notation format. If no MASK is given
    /32 is used.

    :param ip: dotted-quad IPv4 address to test
    :param network_range: network in CIDR notation ("a.b.c.d/nn" or "a.b.c.d")
    :return: True if the IP is in the range; False otherwise, including when
        either address cannot be parsed
    """
    parts = str(network_range).split('/')
    range_ip = parts[0]
    # Default to a host mask (/32) when no prefix length is supplied.
    range_mask = int(parts[1]) if len(parts) == 2 else 32

    try:
        range_int = ip2int(range_ip)
        ip_int = ip2int(ip)
        # In range when no bits differ within the masked network prefix.
        result = not ((ip_int ^ range_int) & 0xFFFFFFFF << (32 - range_mask))
    except Exception:
        # Narrowed from a bare except: treat unparseable addresses as "not
        # in range" while letting SystemExit/KeyboardInterrupt propagate.
        result = False

    return result
be2bf16e4b000ff4b106761ea99bf69596d3ece2
3,638,198
async def index(_request: HttpRequest) -> HttpResponse:
    """A request handler which provides an index of the compression methods.

    Returns a 200 HTML page linking to the gzip, deflate and compress
    endpoints; the request itself is unused.
    """
    html = """
<!DOCTYPE html>
<html>
  <body>
    <ul>
      <li><a href='/gzip'>gzip</a></li>
      <li><a href='/deflate'>deflate</a></li>
      <li><a href='/compress'>compress</a></li>
    </ul>
  </body>
</html>
"""
    return HttpResponse(
        200,
        [(b'content-type', b'text/html')],
        text_writer(html)
    )
df5af2085494f4dfe1ce22a8c6feaed71ebad5e7
3,638,199