Columns — content: string (35 to 762k chars); sha1: string (40 chars); id: int64 (0 to 3.66M)
def generate(*, artifacts: artifacts_types.ModelArtifacts, name: str) -> str:
    """
    Generate the class source from the schema.

    Args:
        artifacts: The artifacts of the model.
        name: The name of the model.

    Returns:
        The source code for the model class.
    """
    model_artifacts = models_file_artifacts.calculate(artifacts=artifacts, name=name)
    return _source.generate(artifacts=model_artifacts)
6a510f031a9971a49057e114cc129f05737226df
22,400
import numpy as np


def get_rot_mat_kabsch(p_matrix, q_matrix):
    """
    Get the optimal rotation matrix with the Kabsch algorithm.

    Notation is from https://en.wikipedia.org/wiki/Kabsch_algorithm

    Arguments:
        p_matrix: (np.ndarray)
        q_matrix: (np.ndarray)

    Returns:
        (np.ndarray) rotation matrix
    """
    h = np.matmul(p_matrix.transpose(), q_matrix)
    u, _, vh = np.linalg.svd(h)
    d = np.linalg.det(np.matmul(vh.transpose(), u.transpose()))
    int_mat = np.identity(3)
    int_mat[2, 2] = d
    rot_matrix = np.matmul(np.matmul(vh.transpose(), int_mat), u.transpose())
    return rot_matrix
2cdd46af7f6f05acec23a6cf20e8ca561126c4f2
22,401
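A quick sanity check for get_rot_mat_kabsch above — a sketch with illustrative points (the point set, rotation angle, and tolerance are arbitrary choices, not part of the original snippet):

import numpy as np

# Build a known rotation about z and a point set q that is p rotated by it.
p = np.eye(3)  # three unit points
theta = np.pi / 4
rot_z = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                  [np.sin(theta),  np.cos(theta), 0.0],
                  [0.0,            0.0,           1.0]])
q = p @ rot_z.T

# The recovered matrix should map p onto q (row-vector convention).
r = get_rot_mat_kabsch(p, q)
assert np.allclose(p @ r.T, q, atol=1e-8)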
def _msrest_next(iterator):
    """To avoid: TypeError: StopIteration interacts badly with generators
    and cannot be raised into a Future
    """
    try:
        return next(iterator)
    except StopIteration:
        raise _MsrestStopIteration()
fe1877cfe4c05adb8bc082ea6a7d1ef54e324386
22,402
def record_export(record=None, export_format=None, pid_value=None, permissions=None):
    """Export marc21 record page view."""
    exporter = current_app.config.get("INVENIO_MARC21_RECORD_EXPORTERS", {}).get(
        export_format
    )
    if exporter is None:
        abort(404)

    options = current_app.config.get(
        "INVENIO_MARC21_RECORD_EXPORTER_OPTIONS",
        {
            "indent": 2,
            "sort_keys": True,
        },
    )

    serializer = obj_or_import_string(exporter["serializer"])(options=options)
    exported_record = serializer.serialize_object(record.to_dict())

    return render_template(
        "invenio_records_marc21/records/export.html",
        pid_value=pid_value,
        export_format=exporter.get("name", export_format),
        exported_record=exported_record,
        record=Marc21UIJSONSerializer().dump_one(record.to_dict()),
        permissions=permissions,
    )
2875c0fb019d5b8851c7cdcc31ac6ccd8e6b4002
22,403
from typing import List
from typing import Tuple
from typing import Optional


def body_range(
        operators: List[str],
        font_changes: List[Tuple]) -> Tuple[Optional[int], Optional[int]]:
    """Given some assumptions about how headers and footers are formatted,
    find the operations describing the body text of a page."""
    # font_changes: (idx, weight, size)
    thresh = 20.0
    if font_changes[0][2] > thresh:
        # if the first font is big, this is a chapter heading page;
        # we want everything after the next font change,
        # i.e. the first Td after this point
        if len(font_changes) < 2:
            start_idx = None
        else:
            start_idx = font_changes[1][0]
        # And the last three operations (for the page number) can be discarded.
        end_idx = len(operators) - 3
    elif font_changes[0][1] == "regular":
        # otherwise, we are looking for a (regular bold regular) pattern
        if len(font_changes) < 3:
            start_idx = None
        else:
            start_idx = font_changes[2][0] + 1
        # discard the final operation
        end_idx = len(operators) - 1
    elif font_changes[0][1] == "bold":
        # or a (bold regular) pattern
        if len(font_changes) < 2:
            start_idx = None
        else:
            start_idx = font_changes[1][0] + 1 + 2  # (to skip over page number)
        # discard the final operation
        end_idx = len(operators) - 1
    else:
        start_idx = None
        end_idx = None

    if start_idx is not None and start_idx < len(operators):
        start_idx = operators[start_idx:].index(b"Td") + start_idx
    if end_idx is not None and end_idx > len(operators):
        end_idx = None
    return start_idx, end_idx
aac320631a53653a770ba362c4826fca9f8fe673
22,404
import math


def sine(value, default=_SENTINEL):
    """Filter and function to get sine of the value."""
    try:
        return math.sin(float(value))
    except (ValueError, TypeError):
        if default is _SENTINEL:
            warn_no_default("sin", value, value)
            return value
        return default
48cfcdef750ce497f8f36acee74c440ec244d31f
22,405
def wmt_affine_base_1e4():
    """Set of hyperparameters."""
    hparams = wmt_affine_base()
    hparams.kl_reg = 1e-4
    hparams.learning_rate_constant = 2.0
    hparams.learning_rate_warmup_steps = 8000
    return hparams
f6d047737846aa0d4518045709f87b7b5f66d6fa
22,406
import random

import numpy as np


def random_crop_with_constraints(bbox, size, height, width, min_scale=0.3,
                                 max_scale=1, max_aspect_ratio=2,
                                 constraints=None, max_trial=1000):
    """Crop an image randomly with bounding box constraints.

    This data augmentation is used in training of Single Shot Multibox
    Detector [#]_. More details can be found in the data augmentation section
    of the original paper.

    .. [#] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy,
       Scott Reed, Cheng-Yang Fu, Alexander C. Berg.
       SSD: Single Shot MultiBox Detector. ECCV 2016.

    Parameters
    ----------
    bbox : numpy.ndarray
        Numpy.ndarray with shape (N, 4+) where N is the number of bounding boxes.
        The second axis represents attributes of the bounding box.
        Specifically, these are :math:`(x_{min}, y_{min}, x_{max}, y_{max})`;
        we allow additional attributes other than coordinates, which stay
        intact during bounding box transformations.
    size : tuple
        Tuple of length 2 of image shape as (width, height).
    min_scale : float
        The minimum ratio between a cropped region and the original image.
        The default value is :obj:`0.3`.
    max_scale : float
        The maximum ratio between a cropped region and the original image.
        The default value is :obj:`1`.
    max_aspect_ratio : float
        The maximum aspect ratio of cropped region.
        The default value is :obj:`2`.
    constraints : iterable of tuples
        An iterable of constraints. Each constraint should be in
        :obj:`(min_iou, max_iou)` format. It means no constraint if
        :obj:`min_iou` or :obj:`max_iou` is set to :obj:`None`.
        If this argument defaults to :obj:`None`,
        :obj:`((0.1, None), (0.3, None), (0.5, None), (0.7, None),
        (0.9, None), (None, 1))` will be used.
    max_trial : int
        Maximum number of trials for each constraint before exit no matter what.

    Returns
    -------
    numpy.ndarray
        Cropped bounding boxes with shape :obj:`(M, 4+)` where M <= N.
    tuple
        Tuple of length 4 as (x_offset, y_offset, new_width, new_height).
    """
    # default params in paper
    if constraints is None:
        constraints = (
            (0.1, None),
            (0.3, None),
            (0.5, None),
            (0.7, None),
            (0.9, None),
            (None, 1),
        )
    if len(bbox) == 0:
        constraints = []
    w, h = size
    candidates = []
    for min_iou, max_iou in constraints:
        min_iou = -np.inf if min_iou is None else min_iou
        max_iou = np.inf if max_iou is None else max_iou
        for _ in range(max_trial):
            scale = random.uniform(min_scale, max_scale)
            aspect_ratio = random.uniform(
                max(1 / max_aspect_ratio, scale * scale),
                min(max_aspect_ratio, 1 / (scale * scale)))
            crop_h = int(height * scale / np.sqrt(aspect_ratio))
            crop_w = int(width * scale * np.sqrt(aspect_ratio))
            crop_t = random.randrange(h - crop_h)
            crop_l = random.randrange(w - crop_w)
            crop_bb = np.array((crop_l, crop_t, crop_l + crop_w, crop_t + crop_h))
            iou = bbox_iou(bbox, crop_bb[np.newaxis])
            if min_iou <= iou.min() and iou.max() <= max_iou:
                top, bottom = crop_t, crop_t + crop_h
                left, right = crop_l, crop_l + crop_w
                candidates.append((left, top, right - left, bottom - top))
                break
    # randomly select one
    while candidates:
        crop = candidates.pop(np.random.randint(0, len(candidates)))
        new_bbox = bbox_crop(bbox, crop, allow_outside_center=False)
        if new_bbox.size < 1:
            continue
        new_crop = (crop[0], crop[1], crop[2], crop[3])
        return new_bbox, new_crop
    return random_crop_with_constraints(bbox, (w, h), height, width,
                                        min_scale=0.9, max_scale=1,
                                        max_trial=50)
787c341f3f496eeedce18c42462efa5f5ba6515b
22,407
import re


def unravelContent(originalData):
    """
    This is the primary function responsible for creating an alternate data
    stream of unraveled data.

    Args:
        originalData: Script content

    Returns:
        contentData: Unraveled additional content
    """
    contentData = normalize(originalData)
    loopCount = 0
    while True:
        modificationFlag = None
        # Reversed Strings - Changes STATE
        # Looks only in originalData, can be problematic flipping unraveled content back and forth.
        reverseString = ["noitcnuf", "marap", "nruter", "elbairav", "tcejbo-wen", "ecalper"]
        if any(entry in originalData.lower() for entry in reverseString):
            contentData, modificationFlag = reverseStrings(originalData, contentData, modificationFlag)
        # Decompress Streams - Changes STATE
        if all(entry in contentData.lower() for entry in ["streamreader", "frombase64string"]) or \
                all(entry in contentData.lower() for entry in ["deflatestream", "decompress"]) or \
                all(entry in contentData.lower() for entry in ["memorystream", "frombase64string"]):
            contentData, modificationFlag = decompressContent(contentData, modificationFlag)
        # Base64 Decodes - Changes STATE
        if re.search("[A-Za-z0-9+/=]{30,}", contentData):
            contentData, modificationFlag = decodeBase64(contentData, modificationFlag)
        # Decrypts SecureStrings - Changes STATE
        if "convertto-securestring" in contentData.lower() and \
                re.search("(?:[0-9]{1,3},){15,}[0-9]{1,3}", contentData.replace(" ", "")) and \
                re.search("[A-Za-z0-9+=/]{255,}", contentData):
            contentData, modificationFlag = decryptStrings(contentData, modificationFlag)
        # Normalize / De-Obfuscate the new contents before proceeding.
        contentData = normalize(contentData)
        if modificationFlag is None:
            break
        loopCount += 1
    return contentData
16c5c5b0bc1de026aa4ed8f931e171e11d3ecacc
22,408
import os
import pickle
import subprocess

import qmt


def fcwrapper(pyenv='python2', instruction=None, data=None, reprint_output=False):
    """Wrapper to isolate FreeCAD Python 2.7 calls from the Python 3 code base.

    :param str pyenv: Python interpreter, defaults to 'python2'.
    :param str instruction: A registered instruction for the QMT FreeCAD module.
    :param data: Any data type serialisable through pickle.
    :param bool reprint_output: Reprint suppressed output of wrapped call.
    :return: Any data type serialisable through pickle.
    """
    qmtPath = os.path.join(os.path.dirname(qmt.__file__))
    runPath = os.path.join(qmtPath, 'geometry', 'freecad', 'run.py')
    serial_data = pickle.dumps(data, protocol=2)
    proc = subprocess.Popen([pyenv, runPath, instruction],
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    output = proc.communicate(serial_data)
    # output[1] not checked because stderr is used for mere warnings too often.
    if proc.returncode != 0:
        print(output[1].decode())
        print(os.linesep + ' --- END OF FC WRAPPED STDERR ---' + os.linesep)
        raise ValueError('pywrapper returned ' + str(proc.returncode))
    # The returned serialised byte stream is demarcated by the separator string
    # "MAGICQMTRANSFERBYTES". Most data preceding the separator corresponds to
    # FreeCAD notifications and gets discarded.
    pipe_data = output[0].decode().split('MAGICQMTRANSFERBYTES')
    if reprint_output is True:
        print(os.linesep + ' --- FC WRAPPED STDOUT ---' + os.linesep)
        print(str(*pipe_data[0:-1]))
        print(os.linesep + ' --- FC WRAPPED STDERR ---' + os.linesep)
        print(output[1].decode())
        print(os.linesep + ' --- END OF FC WRAPPED STDERR ---' + os.linesep)
    serial_data = pipe_data[-1].encode()
    return pickle.loads(serial_data)
376c1ee11d8b4b55ac3ac7ad479aad7b487aa0eb
22,409
import hashlib
import struct

import M2Crypto
from M2Crypto import m2


def quote_verify(data, validation, aik, pcrvalues):
    """Verify that a generated quote came from a trusted TPM and matches the
    previously obtained PCR values

    :param data: The TPM_QUOTE_INFO structure provided by the TPM
    :param validation: The validation information provided by the TPM
    :param aik: The object representing the Attestation Identity Key
    :param pcrvalues: A dictionary containing the PCRs read from the TPM
    :returns: True if the quote can be verified, False otherwise
    """
    select = 0
    maxpcr = 0

    # Verify that the validation blob was generated by a trusted TPM
    pubkey = aik.get_pubkey()
    n = m2.bin_to_bn(pubkey)
    n = m2.bn_to_mpi(n)
    e = m2.hex_to_bn("010001")
    e = m2.bn_to_mpi(e)
    rsa = M2Crypto.RSA.new_pub_key((e, n))

    m = hashlib.sha1()
    m.update(data)
    md = m.digest()

    try:
        ret = rsa.verify(md, str(validation), algo='sha1')
    except M2Crypto.RSA.RSAError:
        return False

    # And then verify that the validation blob corresponds to the PCR
    # values we have
    values = bytearray()
    for pcr in sorted(pcrvalues):
        values += pcrvalues[pcr]
        select |= (1 << pcr)
        maxpcr = pcr

    if maxpcr < 16:
        header = struct.pack('!H', 2)
        header += struct.pack('@H', select)
        header += struct.pack('!I', len(values))
    else:
        header = struct.pack('!H', 4)
        header += struct.pack('@I', select)
        header += struct.pack('!I', len(values))

    pcr_blob = header + values

    m = hashlib.sha1()
    m.update(pcr_blob)
    pcr_hash = m.digest()

    if pcr_hash == data[8:28]:
        return True
    else:
        return False
bf23af17310252ff4a6fe73d16fbd2fffbc49618
22,410
def _rectify_base(base):
    """
    transforms base shorthand into the full list representation

    Example:
        >>> assert _rectify_base(NoParam) is DEFAULT_ALPHABET
        >>> assert _rectify_base('hex') is _ALPHABET_16
        >>> assert _rectify_base('abc') is _ALPHABET_26
        >>> assert _rectify_base(10) is _ALPHABET_10
        >>> assert _rectify_base(['1', '2']) == ['1', '2']
        >>> import pytest
        >>> assert pytest.raises(TypeError, _rectify_base, 'uselist')
    """
    if base is NoParam or base == 'default':
        return DEFAULT_ALPHABET
    elif base in [26, 'abc', 'alpha']:
        return _ALPHABET_26
    elif base in [16, 'hex']:
        return _ALPHABET_16
    elif base in [10, 'dec']:
        return _ALPHABET_10
    else:
        if not isinstance(base, (list, tuple)):
            raise TypeError(
                'Argument `base` must be a key, list, or tuple; not {}'.format(
                    type(base)))
        return base
d88ce9edc3a4e134d04b97c6e5804590da90eb67
22,411
import gzip
import json


def loadjson(filename):
    """Load a python object saved with savejson."""
    if filename.endswith('.gz'):
        with gzip.open(filename, "rb") as f:
            obj = json.loads(f.read().decode("ascii"))
    else:
        with open(filename, 'rt') as fh:
            obj = json.load(fh)
    return obj
5c8bc1446c5d48ebee51f11711b38ddce74835d2
22,412
from lxml import etree  # assumed: `pretty_print` is an lxml-only option


def _write_ffxml(xml_compiler, filename=None):
    """Generate an ffxml file from a compiler object.

    Parameters
    ----------
    xml_compiler : _TitratableForceFieldCompiler
        The object that contains all the ffxml template data
    filename : str, optional
        Location and name of the file to save. If not supplied, returns the
        ffxml template as a string.

    Returns
    -------
    str or None
    """
    # Generate the string version.
    xmlstring = etree.tostring(
        xml_compiler.ffxml, encoding="utf-8", pretty_print=True, xml_declaration=False
    )
    xmlstring = xmlstring.decode("utf-8")

    if filename is not None:
        with open(filename, "w") as fstream:
            fstream.write(xmlstring)
    else:
        return xmlstring
b74434aeb481b11b7a9537932778fb596be205e7
22,413
from typing import List
from typing import Union
from datetime import datetime, timedelta

import pytz
import pyexiv2


def get_utctime(
    md_keys: List[str], md: Union[pyexiv2.metadata.ImageMetadata, None]
) -> Union[datetime, None]:
    """Extract the datetime (to the nearest millisecond)"""
    utctime = None
    dt_key = "Exif.Image.DateTime"
    if md is not None:
        if dt_key in md_keys:
            utctime = datetime.strptime(md[dt_key].raw_value, "%Y:%m:%d %H:%M:%S")
            # utctime can also be obtained with DateTimeOriginal:
            # utctime = datetime.strptime(
            #     md["Exif.Photo.DateTimeOriginal"].raw_value, "%Y:%m:%d %H:%M:%S"
            # )
            # extract the millisecond from the EXIF metadata:
            subsec = int(md["Exif.Photo.SubSecTime"].raw_value)
            sign = -1.0 if subsec < 0 else 1.0
            millisec = sign * 1e3 * float("0.{}".format(abs(subsec)))
            utctime += timedelta(milliseconds=millisec)
            timezone = pytz.timezone("UTC")
            utctime = timezone.localize(utctime)
    return utctime
6f95e50725e865b36a31be48722376301cca4cc1
22,414
from os import getcwd
from os.path import abspath, dirname, exists, isabs, join


def find_file_recursively(file_name, start_dir=None, stop_dir=None):
    """
    This method will walk through the directory tree upwards starting at the
    given directory searching for a file with the given name.

    :param file_name: The name of the file of interest. Make sure it does not
        contain any path information.
    :type file_name: str
    :param start_dir: The directory where the search should start. If it is
        omitted, the cwd is used.
    :type start_dir: str
    :param stop_dir: The directory where the search should stop. If this is
        omitted, it will stop at the root directory.
    :type stop_dir: str
    :rtype: str
    :return: The file path where the file was first found.
    """
    # Default to the cwd at call time; a getcwd() default in the signature
    # would be evaluated only once, at import time.
    if start_dir is None:
        start_dir = getcwd()
    cur_dir = abspath(start_dir) if not isabs(start_dir) else start_dir
    while True:
        if exists(join(cur_dir, file_name)):
            # The file of interest exists in the current directory
            # so return it.
            return join(cur_dir, file_name)
        # The file was not found yet so try in the parent directory.
        parent_dir = dirname(cur_dir)
        if parent_dir == cur_dir or parent_dir == stop_dir:
            # We are either at the root directory or reached the stop
            # directory.
            return None
        else:
            cur_dir = parent_dir
a7100e37a4f6244090c8b4363cda2b9893d27768
22,415
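A usage sketch for find_file_recursively above (the marker file name is hypothetical):

# Walk upwards from the current directory looking for a project marker file.
config_path = find_file_recursively("setup.py")
if config_path is None:
    print("no setup.py found up to the filesystem root")
else:
    print("project root file:", config_path)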
def text_cleaning(any_text, nlp):
    """
    The function filters out stop words from any text and returns tokenized
    and lemmatized words
    """
    doc = nlp(any_text.lower())
    result = []
    for token in doc:
        if token.text in nlp.Defaults.stop_words:
            continue
        # if token.is_punct:
        #     continue
        result.append(token.lemma_)
    clean_text = " ".join(result)
    return clean_text
7383f075a501c7c11565eac2c825c55f37e2a637
22,416
def shave(q, options, undef=MISSING, has_undef=1, nbits=12):
    """
    Shave variable. On input, nbits is the number of mantissa bits to keep
    out of maximum of 24.
    """
    # no compression, no shave
    # ------------------------
    if not options.zlib:
        return q

    # Determine shaving parameters
    # ----------------------------
    xbits = 24 - nbits
    shp = q.shape
    rank = len(shp)
    if rank == 2:  # yx
        chunksize = shp[0] * shp[1]
    elif rank == 3:  # zyx
        chunksize = shp[1] * shp[2]
    else:
        raise ValueError("invalid rank=%d" % rank)

    # Shave it
    # --------
    qs, rc = shave32(q.ravel(), xbits, has_undef, undef, chunksize)
    if rc:
        raise ValueError("error on return from shave32, rc=%d" % rc)

    return qs.reshape(shp)
7d5f907f46a703c49f02f9cff3107d67ecc8c2a6
22,417
from typing import Union
from typing import List
from typing import Sized
from typing import Iterable

import numpy as np
import pandas as pd


def any_none_nan(values: Union[List, np.ndarray, pd.Series, pd.DataFrame, object]) -> bool:
    """Can be used with a single value or a collection of values. Returns `True` if any item
    in `values` are `None`, `np.NaN`, `pd.NA`, `pd.NaT` or if the length of `values` is `0`.

    Args:
        values: A collection of values to check.

    Returns:
        bool - True if any item in `values` are None/np.NaN
    """
    # pylint: disable=too-many-return-statements
    if values is None or values is np.NaN or values is pd.NA or values is pd.NaT:  # pylint: disable=nan-comparison
        return True

    if isinstance(values, Sized) and not isinstance(values, str) and len(values) == 0:
        return True

    if isinstance(values, pd.Series):
        return values.isnull().any() or values.isna().any()

    if isinstance(values, pd.DataFrame):
        return values.isnull().any().any() or values.isna().any().any()

    if isinstance(values, Iterable) and not isinstance(values, str):
        if len(values) == 0:
            return True
        return any((any_none_nan(x) for x in values))

    try:
        if not isinstance(values, str) and None in values:
            return True
    except Exception:  # pylint: disable=broad-except  # noqa
        pass

    try:
        if np.isnan(values).any():
            return True
    except TypeError:
        return False

    return False
659e3d90fe02487820a3cdc711422a7054500b89
22,418
from django.db import DatabaseError, IntegrityError, transaction


def get_or_create_package(name, epoch, version, release, arch, p_type):
    """Get or create a Package object. Returns the object. Returns None if
    the package is the pseudo package gpg-pubkey, or if it cannot create it.
    """
    package = None
    name = name.lower()

    if name == 'gpg-pubkey':
        return

    if epoch in [None, 0, '0']:
        epoch = ''

    try:
        with transaction.atomic():
            package_names = PackageName.objects.all()
            p_name, c = package_names.get_or_create(name=name)
    except IntegrityError as e:
        error_message.send(sender=None, text=e)
        p_name = package_names.get(name=name)
    except DatabaseError as e:
        error_message.send(sender=None, text=e)

    package_arches = PackageArchitecture.objects.all()
    with transaction.atomic():
        p_arch, c = package_arches.get_or_create(name=arch)

    try:
        with transaction.atomic():
            packages = Package.objects.all()
            package, c = packages.get_or_create(name=p_name,
                                                arch=p_arch,
                                                epoch=epoch,
                                                version=version,
                                                release=release,
                                                packagetype=p_type)
    except IntegrityError as e:
        error_message.send(sender=None, text=e)
        package = packages.get(name=p_name,
                               arch=p_arch,
                               epoch=epoch,
                               version=version,
                               release=release,
                               packagetype=p_type)
    except DatabaseError as e:
        error_message.send(sender=None, text=e)

    return package
e8a13b1a34e16c5f4e6de96bb66fb314fd301c10
22,419
def sort_dict(value, case_sensitive=False, by='key', reverse=False, index=0):
    """
    Sort a dictionary.

    :param value: the dictionary object
    :param case_sensitive: whether string comparison is case sensitive
    :param by: what to sort by ('key' or 'value')
    :param reverse: sort order (True for descending, False for ascending)
    :param index: index to use when the value is a list (sort by that element
        of the list)
    :return:
    """
    if by == 'key':
        pos = 0
    elif by == 'value':
        pos = 1
    else:
        raise FilterArgumentError('You can only sort by either "key" or "value"')

    def sort_func(item):
        value = item[pos]
        if index:
            try:
                value = value[index]
            except Exception:
                pass
        if isinstance(value, string_types) and not case_sensitive:
            value = value.lower()
        return value

    return sorted(value.items(), key=sort_func, reverse=reverse)
a41034cbd9ebd35cddcd0b4f321ffe8dc2613791
22,420
def initialize_system(name=None):
    """Initializes a distributed NPU system for use with TensorFlow.

    Args:
        name: Name of ops.

    Returns:
        The npu init ops which will open the NPU system using `Session.run`.
    """
    return NPUInit(name)
3b1d50862954e57c8206af974412f79492982e23
22,421
import numpy as np


def zmap_1perm_2samp(X, cat1, cat2=None, rand_seed=-1, fstat=None, name=None):
    """
    One permutation.
    X: array of shape (D, N, P) — D points, N subjects, P dims
    return: Y (D,) zvalue at each point
    """
    if fstat is None:
        fstat = hotelling_2samples
        # name = "MP-Hotelling"
    if cat2 is None:
        cat2 = np.logical_not(cat1)
    # Data
    if rand_seed < 0:
        # Without permutation (cat could be replaced by idx[cat])
        ix1 = cat1
        ix2 = cat2
    else:
        # With permutation
        np.random.seed(rand_seed)
        idx = np.arange(X.shape[1])[cat1 | cat2]
        per = np.random.permutation(idx.size)
        nsplit = cat1.sum()
        ix1 = idx[per][:nsplit]
        ix2 = idx[per][nsplit:]
    # Run
    Y = fstat(X[:, ix1, :], X[:, ix2, :])
    if name is not None:
        print(name + " {0}, {1}\n".format(Y.min(), Y.max()))
    return Y
87ffc6a0c49750e9a39295c2775483c4812d0205
22,422
import numpy as np


def grids_have_same_coords(grid0, grid1):
    """Whether two `ESMF.Grid` instances have identical coordinates.

    :Parameters:

        grid0, grid1: `ESMF.Grid`, `ESMF.Grid`
            The `ESMF` Grid instances to be compared

    :Returns:

        `bool`
            Whether or not the Grids have identical coordinates.
    """
    coords0 = grid0.coords
    coords1 = grid1.coords
    if len(coords0) != len(coords1):
        return False
    for c, d in zip(coords0, coords1):
        if len(c) != len(d):
            return False
        for a, b in zip(c, d):
            if not np.array_equal(a, b):
                return False
    return True
2fc5001a85694ac9b7b31a383436cc8792b665a4
22,423
import logging
import os
import subprocess


def _CreateTargetProfDataFileFromProfRawFiles(target, profraw_file_paths):
    """Returns a relative path to target profdata file by merging target
    profraw files.

    Args:
        target: The target whose profraw files are to be merged.
        profraw_file_paths: A list of relative paths to the profdata data files
            that are to be merged.

    Returns:
        A relative path to the merged coverage profdata file.

    Raises:
        CalledProcessError: An error occurred merging profdata files.
    """
    logging.info('Creating target profile data file.')
    logging.debug('Merging target profraw files to create target profdata file.')
    profdata_file_path = os.path.join(OUTPUT_DIR, '%s.profdata' % target)

    try:
        subprocess_cmd = [
            LLVM_PROFDATA_PATH, 'merge', '-o', profdata_file_path, '-sparse=true'
        ]
        subprocess_cmd.extend(profraw_file_paths)

        output = subprocess.check_output(subprocess_cmd)
        logging.debug('Merge output: %s', output)
    except subprocess.CalledProcessError as error:
        logging.error(
            'Failed to merge target profraw files to create target profdata.')
        raise error

    logging.debug('Finished merging target profraw files.')
    logging.info('Target "%s" profile data is created as: "%s".', target,
                 profdata_file_path)
    return profdata_file_path
18ce49fbbea79b683824cc0270c74a25a2c3429b
22,424
def get_mwa_eor_spec(nu_obs=150.0, nu_emit=1420.40575, bw=8.0, tint=1000.0,
                     area_eff=21.5, n_stations=50, bmax=100.0):
    """
    Parameters
    ----------
    nu_obs : float or array-like, optional
        observed frequency [MHz]
    nu_emit : float or array-like, optional
        rest frequency [MHz]
    bw : float or array-like, optional
        frequency bandwidth [MHz]
    tint : float or array-like, optional
        integration time [hour]
    area_eff : float or array-like, optional
        effective area per station [m ** 2]
    n_stations : int or array-like, optional
        number of stations
    bmax : float or array-like, optional
        maximum baseline [wavelength]

    Returns
    -------
    nu_obs, nu_emit, bw, tint, area_eff, n_stations, bmax
    """
    return nu_obs, nu_emit, bw, tint, area_eff, n_stations, bmax
5bc97d666df938c4e5f42d2d429505e2b7f74004
22,425
import statsmodels.api as sm  # assumed: `sm` is statsmodels' formula-capable API


def baseModel(data):
    """
    Original model.
    """
    formula = "label_code ~ education_num + capital_gain + capital_loss + hours_per_week"
    model = sm.Logit.from_formula(formula, data=data)
    re = model.fit()
    return re
7d66020dc2b527198c0b432b8c8fa9b703335d72
22,426
def load_screen(options: list) -> int:
    """Callback for loading a screen."""
    return get_selection(options)
2c48fad6a644dad3ccf9ce1d2b4cbff8b841b043
22,427
import requests
from requests.exceptions import HTTPError


def retrieve_url(url):
    """
    Retrieve the URL and parse the response for success/failure and a
    structured data output.

    :param url: The fully qualified URL which you want to query.
        Example: https://www.google.com.au
    :type url: string
    :return resp_ok: A True/False boolean to indicate whether a valid response
        was retrieved from the URL request
    :type resp_ok: boolean
    :return output: The JSON decoded output from the request. Example:
        - When URL is successful, a dictionary is returned
        - When URL is not successful, a string is returned
    """
    # Try/Except block to retrieve the URL
    try:
        # Get the URL
        resp = requests.get(url)
    # Catch exception, print HTTP error
    except HTTPError as http_err:
        print(f"HTTP error occurred: {http_err}")
        # resp is unbound if the request failed, so bail out here
        return False, str(http_err)
    # Catch exception, print other error
    except Exception as err:
        print(f"Other error occurred: {err}")
        return False, str(err)
    # Debug printouts
    # print(f"Response {resp}")
    # print(f"Response OK? - {resp.ok}")
    # Assign response OK to a variable
    resp_ok = resp.ok
    # If/Else block to assess whether response is OK
    if resp_ok is True:
        # Assign JSON decoded output to a variable
        output = resp.json()
    else:
        # Assign the raw text string output to a variable
        output = resp.text
    # Return response ok and output
    return resp_ok, output
55b26ab56c5f15a003e3139ed24ae66bae771708
22,428
def count_cells(notebook):
    """
    The function takes a notebook and returns the number of cells

    Args:
        notebook(Notebook): python object representing the notebook

    Returns:
        len(nb_dict["cells"]): integer value representing the number of cells
        into the notebook

    A way you might use me is

    cells_count = count_cells(nb)
    """
    nb_dict = notebook.nb_dict
    return len(nb_dict["cells"])
19ec2631888ecbba51fa51870694a7217024e5ae
22,429
from collections import OrderedDict  # typing.OrderedDict cannot be instantiated


def assignments():
    """
    This is called for the assignments tab on the instructor interface.
    When an assignment is selected, get_assignment is called to gather the
    details for that assignment.
    """
    response.title = "Assignments"

    cur_assignments = db(db.assignments.course == auth.user.course_id).select(
        orderby=db.assignments.duedate
    )
    assigndict = OrderedDict()
    for row in cur_assignments:
        assigndict[row.id] = row.name

    tags = []
    tag_query = db(db.tags).select()
    for tag in tag_query:
        tags.append(tag.tag_name)

    course = get_course_row(db.courses.ALL)
    base_course = course.base_course
    chapter_labels = []
    chapters_query = db(db.chapters.course_id == base_course).select(
        db.chapters.chapter_label
    )
    for row in chapters_query:
        chapter_labels.append(row.chapter_label)

    # See `models/db_ebook.py` for course_attributes table
    set_latex_preamble(course.base_course)

    return dict(
        coursename=auth.user.course_name,
        confirm=False,
        course_id=auth.user.course_name,
        assignments=assigndict,
        tags=tags,
        chapters=chapter_labels,
        toc=_get_toc_and_questions(),  # <-- This gets the readings and questions
        course=course,
    )
ac7834a96b876cadbcc91a7df9b59b9e51794142
22,430
from typing import Iterable
from typing import Dict


def get_sentences(data: Iterable[JSON_Object], match_by: str) -> Dict[Hash, JSON_Object]:
    """
    Collect sentence objects w.r.t. matching criteria.

    :param data: Iterable of sentence objects
    :param match_by: Matching criteria / method
    :return: Dict of hash: sentence objects
    """
    return {
        hash_sentence(sentence, match_by): sentence for sentence in data
    }
53430fcd65315dc98bd4bd0ac0c8a4d51dcef651
22,431
import re


def getInterfaceText(caller_text, callee_method):
    """
    This method parses method text that we grabbed from getMethodSignature
    for potential interface text that is present. It makes a sort of 'best
    guess' as to which lines of the caller method text invoke the callee.
    This current implementation uses only the 'in' string method to detect
    interfaces and that is why it is a 'best guess'; it is by no means perfect.
    """
    # pass in the caller method text and then search it for the callee method call
    callee_method = callee_method.split('#')[0].strip()  # get the dict_link out
    interface_text = []
    caller_text = commentRemover(caller_text)  # don't want to trigger interface text on a comment
    caller_text = caller_text.split('\n')
    i = 0
    end_header_regex = re.compile(r'^.+?\)')
    while i < len(caller_text):
        # remove method header from the interface text as to not accidentally trigger regex later
        to_remove = end_header_regex.search(caller_text[i])  # searches for the end parentheses where the header ends
        if not to_remove:  # for when the method header spans several lines
            # TODO replace this regex with regex for '{' not parentheses (?)
            del caller_text[i]  # remove every line of the method header leading up to the end line
            i -= 1
        else:  # we are at the end line of the header
            caller_text[i] = caller_text[i].replace(to_remove.group(), '')  # replace this line with empty string
            break
        i += 1
    # after this loop ^ we should have isolated just the caller method text for searching of interface
    if '::' in callee_method:  # class defined
        tmp = ''
        done = True
        for line in caller_text:
            line = line.strip()
            if not done:  # if we haven't reached the end of the interface, i.e. it extends over many lines
                tmp = tmp + '\n' + line  # capture the lines
            if callee_method in line:  # found start of interface
                done = False
                tmp = tmp + line  # capture it
            if ';' in line:  # if the line has a semicolon we know that we have reached the end
                done = True
            if done and tmp != '':  # we have the end of this interface, add it to the list and then reset tmp
                interface_text.append(tmp)
                tmp = ''
        if len(interface_text) == 0:  # didn't find it with the class:: notation (usually because the method is in the same class)
            split = callee_method.split("::")  # here we will search for just the method name
            cclass = split[0].strip()
            cname = split[1].strip()
            for line in caller_text:
                line = line.strip()
                if not done:  # same process as previous loop but doesn't include ClassName:: in search
                    tmp = tmp + '\n' + line  # this happens in C++ when the namespace is implicit
                if cname in line:  # found start
                    done = False
                    tmp = tmp + line
                if ';' in line:
                    done = True
                if done and tmp != '':
                    interface_text.append(tmp)
                    tmp = ''
    else:  # there is no class defined (ex. memset, or other built in functions, or C style methods with no class)
        tmp = ''
        done = True
        for line in caller_text:  # same method as above, searches method text and appends results to a list
            line = line.strip()
            if not done:
                tmp = tmp + '\n' + line
            if callee_method in line:  # found start
                done = False
                tmp = tmp + line
            if ';' in line:
                done = True
            if done and tmp != '':
                interface_text.append(tmp)
                tmp = ''
    return interface_text
98856a0db522a143274e79ad3841597498aa41d0
22,432
def str2bool(value):
    """
    Args:
        value - text to be converted to boolean
        True values: y, yes, true, t, on, 1
        False values: n, no, false, off, 0
    """
    # 'on' is included to match the documented true values
    return value in ['y', 'yes', 'true', 't', 'on', '1']
876a58c86b449ba3fac668a4ef2124ea31fda350
22,433
from hypothesis import strategies as st


def numeric_float(max_abs: float = 1e3) -> st.SearchStrategy:
    """Search strategy for numeric (non-inf, non-NaN) floats with bounded
    absolute value."""
    return st.floats(min_value=-max_abs, max_value=max_abs,
                     allow_nan=False, allow_infinity=False)
b44764d88147793792ef90d96c17ec9770737fde
22,434
def add2dict(dict, parent_list, key, value):
    """
    Add a key/value pair to a dictionary; the pair is added following the
    hierarchy of 'parents' as defined in the parent_list list. That is if
    parent_list is: ['5', '1'], and key='k', value='v', then the new, returned
    dictionary will have a value: dict['5']['1'][k] = v
    """
    d = dict
    for p in parent_list:
        if p not in d:
            d[p] = {}
        d = d[p]
    d[key] = value
    return dict
32252d3253283110eee2edb2eb216cfd777a710f
22,435
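A usage sketch for add2dict above, showing that existing branches are reused rather than overwritten:

d = add2dict({}, ['5', '1'], 'k', 'v')
assert d == {'5': {'1': {'k': 'v'}}}
d = add2dict(d, ['5', '1'], 'k2', 'v2')
assert d == {'5': {'1': {'k': 'v', 'k2': 'v2'}}}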
import numpy as np


def transform(x):
    """
    transform x1, x2 --->
    1, x1, x2, x1**2, x2**2, x1*x2, |x1 - x2|, |x1 + x2|
    """
    ones = np.ones(len(x))
    x1 = x[:, 0]
    x2 = x[:, 1]
    x1_sqr = x1**2
    x2_sqr = x2**2
    x1x2 = x1 * x2
    abs_x1_minus_x2 = abs(x1 - x2)
    abs_x1_plus_x2 = abs(x1 + x2)
    return np.stack([ones, x1, x2, x1_sqr, x2_sqr, x1x2,
                     abs_x1_minus_x2, abs_x1_plus_x2], axis=1)
380cc6e8f181cc192b11e3fd466526093a75e74b
22,436
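A worked example for the feature transform above (the sample points are illustrative):

import numpy as np

x = np.array([[1.0, 2.0],
              [3.0, 4.0]])
z = transform(x)
assert z.shape == (2, 8)  # 8 features per sample
# row [1, 2] -> [1, x1, x2, x1^2, x2^2, x1*x2, |x1-x2|, |x1+x2|]
assert np.allclose(z[0], [1, 1, 2, 1, 4, 2, 1, 3])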
import json


def gen_new_contact_json(csv_data):
    """
    Generate json with data about Subnets and their Contacts

    :param csv_data: entry data
    :return: Stats about created subnets
    """
    dist = {"subnets": csv_data}
    with open(f'{PATH}{CONTACTS_SUFIX}', 'w') as out_file:
        out_file.write(json.dumps(dist, indent=2, sort_keys=True))
    stat = len(dist["subnets"])
    return f'Reloaded {stat} subnets and their contacts. '
7615c76ea1c9fe392bd6d6d689b2b33a53beaa22
22,437
import numpy as np


def resize(a, shape):
    """
    If array a is larger than shape, crop a; if a is smaller than shape,
    pad a with zeros.

    Args:
        a (numpy array): 2D array to resize
        shape: desired shape of the return

    Returns:
        numpy array: array a resized according to shape
    """
    if a.shape[0] < shape[0]:
        a = np.pad(a, ((0, shape[0] - a.shape[0]), (0, 0)), mode="constant")
    if a.shape[1] < shape[1]:
        a = np.pad(a, ((0, 0), (0, shape[1] - a.shape[1])), mode="constant")
    if a.shape[0] > shape[0]:
        a = a[0:shape[0], :]
    if a.shape[1] > shape[1]:
        a = a[:, 0:shape[1]]
    return a
40e0829b8680ea5753b12b4bd24e591b9b222bcf
22,438
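A usage sketch for resize above (shapes chosen for illustration):

import numpy as np

a = np.arange(6).reshape(2, 3)
assert resize(a, (3, 3)).shape == (3, 3)   # padded with a zero row
assert (resize(a, (3, 3))[2] == 0).all()
assert resize(a, (2, 2)).shape == (2, 2)   # cropped to the top-left corner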
def _Run(args, holder, url_map_arg, release_track):
    """Issues requests necessary to import URL maps."""
    client = holder.client

    url_map_ref = url_map_arg.ResolveAsResource(
        args,
        holder.resources,
        default_scope=compute_scope.ScopeEnum.GLOBAL,
        scope_lister=compute_flags.GetDefaultScopeLister(client))

    data = console_io.ReadFromFileOrStdin(args.source or '-', binary=False)

    try:
        url_map = export_util.Import(
            message_type=client.messages.UrlMap,
            stream=data,
            schema_path=_GetSchemaPath(release_track))
    except yaml_validator.ValidationError as e:
        raise exceptions.ToolException(e.message)

    # Get existing URL map.
    try:
        url_map_old = url_maps_utils.SendGetRequest(client, url_map_ref)
    except apitools_exceptions.HttpError as error:
        if error.status_code != 404:
            raise error
        # Url Map does not exist, create a new one.
        return _SendInsertRequest(client, url_map_ref, url_map)

    # No change, do not send requests to server.
    if url_map_old == url_map:
        return

    console_io.PromptContinue(
        message=('Url Map [{0}] will be overwritten.').format(url_map_ref.Name()),
        cancel_on_no=True)

    # Populate id and fingerprint fields. These two fields are manually
    # removed from the schema files.
    url_map.id = url_map_old.id
    url_map.fingerprint = url_map_old.fingerprint

    return _SendPatchRequest(client, url_map_ref, url_map)
a04b277fa704e3cc8889a7a1feb7cf16b6040e91
22,439
import logging


def resolve_function(module, function):
    """
    Locate specified Python function in the specified Python package.

    :param module: A Python module
    :type module: ``types.ModuleType``
    :param function: Name of Python function
    :type ``str``
    :return: Function or None if not found.
    """
    func = None
    if function_exists(module, function):
        func = getattr(module, function)
    if not func:
        nuoca_log(logging.ERROR,
                  "Cannot find Python function %s in module %s" % (
                      function, module))
    return func
5885755f485d4dc243075aa9df6677cd52f3ebf8
22,440
from prettytable import from_db_cursor
from sqlalchemy import select  # the stdlib `select` module is not what is called below


def from_table(table, engine, limit=None):
    """
    Select data in a database table and put into prettytable.

    Create a :class:`prettytable.PrettyTable` from :class:`sqlalchemy.Table`.

    (Put the data from a database table into a prettytable.)
    """
    sql = select([table])
    if limit is not None:
        sql = sql.limit(limit)
    result_proxy = engine.execute(sql)
    return from_db_cursor(result_proxy.cursor)
df66b3f179d3bde600786b3bc590810ac410b6eb
22,441
import zipfile
from io import BytesIO, StringIO

import pandas as pd
import requests
from tqdm import tqdm


def futures_sgx_daily(trade_date: str = "2020/03/06", recent_day: str = "3") -> pd.DataFrame:
    """
    Futures daily data from sgx
    P.S. it will be slow if you do not use a VPN
    :param trade_date: the specific trade day you want to fetch
    :type trade_date: str e.g., "2020/03/06"
    :param recent_day: the data range near the specific trade day
    :type recent_day: str e.g. "3" means 3 days before the specific trade day
    :return: data from (trade_date - recent_day) to trade_date
    :rtype: pandas.DataFrame
    """
    big_df = pd.DataFrame()
    index_df = get_country_index(country="新加坡", index_name="FTSE Singapore",
                                 start_date="2020/01/01", end_date=trade_date)
    index_df.sort_index(inplace=True)
    index_df.reset_index(inplace=True)
    index_df.reset_index(inplace=True)
    index_df.index = index_df["index"] + 5840
    date_start = index_df.index[-1] + 1 - int(recent_day)
    date_end = index_df.index[-1] + 1
    for page in tqdm(range(date_start, date_end)):
        # page = 5883
        url = f"https://links.sgx.com/1.0.0/derivatives-daily/{page}/FUTURE.zip"
        r = requests.get(url)
        with zipfile.ZipFile(BytesIO(r.content)) as file:
            with file.open(file.namelist()[0]) as my_file:
                data = my_file.read().decode()
                if file.namelist()[0].endswith("txt"):
                    data_df = pd.read_table(StringIO(data))
                else:
                    data_df = pd.read_csv(StringIO(data))
        big_df = big_df.append(data_df)
    return big_df
4b2aba7adb48066db1343469541b2007caa82d37
22,442
import numpy as np


def draw_spectra(md, ds):
    """
    Generate best-fit spectra for all the test objects

    Parameters
    ----------
    md: model
        The Cannon spectral model
    ds: Dataset
        Dataset object

    Returns
    -------
    best_fluxes: ndarray
        The best-fit test fluxes
    best_ivars:
        The best-fit test inverse variances
    """
    # use the md/ds parameters rather than undefined `model`/`dataset` globals
    coeffs_all, covs, scatters, red_chisqs, pivots, label_vector = md.model
    nstars = len(ds.test_SNR)
    cannon_flux = np.zeros(ds.test_flux.shape)
    cannon_ivar = np.zeros(ds.test_ivar.shape)
    for i in range(nstars):
        x = label_vector[:, i, :]
        spec_fit = np.einsum('ij, ij->i', x, coeffs_all)
        cannon_flux[i, :] = spec_fit
        bad = ds.test_ivar[i, :] == SMALL ** 2
        cannon_ivar[i, :][~bad] = 1. / scatters[~bad] ** 2
    return cannon_flux, cannon_ivar
03344230339e66ac03ffa0ac2d5744475c311591
22,443
def get_response(
    schema,  # type: GraphQLSchema
    params,  # type: RequestParams
    catch_exc,  # type: Type[BaseException]
    allow_only_query=False,  # type: bool
    **kwargs  # type: Any
):
    # type: (...) -> Optional[ExecutionResult]
    """Get an individual execution result as response, with option to catch errors.

    This does the same as execute_graphql_request() except that you can catch
    errors that belong to an exception class that you need to pass as a parameter.
    """
    # noinspection PyBroadException
    execute = (
        execute_graphql_request_as_promise
        if kwargs.get("return_promise", False)
        else execute_graphql_request
    )
    try:
        execution_result = execute(schema, params, allow_only_query, **kwargs)
    except catch_exc:
        return None
    return execution_result
c451514b588956c59046140c42c18d79bb151170
22,444
def _get_status_arrays():
    """Get status for all arrays."""
    results = []
    try:
        # Get array(s) status for a site.
        result = get_status_arrays()
        if result is not None:
            results = result
        return results
    except Exception as err:
        message = str(err)
        current_app.logger.info(message)
        raise Exception(message)
911a418f728e10949e756098414b7e0809fc2108
22,445
from copy import deepcopy

import numpy as np


def get_cycle_amplitude(data, cycle, metric_to_use, hourly_period_to_exclude):
    """
    Given data (eg results[opposite_pair][substratification][substratification_level]
    ['take_simple_means_by_group_no_individual_mean']), a cycle, and a metric to use
    (max_minus_min or average_absolute_difference_from_mean), computes the cycle
    amplitude.
    """
    data = deepcopy(data)
    assert metric_to_use in ['max_minus_min', 'average_absolute_difference_from_mean']
    assert cycle in ['date_relative_to_period', 'local_hour', 'weekday', 'month', 'week_of_year']
    if cycle == 'date_relative_to_period':
        data[cycle] = data[cycle].loc[data[cycle].index.map(lambda x: np.abs(x) <= 14)]
        assert list(data[cycle].index) == list(range(-14, 15))
    if cycle == 'local_hour':
        if hourly_period_to_exclude is None:
            assert list(data[cycle].index) == list(range(24))
        else:
            assert len(hourly_period_to_exclude) == 2
            assert hourly_period_to_exclude[0] < hourly_period_to_exclude[1]
            data[cycle] = data[cycle].loc[data[cycle].index.map(
                lambda x: (x < hourly_period_to_exclude[0]) or (x > hourly_period_to_exclude[1]))]
            assert list(data[cycle].index) == [a for a in list(range(24))
                                               if a < hourly_period_to_exclude[0]
                                               or a > hourly_period_to_exclude[1]]
    if cycle == 'weekday':
        assert list(data[cycle].index) == list(['Friday', 'Monday', 'Saturday', 'Sunday',
                                                'Thursday', 'Tuesday', 'Wednesday'])
    if cycle == 'month':
        assert list(data[cycle].index) == list(range(1, 13))
    if cycle == 'week_of_year':
        assert list(data[cycle].index) == list(range(52))
    y = np.array(data[cycle]['mean'])
    y_mu = y.mean()
    average_absolute_difference_from_mean = np.mean(np.abs(y - y_mu))
    largest_difference = y.max() - y.min()
    if metric_to_use == 'max_minus_min':
        metric_val = largest_difference
    else:
        metric_val = average_absolute_difference_from_mean
    return metric_val
bbbb3a0e7d97da4710319135deb583705fbb5a55
22,446
import subprocess


def submit_job(scheduler_args, command):
    """Submit a job to the scheduler, returning the supplied job ID."""
    cl = ["qsub", "-cwd", "-b", "y", "-j", "y"] + scheduler_args + command
    status = subprocess.check_output(cl)
    match = _jobid_pat.search(status)
    return match.groups("jobid")[0]
81c85e652959501e622531553a77aaed6151413b
22,447
from urllib.request import urlopen

import requests
from tqdm import tqdm


def _download(url: str, dst: str) -> int:
    """
    @param: url to download file
    @param: dst place to put the file
    """
    file_size = int(urlopen(url).info().get("Content-Length", -1))
    r = requests.get(url, stream=True)
    with open(get_full_data_path(dst), "wb") as f:
        pbar = tqdm(total=int(r.headers['Content-Length']))
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
                pbar.update(len(chunk))
        pbar.close()
    return file_size
55d748465d83d9d2a5408d4995c2363cf88090b2
22,448
import numpy as np
from PIL import Image, ImageDraw


def target2line(target, img_size, k, eval=False):
    """
    target: line representative in grid [L, grid_h, grid_w]
    img_size: (width, height): Input image size, PIL Image size
    eval=False : Default. For inference. Line width not big.
    eval=True : For iou. Line width is bigger.
    return line_img
    """
    line_img = Image.new("L", img_size)
    draw = ImageDraw.Draw(line_img)
    resolution = 32 / pow(2, k)
    grid_h = int(img_size[1] // resolution)
    grid_w = int(img_size[0] // resolution)
    line_width = 4
    if not eval:
        line_width = 2
        # draw the debug grid only in inference mode, so the IoU mask stays clean
        for i in range(1, grid_h):
            grid = []
            grid.append((0, i * img_size[1] / grid_h))
            grid.append((img_size[0], i * img_size[1] / grid_h))
            draw.line(grid, fill='blue', width=0)
        for i in range(grid_w):
            grid = []
            grid.append((i * img_size[0] / grid_w, 0))
            grid.append((i * img_size[0] / grid_w, img_size[1]))
            draw.line(grid, fill='blue', width=0)
    targets = np.transpose(target, [1, 2, 0])
    targets = targets.reshape(grid_h, grid_w, -1, 4)
    offset_x = np.linspace(0, grid_w - 1, grid_w)
    offset_y = np.linspace(0, grid_h - 1, grid_h)
    off_w, off_h = np.meshgrid(offset_x, offset_y)
    indexes = np.argwhere(
        np.sum(targets.reshape(-1, 4), axis=1, keepdims=False) > 0
    )
    targets = np.transpose(targets, (3, 2, 0, 1))
    targets[0, :] += off_w
    targets[1, :] += off_h
    targets[2, :] += off_w
    targets[3, :] += off_h
    targets = targets * resolution
    targets = np.transpose(targets, (2, 3, 1, 0)).reshape(-1, 4)
    detected = targets[indexes[..., 0]]
    # print('detected lines shape: ', detected.shape)
    [draw.line([(x1, y1), (x2, y2)], fill='white', width=line_width)
     for (x1, y1, x2, y2) in detected]
    return line_img
ca0ad3938261cdb31de5c1ad502bef2d16d3e6cb
22,449
def unmarshal(raw, signature):
    """Unmarshal objects.

    The elements of the returned tuple will be of types according to the
    column *Python OUT* in the :ref:`types summary <ref-types-table>`.

    :param RawData raw: raw message data
    :param signature: see :class:`~dcar.signature.Signature`
    :return: tuple of unmarshalled data
    :rtype: tuple
    :raises ~dcar.MessageError: if the data could not be unmarshalled
    """
    signature = _signature(signature)
    data = []
    for t, s in signature:
        # `types` is dcar's module-level registry mapping type codes to
        # marshaller objects (not the stdlib `types` module).
        data.append(types[t].unmarshal(raw, s))
    return tuple(data)
958761030418450cb65aeef2d966ef3d9157167c
22,450
def remove_keys(d, to_remove):
    """
    This function removes the given keys from the dictionary d. N.B.,
    "not in" is used to match the keys.

    Args:
        d (dict): a dictionary
        to_remove (list): a list of keys to remove from d

    Returns:
        dict: a copy of d, excluding keys in to_remove
    """
    ret = {
        k: v for k, v in d.items() if k not in to_remove
    }
    return ret
94146bb19e8d39ea28c0940307c4c998fe5b7063
22,451
import os


def parse_line(line_str):
    """
    Parse a line from sha1sum output into tuple of hash, directory path and
    file name. Eg. line '3af30443352a5760cb0f88e619819cee1b1599e0 foo/bar/baz'
    would be parsed into tuple
    ('3af30443352a5760cb0f88e619819cee1b1599e0', 'foo/bar', 'baz').
    """
    line_str = line_str.rstrip()
    # split on the whitespace run so the double-space separator emitted by
    # sha1sum does not leave a leading space on the path
    hash_str, path_str = line_str.split(maxsplit=1)
    path_pair = os.path.split(path_str)
    return hash_str, path_pair[0], path_pair[1]
6a643e7b121e54b224a257c615568c617d0f216f
22,452
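A usage sketch for parse_line above, using the example from its docstring:

line = '3af30443352a5760cb0f88e619819cee1b1599e0 foo/bar/baz\n'
h, d, f = parse_line(line)
assert h == '3af30443352a5760cb0f88e619819cee1b1599e0'
assert (d, f) == ('foo/bar', 'baz')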
def get_crypto_quotes(**kwargs):
    """
    Top-level function for obtaining all available cryptocurrency quotes
    """
    return CryptoReader(**kwargs).fetch()
1eb94bf698e10b43e0cb4c794d7fb66a823295c3
22,453
def mutual_information(y_true, y_pred):
    """Mutual information score."""
    # This is a simple wrapper for returning the score as given in y_pred
    return y_pred
fae45b40fb3ca285bef57e06b30c42d7f87b5286
22,454
from typing import Sequence
from typing import Optional


def AvgPool(window_shape: Sequence[int],
            strides: Optional[Sequence[int]] = None,
            padding: str = Padding.VALID.name,
            normalize_edges: bool = False,
            batch_axis: int = 0,
            channel_axis: int = -1) -> InternalLayerMasked:
    """Layer construction function for an average pooling layer.

    Based on `jax.example_libraries.stax.AvgPool`.

    Args:
        window_shape: The number of pixels over which pooling is to be performed.
        strides: The stride of the pooling window. `None` corresponds to a
            stride of `(1, 1)`.
        padding: Can be `VALID`, `SAME`, or `CIRCULAR` padding. Here `CIRCULAR`
            uses periodic boundary conditions on the image.
        normalize_edges: `True` to normalize output by the effective receptive
            field, `False` to normalize by the window size. Only has effect at
            the edges when `SAME` padding is used. Set to `True` to retain
            correspondence to `ostax.AvgPool`.
        batch_axis: Specifies the batch dimension. Defaults to `0`, the
            leading axis.
        channel_axis: Specifies the channel / feature dimension. Defaults to
            `-1`, the trailing axis. For `kernel_fn`, channel size is
            considered to be infinite.

    Returns:
        `(init_fn, apply_fn, kernel_fn)`.
    """
    return _Pool(_Pooling.AVG, window_shape, strides, padding, normalize_edges,
                 batch_axis, channel_axis)
17e523a6092db0d90eb6960e76a3f8fece94d57a
22,455
import tensorflow as tf


def pairwise_distances(x, y):
    """Computes pairwise squared l2 distances between tensors x and y.

    Args:
        x: Tensor of shape [n, feature_dim].
        y: Tensor of shape [m, feature_dim].

    Returns:
        Float32 distances tensor of shape [n, m].
    """
    # d[i,j] = (x[i] - y[j]) * (x[i] - y[j])'
    #        = sum(x[i]^2, 1) + sum(y[j]^2, 1) - 2 * x[i] * y[j]'
    xs = tf.reduce_sum(x * x, axis=1)[:, tf.newaxis]
    ys = tf.reduce_sum(y * y, axis=1)[tf.newaxis, :]
    d = xs + ys - 2 * tf.matmul(x, y, transpose_b=True)
    return d
4928e02c0580f97b4a97db48c6d67f8e15000d46
22,456
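A small numeric check for pairwise_distances above (assumes TF2 eager execution; values are illustrative):

import numpy as np
import tensorflow as tf

x = tf.constant([[0.0, 0.0], [1.0, 0.0]])
y = tf.constant([[0.0, 3.0]])
d = pairwise_distances(x, y)
# squared l2: |(0,0)-(0,3)|^2 = 9, |(1,0)-(0,3)|^2 = 10
assert np.allclose(d.numpy(), [[9.0], [10.0]])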
import re


def timerange(rstring):
    """
    range from string specifier

    | 2010-M08 -> range of August 2010
    | 2009-Q1 -> range of first quarter, 2009
    | 2001-S1 -> range of first "semi" 2001
    | 2008 -> range of year 2008

    :param rstring: range string
    :rtype: timerange dictionary
    """
    m_match = re.search(r'(\d{4})-M(\d{2})', rstring)
    if m_match:
        return month_range(int(m_match.group(1)), int(m_match.group(2)))
    q_match = re.search(r'(\d{4})-Q(\d{1})', rstring)
    if q_match:
        return quarter_range(int(q_match.group(1)), int(q_match.group(2)))
    s_match = re.search(r'(\d{4})-S(\d{1})', rstring)
    if s_match:
        return semi_range(int(s_match.group(1)), int(s_match.group(2)))
    y_match = re.search(r'(\d{4})', rstring)
    if y_match:
        return year_range(int(y_match.group(1)))
d623e28ad4b040f833d96fed932117b8873f28ac
22,457
def comp_height_wire(self):
    """Return bar height

    Parameters
    ----------
    self : CondType21
        A CondType21 object

    Returns
    -------
    H: float
        Height of the bar [m]
    """
    return self.Hbar
98f98d021774166aa960080f353b6fbc01229eab
22,458
from datetime import datetime, timedelta
import logging


def get_update_seconds(str_time: str) -> int:
    """This function calculates the seconds between the current time and the
    scheduled time utilising the datetime module.

    Args:
        str_time (str): Time of scheduled event taken from user input as a string

    Returns:
        int: Returns the seconds until the scheduled event should occur
    """
    # creates timedeltas for current time and update time
    interval_bin = datetime(1900, 1, 1)
    update_time = datetime.strptime(str_time, '%H:%M') - interval_bin
    current_time = datetime.now()
    current_timedelta = timedelta(hours=current_time.hour,
                                  minutes=current_time.minute,
                                  seconds=current_time.second)
    # calculates update interval by comparing the two timedeltas
    if update_time >= current_timedelta:
        update_interval = update_time - current_timedelta
    if update_time < current_timedelta:
        update_time += timedelta(hours=24)
        update_interval = update_time - current_timedelta
    logging.info('UPDATE INTERVAL: ' + str(update_interval.seconds))
    return update_interval.seconds
0440ac847bc6c19290bdfa231971d33236335904
22,459
def full_name(decl, with_defaults=True):
    """
    Returns declaration full qualified name.

    If `decl` belongs to anonymous namespace or class, the function will
    return C++ illegal qualified name.

    :param decl: :class:`declaration_t`
    :type decl: :class:`declaration_t`
    :rtype: full name of declarations.
    """
    if None is decl:
        raise RuntimeError("Unable to generate full name for None object!")
    if with_defaults:
        if not decl.cache.full_name:
            path = declaration_path(decl)
            if path == [""]:
                # Declarations without names are allowed (for examples class
                # or struct instances). In this case set an empty name.
                decl.cache.full_name = ""
            else:
                decl.cache.full_name = full_name_from_declaration_path(path)
        return decl.cache.full_name
    else:
        if not decl.cache.full_partial_name:
            path = partial_declaration_path(decl)
            if path == [""]:
                # Declarations without names are allowed (for examples class
                # or struct instances). In this case set an empty name.
                decl.cache.full_partial_name = ""
            else:
                decl.cache.full_partial_name = \
                    full_name_from_declaration_path(path)
        return decl.cache.full_partial_name
b9828bf4045baa2edbec0c5007406309d90391c5
22,460
import sys
import time

import numpy as np
import pandas as pd


def aggregate_metrics_by_nodesets(df, nodelists, nodeset_names=None, weightlists=None,
                                  level_name="node", use_metrics=None,
                                  print_looptime=True):
    """Aggregates a dataframe by nodes (into nodesets), returning a data frame
    with the same structure but with nodesets instead of nodes. Accepts
    different weights for each node.

    Let it be registered: this single function with ugly implementation took me
    days of suffering to figure out a way for it to *simply work* using the
    pandas logic.

    Parameters
    ----------
    df : pd.DataFrame
        The big_df. Signature: df[(strat_name, sim_prefix, exec, node)] -> [metrics]
    nodelists : list or None
        This represents the sets of nodes to aggregate. It is a nested list,
        each one containing the node indexes of each set. Nodes may be in more
        than one set.
    nodeset_names : sequence
        List of names of the nodesets. Used in out_df to label the sets, so
        they must be unique.
    weightlists : list
        This represents the weights of each node in each nodeset. Must have the
        same nested structure (and lengths) of nodelists.
    level_name : hashable
        Name of the level to be aggregated (i.e., the nodes). The method is
        agnostic to the other levels.
    use_metrics : sequence
        Metrics to use. Must be a subset of the names of columns in df.
    print_looptime : bool
        Whether the method should print the main loop execution time. It's
        quite costly and can be optimized...

    Returns
    -------
    Possible signature of the output dataframe:
    out_df(strategy, sim_prefix, exec, nodeset)[use_metrics]
    """
    num_nodesets = len(nodelists)

    if nodeset_names is None:
        # Uses simple numbers
        nodeset_names = list(range(num_nodesets))

    if weightlists is None:
        # Weights not passed - set all to 1
        weightlists = [[1.] * len(l_nodes) for l_nodes in nodelists]

    # Precalculate the normalization of weights in each nodeset
    weight_sums = [sum(weights) for weights in weightlists]

    if use_metrics is None:
        use_metrics = df.columns

    # print(df.index)
    # print(df.index.set_levels(nodeset_names, level="node"))

    # This complicated routine designs a new index object, with nodesets instead
    # of nodes. It is agnostic to the other levels, only "node" is replaced.
    new_index = df.index.droplevel(level=level_name)  # Multiindex without level 'node'
    df_from_index = new_index.to_frame(index=False)  # Converts multiindex to a frame with each level as a column
    df_from_index.drop_duplicates(inplace=True, ignore_index=True)
    new_index = pd.MultiIndex.from_frame(df_from_index)  # Creates an index yet without level "node"
    num_rows = len(df_from_index)  # Number of rows without the "node" level.
    # nodeset_col = nodeset_names * len(df_from_index)  # Creates a repeated list of nodeset names
    tmp_df = pd.DataFrame({name: np.repeat(np.nan, num_rows) for name in nodeset_names},
                          index=new_index)
    stacked_df = tmp_df.stack(dropna=False)  # Reshapes the previous df, finally making the desired multiindex
    stacked_df.index.set_names("nodeset", level=-1, inplace=True)

    # After everything, allocates the output df.
    # Possible Signature: out_df(strategy, sim_prefix, exec, nodeset)[metrics]
    out_df = pd.DataFrame({metric: np.repeat(np.nan, len(stacked_df)) for metric in use_metrics},
                          index=stacked_df.index)

    # TODO: remove the trash from memory? (Eg. tmp_df, stacked_df)

    # ---------------------------
    # Main loop over executions (agnostic to the df levels, except the one given as level_name)
    levels_to_iterate = list(df.index.names)
    levels_to_iterate.remove(level_name)  # Only removes the node level, agnostic to the others

    loop_t0 = time.time()  # Progress monitoring

    # TODO: The next loop seems painfully slow, but I think that actually the ugly
    #  ways of indexing and cross-sectioning are the main bottleneck (rather than
    #  the calculations themselves). Check that and see what can be done.
    loop_size = len(df.groupby(level=levels_to_iterate))  # Yup, twice. Sorry.
    param_t0 = time.time()
    for i_param, (params, exec_df) in enumerate(df.groupby(level=levels_to_iterate)):

        # Local loop over each set of nodes
        for i, l_nodes in enumerate(nodelists):

            # Calculates the weighted average of required metrics for the current nodeset
            nset_average = sum(weight * exec_df.loc[(*params, ni)][use_metrics]
                               for (ni, weight) in zip(l_nodes, weightlists[i]))
            nset_average /= weight_sums[i]

            # Put into final data frame
            out_df.loc[(*params, nodeset_names[i])] = nset_average

        # Iteration time feedback
        if i_param % 100 == 0:
            param_tf = time.time()
            print("{:0.3f}%: {:6.4}s\n".format(100 * i_param / loop_size, param_tf - param_t0), end=" ")
            sys.stdout.flush()
            param_t0 = param_tf

    # # WOULD NOT PARALLELIZE - the slow part is probably not paralellizable
    # def func(i_param, params, exec_df):
    #     # Local loop over each set of nodes
    #     for i, l_nodes in enumerate(nodelists):
    #         # Calculates the weighted average of required metrics for the current nodeset
    #         nset_average = sum(weight * exec_df.loc[(*params, ni)][use_metrics]
    #                            for (ni, weight) in zip(l_nodes, weightlists[i]))
    #         nset_average /= weight_sums[i]
    #         # Put into final data frame
    #         out_df.loc[(*params, nodeset_names[i])] = nset_average
    #
    # # pool = ProcessPool(num_processes)

    print()

    # Execution time feedback
    loop_tf = time.time()
    if print_looptime:
        print(" - Time calculating nodeset averages: {} ({:0.5f}s)"
              "".format(seconds_to_hhmmss(loop_tf - loop_t0), loop_tf - loop_t0))

    return out_df
5f844915fdc78696a3f0f3bcaeeb3ff88a6af438
22,461
from math import ceil

import numpy as np


def zero_pad(data, window_size):
    """
    Pads with window_size / 2 zeros the given input.

    Args:
        data (numpy.ndarray): data to be padded.
        window_size (int): parameter that controls the size of padding.

    Returns:
        numpy.ndarray: padded data.
    """
    pad_width = ceil(window_size / 2)
    padded = np.pad(data, (pad_width, pad_width), 'constant',
                    constant_values=(0, 0))
    return padded
234f27f06bba9dff3a38292e1190b01a767bd56b
22,462
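A usage sketch for zero_pad above (illustrative input):

import numpy as np

padded = zero_pad(np.array([1.0, 2.0, 3.0]), window_size=4)
# ceil(4 / 2) = 2 zeros on each side
assert np.array_equal(padded, [0.0, 0.0, 1.0, 2.0, 3.0, 0.0, 0.0])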
import requests


def get_results(url_id):
    """Get the scanned results of a URL"""
    r = requests.get('https://webcookies.org/api2/urls/%s' % url_id,
                     headers=headers)
    return r.json()
be5b660acd847066ec4c476dfe25d2fe21f8e2c4
22,463
def build_dense_constraint(base_name, v_vars, u_exprs, pos, ap_x):
    """Alias for :func:`same_act`"""
    return same_act(base_name, v_vars, u_exprs, pos, ap_x)
b35b07c3825a76ac046d8b32490cb5836ae6176a
22,464
from typing import Tuple from typing import Union def list_snapshots(client, data_args) -> Tuple[str, dict, Union[list, dict]]: """ List all snapshots at the system. :type client: ``Client`` :param client: client which connects to api. :type data_args: ``dict`` :param data_args: request arguments. :return: human readable format, context output and the original raw response. :rtype: ``tuple`` """ limit = arg_to_number(data_args.get('limit')) offset = arg_to_number(data_args.get('offset')) params = assign_params(limit=limit, offset=offset) raw_response = client.do_request(method='GET', url_suffix='/plugin/products/threat-response/api/v1/snapshot', params=params) snapshots = raw_response.get('snapshots', []) for snapshot in snapshots: if created := snapshot.get('created'): try: snapshot['created'] = timestamp_to_datestring(created) except ValueError: pass context = createContext(snapshots, removeNull=True) headers = ['uuid', 'name', 'evidenceType', 'hostname', 'created'] outputs = {'Tanium.Snapshot(val.uuid === obj.uuid)': context} human_readable = tableToMarkdown('Snapshots:', snapshots, headers=headers, headerTransform=pascalToSpace, removeNull=True) return human_readable, outputs, raw_response
5452b162d372a016fc63fded510e9361869581b8
22,465
def data_context_path_computation_context_pathuuid_linktopology_uuidlink_uuid_get(uuid, topology_uuid, link_uuid):  # noqa: E501
    """data_context_path_computation_context_pathuuid_linktopology_uuidlink_uuid_get

    returns tapi.topology.LinkRef # noqa: E501

    :param uuid: Id of path
    :type uuid: str
    :param topology_uuid: Id of topology
    :type topology_uuid: str
    :param link_uuid: Id of link
    :type link_uuid: str

    :rtype: TapiTopologyLinkRef
    """
    return 'do some magic!'
98c1b8721fb3edcc8cb3acc196e8c5aa8eb8a4f6
22,466
from base58 import b58decode


def ipfs_qm_hash_to_32_bytes(ipfs_qm: str) -> str:
    """
    Transform an IPFS base58 Qm... hash to a 32-byte hex string (with the 2 heading
    multihash bytes stripped and an '0x' prefix added).

    :param ipfs_qm: IPFS base58 Qm... hash.
    :return: 32-byte hex string (without the 2 heading bytes).
    """
    return f"0x{b58decode(ipfs_qm).hex()[4:]}"
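# Usage sketch with a well-known IPFS CIDv0 hash; b58decode yields 34 bytes
# (the 0x1220 multihash prefix plus a 32-byte digest), so the result is "0x"
# followed by 64 hex characters.
_digest = ipfs_qm_hash_to_32_bytes("QmYwAPJzv5CZsnA625s3Xf2nemtYgPpHdWEz79ojWnPbdG")
assert _digest.startswith("0x") and len(_digest) == 66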
e60386928c3836edcd3ebef3740e1bbc8b095724
22,467
def get_service_state(scheduler): """Return the current state of the job service.""" return {"state": get_service_state_str(scheduler)}, 200
e18f66a2d2a2a97a37aed427178b65c2a9c8d919
22,468
def determine_file_type(filename):
    """
    :param filename: str

    :rtype: FileType
    """
    if filename.endswith('.cls'):
        return FileType.CLS
    elif filename.endswith('.java'):
        return FileType.JAVA
    elif filename.endswith('.js'):
        return FileType.JAVASCRIPT
    elif filename.endswith('.php'):
        return FileType.PHP
    elif filename.endswith('.py'):
        return FileType.PYTHON
    elif filename.endswith(('.yaml', '.yml')):
        return FileType.YAML
    return FileType.OTHER
030d11266a8b93056c1d82778ba95a67fea7a799
22,469
def sanitise_text(text):
    """When we process text before saving or executing, we sanitise it by
    changing all CR/LF pairs into LF, and then nuking all remaining CRs.
    This consistency also ensures that the files we save have the correct
    line-endings depending on the operating system we are running on.

    It also turns out that things break when the code ends with an indented
    block and there is no empty line after it.  For example (thanks to
    Emiel v. IJsseldijk for reproducing!):

    def hello():
        print "hello" # and this is the last line of the text

    Will not completely define method hello.

    To remedy this, we add an empty line at the very end if there's not one
    already.
    """
    text = text.replace('\r\n', '\n')
    text = text.replace('\r', '')

    lines = text.split('\n')
    if lines and len(lines[-1]) != 0:
        return text + '\n'
    else:
        return text
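# Usage sketch: CRLF pairs become LF, and a trailing newline is appended
# when the text does not already end with one.
assert sanitise_text("def hello():\r\n    pass") == "def hello():\n    pass\n"
assert sanitise_text("x = 1\n") == "x = 1\n"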
1d7d047fba7c8697748d0cf115e0f74fcad8c1c4
22,470
import string

import pandas as pd
from sklearn.datasets import make_regression

# `settings` is assumed to be the package's module-level configuration mapping.


def create_regression(
    n_samples=settings["make_regression"]["n_samples"]
) -> pd.DataFrame:
    """Creates a fake regression dataset with 20 features

    Parameters
    ----------
    n_samples : int
        number of samples to generate

    Returns
    -------
    pd.DataFrame of features and targets:
        feature names are lowercase letters, targets are in the column "target"
    """
    X, y = make_regression(n_samples=n_samples, n_features=20, n_informative=5)
    features = pd.DataFrame(X, columns=list(string.ascii_lowercase[: X.shape[1]]))
    targets = pd.Series(y, name="target")
    data = features.join(targets)
    return data
b1d91b5e56366a8a2df7731550baedcf154e8c9a
22,471
import tensorflow as tf


def _divide_no_nan(x, y, epsilon=1e-8):
    """Equivalent to tf.math.divide_no_nan but supports bfloat16."""
    # need manual broadcast...
    safe_y = tf.where(
        tf.logical_and(tf.greater_equal(y, -epsilon), tf.less_equal(y, epsilon)),
        tf.ones_like(y), y)
    return tf.where(
        tf.logical_and(
            tf.greater_equal(tf.broadcast_to(y, x.get_shape()), -epsilon),
            tf.less_equal(tf.broadcast_to(y, x.get_shape()), epsilon)),
        tf.zeros_like(x), x / safe_y)
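# Usage sketch (assuming TF2 eager execution): dividing by a near-zero
# denominator yields zero rather than inf/nan, while ordinary divisions
# are unchanged.
_res = _divide_no_nan(tf.constant([1.0, 4.0]), tf.constant([0.0, 2.0]))
# -> [0.0, 2.0]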
c7dc806bbdd7968fe61a9c7be76369b5608d9636
22,472
import numpy as np
import pandas as pd


def make_release(t, **params_or_funcs):
    """Create a particle release table to be used for testing"""
    t = np.array(t)
    i = np.arange(len(t))
    params = {
        k: (p(i, t) if callable(p) else p) + np.zeros_like(t)
        for k, p in params_or_funcs.items()
    }

    start_date = np.datetime64("2000-01-02T03")
    minute = np.timedelta64(60, "s")
    dates = start_date + np.array(t) * minute

    return pd.DataFrame(data={**dict(release_time=dates.astype(str)), **params})
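# Usage sketch, assuming the numpy/pandas imports above: scalar parameters are
# broadcast over all release times, and callables receive (index, time).
_rel = make_release([0, 10], depth=5, lat=lambda i, t: 60.0 + i)
assert list(_rel.columns) == ["release_time", "depth", "lat"]
assert _rel["depth"].tolist() == [5, 5]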
3dd2778dcf6962171d585244fc276832a300557c
22,473
import re


def get_apartment_divs(driver):
    """Scrapes the url the driver is pointing at and extracts any divs with
    "listitem" classes. Those divs are used as apartment objects at Immowelt.

    Args:
        driver (Webdriver): A Webdriver instance.

    Returns:
        set: a set of all divs of class listitem...
    """
    source = get_list_source(driver)
    regex = re.compile('listitem.*relative js-listitem')
    return set(source.findAll("div", regex))
ccef9159d7731ce78d5deeff92364e4bc43f5f3e
22,474
import tensorflow as tf
import zhusuan as zs

# The bare `StochasticTensor` is assumed to come from the enclosing package;
# zs.StochasticTensor is ZhuSuan's class of the same name.


def smart_apply(tensor, static_fn, dynamic_fn):
    """
    Apply transformation on `tensor`, with either `static_fn` for static
    tensors (e.g., Numpy arrays, numbers) or `dynamic_fn` for dynamic tensors.

    Args:
        tensor: The tensor to be transformed.
        static_fn: Static transformation function.
        dynamic_fn: Dynamic transformation function.

    Returns:
        Tensor: The transformed tensor.
    """
    if isinstance(tensor, (tf.Tensor, tf.Variable,
                           StochasticTensor, zs.StochasticTensor)):
        return dynamic_fn(tensor)
    else:
        return static_fn(tensor)
e2798377891ff6fc0ba4440357b83f927760bc59
22,475
import time

# `helpers`, `red`, and `request_api` come from the enclosing module.


def new_scan(host, publish="off", start_new="on", all="done", ignoreMismatch="on"):
    """This function requests SSL Labs to run a new scan for the target domain."""
    if helpers.is_ip(host):
        print(red("[!] Your target host must be a domain, not an IP address! "
                  "SSL Labs will only scan domains."))
        exit()
    else:
        path = "analyze"
        payload = {'host': host, 'publish': publish, 'start_new': start_new,
                   'all': all, 'ignoreMismatch': ignoreMismatch}
        results = request_api(path, payload)

        payload.pop('start_new')

        while results['status'] != 'READY' and results['status'] != 'ERROR':
            print("Scan in progress, please wait for the results.")
            time.sleep(30)
            results = request_api(path, payload)

        return results
7c7341778028fac5d7c7829f9b57174f0fdb251c
22,476
import cv2
import numpy as np
import matplotlib.pyplot as plt

# `imadjust` is assumed to be the module's contrast-adjustment helper.


def removeBubbles(I, kernelSize=(11, 11)):
    """remove bright spots (mostly bubbles) in retardance images. Need to add a size filter

    Parameters
    ----------
    I : numpy.ndarray
        input retardance image.
    kernelSize : tuple
        size of the structuring element used for the background opening.

    Returns
    -------
    numpy.ndarray
        retardance image with bubbles replaced by the local background.
    """
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kernelSize)
    Bg = cv2.morphologyEx(I, cv2.MORPH_OPEN, kernel)
    I8bit = I / np.nanmax(I[:]) * 255  # rescale to 8 bit as OpenCV only takes 8 bit (REALLY????)
    I8bit = I8bit.astype(np.uint8, copy=False)  # convert to 8 bit
    ITh = cv2.adaptiveThreshold(I8bit, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                cv2.THRESH_BINARY, 201, -1)
    kernelSize = (3, 3)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kernelSize)
    IThBig = cv2.morphologyEx(ITh, cv2.MORPH_CLOSE, kernel)
    kernelSize = (21, 21)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, kernelSize)
    IThBig = cv2.morphologyEx(IThBig, cv2.MORPH_OPEN, kernel)
    ITh = ITh - IThBig
    IBi = ITh.astype(np.bool_, copy=True)  # convert to a boolean mask
    INoBub = np.copy(I)
    INoBub[IBi] = Bg[IBi]

    # Plot titles below describe what is actually shown in each panel (the
    # original labels were copied from an unrelated plotting routine).
    figSize = (8, 8)
    fig = plt.figure(figsize=figSize)
    fig.add_subplot(2, 2, 1)
    plt.tick_params(labelbottom='off', labelleft='off')  # labels along the bottom edge are off
    plt.imshow(imadjust(I), cmap='gray')
    plt.title('Retardance (original)')
    plt.show()

    fig.add_subplot(2, 2, 2)
    plt.tick_params(labelbottom='off', labelleft='off')
    plt.imshow(IThBig, cmap='gray')
    plt.title('Large-structure mask')
    plt.show()

    fig.add_subplot(2, 2, 3)
    plt.tick_params(labelbottom='off', labelleft='off')
    plt.imshow(ITh, cmap='gray')
    plt.title('Bubble mask')
    plt.show()

    fig.add_subplot(2, 2, 4)
    plt.tick_params(labelbottom='off', labelleft='off')
    plt.imshow(imadjust(INoBub), cmap='gray')
    plt.title('Retardance (bubbles removed)')
    plt.show()

    return INoBub
193863cb63ed3a1a785aa2c64367eb7a3518c671
22,477
import operator import json def assert_json_response(response, status_code, body, headers=None, body_cmp=operator.eq): """Assert JSON response has the expected status_code, body, and headers. Asserts that the response's content-type is application/json. body_cmp is a callable that takes the JSON-decoded response body and expected body and returns a boolean stating whether the comparison succeeds. body_cmp(json.loads(response.data.decode('utf-8')), body) """ headers = dict(headers or {}) headers['Content-Type'] = 'application/json' def json_cmp(response_body, body): return body_cmp(json.loads(response_body.decode('utf-8')), body) assert_response(response, status_code, body, headers, json_cmp)
db910cb0cb68bdbf9ad4b9490f3bbc6a87d1545d
22,478
def variable(value, dtype=None, name=None, constraint=None): """Instantiates a variable and returns it. # Arguments value: Numpy array, initial value of the tensor. dtype: Tensor type. name: Optional name string for the tensor. constraint: Optional projection function to be applied to the variable after an optimizer update. # Returns A variable instance (with Keras metadata included). # Examples ```python >>> from keras import backend as K >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = K.variable(value=val, dtype='float64', name='example_var') >>> K.dtype(kvar) 'float64' >>> print(kvar) example_var >>> K.eval(kvar) array([[ 1., 2.], [ 3., 4.]]) ``` """ if name is None: name = 'variable' + str(get_uid('variable')) if constraint is not None: raise NotImplementedError('Constraints are not supported') if is_tensor(value): variable = av.variable_from_node(name, value) else: if dtype is None: value = np.array(value) if value.dtype == 'int64': value = np.array(value, dtype='int32') dtype = 'int32' elif value.dtype == 'float64': dtype = floatx() value = np.array(value, dtype=floatx()) else: dtype = value.dtype.name else: value = np.array(value, dtype=dtype) variable = av.variable( name, value.shape, avalanche_dtype(dtype), av.value_initializer(value)) variable._uses_learning_phase = False variable._keras_shape = value.shape variable._is_variable = True return variable
3dbb67493e4529469ca0da73159ec495b1d30f07
22,479
import json

# NOTE: `Protocol` here is the application's database model (assumed to be
# imported from the app's models module), not typing.Protocol; the original
# `from typing import Protocol` shadowed the model.


def autoprotocol_protocol(protocol_id):
    """Get autoprotocol-python representation of a protocol."""
    current_protocol = Protocol.query.filter_by(id=protocol_id).first()
    if not current_protocol:
        flash('No such specification!', 'danger')
        return redirect('.')
    if current_protocol.public:
        print("PUBLIC")
    else:
        print("NOT PUBLIC")
    if current_protocol.user != current_user and not current_protocol.public:
        flash('Not your project!', 'danger')
        return redirect('.')
    if not current_protocol.protocol:
        return ""
    protocol_object = json.loads(current_protocol.protocol)
    converter = AutoProtocol()
    resp = make_response(converter.convert(protocol_object, current_protocol.name,
                                           current_protocol.description))
    resp.headers['Content-Type'] = "text"
    resp.headers['Content-Disposition'] = "attachment; filename=" + current_protocol.name + "-autoprotocol.py"
    return resp
85d0a3d5a215c50124c86b17114c82c60b07fae5
22,480
import numpy as np
from sympy import N
from sympy.physics.wigner import wigner_3j

# `get_max` is assumed to be the module's helper returning (n_rad, n_l).
# ASSUMPTION: `T` was defined elsewhere in the original module (the stray
# `from re import T` could not have worked). The matrix below is the standard
# complex-to-real spherical-harmonic transformation for l = 1 (rows ordered
# m = -1, 0, 1) and may need adapting to the author's phase conventions.
T = np.array([[1j / np.sqrt(2), 0, 1j / np.sqrt(2)],
              [0, 1, 0],
              [1 / np.sqrt(2), 0, -1 / np.sqrt(2)]])


def tensor_to_P(tensor, wig3j=None):
    """ Transform an arbitrary SO(3) tensor into real P which transforms under the
        irreducible representation with l = 1. Wigner-3j symbols can be provided or
        calculated on the fly for faster evaluation.
        If provided, wig3j should be an array with indexing [l1,l2,m,m1,m2]
    """
    P = []
    n_rad, n_l = get_max(tensor)
    lam = 1

    # It is faster to pre-evaluate the Wigner-3j symbols, faster still if they are passed in
    if not isinstance(wig3j, np.ndarray):
        wig3j = np.zeros([n_l, n_l, 2 * n_l + 1, 2 * n_l + 1, 2 * n_l + 1])
        wig3j = wig3j.astype(np.complex128)
        for l1 in range(n_l):
            for l2 in range(n_l):
                for m in range(-lam, lam + 1):
                    for m1 in range(-n_l, n_l + 1):
                        for m2 in range(-n_l, n_l + 1):
                            wig3j[l2, l1, m, m1, m2] = N(wigner_3j(lam, l2, l1, m, m1, m2))

    for mu in range(-lam, lam + 1):
        P.append([])
        for n1 in range(n_rad):
            for n2 in range(n_rad):
                for l1 in range(n_l):
                    for l2 in range(n_l):
                        if (l1 + l2) % 2 == 0:
                            continue
                        p = 0
                        for m in range(-n_l, n_l + 1):
                            wig = wig3j[l2, l1, mu, (m - mu), -m]
                            if wig != 0:
                                p += tensor['{},{},{}'.format(n1, l1, m)] * \
                                     tensor['{},{},{}'.format(n2, l2, m - mu)].conj() * \
                                     (-1)**m * wig
                        p *= (-1)**(lam - l2)
                        P[mu + lam].append(p)

    p_real = []
    for pi in np.array(P).T:
        p_real.append((T.dot(pi))[[2, 0, 1]])
    P = np.array(p_real).T

    if not np.allclose(P.imag, np.zeros_like(P)):
        raise Exception('Ooops, something went wrong. P not purely real.')

    return P.real.T
c4cf2746ac0376b29df437e1938644c9df8e1dd2
22,481
from django import http

from google.appengine.ext import db

# NOTE: the module paths below (taskqueue, system, error_handler, redirects,
# and task) are assumed from the Melange/soc conventions used by the ghop
# imports further down; adapt them if the project lays these helpers out
# differently.
from google.appengine.api.labs import taskqueue

from soc.logic import system
from soc.tasks.helper import error_handler
from soc.views.helper import redirects

from soc.modules.ghop.logic.helper import notifications as ghop_notifications
from soc.modules.ghop.logic.models import comment as ghop_comment_logic
from soc.modules.ghop.logic.models import task as ghop_task_logic
from soc.modules.ghop.logic.models import task_subscription as \
    ghop_task_subscription_logic

# DEF_TASK_UPDATE_SUBJECT_FMT is a module-level format string taking a
# 'title' key.


def createNotificationMail(request, *args, **kwargs):
    """Appengine task that sends mail to the subscribed users.

    Expects the following to be present in the POST dict:
        comment_key: Specifies the comment id for which to send the notifications
        task_key: Specifies the task key name to which the comment belongs

    Args:
        request: Django Request object
    """
    # set default batch size
    batch_size = 10

    post_dict = request.POST

    comment_key = post_dict.get('comment_key')
    task_key = post_dict.get('task_key')

    if not (comment_key and task_key):
        # invalid task data, log and return OK
        return error_handler.logErrorAndReturnOK(
            'Invalid createNotificationMail data: %s' % post_dict)

    comment_key = long(comment_key)

    # get the task entity under which the specified comment was made
    task_entity = ghop_task_logic.logic.getFromKeyName(task_key)

    # get the comment for the given id
    comment_entity = ghop_comment_logic.logic.getFromID(
        comment_key, task_entity)

    if not comment_entity:
        # invalid comment specified, log and return OK
        return error_handler.logErrorAndReturnOK(
            'Invalid comment specified: %s/%s' % (comment_key, task_key))

    # check and retrieve the subscriber_start_index that was done last
    # (POST values arrive as strings, so convert before using it as a slice index)
    if 'subscriber_start_index' in post_dict:
        subscriber_start_index = int(post_dict['subscriber_start_index'])
    else:
        subscriber_start_index = 0

    # get all subscribers to the GHOP task
    fields = {
        'task': task_entity,
    }

    ts_entity = ghop_task_subscription_logic.logic.getForFields(
        fields, unique=True)

    subscribers = db.get(ts_entity.subscribers[
        subscriber_start_index:subscriber_start_index + batch_size])

    task_url = "http://%(host)s%(task)s" % {
        'host': system.getHostname(),
        'task': redirects.getPublicRedirect(
            task_entity, {'url_name': 'ghop/task'}),
    }

    # create the data for the mail to be sent
    message_properties = {
        'task_url': task_url,
        'redirect_url': "%(task_url)s#c%(cid)d" % {
            'task_url': task_url,
            'cid': comment_entity.key().id_or_name()
        },
        'comment_entity': comment_entity,
        'task_entity': task_entity,
    }

    subject = DEF_TASK_UPDATE_SUBJECT_FMT % {
        'title': task_entity.title,
    }

    for subscriber in subscribers:
        # `subscriber` is assumed to be the intended recipient entity here
        ghop_notifications.sendTaskUpdate(subscriber, subject, message_properties)

    if len(subscribers) == batch_size:
        # spawn a task for sending out notifications to the next set of subscribers
        next_start = subscriber_start_index + batch_size

        task_params = {
            'comment_key': comment_key,
            'task_key': task_key,
            'subscriber_start_index': next_start
        }
        task_url = '/tasks/ghop/task/mail/create'

        new_task = taskqueue.Task(params=task_params, url=task_url)
        new_task.add('mail')

    # return OK
    return http.HttpResponse()
859372c83fb37d440456c380cf313a27b029f018
22,482
from collections import defaultdict

# `Tree` and `SPLITLABELRE` are module-level names in the original
# (discodop-style tree transforms) and are assumed to be available here.


def mergediscnodes(tree):
    """Reverse transformation of ``splitdiscnodes()``."""
    treeclass = tree.__class__
    for node in tree.subtrees():
        merge = defaultdict(list)  # a series of queues of nodes
        # e.g. merge['VP_2*'] = [Tree('VP_2', []), ...]
        # when origin is present (index after *), the node is moved to where
        # the next one is expected, e.g., VP_2*1 after VP_2*0 is added.
        nodes = list(node)  # the original, unmerged children
        node[:] = []  # the new, merged children
        for child in nodes:
            if not isinstance(child, Tree):
                node.append(child)
                continue
            match = SPLITLABELRE.search(child.label)
            if not match:
                node.append(child)
                continue
            label, part, _ = match.groups()
            grandchildren = list(child)
            child[:] = []
            if not merge[child.label]:
                merge[child.label].append(treeclass(label, []))
                node.append(merge[child.label][0])
            merge[child.label][0].extend(grandchildren)
            if part:
                nextlabel = '%s*%d' % (label, int(part) + 1)
                merge[nextlabel].append(merge[child.label].pop(0))
    return tree
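# Usage sketch (comment form, since Tree and SPLITLABELRE come from the
# enclosing module). Given a tree whose discontinuous VP was split into parts
# labelled with the "*<origin>" convention, e.g.
#   (S (VP_2*0 (VB eat)) (NP (DT the)) (VP_2*1 (RP up)))
# mergediscnodes() rejoins the parts into a single node:
#   (S (VP_2 (VB eat) (RP up)) (NP (DT the)))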
300aaebe95604c611ecf1f65373ba83f97361438
22,483
import sys
import typing

import colorama
from docopt import docopt

# `usage`, `settings`, `log`, `exc`, and `_run_command` are package-local
# helpers, assumed to be importable from the enclosing package.


def main():
    # type: () -> typing.Any
    """Parse the command line options and launch the requested command.

    If the command is 'help' then print the help message for the subcommand;
    if no subcommand is given, print the standard help message.
    """
    colorama.init(strip=not sys.stdout.isatty())
    doc = usage.get_primary_command_usage()
    allow_subcommands = "<command>" in doc
    args = docopt(
        doc, version=settings.version, options_first=allow_subcommands
    )
    if sys.excepthook is sys.__excepthook__:
        sys.excepthook = log.excepthook
    try:
        log.enable_logging(log.get_log_level(args))
        default_args = sys.argv[2 if args.get("<command>") else 1:]
        if (
            args.get("<command>") == "help"
            and None not in settings.subcommands
        ):
            subcommand = next(iter(args.get("<args>", default_args)), None)
            return usage.get_help_usage(subcommand)
        argv = [args.get("<command>")] + args.get("<args>", default_args)
        return _run_command(argv)
    except exc.InvalidCliValueError as e:
        return str(e)
4726833f091bd5c1b9f4e305f26278a87330f946
22,484
def count_go_nogo_trials(eventcode): """ :param eventcode: list of event codes from operant conditioning file :return: number of go and no go trials in the go/no go tasks """ lever_on = get_events_indices(eventcode, ['RLeverOn', 'LLeverOn']) (go_trials, nogo_trials) = (0, 0) for lever in lever_on: if eventcode[lever + 1] in ('LightOn1', 'LightOn2'): nogo_trials += 1 else: go_trials += 1 return go_trials, nogo_trials
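# Usage sketch (comment form, since get_events_indices comes from the
# enclosing module). A lever press followed by a cue light counts as a no-go
# trial, otherwise it counts as a go trial:
#   eventcode = ['RLeverOn', 'LightOn1', 'RLeverOn', 'Poke']
#   count_go_nogo_trials(eventcode)  ->  (1, 1)   # (go_trials, nogo_trials)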
2de71a663f158a0942d2c3f01973ed5dc999b3d7
22,485
def getScriptExecutionContext():
    """
    Returns the repository description instance and the set of items
    selected on script action execution.

    @return: Script execution context.
    @rtype: L{ScriptExecutionContext<datafinder.gui.user.script_api.ScriptExecutionContext>}
    """
    scriptExecutionContext = None
    if _context.scriptController.boundScriptExecutionContext is not None:
        repository, items = _context.scriptController.boundScriptExecutionContext
        itemPaths = [item.path for item in items]
        scriptExecutionContext = ScriptExecutionContext(RepositoryDescription(repository), itemPaths)
    return scriptExecutionContext
e49e8cfd140f967859cf0e0c75bfe69fac87835f
22,486
import copy
import numpy


def electrondensity_spin(ccdata, volume, mocoeffslist):
    """Calculate the magnitude of the electron density at every point in a volume
    for either up or down spin.

    Inputs:
        ccdata -- ccData object (provides the atom coordinates and gbasis)
        volume -- a template Volume object (will not be altered)
        mocoeffslist -- list of molecular orbitals to calculate the electron
            density from, e.g. [ccdata.mocoeffs[0][1:2]]

    Output:
        Volume object with the electron density at each grid point stored in
        its data attribute

    Note: mocoeffslist is a list of NumPy arrays. The list will be of length 1.
    """
    assert len(mocoeffslist) == 1, "mocoeffslist input to the function should have length of 1."

    bfs = getbfs(ccdata)

    density = copy.copy(volume)
    density.data = numpy.zeros(density.data.shape, "d")

    x, y, z = getGrid(density)

    # For occupied orbitals
    # `mocoeffs` and `gbasis` in the ccdata object are ordered in such a way that
    # `homos` can specify which orbital is the highest lying occupied orbital in
    # mocoeffs and gbasis.
    for mocoeffs in mocoeffslist:
        for mocoeff in mocoeffs:
            wavefn = numpy.zeros(density.data.shape, "d")
            for bs in range(len(bfs)):
                data = numpy.zeros(density.data.shape, "d")
                for i, xval in enumerate(x):
                    for j, yval in enumerate(y):
                        tmp = []
                        for zval in z:
                            tmp.append(pyamp(bfs, bs, xval, yval, zval))
                        data[i, j, :] = tmp
                data *= mocoeff[bs]
                wavefn += data
            density.data += wavefn ** 2

    return density
7a232f2dbae8ff7905b2eff680a44521b010334e
22,487
import numpy as np
import pandas as pd

# `_makeDatetimeIndex` and `_create_missing_idx` are module-private helpers.


def create_missing_dataframe(nrows, ncols, density=.9, random_state=None, index_type=None, freq=None):
    """Create a Pandas dataframe with random missingness.

    Parameters
    ----------
    nrows : int
        Number of rows
    ncols : int
        Number of columns
    density: float
        Amount of available data
    random_state: int, optional
        Random seed. If not given, default to 33.
    index_type: str, optional
        Accepts the following values: "dt" for timestamp, "int" for integer.
    freq: string, optional
        Sampling frequency. This option is only available if index_type is "dt".

    Returns
    -------
    df : pandas.DataFrame
        Pandas dataframe containing sample data with random missing rows.
    """
    # Create a nrows x ncols matrix
    data = np.random.uniform(100, size=(nrows, ncols))
    df = pd.DataFrame(data)
    if index_type:
        if index_type == "dt":
            if freq is None:
                freq = 'h'
            idx = _makeDatetimeIndex(nrows, freq=freq)
            df = df.set_index(idx)
        elif index_type == "int":
            # The default RangeIndex already is an integer index, so nothing to do.
            pass
        else:
            raise ValueError("Can't recognize index_type. Try the following values: 'dt', 'int'.")

    i_idx, j_idx = _create_missing_idx(nrows, ncols, density, random_state)
    df.values[i_idx, j_idx] = np.nan
    return df
e3c7f44f5238f929928ee5ec65c33bdb91fd8705
22,488
import struct

# `ProtocolError` lives at module level in the original. Four-byte
# (high-bit-set) FastCGI lengths are big-endian unsigned 32-bit integers,
# hence the struct definition below.
length4_struct = struct.Struct('>I')


def decode_name_value_pairs(buffer):
    """
    Decode a name-value pair list from a buffer.

    :param bytearray buffer: a buffer containing a FastCGI name-value pair list
    :raise ProtocolError: if the buffer contains incomplete data
    :return: a list of (name, value) tuples where both elements are unicode strings
    :rtype: list

    """
    index = 0
    pairs = []
    while index < len(buffer):
        if buffer[index] & 0x80 == 0:
            name_length = buffer[index]
            index += 1
        elif len(buffer) - index >= 4:
            name_length = length4_struct.unpack_from(buffer, index)[0] & 0x7fffffff
            index += 4
        else:
            raise ProtocolError('not enough data to decode name length in name-value pair')

        if len(buffer) - index >= 1 and buffer[index] & 0x80 == 0:
            value_length = buffer[index]
            index += 1
        elif len(buffer) - index >= 4:
            value_length = length4_struct.unpack_from(buffer, index)[0] & 0x7fffffff
            index += 4
        else:
            raise ProtocolError('not enough data to decode value length in name-value pair')

        if len(buffer) - index >= name_length + value_length:
            name = buffer[index:index + name_length].decode('ascii')
            value = buffer[index + name_length:index + name_length + value_length].decode('utf-8')
            pairs.append((name, value))
            index += name_length + value_length
        else:
            raise ProtocolError('name/value data missing from buffer')

    return pairs
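# Usage sketch: a single pair with one-byte lengths (name "A", value "B").
_buf = bytearray([1, 1]) + b"AB"
assert decode_name_value_pairs(_buf) == [("A", "B")]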
ef302eb2c6c55605fdc9b4a9ef06f59782ba1d94
22,489
def host_is_local(host: str) -> bool:
    """
    Tells whether given host is local.

    :param host: host name or address, optionally with a port (e.g. "localhost:8080")
    :return: True if host is local otherwise False
    """
    local_names = {
        "localhost",
        "127.0.0.1",
    }
    # Compare the host name exactly (after stripping any port); a plain
    # substring test would wrongly match names such as "notlocalhost.example.com".
    hostname = host.split(":", 1)[0]
    is_local = hostname in local_names
    return is_local
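# Usage sketch: exact host names match, with or without a port; unrelated
# hosts that merely contain "localhost" do not.
assert host_is_local("localhost")
assert host_is_local("127.0.0.1:8080")
assert not host_is_local("notlocalhost.example.com")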
ce823b8c309ec842ed1dd5bb04e41356db500658
22,490
import os


def find_in_path(name, path):
    """Search PATH for a binary.

    Args:
        name: the filename to search for
        path: the path, e.g. ['./', './path/to/stuff']

    Returns:
        The abspath to the file, or None if not found.
    """
    for directory in path:
        binpath = os.path.join(directory, name)
        if os.path.exists(binpath):
            return os.path.abspath(binpath)
    return None
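# Usage sketch: search a list of directories; the first hit wins.
# find_in_path("ls", ["/usr/bin", "/bin"])  ->  e.g. "/usr/bin/ls" on most
# Linux systems; returns None when the name is not found in any directory.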
14d24a51e9c885c469b3f36ff13bc3aa3740e811
22,491
import numpy as np


def sigma_function(coeff_matU, coeff_matX, order, V_slack):
    """
    :param coeff_matU: array with voltage coefficients
    :param coeff_matX: array with inverse conjugated voltage coefficients
    :param order: should be prof - 1
    :param V_slack: slack bus voltage vector. Must contain only 1 slack bus
    :return: sigma complex value
    """
    if len(V_slack) > 1:
        print('Sigma values may not be correct')
    V0 = V_slack[0]
    coeff_matU = coeff_matU / V0
    coeff_matX = coeff_matX / V0
    nbus = coeff_matU.shape[1]
    # numpy dtype; the original used the numba alias, which is not a valid
    # numpy dtype argument
    complex_type = np.complex128
    sigmes = np.zeros(nbus, dtype=complex_type)

    if order % 2 == 0:
        M = int(order / 2) - 1
    else:
        M = int(order / 2)

    for d in range(nbus):
        a = coeff_matU[1:2 * M + 2, d]
        b = coeff_matX[0:2 * M + 1, d]
        C = np.zeros((2 * M + 1, 2 * M + 1), dtype=complex_type)

        for i in range(2 * M + 1):
            if i < M:
                C[1 + i:, i] = a[:2 * M - i]
            else:
                C[i - M:, i] = - b[:3 * M - i + 1]

        lhs = np.linalg.solve(C, -a)

        sigmes[d] = np.sum(lhs[M:]) / (np.sum(lhs[:M]) + 1)

    return sigmes
1accad7b95360a0652143ca367c54bca372662a7
22,492
import logging
import logging.handlers
import sys


def get_logger(verbose=0):
    """ set up logging according to the verbose level given on the command line """
    global LOGGER
    if LOGGER is None:
        LOGGER = logging.getLogger(sys.argv[0])
        stderr = logging.StreamHandler()
        level = logging.WARNING
        lformat = "%(message)s"
        stderr.setFormatter(logging.Formatter(lformat))
        if verbose == 1:
            level = logging.INFO
        elif verbose > 1:
            stderr.setFormatter(logging.Formatter("%(asctime)s: %(levelname)s: %(message)s"))
            level = logging.DEBUG
        LOGGER.setLevel(level)
        LOGGER.addHandler(stderr)
        syslog = logging.handlers.SysLogHandler("/dev/log")
        syslog.setFormatter(logging.Formatter("%(name)s: %(message)s"))
        LOGGER.addHandler(syslog)
        LOGGER.debug("Setting verbose to %s" % verbose)
    return LOGGER
973f7024664144728a74319e02bb1d2a2fe71774
22,493
from typing import List

from monty.serialization import loadfn


def get_dbs(db_names: List[str], db_file: str = "./db_info.pub.json") -> List:
    """Read the db_file and get the databases corresponding to <<db_names>>

    Args:
        db_names (List[str]): A list of names of the databases we want
        db_file (str): The db_file we are reading from

    Returns:
        List: the stores we need to access
    """
    db_dict = loadfn(db_file)
    stores = []
    for j_name in db_names:
        if j_name not in db_dict:
            raise ValueError(
                f"The store named {j_name} is missing from the db_file")
        stores.append(db_dict[j_name])
    return stores
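# Usage sketch (comment form; assumes a db_info.pub.json next to the script
# with entries keyed by store name):
#   stores = get_dbs(["tasks", "materials"], db_file="./db_info.pub.json")
#   # -> [<store config for "tasks">, <store config for "materials">]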
ab0074f3cc5d846f7c24bf4bca6b348bfa3d6bf3
22,494
import numpy as np
from sklearn import neighbors


def knn_threshold(data, column, threshold=15, k=3):
    """
    Cluster rare samples in data[column] with frequency less than threshold
    with one of k-nearest clusters

    Args:
        data - pandas.DataFrame containing columns: latitude, longitude, column
        column - the name of the column to threshold
        threshold - the minimum sample frequency
        k - the number of k-neighbors to explore when selecting cluster partner
    """
    def ids_centers_sizes(data):
        dat = np.array([(i,
                         data.latitude[data[column] == i].mean(),
                         data.longitude[data[column] == i].mean(),
                         (data[column] == i).sum())
                        for i in set(list(data[column]))])
        return dat[:, 0], dat[:, 1:-1].astype(float), dat[:, -1].astype(int)

    knn = neighbors.NearestNeighbors(n_neighbors=k)

    while True:
        ids, centers, sizes = ids_centers_sizes(data)
        asrt = np.argsort(sizes)

        if sizes[asrt[0]] >= threshold:
            break

        cids = np.copy(ids)
        knn.fit(centers)

        for i in asrt:
            if sizes[i] < threshold:
                # kneighbors expects a 2-D array, so reshape the single center
                nearest = knn.kneighbors(centers[i].reshape(1, -1))[1].flatten()
                nearest = nearest[nearest != i]
                sel = nearest[np.argmin(sizes[nearest])]

                total_size = sizes[sel] + sizes[i]
                # use .loc to avoid pandas chained-assignment pitfalls
                data.loc[data[column] == cids[i], column] = cids[sel]
                cids[cids == i] = cids[sel]
                sizes[i] = total_size
                sizes[sel] = total_size

    return data
37de2c0b4c14cdbb6a0dd10ee7ea1e270fe6ef56
22,495
import enoki as ek

from mitsuba.core import (Float, UInt32, UInt64, Vector2f,
                          is_monochromatic, is_rgb, is_polarized, DEBUG)
from mitsuba.render import ImageBlock
from mitsuba.core import depolarize
from mitsuba.core import spectrum_to_xyz, xyz_to_srgb


def _render_helper(scene, spp=None, sensor_index=0):
    """
    Internally used function: render the specified Mitsuba scene and return a
    floating point array containing RGB values and AOVs, if applicable
    """
    sensor = scene.sensors()[sensor_index]
    film = sensor.film()
    sampler = sensor.sampler()
    film_size = film.crop_size()
    if spp is None:
        spp = sampler.sample_count()

    total_sample_count = ek.hprod(film_size) * spp

    if sampler.wavefront_size() != total_sample_count:
        sampler.seed(ek.arange(UInt64, total_sample_count))

    pos = ek.arange(UInt32, total_sample_count)
    pos //= spp
    scale = Vector2f(1.0 / film_size[0], 1.0 / film_size[1])
    pos = Vector2f(Float(pos % int(film_size[0])),
                   Float(pos // int(film_size[0])))

    pos += sampler.next_2d()

    rays, weights = sensor.sample_ray_differential(
        time=0,
        sample1=sampler.next_1d(),
        sample2=pos * scale,
        sample3=0
    )

    spec, mask, aovs = scene.integrator().sample(scene, sampler, rays)
    spec *= weights
    del mask

    if is_polarized:
        spec = depolarize(spec)

    if is_monochromatic:
        rgb = [spec[0]]
    elif is_rgb:
        rgb = spec
    else:
        xyz = spectrum_to_xyz(spec, rays.wavelengths)
        rgb = xyz_to_srgb(xyz)
        del xyz

    aovs.insert(0, Float(1.0))
    for i in range(len(rgb)):
        aovs.insert(i + 1, rgb[i])
    del rgb, spec, weights, rays

    block = ImageBlock(
        size=film.crop_size(),
        channel_count=len(aovs),
        filter=film.reconstruction_filter(),
        warn_negative=False,
        warn_invalid=DEBUG,
        border=False
    )

    block.clear()
    block.put(pos, aovs)

    del pos
    del aovs

    data = block.data()

    ch = block.channel_count()
    # same ek.arange form as above (the original mixed in a UInt32.arange call)
    i = ek.arange(UInt32, ek.hprod(block.size()) * (ch - 1))

    weight_idx = i // (ch - 1) * ch
    values_idx = (i * ch) // (ch - 1) + 1

    weight = ek.gather(data, weight_idx)
    values = ek.gather(data, values_idx)

    return values / (weight + 1e-8)
54aba5d9051953dc4ac94160432b21ceee4acdb4
22,496
def format_formula(formula): """Converts str of chemical formula into latex format for labelling purposes Parameters ---------- formula: str Chemical formula """ formatted_formula = "" number_format = "" for i, s in enumerate(formula): if s.isdigit(): if not number_format: number_format = "_{" number_format += s if i == len(formula) - 1: number_format += "}" formatted_formula += number_format else: if number_format: number_format += "}" formatted_formula += number_format number_format = "" formatted_formula += s return r"$%s$" % (formatted_formula)
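# Usage sketch: digits are wrapped in LaTeX subscripts.
assert format_formula("H2O") == "$H_{2}O$"
assert format_formula("C6H12O6") == "$C_{6}H_{12}O_{6}$"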
c3c87ffcdc5695b584892c643f02a7959b649935
22,497
def ParseQuery(query): """Parses the entire query. Arguments: query: The command the user sent that needs to be parsed. Returns: Dictionary mapping clause names to their arguments. Raises: bigquery_client.BigqueryInvalidQueryError: When invalid query is given. """ clause_arguments = { 'SELECT': [], 'AS': {}, 'WITHIN': {}, 'FROM': [], 'JOIN': [], 'WHERE': [], 'GROUP BY': [], 'HAVING': [], 'ORDER BY': [], 'LIMIT': [], } try: _EBQParser(clause_arguments).parseString(query) except ValueError as e: raise bigquery_client.BigqueryInvalidQueryError(e, None, None, None) return clause_arguments
b3348b10ec7aeb57916366b96409666b71c9a9ce
22,498
def primary_astigmatism_00(rho, phi):
    """Zernike primary astigmatism 0°."""
    # `e` is assumed to be the module's math engine (a numpy-compatible backend).
    return rho**2 * e.cos(2 * phi)
031bb068b4384dc2cd15bebf3450faa25e0177bc
22,499