code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
def set_contents_from_file(self, fp, headers=None, replace=True, cb=None, num_cb=10, policy=None, md5=None):
    """
    Store the contents of the file-like object 'fp' at the file-URI
    path named by this Key object.

    :type fp: file
    :param fp: the file whose contents to upload

    :type headers: dict
    :param headers: ignored in this subclass.

    :type replace: bool
    :param replace: If False and an object already exists at this key's
        path, the existing object is left untouched. The default of
        True overwrites the object.

    :type cb: function
    :param cb: ignored in this subclass.

    :type num_cb: int
    :param num_cb: ignored in this subclass.

    :type policy: :class:`boto.s3.acl.CannedACLStrings`
    :param policy: ignored in this subclass.

    :param md5: A tuple of (hex digest, base64 digest) as returned by
        the compute_md5 method; ignored in this subclass.
    """
    if self.key_type & self.KEY_STREAM_WRITABLE:
        raise BotoClientError('Stream is not writable')

    if self.key_type & self.KEY_STREAM_READABLE:
        # Stream-backed key: write into the existing stream object.
        destination = self.fp
    else:
        if not replace and os.path.exists(self.full_path):
            return
        destination = open(self.full_path, 'wb')
    try:
        shutil.copyfileobj(fp, destination)
    finally:
        destination.close()
Store an object in a file using the name of the Key object as the key in file URI and the contents of the file pointed to by 'fp' as the contents. :type fp: file :param fp: the file whose contents to upload :type headers: dict :param headers: ignored in this subclass. :type replace: bool :param replace: If this parameter is False, the method will first check to see if an object exists in the bucket with the same key. If it does, it won't overwrite it. The default value is True which will overwrite the object. :type cb: function :param cb: ignored in this subclass. :type cb: int :param num_cb: ignored in this subclass. :type policy: :class:`boto.s3.acl.CannedACLStrings` :param policy: ignored in this subclass. :type md5: A tuple containing the hexdigest version of the MD5 checksum of the file as the first element and the Base64-encoded version of the plain checksum as the second element. This is the same format returned by the compute_md5 method. :param md5: ignored in this subclass.
def parse(item):
    r"""
    Parse an IRCv3 message tag into a dict with 'key' and unescaped 'value'.

    >>> Tag.parse('x') == {'key': 'x', 'value': None}
    True
    >>> Tag.parse('x=yes') == {'key': 'x', 'value': 'yes'}
    True
    >>> Tag.parse('x=3')['value']
    '3'
    >>> Tag.parse('x=red fox\\:green eggs')['value']
    'red fox;green eggs'
    >>> Tag.parse('x=red fox:green eggs')['value']
    'red fox:green eggs'
    >>> Tag.parse('x=a\\nb\\nc')['value']
    'a\nb\nc'
    >>> Tag.parse('x=a\\\\nb')['value']
    'a\\nb'
    """
    key, sep, value = item.partition('=')
    # Unescape in a single left-to-right pass. The previous chain of
    # str.replace calls mis-handled an escaped backslash followed by an
    # escapable character: '\\n' (backslash backslash n) decoded to
    # backslash + newline instead of backslash + 'n'.
    escapes = {':': ';', 's': ' ', 'n': '\n', 'r': '\r', '\\': '\\'}
    chars = []
    i = 0
    while i < len(value):
        if value[i] == '\\' and i + 1 < len(value) and value[i + 1] in escapes:
            chars.append(escapes[value[i + 1]])
            i += 2
        else:
            # Unknown escape or plain character: keep it verbatim
            # (matches the previous behavior for unrecognized escapes).
            chars.append(value[i])
            i += 1
    return {
        'key': key,
        'value': ''.join(chars) or None,
    }
r""" >>> Tag.parse('x') == {'key': 'x', 'value': None} True >>> Tag.parse('x=yes') == {'key': 'x', 'value': 'yes'} True >>> Tag.parse('x=3')['value'] '3' >>> Tag.parse('x=red fox\\:green eggs')['value'] 'red fox;green eggs' >>> Tag.parse('x=red fox:green eggs')['value'] 'red fox:green eggs' >>> Tag.parse('x=a\\nb\\nc')['value'] 'a\nb\nc'
def list_regions(self):
    """
    List all regions visible to the current account.

    Returns:
        A tuple ``(<result>, <ResponseInfo>)``:
        - result: the list of regions on success, ``None`` on failure
        - ResponseInfo: the response information of the request
    """
    endpoint = '{0}/v3/regions'.format(self.host)
    return http._get_with_qiniu_mac(endpoint, None, self.auth)
获得账号可见的区域的信息 列出当前用户所有可使用的区域。 Returns: 返回一个tuple对象,其格式为(<result>, <ResponseInfo>) - result 成功返回区域列表,失败返回None - ResponseInfo 请求的Response信息
def listen(self, addr=None):
    """Wait for a connection/reconnection from a DCC peer.

    Returns the DCCConnection object.

    The local IP address and port are available as self.localaddress
    and self.localport. After connection from a peer, the peer address
    and port are available as self.peeraddress and self.peerport.
    """
    self.buffer = buffer.LineBuffer()
    self.handlers = {}
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.passive = True
    # Fall back to this host's primary address on an ephemeral port.
    bind_addr = addr or (socket.gethostbyname(socket.gethostname()), 0)
    try:
        self.socket.bind(bind_addr)
        self.localaddress, self.localport = self.socket.getsockname()
        self.socket.listen(10)
    except socket.error as err:
        raise DCCConnectionError("Couldn't bind socket: %s" % err)
    return self
Wait for a connection/reconnection from a DCC peer. Returns the DCCConnection object. The local IP address and port are available as self.localaddress and self.localport. After connection from a peer, the peer address and port are available as self.peeraddress and self.peerport.
def _color_dialog_changed(self, n, top, c):
    """
    Updates the color of the n'th slider from the color dialog.

    Parameters
    ----------
    n : int
        Index of the colorpoint being edited.
    top : bool
        True when the top color button opened the dialog.
    c : QColor
        The newly selected color.
    """
    self._button_save.setEnabled(True)
    cp = self._colorpoint_list[n]

    # Normalized [0, 1] channel triple used for the colorpoint data.
    rgb = [c.red() / 255.0, c.green() / 255.0, c.blue() / 255.0]
    # BUGFIX: the blue channel of the stylesheet previously repeated
    # c.green(), so the displayed button color had the wrong blue value.
    style = ("background-color: rgb(" + str(c.red()) + "," + str(c.green())
             + "," + str(c.blue()) + "); border-radius: 3px;")

    # if they're linked, set both
    if self._checkboxes[n].isChecked():
        self.modify_colorpoint(n, cp[0], list(rgb), list(rgb))
        self._buttons_top_color[n].setStyleSheet(style)
        self._buttons_bottom_color[n].setStyleSheet(style)
    elif top:
        self.modify_colorpoint(n, cp[0], cp[1], rgb)
        self._buttons_top_color[n].setStyleSheet(style)
    else:
        self.modify_colorpoint(n, cp[0], rgb, cp[2])
        self._buttons_bottom_color[n].setStyleSheet(style)
Updates the color of the slider.
def save_params_to_file(self, fname: str):
    """
    Save the model parameters (and auxiliary parameters if present) to disk.

    :param fname: Path to save parameters to.
    """
    params = self.params.copy()
    if self.aux_params is None:
        utils.save_params(params, fname)
    else:
        utils.save_params(params, fname, self.aux_params.copy())
    logging.info('Saved params to "%s"', fname)
Saves model parameters to file. :param fname: Path to save parameters to.
def estimateAnomalyLikelihoods(anomalyScores, averagingWindow=10, skipRecords=0, verbosity=0):
    """
    Given a series of anomaly scores, compute the likelihood for each score.

    This function should be called once on a batch of historical anomaly
    scores to get an initial estimate of the distribution, then again every
    so often (say every 50 records) to refresh that estimate.

    :param anomalyScores: a list of records, each of the form
        [timestamp, value, score], e.g.
        [datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0].
        For best results the list should hold 1000-10,000 records.
    :param averagingWindow: integer number of records to average over
    :param skipRecords: number of records to skip when estimating the
        distributions. If >= len(anomalyScores), a very broad distribution
        is returned that makes everything pretty likely.
    :param verbosity: debug printout level (0 = none, 1 = occasional,
        2 = every record)
    :returns: 3-tuple of (likelihoods numpy array, averaged input record
        list, params dict holding the estimator state)
    """
    if verbosity > 1:
        print("In estimateAnomalyLikelihoods.")
        print("Number of anomaly scores:", len(anomalyScores))
        print("Skip records=", skipRecords)
        print("First 20:", anomalyScores[0:min(20, len(anomalyScores))])

    if len(anomalyScores) == 0:
        raise ValueError("Must have at least one anomalyScore")

    # Compute averaged anomaly scores
    aggRecordList, historicalValues, total = _anomalyScoreMovingAverage(
        anomalyScores,
        windowSize=averagingWindow,
        verbosity=verbosity)
    dataValues = numpy.array([record[2] for record in aggRecordList])

    # Estimate the distribution of anomaly scores based on aggregated records
    if len(aggRecordList) <= skipRecords:
        distributionParams = nullDistribution(verbosity=verbosity)
    else:
        distributionParams = estimateNormal(dataValues[skipRecords:])

        # HACK ALERT! The HTMPredictionModel currently does not handle constant
        # metric values very well (time of day encoder changes sometimes lead
        # to unstable SDR's even though the metric is constant). Until this is
        # resolved, we explicitly detect and handle completely flat metric
        # values by reporting them as not anomalous.
        # Only applied when all metric values are numeric.
        if all(isinstance(record[1], numbers.Number) for record in aggRecordList):
            metricValues = numpy.array([record[1] for record in aggRecordList])
            metricDistribution = estimateNormal(metricValues[skipRecords:],
                                                performLowerBoundCheck=False)
            if metricDistribution["variance"] < 1.5e-5:
                distributionParams = nullDistribution(verbosity=verbosity)

    # Estimate likelihoods based on this distribution
    likelihoods = numpy.array(dataValues, dtype=float)
    for index, score in enumerate(dataValues):
        likelihoods[index] = tailProbability(score, distributionParams)

    # Filter likelihood values
    filteredLikelihoods = numpy.array(_filterLikelihoods(likelihoods))

    params = {
        "distribution": distributionParams,
        "movingAverage": {
            "historicalValues": historicalValues,
            "total": total,
            "windowSize": averagingWindow,
        },
        "historicalLikelihoods":
            list(likelihoods[-min(averagingWindow, len(likelihoods)):]),
    }

    if verbosity > 1:
        print("Discovered params=")
        print(params)
        print("Number of likelihoods:", len(likelihoods))
        print("First 20 likelihoods:", (
            filteredLikelihoods[0:min(20, len(filteredLikelihoods))]))
        print("leaving estimateAnomalyLikelihoods")

    return (filteredLikelihoods, aggRecordList, params)
Given a series of anomaly scores, compute the likelihood for each score. This function should be called once on a bunch of historical anomaly scores for an initial estimate of the distribution. It should be called again every so often (say every 50 records) to update the estimate. :param anomalyScores: a list of records. Each record is a list with the following three elements: [timestamp, value, score] Example:: [datetime.datetime(2013, 8, 10, 23, 0), 6.0, 1.0] For best results, the list should be between 1000 and 10,000 records :param averagingWindow: integer number of records to average over :param skipRecords: integer specifying number of records to skip when estimating distributions. If skip records are >= len(anomalyScores), a very broad distribution is returned that makes everything pretty likely. :param verbosity: integer controlling extent of printouts for debugging 0 = none 1 = occasional information 2 = print every record :returns: 3-tuple consisting of: - likelihoods numpy array of likelihoods, one for each aggregated point - avgRecordList list of averaged input records - params a small JSON dict that contains the state of the estimator
def fit(self, X, y=None):
    """Fit distance-based AD.

    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        The input samples. Use ``dtype=np.float32`` for maximum
        efficiency.
    y : array-like, optional
        Target values; required only when ``threshold == 'cv'``.

    Returns
    -------
    self : object
        Returns self.
    """
    # Check data
    X = check_array(X)
    self.tree = BallTree(X, leaf_size=self.leaf_size, metric=self.metric)
    # Distance to the nearest neighbor other than the point itself (k=2).
    dist_train = self.tree.query(X, k=2)[0]
    nearest = dist_train[:, 1]

    if self.threshold == 'auto':
        self.threshold_value = 0.5 * sqrt(var(nearest)) + mean(nearest)
    elif self.threshold == 'cv':
        if y is None:
            raise ValueError("Y must be specified to find the optimal threshold.")
        y = check_array(y, accept_sparse='csc', ensure_2d=False, dtype=None)
        self.threshold_value = 0
        best_score = 0
        y_pred_folds, y_true_folds, ad_folds = [], [], []
        splitter = KFold(n_splits=5, random_state=1, shuffle=True)
        # Collect out-of-fold predictions and nearest-neighbor distances.
        for train_index, test_index in splitter.split(X):
            x_train = safe_indexing(X, train_index)
            x_test = safe_indexing(X, test_index)
            y_train = safe_indexing(y, train_index)
            y_test = safe_indexing(y, test_index)
            data_test = safe_indexing(nearest, test_index)
            if self.reg_model is None:
                reg_model = RandomForestRegressor(n_estimators=500, random_state=1).fit(x_train, y_train)
            else:
                reg_model = clone(self.reg_model).fit(x_train, y_train)
            y_pred_folds.append(reg_model.predict(x_test))
            y_true_folds.append(y_test)
            ad_folds.append(data_test)
        # Pick the distance cutoff that maximizes the chosen AD score.
        all_distances = hstack(ad_folds)
        for z in unique(all_distances):
            in_domain = all_distances <= z
            if self.score == 'ba_ad':
                val = balanced_accuracy_score_with_ad(Y_true=hstack(y_true_folds),
                                                      Y_pred=hstack(y_pred_folds),
                                                      AD=in_domain)
            elif self.score == 'rmse_ad':
                val = rmse_score_with_ad(Y_true=hstack(y_true_folds),
                                         Y_pred=hstack(y_pred_folds),
                                         AD=in_domain)
            if val >= best_score:
                best_score = val
                self.threshold_value = z
    else:
        # A numeric threshold was supplied directly.
        self.threshold_value = self.threshold
    return self
Fit distance-based AD. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The input samples. Use ``dtype=np.float32`` for maximum efficiency. Returns ------- self : object Returns self.
def extract_stack(f=None, limit=None):
    """Equivalent to traceback.extract_stack(), but also works with psyco.

    :param f: must be None; kept only for signature compatibility with
        traceback.extract_stack.
    :param limit: maximum number of frames to return, or None for all.
    :returns: a list of frame summaries, excluding this function's own frame.
    :raises RuntimeError: if f is not None.
    """
    if f is not None:
        raise RuntimeError("Timba.utils.extract_stack: f has to be None, don't ask why")
    # normally we can just use the traceback.extract_stack() function and
    # cut out the last frame (which is just ourselves). However, under psyco
    # this seems to return an empty list, so we use sys._getframe() instead
    lim = limit
    if lim is not None:
        lim += 1  # ask for one extra frame: our own, dropped below
    tb = traceback.extract_stack(None, lim)
    if tb:
        return tb[:-1]  # skip current frame
    # else presumably running under psyco
    return nonportable_extract_stack(f, limit)
equivalent to traceback.extract_stack(), but also works with psyco
def course_run_detail(self, request, pk, course_id):  # pylint: disable=invalid-name,unused-argument
    """
    Return the metadata for the specified course run.

    Metadata is only returned when the course run is included in the
    specified EnterpriseCustomerCatalog.
    """
    catalog = self.get_object()
    course_run = catalog.get_course_run(course_id)
    if not course_run:
        raise Http404

    context = self.get_serializer_context()
    context['enterprise_customer_catalog'] = catalog
    serializer = serializers.CourseRunDetailSerializer(course_run, context=context)
    return Response(serializer.data)
Return the metadata for the specified course run. The course run needs to be included in the specified EnterpriseCustomerCatalog in order for metadata to be returned from this endpoint.
async def acquire_lease_async(self, lease):
    """
    Acquire the lease on the desired partition for this EventProcessorHost.

    Note that it is legal to acquire a lease that is already owned by
    another host: lease-stealing is how partitions are redistributed when
    additional hosts are started.

    :param lease: The stored lease to be acquired.
    :type lease: ~azure.eventprocessorhost.lease.Lease
    :return: `True` if the lease was acquired successfully, `False` if not.
    :rtype: bool
    """
    acquired = True
    new_lease_id = str(uuid.uuid4())
    partition_id = lease.partition_id
    try:
        if asyncio.iscoroutinefunction(lease.state):
            state = await lease.state()
        else:
            state = lease.state()
        if state == "leased":
            if lease.token:
                _logger.info("ChangingLease %r %r", self.host.guid, lease.partition_id)
                await self.host.loop.run_in_executor(
                    self.executor,
                    functools.partial(
                        self.storage_client.change_blob_lease,
                        self.lease_container_name,
                        partition_id,
                        lease.token,
                        new_lease_id))
                lease.token = new_lease_id
            else:
                # Race condition: when this instance of EventProcessorHost
                # scanned the lease blobs, this partition was unowned (token
                # is empty) but between then and now, another instance of EPH
                # has established a lease (state is "leased"). We normally
                # enforce that we only steal the lease if it is still owned by
                # the instance which owned it when we scanned, but we can't do
                # that when we don't know who owns it. The safest thing to do
                # is just fail the acquisition. If that means that one EPH
                # instance gets more partitions than it should, rebalancing
                # will take care of that quickly enough.
                acquired = False
        else:
            _logger.info("AcquiringLease %r %r", self.host.guid, lease.partition_id)
            lease.token = await self.host.loop.run_in_executor(
                self.executor,
                functools.partial(
                    self.storage_client.acquire_blob_lease,
                    self.lease_container_name,
                    partition_id,
                    self.lease_duration,
                    new_lease_id))
        lease.owner = self.host.host_name
        lease.increment_epoch()
        # check if this solves the issue
        acquired = await self.update_lease_async(lease)
    except Exception as err:  # pylint: disable=broad-except
        _logger.error("Failed to acquire lease %r %r %r", err, partition_id, lease.token)
        return False
    return acquired
Acquire the lease on the desired partition for this EventProcessorHost. Note that it is legal to acquire a lease that is already owned by another host. Lease-stealing is how partitions are redistributed when additional hosts are started. :param lease: The stored lease to be acquired. :type lease: ~azure.eventprocessorhost.lease.Lease :return: `True` if the lease was acquired successfully, `False` if not. :rtype: bool
def get_backoff_time(self):
    """
    Formula for computing the current backoff.

    :rtype: float
    """
    # Only the most recent run of consecutive non-redirect errors counts
    # toward the exponential backoff.
    consecutive = 0
    for entry in reversed(self.history):
        if entry.redirect_location is not None:
            break
        consecutive += 1
    if consecutive <= 1:
        return 0
    backoff = self.backoff_factor * (2 ** (consecutive - 1))
    return min(self.BACKOFF_MAX, backoff)
Formula for computing the current backoff :rtype: float
def showMenu(self, point):
    """
    Displays the context menu for this filter widget at the given point.
    """
    menu = QMenu(self)
    edit_action = menu.addAction('Edit quick filter...')

    triggered = menu.exec_(self.mapToGlobal(point))
    if triggered == edit_action:
        text, accepted = XTextEdit.getText(self.window(),
                                           'Edit Format',
                                           'Format:',
                                           self.filterFormat(),
                                           wrapped=False)
        if accepted:
            self.setFilterFormat(text)
Displays the menu for this filter widget.
def open_unknown_proxy(self, proxy, fullurl, data=None):
    """Overridable interface to open unknown URL type."""
    # Avoid shadowing the builtin 'type'.
    scheme, remainder = splittype(fullurl)
    raise IOError('url error', 'invalid proxy for %s' % scheme, proxy)
Overridable interface to open unknown URL type.
def get_type_string(self, data, type_string):
    """ Gets type string.

    Looks up the type string for 'data' in ``type_to_typestring`` by its
    ``type``, first by the type object itself and then by its fully
    qualified ``module.name`` string. A non-``None`` 'type_string'
    overrides the lookup entirely, which makes it easier for subclasses
    to convert something the parent marshaller can write to disk while
    still recording the right type string.

    Parameters
    ----------
    data : type to be marshalled
        The Python object that is being written to disk.
    type_string : str or None
        If it is a ``str``, it overrides any looked up type string.
        ``None`` means don't override.

    Returns
    -------
    str
        The type string associated with 'data'. Will be 'type_string'
        if it is not ``None``.

    Notes
    -----
    Subclasses probably do not need to override this method.
    """
    if type_string is not None:
        return type_string
    tp = type(data)
    try:
        return self.type_to_typestring[tp]
    except KeyError:
        # Fall back to the fully qualified name of the type.
        return self.type_to_typestring['{0}.{1}'.format(tp.__module__, tp.__name__)]
Gets type string. Finds the type string for 'data' contained in ``python_type_strings`` using its ``type``. Non-``None`` 'type_string` overrides whatever type string is looked up. The override makes it easier for subclasses to convert something that the parent marshaller can write to disk but still put the right type string in place). Parameters ---------- data : type to be marshalled The Python object that is being written to disk. type_string : str or None If it is a ``str``, it overrides any looked up type string. ``None`` means don't override. Returns ------- str The type string associated with 'data'. Will be 'type_string' if it is not ``None``. Notes ----- Subclasses probably do not need to override this method.
def to_int(argument):
    """
    Converts the ``str`` argument to an integer.

    Supports binary ("0b..."), hexadecimal ("0x..."), octal
    (leading "0"), character literals ("'a'") and plain decimal
    notation.

    >>> from py_register_machine2.engine_tools.conversions import *
    >>> to_int("0x04")
    4
    >>> to_int("'a'")
    97
    >>> to_int("0b101")
    5

    :raises ValueError: if the argument is not a recognized integer form.
    """
    if argument.startswith("0b"):
        return int(argument[2:], 2)
    if argument.startswith("0x"):
        return int(argument[2:], 16)
    if argument.startswith("0") and argument != "0":
        return int(argument[1:], 8)
    # Guard the length so short inputs (e.g. "5" or a lone "'") cannot
    # raise IndexError on argument[2]; they fall through to int() below.
    if len(argument) >= 3 and argument[0] == "'" and argument[2] == "'":
        return ord(argument[1])
    return int(argument)
Converts the ``str`` argument to an integer: >>> from py_register_machine2.engine_tools.conversions import * >>> to_int("0x04") 4 >>> to_int("'a'") 97
def _nginx_http_spec(port_spec, bridge_ip):
    """Return the nginx HTTP server-block config string for one port spec."""
    parts = [
        "\t server {\n",
        "\t \t {}\n".format(_nginx_max_file_size_string()),
        "\t \t {}\n".format(_nginx_listen_string(port_spec)),
        "\t \t {}\n".format(_nginx_server_name_string(port_spec)),
        _nginx_location_spec(port_spec, bridge_ip),
        _custom_502_page(),
        "\t }\n",
    ]
    return ''.join(parts)
This will output the nginx HTTP config string for specific port spec
def in6_getha(prefix):
    """
    Return the anycast address associated with all home agents on a
    given subnet.
    """
    # Mask the prefix to its /64 network part, then OR in the reserved
    # home-agents anycast suffix.
    network = in6_and(inet_pton(socket.AF_INET6, prefix), in6_cidr2mask(64))
    ha_suffix = inet_pton(socket.AF_INET6, '::fdff:ffff:ffff:fffe')
    return inet_ntop(socket.AF_INET6, in6_or(network, ha_suffix))
Return the anycast address associated with all home agents on a given subnet.
def update_host_password(host, username, password, new_password, protocol=None, port=None):
    '''
    Update the password for a given host.

    .. note:: Currently only works with connections to ESXi hosts. Does not work with vCenter servers.

    host
        The location of the ESXi host.

    username
        The username used to login to the ESXi host, such as ``root``.

    password
        The password used to login to the ESXi host.

    new_password
        The new password that will be updated for the provided username on the ESXi host.

    protocol
        Optionally set to alternate protocol if the host is not using the default
        protocol. Default protocol is ``https``.

    port
        Optionally set to alternate port if the host is not using the default
        port. Default port is ``443``.

    CLI Example:

    .. code-block:: bash

        salt '*' vsphere.update_host_password my.esxi.host root original-bad-password new-bad-password
    '''
    service_instance = salt.utils.vmware.get_service_instance(host=host,
                                                              username=username,
                                                              password=password,
                                                              protocol=protocol,
                                                              port=port)
    # Get LocalAccountManager object
    account_manager = salt.utils.vmware.get_inventory(service_instance).accountManager

    # Build the account specification carrying the new credentials.
    user_account = vim.host.LocalAccountManager.AccountSpecification()
    user_account.id = username
    user_account.password = new_password

    # Push the password change to the host.
    try:
        account_manager.UpdateUser(user_account)
    except vmodl.fault.SystemError as err:
        raise CommandExecutionError(err.msg)
    except vim.fault.UserNotFound:
        raise CommandExecutionError('\'vsphere.update_host_password\' failed for host {0}: '
                                    'User was not found.'.format(host))
    except vim.fault.AlreadyExists:
        # The username/password combination already exists; nothing to do.
        pass

    return True
Update the password for a given host. .. note:: Currently only works with connections to ESXi hosts. Does not work with vCenter servers. host The location of the ESXi host. username The username used to login to the ESXi host, such as ``root``. password The password used to login to the ESXi host. new_password The new password that will be updated for the provided username on the ESXi host. protocol Optionally set to alternate protocol if the host is not using the default protocol. Default protocol is ``https``. port Optionally set to alternate port if the host is not using the default port. Default port is ``443``. CLI Example: .. code-block:: bash salt '*' vsphere.update_host_password my.esxi.host root original-bad-password new-bad-password
def _search_files(self): """Retrieve the file paths stored under the base path.""" for root, _, files in os.walk(self.dirpath): for filename in files: location = os.path.join(root, filename) yield location
Retrieve the file paths stored under the base path.
def sweFixedStar(star, jd):
    """ Returns a fixed star from the Ephemeris as a dict with its
    id, magnitude, longitude and latitude. """
    positions = swisseph.fixstar_ut(star, jd)
    return {
        'id': star,
        'mag': swisseph.fixstar_mag(star),
        'lon': positions[0],
        'lat': positions[1],
    }
Returns a fixed star from the Ephemeris.
def dag(self) -> Tuple[Dict, Dict]:
    """Construct the DAG of this pipeline run based on its operation
    runs and their downstream runs.
    """
    from pipelines import dags

    runs = self.operation_runs.all().prefetch_related('downstream_runs')

    def downstream_ids(operation_run):
        # IDs of the runs directly downstream of the given run.
        return operation_run.downstream_runs.values_list('id', flat=True)

    return dags.get_dag(runs, downstream_ids)
Construct the DAG of this pipeline run based on the its operation runs and their downstream.
def search(cls, term=None, page=0, **criteria):
    """
    Search a list of the model.

    If you use "term":
    - Returns a collection of people that have a name matching the term
      passed in through the URL.

    If you use "criteria":
    - Returns people who match your search criteria. Search by any
      criteria you can on the Contacts tab, including custom fields.
      Combine criteria to narrow results.

    Exactly one of 'term' and 'criteria' must be supplied.

    :param term: params as string
    :type term: str
    :param criteria: search for more criteria
    :type criteria: dict
    :param page: the page
    :type page: int
    :return: the list of the parsed xml objects
    :rtype: list
    """
    # BUGFIX: the previous check `term or criteria and not (term and criteria)`
    # parsed as `term or (...)`, so supplying BOTH term and criteria was
    # wrongly accepted. Require exactly one of the two.
    assert (term or criteria) and not (term and criteria)
    params = {
        'n': int(page) * cls.SEARCH_OFFSET,
    }
    if term:
        params['term'] = term
    for key, value in criteria.items():
        params['criteria[{}]'.format(key)] = value
    return fields.ListField(name=cls.ENDPOINT, init_class=cls).decode(
        cls.element_from_string(
            cls._get_request(
                endpoint=cls.ENDPOINT + '/search',
                params=params
            ).text
        )
    )
Search a list of the model If you use "term": - Returns a collection of people that have a name matching the term passed in through the URL. If you use "criteria": - returns people who match your search criteria. Search by any criteria you can on the Contacts tab, including custom fields. Combine criteria to narrow results :param term: params as string :type term: str :param criteria: search for more criteria :type criteria: dict :param page: the page :type page: int :return: the list of the parsed xml objects :rtype: list
def opt_grid_parallel(params, func, limits, ftol=0.01, disp=0, compute_errors=True): """ parallelized version of :func:`opt_grid` """ import multiprocessing def spawn(f): def fun(q_in,q_out): while True: i,x = q_in.get() if i == None: break q_out.put((i,f(x))) return fun def parmap(f, X, nprocs = multiprocessing.cpu_count()): q_in = multiprocessing.Queue(1) q_out = multiprocessing.Queue() proc = [multiprocessing.Process(target=spawn(f),args=(q_in,q_out)) for _ in range(nprocs)] for p in proc: p.daemon = True p.start() sent = [q_in.put((i,x)) for i,x in enumerate(X)] [q_in.put((None,None)) for _ in range(nprocs)] res = [q_out.get() for _ in range(len(sent))] [p.join() for p in proc] return [x for i,x in sorted(res)] nthreads = multiprocessing.cpu_count() caches = [[] for p in params] newparams = numpy.copy(params) errors = [[] for p in params] indices = range(0, len(params), nthreads) k = 0 while k < len(params): j = min(len(params), k + nthreads * 2) def run1d((i, curparams, curlimits)): cache = [] def func1(x0): curparams[i] = x0 v = func(curparams) cache.append([x0, v]) return v lo, hi = curlimits bestval = optimize(func1, x0=p, cons=[lambda x: x - lo, lambda x: hi - x], ftol=ftol, disp=disp - 1) beststat = func1(bestval) if compute_errors: errors = cache2errors(func1, cache, disp=disp - 1) return bestval, beststat, errors, cache return bestval, beststat, cache results = parmap(run1d, [(i, numpy.copy(newparams), limits[i]) for i in range(k, j)]) for i, r in enumerate(results): if compute_errors: v, s, e, c = r if disp > 0: print '\tnew value of %d: %e [%e .. %e] yielded %e' % (i + k, v, e[0], e[1], s) else: v, s, c = r e = [] if disp > 0: print '\tnew value of %d: %e yielded %e' % (i + k, v, s) newparams[i + k] = v caches[i + k] = c errors[i + k] = e k = j beststat = func(newparams) if disp > 0: print 'optimization done, reached %e' % (beststat) if compute_errors: return newparams, errors else: return newparams
parallelized version of :func:`opt_grid`
def save_linked_hdds_info(self):
    """
    Save linked cloned hard disks information.

    :returns: disk table information
    """
    hdd_table = []
    if self.linked_clone and os.path.exists(self.working_dir):
        hdd_files = yield from self._get_all_hdd_files()
        vm_info = yield from self._get_vm_info()
        for entry, value in vm_info.items():
            # match Controller-PortNumber-DeviceNumber entry
            match = re.search(r"^([\s\w]+)\-(\d)\-(\d)$", entry)
            if not match:
                continue
            controller = match.group(1)
            port = match.group(2)
            device = match.group(3)
            snapshot_path = os.path.join(self.working_dir, self._vmname, "Snapshots", os.path.basename(value))
            if value in hdd_files and os.path.exists(snapshot_path):
                log.info("VirtualBox VM '{name}' [{id}] detaching HDD {controller} {port} {device}".format(name=self.name, id=self.id, controller=controller, port=port, device=device))
                hdd_table.append(
                    {
                        "hdd": os.path.basename(value),
                        "controller": controller,
                        "port": port,
                        "device": device,
                    }
                )

    if hdd_table:
        try:
            hdd_info_file = os.path.join(self.working_dir, self._vmname, "hdd_info.json")
            with open(hdd_info_file, "w", encoding="utf-8") as f:
                json.dump(hdd_table, f, indent=4)
        except OSError as e:
            log.warning("VirtualBox VM '{name}' [{id}] could not write HHD info file: {error}".format(name=self.name, id=self.id, error=e.strerror))

    return hdd_table
Save linked cloned hard disks information. :returns: disk table information
def reset(self):
    """
    Reset the Quantum Abstract Machine to its initial state.

    Particularly useful when the machine has gotten into an unwanted
    state — for example, if the QAM is interrupted in the middle of
    a run.
    """
    self.status = 'connected'
    self._variables_shim = {}
    self._executable = None
    self._bitstrings = None
Reset the Quantum Abstract Machine to its initial state, which is particularly useful when it has gotten into an unwanted state. This can happen, for example, if the QAM is interrupted in the middle of a run.
def get_new_client(self, public=True):
    """
    Returns a fresh (non-cached) client instance for this endpoint.
    """
    return self._get_client(cached=False, public=public)
Returns a new instance of the client for this endpoint.
def generate_host_keys(hostname: str) -> Iterator[str]:
    """Yield Chrome/Chromium keys for `hostname`, from least to most specific.

    Given a hostname like foo.example.com, this yields the key sequence:

    example.com
    .example.com
    foo.example.com
    .foo.example.com
    """
    labels = hostname.split('.')
    # Start from the two right-most labels and widen towards the full name.
    for count in range(2, len(labels) + 1):
        suffix = '.'.join(labels[-count:])
        yield suffix
        yield '.' + suffix
Yield Chrome/Chromium keys for `hostname`, from least to most specific. Given a hostname like foo.example.com, this yields the key sequence: example.com .example.com foo.example.com .foo.example.com
def vectored_io_from_metadata(md):
    # type: (dict) -> collections.namedtuple
    """Convert vectored io metadata in json metadata
    :param dict md: metadata dictionary
    :rtype: VectoredStripe or None
    :return: vectored io metadata, or None if the metadata carries no
        vectored-io entry
    :raises RuntimeError: if the vectored-io mode is not "stripe"
    """
    try:
        mdattr = json.loads(
            md[JSON_KEY_BLOBXFER_METADATA])[_JSON_KEY_VECTORED_IO]
    except (KeyError, TypeError):
        # no blobxfer / vectored-io metadata present (or md value is not
        # subscriptable): fall through and return None below
        pass
    else:
        if mdattr[_JSON_KEY_VECTORED_IO_MODE] == _JSON_KEY_VECTORED_IO_STRIPE:
            mdstripe = mdattr[_JSON_KEY_VECTORED_IO_STRIPE]
            try:
                nextptr = explode_vectored_io_next_entry(
                    mdstripe[_JSON_KEY_VECTORED_IO_STRIPE_NEXT])
            except (KeyError, AttributeError):
                # final slice in the chain: no "next" entry to follow
                nextptr = None
            vio = VectoredStripe(
                total_size=mdstripe[_JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SIZE],
                offset_start=mdstripe[
                    _JSON_KEY_VECTORED_IO_STRIPE_OFFSET_START],
                total_slices=mdstripe[
                    _JSON_KEY_VECTORED_IO_STRIPE_TOTAL_SLICES],
                slice_id=mdstripe[_JSON_KEY_VECTORED_IO_STRIPE_SLICE_ID],
                next=nextptr,
            )
            return vio
        else:
            # only "stripe" mode is supported here
            raise RuntimeError('Cannot handle Vectored IO mode: {}'.format(
                mdattr[_JSON_KEY_VECTORED_IO_MODE]))
    return None
Convert vectored io metadata in json metadata :param dict md: metadata dictionary :rtype: VectoredStripe or None :return: vectored io metadata
def get_functions_overridden_by(self, function):
    '''
    Return the list of functions overridden by the given function.

    Args:
        function (core.Function): the overriding function

    Returns:
        list(core.Function): every non-inherited function declared in the
        inheritance chain whose full name matches `function`
    '''
    # Walk each ancestor's own (non-inherited) functions and keep those
    # with the same full signature name, preserving inheritance order.
    overridden = []
    for ancestor in self.inheritance:
        for candidate in ancestor.functions_not_inherited:
            if candidate.full_name == function.full_name:
                overridden.append(candidate)
    return overridden
Return the list of functions overridden by the function Args: (core.Function) Returns: list(core.Function)
def _intermediary_to_markdown(tables, relationships): """ Returns the er markup source in a string. """ t = '\n'.join(t.to_markdown() for t in tables) r = '\n'.join(r.to_markdown() for r in relationships) return '{}\n{}'.format(t, r)
Returns the er markup source in a string.
async def flush(self) -> None:
    """
    Give the writer a chance to flush the pending data
    out of the internal buffer.

    Re-raises a previously recorded write error on every call after the
    writer has finished; cancellation always propagates untouched.
    """
    # Serialise concurrent flush() calls: only one flush runs at a time.
    async with self._flush_lock:
        if self.finished():
            # Writer already finished: surface the recorded error (if
            # any) instead of attempting another flush.
            if self._exc:
                raise self._exc

            return

        try:
            await self._delegate.flush_buf()

        except asyncio.CancelledError:  # pragma: no cover
            raise

        except BaseWriteException as e:
            # A write failure permanently finishes the writer; remember
            # only the FIRST exception so later calls re-raise the same one.
            self._finished.set()

            if self._exc is None:
                self._exc = e

            raise
Give the writer a chance to flush the pending data out of the internal buffer.
def cudnn_stacked_bi_gru(units, n_hidden, seq_lengths=None, n_stacks=2,
                         keep_prob=1.0, concat_stacked_outputs=False,
                         trainable_initial_states=False,
                         name='cudnn_stacked_bi_gru', reuse=False):
    """ Fast CuDNN Stacked Bi-GRU implementation

    Args:
        units: tf.Tensor with dimensions [B x T x F], where
            B - batch size
            T - number of tokens
            F - features
        n_hidden: dimensionality of hidden state
        seq_lengths: number of tokens in each sample in the batch
        n_stacks: number of stacked Bi-GRU
        keep_prob: dropout keep_prob between Bi-GRUs (intra-layer dropout)
        concat_stacked_outputs: return last Bi-GRU output or concat outputs
            from every Bi-GRU
        trainable_initial_states: whether to create a special trainable variable
            to initialize the hidden states of the network or use just zeros
        name: name of the variable scope to use
        reuse: whether to reuse already initialized variable

    Returns:
        h - all hidden states along T dimension,
            tf.Tensor with dimensionality [B x T x ((n_hidden * 2) * n_stacks)]
            when concat_stacked_outputs is True, else [B x T x (n_hidden * 2)]
    """
    if seq_lengths is None:
        # No lengths given: assume every sample spans the full T dimension.
        seq_lengths = tf.ones([tf.shape(units)[0]], dtype=tf.int32) * tf.shape(units)[1]

    # outputs[0] is the raw input; each stack appends its concatenated
    # forward/backward hidden states.
    outputs = [units]
    with tf.variable_scope(name, reuse=reuse):
        for n in range(n_stacks):
            if n == 0:
                inputs = outputs[-1]
            else:
                # Variational (intra-layer) dropout between stacked layers;
                # the first layer consumes the raw input undropped.
                inputs = variational_dropout(outputs[-1], keep_prob=keep_prob)

            (h_fw, h_bw), _ = cudnn_bi_gru(inputs, n_hidden, seq_lengths,
                                           n_layers=1,
                                           trainable_initial_states=trainable_initial_states,
                                           name='{}_cudnn_bi_gru'.format(n),
                                           reuse=reuse)
            # Concatenate forward and backward states along the feature axis.
            outputs.append(tf.concat([h_fw, h_bw], axis=2))
    if concat_stacked_outputs:
        # Skip outputs[0] (the raw input) when concatenating all stacks.
        return tf.concat(outputs[1:], axis=2)
    return outputs[-1]
Fast CuDNN Stacked Bi-GRU implementation Args: units: tf.Tensor with dimensions [B x T x F], where B - batch size T - number of tokens F - features n_hidden: dimensionality of hidden state seq_lengths: number of tokens in each sample in the batch n_stacks: number of stacked Bi-GRU keep_prob: dropout keep_prob between Bi-GRUs (intra-layer dropout) concat_stacked_outputs: return last Bi-GRU output or concat outputs from every Bi-GRU, trainable_initial_states: whether to create a special trainable variable to initialize the hidden states of the network or use just zeros name: name of the variable scope to use reuse: whether to reuse already initialized variable Returns: h - all hidden states along T dimension, tf.Tensor with dimensionality [B x T x ((n_hidden * 2) * n_stacks)]
def get_minibam_bed(bamfile, bedfile, minibam=None):
    """
    Extract a sorted, indexed mini-BAM restricted to the regions in `bedfile`.

    samtools view -L could do the work, but it is NOT random access. Here we
    are processing multiple regions sequentially. See also:

    https://www.biostars.org/p/49306/

    :param bamfile: source BAM path
    :param bedfile: BED file whose regions are extracted
    :param minibam: optional output BAM name; derived from bamfile/bedfile
        when omitted
    :returns: path of the mini-BAM written
    """
    pf = op.basename(bedfile).split(".")[0]
    minibamfile = minibam or op.basename(bamfile).replace(".bam", ".{}.bam".format(pf))
    # FIX: derive the intermediate SAM name from the resolved output name.
    # The previous code called minibam.replace(...), which raised
    # AttributeError whenever minibam was left as None (the default).
    minisamfile = minibamfile.replace(".bam", ".sam")
    baifile = minibamfile + ".bai"
    # Remove a stale index so samtools index regenerates it.
    if op.exists(baifile):
        sh("rm {}".format(baifile))

    # Seed the SAM with the header, then append reads region by region.
    cmd = "samtools view -H {} > {}".format(bamfile, minisamfile)
    sh(cmd)

    cmd = "cat {}".format(bedfile)
    cmd += " | perl -lane 'print \"$F[0]:$F[1]-$F[2]\"'"
    cmd += " | xargs -n1 -t -I \{\}"
    cmd += " samtools view {}".format(bamfile)
    cmd += " \{\} >> " + minisamfile
    sh(cmd)

    # Convert, sort and index the final mini-BAM.
    cmd = "samtools view {} -b".format(minisamfile)
    cmd += " | samtools sort -"
    cmd += " -o {0}".format(minibamfile)

    sh(cmd)
    sh("samtools index {0}".format(minibamfile))

    return minibamfile
samtools view -L could do the work, but it is NOT random access. Here we are processing multiple regions sequentially. See also: https://www.biostars.org/p/49306/
def get_data_home(data_home=None):
    """Return the path of the arviz data dir.

    This folder is used by some dataset loaders to avoid downloading the
    data several times.

    By default the data dir is set to a folder named 'arviz_data' in the
    user home folder. Alternatively, it can be set by the 'ARVIZ_DATA'
    environment variable or programmatically by giving an explicit folder
    path. The '~' symbol is expanded to the user home folder. If the folder
    does not already exist, it is automatically created.

    Parameters
    ----------
    data_home : str | None
        The path to arviz data dir.
    """
    if data_home is None:
        # Fall back to the environment override, then the default location.
        fallback = os.path.join("~", "arviz_data")
        data_home = os.environ.get("ARVIZ_DATA", fallback)
    data_home = os.path.expanduser(data_home)
    # Create the directory on first use.
    if not os.path.exists(data_home):
        os.makedirs(data_home)
    return data_home
Return the path of the arviz data dir. This folder is used by some dataset loaders to avoid downloading the data several times. By default the data dir is set to a folder named 'arviz_data' in the user home folder. Alternatively, it can be set by the 'ARVIZ_DATA' environment variable or programmatically by giving an explicit folder path. The '~' symbol is expanded to the user home folder. If the folder does not already exist, it is automatically created. Parameters ---------- data_home : str | None The path to arviz data dir.
def directive(apply_globally=False, api=None):
    """A decorator that registers a single hug directive.

    :param apply_globally: when True, register the directive for all APIs
        under the underscored form of the function name
    :param api: the API to attach the directive to; when omitted, the API
        is discovered from the decorated function's module
    :returns: the decorator, which returns the function unchanged apart
        from a ``directive`` marker attribute
    """
    def decorator(directive_method):
        if apply_globally:
            # Global registration keyed by the underscored function name.
            hug.defaults.directives[underscore(directive_method.__name__)] = directive_method
        else:
            # Attach to the explicit API, or to the API owning this function.
            apply_to_api = hug.API(api) if api else hug.api.from_object(directive_method)
            apply_to_api.add_directive(directive_method)
        # Mark the function so hug can recognise it as a directive later.
        directive_method.directive = True
        return directive_method
    return decorator
A decorator that registers a single hug directive
def verify_token(self, token, expiration_in_seconds=None):
    """ Verify token signature, verify token expiration, and decrypt token.

    | Returns None if token is expired or invalid.
    | Returns a list of strings and integers on success.

    Implemented as::

        concatenated_str = self.decrypt_string(token, expiration_in_seconds)
        data_items = self.decode_data_items(concatenated_str)
        return data_items

    Example:

    ::

        # Verify that a User with ``user_id`` has a password that ends in ``password_ends_with``.
        token_is_valid = False
        data_items = token_manager.verify(token, expiration_in_seconds)
        if data_items:
            user_id = data_items[0]
            password_ends_with = data_items[1]
            user = user_manager.db_manager.get_user_by_id(user_id)
            token_is_valid = user and user.password[-8:]==password_ends_with
    """
    # Imported lazily so the cryptography package is only required when
    # tokens are actually verified.
    from cryptography.fernet import InvalidToken

    try:
        concatenated_str = self.decrypt_string(token, expiration_in_seconds)
        data_items = self.decode_data_items(concatenated_str)
    except InvalidToken:
        # Bad signature or expired token: signal failure with None
        # rather than propagating the exception.
        data_items = None

    return data_items
Verify token signature, verify token expiration, and decrypt token. | Returns None if token is expired or invalid. | Returns a list of strings and integers on success. Implemented as:: concatenated_str = self.decrypt_string(token, expiration_in_seconds) data_items = self.decode_data_items(concatenated_str) return data_items Example: :: # Verify that a User with ``user_id`` has a password that ends in ``password_ends_with``. token_is_valid = False data_items = token_manager.verify(token, expiration_in_seconds) if data_items: user_id = data_items[0] password_ends_with = data_items[1] user = user_manager.db_manager.get_user_by_id(user_id) token_is_valid = user and user.password[-8:]==password_ends_with
def copytree(src, dst, symlinks=False, ignore=None):
    """Recursively copy a directory tree using copy2().

    The destination directory must not already exist.
    If exception(s) occur, an Error is raised with a list of
    (srcname, dstname, reason) tuples.

    If the optional symlinks flag is true, symbolic links in the
    source tree result in symbolic links in the destination tree; if
    it is false, the contents of the files pointed to by symbolic
    links are copied.

    The optional ignore argument is a callable. If given, it
    is called with the `src` parameter, which is the directory
    being visited by copytree(), and `names` which is the list of
    `src` contents, as returned by os.listdir():

        callable(src, names) -> ignored_names

    Since copytree() is called recursively, the callable will be
    called once for each directory that is copied. It returns a
    list of names relative to the `src` directory that should
    not be copied.

    XXX Consider this example code rather than the ultimate tool.
    """
    from shutil import copy2, Error, copystat

    names = os.listdir(src)
    if ignore is not None:
        ignored_names = ignore(src, names)
    else:
        ignored_names = set()

    os.makedirs(dst)
    errors = []
    for name in names:
        if name in ignored_names:
            continue
        srcname = os.path.join(src, name)
        dstname = os.path.join(dst, name)
        try:
            if symlinks and os.path.islink(srcname):
                linkto = os.readlink(srcname)
                os.symlink(linkto, dstname)
            elif os.path.isdir(srcname):
                copytree(srcname, dstname, symlinks, ignore)
            else:
                # Will raise a SpecialFileError for unsupported file types
                copy2(srcname, dstname)
        # catch the Error from the recursive copytree so that we can
        # continue with other files
        except Error as err:
            errors.extend(err.args[0])
        except EnvironmentError as why:
            errors.append((srcname, dstname, str(why)))
    try:
        copystat(src, dst)
    except OSError as why:
        # FIX: the previous code referenced WindowsError, which raises
        # NameError on non-Windows platforms; OSError.winerror is None
        # everywhere except Windows, so test that attribute instead.
        if getattr(why, 'winerror', None) is not None:
            # Copying file access times may fail on Windows
            pass
        else:
            # FIX: was errors.extend((src, dst, str(why))), which flattened
            # the tuple into three separate list items and broke the
            # list-of-(src, dst, reason)-tuples contract used above.
            errors.append((src, dst, str(why)))
    if errors:
        raise Error(errors)
Recursively copy a directory tree using copy2(). The destination directory must not already exist. If exception(s) occur, an Error is raised with a list of reasons. If the optional symlinks flag is true, symbolic links in the source tree result in symbolic links in the destination tree; if it is false, the contents of the files pointed to by symbolic links are copied. The optional ignore argument is a callable. If given, it is called with the `src` parameter, which is the directory being visited by copytree(), and `names` which is the list of `src` contents, as returned by os.listdir(): callable(src, names) -> ignored_names Since copytree() is called recursively, the callable will be called once for each directory that is copied. It returns a list of names relative to the `src` directory that should not be copied. XXX Consider this example code rather than the ultimate tool.
def apply(self, args, kwargs):
    """
    Replicate a call to the encapsulated function.

    Unlike ``func(*args, **kwargs)``, the call is deterministic in the
    order keyword arguments are checked by Python -- it behaves exactly
    as if the call had been typed at the REPL prompt.  This usually only
    matters when a function is given two invalid keyword arguments: the
    star-syntax form raises on a runtime-dependent one of them (a
    temporary dictionary is involved), whereas this form is fully
    deterministic.  For testing, an OrderedDict may be passed as
    ``kwargs`` to make both the call and the error message reproducible.

    This function is implemented with eval().
    """
    # Build an eval() namespace holding the callable plus one synthetic
    # name per argument, and collect the argument expressions in the
    # exact order they were supplied.
    namespace = {'func': self._func}
    positional = []
    keyword = []
    if args is not None:
        for idx in range(len(args)):
            arg_name = "arg{}".format(idx)
            namespace[arg_name] = args[idx]
            positional.append(arg_name)
    if kwargs is not None:
        # Walk the keys exactly once so the ordering is fixed.
        for idx, key in enumerate(list(kwargs.keys())):
            namespace["kw{}".format(idx)] = kwargs[key]
            keyword.append("{}=kw{}".format(key, idx))
    # Compose the call expression, positionals first, keywords after.
    call_expr = "func({})".format(", ".join(positional + keyword))
    return eval(call_expr, globals(), namespace)
Replicate a call to the encapsulated function. Unlike func(*args, **kwargs) the call is deterministic in the order kwargs are being checked by Python. In other words, it behaves exactly the same as if typed into the REPL prompt. This is usually only a problem when a function is given two invalid keyword arguments. In such cases func(*args, **kwargs) syntax will result in a random error on either of those invalid keyword arguments. This is most likely caused by a temporary dictionary created by the runtime. For testing, an OrderedDict instance may be passed as kwargs. In such a case the call, and the error message, is fully deterministic. This function is implemented with eval()
def ssh_accept_sec_context(self, hostname, username, recv_token):
    """
    Accept a SSPI context (server mode).  Relies on the Windows-only
    pywin32 ``sspi`` module.

    :param str hostname: The servers FQDN
    :param str username: The name of the user who attempts to login
    :param str recv_token: The SSPI Token received from the server,
                           if it's not the initial call.
    :return: A ``String`` if the SSPI has returned a token or ``None`` if
             no token was returned
    """
    self._gss_host = hostname
    self._username = username
    # Kerberos service principal name for the SSH host service.
    targ_name = "host/" + self._gss_host
    self._gss_srv_ctxt = sspi.ServerAuth("Kerberos", spn=targ_name)
    error, token = self._gss_srv_ctxt.authorize(recv_token)
    token = token[0].Buffer
    if error == 0:
        # error == 0 means the security context is complete; record
        # success and return None since there is no token to send back.
        self._gss_srv_ctxt_status = True
        token = None
    return token
Accept a SSPI context (server mode). :param str hostname: The servers FQDN :param str username: The name of the user who attempts to login :param str recv_token: The SSPI Token received from the server, if it's not the initial call. :return: A ``String`` if the SSPI has returned a token or ``None`` if no token was returned
def get_metrics(thing, extra=''):
    """Return MetricsInterface instance with specified name.

    The name is used as the prefix for all keys generated with this
    :py:class:`markus.main.MetricsInterface`.

    The :py:class:`markus.main.MetricsInterface` is not tied to metrics
    backends. The list of active backends are globally configured. This
    allows us to create :py:class:`markus.main.MetricsInterface` classes
    without having to worry about bootstrapping order of the app.

    :arg class/instance/str thing: The name to use as a key prefix.

        If this is a class, it uses the dotted Python path. If this is an
        instance, it uses the dotted Python path plus ``str(instance)``.

    :arg str extra: Any extra bits to add to the end of the name.

    :returns: a ``MetricsInterface`` instance

    Examples:

    >>> from markus import get_metrics

    Create a MetricsInterface with the name "myapp" and generate a count
    with stat "myapp.thing1" and value 1:

    >>> metrics = get_metrics('myapp')
    >>> metrics.incr('thing1', value=1)

    Create a MetricsInterface with the prefix of the Python module it's
    being called in:

    >>> metrics = get_metrics(__name__)

    Create a MetricsInterface with the prefix as the qualname of the class:

    >>> class Foo:
    ...     def __init__(self):
    ...         self.metrics = get_metrics(self)

    Create a prefix of the class path plus some identifying information:

    >>> class Foo:
    ...     def __init__(self, myname):
    ...         self.metrics = get_metrics(self, extra=myname)
    ...
    >>> foo = Foo('jim')

    Assume that ``Foo`` is defined in the ``myapp`` module. Then this
    will generate the name ``myapp.Foo.jim``.

    """
    thing = thing or ''

    if not isinstance(thing, str):
        # Not a str: it's either a class or an instance.
        # FIX: use isinstance(thing, type) instead of ``type(thing) == type``
        # so classes built with a custom metaclass are also recognised as
        # classes rather than falling into the instance branch.
        if isinstance(thing, type):
            thing = '%s.%s' % (thing.__module__, thing.__name__)
        else:
            thing = '%s.%s' % (
                thing.__class__.__module__, thing.__class__.__name__
            )

    if extra:
        thing = '%s.%s' % (thing, extra)

    return MetricsInterface(thing)
Return MetricsInterface instance with specified name. The name is used as the prefix for all keys generated with this :py:class:`markus.main.MetricsInterface`. The :py:class:`markus.main.MetricsInterface` is not tied to metrics backends. The list of active backends are globally configured. This allows us to create :py:class:`markus.main.MetricsInterface` classes without having to worry about bootstrapping order of the app. :arg class/instance/str thing: The name to use as a key prefix. If this is a class, it uses the dotted Python path. If this is an instance, it uses the dotted Python path plus ``str(instance)``. :arg str extra: Any extra bits to add to the end of the name. :returns: a ``MetricsInterface`` instance Examples: >>> from markus import get_metrics Create a MetricsInterface with the name "myapp" and generate a count with stat "myapp.thing1" and value 1: >>> metrics = get_metrics('myapp') >>> metrics.incr('thing1', value=1) Create a MetricsInterface with the prefix of the Python module it's being called in: >>> metrics = get_metrics(__name__) Create a MetricsInterface with the prefix as the qualname of the class: >>> class Foo: ... def __init__(self): ... self.metrics = get_metrics(self) Create a prefix of the class path plus some identifying information: >>> class Foo: ... def __init__(self, myname): ... self.metrics = get_metrics(self, extra=myname) ... >>> foo = Foo('jim') Assume that ``Foo`` is defined in the ``myapp`` module. Then this will generate the name ``myapp.Foo.jim``.
def do_commit(self, subcmd, opts, *args):
    """Send changes from your working copy to the repository.

    usage:
        commit [PATH...]

    A log message must be provided, but it can be empty.  If it is
    not given by a --message or --file option, an editor will be
    started.

    ${cmd_option_list}
    """
    # NOTE: Python 2 print statements -- this module targets Python 2.
    # Currently just echoes the parsed options/arguments for debugging.
    print "'svn %s' opts: %s" % (subcmd, opts)
    print "'svn %s' args: %s" % (subcmd, args)
Send changes from your working copy to the repository. usage: commit [PATH...] A log message must be provided, but it can be empty. If it is not given by a --message or --file option, an editor will be started. ${cmd_option_list}
def getMaxDelay(inferences):
    """
    Returns the maximum delay for the InferenceElements in the inference
    dictionary

    Parameters:
    -----------------------------------------------------------------------
    inferences:   A dictionary where the keys are InferenceElements

    Returns: the largest temporal delay found (0 if the dict is empty).

    NOTE: Python 2 code (dict.iteritems / dict.iterkeys).
    """
    maxDelay = 0
    for inferenceElement, inference in inferences.iteritems():
        if isinstance(inference, dict):
            # Nested dict: take the delay of every sub-key into account.
            for key in inference.iterkeys():
                maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement,
                                                                 key),
                               maxDelay)
        else:
            maxDelay = max(InferenceElement.getTemporalDelay(inferenceElement),
                           maxDelay)

    return maxDelay
Returns the maximum delay for the InferenceElements in the inference dictionary Parameters: ----------------------------------------------------------------------- inferences: A dictionary where the keys are InferenceElements
def get_operator_statistic(self, name):
    """|coro|

    Gets the operator unique statistic id from the operator
    definitions dict.

    Returns
    -------
    str or None
        the id of the operator unique statistic, or None if the
        operator is unknown or has no unique statistic"""
    opdefs = yield from self.get_operator_definitions()

    # Definitions are keyed by lowercase operator name.
    name = name.lower()

    if name not in opdefs:
        return None

    # some operators (e.g. Kaid and Nomad) don't have a unique statistic
    # section for some reason...
    if "uniqueStatistic" not in opdefs[name] or "pvp" not in opdefs[name]["uniqueStatistic"]:
        return None

    return opdefs[name]["uniqueStatistic"]["pvp"]["statisticId"]
|coro| Gets the operator unique statistic from the operator definitions dict Returns ------- str the name of the operator unique statistic
def get_cancel_operation_by_id(cls, cancel_operation_id, **kwargs):
    """Find CancelOperation

    Return single instance of CancelOperation by its ID.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True
    >>> thread = api.get_cancel_operation_by_id(cancel_operation_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str cancel_operation_id: ID of cancelOperation to return (required)
    :return: CancelOperation
             If the method is called asynchronously,
             returns the request thread.
    """
    # Callers of this public helper always want the deserialized model,
    # never the full (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    # In async mode the underlying call returns the request thread; in
    # sync mode it returns the deserialized data directly -- either way
    # it is exactly what this method should hand back.
    result = cls._get_cancel_operation_by_id_with_http_info(
        cancel_operation_id, **kwargs)
    return result
Find CancelOperation Return single instance of CancelOperation by its ID. This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.get_cancel_operation_by_id(cancel_operation_id, async=True) >>> result = thread.get() :param async bool :param str cancel_operation_id: ID of cancelOperation to return (required) :return: CancelOperation If the method is called asynchronously, returns the request thread.
def strip_suffix(string, suffix, regex=False):
    """Strip the suffix from the string. If 'regex' is specified, suffix is
    understood as a regular expression.

    :raises TypeError: if either argument is not a string type
    """
    # Fail fast with a descriptive message on non-string inputs.
    if not isinstance(string, six.string_types) or not isinstance(suffix, six.string_types):
        msg = 'Arguments to strip_suffix must be string types. Are: {s}, {p}'\
            .format(s=type(string), p=type(suffix))
        raise TypeError(msg)
    if not regex:
        # Literal mode: escape so regex metacharacters match verbatim.
        suffix = re.escape(suffix)
    # Anchor the pattern at the end of the string unless it already is.
    # NOTE(review): a literal suffix ending in '$' escapes to r'\$', which
    # still ends with '$' and therefore skips the anchoring -- confirm that
    # _strip() behaves correctly for that edge case.
    if not suffix.endswith('$'):
        suffix = '({s})$'.format(s=suffix)
    return _strip(string, suffix)
Strip the suffix from the string. If 'regex' is specified, suffix is understood as a regular expression.
def _get_rdfclass(self, class_type, **kwargs):
    """ returns the instantiated class from the class list

    args:
        class_type: dictionary with rdf_types

    kwargs:
        def_load: when truthy, always return the plain RdfClassBase
    """
    def select_class(class_name):
        """ finds the class in the rdfclass Module"""
        try:
            return getattr(MODULE.rdfclass, class_name.pyuri)
        except AttributeError:
            # Unknown class name: fall back to the generic base class.
            return RdfClassBase
    if kwargs.get("def_load"):
        return RdfClassBase

    if isinstance(class_type[self.omap], list):
        bases = [select_class(class_name)
                 for class_name in class_type[self.omap]]
        # Drop names that resolved to the generic base.
        bases = [base for base in bases if base != RdfClassBase]
        if len(bases) == 0:
            return RdfClassBase
        elif len(bases) == 1:
            return bases[0]
        else:
            # Remove ancestors already covered by their own subclasses.
            bases = remove_parents(bases)
            if len(bases) == 1:
                return bases[0]
            else:
                # Synthesise a combined class named after the sorted types.
                name = "_".join(sorted(class_type[self.omap]))
                # if the class has already been created, return it
                if hasattr(MODULE.rdfclass, name):
                    return getattr(MODULE.rdfclass, name)
                new_class = type(name, tuple(bases), {})
                new_class.hierarchy = list_hierarchy(class_type[self.omap][0],
                                                     bases)
                new_class.class_names = sorted([base.__name__ \
                        for base in bases \
                        if base not in [RdfClassBase, dict]])
                # Cache the synthesised class on the module for reuse.
                setattr(MODULE.rdfclass, name, new_class)
                return new_class
    else:
        return select_class(class_type[self.omap])
returns the instanticated class from the class list args: class_type: dictionary with rdf_types
def _populate_cparams(self, img_array, mct=None, cratios=None, psnr=None,
                      cinema2k=None, cinema4k=None, irreversible=None,
                      cbsize=None, eph=None, grid_offset=None, modesw=None,
                      numres=None, prog=None, psizes=None, sop=None,
                      subsam=None, tilesize=None, colorspace=None):
    """Directs processing of write method arguments.

    Validates the keyword combinations given to the write method and
    fills an OpenJPEG encoder-parameters struct, storing it on
    ``self._cparams``.

    Parameters
    ----------
    img_array : ndarray
        Image data to be written to file.
    kwargs : dictionary
        Non-image keyword inputs provided to write method.

    Raises
    ------
    IOError
        If mutually exclusive options are combined.
    """
    # cinema2k / cinema4k are all-or-nothing presets: no other option
    # may be combined with them.
    other_args = (mct, cratios, psnr, irreversible, cbsize, eph, grid_offset,
                  modesw, numres, prog, psizes, sop, subsam)
    if (((cinema2k is not None or cinema4k is not None) and
         (not all([arg is None for arg in other_args])))):
        msg = ("Cannot specify cinema2k/cinema4k along with any other "
               "options.")
        raise IOError(msg)

    # Rate-based and quality-based layering are mutually exclusive.
    if cratios is not None and psnr is not None:
        msg = "Cannot specify cratios and psnr options together."
        raise IOError(msg)

    # Pick the parameter struct matching the linked OpenJPEG major version.
    if version.openjpeg_version_tuple[0] == 1:
        cparams = opj.set_default_encoder_parameters()
    else:
        cparams = opj2.set_default_encoder_parameters()

    # The C struct holds a fixed-size output path; pad to PATH_LEN bytes.
    outfile = self.filename.encode()
    num_pad_bytes = opj2.PATH_LEN - len(outfile)
    outfile += b'0' * num_pad_bytes
    cparams.outfile = outfile

    # Choose the codec container from the file extension.
    if self.filename[-4:].endswith(('.jp2', '.JP2')):
        cparams.codec_fmt = opj2.CODEC_JP2
    else:
        cparams.codec_fmt = opj2.CODEC_J2K

    # Set defaults to lossless to begin.
    cparams.tcp_rates[0] = 0
    cparams.tcp_numlayers = 1
    cparams.cp_disto_alloc = 1

    cparams.irreversible = 1 if irreversible else 0

    # Cinema presets delegate all remaining setup and return early.
    if cinema2k is not None:
        self._cparams = cparams
        self._set_cinema_params('cinema2k', cinema2k)
        return

    if cinema4k is not None:
        self._cparams = cparams
        self._set_cinema_params('cinema4k', cinema4k)
        return

    # Code-block size is given as (height, width).
    if cbsize is not None:
        cparams.cblockw_init = cbsize[1]
        cparams.cblockh_init = cbsize[0]

    # Rate-driven layers: one layer per requested compression ratio.
    if cratios is not None:
        cparams.tcp_numlayers = len(cratios)
        for j, cratio in enumerate(cratios):
            cparams.tcp_rates[j] = cratio
        cparams.cp_disto_alloc = 1

    # SOP / EPH marker flags live in the coding-style bitfield.
    cparams.csty |= 0x02 if sop else 0
    cparams.csty |= 0x04 if eph else 0

    # Grid offset is given as (y, x).
    if grid_offset is not None:
        cparams.image_offset_x0 = grid_offset[1]
        cparams.image_offset_y0 = grid_offset[0]

    # modesw is a bitmask of up to six coding mode switches.
    if modesw is not None:
        for shift in range(6):
            power_of_two = 1 << shift
            if modesw & power_of_two:
                cparams.mode |= power_of_two

    if numres is not None:
        cparams.numresolution = numres

    if prog is not None:
        cparams.prog_order = core.PROGRESSION_ORDER[prog.upper()]

    # Quality-driven layers: one layer per requested PSNR value.
    if psnr is not None:
        cparams.tcp_numlayers = len(psnr)
        for j, snr_layer in enumerate(psnr):
            cparams.tcp_distoratio[j] = snr_layer
        cparams.cp_fixed_quality = 1

    # Precinct sizes are given as (height, width) pairs per resolution.
    if psizes is not None:
        for j, (prch, prcw) in enumerate(psizes):
            cparams.prcw_init[j] = prcw
            cparams.prch_init[j] = prch
        cparams.csty |= 0x01
        cparams.res_spec = len(psizes)

    # Subsampling is given as (dy, dx).
    if subsam is not None:
        cparams.subsampling_dy = subsam[0]
        cparams.subsampling_dx = subsam[1]

    # Tile size is given as (height, width).
    if tilesize is not None:
        cparams.cp_tdx = tilesize[1]
        cparams.cp_tdy = tilesize[0]
        cparams.tile_size_on = opj2.TRUE

    if mct is None:
        # If the multi component transform was not specified, we infer
        # that it should be used if the color space is RGB.
        cparams.tcp_mct = 1 if self._colorspace == opj2.CLRSPC_SRGB else 0
    else:
        if self._colorspace == opj2.CLRSPC_GRAY:
            msg = ("Cannot specify usage of the multi component transform "
                   "if the colorspace is gray.")
            raise IOError(msg)
        cparams.tcp_mct = 1 if mct else 0

    # Final cross-validation against the image itself before storing.
    self._validate_compression_params(img_array, cparams, colorspace)

    self._cparams = cparams
Directs processing of write method arguments. Parameters ---------- img_array : ndarray Image data to be written to file. kwargs : dictionary Non-image keyword inputs provided to write method.
def select_upstream(self, device: devicetools.Device) -> 'Selection':
    """Restrict the current selection to the network upstream of the given
    starting point, including the starting point itself.

    See the documentation on method |Selection.search_upstream| for
    additional information.
    """
    # Delegate the actual graph walk, then adopt its result in place.
    found = self.search_upstream(device)
    self.nodes = found.nodes
    self.elements = found.elements
    return self
Restrict the current selection to the network upstream of the given starting point, including the starting point itself. See the documentation on method |Selection.search_upstream| for additional information.
def make_method_names(self):
    """Create tokens for setting __testname__ on functions."""
    result = []
    for group in self.all_groups:
        for single in group.singles:
            # Only emit a modifier when the quoted english name differs
            # from the identifier with underscores turned into spaces.
            spaced = single.name.replace('_', ' ')
            if single.english[1:-1] != spaced:
                result.extend(
                    self.tokens.make_name_modifier(
                        not group.root, single.identifier, single.english
                    )
                )
    return result
Create tokens for setting __testname__ on functions
def allele_support_df(loci, sources):
    """
    Returns a DataFrame of allele counts for all given loci in the read
    sources.

    Rows come from allele_support_rows(loci, sources); columns follow the
    module-level EXPECTED_COLUMNS ordering.
    """
    return pandas.DataFrame(
        allele_support_rows(loci, sources),
        columns=EXPECTED_COLUMNS)
Returns a DataFrame of allele counts for all given loci in the read sources
def find_match_command(self, rule):
    """Return a matching (possibly munged) command, if found in rule.

    Returns a dict with a 'command' key on a match, otherwise None
    (implicitly).  Three matching strategies are supported, chosen by
    the rule's flags: prefix match (allow_trailing_args), regex match
    (pcre_match), or exact match.
    """
    command_string = rule['command']
    command_list = command_string.split()
    self.logdebug('comparing "%s" to "%s"\n'
                  % (command_list, self.original_command_list))
    if rule.get('allow_trailing_args'):
        # Only the leading arguments need to match the rule command.
        self.logdebug('allow_trailing_args is true - comparing initial '
                      'list.\n')
        prefix = self.original_command_list[:len(command_list)]
        if prefix == command_list:
            self.logdebug('initial list is same\n')
            return {'command': self.original_command_list}
        self.logdebug('initial list is not same\n')
    elif rule.get('pcre_match'):
        # The rule command is a regular expression matched against the
        # raw command string.
        if re.search(command_string, self.original_command_string):
            return {'command': self.original_command_list}
    elif command_list == self.original_command_list:
        # Exact, token-for-token match required.
        return {'command': command_list}
Return a matching (possibly munged) command, if found in rule.
def build_casc(ObsData, hourly=True, level=9, months=None,
               avg_stats=True, percentile=50):
    '''Builds the cascade statistics of observed data for disaggregation

    Parameters
    -----------
    ObsData : pd.Series
        hourly=True -> hourly obs data
        else -> 5min data (disaggregation level=9 (default), 10, 11)
    months : numpy array of ints
        Months for each seasons to be used for statistics (array of
        numpy array, default=1-12, e.g., [np.arange(12) + 1])
    avg_stats : bool
        average statistics for all levels True/False (default=True)
    percentile : int, float
        percentile for splitting the dataset in small and high
        intensities (default=50)

    Returns
    -------
    list_seasonal_casc : list
        holding the results (one entry per season)
    '''
    list_seasonal_casc = list()

    if months is None:
        # Default: a single "season" covering all twelve months.
        months = [np.arange(12) + 1]

    # Parameter estimation for each season
    for cur_months in months:
        vdn = seasonal_subset(ObsData, cur_months)
        # Replace missing precipitation values with zero before aggregating.
        if len(ObsData.precip[np.isnan(ObsData.precip)]) > 0:
            ObsData.precip[np.isnan(ObsData.precip)] = 0

        casc_opt = melodist.cascade.CascadeStatistics()
        casc_opt.percentile = percentile
        list_casc_opt = list()
        count = 0
        # Hourly input needs 5 aggregation steps; 5-min input uses `level`.
        if hourly:
            aggre_level = 5
        else:
            aggre_level = level
        thresholds = np.zeros(aggre_level)
        for i in range(0, aggre_level):
            # aggregate the data
            casc_opt_i, vdn = aggregate_precipitation(vdn, hourly,
                                                      percentile=percentile)
            thresholds[i] = casc_opt_i.threshold
            copy_of_casc_opt_i = copy.copy(casc_opt_i)
            list_casc_opt.append(copy_of_casc_opt_i)
            n_vdn = len(vdn)
            # NOTE(review): the bare * and + expressions below appear to rely
            # on CascadeStatistics operator overloads mutating the operands
            # in place -- confirm against melodist.cascade.CascadeStatistics.
            casc_opt_i * n_vdn  # level related weighting
            casc_opt + casc_opt_i  # add to total statistics
            count = count + n_vdn
        casc_opt * (1. / count)  # transfer weighted matrices to probabilities
        casc_opt.threshold = thresholds

        # statistics object
        if avg_stats:
            # in this case, the average statistics will be applied for all
            # levels likewise
            stat_obj = casc_opt
        else:
            # for longer time series, separate statistics might be more
            # appropriate; level dependent statistics will be assumed
            stat_obj = list_casc_opt

        list_seasonal_casc.append(stat_obj)

    return list_seasonal_casc
Builds the cascade statistics of observed data for disaggregation Parameters ----------- ObsData : pd.Series hourly=True -> hourly obs data else -> 5min data (disaggregation level=9 (default), 10, 11) months : numpy array of ints Months for each seasons to be used for statistics (array of numpy array, default=1-12, e.g., [np.arange(12) + 1]) avg_stats : bool average statistics for all levels True/False (default=True) percentile : int, float percentile for splitting the dataset in small and high intensities (default=50) Returns ------- list_seasonal_casc : list holding the results
def create(self, path, data, **kwargs): """Create a file at the given path. :param data: ``bytes`` or a ``file``-like object to upload :param overwrite: If a file already exists, should it be overwritten? :type overwrite: bool :param blocksize: The block size of a file. :type blocksize: long :param replication: The number of replications of a file. :type replication: short :param permission: The permission of a file/directory. Any radix-8 integer (leading zeros may be omitted.) :type permission: octal :param buffersize: The size of the buffer used in transferring data. :type buffersize: int """ metadata_response = self._put( path, 'CREATE', expected_status=httplib.TEMPORARY_REDIRECT, **kwargs) assert not metadata_response.content data_response = self._requests_session.put( metadata_response.headers['location'], data=data, **self._requests_kwargs) _check_response(data_response, expected_status=httplib.CREATED) assert not data_response.content
Create a file at the given path. :param data: ``bytes`` or a ``file``-like object to upload :param overwrite: If a file already exists, should it be overwritten? :type overwrite: bool :param blocksize: The block size of a file. :type blocksize: long :param replication: The number of replications of a file. :type replication: short :param permission: The permission of a file/directory. Any radix-8 integer (leading zeros may be omitted.) :type permission: octal :param buffersize: The size of the buffer used in transferring data. :type buffersize: int
def _analyze_read_write(self):
    """Compute and cache the variables read/written by this function.

    Populates ``_expression_vars_written``, ``_vars_written``,
    ``_expression_vars_read``, ``_vars_read``, the state/solidity
    variable subsets, ``_vars_read_or_written`` and
    ``_slithir_variables`` from the per-node variable lists.
    """

    def _unique_by_str(items):
        # Remove duplicates that share the same string representation
        # (distinct objects may denote the same variable).
        return [next(group) for _, group in
                groupby(sorted(items, key=str), key=str)]

    def _collect(attr):
        # Flatten the per-node lists named by `attr` into one
        # de-duplicated list.
        per_node = [getattr(node, attr) for node in self.nodes]
        flat = [item for sublist in per_node if sublist for item in sublist]
        return _unique_by_str(flat)

    self._expression_vars_written = _collect('variables_written_as_expression')
    self._vars_written = _collect('variables_written')
    self._expression_vars_read = _collect('variables_read_as_expression')
    self._vars_read = _collect('variables_read')

    self._state_vars_written = [x for x in self.variables_written
                                if isinstance(x, StateVariable)]
    self._state_vars_read = [x for x in self.variables_read
                             if isinstance(x, StateVariable)]
    self._solidity_vars_read = [x for x in self.variables_read
                                if isinstance(x, SolidityVariable)]

    self._vars_read_or_written = self._vars_written + self._vars_read

    slithir_variables = [node.slithir_variables for node in self.nodes]
    self._slithir_variables = [item for sublist in slithir_variables
                               if sublist for item in sublist]
Compute variables read/written/...
def value(self): """ returns the object instead of instance """ if self._wrapped is not self.Null: return self._wrapped else: return self.obj
returns the object instead of instance
def CEscape(text, as_utf8): """Escape a bytes string for use in an ascii protocol buffer. text.encode('string_escape') does not seem to satisfy our needs as it encodes unprintable characters using two-digit hex escapes whereas our C++ unescaping function allows hex escapes to be any length. So, "\0011".encode('string_escape') ends up being "\\x011", which will be decoded in C++ as a single-character string with char code 0x11. Args: text: A byte string to be escaped as_utf8: Specifies if result should be returned in UTF-8 encoding Returns: Escaped string """ # PY3 hack: make Ord work for str and bytes: # //platforms/networking/data uses unicode here, hence basestring. Ord = ord if isinstance(text, six.string_types) else lambda x: x if as_utf8: return ''.join(_cescape_utf8_to_str[Ord(c)] for c in text) return ''.join(_cescape_byte_to_str[Ord(c)] for c in text)
Escape a bytes string for use in an ascii protocol buffer. text.encode('string_escape') does not seem to satisfy our needs as it encodes unprintable characters using two-digit hex escapes whereas our C++ unescaping function allows hex escapes to be any length. So, "\0011".encode('string_escape') ends up being "\\x011", which will be decoded in C++ as a single-character string with char code 0x11. Args: text: A byte string to be escaped as_utf8: Specifies if result should be returned in UTF-8 encoding Returns: Escaped string
def deprecated(will_be=None, on_version=None, name=None): """ Function decorator that warns about deprecation upon function invocation. :param will_be: str representing the target action on the deprecated function :param on_version: tuple representing a SW version :param name: name of the entity to be deprecated (useful when decorating __init__ methods so you can specify the deprecated class name) :return: callable """ def outer_function(function): if name is None: _name = function.__name__ else: _name = name warning_msg = '"%s" is deprecated.' % _name if will_be is not None and on_version is not None: warning_msg += " It will be %s on version %s" % ( will_be, '.'.join(map(str, on_version))) @wraps(function) def inner_function(*args, **kwargs): warnings.warn(warning_msg, category=DeprecationWarning, stacklevel=2) return function(*args, **kwargs) return inner_function return outer_function
Function decorator that warns about deprecation upon function invocation. :param will_be: str representing the target action on the deprecated function :param on_version: tuple representing a SW version :param name: name of the entity to be deprecated (useful when decorating __init__ methods so you can specify the deprecated class name) :return: callable
def flags2text(self): """ parse the `self.flags` field and create a list of `CKF_*` strings corresponding to bits set in flags :return: a list of strings :rtype: list """ r = [] for v in self.flags_dict.keys(): if self.flags & v: r.append(self.flags_dict[v]) return r
parse the `self.flags` field and create a list of `CKF_*` strings corresponding to bits set in flags :return: a list of strings :rtype: list
def setUp(self, tp): '''tp -- complexType/simpleContent/[Exention,Restriction] ''' self._item = tp assert tp.isComplex() is True and tp.content.isSimple() is True,\ 'expecting complexType/simpleContent not: %s' %tp.content.getItemTrace() simple = tp.content dv = simple.content assert dv.isExtension() is True or dv.isRestriction() is True,\ 'expecting complexType/simpleContent/[Extension,Restriction] not: %s' \ %tp.content.getItemTrace() self.name = tp.getAttribute('name') self.ns = tp.getTargetNamespace() # TODO: Why is this being set? self.content.attributeContent = dv.getAttributeContent() base = dv.getAttribute('base') if base is not None: self.sKlass = BTI.get_typeclass( base[1], base[0] ) if not self.sKlass: self.sKlass,self.sKlassNS = base[1], base[0] self.attrComponents = self._setAttributes( self.content.attributeContent ) return raise Wsdl2PythonError,\ 'simple content derivation bad base attribute: ' %tp.getItemTrace()
tp -- complexType/simpleContent/[Exention,Restriction]
def git_merge(base, head, no_ff=False): # type: (str, str, bool) -> None """ Merge *head* into *base*. Args: base (str): The base branch. *head* will be merged into this branch. head (str): The branch that will be merged into *base*. no_ff (bool): If set to **True** it will force git to create merge commit. If set to **False** (default) it will do a fast-forward merge if possible. """ pretend = context.get('pretend', False) branch = git.current_branch(refresh=True) if branch.name != base and not pretend: git_checkout(base) args = [] if no_ff: args.append('--no-ff') log.info("Merging <33>{}<32> into <33>{}<32>", head, base) shell.run('git merge {args} {branch}'.format( args=' '.join(args), branch=head, )) if branch.name != base and not pretend: git_checkout(branch.name)
Merge *head* into *base*. Args: base (str): The base branch. *head* will be merged into this branch. head (str): The branch that will be merged into *base*. no_ff (bool): If set to **True** it will force git to create merge commit. If set to **False** (default) it will do a fast-forward merge if possible.
def _linux_os_release(): """Try to determine the name of a Linux distribution. This function checks for the /etc/os-release file. It takes the name from the 'NAME' field and the version from 'VERSION_ID'. An empty string is returned if the above values cannot be determined. """ pretty_name = '' ashtray = {} keys = ['NAME', 'VERSION_ID'] try: with open(os.path.join('/etc', 'os-release')) as f: for line in f: for key in keys: if line.startswith(key): ashtray[key] = re.sub(r'^"|"$', '', line.strip().split('=')[1]) except (OSError, IOError): return pretty_name if ashtray: if 'NAME' in ashtray: pretty_name = ashtray['NAME'] if 'VERSION_ID' in ashtray: pretty_name += ' {}'.format(ashtray['VERSION_ID']) return pretty_name
Try to determine the name of a Linux distribution. This function checks for the /etc/os-release file. It takes the name from the 'NAME' field and the version from 'VERSION_ID'. An empty string is returned if the above values cannot be determined.
def sampleCellsWithinColumns(numCellPairs, cellsPerColumn, numColumns, seed=42): """ Generate indices of cell pairs, each pair of cells are from the same column @return cellPairs (list) list of cell pairs """ np.random.seed(seed) cellPairs = [] for i in range(numCellPairs): randCol = np.random.randint(numColumns) randCells = np.random.choice(np.arange(cellsPerColumn), (2, ), replace=False) cellsPair = randCol * cellsPerColumn + randCells cellPairs.append(cellsPair) return cellPairs
Generate indices of cell pairs, each pair of cells are from the same column @return cellPairs (list) list of cell pairs
def add_node(self, node_or_ID, **kwds): """ Adds a node to the graph. """ if not isinstance(node_or_ID, Node): nodeID = str( node_or_ID ) if nodeID in self.nodes: node = self.nodes[ self.nodes.index(nodeID) ] else: if self.default_node is not None: node = self.default_node.clone_traits(copy="deep") node.ID = nodeID else: node = Node(nodeID) self.nodes.append( node ) else: node = node_or_ID if node in self.nodes: node = self.nodes[ self.nodes.index(node_or_ID) ] else: self.nodes.append( node ) node.set( **kwds ) return node
Adds a node to the graph.
def _copy_deploy_scripts_for_hosts(self, domains): """ Copy the deploy scripts for all the domains into the prefix scripts dir Args: domains(dict): spec with the domains info as when loaded from the initfile Returns: None """ with LogTask('Copying any deploy scripts'): for host_name, host_spec in domains.iteritems(): host_metadata = host_spec.get('metadata', {}) deploy_scripts = self._get_scripts(host_metadata) new_scripts = self._copy_delpoy_scripts(deploy_scripts) self._set_scripts( host_metadata=host_metadata, scripts=new_scripts, ) return domains
Copy the deploy scripts for all the domains into the prefix scripts dir Args: domains(dict): spec with the domains info as when loaded from the initfile Returns: None
def check_my_users(user): """Check if user exists and its credentials. Take a look at encrypt_app.py and encrypt_cli.py to see how to encrypt passwords """ user_data = my_users.get(user['username']) if not user_data: return False # <--- invalid credentials elif user_data.get('password') == user['password']: return True # <--- user is logged in! return False
Check if user exists and its credentials. Take a look at encrypt_app.py and encrypt_cli.py to see how to encrypt passwords
def reset_permission_factories(self): """Remove cached permission factories.""" for key in ('read', 'create', 'update', 'delete'): full_key = '{0}_permission_factory'.format(key) if full_key in self.__dict__: del self.__dict__[full_key]
Remove cached permission factories.
def main(self, standalone=False): """Create interactive browser window. keyword arguments standalone -- Set to true, if the browser is not attached to other windows """ window = _Tkinter.Tk() sc = _TreeWidget.ScrolledCanvas(window, bg="white",\ highlightthickness=0, takefocus=1) sc.frame.pack(expand=1, fill="both") item = _ReferrerTreeItem(window, self.get_tree(), self) node = _TreeNode(sc.canvas, None, item) node.expand() if standalone: window.mainloop()
Create interactive browser window. keyword arguments standalone -- Set to true, if the browser is not attached to other windows
def prune_unspecified_categories(modules, categories): """ Removes unspecified module categories. Mutates dictionary and returns it. """ res = {} for mod_name, mod_info in modules.items(): mod_categories = mod_info.get("categories", all_categories) for category in categories: if category in mod_categories: break else: continue for input_name, input_info in mod_info["inputs"].items(): for c in input_info["categories"]: if c in categories: break else: del mod_info["inputs"][input_name] for output_name, output_info in mod_info["outputs"].items(): for c in output_info["categories"]: if c in categories: break else: del mod_info["outputs"][output_name] res[mod_name] = mod_info return res
Removes unspecified module categories. Mutates dictionary and returns it.
def get_location(self, location, columns=None, as_dict=False, index=True): """ For an index location and either (1) list of columns return a DataFrame or dictionary of the values or (2) single column name and return the value of that cell. This is optimized for speed because it does not need to lookup the index location with a search. Also can accept relative indexing from the end of the DataFrame in standard python notation [-3, -2, -1] :param location: index location in standard python form of positive or negative number :param columns: list of columns, single column name, or None to include all columns :param as_dict: if True then return a dictionary :param index: if True then include the index in the dictionary if as_dict=True :return: DataFrame or dictionary if columns is a list or value if columns is a single column name """ if columns is None: columns = self._columns elif not isinstance(columns, list): # single value for columns c = self._columns.index(columns) return self._data[c][location] elif all([isinstance(i, bool) for i in columns]): if len(columns) != len(self._columns): raise ValueError('boolean column list must be same size of existing columns') columns = list(compress(self._columns, columns)) data = dict() for column in columns: c = self._columns.index(column) data[column] = self._data[c][location] index_value = self._index[location] if as_dict: if index: data[self._index_name] = index_value return data else: data = {k: [data[k]] for k in data} # this makes the dict items lists return DataFrame(data=data, index=[index_value], columns=columns, index_name=self._index_name, sort=self._sort)
For an index location and either (1) list of columns return a DataFrame or dictionary of the values or (2) single column name and return the value of that cell. This is optimized for speed because it does not need to lookup the index location with a search. Also can accept relative indexing from the end of the DataFrame in standard python notation [-3, -2, -1] :param location: index location in standard python form of positive or negative number :param columns: list of columns, single column name, or None to include all columns :param as_dict: if True then return a dictionary :param index: if True then include the index in the dictionary if as_dict=True :return: DataFrame or dictionary if columns is a list or value if columns is a single column name
def create_widget(self): """ Create the toolkit widget for the proxy object. """ d = self.declaration button_type = UIButton.UIButtonTypeSystem if d.flat else UIButton.UIButtonTypeRoundedRect self.widget = UIButton(buttonWithType=button_type)
Create the toolkit widget for the proxy object.
def _from_dict_dict(cls, dic): """Takes a dict {id : dict_attributes} """ return cls({_convert_id(i): v for i, v in dic.items()})
Takes a dict {id : dict_attributes}
def beam_best_first(problem, beam_size=100, iterations_limit=0, viewer=None): ''' Beam search best first. beam_size is the size of the beam. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.value. ''' return _local_search(problem, _first_expander, iterations_limit=iterations_limit, fringe_size=beam_size, random_initial_states=True, stop_when_no_better=iterations_limit==0, viewer=viewer)
Beam search best first. beam_size is the size of the beam. If iterations_limit is specified, the algorithm will end after that number of iterations. Else, it will continue until it can't find a better node than the current one. Requires: SearchProblem.actions, SearchProblem.result, and SearchProblem.value.
def get_artist_hotttnesss(self, cache=True): """Get our numerical description of how hottt a song's artist currently is Args: Kwargs: cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True. Returns: A float representing hotttnesss. Example: >>> s = song.Song('SOOLGAZ127F3E1B87C') >>> s.artist_hotttnesss 0.45645633000000002 >>> s.get_artist_hotttnesss() 0.45645633000000002 >>> """ if not (cache and ('artist_hotttnesss' in self.cache)): response = self.get_attribute('profile', bucket='artist_hotttnesss') self.cache['artist_hotttnesss'] = response['songs'][0]['artist_hotttnesss'] return self.cache['artist_hotttnesss']
Get our numerical description of how hottt a song's artist currently is Args: Kwargs: cache (bool): A boolean indicating whether or not the cached value should be used (if available). Defaults to True. Returns: A float representing hotttnesss. Example: >>> s = song.Song('SOOLGAZ127F3E1B87C') >>> s.artist_hotttnesss 0.45645633000000002 >>> s.get_artist_hotttnesss() 0.45645633000000002 >>>
def has_parent_logs(self, log_id): """Tests if the ``Log`` has any parents. arg: log_id (osid.id.Id): the ``Id`` of a log return: (boolean) - ``true`` if the log has parents, ``false`` otherwise raise: NotFound - ``log_id`` is not found raise: NullArgument - ``log_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchySession.has_parent_bins if self._catalog_session is not None: return self._catalog_session.has_parent_catalogs(catalog_id=log_id) return self._hierarchy_session.has_parents(id_=log_id)
Tests if the ``Log`` has any parents. arg: log_id (osid.id.Id): the ``Id`` of a log return: (boolean) - ``true`` if the log has parents, ``false`` otherwise raise: NotFound - ``log_id`` is not found raise: NullArgument - ``log_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def setup_venv(self): """Setup virtualenv if necessary.""" venv = self.opts.venv if not venv: venv = os.environ.get('CRONY_VENV') if not venv and self.config['crony']: venv = self.config['crony'].get('venv') if venv: if not venv.endswith('activate'): add_path = os.path.join('bin', 'activate') self.logger.debug(f'Venv directory given, adding {add_path}') venv = os.path.join(venv, add_path) self.logger.debug(f'Adding sourcing virtualenv {venv}') self.cmd = f'. {venv} && {self.cmd}'
Setup virtualenv if necessary.
def csw_global_dispatch(request, url=None, catalog_id=None): """pycsw wrapper""" if request.user.is_authenticated(): # turn on CSW-T settings.REGISTRY_PYCSW['manager']['transactions'] = 'true' env = request.META.copy() # TODO: remove this workaround # HH should be able to pass env['wsgi.input'] without hanging # details at https://github.com/cga-harvard/HHypermap/issues/94 if request.method == 'POST': from StringIO import StringIO env['wsgi.input'] = StringIO(request.body) env.update({'local.app_root': os.path.dirname(__file__), 'REQUEST_URI': request.build_absolute_uri()}) # if this is a catalog based CSW, then update settings if url is not None: settings.REGISTRY_PYCSW['server']['url'] = url if catalog_id is not None: settings.REGISTRY_PYCSW['repository']['filter'] = 'catalog_id = %d' % catalog_id csw = server.Csw(settings.REGISTRY_PYCSW, env) content = csw.dispatch_wsgi() # pycsw 2.0 has an API break: # pycsw < 2.0: content = xml_response # pycsw >= 2.0: content = [http_status_code, content] # deal with the API break if isinstance(content, list): # pycsw 2.0+ content = content[1] response = HttpResponse(content, content_type=csw.contenttype) # TODO: Fix before 1.0 release. CORS should not be enabled blindly like this. response['Access-Control-Allow-Origin'] = '*' return response
pycsw wrapper
def check_backup_count_and_state(self, site): """Look up basebackups from the object store, prune any extra backups and return the datetime of the latest backup.""" basebackups = self.get_remote_basebackups_info(site) self.log.debug("Found %r basebackups", basebackups) if basebackups: last_backup_time = basebackups[-1]["metadata"]["start-time"] else: last_backup_time = None allowed_basebackup_count = self.config["backup_sites"][site]["basebackup_count"] if allowed_basebackup_count is None: allowed_basebackup_count = len(basebackups) while len(basebackups) > allowed_basebackup_count: self.log.warning("Too many basebackups: %d > %d, %r, starting to get rid of %r", len(basebackups), allowed_basebackup_count, basebackups, basebackups[0]["name"]) basebackup_to_be_deleted = basebackups.pop(0) pg_version = basebackup_to_be_deleted["metadata"].get("pg-version") last_wal_segment_still_needed = 0 if basebackups: last_wal_segment_still_needed = basebackups[0]["metadata"]["start-wal-segment"] if last_wal_segment_still_needed: self.delete_remote_wal_before(last_wal_segment_still_needed, site, pg_version) self.delete_remote_basebackup(site, basebackup_to_be_deleted["name"], basebackup_to_be_deleted["metadata"]) self.state["backup_sites"][site]["basebackups"] = basebackups return last_backup_time
Look up basebackups from the object store, prune any extra backups and return the datetime of the latest backup.
def distance_to_edge(self, skydir): """Return the angular distance from the given direction and the edge of the projection.""" xpix, ypix = skydir.to_pixel(self.wcs, origin=0) deltax = np.array((xpix - self._pix_center[0]) * self._pix_size[0], ndmin=1) deltay = np.array((ypix - self._pix_center[1]) * self._pix_size[1], ndmin=1) deltax = np.abs(deltax) - 0.5 * self._width[0] deltay = np.abs(deltay) - 0.5 * self._width[1] m0 = (deltax < 0) & (deltay < 0) m1 = (deltax > 0) & (deltay < 0) m2 = (deltax < 0) & (deltay > 0) m3 = (deltax > 0) & (deltay > 0) mx = np.abs(deltax) <= np.abs(deltay) my = np.abs(deltay) < np.abs(deltax) delta = np.zeros(len(deltax)) delta[(m0 & mx) | (m3 & my) | m1] = deltax[(m0 & mx) | (m3 & my) | m1] delta[(m0 & my) | (m3 & mx) | m2] = deltay[(m0 & my) | (m3 & mx) | m2] return delta
Return the angular distance from the given direction and the edge of the projection.
async def set_analog_latch(self, pin, threshold_type, threshold_value, cb=None, cb_type=None): """ This method "arms" an analog pin for its data to be latched and saved in the latching table If a callback method is provided, when latching criteria is achieved, the callback function is called with latching data notification. Data returned in the callback list has the pin number as the first element, :param pin: Analog pin number (value following an 'A' designator, i.e. A5 = 5 :param threshold_type: ANALOG_LATCH_GT | ANALOG_LATCH_LT | ANALOG_LATCH_GTE | ANALOG_LATCH_LTE :param threshold_value: numerical value - between 0 and 1023 :param cb: callback method :param cb_type: Constants.CB_TYPE_DIRECT = direct call or Constants.CB_TYPE_ASYNCIO = asyncio coroutine :returns: True if successful, False if parameter data is invalid """ if Constants.LATCH_GT <= threshold_type <= Constants.LATCH_LTE: key = 'A' + str(pin) if 0 <= threshold_value <= 1023: self.latch_map[key] = [Constants.LATCH_ARMED, threshold_type, threshold_value, 0, 0, cb, cb_type] return True else: return False
This method "arms" an analog pin for its data to be latched and saved in the latching table If a callback method is provided, when latching criteria is achieved, the callback function is called with latching data notification. Data returned in the callback list has the pin number as the first element, :param pin: Analog pin number (value following an 'A' designator, i.e. A5 = 5 :param threshold_type: ANALOG_LATCH_GT | ANALOG_LATCH_LT | ANALOG_LATCH_GTE | ANALOG_LATCH_LTE :param threshold_value: numerical value - between 0 and 1023 :param cb: callback method :param cb_type: Constants.CB_TYPE_DIRECT = direct call or Constants.CB_TYPE_ASYNCIO = asyncio coroutine :returns: True if successful, False if parameter data is invalid
def _ReadLine(self, file_object): """Reads a line from the file object. Args: file_object (dfvfs.FileIO): file-like object. Returns: str: line read from the file-like object. """ if len(self._buffer) < self._buffer_size: content = file_object.read(self._buffer_size) content = content.decode(self._encoding) self._buffer = ''.join([self._buffer, content]) line, new_line, self._buffer = self._buffer.partition('\n') if not line and not new_line: line = self._buffer self._buffer = '' self._current_offset += len(line) # Strip carriage returns from the text. if line.endswith('\r'): line = line[:-len('\r')] if new_line: line = ''.join([line, '\n']) self._current_offset += len('\n') return line
Reads a line from the file object. Args: file_object (dfvfs.FileIO): file-like object. Returns: str: line read from the file-like object.
def encrypt_dynamodb_item(item, crypto_config):
    # type: (dynamodb_types.ITEM, CryptoConfig) -> dynamodb_types.ITEM
    """Encrypt a DynamoDB item.

    >>> from dynamodb_encryption_sdk.encrypted.item import encrypt_dynamodb_item
    >>> plaintext_item = {
    ...     'some': {'S': 'data'},
    ...     'more': {'N': '5'}
    ... }
    >>> encrypted_item = encrypt_dynamodb_item(
    ...     item=plaintext_item,
    ...     crypto_config=my_crypto_config
    ... )

    .. note::

        This handles DynamoDB-formatted items and is for use with the
        boto3 DynamoDB client.

    :param dict item: Plaintext DynamoDB item
    :param CryptoConfig crypto_config: Cryptographic configuration
    :returns: Encrypted and signed DynamoDB item
    :rtype: dict
    :raises EncryptionError: if the item uses a reserved attribute name,
        or if encryption is requested but no encryption key is available
    """
    if crypto_config.attribute_actions.take_no_actions:
        # If we explicitly have been told not to do anything to this item,
        # just copy it.
        return item.copy()

    # Reserved attribute names are appended by this function itself and
    # must not collide with plaintext attributes.
    for reserved_name in ReservedAttributes:
        if reserved_name.value in item:
            raise EncryptionError(
                'Reserved attribute name "{}" is not allowed in plaintext item.'.format(reserved_name.value)
            )

    encryption_materials = crypto_config.encryption_materials()

    inner_material_description = encryption_materials.material_description.copy()

    # EAFP: probe for an encryption key; materials without one can still
    # sign, as long as no attribute actually requires encryption.
    try:
        encryption_materials.encryption_key
    except AttributeError:
        if crypto_config.attribute_actions.contains_action(CryptoAction.ENCRYPT_AND_SIGN):
            raise EncryptionError(
                "Attribute actions ask for some attributes to be encrypted but no encryption key is available"
            )
        encrypted_item = item.copy()
    else:
        # Add the attribute encryption mode to the inner material description
        encryption_mode = MaterialDescriptionValues.CBC_PKCS5_ATTRIBUTE_ENCRYPTION.value
        inner_material_description[MaterialDescriptionKeys.ATTRIBUTE_ENCRYPTION_MODE.value] = encryption_mode

        algorithm_descriptor = encryption_materials.encryption_key.algorithm + encryption_mode

        # Encrypt the attributes marked ENCRYPT_AND_SIGN; copy the rest
        # unchanged (they will still be covered by the signature).
        encrypted_item = {}
        for name, attribute in item.items():
            if crypto_config.attribute_actions.action(name) is CryptoAction.ENCRYPT_AND_SIGN:
                encrypted_item[name] = encrypt_attribute(
                    attribute_name=name,
                    attribute=attribute,
                    encryption_key=encryption_materials.encryption_key,
                    algorithm=algorithm_descriptor,
                )
            else:
                encrypted_item[name] = attribute.copy()

    signature_attribute = sign_item(encrypted_item, encryption_materials.signing_key, crypto_config)
    encrypted_item[ReservedAttributes.SIGNATURE.value] = signature_attribute

    try:
        # Add the signing key algorithm identifier to the inner material
        # description if provided
        inner_material_description[
            MaterialDescriptionKeys.SIGNING_KEY_ALGORITHM.value
        ] = encryption_materials.signing_key.signing_algorithm()
    except NotImplementedError:
        # Not all signing keys will provide this value
        pass

    material_description_attribute = serialize_material_description(inner_material_description)
    encrypted_item[ReservedAttributes.MATERIAL_DESCRIPTION.value] = material_description_attribute

    return encrypted_item
Encrypt a DynamoDB item. >>> from dynamodb_encryption_sdk.encrypted.item import encrypt_dynamodb_item >>> plaintext_item = { ... 'some': {'S': 'data'}, ... 'more': {'N': '5'} ... } >>> encrypted_item = encrypt_dynamodb_item( ... item=plaintext_item, ... crypto_config=my_crypto_config ... ) .. note:: This handles DynamoDB-formatted items and is for use with the boto3 DynamoDB client. :param dict item: Plaintext DynamoDB item :param CryptoConfig crypto_config: Cryptographic configuration :returns: Encrypted and signed DynamoDB item :rtype: dict
def _get_server(vm_, volumes, nics): ''' Construct server instance from cloud profile config ''' # Apply component overrides to the size from the cloud profile config vm_size = _override_size(vm_) # Set the server availability zone from the cloud profile config availability_zone = config.get_cloud_config_value( 'availability_zone', vm_, __opts__, default=None, search_global=False ) # Assign CPU family from the cloud profile config cpu_family = config.get_cloud_config_value( 'cpu_family', vm_, __opts__, default=None, search_global=False ) # Contruct server object return Server( name=vm_['name'], ram=vm_size['ram'], availability_zone=availability_zone, cores=vm_size['cores'], cpu_family=cpu_family, create_volumes=volumes, nics=nics )
Construct server instance from cloud profile config
def same_disks(self, count=2): """ filter self to the required number of disks with same size and type Select the disks with the same type and same size. If not enough disks available, set self to empty. :param count: number of disks to retrieve :return: disk list """ ret = self if len(self) > 0: type_counter = Counter(self.drive_type) drive_type, counts = type_counter.most_common()[0] self.set_drive_type(drive_type) if len(self) > 0: size_counter = Counter(self.capacity) size, counts = size_counter.most_common()[0] self.set_capacity(size) if len(self) >= count: indices = self.index[:count] self.set_indices(indices) else: self.set_indices('N/A') return ret
filter self to the required number of disks with same size and type Select the disks with the same type and same size. If not enough disks available, set self to empty. :param count: number of disks to retrieve :return: disk list
def _shutdown(self):
    """Stop consuming, close the AMQP connection, and restore signal handlers."""
    channel = self._channel
    if channel:
        _log.info("Halting %r consumer sessions", channel.consumer_tags)
    self._running = False

    connection = self._connection
    if connection and connection.is_open:
        connection.close()

    # Hand SIGTERM/SIGINT back to their default handlers so a second
    # signal terminates the process immediately.
    signal.signal(signal.SIGTERM, signal.SIG_DFL)
    signal.signal(signal.SIGINT, signal.SIG_DFL)
Gracefully shut down the consumer and exit.
def decrypt(self, k, a, iv, e, t):
    """
    Decrypt according to the selected encryption and hashing functions.

    :param k: Encryption key (optional)
    :param a: Additional Authenticated Data
    :param iv: Initialization Vector
    :param e: Ciphertext
    :param t: Authentication Tag

    Returns plaintext or raises an error
    """
    # AES-GCM: the tag is supplied up front and verified by finalize().
    decryptor = Cipher(
        algorithms.AES(k), modes.GCM(iv, t), backend=self.backend
    ).decryptor()
    decryptor.authenticate_additional_data(a)
    plaintext = decryptor.update(e) + decryptor.finalize()
    return plaintext
Decrypt according to the selected encryption and hashing functions. :param k: Encryption key (optional) :param a: Additional Authenticated Data :param iv: Initialization Vector :param e: Ciphertext :param t: Authentication Tag Returns plaintext or raises an error
def linOriginRegression(points):
    """Compute a linear regression constrained through the origin.

    :param points: iterable of (x, y) pairs (indexable items)
    :return: tuple ``(slope, sum_x, sum_y)`` where slope is sum_y/sum_x,
        falling back to 1 when the x-values sum to zero.
    """
    sum_x = sum(p[0] for p in points)
    sum_y = sum(p[1] for p in points)
    if sum_x:
        return sum_y / sum_x, sum_x, sum_y
    # Degenerate case: x-values cancel out; report unit slope.
    return 1, sum_x, sum_y
computes a linear regression starting at zero
def update_params_for_auth(self, headers, querys, auth_settings):
    """Updates header and query params based on authentication setting.

    :param headers: Header parameters dict to be updated.
    :param querys: Query parameters tuple list to be updated (unused here).
    :param auth_settings: Authentication setting identifiers list (unused here).
    """
    token = self.auth_token_holder.token
    if token is None:
        # No session token yet: fall back to HTTP basic auth.
        headers['Authorization'] = self.configuration.get_basic_auth_token()
    else:
        headers[Configuration.AUTH_TOKEN_HEADER_NAME] = token
Updates header and query params based on authentication setting. :param headers: Header parameters dict to be updated. :param querys: Query parameters tuple list to be updated. :param auth_settings: Authentication setting identifiers list.
def search_task_views(self, user, search_string):
    """Search task views matching a string, on behalf of a user.

    invokes TouchWorksMagicConstants.ACTION_SEARCH_TASK_VIEWS action
    (the previous docstring named ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT,
    which did not match the code)

    :param user: user the search is performed for (parameter1)
    :param search_string: text to match task views against (parameter2)
    :return: JSON response
    """
    magic = self._magic_json(
        action=TouchWorksMagicConstants.ACTION_SEARCH_TASK_VIEWS,
        parameter1=user,
        parameter2=search_string)
    response = self._http_request(TouchWorksEndPoints.MAGIC_JSON,
                                  data=magic)
    # Validates the magic response and raises on an invalid result.
    result = self._get_results_or_raise_if_magic_invalid(
        magic,
        response,
        TouchWorksMagicConstants.RESULT_SEARCH_TASK_VIEWS)
    return result
invokes TouchWorksMagicConstants.ACTION_SEARCH_TASK_VIEWS action :return: JSON response
def solar_elevation(self, dateandtime, latitude, longitude):
    """Calculate the elevation angle of the sun.

    :param dateandtime: The date and time for which to calculate the angle.
    :type dateandtime: :class:`~datetime.datetime`
    :param latitude: Latitude - Northern latitudes should be positive
    :type latitude: float
    :param longitude: Longitude - Eastern longitudes should be positive
    :type longitude: float

    :return: The elevation angle in degrees above the horizon.
    :rtype: float

    If `dateandtime` is a naive Python datetime then it is assumed to be
    in the UTC timezone.
    """
    # Clamp latitude just short of the poles to avoid degenerate trig below.
    if latitude > 89.8:
        latitude = 89.8

    if latitude < -89.8:
        latitude = -89.8

    if dateandtime.tzinfo is None:
        # Naive datetime: treated as UTC with zero offset.
        zone = 0
        utc_datetime = dateandtime
    else:
        # Offset in hours, sign-flipped (west-positive convention).
        zone = -dateandtime.utcoffset().total_seconds() / 3600.0
        utc_datetime = dateandtime.astimezone(pytz.utc)

    # Time of day in fractional hours (UTC).
    timenow = (
        utc_datetime.hour
        + (utc_datetime.minute / 60.0)
        + (utc_datetime.second / 3600)
    )

    JD = self._julianday(dateandtime)
    # Julian centuries since J2000.0, including the time-of-day fraction.
    t = self._jday_to_jcentury(JD + timenow / 24.0)
    theta = self._sun_declination(t)
    eqtime = self._eq_of_time(t)
    solarDec = theta  # in degrees

    # Minutes to add to clock time to obtain true solar time.
    solarTimeFix = eqtime - (4.0 * -longitude) + (60 * zone)
    trueSolarTime = (
        dateandtime.hour * 60.0
        + dateandtime.minute
        + dateandtime.second / 60.0
        + solarTimeFix
    )
    # in minutes

    # Wrap true solar time into a single day (0..1440 minutes).
    while trueSolarTime > 1440:
        trueSolarTime = trueSolarTime - 1440

    # Hour angle of the sun in degrees; 0 at solar noon, 4 min per degree.
    hourangle = trueSolarTime / 4.0 - 180.0
    # Thanks to Louis Schwarzmayr for the next line:
    if hourangle < -180:
        hourangle = hourangle + 360.0

    harad = radians(hourangle)

    # Cosine of the solar zenith angle (spherical trigonometry).
    csz = sin(radians(latitude)) * sin(radians(solarDec)) + cos(
        radians(latitude)
    ) * cos(radians(solarDec)) * cos(harad)

    # Guard against floating-point rounding pushing cos outside [-1, 1].
    if csz > 1.0:
        csz = 1.0
    elif csz < -1.0:
        csz = -1.0

    zenith = degrees(acos(csz))
    azDenom = cos(radians(latitude)) * sin(radians(zenith))

    # NOTE(review): the azimuth computed below is never used in the
    # return value — it appears to be retained from a sibling
    # solar_azimuth implementation. Confirm before removing.
    if abs(azDenom) > 0.001:
        azRad = (
            (sin(radians(latitude)) * cos(radians(zenith))) - sin(radians(solarDec))
        ) / azDenom

        if abs(azRad) > 1.0:
            if azRad < 0:
                azRad = -1.0
            else:
                azRad = 1.0

        azimuth = 180.0 - degrees(acos(azRad))

        if hourangle > 0.0:
            azimuth = -azimuth
    else:
        if latitude > 0.0:
            azimuth = 180.0
        else:
            azimuth = 0.0

    if azimuth < 0.0:
        azimuth = azimuth + 360.0

    # Elevation ignoring the atmosphere.
    exoatmElevation = 90.0 - zenith

    # Atmospheric refraction correction in degrees (piecewise
    # approximation; negligible above 85 degrees elevation).
    if exoatmElevation > 85.0:
        refractionCorrection = 0.0
    else:
        te = tan(radians(exoatmElevation))
        if exoatmElevation > 5.0:
            refractionCorrection = (
                58.1 / te - 0.07 / (te * te * te) + 0.000086 / (te * te * te * te * te)
            )
        elif exoatmElevation > -0.575:
            # Polynomial fit for low elevations near the horizon.
            step1 = -12.79 + exoatmElevation * 0.711
            step2 = 103.4 + exoatmElevation * (step1)
            step3 = -518.2 + exoatmElevation * (step2)
            refractionCorrection = 1735.0 + exoatmElevation * (step3)
        else:
            refractionCorrection = -20.774 / te

        # Convert from arcseconds to degrees.
        refractionCorrection = refractionCorrection / 3600.0

    solarzen = zenith - refractionCorrection

    solarelevation = 90.0 - solarzen

    return solarelevation
Calculate the elevation angle of the sun. :param dateandtime: The date and time for which to calculate the angle. :type dateandtime: :class:`~datetime.datetime` :param latitude: Latitude - Northern latitudes should be positive :type latitude: float :param longitude: Longitude - Eastern longitudes should be positive :type longitude: float :return: The elevation angle in degrees above the horizon. :rtype: float If `dateandtime` is a naive Python datetime then it is assumed to be in the UTC timezone.
def _LeaseFlowProcessingReqests(self, cursor=None):
    """Leases a number of flow processing requests.

    Atomically claims up to 50 requests that are due (delivery_time unset
    or passed) and not currently leased (or whose lease has expired),
    marking them as leased by this process for 10 minutes, then reads back
    exactly the rows this process just claimed.

    NOTE(review): the method name carries a typo ("Reqests"); it is kept
    as-is because callers reference it by this name.

    Args:
      cursor: database cursor used for both statements. The None default
        is never usable here — presumably supplied by a transaction
        wrapper; confirm at call sites.

    Returns:
      A list of rdf_flows.FlowProcessingRequest objects with timestamp,
      leased_until and leased_by filled in; an empty list when nothing
      was due.
    """
    now = rdfvalue.RDFDatetime.Now()
    expiry = now + rdfvalue.Duration("10m")

    # Claim due, unleased (or lease-expired) requests for this process.
    query = """
      UPDATE flow_processing_requests
      SET leased_until=FROM_UNIXTIME(%(expiry)s), leased_by=%(id)s
      WHERE (delivery_time IS NULL OR
             delivery_time <= NOW(6)) AND
            (leased_until IS NULL OR
             leased_until < NOW(6))
      LIMIT %(limit)s
    """

    id_str = utils.ProcessIdString()
    args = {
        "expiry": mysql_utils.RDFDatetimeToTimestamp(expiry),
        "id": id_str,
        "limit": 50,
    }
    updated = cursor.execute(query, args)

    if updated == 0:
        # Nothing was claimed; skip the read-back query.
        return []

    # Read back the rows we just leased, identified by the unique
    # (leased_by, leased_until) pair written above.
    query = """
      SELECT UNIX_TIMESTAMP(timestamp), request
      FROM flow_processing_requests
      FORCE INDEX (flow_processing_requests_by_lease)
      WHERE leased_by=%(id)s AND leased_until=FROM_UNIXTIME(%(expiry)s)
      LIMIT %(updated)s
    """
    args = {
        "expiry": mysql_utils.RDFDatetimeToTimestamp(expiry),
        "id": id_str,
        "updated": updated,
    }
    cursor.execute(query, args)

    res = []
    for timestamp, request in cursor.fetchall():
        req = rdf_flows.FlowProcessingRequest.FromSerializedString(request)
        req.timestamp = mysql_utils.TimestampToRDFDatetime(timestamp)
        # Mirror the lease bookkeeping on the returned objects.
        req.leased_until = expiry
        req.leased_by = id_str
        res.append(req)
    return res
Leases a number of flow processing requests.
def default_validity_start():
    """
    Sets validity_start field to 1 day before the current date
    (avoids "certificate not valid yet" edge case).

    In some cases, because of timezone differences, when certificates
    were just created they were considered valid in a timezone
    (eg: Europe) but not yet valid in another timezone (eg: US).

    This function intentionally returns naive datetime (not timezone
    aware), so that certificates are valid from 00:00 AM in all timezones.
    """
    yesterday = datetime.now() - timedelta(days=1)
    # Midnight of the previous day, as a naive datetime.
    return datetime(yesterday.year, yesterday.month, yesterday.day)
Sets validity_start field to 1 day before the current date (avoids "certificate not valid yet" edge case). In some cases, because of timezone differences, when certificates were just created they were considered valid in a timezone (eg: Europe) but not yet valid in another timezone (eg: US). This function intentionally returns naive datetime (not timezone aware), so that certificates are valid from 00:00 AM in all timezones.
def dvhat(s1):
    """
    Find the unit vector corresponding to a state vector and the
    derivative of the unit vector.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dvhat_c.html

    :param s1: State to be normalized.
    :type s1: 6-Element Array of floats
    :return: Unit vector s1 / abs(s1), and its time derivative.
    :rtype: 6-Element Array of floats
    """
    # Use equality, not identity: `len(s1) is 6` only worked because of
    # CPython's small-int cache and emits a SyntaxWarning on Python 3.8+.
    assert len(s1) == 6
    s1 = stypes.toDoubleVector(s1)
    sout = stypes.emptyDoubleVector(6)
    libspice.dvhat_c(s1, sout)
    return stypes.cVectorToPython(sout)
Find the unit vector corresponding to a state vector and the derivative of the unit vector. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dvhat_c.html :param s1: State to be normalized. :type s1: 6-Element Array of floats :return: Unit vector s1 / abs(s1), and its time derivative. :rtype: 6-Element Array of floats
def iscan(self, *, match=None, count=None):
    """Incrementally iterate the keys space using async for.

    Usage example:

    >>> async for key in redis.iscan(match='something*'):
    ...     print('Matched:', key)

    """
    # Each step of the iterator issues a SCAN from the current cursor.
    def fetch(cursor):
        return self.scan(cursor, match=match, count=count)

    return _ScanIter(fetch)
Incrementally iterate the keys space using async for. Usage example: >>> async for key in redis.iscan(match='something*'): ... print('Matched:', key)
def download_docs(client, output_filename=None, expanded=False):
    """
    Given a LuminosoClient pointing to a project and a filename to write to,
    retrieve all its documents in batches, and write them to a JSON lines
    (.jsons) file with one document per line.
    """
    if output_filename is None:
        # Derive a default filename from the project name, appending
        # .1, .2, ... until we find a name that isn't already taken.
        projname = _sanitize_filename(client.get()['name'])
        candidate = '{}.jsons'.format(projname)
        suffix = 0
        while os.access(candidate, os.F_OK):
            suffix += 1
            candidate = '{}.{}.jsons'.format(projname, suffix)
        output_filename = candidate

    print('Downloading project to {!r}'.format(output_filename))
    with open(output_filename, 'w', encoding='utf-8') as out:
        for doc in iterate_docs(client, expanded=expanded, progress=True):
            print(json.dumps(doc, ensure_ascii=False), file=out)
Given a LuminosoClient pointing to a project and a filename to write to, retrieve all its documents in batches, and write them to a JSON lines (.jsons) file with one document per line.
def format_search_results(self, search_results):
    """Format search results.

    Args:
        search_results (list of `ResourceSearchResult`): Search to format.

    Returns:
        List of 2-tuple: Text and color to print in.
    """
    # Flatten the per-result line lists into one list, in order.
    return [
        line
        for result in search_results
        for line in self._format_search_result(result)
    ]
Format search results. Args: search_results (list of `ResourceSearchResult`): Search to format. Returns: List of 2-tuple: Text and color to print in.
def _access_control(self, access_control, my_media_group=None): """ Prepares the extension element for access control Extension element is the optional parameter for the YouTubeVideoEntry We use extension element to modify access control settings Returns: tuple of extension elements """ # Access control extension = None if access_control is AccessControl.Private: # WARNING: this part of code is not tested # set video as private if my_media_group: my_media_group.private = gdata.media.Private() elif access_control is AccessControl.Unlisted: # set video as unlisted from gdata.media import YOUTUBE_NAMESPACE from atom import ExtensionElement kwargs = { "namespace": YOUTUBE_NAMESPACE, "attributes": {'action': 'list', 'permission': 'denied'}, } extension = ([ExtensionElement('accessControl', **kwargs)]) return extension
Prepares the extension element for access control Extension element is the optional parameter for the YouTubeVideoEntry We use extension element to modify access control settings Returns: tuple of extension elements
def get_patches_ignore_regex(self):
    """Returns a compiled regex for filtering out patches, or None.

    The pattern is parsed from a comment in the specfile of the form:

        # patches_ignore=(regex)

    which makes this method return the compiled form of '(regex)'.
    Only a very limited subset of characters are accepted so no fancy
    stuff like matching groups etc.

    Returns None when no such comment exists or the extracted pattern
    fails to compile.
    """
    match = re.search(r'# *patches_ignore=([\w *.+?[\]|{,}\-_]+)', self.txt)
    if not match:
        return None
    regex_string = match.group(1)
    try:
        return re.compile(regex_string)
    except re.error:
        # Malformed user-supplied pattern: treat as "no filter" rather
        # than failing (was a blanket `except Exception`; re.compile
        # signals bad patterns via re.error).
        return None
Returns a compiled regex (or None) for filtering out patches The pattern is parsed from a comment in the specfile that contains the word patches_ignore followed by an equal sign. For example, a comment as such: # patches_ignore=(regex) would mean this method returns the compiled form of '(regex)' Only a very limited subset of characters are accepted so no fancy stuff like matching groups etc.
def get_changes(self, commit_id, repository_id, project=None, top=None, skip=None):
    """GetChanges.
    Retrieve changes for a particular commit.
    :param str commit_id: The id of the commit.
    :param str repository_id: The id or friendly name of the repository. To use the friendly name, projectId must also be specified.
    :param str project: Project ID or project name
    :param int top: The maximum number of changes to return.
    :param int skip: The number of changes to skip.
    :rtype: :class:`<GitCommitChanges> <azure.devops.v5_0.git.models.GitCommitChanges>`
    """
    # Serialize only the route values that were actually supplied.
    route_values = {}
    for route_key, param_name, value in (
        ('project', 'project', project),
        ('commitId', 'commit_id', commit_id),
        ('repositoryId', 'repository_id', repository_id),
    ):
        if value is not None:
            route_values[route_key] = self._serialize.url(param_name, value, 'str')

    # Likewise for the optional paging query parameters.
    query_parameters = {}
    for name, value in (('top', top), ('skip', skip)):
        if value is not None:
            query_parameters[name] = self._serialize.query(name, value, 'int')

    response = self._send(http_method='GET',
                          location_id='5bf884f5-3e07-42e9-afb8-1b872267bf16',
                          version='5.0',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('GitCommitChanges', response)
GetChanges. Retrieve changes for a particular commit. :param str commit_id: The id of the commit. :param str repository_id: The id or friendly name of the repository. To use the friendly name, projectId must also be specified. :param str project: Project ID or project name :param int top: The maximum number of changes to return. :param int skip: The number of changes to skip. :rtype: :class:`<GitCommitChanges> <azure.devops.v5_0.git.models.GitCommitChanges>`
def create(self, doc_details):
    ''' a method to create a new document in the collection

    :param doc_details: dictionary with document details and user id value
    :return: dictionary with document details and _id and _rev values
    '''

    # https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/document/post__db___doc_

    title = '%s.create' % self.__class__.__name__

    # validate input against the model, if one is configured
    if self.model:
        doc_details = self.model.validate(doc_details, path_to_root='', object_title='%s(doc_details={...}' % title)

    # deep-copy so the caller's dict is not mutated by the _id/_rev fields
    from copy import deepcopy
    new_record = deepcopy(doc_details)

    # send the request; anything but 200/201 is surfaced as an exception
    response = requests.post(self.bucket_url + '/', json=new_record)
    if response.status_code not in (200, 201):
        response = response.json()
        raise Exception('%s() error: %s' % (title, response))

    # annotate the copy with the server-assigned id and revision
    body = response.json()
    new_record['_id'] = body['id']
    new_record['_rev'] = body['rev']
    return new_record
a method to create a new document in the collection :param doc_details: dictionary with document details and user id value :return: dictionary with document details and _id and _rev values