code
stringlengths 4
4.48k
| docstring
stringlengths 1
6.45k
| _id
stringlengths 24
24
|
|---|---|---|
def sum_campaign_odds(outcomes):
    """Return the odds of the campaign's success.

    Adds up the odds of the possible outcomes (generated by
    calculate_campaign); only the final stage — the last element of
    *outcomes*, a mapping of outcome -> probability — contributes.

    :param outcomes: sequence whose last element is a dict mapping
        outcome keys to probabilities
    :return: summed probability of the final-stage outcomes
    """
    # sum() over the dict values replaces the manual accumulation loop
    return sum(outcomes[-1].values())
|
sum_campaign_odds() adds up the odds of the possible outcomes (generated by
calculate_campaign) to return the odds of the campaign's success.
|
625941bbd53ae8145f87a131
|
def _homset(self, *args, **kwds):
    """Return the homset between two toric varieties.

    INPUT: same as :class:`sage.schemes.generic.homset.SchemeHomset_generic`.

    OUTPUT: a :class:`sage.schemes.toric.homset.SchemeHomset_toric_variety`.
    """
    # imported lazily to avoid a circular module dependency
    from sage.schemes.toric.homset import SchemeHomset_toric_variety
    homset_factory = SchemeHomset_toric_variety
    return homset_factory(*args, **kwds)
|
Return the homset between two toric varieties.
INPUT:
Same as :class:`sage.schemes.generic.homset.SchemeHomset_generic`.
OUTPUT:
A :class:`sage.schemes.toric.homset.SchemeHomset_toric_variety`.
EXAMPLES::
sage: P1xP1 = toric_varieties.P1xP1()
sage: P1 = toric_varieties.P1()
sage: hom_set = P1xP1.Hom(P1); hom_set
Set of morphisms
From: 2-d CPR-Fano toric variety covered by 4 affine patches
To: 1-d CPR-Fano toric variety covered by 2 affine patches
sage: type(hom_set)
<class 'sage.schemes.toric.homset.SchemeHomset_toric_variety_with_category'>
This is also the Hom-set for algebraic subschemes of toric varieties::
sage: P1xP1.inject_variables()
Defining s, t, x, y
sage: P1 = P1xP1.subscheme(s-t)
sage: hom_set = P1xP1.Hom(P1)
sage: hom_set([s,s,x,y])
Scheme morphism:
From: 2-d CPR-Fano toric variety covered by 4 affine patches
To: Closed subscheme of 2-d CPR-Fano toric variety covered by 4 affine patches defined by:
s - t
Defn: Defined on coordinates by sending [s : t : x : y] to
[s : s : x : y]
sage: hom_set = P1.Hom(P1)
sage: hom_set([s,s,x,y])
Scheme endomorphism of Closed subscheme of 2-d CPR-Fano toric
variety covered by 4 affine patches defined by:
s - t
Defn: Defined on coordinates by sending [s : t : x : y] to
[t : t : x : y]
|
625941bbbaa26c4b54cb0fdf
|
def test(self):
    """Import (or re-import) the module under test while capturing its
    stdout, then invoke the matching test method with the module.

    The stdout log file is only written on the first import of the module.
    """
    first_import = name not in imported
    logpath = "%s%s%s_output.txt" % (path, os.sep, name) if first_import else None
    with StdoutCaptured(logfilepath=logpath):
        if first_import:
            imported[name] = importlib.import_module(name)
        else:
            # already imported once: re-run its top-level code
            reload(imported[name])
    getattr(self, name)(imported[name])
|
Tests and logs stdout.
|
625941bb656771135c3eb72a
|
def rayleigh_jeans(wave_units='AA', flux_units='erg/s/cm2/AA', disc_integrated=True):
    """Rayleigh-Jeans tail fit function with parameters (T, scale).

    @param wave_units: wavelength units
    @type wave_units: string
    @param flux_units: flux units
    @type flux_units: string
    @param disc_integrated: sets units equal to SED models
    @type disc_integrated: bool
    """
    par_names = ['T', 'scale']

    def evaluate(p, x):
        # p[0] is the temperature T, p[1] the overall scale factor
        return p[1] * sed_model.rayleigh_jeans(x, p[0], wave_units=wave_units,
                                               flux_units=flux_units,
                                               disc_integrated=disc_integrated)

    # keep the reported name identical to the original lambda's
    evaluate.__name__ = 'rayleigh_jeans'
    return Function(function=evaluate, par_names=par_names)
|
Rayleigh-Jeans tail (T, scale).
@param wave_units: wavelength units
@type wave_units: string
@param flux_units: flux units
@type flux_units: string
@param disc_integrated: sets units equal to SED models
@type disc_integrated: bool
|
625941bb7d847024c06be175
|
def permuteUnique(self, nums):
    """Return all unique permutations of *nums* (may contain duplicates).

    Backtracking with pruning: the input is sorted so equal values are
    adjacent, and a value is skipped when its identical left neighbour has
    not been taken at the current depth — this removes duplicate branches.

    T: O(n * n!), S: O(n * n!).
    """
    n = len(nums)
    if n == 0:
        return []
    nums.sort()
    taken = [False] * n
    results = []

    def dfs(prefix):
        if len(prefix) == n:
            # copy: prefix is mutated as we backtrack
            results.append(prefix[:])
            return
        for idx in range(n):
            if taken[idx]:
                continue
            # prune duplicate values at the same depth
            if idx > 0 and nums[idx] == nums[idx - 1] and not taken[idx - 1]:
                continue
            taken[idx] = True
            prefix.append(nums[idx])
            dfs(prefix)
            taken[idx] = False
            prefix.pop()

    dfs([])
    return results
|
回溯 + 剪枝
https://pic.leetcode-cn.com/1600386643-uhkGmW-image.png
递归的终止条件是: 一个排列中的数字已经选够了 ,因此我们需要一个变量来表示当前程序递归到第几层,我们把这个变量叫做 depth,或者命名为 index ,表示当前要确定的是某个全排列中下标为 index 的那个数是多少;
path 变量是一个栈
对象类型变量在传参的过程中,复制的是变量的地址。这些地址被添加到 res 变量,但实际上指向的是同一块内存地址,因此我们会看到 6 个空的列表对象。解决的方法很简单,在 res.add(path); 这里做一次拷贝即可。
布尔数组 used,初始化的时候都为 false 表示这些数还没有被选择,当我们选定一个数的时候,就将这个数组的相应位置设置为 true ,这样在考虑下一个位置的时候,就能够以 O(1)O(1) 的时间复杂度判断这个数是否被选择过,这是一种「以空间换时间」的思想。
T: O(n×n!)
S: O(n×n!)
|
625941bb26238365f5f0ed26
|
def update_variables(self, px, py, pz):
    """Given a position for the end effector, update the robot joint
    variables to match that location.

    Note:
        Should yield the same results as inverse_kinematics when the
        correct nx and ny are given; preferable since only a position
        is required.
    Adapted from:
        https://github.com/edmundofuentes/raspberry-scara-robot-python/blob/master/classes/ScaraRobot.py

    Args:
        px: X coordinate of end effector.
        py: Y coordinate of end effector.
        pz: Z coordinate of end effector (height).
    """
    # remember the previous height before overwriting it
    self._last_h = self._h
    self._h = pz
    # elbow (arm) angle from the two link lengths a and b (law of cosines form)
    self._arm_angle = 2 * atan( sqrt(((self.a + self.b) ** 2 - (px ** 2 + py ** 2)) / ((px ** 2 + py ** 2) - (self.a - self.b) ** 2)))
    # NOTE(review): this condition compares _arm_angle with itself, so it is
    # always False and the branch is dead code. It was presumably meant to
    # compare against the *previous* arm angle to pick the closer elbow
    # configuration — TODO confirm against the upstream ScaraRobot source.
    if abs(self._arm_angle - self._arm_angle) > abs(self._arm_angle + self._arm_angle):
        self._arm_angle = -self._arm_angle
    # base angle = direction to the target minus the elbow-induced offset
    phi = atan2(py, px)
    psi = atan2(self.b * sin(self._arm_angle), self.a + self.b * cos(self._arm_angle))
    self._base_angle = phi - psi
|
Given a position for the end effector, updates the robot variables to match that location.
Note:
This method should yield same results as inverse_kinematics, if the correct nx and ny are given.
It is the preferable way to update variables, since only position is given.
Adapted from source below:
https://github.com/edmundofuentes/raspberry-scara-robot-python/blob/master/classes/ScaraRobot.py
Args:
px: X coordinate of end effector.
py: Y coordinate of end effector.
pz: Z coordinate of end effector (height).
|
625941bbe8904600ed9f1de5
|
def delete_rows(self, row_indices):
    """Delete the rows at the specified indices.

    :param row_indices: list of row indices; duplicates are collapsed and
        out-of-range indices are silently skipped
    :raises IndexError: if *row_indices* is not a list (kept as IndexError,
        not TypeError, for backward compatibility with existing callers)
    """
    if not isinstance(row_indices, list):
        raise IndexError
    if row_indices:
        # delete from the highest index downwards so earlier deletions do
        # not shift the positions of rows still to be removed
        for i in sorted(_unique(row_indices), reverse=True):
            if i < self.number_of_rows():
                del self.__array[i]
|
Deletes specified row indices
|
625941bb50485f2cf553cc55
|
def has_isomers(self):
    """Return True if the isotope has any isomeric states."""
    # membership test on the dict directly; '.keys()' was redundant
    return 'isomers' in self.properties
|
Returns true if the isotope has any isomeric states.
|
625941bb097d151d1a222d18
|
def trycatch(*args, **kwargs):
    """Wrap a function in a try/except block. Can be used as a function
    decorator or as a function that accepts another function.

    **Params**:
    - func (func) - Function to wrap. Only available when used as a function.
    - oncatch (func) [kwargs] - Called if an exception is caught; its return
      value becomes the wrapper's return value.
    - rethrow (bool) [kwargs] - If true, the exception is re-raised (after
      oncatch runs).

    **Examples**:
    ::
        trycatch(myfunc)(myarg1, myarg2, kwarg=mykwarg)
        trycatch(myfunc, oncatch=mycatchfunc)(myarg1, myarg2, kwarg=mykwarg)
        trycatch(myfunc, rethrow=True)(myarg1, myarg2, kwarg=mykwarg)
    """
    import functools
    rethrow = kwargs.get('rethrow', False)
    oncatch = kwargs.get('oncatch', None)

    def decor(func):
        @functools.wraps(func)  # preserve the wrapped function's identity
        def wrapper(*fargs, **fkrgs):
            try:
                return func(*fargs, **fkrgs)
            # Exception, not a bare except: the bare form also swallowed
            # KeyboardInterrupt/SystemExit, which hid Ctrl-C from callers
            except Exception:
                cresult = None
                if oncatch is not None:
                    cresult = oncatch()
                if rethrow:
                    raise
                return cresult
        return wrapper

    # direct-call form: trycatch(func, ...) instead of @trycatch
    if len(args) > 0 and callable(args[0]):
        return decor(args[0])
    return decor
|
Wraps a function in a try/catch block. Can be used as a function
decorator or as a function that accepts another function.
**Params**:
- func (func) - Function to call. Only available when used as a function.
- oncatch (str) [kwargs] - Function to call if an exception is caught.
- rethrow (str) [kwargs] - If true, exception will be re-thrown.
**Examples**:
::
trycatch(myfunc)(myarg1, myarg2, kwarg=mykwarg)
trycatch(myfunc, oncatch=mycatchfunc)(myarg1, myarg2, kwarg=mykwarg)
trycatch(myfunc, rethrow=True)(myarg1, myarg2, kwarg=mykwarg)
|
625941bb07d97122c4178747
|
def SetAsSource(self, pSource):
    """Create a sender box.

    Use an existing FBBox object to create a sender in the relation.

    pSource : Source box to insert in the constraint.
    return : A place holder box for the object.
    """
    # stub: the real implementation is provided by the native SDK binding
    pass
|
Create a sender box.
Use an existing FBBox object to create a sender in the relation.
pSource : Source box to insert in the constraint.
return : A place holder box for the object.
|
625941bbfb3f5b602dac354b
|
def unarchive_user(self, user_id):
    """Unarchive the user with the specified user ID.

    Args:
        user_id: `int`. The ID of the user to unarchive.

    Returns:
        `NoneType`: None.

    Raises:
        requests.exceptions.HTTPError: if the PATCH request fails.
    """
    url = self.record_url + "/unarchive"
    # NOTE(review): verify=False disables TLS certificate verification —
    # confirm this is intentional for the target environment.
    res = requests.patch(url=url, json={"user_id": user_id}, headers=HEADERS, verify=False)
    # NOTE(review): writing every response to "bob.html" looks like leftover
    # debugging output — consider removing or parameterizing the filename.
    self.write_response_html_to_file(res, "bob.html")
    res.raise_for_status()
|
Unarchives the user with the specified user ID.
Args:
user_id: `int`. The ID of the user to unarchive.
Returns:
`NoneType`: None.
|
625941bb187af65679ca4fda
|
def intersection_with(self, line):
    """Find the intersection of two 2-D lines.

    - a line intersected with itself is the line itself (infinitely many
      solutions), so ``self`` is returned;
    - two non-parallel lines intersect in a single point (a Vector);
    - parallel but unequal lines do not intersect: None is returned.
    """
    try:
        # lines are A*x + B*y = k1 and C*x + D*y = k2
        A, B = self.normal_vector.coordinates
        C, D = line.normal_vector.coordinates
        k1 = self.constant_term
        k2 = line.constant_term
        # Cramer's rule; rounding the determinant to 10 places makes
        # near-parallel lines divide by zero and fall into the handler
        x_numerator = D*k1 - B*k2
        y_numerator = -C*k1 + A*k2
        one_over_denom = Decimal('1')/round((A*D - B*C), 10)
        return Vector([x_numerator, y_numerator]) * one_over_denom
    except ZeroDivisionError:
        # parallel lines: identical -> the line itself, otherwise no point
        if self == line:
            return self
        else:
            return None
|
find the intersection of two lines
the intersection of a line with itself is the line itself (infinity)
the intersection of two different lines is a single point
the intersection of parallel lines is None
|
625941bb24f1403a92600a25
|
def _getOrException(self, d, k): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return d.get(k) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> raise OneDriveException("obj do not has key %s" % (str(k),))
|
get or exception
|
625941bbfb3f5b602dac354c
|
def complete_assignment(self, assignment, data):
    """Complete a task assignment by submitting data that answers the
    questions of the task.

    @param assignment: the task assignment that this data is intended
        to complete
    @param data: dict-like object or a QuestionForm object that contains
        the values submitted by the worker
    @raise InvalidAssignmentStateError: if the assignment is not ACCEPTED
        or has been disposed
    @raise InvalidQuestionFormError: if the submitted form data fails
        validation
    """
    # only an accepted, non-disposed assignment may be completed
    if ( assignment.status != AssignmentStatusField.ACCEPTED or assignment.dispose):
        raise InvalidAssignmentStateError()
    ans = QFormAnswer()
    q = QuestionValidator()
    name, form = q.extract(assignment.task.question)
    if ( name == "QuestionForm" ):
        # structured question: validate the submitted values against the form
        form.process(data)
        if ( not form.is_valid() ):
            raise InvalidQuestionFormError(form)
        ansStr = ans.encode(form)
    else:
        # free-form question: encode the raw submitted values
        data = dict(data)
        ansStr = ans.encode(data)
    assignment.answer = ansStr
    assignment.status = AssignmentStatusField.SUBMITTED
    assignment.submitted = timezone.now()
    # schedule automatic approval relative to the submission time
    aaTS = assignment.submitted + assignment.task.tasktype.auto_approve
    assignment.auto_approve = aaTS
    assignment.save()
|
Complete a task assignment by submitting data that answers
the questions of the task.
@param assignment the task assignment that this data is
intended to complete
@param data dict-like object or a QuestionForm object that
contains the values submitted by the worker
|
625941bb5fdd1c0f98dc00ee
|
def test_weather_info_without_coords(self):
    """Request weather info with malformed (non-numeric) coordinates.

    NOTE(review): the accompanying docstring said "Return None if user
    coordinates are wrong", but the assertion expects an error payload
    with cod == '400' — confirm which contract is intended.
    """
    result = get_weather_info(coords=('a', 'b'))
    # the API reports a bad request via the 'cod' field
    self.assertTrue(result['cod'] == '400')
|
Expect an error response (cod == '400') if the user coordinates are wrong
|
625941bbaad79263cf3908f8
|
def dir_exists(self):
    """Check whether the tmp directory for this file already exists."""
    target = path.join(self.tmp_dir, self.file_id)
    return path.exists(target)
|
This checks if tmp-directory for your file already exists.
|
625941bbdd821e528d63b067
|
def number_prettify(number: int) -> str:
    """Shorten a large number (int or numeric str) into a readable string.

    Values above one million get an "M" suffix, values above one thousand
    a "K" suffix, each rounded to one decimal; a trailing ".0" is dropped.
    """
    value = int(number)
    if value > 1000000:
        text = "%sM" % round(value / 1000000, 1)
    elif value > 1000:
        text = "%sK" % round(value / 1000, 1)
    else:
        text = str(value)
    # "2.0M" -> "2M"
    return text.replace(".0", "")
|
Takes numbers (int or str format), and if they are too big, shortens them in readable format and returns string.
|
625941bb851cf427c661a3ce
|
def interruptCmdDelay(self, icommand, argList=[], delay=0):
    """Run an icommand with an optional argument list, terminating the
    icommand subprocess after *delay* seconds.

    Returns 0 if the subprocess was terminated.
    Returns -1 if the subprocess completed normally.
    Not currently checking against allowed icommands.

    NOTE(review): argList=[] is a mutable default argument; it is rebound
    rather than mutated here, so it is currently harmless, but None would
    be safer against future edits.
    """
    # point the icommand at this session's iRODS environment/auth files
    myenv = os.environ.copy()
    myenv['irodsEnvFile'] = "%s/.irodsEnv" % (self.sessionDir)
    myenv['irodsAuthFileName'] = "%s/.irodsA" % (self.sessionDir)
    cmdStr = "%s/%s" % (self.icommandsDir, icommand)
    argList = [cmdStr] + argList
    global RODSLOGDIR
    # find the most recent rods log file; fall back to the bare name
    proc = subprocess.Popen('ls -t '+RODSLOGDIR+'/rodsLog* | head -n1', stdout=subprocess.PIPE, shell=True)
    (myrodslogfile, err) = proc.communicate()
    if myrodslogfile == "":
        myrodslogfile = RODSLOGDIR + "/rodsLog"
    # record an interruption marker in the rods log
    with open(myrodslogfile.rstrip(),"a") as myrodslog:
        myrodslog.write(" --- interrupt icommand delay("+str(delay)+") ["+' '.join(argList)+"] --- \n")
    # NOTE(review): redundant — the with-block already closed the file
    myrodslog.close()
    p = subprocess.Popen(argList, stdout = subprocess.PIPE, stderr = subprocess.PIPE, env = myenv)
    # poll in 10 ms steps until the delay elapses or the command exits
    granularity = 0.01
    t = 0.0
    while t < delay and p.poll() is None:
        time.sleep(granularity)
        t += granularity
    if p.poll() is None:
        # still running after the delay: kill it (the expected path)
        p.terminate()
        returncode = 0
    else:
        # finished on its own before the delay expired
        returncode = -1
    return returncode
|
Runs an icommand with optional argument list but
terminates the icommand subprocess after delay seconds.
Returns 0 if subprocess was terminated.
Returns -1 if subprocess completed normally.
Not currently checking against allowed icommands.
|
625941bb92d797404e304045
|
def convertPhase_to_Timing(phase_calibration, sample_time=5.0e-9):
    """Convert per-subband phase calibration to per-antenna timing calibration.

    Given the phase calibration of the 512 LOFAR subbands, such as the
    output of getStationPhaseCalibration, return the timing calibration of
    each antenna. *sample_time* is seconds per sample (default 5 ns).
    Not sure how well this works with HBA antennas.
    """
    angles = np.angle(phase_calibration)
    # phase difference between the two columns, scaled radians -> samples
    samples_per_radian = 1024 / (2 * np.pi)
    return (angles[:, 1] - angles[:, 0]) * samples_per_radian * sample_time
|
Given the phase calibration of the 512 LOFAR subbands, such as the output of getStationPhaseCalibration, return the timing calibration of each antenna.
Not sure how well this works with HBA antennas. Sample time should be seconds per sample. Default is 5 ns
|
625941bb63b5f9789fde6fa2
|
def bcost(action):
    """Return the cost (a number) of an action in the bridge problem.

    An action is an (a, b, arrow) tuple; a and b are times; arrow is a
    string. Crossing takes as long as the slower person.
    """
    time_a, time_b, _arrow = action
    return time_b if time_b > time_a else time_a
|
Returns the cost (a number) of an action in the
bridge problem.
An action is an (a, b, arrow) tuple; a and b are
times; arrow is a string.
|
625941bb2ae34c7f2600cfee
|
def runValueIteration(self):
    """Run self.iterations rounds of batch value iteration over all MDP
    states, storing the result in self.values.

    Each round computes every state's new value from the previous round's
    values (a fresh Counter per round), i.e. synchronous/batch updates.
    """
    allStates = self.mdp.getStates()
    for i in range(self.iterations):
        values = util.Counter()
        for state in allStates:
            if self.mdp.isTerminal(state):
                values[state] = 0.0
            else:
                possibleActions = self.mdp.getPossibleActions(state)
                q_vals = []
                for action in possibleActions:
                    q_vals.append(self.computeQValueFromValues(state, action))
                # NOTE(review): max() raises ValueError on an empty list —
                # assumes every non-terminal state has at least one legal
                # action; confirm the MDP guarantees this.
                values[state] = max(q_vals)
        # replace the value table only after the full sweep
        self.values = values
|
Return max Q value.
|
625941bb85dfad0860c3ad15
|
def plot_comparison(orgImg, xImg, yImg):
    """Plot original image, noisy image and de-noised image(s).

    :param orgImg: original image
    :param xImg: latent (de-noised) image, or a dict of name -> image to
        show several de-noised results side by side
    :param yImg: noisy image (y)
    """
    if type(xImg) is dict:
        # top row: original + noisy; bottom row: one panel per result
        f, axarr = plt.subplots(2, len(xImg))
        axarr[0, 0].imshow(orgImg, cmap='gray')
        axarr[0, 0].set_title('Original Image')
        axarr[0, 1].imshow(yImg, cmap='gray')
        axarr[0, 1].set_title('Noisy Image')
        # NOTE(review): assigning None does not hide the axis (axis('off')
        # or f.delaxes would); this also assumes len(xImg) >= 3, otherwise
        # it raises IndexError — confirm expected dict sizes.
        axarr[0, 2] = None
        for i, (name, x) in enumerate(xImg.items()):
            axarr[1, i].imshow(x, cmap='gray')
            axarr[1, i].set_title(name)
    else:
        # single result: one row with original / noisy / de-noised
        f, axarr = plt.subplots(1,3)
        axarr[0].imshow(orgImg, cmap='gray')
        axarr[0].set_title('Original Image')
        axarr[1].imshow(yImg, cmap='gray')
        axarr[1].set_title('Noisy Image')
        axarr[2].imshow(xImg, cmap='gray')
        axarr[2].set_title('De-noised Image')
    plt.show()
|
Plot original image, noisy image and de-noised image
:param orgImg: original image
:param xImg: latent image (x)
:param yImg: noisy image (y)
|
625941bb2c8b7c6e89b3567f
|
def set_hist_data(self, data):
    """Set the histogram data by wrapping it in a HistDataSource."""
    source = HistDataSource(data)
    self.set_hist_source(source)
|
Set histogram data
|
625941bbfff4ab517eb2f2f6
|
def test_superuser_sees_error_if_permission_is_already_registered(superuser, permission, testapp):
    """Show an error if the permission is already registered."""
    # log in as the superuser through the login form
    res = testapp.get('/')
    form = res.forms['loginForm']
    form['username'] = superuser.email
    form['password'] = 'myPrecious'
    res = form.submit().follow()
    assert res.status_code == 200
    # open the "New Permission" form from the permissions home page
    res = testapp.get(url_for('permission.home'))
    res = res.click(_('New Permission'))
    form = res.forms['registerPermissionForm']
    # re-register the same user/collection pair that already exists
    form['user_id'] = permission.user.id
    form['collection_id'] = permission.collection.id
    res = form.submit()
    # the duplicate registration must be reported on the resulting page
    assert escape( _('Permissions for user "%(username)s" on collection "%(code)s" already registered', username=permission.user.email, code=permission.collection.code)) in res
|
Show error if permission is already registered.
|
625941bbb545ff76a8913cda
|
def parse(self, response):
    """Parse every crawled page (starting from 'start_urls'), yielding one
    Request per article whose contents are handled by parse_contents.

    :param response: response object of the web page.
    """
    item = ArtscrapItem()
    articles = response.xpath("//" + self.xpath_for_class("media__content"))
    for article in articles:
        title = self.get_first(article.xpath(self.xpath_for_class('media__title') + "/a/text()").extract(), "").strip(' \n')
        # bug fix: 'title is not ""' compared object identity, not equality,
        # so empty titles were only skipped when the interpreter happened to
        # intern the literal; test emptiness by value instead
        if title != "":
            item["title"] = title
            item["summary"] = self.get_first(article.xpath(self.xpath_for_class('media__summary') + "/text()").extract(), "").strip(' \n')
            item["tags"] = self.get_first(article.xpath(self.xpath_for_class('tag') + "/text()").extract(), "").strip(' \n')
            article_url = ''.join(article.xpath(self.xpath_for_class("media__title") + "/a/@href").extract())
            item["url"] = response.urljoin(article_url)
            yield scrapy.Request(item["url"], callback=self.parse_contents, meta=item)
|
DESCRIPTION:
-----------
* This function is called for parsing every URL encountered,
starting from 'start_urls'.
PARAMETERS:
----------
1. response object of Web page.
|
625941bb566aa707497f4435
|
def get_gene(self, query_id, **options):
    """Return the genes present in the genomic region."""
    endpoint = 'gene'
    return self._get(endpoint, query_id, options)
|
Returns the genes present in the genomic region
|
625941bb9b70327d1c4e0c90
|
def __init__(self, pixel_dim, initial_color=None, **kwargs):
    """Image constructor.

    :param pixel_dim: Resolution of image.
    :type pixel_dim: Interval or Tuple of two Integers
    :param initial_color: Start color of image (defaults to a fresh Color()).
    :type initial_color: Color
    :param include_corners: Boolean value.
    :type include_corners: bool
    :param wrap: Boolean value.
    :type wrap: bool
    :result: Image object.
    :rtype: Image
    """
    # bug fix: 'initial_color=Color()' was evaluated once at definition
    # time and shared by every Image; build a fresh default per call
    if initial_color is None:
        initial_color = Color()
    super(Image, self).__init__(pixel_dim, **kwargs)
    self.populate(initial_color)
|
Image constructor.
:param pixel_dim: Resolution of image.
:type pixel_dim: Interval or Tuple of two Integers
:param initial_color: Start color of image.
:type initial_color: Color
:param include_corners: Boolean value.
:type include_corners: bool
:param wrap: Boolean value.
:type wrap: bool
:result: Image object.
:rtype: Image
|
625941bbcc40096d6159580f
|
def read_label_file(dataset_dir, filename=LABELS_FILENAME):
    """Read the labels file and return a mapping from ID to class name.

    Args:
        dataset_dir: The directory in which the labels file is found.
        filename: The filename where the class names are written.

    Returns:
        A map from a label (integer) to class name.
    """
    labels_filename = os.path.join(dataset_dir, filename)
    with tf.gfile.Open(labels_filename, 'rb') as f:
        contents = f.read().decode()
    mapping = {}
    for line in contents.split('\n'):
        # skip blank lines (trailing newline produces one)
        if not line:
            continue
        sep = line.index(':')
        mapping[int(line[:sep])] = line[sep + 1:]
    return mapping
|
Reads the labels file and returns a mapping from ID to class name.
Args:
dataset_dir: The directory in which the labels file is found.
filename: The filename where the class names are written.
Returns:
A map from a label (integer) to class name.
|
625941bb26068e7796caeb96
|
def relation_from_DB(rel_data: RelationDB) -> Relation:
    """Build a Relation domain object from a DB query result row."""
    eprint = EPrint(arxiv_id=rel_data.arxiv_id, version=rel_data.arxiv_ver)
    resource = Resource(resource_type=rel_data.resource_type,
                        identifier=rel_data.resource_id)
    return Relation(identifier=str(rel_data.id),
                    relation_type=RelationType(rel_data.rel_type),
                    e_print=eprint,
                    resource=resource,
                    description=rel_data.description,
                    added_at=rel_data.added_at,
                    creator=rel_data.creator,
                    supercedes_or_suppresses=rel_data.supercedes_or_suppresses)
|
Retrieve a relation from a result of DB query.
|
625941bbd268445f265b4d2b
|
def select_and_apply_random_policy_augmix(policies, image, bboxes, mixture_width=3, mixture_depth=-1, alpha=1):
    """Select a random policy from `policies` and apply it to `image`,
    AugMix-style: several augmentation chains are mixed with Dirichlet
    weights, then blended with the original image by a Beta-sampled factor.

    Args:
        policies: list of callables (image, bboxes) -> (image, bboxes).
        image: image tensor to augment.
        bboxes: bounding boxes passed through the selected policies.
        mixture_width: number of augmentation chains to mix.
        mixture_depth: ops per chain; if <= 0, drawn uniformly from [1, 3].
        alpha: Dirichlet/Beta concentration parameter.

    Returns:
        Tuple of (mixed uint8 image, bboxes).
    """
    policy_to_select = tf.random_uniform([], maxval=len(policies), dtype=tf.int32)
    # per-chain mixing weights and the final blend factor
    ws = tfp.distributions.Dirichlet([alpha] * mixture_width).sample()
    m = tfp.distributions.Beta(alpha, alpha).sample()
    mix = tf.zeros_like(image, dtype=tf.float32)
    for j in range(mixture_width):
        aug_image = image
        # NOTE(review): np.random.randint fixes the depth when the graph is
        # built, not per training step — confirm that is intended.
        depth = mixture_depth if mixture_depth > 0 else np.random.randint(1, 4)
        for _ in range(depth):
            for (i, policy) in enumerate(policies):
                # tf.cond applies only the randomly selected policy; the
                # lambda default args bind the current policy/image by value
                aug_image, bboxes = tf.cond( tf.equal(i, policy_to_select), lambda policy_fn=policy, img=aug_image: policy_fn(img, bboxes), lambda img=aug_image: (img, bboxes))
        mix += ws[j] * tf.cast(aug_image, tf.float32)
    # blend the original image with the mixture and return to uint8
    mixed = tf.cast((1 - m) * tf.cast(image, tf.float32) + m * mix, tf.uint8)
    return (mixed, bboxes)
|
Select a random policy from `policies` and apply it to `image`.
|
625941bb8da39b475bd64e34
|
def register(self, key, default):
    """Register a section.setting, and set the default.

    Overwrites any previously stored default; the current value is only
    set when the setting has no value yet. The default value determines
    the type of the setting.

    :raises AttributeError: if *key* is not of the form 'section.setting'
    """
    if "." not in key:
        raise AttributeError("Invalid config section.setting name: '%s'" % key)
    section, setting = key.split(".", 1)
    # make sure the per-section containers exist
    self.data.setdefault(section, {})
    self.default.setdefault(section, {})
    section_callbacks = self.callbacks.setdefault(section, {})
    section_callbacks.setdefault(setting, [])
    # keep an existing value; only fill in when unset
    self.data[section].setdefault(setting, default)
    # deep-copy so later mutation of the live value never alters the default
    self.default[section][setting] = copy.deepcopy(default)
|
Register a section.setting, and set the default.
Will overwrite any previously stored default, and set the setting's value only if it does not already have one.
The default value determines the type of the setting.
|
625941bbd164cc6175782c0a
|
def get_intervals_between(target_date, base_date, res):
    """Return the number of intervals between *target_date* and *base_date*
    with a given interval resolution *res* in seconds.

    *target_date* must be greater or equal to *base_date*, and the
    difference must be a multiple of *res*; check_date_diff raises
    ValueError when these constraints are violated.
    """
    seconds = check_date_diff(target_date, base_date, res)
    return int(seconds // res)
|
Return the number of intervals between the two dates *target_date* and
*base_date* with a given interval resolution *res* in seconds.
*target_date* must be greater or equal to *base_date*. The difference
between both dates must be a multiple of *res*. Raise a :exc:`ValueError`
if these constraints are violated.
|
625941bb99fddb7c1c9de24f
|
def do_feature_extraction(files, dataset, feature_path, params, overwrite=False):
    """Extract features for a list of audio files and save them to disk.

    Parameters
    ----------
    files : list
        file list
    dataset : class
        dataset class
    feature_path : str
        path where the features are saved
    params : dict
        parameter dict
    overwrite : bool
        overwrite existing feature files (Default value=False)

    Returns
    -------
    nothing

    Raises
    ------
    IOError
        Audio file not found.
    """
    check_path(feature_path)
    for file_id, audio_filename in enumerate(files):
        # one feature file per audio file, named from the audio basename
        current_feature_file = get_feature_filename(audio_file=os.path.split(audio_filename)[1], path=feature_path)
        progress(title_text='Extracting', percentage=(float(file_id) / len(files)), note=os.path.split(audio_filename)[1])
        # skip files that already have features unless overwriting
        if not os.path.isfile(current_feature_file) or overwrite:
            if os.path.isfile(dataset.relative_to_absolute_path(audio_filename)):
                # load as mono at the configured sample rate
                y, fs = load_audio(filename=dataset.relative_to_absolute_path(audio_filename), mono=True, fs=params['fs'])
            else:
                raise IOError("Audio file not found [%s]" % audio_filename)
            feature_data = feature_extraction(y=y, fs=fs, include_mfcc0=params['include_mfcc0'], include_delta=params['include_delta'], include_acceleration=params['include_acceleration'], mfcc_params=params['mfcc'], delta_params=params['mfcc_delta'], acceleration_params=params['mfcc_acceleration'])
            save_data(current_feature_file, feature_data)
|
Feature extraction
Parameters
----------
files : list
file list
dataset : class
dataset class
feature_path : str
path where the features are saved
params : dict
parameter dict
overwrite : bool
overwrite existing feature files
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Audio file not found.
|
625941bb92d797404e304046
|
def latitudeStr(self):
    """Return the latitude formatted like: 28° 14' 23" N."""
    degrees, minutes, seconds, polarity = GeoInfo.latitudeToDegMinSec(self.latitude)
    # \xb0 is the degree sign
    return "{}\xb0 {}' {}\" {}".format(degrees, minutes, seconds, polarity)
|
Returns a string of the latitude in the form: 28° 14' 23" N
|
625941bb63f4b57ef0000fde
|
def canPartition(self, nums):
    """Decide whether *nums* can be split into two subsets of equal sum.

    Tracks the set of reachable subset sums; succeeds as soon as half the
    total is reachable. An odd total can never be split evenly.

    :type nums: List[int]
    :rtype: bool
    """
    total = sum(nums)
    if total % 2:
        return False
    half = total // 2
    reachable = {0}
    for value in nums:
        grown = set()
        for partial in reachable:
            candidate = value + partial
            if candidate == half:
                return True
            grown.add(candidate)
        reachable |= grown
    return False
|
:type nums: List[int]
:rtype: bool
|
625941bb55399d3f05588570
|
def plot_summary_axes(graph: BELGraph, lax, rax, logx: bool = True):
    """Plot the graph summary statistics on the given axes.

    Shows:
    1. Count of nodes, grouped by function type (left axis)
    2. Count of edges, grouped by relation type (right axis)

    After, you should run :func:`plt.tight_layout` and you must run
    :func:`plt.show` to view.

    :param graph: A BEL graph
    :param lax: An axis object from matplotlib
    :param rax: An axis object from matplotlib
    :param logx: Use a logarithmic x scale on both axes
    """
    function_counter = graph.count.functions()
    relation_counter = graph.count.relations()
    # counters -> sorted two-column DataFrames for seaborn
    function_df = pd.DataFrame.from_dict(dict(function_counter), orient='index').reset_index()
    function_df.columns = ['Function', 'Count']
    function_df.sort_values('Count', ascending=False, inplace=True)
    relation_df = pd.DataFrame.from_dict(dict(relation_counter), orient='index').reset_index()
    relation_df.columns = ['Relation', 'Count']
    relation_df.sort_values('Count', ascending=False, inplace=True)
    sns.barplot(x='Count', y='Function', data=function_df, ax=lax, orient='h')
    lax.set_title('Number of nodes: {}'.format(graph.number_of_nodes()))
    sns.barplot(x='Count', y='Relation', data=relation_df, ax=rax, orient='h')
    rax.set_title('Number of edges: {}'.format(graph.number_of_edges()))
    if logx:
        lax.set_xscale('log')
        rax.set_xscale('log')
|
Plot the graph summary statistics on the given axes.
After, you should run :func:`plt.tight_layout` and you must run :func:`plt.show` to view.
Shows:
1. Count of nodes, grouped by function type
2. Count of edges, grouped by relation type
:param pybel.BELGraph graph: A BEL graph
:param lax: An axis object from matplotlib
:param rax: An axis object from matplotlib
Example usage:
>>> import matplotlib.pyplot as plt
>>> from pybel import from_pickle
>>> from pybel_tools.summary import plot_summary_axes
>>> graph = from_pickle('~/dev/bms/aetionomy/parkinsons.gpickle')
>>> fig, axes = plt.subplots(1, 2, figsize=(10, 4))
>>> plot_summary_axes(graph, axes[0], axes[1])
>>> plt.tight_layout()
>>> plt.show()
|
625941bbe64d504609d746fd
|
def send_email_pyzmail(self, maildict):
    """Send a mail message via pyzmail using the configured SMTP settings.

    :param maildict: dict with 'to', 'subject' and 'body' entries; 'to'
        may be a single address string or a list of addresses
    :return: None on success, otherwise an EFailure describing the error
    """
    mailsettings = self.get_setting_value(mconst.DEF_SETTINGSEC_mail)
    efrom = mailsettings['mail_from']
    eto = maildict['to']
    esubject = maildict['subject']
    ebody = maildict['body']
    preferred_encoding = 'iso-8859-1'
    text_encoding = preferred_encoding
    # normalize a single recipient to a list (py2: basestring)
    if (isinstance(eto, basestring)):
        eto = [eto]
    # NOTE(review): compose_mail's mail_from return value has been observed
    # to be unreliable, so efrom/eto are reused for send_mail below.
    (payload, mail_from, rcpt_to, msg_id) = pyzmail.compose_mail(efrom, eto, esubject, preferred_encoding, (ebody,text_encoding))
    smtp_host = mailsettings['smtp_host']
    smtp_port = mailsettings['smtp_port']
    smtp_mode = mailsettings['smtp_mode']
    smtp_login = mailsettings['smtp_login']
    smtp_password = mailsettings['smtp_password']
    ret=pyzmail.send_mail(payload, efrom, eto, smtp_host=smtp_host, smtp_port=smtp_port, smtp_mode=smtp_mode, smtp_login=smtp_login, smtp_password=smtp_password)
    # send_mail returns a dict of failed recipients on (partial) failure
    if isinstance(ret, dict):
        if ret:
            return EFailure('failed recipients: ' + ', '.join(ret.keys()))
        else:
            # empty dict: everything was delivered
            return None
    # non-dict return: an error description string
    return EFailure('error:'+ ret)
|
Send a mail message.
pyzmail is supposed to be good but my experience with it has been terrible. It fails mysteriously and takes minutes to time out.
and the compose_mail function seems to return broken value from mail_from return argument.
|
625941bb4f88993c3716bf29
|
def compound(tgt, minion_id=None):
    """Return True if the minion ID matches the given compound target.

    minion_id
        Specify the minion ID to match against the target expression

    .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' match.compound 'L@cheese,foo and *'
    """
    if minion_id is not None:
        # the matcher expects a string ID
        if not isinstance(minion_id, str):
            minion_id = str(minion_id)
    matchers = salt.loader.matchers(__opts__)
    try:
        ret = matchers["compound_match.match"](tgt, opts=__opts__, minion_id=minion_id)
    except Exception as exc:
        # a broken target expression should not crash the caller
        log.exception(exc)
        ret = False
    return ret
|
Return True if the minion ID matches the given compound target
minion_id
Specify the minion ID to match against the target expression
.. versionadded:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' match.compound 'L@cheese,foo and *'
|
625941bb8a349b6b435e8031
|
def single_random_user(ev_room):
    """Return a single random user from the users chatting in a room.

    :param ev_room: Room to select users from
    :return: A single user tuple
    """
    chatters = GlobalVars.users_chatting[ev_room]
    return random.choice(chatters)
|
Returns a single user name from users in a room
:param ev_room: Room to select users from
:return: A single user tuple
|
625941bbd18da76e2353238f
|
def env_restore(self):
    """Called when we make a restore; currently only logs the event."""
    self.log.info("(EnvSetup) Restore Environment...")
|
Called when we make a restore
|
625941bb38b623060ff0acac
|
def test_as_dict():
    """Mutating the dict returned by the as_dict property must not change
    the underlying Ip4Filter, i.e. as_dict returns a fresh copy each time.
    """
    ipf = ilf.Ip4Filter()
    ipf.add(0, ['1.1.1.1'], ['2.2.2.2'], ['80/tcp'])
    m0 = ipf.as_dict
    # mutate one returned copy ...
    ipf.as_dict[2] = None
    m1 = ipf.as_dict
    # ... and check a freshly retrieved copy is unaffected
    assert m0 == m1
|
changing as_dict property does not change Ip4Filter
|
625941bba05bb46b383ec6e1
|
def grab_cinder_dat(build_dir="", datapath=''):
    """Grab the cinder.dat file from the DATAPATH directory if it is not
    already present in *build_dir*.

    :param build_dir: directory the file should end up in
    :param datapath: explicit data directory; falls back to $DATAPATH
    :return: True when cinder.dat is present or was copied, False when it
        could not be found (a failure message is printed in that case)
    """
    build_filename = os.path.join(build_dir, 'cinder.dat')
    if os.path.exists(build_filename):
        return True
    # resolve the data path: explicit argument wins over the environment
    # (basestring: this module still targets Python 2)
    if isinstance(datapath, basestring) and 0 < len(datapath):
        pass
    elif 'DATAPATH' in os.environ:
        datapath = os.environ['DATAPATH']
    else:
        print(failure("DATAPATH not defined in environment; cinder.dat not found - skipping."))
        return False
    # case-insensitive glob so any capitalization of CINDER.DAT matches
    local_filename = os.path.join(datapath, "[Cc][Ii][Nn][Dd][Ee][Rr].[Dd][Aa][Tt]")
    local_filename = glob(local_filename)
    if 0 < len(local_filename):
        print("Grabbing cinder.dat from " + datapath)
        shutil.copy(local_filename[0], build_filename)
        rtn = True
    else:
        print(failure("cinder.dat file not found in DATAPATH dir - skipping."))
        rtn = False
    return rtn
|
Grabs the cinder.dat file from the DATAPATH directory if not already present.
|
625941bb76e4537e8c351534
|
def vecino_aleatorio(self, estado):
    """Generate a random neighbor of a state.

    Implementations should draw the neighbor from a uniform distribution
    where possible.

    @param estado: a tuple describing a state
    @return: a tuple with a neighboring state
    @raise NotImplementedError: always; subclasses must override this.
    """
    # abstract method; the Spanish message means "this method must be implemented"
    raise NotImplementedError("Este metodo debe ser implementado")
|
Genera un vecino de un estado en forma aleatoria.
Procurar generar el estado vecino a partir de una
distribución uniforme de ser posible.
@param estado: Una tupla que describe un estado
@return: Una tupla con un estado vecino.
|
625941bb3346ee7daa2b2c27
|
def remove(self, val):
    """Remove a value from the collection.

    :type val: int
    :rtype: bool -- True if the collection contained the element
    """
    try:
        # EAFP: a single remove() instead of an 'in' scan followed by a
        # second scan inside list.remove() (also drops the stray semicolons)
        self.randomList.remove(val)
        return True
    except ValueError:
        return False
|
Removes a value from the collection. Returns true if the collection contained the specified element.
:type val: int
:rtype: bool
|
625941bb31939e2706e4cd2b
|
def DrawFocusRect(self,rect:'Tuple[Any, Any, Any, Any]') -> 'None': <NEW_LINE> <INDENT> pass
|
Draws a rectangle in the style used to
indicate the rectangle has focus
Args:
rect(Tuple[Any, Any, Any, Any]):The coordinates of the rectangleMFC References
Returns:
None
|
625941bb31939e2706e4cd2c
|
def letterCasePermutation(self, S): <NEW_LINE> <INDENT> chars = list(S) <NEW_LINE> perms = [] <NEW_LINE> self.generatePerms(chars, 0, perms) <NEW_LINE> return perms
|
:type S: str
:rtype: List[str]
|
625941bb046cf37aa974cc07
|
def makeControllerInScopes(self, scopes): <NEW_LINE> <INDENT> call_log = [] <NEW_LINE> def scope_cb(scope): <NEW_LINE> <INDENT> call_log.append(scope) <NEW_LINE> return scope in scopes <NEW_LINE> <DEDENT> controller = FeatureController(scope_cb, StormFeatureRuleSource()) <NEW_LINE> return controller, call_log
|
Make a controller that will report it's in the given scopes.
|
625941bb85dfad0860c3ad16
|
def assessment_is_finished(submission_uuid, requirements): <NEW_LINE> <INDENT> return bool(get_latest_assessment(submission_uuid))
|
Determine if the assessment of the given submission is completed. This
checks to see if the AI has completed the assessment.
Args:
submission_uuid (str): The UUID of the submission being graded.
requirements (dict): Not used.
Returns:
True if the assessment has been completed for this submission.
|
625941bbb57a9660fec3373e
|
def setUp(self): <NEW_LINE> <INDENT> self._db = SqliteDatabase('file:cachedb?mode=memory&cache=shared') <NEW_LINE> for model in [IngestState]: <NEW_LINE> <INDENT> model.bind(self._db, bind_refs=False, bind_backrefs=False) <NEW_LINE> <DEDENT> self._db.connect() <NEW_LINE> self._db.create_tables([IngestState])
|
Setup the database with in memory sqlite.
|
625941bbd58c6744b4257b1e
|
def export_to_line_protocol(self): <NEW_LINE> <INDENT> self.add_tag(['device', 'mode']) <NEW_LINE> self.add_tag(['device', 'state']) <NEW_LINE> self.add_tags(['device', 'additionalData'], prefix=['device']) <NEW_LINE> self.add_tag(['part', 'id']) <NEW_LINE> self.add_tag(['part', 'type']) <NEW_LINE> self.add_tag(['part', 'typeId']) <NEW_LINE> self.add_tag(['part', 'code']) <NEW_LINE> self.add_tag(['part', 'result']) <NEW_LINE> self.add_tags(['part', 'additionalData'], prefix=['part']) <NEW_LINE> for measurement in self.data['measurements']: <NEW_LINE> <INDENT> meas_obj = Measurement(json.dumps(measurement), self.hostname()) <NEW_LINE> meas_obj.add_tag(['code'], ['process']) <NEW_LINE> meas_obj.add_tag(['name'], ['process']) <NEW_LINE> meas_obj.add_tag(['phase'], ['process']) <NEW_LINE> meas_obj.add_tag(['result'], ['process']) <NEW_LINE> meas_obj.add_tags(['additionalData'], prefix=['process']) <NEW_LINE> timestamp = parser.parse(measurement['ts']) <NEW_LINE> keys = list(filter(lambda key: key != 'time', measurement['series'].keys())) <NEW_LINE> self.points = [] <NEW_LINE> for index in range(0, len(measurement['series'][keys[0]])): <NEW_LINE> <INDENT> fields = {key.replace(' ', '_'): measurement['series'][key][index] for key in keys if key != '' and measurement['series'][key][index] != ''} <NEW_LINE> ts_w_offset = timestamp + timedelta(milliseconds=measurement['series']['time'][index]) <NEW_LINE> ts_w_offset = int(round(ts_w_offset.timestamp() * 1000000000)) <NEW_LINE> self.add_point(fields, ts_w_offset) <NEW_LINE> <DEDENT> current_tags = self.tags.copy() <NEW_LINE> current_tags.update(meas_obj.tags) <NEW_LINE> if len(fields) > 0: <NEW_LINE> <INDENT> tmp = line_protocol.make_lines({'tags': current_tags, 'points': self.points}) <NEW_LINE> self.line_protocol_data += tmp <NEW_LINE> <DEDENT> <DEDENT> return self.line_protocol_data if self.line_protocol_data else None
|
Export object to InfluxDB Line Protocol syntax
|
625941bbab23a570cc25003d
|
def get_transactions(self, tx_hashes): <NEW_LINE> <INDENT> data = { "txs_hashes": tx_hashes, "decode_as_json": True } <NEW_LINE> r = self.make_request(data, "gettransactions") <NEW_LINE> response = r.json() <NEW_LINE> txs = [] <NEW_LINE> for tx, tx_hash in zip(response["txs_as_json"], tx_hashes): <NEW_LINE> <INDENT> mtx = MoneroTransaction(tx_hash) <NEW_LINE> mtx.from_rpc(json.loads(tx)) <NEW_LINE> txs.append(mtx) <NEW_LINE> <DEDENT> return txs
|
:rtype: list[MoneroTransaction]
|
625941bb462c4b4f79d1d58d
|
def get_power(self): <NEW_LINE> <INDENT> return 0.5 * (1 + self._health) * random.randint(50 + self._experience, 100) / 100
|
Calculate the power of the unit
:return: attack power
:rtype: float
|
625941bb3eb6a72ae02ec392
|
def test_empty_ensure_index_defaults(self): <NEW_LINE> <INDENT> indices = { 'index': {} } <NEW_LINE> handler = ConnectionHandler({}, indices) <NEW_LINE> handler.ensure_index_defaults('index') <NEW_LINE> index = handler.indices['index'] <NEW_LINE> expected_index = { 'NAME': 'index', 'ALIASES': [], 'SETTINGS': None, } <NEW_LINE> assert index == expected_index
|
Assert default values are set properly on an empty index.
|
625941bb711fe17d8254222f
|
def writeValidatedTargetsFile(header, validatedTargets, outputFile): <NEW_LINE> <INDENT> f = open(outputFile,'w') <NEW_LINE> f.write(header + ',cleavage position,PARE reads,10 nt window abundance,' 'PARE reads/window abundance,p-value\n') <NEW_LINE> for target in validatedTargets: <NEW_LINE> <INDENT> if(float(target[len(target)-1]) < 0.5): <NEW_LINE> <INDENT> for j in range(len(target)): <NEW_LINE> <INDENT> f.write(str(target[j])) <NEW_LINE> if(j == len(target)-1): <NEW_LINE> <INDENT> f.write('\n') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> f.write(',')
|
Write validated targets to an output file
Args:
header: the stripped header from the target file
validatedTargets: list of the validated targets
outputFile: file to output validated targets to
|
625941bbd58c6744b4257b1d
|
def move_row_right(self, row): <NEW_LINE> <INDENT> d = deque(self.maze_pieces[row]) <NEW_LINE> old_maze = d.popleft() <NEW_LINE> d.append(self.current_maze) <NEW_LINE> self.maze_pieces[row] = d <NEW_LINE> self.current_maze = old_maze
|
Move a row left and add the free maze to the end. Movable rows [1, 3, 5]
|
625941bb167d2b6e31218a54
|
def test_if2(self): <NEW_LINE> <INDENT> self.assertEqual(mint.Template('#if False:\n' ' true\n' '#else:\n' ' false').render(), 'false\n')
|
if-else statements
|
625941bb23849d37ff7b2f4f
|
def check_if_dead(self, to_save=True): <NEW_LINE> <INDENT> from xpathscraper.utils import can_get_url <NEW_LINE> url = 'http://instagram.com/' + self.username <NEW_LINE> res = can_get_url(url) <NEW_LINE> if not res: <NEW_LINE> <INDENT> print("%s is dead" % url) <NEW_LINE> if to_save: <NEW_LINE> <INDENT> self.append_tag('DEAD') <NEW_LINE> <DEDENT> return True <NEW_LINE> <DEDENT> return False
|
This function marks a profile as DEAD if url raises an exception
:param to_save: whether to save or not
:return:
|
625941bbbe8e80087fb20b05
|
def manhattan_distance(p1, p2): <NEW_LINE> <INDENT> return abs(p1[0] - p2[0]) + abs(p1[1] - p2[1])
|
Returns the Manhattan distance between points p1 and p2
|
625941bb4f88993c3716bf2a
|
def test_content_summary(self): <NEW_LINE> <INDENT> self.assertEqual(len(self.to_json[str(self.now.year)][self.user1]), 0) <NEW_LINE> self.assertEqual(len(self.to_json[str(self.now.year)][self.user2]), 0)
|
core does not show current month
|
625941bb097d151d1a222d19
|
def calc_wedge_bounds(levels, level_width): <NEW_LINE> <INDENT> inners = levels * level_width <NEW_LINE> outers = inners + level_width <NEW_LINE> return inners, outers
|
Calculate inner and outer radius bounds of the donut wedge based on levels.
|
625941bb4a966d76dd550eca
|
def get_y_aperture_extent(self): <NEW_LINE> <INDENT> od = [1.0e10, -1.0e10] <NEW_LINE> if len(self.edge_apertures) > 0: <NEW_LINE> <INDENT> for e in self.edge_apertures: <NEW_LINE> <INDENT> edg = e.bounding_box() <NEW_LINE> if edg[0][1] < od[0]: <NEW_LINE> <INDENT> od[0] = edg[0][1] <NEW_LINE> <DEDENT> if edg[1][1] > od[1]: <NEW_LINE> <INDENT> od[1] = edg[1][1] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> elif len(self.clear_apertures) > 0: <NEW_LINE> <INDENT> for ca in self.clear_apertures: <NEW_LINE> <INDENT> ap = ca.bounding_box() <NEW_LINE> if ap[0][1] < od[0]: <NEW_LINE> <INDENT> od[0] = ap[0][1] <NEW_LINE> <DEDENT> if ap[1][1] > od[1]: <NEW_LINE> <INDENT> od[1] = ap[1][1] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> od = [-self.max_aperture, self.max_aperture] <NEW_LINE> <DEDENT> return od
|
returns [y_min, y_max] for the union of apertures
|
625941bb9f2886367277a74e
|
def create(self, request): <NEW_LINE> <INDENT> d = request.data <NEW_LINE> metric = None <NEW_LINE> if 'pod_name' in d: <NEW_LINE> <INDENT> pod = KubePod.objects.filter(name=d['pod_name']).first() <NEW_LINE> if pod is None: <NEW_LINE> <INDENT> return Response({ 'status': 'Not Found', 'message': 'Pod not found' }, status=status.HTTP_404_NOT_FOUND) <NEW_LINE> <DEDENT> metric = KubeMetric( name=d['name'], date=parse_datetime(d['date']), value=d['value'], metadata=d['metadata'], cumulative=d['cumulative'], pod=pod) <NEW_LINE> metric.save() <NEW_LINE> return Response( metric, status=status.HTTP_201_CREATED ) <NEW_LINE> <DEDENT> elif 'run_id' in d: <NEW_LINE> <INDENT> run = ModelRun.objects.get(pk=d['run_id']) <NEW_LINE> if run is None: <NEW_LINE> <INDENT> return Response({ 'status': 'Not Found', 'message': 'Run not found' }, status=status.HTTP_404_NOT_FOUND) <NEW_LINE> <DEDENT> metric = KubeMetric( name=d['name'], date=parse_datetime(d['date']), value=d['value'], metadata=d['metadata'], cumulative=d['cumulative'], model_run=run) <NEW_LINE> metric.save() <NEW_LINE> serializer = KubeMetricsSerializer(metric, many=False) <NEW_LINE> return Response( serializer.data, status=status.HTTP_201_CREATED ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return Response({ 'status': 'Bad Request', 'message': 'Pod Name or run id have to be supplied', 'data': d, }, status=status.HTTP_400_BAD_REQUEST)
|
Create a new metric
Arguments:
request {[Django request]} -- The request object
Returns:
Json -- Returns posted values
|
625941bbfb3f5b602dac354d
|
def find(self, random_values): <NEW_LINE> <INDENT> random_values *= self.tree[0] <NEW_LINE> tree_idxs = np.zeros(len(random_values), dtype=np.int32) <NEW_LINE> for _ in range(self.tree_level - 1): <NEW_LINE> <INDENT> tree_idxs = 2 * tree_idxs + 1 <NEW_LINE> left_values = self.tree[tree_idxs] <NEW_LINE> where_right = np.where(random_values > left_values)[0] <NEW_LINE> tree_idxs[where_right] += 1 <NEW_LINE> random_values[where_right] -= left_values[where_right] <NEW_LINE> <DEDENT> return tree_idxs
|
Random values: numpy array of floats in range [0, 1]
|
625941bbde87d2750b85fc4c
|
def _write_bar_text(self, cr, text, bar_height, x_pos): <NEW_LINE> <INDENT> if self.noselection: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> cr.save() <NEW_LINE> cr.select_font_face("Sans", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_BOLD) <NEW_LINE> _, _, width, height, _, _ = cr.text_extents(text) <NEW_LINE> center = (self.bar_draw["bar_width"] / 2.0) + (height / 2.0) <NEW_LINE> if self.bar_draw["start_y"] - bar_height != 0: <NEW_LINE> <INDENT> if self.bar_draw["start_y"] - bar_height - width - 3 == 0: <NEW_LINE> <INDENT> h_pos = bar_height + width <NEW_LINE> <DEDENT> elif self.bar_draw["start_y"] - bar_height - width - 3 >= 3: <NEW_LINE> <INDENT> h_pos = bar_height + width + 3 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> h_pos = self.bar_draw["start_y"] - 3 <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> h_pos = bar_height - 3 <NEW_LINE> <DEDENT> cr.move_to(self.bar_draw["start_x"] + x_pos + center + 1, h_pos) <NEW_LINE> cr.rotate(- PI/2) <NEW_LINE> cr.set_source_rgb(0, 0, 0) <NEW_LINE> cr.show_text(text) <NEW_LINE> cr.rotate(PI/2) <NEW_LINE> cr.move_to(self.bar_draw["start_x"] + x_pos + center, h_pos - 1) <NEW_LINE> cr.rotate(- PI/2) <NEW_LINE> cr.set_source_rgb(1, 1, 1) <NEW_LINE> cr.show_text(text) <NEW_LINE> cr.rotate(PI/2) <NEW_LINE> cr.restore()
|
Write text above or inside bar.
|
625941bb796e427e537b0480
|
def adjust_learning_rate(optimizer, epoch): <NEW_LINE> <INDENT> lr = args.lr * (args.lr_decay ** (epoch // args.epoch_decay)) <NEW_LINE> lr = lr * args.world_size <NEW_LINE> for param_group in optimizer.param_groups: <NEW_LINE> <INDENT> param_group['lr'] = lr * param_group['lr_mult'] <NEW_LINE> <DEDENT> print_log('Setting LR for this epoch to {} = {} GPUs * {}'.format( lr, args.world_size, lr/args.world_size))
|
Sets the learning rate to the initial LR decayed by 10 every 30 epochs
|
625941bb4f6381625f1148fb
|
def get_user_dn(con, account_base_dn, account_pattern, scope=ldap.SCOPE_SUBTREE): <NEW_LINE> <INDENT> with ldap_error_handler(): <NEW_LINE> <INDENT> user_data = con.search_s(account_base_dn, scope, account_pattern) <NEW_LINE> if user_data: <NEW_LINE> <INDENT> user_dn = user_data[0][0] <NEW_LINE> LOG.debug("Found user: %s", user_dn) <NEW_LINE> return user_dn <NEW_LINE> <DEDENT> <DEDENT> LOG.debug("Searching for user failed with pattern: %s", account_pattern) <NEW_LINE> LOG.debug("Account base DN: %s", account_base_dn) <NEW_LINE> return None
|
Search for the user dn based on the account pattern.
Return the full user dn None if search failed.
|
625941bb4527f215b584c318
|
def reset (self, **kw): <NEW_LINE> <INDENT> self.rows = kw.get("rows") or 0 <NEW_LINE> self.columns = kw.get("columns") or 0 <NEW_LINE> _fill_with = kw.get("fill_with") <NEW_LINE> if callable(_fill_with): <NEW_LINE> <INDENT> self.fill_with = _fill_with <NEW_LINE> <DEDENT> self.reset_contents(**kw) <NEW_LINE> return self
|
resets matrix to fit new @kw keyword arguments;
supported keywords: rows, columns, fill_with;
|
625941bb15fb5d323cde09c9
|
def __init__( self, hass: HomeAssistant, config_entry: ConfigEntry, implementation: AbstractOAuth2Implementation, ): <NEW_LINE> <INDENT> self._hass = hass <NEW_LINE> self._config_entry = config_entry <NEW_LINE> self._implementation = implementation <NEW_LINE> self.session = OAuth2Session(hass, config_entry, implementation)
|
Initialize object.
|
625941bbfb3f5b602dac354e
|
@pytest.fixture <NEW_LINE> def editor_splitter_bot(qtbot): <NEW_LINE> <INDENT> es = EditorSplitter(None, Mock(), [], first=True) <NEW_LINE> qtbot.addWidget(es) <NEW_LINE> es.show() <NEW_LINE> return es
|
Create editor splitter.
|
625941bb1f5feb6acb0c4a12
|
def cal(self, raw): <NEW_LINE> <INDENT> return raw * self.slope + self.inter
|
Transform raw ATWD to calibrated.
|
625941bb956e5f7376d70d36
|
def read_sorted_indices(self, what, start, stop, step): <NEW_LINE> <INDENT> (start, stop, step) = self._process_range(start, stop, step) <NEW_LINE> if start >= stop: <NEW_LINE> <INDENT> return np.empty(0, self.dtype) <NEW_LINE> <DEDENT> if step < 0: <NEW_LINE> <INDENT> tmp = start <NEW_LINE> start = self.nelements - stop <NEW_LINE> stop = self.nelements - tmp <NEW_LINE> <DEDENT> if what == "sorted": <NEW_LINE> <INDENT> values = self.sorted <NEW_LINE> valuesLR = self.sortedLR <NEW_LINE> buffer_ = np.empty(stop - start, dtype=self.dtype) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> values = self.indices <NEW_LINE> valuesLR = self.indicesLR <NEW_LINE> buffer_ = np.empty(stop - start, dtype="u%d" % self.indsize) <NEW_LINE> <DEDENT> ss = self.slicesize <NEW_LINE> nrow_start = start // ss <NEW_LINE> istart = start % ss <NEW_LINE> nrow_stop = stop // ss <NEW_LINE> tlen = stop - start <NEW_LINE> bstart = 0 <NEW_LINE> ilen = 0 <NEW_LINE> for nrow in range(nrow_start, nrow_stop + 1): <NEW_LINE> <INDENT> blen = ss - istart <NEW_LINE> if ilen + blen > tlen: <NEW_LINE> <INDENT> blen = tlen - ilen <NEW_LINE> <DEDENT> if blen <= 0: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> if nrow < self.nslices: <NEW_LINE> <INDENT> self.read_slice( values, nrow, buffer_[bstart:bstart + blen], istart) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.read_slice_lr( valuesLR, buffer_[bstart:bstart + blen], istart) <NEW_LINE> <DEDENT> istart = 0 <NEW_LINE> bstart += blen <NEW_LINE> ilen += blen <NEW_LINE> <DEDENT> return buffer_[::step]
|
Return the sorted or indices values in the specified range.
|
625941bb91af0d3eaac9b8d3
|
def _unsubscribe(self, subscription): <NEW_LINE> <INDENT> assert(isinstance(subscription, Subscription)) <NEW_LINE> assert subscription.active <NEW_LINE> assert(subscription.id in self._subscriptions) <NEW_LINE> if not self._transport: <NEW_LINE> <INDENT> raise exception.TransportLost() <NEW_LINE> <DEDENT> request = util.id() <NEW_LINE> d = self.create_future() <NEW_LINE> self._unsubscribe_reqs[request] = (d, subscription) <NEW_LINE> msg = message.Unsubscribe(request, subscription.id) <NEW_LINE> self._transport.send(msg) <NEW_LINE> return d
|
Called from :meth:`autobahn.wamp.protocol.Subscription.unsubscribe`
|
625941bb92d797404e304047
|
@shared_task <NEW_LINE> def test_admin_user_connectability_util(admin_user, task_name): <NEW_LINE> <INDENT> from ops.utils import update_or_create_ansible_task <NEW_LINE> assets = admin_user.get_related_assets() <NEW_LINE> hosts = [asset.hostname for asset in assets] <NEW_LINE> tasks = const.TEST_ADMIN_USER_CONN_TASKS <NEW_LINE> task, created = update_or_create_ansible_task( task_name=task_name, hosts=hosts, tasks=tasks, pattern='all', options=const.TASK_OPTIONS, run_as_admin=True, created_by='System', ) <NEW_LINE> result = task.run() <NEW_LINE> set_admin_user_connectability_info(result, admin_user=admin_user.name) <NEW_LINE> return result
|
Test asset admin user can connect or not. Using ansible api do that
:param admin_user:
:param task_name:
:return:
|
625941bb63b5f9789fde6fa3
|
def load(self, setupfile): <NEW_LINE> <INDENT> with open(setupfile, "r") as sfile: <NEW_LINE> <INDENT> lines = sfile.readlines() <NEW_LINE> <DEDENT> self.path = setupfile <NEW_LINE> self.loadLines(lines) <NEW_LINE> self.flight = self.parseFlight(setupfile) <NEW_LINE> self.original_ofile = self.ofile
|
Read and parse a setup file.
|
625941bbd99f1b3c44c67453
|
def get_reward(self, state, sim): <NEW_LINE> <INDENT> d_alt = abs(sim.get_property_value(c.delta_altitude)) <NEW_LINE> reward = (self.worstCaseAltitudeDelta - d_alt) / self.worstCaseAltitudeDelta <NEW_LINE> if sim.get_property_value(c.simulation_sim_time_sec) >= self.maxSimTime: <NEW_LINE> <INDENT> reward = 100.0 <NEW_LINE> <DEDENT> self.mostRecentRewards = { 'delta_alt': d_alt, 'reward': reward, } <NEW_LINE> self.stepCount += 1 <NEW_LINE> self.simTime = sim.get_property_value(c.simulation_sim_time_sec) <NEW_LINE> return reward
|
Reward according to just altitude.
|
625941bb090684286d50eb9f
|
def countPlayers(): <NEW_LINE> <INDENT> db = connect() <NEW_LINE> c = db.cursor() <NEW_LINE> c.execute('SELECT count(*) FROM players;') <NEW_LINE> registered_players = c.fetchone()[0] <NEW_LINE> db.commit() <NEW_LINE> db.close() <NEW_LINE> return registered_players
|
Returns the number of players currently registered.
|
625941bb498bea3a759b996e
|
def cookiecutter(input_dir, checkout=None): <NEW_LINE> <INDENT> if input_dir.endswith('.git'): <NEW_LINE> <INDENT> got_repo_arg = True <NEW_LINE> repo_dir = git_clone(input_dir, checkout) <NEW_LINE> project_template = find_template(repo_dir) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> got_repo_arg = False <NEW_LINE> project_template = find_template(input_dir) <NEW_LINE> <DEDENT> config_file = os.path.join(os.path.dirname(project_template), 'cookiecutter.json') <NEW_LINE> logging.debug('config_file is {0}'.format(config_file)) <NEW_LINE> context = generate_context( config_file=config_file ) <NEW_LINE> if got_repo_arg: <NEW_LINE> <INDENT> cookiecutter_dict = prompt_for_config(context) <NEW_LINE> context['cookiecutter'] = cookiecutter_dict <NEW_LINE> <DEDENT> generate_files( template_dir=project_template, context=context ) <NEW_LINE> if got_repo_arg: <NEW_LINE> <INDENT> generated_project = context['cookiecutter']['repo_name'] <NEW_LINE> remove_repo(repo_dir, generated_project)
|
API equivalent to using Cookiecutter at the command line.
:param input_dir: A directory containing a project template dir,
or a URL to git repo.
:param checkout: The branch, tag or commit ID to checkout after clone
|
625941bb76d4e153a657e9ee
|
def show_tasklists(self): <NEW_LINE> <INDENT> for tasklist in self.tasklists: <NEW_LINE> <INDENT> print(tasklist.name)
|
Printar ut alla uppgiftslistor.
|
625941bb236d856c2ad4469b
|
def _read_hdf5( self, path, datasets=("train", "valid", "test"), max_size: Optional[int] = None ): <NEW_LINE> <INDENT> with h5py.File(path, "r") as f: <NEW_LINE> <INDENT> self.datasets: Dict[str, Dataset] = { dataset: Dataset(f, dataset, max_size=max_size) for dataset in datasets }
|
This function read the data from the HDF5 file into the datasets, loading them
into memory.
Args:
path (Path): Path to the HDF5 file.
datasets (Tuple[str,...]): Tuple with which datasets should be loaded.
max_size (Optional[int]): Maximum sequence length to load.
|
625941bbc4546d3d9de728ef
|
def synced(func): <NEW_LINE> <INDENT> def wrapper(self, *args, **kwargs): <NEW_LINE> <INDENT> task = DataManagerTask(func, *args, **kwargs) <NEW_LINE> self.submit_task(task) <NEW_LINE> return task.get_results() <NEW_LINE> <DEDENT> return wrapper
|
Decorator for functions that should be called synchronously from another thread
:param func: function to call
|
625941bb293b9510aa2c3156
|
def get_init_flt(): <NEW_LINE> <INDENT> user = get_user() <NEW_LINE> if user in INIT_QUERIES: <NEW_LINE> <INDENT> return INIT_QUERIES[user] <NEW_LINE> <DEDENT> if isinstance(user, basestring) and '@' in user: <NEW_LINE> <INDENT> realm = user[user.index('@'):] <NEW_LINE> if realm in INIT_QUERIES: <NEW_LINE> <INDENT> return INIT_QUERIES[realm] <NEW_LINE> <DEDENT> <DEDENT> if config.WEB_PUBLIC_SRV: <NEW_LINE> <INDENT> return db.nmap.searchcategory(["Shared", get_anonymized_user()]) <NEW_LINE> <DEDENT> return DEFAULT_INIT_QUERY
|
Return a filter corresponding to the current user's
privileges.
|
625941bb60cbc95b062c6407
|
def oversampling(self, times=2): <NEW_LINE> <INDENT> diff_samples = self.ds['train']['x'][:, -1, -1] != self.ds['train']['y'][:, -1] <NEW_LINE> diff_samples_x = self.ds['train']['x'][diff_samples] <NEW_LINE> diff_samples_y = self.ds['train']['y'][diff_samples] <NEW_LINE> diff_samples_dt = self.ds['train']['dt'][diff_samples] <NEW_LINE> for i in range(times-1): <NEW_LINE> <INDENT> self.ds['train']['x'] = np.concatenate([self.ds['train']['x'], diff_samples_x], axis=0) <NEW_LINE> self.ds['train']['y'] = np.concatenate([self.ds['train']['y'], diff_samples_y], axis=0) <NEW_LINE> self.ds['train']['dt'] = np.concatenate([self.ds['train']['dt'], diff_samples_dt], axis=0) <NEW_LINE> <DEDENT> p = np.random.permutation(len(self.ds['train']['x'])) <NEW_LINE> self.ds['train']['x'] = self.ds['train']['x'][p] <NEW_LINE> self.ds['train']['y'] = self.ds['train']['y'][p] <NEW_LINE> self.ds['train']['dt'] = self.ds['train']['dt'][p]
|
swell_t-1과 맞춰야하는 swell이 다른 샘플들을 오버샘플링
마지막 feature가 swell_t-1 이어야 함
times: 배수 e.g. 1이면 데이터셋 변함 없음. (int)
|
625941bb21bff66bcd684813
|
def _evaluation(self, point, cache): <NEW_LINE> <INDENT> if id(self) not in cache: <NEW_LINE> <INDENT> left = self.expr1._evaluation(point, cache) <NEW_LINE> right = self.expr2._evaluation(point, cache) <NEW_LINE> cache[id(self)] = left / right <NEW_LINE> <DEDENT> return cache[id(self)]
|
:param point:
:param cache:
:return:
|
625941bbbde94217f3682cb9
|
def init_sdf(local_id=0): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> list(environ.keys()).index("LSB_JOBNAME") <NEW_LINE> job_id = int(environ["SLURM_ARRAY_TASK_ID"]) <NEW_LINE> tmpdir = environ["LSCRATCH"] <NEW_LINE> logging.info('os.listdir: {0}'.format(listdir(tmpdir))) <NEW_LINE> sleep(20.) <NEW_LINE> tmpdir = join(tmpdir,'{0:s}.XXXXXX'.format(environ["SLURM_JOB_ID"])) <NEW_LINE> p = Popen(["mktemp", "-d", tmpdir], stdout=PIPE) <NEW_LINE> sleep(20.) <NEW_LINE> out, err = p.communicate() <NEW_LINE> logging.info('out: {0}'.format(out)) <NEW_LINE> logging.info('err: {0}'.format(err)) <NEW_LINE> tmpdir = join(environ["LSCRATCH"], out.decode('ascii').split()[0]) <NEW_LINE> sleep(20.) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> job_id = local_id <NEW_LINE> tmpdir = mkdir(join(environ["PWD"],'tmp/')) <NEW_LINE> <DEDENT> logging.info('tmpdir is {0:s}.'.format(tmpdir)) <NEW_LINE> if not exists(tmpdir): <NEW_LINE> <INDENT> logging.error('Tmpdir does not exist: {0}. Exit 14'.format(tmpdir)) <NEW_LINE> sys.exit(14) <NEW_LINE> <DEDENT> return tmpdir,job_id
|
Init sdf cluster jobs: set up tmpdir on cluster scratch, determine job_id and set pfiles to tmpdir on scratch
kwargs
------
local_id: int
if not on lsf, return this value as job_id
Returns
-------
tuple with tmpdir on cluster scratch and lsf job id
|
625941bb099cdd3c635f0b1a
|
def t_COMMENT(t): <NEW_LINE> <INDENT> for i in t.value: <NEW_LINE> <INDENT> if i == '\n': <NEW_LINE> <INDENT> t.lexer.lineno += 1 <NEW_LINE> <DEDENT> <DEDENT> if _filter: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return t
|
[(][*]((([^*])*([^)])*)|((([^*])*([^)])*[*][^)]+[)]([^*])*([^)])*))*)[*][)]
|
625941bb6fece00bbac2d5fa
|
def jaccard(file1, file2, shingles_size=3) -> float: <NEW_LINE> <INDENT> set1 = set(get_shingles(file1, shingles_size, 200)) <NEW_LINE> set2 = set(get_shingles(file2, shingles_size, 200)) <NEW_LINE> x = len(set1.intersection(set2)) <NEW_LINE> y = len(set1.union(set2)) <NEW_LINE> return x / float(y)
|
Computes the jaccard distance between to sets.
:param file1: The first file to compare.
:param file2: The second file to compare.
:param shingles_size: The size of shingles.
:return: The result of comparison.
:rtype: float
|
625941bb4527f215b584c319
|
def test_compatible_layer_versions(self): <NEW_LINE> <INDENT> compat_lv = self.project.get_all_compatible_layer_versions() <NEW_LINE> self.assertEqual(list(compat_lv), [self.lver, self.lver2])
|
When we have a 2 layer versions, get_all_compatible_layerversions()
should return a queryset with both.
|
625941bbde87d2750b85fc4d
|
def _add_node_to_layer(self,node,layer): <NEW_LINE> <INDENT> if node not in self._nodeToLayers: <NEW_LINE> <INDENT> self._nodeToLayers[node]=set() <NEW_LINE> <DEDENT> self._nodeToLayers[node].add(layer) <NEW_LINE> if layer not in self._layerToNodes: <NEW_LINE> <INDENT> self._layerToNodes[layer]=set() <NEW_LINE> <DEDENT> self._layerToNodes[layer].add(node)
|
Add node to layer. Network must not be node-aligned.
|
625941bb4c3428357757c1e8
|
def __init__(self, name, symbol, dimension, power = 0, scale = 1.0, offset = 0.0, description = ''): <NEW_LINE> <INDENT> self.name = name <NEW_LINE> self.symbol = symbol <NEW_LINE> self.dimension = dimension <NEW_LINE> self.power = power <NEW_LINE> self.scale = scale <NEW_LINE> self.offset = offset <NEW_LINE> self.description = description
|
Constructor.
See instance variable documentation for more details on parameters.
|
625941bb45492302aab5e17e
|
def __str__(self): <NEW_LINE> <INDENT> return 'name={0}, age={1}'.format(self.__name, self.age)
|
类似java的toString()方法,直接打印对象,可以定制打印哪些信息, 如果不定制打印的是类的内存地址
:return:
|
625941bbcb5e8a47e48b796c
|
def _zero(self, obj): <NEW_LINE> <INDENT> if isinstance(obj, int): <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> elif isinstance(obj, float): <NEW_LINE> <INDENT> return 0.0 <NEW_LINE> <DEDENT> elif isinstance(obj, np.ndarray): <NEW_LINE> <INDENT> return np.zeros(obj.shape) <NEW_LINE> <DEDENT> elif isinstance(obj, dict): <NEW_LINE> <INDENT> return {k: self._zero(v) for k, v in obj.items()} <NEW_LINE> <DEDENT> elif isnamedtuple(obj) or isinstance(obj, MinnetonkaNamedTuple): <NEW_LINE> <INDENT> typ = type(obj) <NEW_LINE> return typ(*(self._zero(o) for o in obj)) <NEW_LINE> <DEDENT> elif isinstance(obj, tuple): <NEW_LINE> <INDENT> return tuple(self._zero(o) for o in obj) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise MinnetonkaError( 'Do not know how to find initial velocity of {}'.format(obj) + 'as it is {}'.format(type(obj)))
|
Return the zero with the same shape as obj.
|
625941bb0a50d4780f666d4d
|
def write_sampler_metadata(self, sampler): <NEW_LINE> <INDENT> super(PTEmceeFile, self).write_sampler_metadata(sampler) <NEW_LINE> group = self[self.sampler_group] <NEW_LINE> group.attrs["starting_betas"] = sampler.starting_betas <NEW_LINE> group.attrs["adaptive"] = sampler.adaptive <NEW_LINE> group.attrs["adaptation_lag"] = sampler.adaptation_lag <NEW_LINE> group.attrs["adaptation_time"] = sampler.adaptation_time <NEW_LINE> group.attrs["scale_factor"] = sampler.scale_factor
|
Adds writing ptemcee-specific metadata to MultiTemperedMCMCIO.
|
625941bb7cff6e4e81117844
|
def test_inc_abs(self): <NEW_LINE> <INDENT> self.emulator.edit_memory("300", "EA EE 06 03 00") <NEW_LINE> self.emulator.edit_memory("306", "FF") <NEW_LINE> output = self.emulator.run_program("300") <NEW_LINE> self.assertEqual( output, " PC OPC INS AMOD OPRND AC XR YR SP NV-BDIZC\n" + " 300 EA NOP impl -- -- 00 00 00 FF 00100000\n" + " 301 EE INC abs 06 03 00 00 00 FF 00100010\n" + " 304 00 BRK impl -- -- 00 00 00 FC 00110110\n") <NEW_LINE> output = self.emulator.access_memory("306") <NEW_LINE> self.assertEqual(output, "306\t00")
|
Test inc abs instruction.
|
625941bb3539df3088e2e209
|
def TearDown(self): <NEW_LINE> <INDENT> self._windowsize = self._windowsize - self._step <NEW_LINE> step = self._currentStep <NEW_LINE> if step > self._bottomright.y: <NEW_LINE> <INDENT> step = self._bottomright.y <NEW_LINE> <DEDENT> if self._windowsize > 0: <NEW_LINE> <INDENT> if self._scrollType == TB_SCR_TYPE_UD: <NEW_LINE> <INDENT> dimY = self._dialogtop[1] <NEW_LINE> <DEDENT> elif self._scrollType == TB_SCR_TYPE_DU: <NEW_LINE> <INDENT> dimY = step <NEW_LINE> <DEDENT> self.SetSize(self._dialogtop[0], dimY, self.GetSize().GetWidth(), self._windowsize) <NEW_LINE> self.Update() <NEW_LINE> self.Refresh() <NEW_LINE> self._currentStep += self._scrollStep <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self._scrollTimer.Stop() <NEW_LINE> self.Hide() <NEW_LINE> if self._parent2: <NEW_LINE> <INDENT> self._parent2.Notify()
|
Scrolls the :class:`ToasterBox` down, which means gradually hiding it.
|
625941bb8a43f66fc4b53f27
|
def _get_author(self, svn_commit): <NEW_LINE> <INDENT> cvs_author = svn_commit.get_author() <NEW_LINE> return self._map_author(cvs_author)
|
Return the author to be used for SVN_COMMIT.
Return the author as a UTF-8 string in the form needed by git
fast-import; that is, 'name <email>'.
|
625941bb3cc13d1c6d3c7241
|
def _init_data(self, train_folder, validation_folder, holdout_folder): <NEW_LINE> <INDENT> self.train_folder = train_folder <NEW_LINE> self.validation_folder = validation_folder <NEW_LINE> self.holdout_folder = holdout_folder <NEW_LINE> self.n_train = sum(len(files) for _, _, files in os.walk( self.train_folder)) <NEW_LINE> self.n_val = sum(len(files) for _, _, files in os.walk( self.validation_folder)) <NEW_LINE> self.n_holdout = sum(len(files) for _, _, files in os.walk( self.holdout_folder)) <NEW_LINE> self.n_categories = sum(len(dirnames) for _, dirnames, _ in os.walk( self.train_folder)) <NEW_LINE> self.set_class_names()
|
Initializes class data
Args:
train_folder(str): folder containing train data
validation_folder(str): folder containing validation data
holdout_folder(str): folder containing holdout data
|
625941bb1d351010ab8559db
|
def read_minutes_months(self, s): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> (minutes, months) = [int(x.strip()) for x in s.split(',')] <NEW_LINE> return minutes, months <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> raise ParsingError(('Value should be "minutes, months"'))
|
Return a (minutes, months) tuple after parsing a "M,N" string.
|
625941bb91f36d47f21ac3ad
|
def test_get_last_synced_changelist(self): <NEW_LINE> <INDENT> with mock_p4() as mock: <NEW_LINE> <INDENT> mock.add_changelist('test_changelist', ('/p4/file_1', 'rev 1')) <NEW_LINE> cl_number = mock.add_changelist('test_changelist', ('/p4/file_1', 'rev 2')) <NEW_LINE> self._p4.sync() <NEW_LINE> self.assertEqual(self._p4.get_last_synced_changelist(), cl_number)
|
describe should return changelist description
|
625941bbe1aae11d1e749b73
|
def setApplied(self, state=True): <NEW_LINE> <INDENT> TRACE('setApplied %s', state) <NEW_LINE> self.applied = state <NEW_LINE> if not state and not self.modified: <NEW_LINE> <INDENT> self.setModified(setDirty=False)
|
Set panel state.
|
625941bbeab8aa0e5d26da1d
|
def test_Seq_len(): <NEW_LINE> <INDENT> seq = Seq(id="test", desc=None, seq="four") <NEW_LINE> assert len(seq) == 4 <NEW_LINE> return
|
Simple wrapper function, so gets a simple test.
|
625941bb925a0f43d2549d32
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.