| code (string, length 4–4.48k) | docstring (string, length 1–6.45k) | _id (string, length 24) |
|---|---|---|
def handle_answer(self, choice): <NEW_LINE> <INDENT> for c in self.choices: <NEW_LINE> <INDENT> if c.shortcut == choice: <NEW_LINE> <INDENT> return c.handle() <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError('Invalid choice "{0}"'.format(choice))
|
Return the result for replace_links.
|
625941bdaad79263cf39092c
|
def put_map_image(self, mission_id, image_data): <NEW_LINE> <INDENT> self.put('/api/maps/%d/%s' % (mission_id, self.username), data=image_data)
|
PUT map image. Image must be PNG or JPEG data.
Args:
mission_id: The mission for which to upload a map.
image_data: The image data (bytes loaded from file) to upload.
Raises:
InteropError: Error from server.
requests.Timeout: Request timeout.
|
625941bd10dbd63aa1bd2a96
|
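A minimal usage sketch for `put_map_image`; `client` stands in for a hypothetical interop client instance and the file path is illustrative:

```python
# Hedged usage sketch: `client` and the file path are hypothetical.
with open('map.png', 'rb') as f:
    client.put_map_image(mission_id=1, image_data=f.read())
```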
def review_to_word(review): <NEW_LINE> <INDENT> review_text = BeautifulSoup(review,'html.parser').get_text() <NEW_LINE> letters_only = re.sub("[^a-zA-Z]", " ", review_text) <NEW_LINE> words = letters_only.lower().split() <NEW_LINE> stops = set(stopwords.words("english")) <NEW_LINE> meaningful_words = [w for w in words if not w in stops] <NEW_LINE> return( " ".join( meaningful_words ))
|
Return the string of words from a raw IMDb review
Parameters
----------
review : string
a movie review
Returns
-------
meaningful_words : string
a preprocessed and cleaned review
|
625941bdff9c53063f47c0e5
|
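A small usage sketch, assuming the imports the function body relies on (`re`, `BeautifulSoup`, NLTK's English stopword list) are in scope:

```python
import re
from bs4 import BeautifulSoup
from nltk.corpus import stopwords  # requires nltk.download('stopwords') once

raw = "<br />This movie was surprisingly good, 10/10!"
print(review_to_word(raw))  # -> "movie surprisingly good"
```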
def _prelaunch(self, operation, uid=None, available_disk_space=0, **kwargs): <NEW_LINE> <INDENT> self.nr_of_datatypes = 0 <NEW_LINE> msg, _ = ABCUploader._prelaunch(self, operation, uid=None, **kwargs) <NEW_LINE> return msg, self.nr_of_datatypes
|
Override this method in order to return the correct number of stored datatypes.
|
625941bd099cdd3c635f0b4c
|
def start_up(self): <NEW_LINE> <INDENT> print( f"{COL.green}{(str(self.start_datetime).split('.'))[0]} " f"{self.user.my_call}-{self.user.ssid} " f"IGgate started - Program Version {self.VERS[-3:]} by 9V1KG{COL.end}" ) <NEW_LINE> pos_c = compress_position(self.user.pos[0], self.user.pos[1], self.user.pos[2]) <NEW_LINE> pos_f = format_position(self.user.pos[0], self.user.pos[1]) <NEW_LINE> print(" " * 9 + f"Formatted Position: {pos_f}") <NEW_LINE> print(" " * 9 + f"Compressed Position: {pos_c}") <NEW_LINE> logging.info("Ygate program started, version %s", self.VERS) <NEW_LINE> loc_time = time.strftime("%H:%M:%S") <NEW_LINE> if not self.open_serial(): <NEW_LINE> <INDENT> sys.exit(1) <NEW_LINE> <DEDENT> if is_internet(): <NEW_LINE> <INDENT> print(f"{loc_time} Logging in to {self.HOST}") <NEW_LINE> if self.aprs_con: <NEW_LINE> <INDENT> self.send_status() <NEW_LINE> time.sleep(5.) <NEW_LINE> self.send_my_position() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print( f"{loc_time} {COL.red}" f"Cannot establish connection to APRS server" f"{COL.end}" ) <NEW_LINE> if self.ser: <NEW_LINE> <INDENT> self.ser.close() <NEW_LINE> <DEDENT> sys.exit(1) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> print(f"{loc_time} {COL.red}No internet available{COL.end}") <NEW_LINE> if self.ser: <NEW_LINE> <INDENT> self.ser.close() <NEW_LINE> <DEDENT> sys.exit(1)
|
Startup of IGate: opens serial port and internet connection
Login to APRS server and send bulletin and beacon
:return: None
|
625941bdfb3f5b602dac3580
|
def get_auth_url_twitch(self, state): <NEW_LINE> <INDENT> params = {"client_id": safe.get('twitch-client-id'), "response_type": "token", "state": state, "redirect_uri": TWITCH_REDIRECT_URI, "scope": "channel_editor chat_login"} <NEW_LINE> url = "https://id.twitch.tv/oauth2/authorize?" + urllib.parse.urlencode(params) <NEW_LINE> return url
|
Generate auth url for twitch.
|
625941bd7047854f462a12fc
|
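A hedged usage sketch; `bot` is a hypothetical instance of the owning class, and `state` is an opaque anti-CSRF token the caller generates:

```python
import secrets

state = secrets.token_urlsafe(16)     # opaque anti-CSRF token
url = bot.get_auth_url_twitch(state)  # hypothetical instance
# -> https://id.twitch.tv/oauth2/authorize?client_id=...&response_type=token&...
```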
def decommission_brokers(self, broker_ids): <NEW_LINE> <INDENT> groups = set() <NEW_LINE> for b_id in broker_ids: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> broker = self.cluster_topology.brokers[b_id] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> self.log.error("Invalid broker id %s.", b_id) <NEW_LINE> raise InvalidBrokerIdError( "Broker id {} does not exist in cluster".format(b_id), ) <NEW_LINE> <DEDENT> broker.mark_decommissioned() <NEW_LINE> groups.add(broker.replication_group) <NEW_LINE> <DEDENT> for group in groups: <NEW_LINE> <INDENT> self._decommission_brokers_in_group(group)
|
Decommission a list of brokers while trying to keep the replication groups
the brokers belong to balanced.
:param broker_ids: list of strings representing valid broker ids in the cluster
:raises: InvalidBrokerIdError when the id is invalid.
|
625941bda4f1c619b28aff2f
|
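A brief usage sketch; `balancer` is a hypothetical instance holding a `cluster_topology`, and unknown ids raise the documented error:

```python
# Hedged usage: InvalidBrokerIdError is raised for ids not in the cluster.
try:
    balancer.decommission_brokers(['broker-1', 'broker-2'])
except InvalidBrokerIdError as e:
    print('bad broker id:', e)
```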
def suite(): <NEW_LINE> <INDENT> loader = TestLoader() <NEW_LINE> return loader.discover(getModule(__name__).filePath.parent().path, pattern='test_*.py')
|
Auto-discover test suite
:return: the TestSuite
:rtype: unittest.TestSuite
|
625941bdd4950a0f3b08c241
|
def _to_node_size(self, machine_type): <NEW_LINE> <INDENT> extra = {} <NEW_LINE> extra['selfLink'] = machine_type.get('selfLink') <NEW_LINE> extra['zone'] = self.ex_get_zone(machine_type['zone']) <NEW_LINE> extra['description'] = machine_type.get('description') <NEW_LINE> extra['guestCpus'] = machine_type.get('guestCpus') <NEW_LINE> extra['creationTimestamp'] = machine_type.get('creationTimestamp') <NEW_LINE> try: <NEW_LINE> <INDENT> price = self._get_size_price(size_id=machine_type['name']) <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> price = None <NEW_LINE> <DEDENT> return GCENodeSize(id=machine_type['id'], name=machine_type['name'], ram=machine_type.get('memoryMb'), disk=machine_type.get('imageSpaceGb'), bandwidth=0, price=price, driver=self, extra=extra)
|
Return a Size object from the json-response dictionary.
:param machine_type: The dictionary describing the machine.
:type machine_type: ``dict``
:return: Size object
:rtype: :class:`GCENodeSize`
|
625941bd4428ac0f6e5ba6e1
|
def parseREM(rem): <NEW_LINE> <INDENT> today = time.localtime() <NEW_LINE> warnDays, newrem = hasWarning(rem, today) <NEW_LINE> if warnDays: <NEW_LINE> <INDENT> warnDays = int(warnDays) <NEW_LINE> rem = newrem <NEW_LINE> <DEDENT> repeatDays, newrem = hasRepeat(rem, today) <NEW_LINE> if repeatDays: <NEW_LINE> <INDENT> repeatDays = int(repeatDays) <NEW_LINE> rem = newrem <NEW_LINE> <DEDENT> type, now = singleDay(rem, today) <NEW_LINE> if type and now: return True <NEW_LINE> if type and not now: return False <NEW_LINE> type, now = multiDay(rem, today) <NEW_LINE> if type and now: return True <NEW_LINE> if type and not now: return False <NEW_LINE> type, now = singleDoW(rem, today) <NEW_LINE> if type and now: return True <NEW_LINE> if type and not now: return False <NEW_LINE> type, now = multiDoW(rem, today) <NEW_LINE> if type and now: return True <NEW_LINE> if type and not now: return False <NEW_LINE> type, now = monthDay(rem, today, warn=warnDays, rep=repeatDays) <NEW_LINE> if type and now: return True <NEW_LINE> if type and not now: return False <NEW_LINE> type, now = monthDayYear(rem, today, warn=warnDays, rep=repeatDays) <NEW_LINE> if type and now: return True <NEW_LINE> if type and not now: return False
|
Parses REM-style date strings; returns True if the event is today.
|
625941bdde87d2750b85fc7f
|
def loglikelihoods(Eigen,A,b, deriv=False): <NEW_LINE> <INDENT> E, V, Ecol = Eigen <NEW_LINE> bE = b*Ecol <NEW_LINE> logdets = torch.log1p(bE).sum(axis=0) <NEW_LINE> bE1 = 1+bE <NEW_LINE> R = (V.T @ A) / bE1 <NEW_LINE> S = V @ R <NEW_LINE> y = (A*S).sum(axis=0) <NEW_LINE> llh = (y - logdets)/2 <NEW_LINE> if not deriv: return llh <NEW_LINE> def back(dllh): <NEW_LINE> <INDENT> dA = dllh*S <NEW_LINE> dllhb = dllh*b <NEW_LINE> M1 = (S*dllhb)@S.T <NEW_LINE> M2 = (V*(dllhb/bE1).sum(axis=1))@V.T <NEW_LINE> dL = -(M1+M2) <NEW_LINE> vv = (V**2).sum(axis=0)**2 <NEW_LINE> t1 = ((vv*E).reshape(-1,1)/bE1 ).sum(axis=0) <NEW_LINE> t2 = E @ R**2 <NEW_LINE> db = (-0.5*dllh)*(t1+t2) <NEW_LINE> return dL, dA, db <NEW_LINE> <DEDENT> return llh, back
|
Computes a vector of log-likelihoods.
For trial j, A[:,j] and b[j] represent additive statistics computed from
one or more x-vectors that are hypothesized to be of the same speaker.
llh[j] is the log-likelihood that these x-vectors belong to the same speaker.
L is used in B = L @ L.T, which is a derived HT-PLDA model parameter (a precision matrix)
Inputs:
Eigen: a tuple containing the eigenanalysis of B = LL'
A: (d,n)
b: (n,)
Outputs:
llh: (n,) a vector of log-likelihoods
back: optional backprop function handle: dL, dA, db = back(dllh)
|
625941bd851cf427c661a402
|
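A hedged setup sketch for `loglikelihoods`, assuming PyTorch: per the docstring, the `Eigen` tuple is taken to be the eigendecomposition of B = L @ L.T (eigenvalues E, eigenvectors V, and E reshaped to a column), with random inputs standing in for real statistics:

```python
import torch

d, n = 4, 3
L = torch.randn(d, d)
B = L @ L.T
E, V = torch.linalg.eigh(B)        # eigenvalues / eigenvectors of B
Eigen = (E, V, E.reshape(-1, 1))   # Ecol: column-shaped eigenvalues
A = torch.randn(d, n)              # additive statistics, one column per trial
b = torch.rand(n)
llh = loglikelihoods(Eigen, A, b)  # -> (n,) vector of log-likelihoods
```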
def AddGroupMemberships(self): <NEW_LINE> <INDENT> self.groups = {g.name: self._Members(g) for g in self.groups.itervalues()} <NEW_LINE> for g in self.groups.itervalues(): <NEW_LINE> <INDENT> for user in g.members: <NEW_LINE> <INDENT> membership = self.memberships.setdefault(user, set()) <NEW_LINE> membership.add(g.gid) <NEW_LINE> <DEDENT> <DEDENT> for user in self.entry.itervalues(): <NEW_LINE> <INDENT> user.gids = self.memberships.get(user.username)
|
Adds aggregate group membership from group, gshadow and passwd.
|
625941bd99fddb7c1c9de282
|
def test_venue(self): <NEW_LINE> <INDENT> self.fields_to_verify = ['name', 'location', 'description', 'latitude', 'longitude'] <NEW_LINE> self.venue = Venue.objects.create(name='Seoul City Hall', location='Seoul', latitude=37.566676, longitude=126.978397) <NEW_LINE> self.venue_fields = [field.name for field in self.venue._meta.get_fields()] <NEW_LINE> [self.assertIn(field, self.venue_fields) for field in self.fields_to_verify]
|
Verify that the Venue model's expected attributes exist
|
625941bd009cb60464c632a4
|
def untag(self, identifier, tags): <NEW_LINE> <INDENT> with ConnectionContext(self.identifier, commit=True) as ctxt: <NEW_LINE> <INDENT> if not isinstance(tags, (list, tuple)): <NEW_LINE> <INDENT> tags = [tags] <NEW_LINE> <DEDENT> for tag in tags: <NEW_LINE> <INDENT> self._execute( ctxt, 'tags_rem', replacements={ '$(IDENTIFIER)': identifier, '$(TAG)': tag, }, )
|
This will unassign the given tags from the element with the given
identifier.
:param identifier: Data Element identifier
:type identifier: str
:param tags: A single tag, or list of tags to unassign
:type tags: string or list(str, str, ...)
:return: None
|
625941bda8370b7717052791
|
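A hedged usage sketch; `store` is a hypothetical instance of the owning class. Either a single tag or a list of tags may be passed:

```python
store.untag('element-123', 'obsolete')            # single tag
store.untag('element-123', ['draft', 'review'])   # list of tags
```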
def test_bool_symbol(): <NEW_LINE> <INDENT> A, B, C = list(map(Boolean, symbols('A,B,C'))) <NEW_LINE> assert And(A, True) == A <NEW_LINE> assert And(A, True, True) == A <NEW_LINE> assert And(A, False) == False <NEW_LINE> assert And(A, True, False) == False <NEW_LINE> assert Or(A, True) == True <NEW_LINE> assert Or(A, False) == A
|
Test that mixing symbols with boolean values
works as expected
|
625941bd96565a6dacc8f5bc
|
def ERROR0006(self): <NEW_LINE> <INDENT> return "There is a problem connecting to the internet!"
|
def ERROR0006():
Call this if there is a problem with the internet connection.
|
625941bde1aae11d1e749ba5
|
def get_dim_slice(self, dim): <NEW_LINE> <INDENT> if dim["name"] in self.dim_slices.keys(): <NEW_LINE> <INDENT> return self.dim_slices[dim["name"]] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return slice(dim["size"])
|
Get the slice for some dimension dim. First taken from self.dim_slices if it's been
explicitly set or overridden, falling back on getting the actual size out of the
file if it hasn't been overridden.
:type dim: dict
:param dim: dimension to get slice for
:rtype: slice | int
:return: slice for dimension dim
|
625941bdd10714528d5ffbd0
|
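A short sketch of the fallback behavior; `reader` is a hypothetical instance with no override registered for the dimension name:

```python
dim = {"name": "time", "size": 240}
print(reader.get_dim_slice(dim))  # -> slice(None, 240, None)
```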
def check_existing_apikey(lcc_server): <NEW_LINE> <INDENT> USERHOME = os.path.expanduser('~') <NEW_LINE> APIKEYFILE = os.path.join(USERHOME, '.astrobase', 'lccs', 'apikey-%s' % lcc_server.replace( 'https://', 'https-' ).replace( 'http://', 'http-' )) <NEW_LINE> if os.path.exists(APIKEYFILE): <NEW_LINE> <INDENT> fileperm = oct(os.stat(APIKEYFILE)[stat.ST_MODE]) <NEW_LINE> if fileperm == '0100600' or fileperm == '0o100600': <NEW_LINE> <INDENT> with open(APIKEYFILE) as infd: <NEW_LINE> <INDENT> apikey, expires = infd.read().strip('\n').split() <NEW_LINE> <DEDENT> now = datetime.now(utc) <NEW_LINE> if sys.version_info[:2] < (3,7): <NEW_LINE> <INDENT> expdt = datetime.strptime( expires.replace('Z',''), '%Y-%m-%dT%H:%M:%S.%f' ).replace(tzinfo=utc) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> expdt = datetime.fromisoformat(expires.replace('Z','+00:00')) <NEW_LINE> <DEDENT> if now > expdt: <NEW_LINE> <INDENT> LOGERROR('API key has expired. expiry was on: %s' % expires) <NEW_LINE> return False, apikey, expires <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return True, apikey, expires <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> LOGWARNING('The API key file %s has bad permissions ' 'and is insecure, not reading it.\n' '(you need to chmod 600 this file)' % APIKEYFILE) <NEW_LINE> return False, None, None <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> LOGWARNING('No LCC-Server API key ' 'found in: {apikeyfile}'.format(apikeyfile=APIKEYFILE)) <NEW_LINE> return False, None, None
|
This checks whether an API key for the specified LCC-Server is available.
API keys are stored using the following file scheme::
~/.astrobase/lccs/apikey-domain.of.lccserver.org
e.g. for the HAT LCC-Server at https://data.hatsurveys.org::
~/.astrobase/lccs/apikey-https-data.hatsurveys.org
Parameters
----------
lcc_server : str
The base URL of the LCC-Server for which the existence of API keys will
be checked.
Returns
-------
(apikey_ok, apikey_str, expiry) : tuple
The returned tuple contains the status of the API key, the API key
itself if present, and its expiry date if present.
|
625941bd8e7ae83300e4aebc
|
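A hedged usage sketch against the HAT LCC-Server mentioned in the docstring:

```python
apikey_ok, apikey, expiry = check_existing_apikey('https://data.hatsurveys.org')
if not apikey_ok:
    # the key is missing, insecure, or expired; a new one must be obtained
    print('no usable API key on disk (expiry: %s)' % expiry)
```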
def test_check_grad(): <NEW_LINE> <INDENT> def f_df_correct(x): <NEW_LINE> <INDENT> return x**2, 2 * x <NEW_LINE> <DEDENT> def f_df_incorrect(x): <NEW_LINE> <INDENT> return x**3, 0.5 * x**2 <NEW_LINE> <DEDENT> output = StringIO() <NEW_LINE> check_grad(f_df_correct, 5, out=output, style='grid') <NEW_LINE> ansi_escape = re.compile(r'\x1b[^m]*m') <NEW_LINE> getvalues = lambda o: [float(ansi_escape.sub('', s.strip()).split(' ')[0]) for s in o.getvalue().split('\n')[3].split('|')[1:-1]] <NEW_LINE> values = getvalues(output) <NEW_LINE> print(values) <NEW_LINE> assert values[0] == values[1] == 10.0, "Correct gradient computation" <NEW_LINE> assert np.allclose(values[2], 0), "Correct error computation" <NEW_LINE> output = StringIO() <NEW_LINE> check_grad(f_df_incorrect, 5, out=output, style='grid') <NEW_LINE> values = getvalues(output) <NEW_LINE> printed_error = values[2] <NEW_LINE> correct_error = np.abs(values[0] - values[1]) / (np.abs(values[0]) + np.abs(values[1])) <NEW_LINE> assert np.isclose(printed_error, correct_error, atol=1e-4), "Correct relative error"
|
Tests the check_grad() function
|
625941bd76d4e153a657ea20
|
def __init__(self, identifier, datasets, specs=None, coords=None): <NEW_LINE> <INDENT> if coords is None: <NEW_LINE> <INDENT> coords = ['x', 'y', 'z'] <NEW_LINE> <DEDENT> self.coords = coords <NEW_LINE> self.dcoords = ['d'+c for c in self.coords] <NEW_LINE> self.dim = len(self.coords) <NEW_LINE> if self.dim == 3: <NEW_LINE> <INDENT> self.ncoords = ['n'+c for c in self.coords] <NEW_LINE> <DEDENT> frame_types = {'edge', 'vert', 'face', 'cell'} <NEW_LINE> self.identifier = identifier <NEW_LINE> if not set(datasets).issubset(frame_types): <NEW_LINE> <INDENT> raise ValueError('The `datasets` dictionnary should' ' contain keys in {}'.format(frame_types)) <NEW_LINE> <DEDENT> self.datasets = datasets <NEW_LINE> self.data_names = list(datasets.keys()) <NEW_LINE> self.element_names = ['srce', 'trgt', 'face', 'cell'][:len(self.data_names)] <NEW_LINE> if specs is None: <NEW_LINE> <INDENT> specs = {name: {} for name in self.data_names} <NEW_LINE> <DEDENT> if 'settings' not in specs: <NEW_LINE> <INDENT> specs['settings'] = {} <NEW_LINE> <DEDENT> self.specs = specs <NEW_LINE> self.update_specs(specs, reset=False) <NEW_LINE> self.edge_mindex = pd.MultiIndex.from_arrays(self.edge_idx.values.T, names=self.element_names) <NEW_LINE> self.reset_topo() <NEW_LINE> self.bbox = None <NEW_LINE> self.set_bbox()
|
Creates an epithelium
Parameters:
-----------
identifier: string
datasets: dictionary of dataframes
the datasets dict specifies the names, data columns
and value types of the modeled tyssue
|
625941bda8ecb033257d2fc0
|
def test_schema_access_multiple(self): <NEW_LINE> <INDENT> c = SqlAlchemyDatabaseConnector(engine_url="sqlite://", schema_builder=fruit_schemas, ) <NEW_LINE> super_classes = [cc.__name__ for cc in c.schema.Pear.mro()] <NEW_LINE> expected = ['Pear', 'Base', 'object'] <NEW_LINE> self.assertEqual(expected, super_classes)
|
list of ORM models in schema
|
625941bd4f6381625f11492e
|
def can_launch_bibupload(taskid): <NEW_LINE> <INDENT> if taskid == 0: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> sql = 'SELECT status FROM schTASK WHERE id = %s' <NEW_LINE> if run_sql(sql, [str(taskid)])[0][0] != 'DONE': <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> return True
|
Checks if task can be launched.
|
625941bd4f6381625f11492d
|
def load_json_file_contents(path: str) -> str: <NEW_LINE> <INDENT> assert isinstance(path, str) <NEW_LINE> content = None <NEW_LINE> file_path = os.path.abspath(path) <NEW_LINE> content = fileutils.read_text_from_file(file_path) <NEW_LINE> json_object = json.loads(content) <NEW_LINE> content = json.dumps(json_object, sort_keys=True, indent=4) <NEW_LINE> return content
|
Loads contents from a json file
|
625941bd796e427e537b04b3
|
def updateChild(self, *args): <NEW_LINE> <INDENT> return _DataModel.ConfigStation_updateChild(self, *args)
|
updateChild(ConfigStation self, Object child) -> bool
|
625941bdd6c5a10208143f38
|
def validate_data_generator_variables(variables, model_etrees=None, validate_targets_with_model_sources=True): <NEW_LINE> <INDENT> model_etrees = model_etrees or {} <NEW_LINE> errors = [] <NEW_LINE> warnings = [] <NEW_LINE> task_types = set() <NEW_LINE> for i_variable, variable in enumerate(variables): <NEW_LINE> <INDENT> variable_errors = [] <NEW_LINE> variable_warnings = [] <NEW_LINE> if not variable.id: <NEW_LINE> <INDENT> variable_errors.append(['Variable must have an id.']) <NEW_LINE> <DEDENT> if variable.model: <NEW_LINE> <INDENT> variable_errors.append(['Variable should not reference a model.']) <NEW_LINE> <DEDENT> if variable.task: <NEW_LINE> <INDENT> task_types.add(get_task_results_shape(variable.task)) <NEW_LINE> <DEDENT> elif not (variable.target and variable.target.startswith('#')): <NEW_LINE> <INDENT> variable_errors.append(['Variable must reference a task.']) <NEW_LINE> <DEDENT> if (variable.symbol and variable.target) or (not variable.symbol and not variable.target): <NEW_LINE> <INDENT> variable_errors.append(['Variable must define a symbol or target.']) <NEW_LINE> <DEDENT> if variable.target and variable.task: <NEW_LINE> <INDENT> models = get_models_referenced_by_task(variable.task) <NEW_LINE> for model in models: <NEW_LINE> <INDENT> if model and model.language: <NEW_LINE> <INDENT> temp_errors, temp_warnings = validate_target( variable.target, variable.target_namespaces, DataGenerator, model.language, model_id=model.id, model_etree=model_etrees.get(model, None), check_in_model_source=validate_targets_with_model_sources, model_change=model.has_structural_changes()) <NEW_LINE> variable_errors.extend(temp_errors) <NEW_LINE> variable_warnings.extend(temp_warnings) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if variable_errors: <NEW_LINE> <INDENT> variable_id = '`' + variable.id + '`' if variable and variable.id else str(i_variable + 1) <NEW_LINE> errors.append(['Variable {} is invalid.'.format(variable_id), variable_errors]) <NEW_LINE> <DEDENT> if variable_warnings: <NEW_LINE> <INDENT> variable_id = '`' + variable.id + '`' if variable and variable.id else str(i_variable + 1) <NEW_LINE> warnings.append(['Variable {} has warnings.'.format(variable_id), variable_warnings]) <NEW_LINE> <DEDENT> <DEDENT> if len(task_types) > 1: <NEW_LINE> <INDENT> warnings.append(['The variables do not have consistent shapes.']) <NEW_LINE> <DEDENT> return errors, warnings
|
Check variables have a symbol or target
Args:
variables (:obj:`list` of :obj:`Variable`): variables
model_etrees (:obj:`dict`, optional): dictionary that maps models to XML element trees of their sources
validate_targets_with_model_sources (:obj:`bool`, optional): whether to validate the targets of the variables of the data generator
Returns:
nested :obj:`list` of :obj:`str`: nested list of errors (e.g., required ids missing or ids not unique)
|
625941bd9b70327d1c4e0cc4
|
def SetUpper(self, *args): <NEW_LINE> <INDENT> return _itkNeighborhoodConnectedImageFilterPython.itkNeighborhoodConnectedImageFilterIUC3IUC3_SetUpper(self, *args)
|
SetUpper(self, unsigned char _arg)
|
625941bd4527f215b584c34b
|
def good_password(user, password): <NEW_LINE> <INDENT> return password != None and authenticate(username=user.username, password=password) != None
|
Check if given password can authenticate given user
|
625941bd67a9b606de4a7dac
|
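A sketch assuming Django's `authenticate()` is the one in scope, as the function body implies; the user object is hypothetical:

```python
from django.contrib.auth import authenticate  # used inside good_password
from django.contrib.auth.models import User

user = User.objects.get(username='alice')     # hypothetical existing user
print(good_password(user, 'wrong-password'))  # -> False
```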
def update(pkg, slot=None, fromrepo=None, refresh=False, binhost=None): <NEW_LINE> <INDENT> if salt.utils.is_true(refresh): <NEW_LINE> <INDENT> refresh_db() <NEW_LINE> <DEDENT> full_atom = pkg <NEW_LINE> if slot is not None: <NEW_LINE> <INDENT> full_atom = '{0}:{1}'.format(full_atom, slot) <NEW_LINE> <DEDENT> if fromrepo is not None: <NEW_LINE> <INDENT> full_atom = '{0}::{1}'.format(full_atom, fromrepo) <NEW_LINE> <DEDENT> if binhost == 'try': <NEW_LINE> <INDENT> bin_opts = '-g' <NEW_LINE> <DEDENT> elif binhost == 'force': <NEW_LINE> <INDENT> bin_opts = '-G' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> bin_opts = '' <NEW_LINE> <DEDENT> old = list_pkgs() <NEW_LINE> cmd = 'emerge --ask n --quiet --update --newuse --oneshot {0} {1}'.format(bin_opts, full_atom) <NEW_LINE> call = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) <NEW_LINE> __context__.pop('pkg.list_pkgs', None) <NEW_LINE> if call['retcode'] != 0: <NEW_LINE> <INDENT> return _process_emerge_err(call['stdout'], call['stderr']) <NEW_LINE> <DEDENT> new = list_pkgs() <NEW_LINE> return salt.utils.compare_dicts(old, new)
|
Updates the passed package (emerge --update package)
slot
Restrict the update to a particular slot. It will update to the
latest version within the slot.
fromrepo
Restrict the update to a particular repository. It will update to the
latest version within the repository.
binhost
has two options: try and force.
try - tells emerge to try and install the package from a configured binhost.
force - forces emerge to install the package from a binhost otherwise it fails out.
Return a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.update <package name>
|
625941bdbaa26c4b54cb1013
|
def insert_single_data(self,index_name,doc_type,data): <NEW_LINE> <INDENT> res = self.es.index(index=index_name,doc_type=doc_type,body=data) <NEW_LINE> return res
|
:param index_name: the index name
:param doc_type: the document type
:param data: the data to insert
:return: the execution result
|
625941bdd99f1b3c44c67486
|
def run_conversion(self, src_file_path): <NEW_LINE> <INDENT> dst_file_path = splitext(src_file_path)[0] + '.converting.mp4' <NEW_LINE> final_dst_file_path = splitext(src_file_path)[0] + '.mp4' <NEW_LINE> log_file_path = splitext(src_file_path)[0] + '.conversion.log' <NEW_LINE> error_file_path = splitext(src_file_path)[0] + '.conversion.error' <NEW_LINE> try: <NEW_LINE> <INDENT> self.conversion = Conversion(src_file_path, dst_file_path, log_file_path) <NEW_LINE> self.conversion.start() <NEW_LINE> converting = True <NEW_LINE> <DEDENT> except (StopIteration, MediaInfoError): <NEW_LINE> <INDENT> print("Error, failed to start conversion of {}".format(src_file_path)) <NEW_LINE> converting = False <NEW_LINE> <DEDENT> while converting: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> elapsed = str(self.conversion.elapsed()) <NEW_LINE> eta = str(self.conversion.eta()) <NEW_LINE> output_size = human_readable_size(self.conversion.output_size()) <NEW_LINE> progress = percentage(self.conversion.progress()) <NEW_LINE> if output_size is not None and progress is not None: <NEW_LINE> <INDENT> output_str = "Converting [{}]: {} Progress {} ETA: {}\r".format(elapsed, output_size, progress, eta) <NEW_LINE> sys.stdout.write(output_str) <NEW_LINE> <DEDENT> sleep(0.5) <NEW_LINE> sys.stdout.flush() <NEW_LINE> <DEDENT> except psutil.NoSuchProcess: <NEW_LINE> <INDENT> print() <NEW_LINE> print("Conversion process ended...") <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> result = {'error':'Conversion could not be started'} if not self.conversion else self.conversion.result() <NEW_LINE> if 'error' in result: <NEW_LINE> <INDENT> print("There was an error during conversion: {}".format(result)) <NEW_LINE> increment_error_counter(error_file_path) <NEW_LINE> log_failed_conversion(log_file_path) <NEW_LINE> <DEDENT> elif getsize(dst_file_path) < 10000: <NEW_LINE> <INDENT> print("There was an error during conversion: {} is too small...".format(dst_file_path)) <NEW_LINE> increment_error_counter(error_file_path) <NEW_LINE> log_failed_conversion(log_file_path) <NEW_LINE> <DEDENT> elif not MediaInfo(dst_file_path).valid(): <NEW_LINE> <INDENT> print("There was an error during conversion: {} media info is invalid".format(dst_file_path)) <NEW_LINE> increment_error_counter(error_file_path) <NEW_LINE> log_failed_conversion(log_file_path) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> remove(src_file_path) <NEW_LINE> rename(dst_file_path, final_dst_file_path) <NEW_LINE> log_successful_conversion(log_file_path)
|
Starts a conversion subprocess for a given source
|
625941bda4f1c619b28aff30
|
def _handlePitchWheel(self, ch, value): <NEW_LINE> <INDENT> self.settings.setFaderPos(ch, value) <NEW_LINE> apiRange = [0, 4] <NEW_LINE> midiRange = [-8192, 8192] <NEW_LINE> faderValue = convertValueToOSCRange(value, apiRange, midiRange, "log") <NEW_LINE> if self.mcu.getMode() == "main": <NEW_LINE> <INDENT> if ch == 7: <NEW_LINE> <INDENT> self.motu.setMainFader(faderValue) <NEW_LINE> <DEDENT> elif ch == 6: <NEW_LINE> <INDENT> self.motu.setMonitorFader(faderValue) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> self.motu.setFader(ch, faderValue) <NEW_LINE> <DEDENT> self.mcu.faderPos(ch, value)
|
Handle fader moves
|
625941bd01c39578d7e74d2b
|
def change_equip(): <NEW_LINE> <INDENT> equip_option = input('\nWhich equipment would you like to change?' '\n1. Weapon' '\n2. Armor' '\n3. None\n\t') <NEW_LINE> if equip_option.lower() == 'weapon' or equip_option == '1': <NEW_LINE> <INDENT> characters.hero.equip_weapon() <NEW_LINE> <DEDENT> elif equip_option.lower() == 'armor' or equip_option == '2': <NEW_LINE> <INDENT> characters.hero.equip_armor() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pass
|
Allows player to change their equipment
|
625941bdbf627c535bc130bf
|
def euclid_dist(point, centroid): <NEW_LINE> <INDENT> dist = 0.0 <NEW_LINE> point_2 = centroid.location <NEW_LINE> for p in enumerate(point): <NEW_LINE> <INDENT> p_val = p[1] <NEW_LINE> index = p[0] <NEW_LINE> q_val = point_2[index] <NEW_LINE> pq = math.pow((p_val - q_val), 2) <NEW_LINE> dist += pq <NEW_LINE> <DEDENT> dist = math.sqrt(dist) <NEW_LINE> return dist
|
Returns a double representing the Euclidean distance between two points
:param point:
a data point
:param centroid:
a Centroid object
:return:
distance between data point and Centroid object
|
625941bd5e10d32532c5ee18
|
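A hypothetical minimal setup to exercise `euclid_dist`; per the docstring, the centroid only needs a `.location` attribute:

```python
import math  # needed by euclid_dist's module

class Centroid:
    def __init__(self, location):
        self.location = location

print(euclid_dist([0.0, 0.0], Centroid([3.0, 4.0])))  # -> 5.0
```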
def apibakjson(code=0, data=None, msg='error', appid=None, key=None, datatype='json'): <NEW_LINE> <INDENT> bakJson = {} <NEW_LINE> bakJson['code'] = code <NEW_LINE> bakJson['msg'] = msg <NEW_LINE> bakJson['appid'] = appid <NEW_LINE> bakJson['datatype'] = datatype <NEW_LINE> bakJson['timestamp'] = str(jgpycshare.DateTime.DateTime().Now().thisDate) <NEW_LINE> bakJson['data'] = data <NEW_LINE> bakJson['sign'] = 'sign' <NEW_LINE> bakJson['ranstr'] = str(uuid.uuid1()) <NEW_LINE> return jgpycshare.JsonTools.arraytojson(bakJson)
|
:param code : 10000 means the API call succeeded; for the call result, refer to the business response parameters in the corresponding API documentation
:param data:
:param msg:
:param appid:
:param key:
:param datatype:
:return:
|
625941bd01c39578d7e74d2c
|
def get_backup_path(self) -> Tuple[Path, Path]: <NEW_LINE> <INDENT> i = 0 <NEW_LINE> basebackup_path = Path(self.basebackup_path) <NEW_LINE> incoming_basebackup_path = Path(self.basebackup_path + "_incoming") <NEW_LINE> local_repo_root = basebackup_path.parent <NEW_LINE> relative_basebackup_dir = basebackup_path.relative_to(local_repo_root) <NEW_LINE> while True: <NEW_LINE> <INDENT> tsfilename = "{}_{}".format(datetime.datetime.utcnow().strftime("%Y-%m-%d_%H-%M"), i) <NEW_LINE> basebackup_path = relative_basebackup_dir / tsfilename <NEW_LINE> local_basebackup_path = incoming_basebackup_path / tsfilename <NEW_LINE> if not local_basebackup_path.exists(): <NEW_LINE> <INDENT> local_basebackup_path.mkdir(exist_ok=True) <NEW_LINE> return basebackup_path, local_basebackup_path <NEW_LINE> <DEDENT> i += 1
|
Build a unique backup path
FIXME: this should look at the object storage, not the local incoming
dir.
|
625941bd24f1403a92600a5a
|
def render_text(self, outfd, data): <NEW_LINE> <INDENT> outfd.write("Ascii Scancode\n") <NEW_LINE> for c, s in data: <NEW_LINE> <INDENT> outfd.write("{0} (0x{1:02x}) 0x{2:02x}\n".format(self.format_char(c), ord(c), s))
|
Displays the character codes
|
625941bdadb09d7d5db6c682
|
def stop(self): <NEW_LINE> <INDENT> self._is_running = False <NEW_LINE> time.sleep(self._sampling_interval_sec) <NEW_LINE> self.tag.disconnect() <NEW_LINE> log.info("Disconnected with SensorTag Device: {0} with MAC_ADDRESS: {1} Successfully!". format(self.device_name, self.device_mac))
|
Disconnect from SensorTag and stops the SensorTagCollector Thread.
:return:
|
625941bd379a373c97cfaa36
|
def test_ap_wpa2_tdls_concurrent_init2(dev, apdev): <NEW_LINE> <INDENT> hapd = start_ap_wpa2_psk(apdev[0]) <NEW_LINE> wlantest_setup(hapd) <NEW_LINE> connect_2sta_wpa2_psk(dev, hapd) <NEW_LINE> dev[1].request("SET tdls_testing 0x80") <NEW_LINE> setup_tdls(dev[0], dev[1], hapd)
|
Concurrent TDLS setup initiation (reverse)
|
625941bdbd1bec0571d90528
|
def calculate_nearby_relevance_tuple(group, row, col_name, ngrams): <NEW_LINE> <INDENT> ngrams = list(range(1, ngrams + 1)) <NEW_LINE> weighted_ratings = {rating: {ngram: [0,0] for ngram in ngrams} for rating in range(1,5)} <NEW_LINE> for i, group_row in group.iterrows(): <NEW_LINE> <INDENT> if group_row['id'] != row['id']: <NEW_LINE> <INDENT> for ngram in ngrams: <NEW_LINE> <INDENT> similarity = get_n_gram_string_similarity(row[col_name], group_row[col_name], ngram) <NEW_LINE> weighted_ratings[group_row['median_relevance']][ngram][1] += similarity <NEW_LINE> weighted_ratings[group_row['median_relevance']][ngram][0] += 1 <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return weighted_ratings
|
Takes the group of rows for a particular query ("group") and a row within that
group ("row") and returns a dictionary of "similarity" calculations comparing row to the rest
of the rows in group; these values are used to create similarity features for row.
|
625941bd796e427e537b04b4
|
def add_group1(): <NEW_LINE> <INDENT> group1 = tk.LabelFrame(left_frame, text="Link parameters", padx=5, pady=5 ) <NEW_LINE> group1.grid(row=0, column=1, sticky=tk.NW) <NEW_LINE> '''Fields for group-1 includes Params 1,2,3,4,5,6''' <NEW_LINE> self.link_param1 = tk.StringVar() <NEW_LINE> self.link_param2 = tk.StringVar() <NEW_LINE> self.link_param3 = tk.StringVar() <NEW_LINE> self.link_param4 = tk.StringVar() <NEW_LINE> self.link_param5 = tk.StringVar() <NEW_LINE> self.link_param6 = tk.StringVar() <NEW_LINE> lp21 = tk.Label(group1, text="Param1") <NEW_LINE> lp21.grid(row=1, column=1, sticky=(tk.W, tk.E)) <NEW_LINE> self.param1_entry = tk.Entry(group1, width=31, textvariable=self.link_param1) <NEW_LINE> self.param1_entry.grid(row=1, column=2, sticky=(tk.W, tk.E)) <NEW_LINE> lp22 = tk.Label(group1, text="Param2") <NEW_LINE> lp22.grid(row=2, column=1, sticky=(tk.W, tk.E)) <NEW_LINE> self.param2_entry = tk.Entry(group1, width=31, textvariable=self.link_param2) <NEW_LINE> self.param2_entry.grid(row=2, column=2, sticky=(tk.W, tk.E)) <NEW_LINE> lp23 = tk.Label(group1, text="Param3") <NEW_LINE> lp23.grid(row=3, column=1, sticky=(tk.W, tk.E)) <NEW_LINE> self.param3_entry = tk.Entry(group1, width=31, textvariable=self.link_param3) <NEW_LINE> self.param3_entry.grid(row=3, column=2, sticky=(tk.W, tk.E)) <NEW_LINE> lp24 = tk.Label(group1, text="Param4") <NEW_LINE> lp24.grid(row=4, column=1, sticky=(tk.W, tk.E)) <NEW_LINE> self.param4_entry = tk.Entry(group1, width=31, textvariable=self.link_param4) <NEW_LINE> self.param4_entry.grid(row=4, column=2, sticky=(tk.W, tk.E) ) <NEW_LINE> lp25 = tk.Label(group1, text="Param5") <NEW_LINE> lp25.grid(row=5, column=1, sticky=(tk.W, tk.E)) <NEW_LINE> self.param5_entry = tk.Entry(group1, width=31, textvariable=self.link_param5) <NEW_LINE> self.param5_entry.grid(row=5, column=2, sticky=(tk.W, tk.E)) <NEW_LINE> lp26 = tk.Label(group1, text="Param6") <NEW_LINE> lp26.grid(row=6, column=1, sticky=(tk.W, tk.E)) <NEW_LINE> self.param6_entry = tk.Entry(group1, width=31, textvariable=self.link_param6) <NEW_LINE> self.param6_entry.grid(row=6, column=2, sticky=(tk.W, tk.E))
|
Add widget with Link params, URL process and proxies
|
625941bd167d2b6e31218a87
|
def unassign_runtime_property_from_resource(property_name, ctx_instance): <NEW_LINE> <INDENT> value = ctx_instance.runtime_properties.pop(property_name, None) <NEW_LINE> ctx.logger.debug( 'Unassigned {0} runtime property: {1}'.format(property_name, value))
|
Pops a runtime_property and reports to debug.
:param property_name: The runtime_property to remove.
:param ctx_instance: The CTX Node-Instance Context.
:param ctx: The Cloudify ctx context.
|
625941bd2c8b7c6e89b356b3
|
@tf_export('keras.backend.placeholder') <NEW_LINE> def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None): <NEW_LINE> <INDENT> if context.executing_eagerly(): <NEW_LINE> <INDENT> raise ValueError( '`keras.backend.placeholder` is not supported with eager execution.') <NEW_LINE> <DEDENT> if dtype is None: <NEW_LINE> <INDENT> dtype = floatx() <NEW_LINE> <DEDENT> if not shape: <NEW_LINE> <INDENT> if ndim: <NEW_LINE> <INDENT> shape = tuple([None for _ in range(ndim)]) <NEW_LINE> <DEDENT> <DEDENT> if sparse: <NEW_LINE> <INDENT> x = array_ops.sparse_placeholder(dtype, shape=shape, name=name) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> x = array_ops.placeholder(dtype, shape=shape, name=name) <NEW_LINE> <DEDENT> x._uses_learning_phase = False <NEW_LINE> return x
|
Instantiates a placeholder tensor and returns it.
Arguments:
shape: Shape of the placeholder
(integer tuple, may include `None` entries).
ndim: Number of axes of the tensor.
At least one of {`shape`, `ndim`} must be specified.
If both are specified, `shape` is used.
dtype: Placeholder type.
sparse: Boolean, whether the placeholder should have a sparse type.
name: Optional name string for the placeholder.
Raises:
ValueError: If called with eager execution.
Returns:
Tensor instance (with Keras metadata included).
Examples:
```python
>>> from keras import backend as K
>>> input_ph = K.placeholder(shape=(2, 4, 5))
>>> input_ph
<tf.Tensor 'Placeholder_4:0' shape=(2, 4, 5) dtype=float32>
```
|
625941bd1f037a2d8b9460f0
|
def add_stem(self, sl): <NEW_LINE> <INDENT> stem = self.prepare_stem(sl.stem) <NEW_LINE> statesAdded = self.add_string(stem, sl) <NEW_LINE> if self.verbose > 1: <NEW_LINE> <INDENT> print('stem:', sl.stem, ';', statesAdded, 'states added.')
|
Add a SubLexeme object to the transducer.
|
625941bdb57a9660fec33772
|
def NegativeIntZeroIncluded(p_question: str): <NEW_LINE> <INDENT> while True: <NEW_LINE> <INDENT> number = Int(ERASE_LINE + p_question) <NEW_LINE> if (number > 0): <NEW_LINE> <INDENT> print(CURSOR_UP_ONE + ERASE_LINE + bcolors.FAIL + "***Enter a negative number you dangus." + bcolors.ENDC, end="\r") <NEW_LINE> time.sleep(3) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return number
|
Validates the negative Int that the user entered.
0 is counted as a negative number.
Keyword arguments:
p_question -- question posed to the user
|
625941bd442bda511e8be30d
|
def init_worksheet_dict(self): <NEW_LINE> <INDENT> self._worksheet_dict = dict([(worksheet['Name'], worksheet) for worksheet in [element for element in self._attributes['children'] if element['name'] == 'Worksheet']]) <NEW_LINE> return self._worksheet_dict
|
Initialize the worksheet dictionary.
|
625941bd1f5feb6acb0c4a45
|
def primer_number(self): <NEW_LINE> <INDENT> return [i for i in primer(self.number)]
|
Find prime numbers within a given range.
|
625941bd67a9b606de4a7dad
|
def _correct_ilepi(kspace, p): <NEW_LINE> <INDENT> if int(p['sliceorder']) == 1: <NEW_LINE> <INDENT> slices = kspace.shape[2] <NEW_LINE> c = np.zeros(kspace.shape,dtype=kspace.dtype) <NEW_LINE> if int(p['ns']) % 2 == 0: <NEW_LINE> <INDENT> c[...,0::2,:,:] = kspace[...,:slices//2,:,:] <NEW_LINE> c[...,1::2,:,:] = kspace[...,slices//2:,:,:] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> c[...,0::2,:,:] = kspace[...,:(slices+1)//2,:,:] <NEW_LINE> c[...,1::2,:,:] = kspace[...,(slices-1)//2+1:,:,:] <NEW_LINE> <DEDENT> return c <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return kspace
|
Reorder slices if acquisition was interleaved
|
625941bd5fcc89381b1e15ae
|
def activate_video(self): <NEW_LINE> <INDENT> self.dialogWidget.deactivate() <NEW_LINE> self.videoWidget.activate() <NEW_LINE> self.stackLayout.setCurrentIndex(0)
|
Method to activate the videoWidget and face recognition thread
|
625941bd57b8e32f5248338b
|
def silhouette_method(data): <NEW_LINE> <INDENT> sil = [] <NEW_LINE> kmax = 10 <NEW_LINE> for k in range(2, kmax + 1): <NEW_LINE> <INDENT> kmeans = model_kmeans(data, k) <NEW_LINE> labels = kmeans.labels_ <NEW_LINE> sil.append(silhouette_score(data, labels, metric='euclidean')) <NEW_LINE> <DEDENT> k_cluster = pd.DataFrame({'k': range(2, kmax + 1), 'silhouette score': sil}) <NEW_LINE> fig = go.Figure(data=go.Scatter(x=k_cluster['k'], y=k_cluster['silhouette score'])) <NEW_LINE> fig.update_layout(title='Silhouette Methods for finding best K values in KMeans', xaxis_title='K', yaxis_title='Silhouette Score') <NEW_LINE> return fig
|
The silhouette method is a secondary method used when the elbow method is too ambiguous for choosing the k value
:param data: 2 columns dataframe
:return: Plotly Figures
|
625941bda219f33f3462885e
|
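A brief usage sketch; `scaled_features` stands in for the hypothetical two-column DataFrame the docstring expects, and `model_kmeans` is the project's own KMeans helper:

```python
# Hedged usage: pick k where the plotted silhouette score peaks.
fig = silhouette_method(scaled_features)
fig.show()
```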
def _connect(self, user=None, token=None, **kwargs): <NEW_LINE> <INDENT> if user and token: <NEW_LINE> <INDENT> url = urllib.parse.urlparse(self.config.url) <NEW_LINE> api_url = "{scheme}://{user}:{token}@{loc}{path}".format( scheme=url.scheme, user=user, token=token, loc=url.netloc, path=url.path) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> api_url = self.config.url <NEW_LINE> <DEDENT> if api_url.strip()[-1] != '/': <NEW_LINE> <INDENT> api_url = f'{api_url}/' <NEW_LINE> <DEDENT> self._server = xmlrpc.client.ServerProxy(api_url) <NEW_LINE> return self._server
|
Connect to the remote server API
*user* is the name of the user to connect to the lab
*token* is the token associated with the user to connect to the lab
|
625941bdd8ef3951e324342e
|
def __init__(self, file_name, line, column): <NEW_LINE> <INDENT> self.name = None <NEW_LINE> self.cpp_name = None <NEW_LINE> self.description = None <NEW_LINE> self.type = None <NEW_LINE> self.ignore = False <NEW_LINE> self.optional = False <NEW_LINE> self.default = None <NEW_LINE> self.supports_doc_sequence = False <NEW_LINE> self.serialize_op_msg_request_only = False <NEW_LINE> self.constructed = False <NEW_LINE> super(Field, self).__init__(file_name, line, column)
|
Construct a Field.
|
625941bd498bea3a759b99a1
|
def __init__(self, css_class=None, col_headers=[], title=None): <NEW_LINE> <INDENT> self._css_class = css_class <NEW_LINE> self._header = ''.join(['<th>' + h + '</th>' for h in col_headers]) <NEW_LINE> self._title = title <NEW_LINE> self._rows = [] <NEW_LINE> self._footer = '</table>'
|
css_class: Optional CSS class for the table
col_headers: A list of strings representing table column headings
title: Optional table title
|
625941bd63f4b57ef0001011
|
def QA_util_time_delay(time_=0): <NEW_LINE> <INDENT> def _exec(func): <NEW_LINE> <INDENT> threading.Timer(time_, func).start() <NEW_LINE> <DEDENT> return _exec
|
'This is a reusable delayed-execution helper, e.g. for use as a @decorator. It uses threading's Timer for the delay so the process is not blocked: if two functions are dispatched at the same time and only the first needs a delay, using sleep would block the second one as well.'
:param time_:
:return:
|
625941bd3539df3088e2e23c
|
def get_lastlastFriday(d): <NEW_LINE> <INDENT> lastSunday = get_lastweek_from(d) <NEW_LINE> lastThursday = lastSunday - datetime.timedelta(days=3) <NEW_LINE> return lastThursday
|
Return 00:00:00 of the Friday before last.
|
625941bd31939e2706e4cd5f
|
def __init__(self): <NEW_LINE> <INDENT> self.isActive = True <NEW_LINE> self.queue = Queue() <NEW_LINE> self.thread = Thread(target=self._messengerThreadMain) <NEW_LINE> self.listeners = [] <NEW_LINE> self.start()
|
Initialize
|
625941bd4a966d76dd550efe
|
def retrieve(self, request, slug=''): <NEW_LINE> <INDENT> queryset = BlogPost.objects.all() <NEW_LINE> post = get_object_or_404(queryset, slug=slug) <NEW_LINE> serializer = BlogPostSerializer(post, context={'request': request}) <NEW_LINE> return Response(serializer.data)
|
Return BlogPost regardless of Draft state. If you know the URL, you can read the draft.
|
625941bd3c8af77a43ae368f
|
def toqa(m): <NEW_LINE> <INDENT> m['status'] = 'qa-overlap' <NEW_LINE> m['pos'] = listify(m['pos']) <NEW_LINE> m['eqpos'] = listify(m['eqpos'])
|
Change status and pos/eqpos arrays
|
625941bd046cf37aa974cc3b
|
def column_alias(cell, names): <NEW_LINE> <INDENT> column = slugify(cell.column or '', sep='_') <NEW_LINE> column = column.strip('_') <NEW_LINE> column = 'column' if not len(column) else column <NEW_LINE> name, i = column, 2 <NEW_LINE> while name in names: <NEW_LINE> <INDENT> name = '%s_%s' % (name, i) <NEW_LINE> i += 1 <NEW_LINE> <DEDENT> return name
|
Generate a normalized version of the column name.
|
625941bda05bb46b383ec716
|
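A usage sketch; `Cell` is a hypothetical stand-in with a `.column` attribute, and `slugify` is assumed to be normality-style (lowercases and joins tokens with `sep`):

```python
class Cell:
    def __init__(self, column):
        self.column = column

names = {'amount'}
print(column_alias(Cell('Amount'), names))  # -> 'amount_2' (deduplicated)
print(column_alias(Cell(''), names))        # -> 'column' (empty-header fallback)
```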
def subm_set_up_throttleI(self): <NEW_LINE> <INDENT> self.strGuid = self.guid <NEW_LINE> throttle_str = "" <NEW_LINE> if self.m_CurrentThrottle == 0: <NEW_LINE> <INDENT> throttle_str = "Loiter" <NEW_LINE> <DEDENT> elif self.m_CurrentThrottle == 1: <NEW_LINE> <INDENT> throttle_str = "Full" <NEW_LINE> <DEDENT> elif self.m_CurrentThrottle == 2: <NEW_LINE> <INDENT> throttle_str = "Flank" <NEW_LINE> <DEDENT> elif self.m_CurrentThrottle == 3: <NEW_LINE> <INDENT> throttle_str = "Cruise" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> return super().set_throttle(throttle_str)
|
Raise the throttle.
|
625941bdd8ef3951e324342f
|
def test_evaluating_quote(): <NEW_LINE> <INDENT> assert_equals("foo", evaluate(["quote", "foo"], Environment())) <NEW_LINE> assert_equals([1, 2, False], evaluate(["quote", [1, 2, False]], Environment())) <NEW_LINE> assert_equals([], evaluate(["quote", []], Environment()))
|
TEST 2.3: When a call is done to the `quote` form, the argument should
be returned without being evaluated.
(quote foo) -> foo
|
625941bd460517430c39407e
|
def test_svn_export(self): <NEW_LINE> <INDENT> base_url = "https://desi.lbl.gov/svn/code/desimodel/{0}/data" <NEW_LINE> cmd = svn_export() <NEW_LINE> self.assertEqual(cmd[2], base_url.format('trunk')) <NEW_LINE> cmd = svn_export('trunk') <NEW_LINE> self.assertEqual(cmd[2], base_url.format('trunk')) <NEW_LINE> cmd = svn_export('branches/v3') <NEW_LINE> self.assertEqual(cmd[2], base_url.format('branches/v3')) <NEW_LINE> cmd = svn_export('1.2.3') <NEW_LINE> self.assertEqual(cmd[2], base_url.format('tags/1.2.3'))
|
Test svn export command.
|
625941bd6aa9bd52df036c94
|
def _parseReply(self, tag, elements, data): <NEW_LINE> <INDENT> status = unpack_from('<B', data, 48)[0] <NEW_LINE> extendedStatus = unpack_from('<B', data, 49)[0] <NEW_LINE> if status == 0 or status == 6: <NEW_LINE> <INDENT> tagName, basetag, index = TagNameParser(tag, 0) <NEW_LINE> datatype = self.KnownTags[basetag][0] <NEW_LINE> CIPFormat = self.CIPTypes[datatype][2] <NEW_LINE> if elements == 1: <NEW_LINE> <INDENT> if datatype == 211: <NEW_LINE> <INDENT> returnvalue = _getAtomicArrayValue(CIPFormat, tag, data) <NEW_LINE> <DEDENT> elif datatype == 160: <NEW_LINE> <INDENT> returnvalue = _getSingleString( data) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> returnvalue = unpack_from(CIPFormat, data, 52)[0] <NEW_LINE> <DEDENT> s = tag.split(".") <NEW_LINE> doo = s[len(s)-1] <NEW_LINE> if doo.isdigit(): <NEW_LINE> <INDENT> returnvalue = _getBitOfWord(s, returnvalue) <NEW_LINE> <DEDENT> return returnvalue <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> Array = [] <NEW_LINE> if datatype == 160: <NEW_LINE> <INDENT> dataSize = self.KnownTags[basetag][1]-30 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> dataSize = self.CIPTypes[datatype][0] <NEW_LINE> <DEDENT> numbytes = len(data)-dataSize <NEW_LINE> counter = 0 <NEW_LINE> self.Offset = 0 <NEW_LINE> stringLen = self.KnownTags[basetag][1]-30 <NEW_LINE> for i in range(elements): <NEW_LINE> <INDENT> index = 52+(counter*dataSize) <NEW_LINE> self.Offset += dataSize <NEW_LINE> if datatype == 160: <NEW_LINE> <INDENT> index = 54+(counter*stringLen) <NEW_LINE> NameLength = unpack_from('<L', data, index)[0] <NEW_LINE> returnvalue = data[index+4:index+4+NameLength] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> returnvalue = unpack_from(CIPFormat, data, index)[0] <NEW_LINE> <DEDENT> Array.append(returnvalue) <NEW_LINE> counter += 1 <NEW_LINE> if index == numbytes and status == 6: <NEW_LINE> <INDENT> index = 0 <NEW_LINE> counter = 0 <NEW_LINE> tagIOI = _buildTagIOI(self, tag, isBoolArray=False) <NEW_LINE> readIOI = _addPartialReadIOI(self, tagIOI, elements) <NEW_LINE> eipHeader = _buildEIPHeader(self, readIOI) <NEW_LINE> self.Socket.send(eipHeader) <NEW_LINE> data = self.Socket.recv(1024) <NEW_LINE> status = unpack_from('<h', data, 48)[0] <NEW_LINE> numbytes = len(data)-dataSize <NEW_LINE> <DEDENT> <DEDENT> return Array <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if status in cipErrorCodes.keys(): <NEW_LINE> <INDENT> err = cipErrorCodes[status] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> err = 'Unknown error' <NEW_LINE> <DEDENT> return "Failed to read tag: " + tag + ' - ' + err
|
Take the received packet data on a read and
extract the value out of it. This is a little
crazy because different data types (DINT/STRING/REAL)
and different types of reads (Single/Array/Partial) all
have to be handled differently
|
625941bd3346ee7daa2b2c5b
|
def display_with_information(processed_image): <NEW_LINE> <INDENT> if processed_image.image is not None: <NEW_LINE> <INDENT> image_to_display = processed_image.image <NEW_LINE> line = processed_image.darts_axis <NEW_LINE> x = processed_image.bounding_box.x <NEW_LINE> y = processed_image.bounding_box.y <NEW_LINE> w = processed_image.bounding_box.w <NEW_LINE> h = processed_image.bounding_box.h <NEW_LINE> if line is not None: <NEW_LINE> <INDENT> x_value = int(x + processed_image.darts_axis[len(processed_image.darts_axis) - 1]) <NEW_LINE> image_to_display = cv2.line(image_to_display, (int(x + line[0]), y), (x_value, processed_image.darts_board_offset), (0, 255, 0), 3) <NEW_LINE> image_to_display = cv2.line(image_to_display, (x_value, processed_image.darts_board_offset + 70), (x_value, processed_image.darts_board_offset - 20), (0, 0, 255), 1) <NEW_LINE> image_to_display = cv2.line(image_to_display, (x_value, processed_image.darts_board_offset + 30), (x_value, processed_image.darts_board_offset + 70), (0, 0, 255), 10) <NEW_LINE> <DEDENT> image_to_display = cv2.line(image_to_display, (0, processed_image.darts_board_offset), (processed_image.image_width, processed_image.darts_board_offset), (255, 0, 0), 5) <NEW_LINE> image_to_display = cv2.rectangle(image_to_display, (x, y), (x + w, y + h), (0, 255, 0), 1) <NEW_LINE> cv2.imshow('device_' + str(processed_image.device_number), image_to_display) <NEW_LINE> cv2.waitKey(1)
|
Displays a processed image with all additional information (like dartboard level, darts axis etc.) in a
separate window.
:param processed_image: The processed image which wraps the image itself and all additional information
:type processed_image: ProcessedImage
:return: None
:rtype: None
|
625941bd9b70327d1c4e0cc5
|
def update(self, gamearea, env=None): <NEW_LINE> <INDENT> self.animate()
|
updates entity
Parameters
----------
gamearea | object: area object
env | Optional[object]: environment object --defaults to None
|
625941bd283ffb24f3c557fd
|
@mock.patch("requests.get") <NEW_LINE> def test_get_without_leading_slash_on_path(mock_get): <NEW_LINE> <INDENT> def mock_get_function(url, params, timeout): <NEW_LINE> <INDENT> assert url == "http://www.example.com/api/stream" <NEW_LINE> assert params is None <NEW_LINE> return mock.Mock() <NEW_LINE> <DEDENT> mock_get.side_effect = mock_get_function <NEW_LINE> client = api_client.Client("http://www.example.com/api") <NEW_LINE> client.get("stream") <NEW_LINE> assert mock_get.call_count == 1
|
get() should handle paths with no leading slash, even when the root_url doesn't have a trailing slash.
|
625941bd3539df3088e2e23d
|
def handle_endtag(self, tag): <NEW_LINE> <INDENT> if self.DEBUG: <NEW_LINE> <INDENT> print ('Encountered an end tag :', tag) <NEW_LINE> <DEDENT> if tag in self.sanitizelist: <NEW_LINE> <INDENT> self.level -= 1 <NEW_LINE> return <NEW_LINE> <DEDENT> if tag in self.unclosedTags: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if self.isNotPurify or tag in self.whitelist_keys: <NEW_LINE> <INDENT> self.data.append('</%s>' % tag)
|
Handler for end-tag processing (overridden, private)
|
625941bd7d43ff24873a2b8f
|
def make(self, *args, **kwargs): <NEW_LINE> <INDENT> return _filter_swig.fir_filter_ccf_sptr_make(self, *args, **kwargs)
|
make(fir_filter_ccf_sptr self, int decimation, pmt_vector_float taps) -> fir_filter_ccf_sptr
FIR filter with gr_complex input, gr_complex output, and float taps.
The fir_filter_XXX blocks create finite impulse response (FIR) filters that perform the convolution in the time domain:
The taps are a C++ vector (or Python list) of values of the type specified by the third letter in the block's suffix. For this block, the value is of type float. Taps can be created using the firdes or optfir tools.
These versions of the filter can also act as down-samplers (or decimators) by specifying an integer value for .
Constructor Specific Documentation:
FIR filter with gr_complex input, gr_complex output, and float taps.
Args:
decimation : set the integer decimation rate
taps : a vector/list of taps of type float
|
625941bdac7a0e7691ed3fca
|
def __init__(self, oFailureReasonData, sMode, oDisp): <NEW_LINE> <INDENT> sTitle = 'Failure Reason'; <NEW_LINE> if sMode == WuiFormContentBase.ksMode_Add: <NEW_LINE> <INDENT> sTitle = 'Add' + sTitle; <NEW_LINE> <DEDENT> elif sMode == WuiFormContentBase.ksMode_Edit: <NEW_LINE> <INDENT> sTitle = 'Edit' + sTitle; <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> assert sMode == WuiFormContentBase.ksMode_Show; <NEW_LINE> <DEDENT> WuiFormContentBase.__init__(self, oFailureReasonData, sMode, 'FailureReason', oDisp, sTitle);
|
Prepare & initialize parent
|
625941bd97e22403b379ce8a
|
def hit_long(self): <NEW_LINE> <INDENT> while self.hand.get_value() < 17: <NEW_LINE> <INDENT> self.hit()
|
The dealer hits until 17. In pyBlackJack dealer stands on soft 17.
|
625941bd8a43f66fc4b53f5a
|
def _get_day_of_month(other, day_option): <NEW_LINE> <INDENT> if day_option == "start": <NEW_LINE> <INDENT> return 1 <NEW_LINE> <DEDENT> elif day_option == "end": <NEW_LINE> <INDENT> days_in_month = _days_in_month(other) <NEW_LINE> return days_in_month <NEW_LINE> <DEDENT> elif day_option is None: <NEW_LINE> <INDENT> raise NotImplementedError() <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> raise ValueError(day_option)
|
Find the day in `other`'s month that satisfies a BaseCFTimeOffset's
onOffset policy, as described by the `day_option` argument.
Parameters
----------
other : cftime.datetime
day_option : 'start', 'end'
'start': returns 1
'end': returns last day of the month
Returns
-------
day_of_month : int
|
625941bdbe383301e01b537e
|
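A small sketch, assuming cftime is installed; `_days_in_month` is the module-private helper the function body references:

```python
import cftime

t = cftime.DatetimeNoLeap(2000, 2, 15)
print(_get_day_of_month(t, "start"))  # -> 1
print(_get_day_of_month(t, "end"))    # -> 28 (the noleap calendar has no Feb 29)
```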
@connect.route('/api/littlecloud', methods=['GET']) <NEW_LINE> @login_required <NEW_LINE> def get_all_littleclouds(): <NEW_LINE> <INDENT> littleclouds = LittleCloud.query.all() <NEW_LINE> dic = [] <NEW_LINE> for i in littleclouds: <NEW_LINE> <INDENT> groups = [] <NEW_LINE> for a in i.appgroups: <NEW_LINE> <INDENT> groups.append({"id": a.id, "name": a.name}) <NEW_LINE> <DEDENT> item = { "id": i.id, "name": i.name, "url": i.url, "is_connectible": i.is_connectible, "is_connected": i.is_connected, "phone": i.phone, "email": i.email, "ip": str(i.ip), "port": i.port, "protocol": i.protocol, "appgroups": groups, } <NEW_LINE> dic.append(item) <NEW_LINE> <DEDENT> res = {"result": True, "data": dic, "message": u"Get all littleclouds successfully"} <NEW_LINE> return jsonify(res)
|
[API] Get the data of all LittleClouds.
:return:
|
625941bd31939e2706e4cd60
|
def _increasing_z_indexes(self): <NEW_LINE> <INDENT> return array([i[2].value for i in self.samples]).argsort().tolist()
|
Returns a list of sample indices sorted by increasing z-coordinate.
|
625941bd73bcbd0ca4b2bf6f
|
def compute_haploscore(segment, snpdata, inddata, geno_penalty, switch_penalty): <NEW_LINE> <INDENT> start, end = [snpdata.get_snp(rsid).index for rsid in [segment.rstart, segment.rend]] <NEW_LINE> individual1 = inddata.get_individual(segment.iid1) <NEW_LINE> individual2 = inddata.get_individual(segment.iid2) <NEW_LINE> haps1 = individual1.haplotypes[:, start:(end+1)] <NEW_LINE> haps2 = individual2.haplotypes[:, start:(end+1)] <NEW_LINE> def _get_current_genotype_penalty(i): <NEW_LINE> <INDENT> _penalty = [] <NEW_LINE> for h1 in xrange(len(haps1)): <NEW_LINE> <INDENT> m1 = haps1[h1, i] <NEW_LINE> for h2 in xrange(len(haps2)): <NEW_LINE> <INDENT> m2 = haps2[h2, i] <NEW_LINE> _penalty.append((m1 != m2) * geno_penalty) <NEW_LINE> <DEDENT> <DEDENT> return np.array(_penalty, dtype=np.float) <NEW_LINE> <DEDENT> prevscore = _get_current_genotype_penalty(0) <NEW_LINE> nsnp = end - start + 1 <NEW_LINE> for i in xrange(1, nsnp): <NEW_LINE> <INDENT> nextswitch = prevscore + switch_penalty <NEW_LINE> nextscore = _get_current_genotype_penalty(i) + nextswitch.min(1) <NEW_LINE> prevscore = nextscore <NEW_LINE> <DEDENT> return min(prevscore) / float(nsnp)
|
Return the HaploScore of the given IBD segment based on the haplotypes of
the individuals and the expected genotype and switch error penalties.
|
625941bd5fdd1c0f98dc0124
|
def next_group_boundary(self, index): <NEW_LINE> <INDENT> bound = None <NEW_LINE> for b in self.group_boundaries: <NEW_LINE> <INDENT> if index < b[1]: <NEW_LINE> <INDENT> bound = b <NEW_LINE> break <NEW_LINE> <DEDENT> <DEDENT> return bound
|
Return the next match group boundaries
in relation to the index. Return 'None'
if there are no more boundaries.
|
625941bd56b00c62f0f14549
|
def deserialize(self, str): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if self.traj is None: <NEW_LINE> <INDENT> self.traj = pr_msgs.msg.JointTraj() <NEW_LINE> <DEDENT> end = 0 <NEW_LINE> start = end <NEW_LINE> end += 4 <NEW_LINE> (length,) = _struct_I.unpack(str[start:end]) <NEW_LINE> self.traj.positions = [] <NEW_LINE> for i in range(0, length): <NEW_LINE> <INDENT> val1 = pr_msgs.msg.Joints() <NEW_LINE> start = end <NEW_LINE> end += 4 <NEW_LINE> (length,) = _struct_I.unpack(str[start:end]) <NEW_LINE> pattern = '<%sd'%length <NEW_LINE> start = end <NEW_LINE> end += struct.calcsize(pattern) <NEW_LINE> val1.j = struct.unpack(pattern, str[start:end]) <NEW_LINE> self.traj.positions.append(val1) <NEW_LINE> <DEDENT> start = end <NEW_LINE> end += 4 <NEW_LINE> (length,) = _struct_I.unpack(str[start:end]) <NEW_LINE> pattern = '<%sf'%length <NEW_LINE> start = end <NEW_LINE> end += struct.calcsize(pattern) <NEW_LINE> self.traj.blend_radius = struct.unpack(pattern, str[start:end]) <NEW_LINE> start = end <NEW_LINE> end += 4 <NEW_LINE> (self.traj.options,) = _struct_I.unpack(str[start:end]) <NEW_LINE> return self <NEW_LINE> <DEDENT> except struct.error as e: <NEW_LINE> <INDENT> raise genpy.DeserializationError(e)
|
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
|
625941bd187af65679ca5010
|
def Col(self, *args) -> "void": <NEW_LINE> <INDENT> return _gskernel.GsTile_Col(self, *args)
|
*Overload 1:*
Get the column
|
*Overload 2:*
Set the column
|
625941bd50485f2cf553cc8b
|
def increase_speed(self): <NEW_LINE> <INDENT> self.ship_speed_factor *= self.speedup_scale <NEW_LINE> self.bullet_speed_factor *= self.speedup_scale <NEW_LINE> self.alien_speed_factor *= self.speedup_scale <NEW_LINE> self.alien_points = int(self.score_scale * self.alien_points) <NEW_LINE> print(self.alien_points)
|
Increase the game speed settings.
|
625941bdcc40096d61595844
|
def __init__(self, genes): <NEW_LINE> <INDENT> self.genes = {gene: None for gene in genes} <NEW_LINE> self.cache_attrs = False <NEW_LINE> self._cache = {}
|
Args:
genes: list of the genes that each `Evolvable` must have
Note:
- Subclasses of this ABC should call super on this method.
- `_cache` and `cache_attrs` are for performance optimizations.
|
625941bda17c0f6771cbdf45
|
def QueryEvaluation(query, Index, Movies, userid, option): <NEW_LINE> <INDENT> if option == 'tf-idf': <NEW_LINE> <INDENT> return tf_idf(query, Index, Movies, userid) <NEW_LINE> <DEDENT> elif option == 'language-model': <NEW_LINE> <INDENT> return LanguageModel(query, Index, Movies, userid) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pass
|
Read the user's option and evaluate the query with the chosen algorithm ('tf-idf' or 'language-model').
|
625941bd3c8af77a43ae3690
|
def __init__(self, instance=None, methods=[]): <NEW_LINE> <INDENT> self.dispatcher = SimpleXMLRPCDispatcher(allow_none=True, encoding=None) <NEW_LINE> if instance is not None: <NEW_LINE> <INDENT> self.dispatcher.register_instance(instance) <NEW_LINE> <DEDENT> for method in methods: <NEW_LINE> <INDENT> self.dispatcher.register_function(method) <NEW_LINE> <DEDENT> self.dispatcher.register_introspection_functions()
|
Create the Windmill XML-RPC dispatcher and register the given instance and methods.
|
625941bd4f6381625f114930
|
def event_m10_30_x3(z12=10302000, z13=100000): <NEW_LINE> <INDENT> ChangeObjState(z12, 70) <NEW_LINE> CompareObjState(8, z12, 30, 0) <NEW_LINE> IsPlayerInsidePoint(8, z13, z13, 0) <NEW_LINE> IsPlayerAnActor(0, 1) <NEW_LINE> DoesActorExist(0, 0) <NEW_LINE> SetConditionGroup(8, 0) <NEW_LINE> assert HostConditionGroup(8) <NEW_LINE> ChangeObjState(z12, 71) <NEW_LINE> assert CompareObjStateId(z12, 40, 0) <NEW_LINE> return 0
|
[Execution] Elevator_Rise
z12: Elevator OBJ instance ID
z13: On point ID
|
625941bd85dfad0860c3ad4c
|
def __init__(self, params, cand_id=None, worker_information=None): <NEW_LINE> <INDENT> if cand_id is None: <NEW_LINE> <INDENT> cand_id = uuid.uuid4().hex <NEW_LINE> <DEDENT> self.cand_id = cand_id <NEW_LINE> self._logger = get_logger(self, extra_info="cand_id " + str(cand_id)) <NEW_LINE> self._logger.debug("Initializing new candidate. Params %s, cand_id %s," "worker_info %s", params, cand_id, worker_information) <NEW_LINE> if not isinstance(params, dict): <NEW_LINE> <INDENT> self._logger.error("No parameter dict given, received %s instead", params) <NEW_LINE> raise ValueError("No parameter dictionary given, received %s " "instead" %params) <NEW_LINE> <DEDENT> self.failed = False <NEW_LINE> self.params = params <NEW_LINE> self.worker_information = worker_information <NEW_LINE> self.last_update_time = time.time() <NEW_LINE> self.generated_time = time.time() <NEW_LINE> self._logger.debug("Finished initializing the candidate.")
|
Initializes the unevaluated candidate object.
Parameters
----------
params : dict of string keys
A dictionary of parameter value. The keys must correspond to the
problem definition.
The dictionary requires one key - and value - per parameter
defined.
cand_id : uuid.UUID, optional
The uuid identifying this candidate. This is used to compare
candidates over server and client borders.
Note that this should only be set explicitly if you are
instantiating an already known candidate with its already known
UUID. Do not explicitly set the uuid for a new candidate!
worker_information : string, optional
This is worker-settable information which might be used for
communicating things necessary for resuming evaluations et cetera.
Raises
------
ValueError
Iff params is not a dictionary.
|
625941bda79ad161976cc037
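A brief usage sketch, assuming the enclosing class is named Candidate and that the module's get_logger helper is available:

# A fresh candidate auto-generates its hex UUID when cand_id is omitted.
cand = Candidate({"learning_rate": 0.01, "max_depth": 4})
print(cand.cand_id)   # e.g. 'f3a1...' (random hex)
print(cand.params)    # {'learning_rate': 0.01, 'max_depth': 4}

# Anything other than a dict raises ValueError, as documented above:
# Candidate([0.01, 4])  # -> ValueError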
|
def friedman_test(*args: dict) -> [float, float, list, list]: <NEW_LINE> <INDENT> k = len(args) <NEW_LINE> if k < 2: <NEW_LINE> <INDENT> raise ValueError("Less than 2 levels") <NEW_LINE> <DEDENT> n = len(args[0]) <NEW_LINE> if len(set([len(v) for v in args])) != 1: <NEW_LINE> <INDENT> raise ValueError("Unequal number of samples") <NEW_LINE> <DEDENT> rankings = [] <NEW_LINE> for i in range(n): <NEW_LINE> <INDENT> row = [col[i] for col in args] <NEW_LINE> row_sort = sorted(row) <NEW_LINE> rankings.append( [row_sort.index(v) + 1 + (row_sort.count(v) - 1) / 2.0 for v in row] ) <NEW_LINE> <DEDENT> rankings_avg = [np.mean([case[j] for case in rankings]) for j in range(k)] <NEW_LINE> rankings_cmp = [ r / np.lib.scimath.sqrt(k * (k + 1) / (6.0 * n)) for r in rankings_avg ] <NEW_LINE> chi2 = ((12 * n) / float((k * (k + 1)))) * ( (sum(r ** 2 for r in rankings_avg)) - ((k * (k + 1) ** 2) / float(4)) ) <NEW_LINE> iman_davenport = ((n - 1) * chi2) / float((n * (k - 1) - chi2)) <NEW_LINE> p_value = 1 - sp.stats.f.cdf(iman_davenport, k - 1, (k - 1) * (n - 1)) <NEW_LINE> return iman_davenport, p_value, rankings_avg, rankings_cmp
|
Performs a Friedman ranking test.
Tests the hypothesis that in a set of k dependent samples groups (where k >= 2) at least two of the groups represent populations with different median values.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group.
Returns
-------
F-value : float
The computed F-value of the test.
p-value : float
The associated p-value from the F-distribution.
rankings : array_like
The ranking for each group.
pivots : array_like
The pivotal quantities for each group.
References
----------
M. Friedman, The use of ranks to avoid the assumption of normality implicit in the analysis of variance, Journal of the American Statistical Association 32 (1937) 674–701.
D.J. Sheskin, Handbook of parametric and nonparametric statistical procedures. crc Press, 2003, Test 25: The Friedman Two-Way Analysis of Variance by Ranks
|
625941bd82261d6c526ab38e
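A short usage sketch with hypothetical accuracy scores for three algorithms on five datasets; the function body expects numpy as np and scipy as sp to be in scope:

import numpy as np
import scipy as sp
import scipy.stats  # friedman_test uses sp.stats.f.cdf

alg_a = [0.80, 0.75, 0.90, 0.85, 0.70]
alg_b = [0.78, 0.77, 0.88, 0.80, 0.72]
alg_c = [0.85, 0.79, 0.91, 0.87, 0.75]

stat, p, ranks, pivots = friedman_test(alg_a, alg_b, alg_c)
print("Iman-Davenport statistic = %.3f, p-value = %.3f" % (stat, p))
print("average rank per algorithm:", ranks)  # rank 1 = smallest values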
|
def change_font(self): <NEW_LINE> <INDENT> label_font = "Courier" <NEW_LINE> font_size = self.point_size.get() <NEW_LINE> label_font = label_font + " " + font_size <NEW_LINE> if self.bold_on.get() == 1: <NEW_LINE> <INDENT> label_font = label_font + " bold" <NEW_LINE> <DEDENT> if self.underline_on.get() == 1: <NEW_LINE> <INDENT> label_font = label_font + " underline" <NEW_LINE> <DEDENT> self.message.config (font = label_font)
|
Apply the selected point size, bold, and underline settings to the message label font.
|
625941bd090684286d50ebd4
|
def _get_minimal_core_reservations(core_resource, cores, chip=None): <NEW_LINE> <INDENT> reservation = None <NEW_LINE> for core in cores: <NEW_LINE> <INDENT> if reservation is None: <NEW_LINE> <INDENT> reservation = slice(core, core + 1) <NEW_LINE> <DEDENT> elif reservation.stop == core: <NEW_LINE> <INDENT> reservation = slice(reservation.start, core + 1) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> yield ReserveResourceConstraint( core_resource, reservation, chip) <NEW_LINE> reservation = slice(core, core + 1) <NEW_LINE> <DEDENT> <DEDENT> if reservation is not None: <NEW_LINE> <INDENT> yield ReserveResourceConstraint(core_resource, reservation, chip)
|
Yield a minimal set of
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`
objects which reserve the specified set of cores.
Parameters
----------
core_resource : resource type
The type of resource representing cores.
cores : [int, ...]
The core numbers to reserve *in ascending order*.
chip : None or (x, y)
Which chip the constraints should be applied to or None for a global
constraint.
Yields
------
:py:class:`~rig.place_and_route.constraints.ReserveResourceConstraint`
|
625941bd7d43ff24873a2b90
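For illustration, a hypothetical call; Cores is assumed to be rig's core resource sentinel. Ascending core numbers collapse into as few contiguous reservations as possible:

from rig.place_and_route import Cores  # assumed import

constraints = list(_get_minimal_core_reservations(Cores, [1, 2, 3, 5], chip=(0, 0)))
# Yields two constraints: one reserving slice(1, 4) for the contiguous run
# 1-3, and one reserving slice(5, 6) for the isolated core 5.
for c in constraints:
    print(c)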
|
def parseSTAR(filename, **kwargs): <NEW_LINE> <INDENT> if not os.path.isfile(filename) and not os.path.isfile(filename + '.star'): <NEW_LINE> <INDENT> raise IOError('There is no file called {0}.'.format(filename)) <NEW_LINE> <DEDENT> start = kwargs.get('start', None) <NEW_LINE> if start is not None and not isinstance(start, Integral): <NEW_LINE> <INDENT> raise TypeError('start should be an integer or None') <NEW_LINE> <DEDENT> stop = kwargs.get('stop', None) <NEW_LINE> if stop is not None and not isinstance(stop, Integral): <NEW_LINE> <INDENT> raise TypeError('stop should be an integer or None') <NEW_LINE> <DEDENT> shlex = kwargs.get('shlex', False) <NEW_LINE> if not isinstance(shlex, bool): <NEW_LINE> <INDENT> raise TypeError('shlex should be a boolean') <NEW_LINE> <DEDENT> starfile = openFile(filename, 'r') <NEW_LINE> lines = [pystr(line) for line in starfile.readlines()] <NEW_LINE> starfile.close() <NEW_LINE> parsingDict, prog = parseSTARLines(lines, **kwargs) <NEW_LINE> return StarDict(parsingDict, prog, filename)
|
Returns a dictionary containing data parsed from a STAR file.
:arg filename: a filename
The .star extension can be omitted.
:type filename: str
:arg start: line number for starting
Default is **None**, meaning start at the beginning
:type start: int, None
:arg stop: line number for stopping
Default is **None**, meaning don't stop.
:type stop: int, None
:arg shlex: whether to use shlex for splitting lines so as to preserve quoted substrings
Default is **False**
:type shlex: bool
|
625941bd4c3428357757c21c
|
def color_table_length(c): <NEW_LINE> <INDENT> if c & 0x80: <NEW_LINE> <INDENT> return 3 << ((c & 0x07) + 1) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return 0
|
Read the flags in a GIF file to get the length of the color table.
|
625941bd21a7993f00bc7bdd
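A worked example of the bit layout: the high bit (0x80) flags that a color table is present and the low three bits give N, for a table of 3 << (N + 1) bytes:

# 0x91 = 0b10010001: table bit set, N = 1, so 3 << 2 = 12 bytes
# (4 colors at 3 bytes each).
assert color_table_length(0x91) == 12
# High bit clear: no color table follows.
assert color_table_length(0x2F) == 0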
|
def getImageHistogram(self): <NEW_LINE> <INDENT> if not self.histogram: <NEW_LINE> <INDENT> from PIL import Image <NEW_LINE> size = (128,128) <NEW_LINE> if len(self.profile_images) > 0: <NEW_LINE> <INDENT> imgfile = self.profile_images[0] <NEW_LINE> if imgfile: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> image = Image.open(imgfile) <NEW_LINE> image.thumbnail(size, Image.ANTIALIAS) <NEW_LINE> self.histogram = image.histogram() <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> logging.warn(e) <NEW_LINE> return None <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return self.histogram
|
Create (if not already cached) and return a histogram of
the primary profile image associated with this profile.
:return: A histogram as generated by Image.histogram().
|
625941bd96565a6dacc8f5bf
|
@app.route('/controle_func') <NEW_LINE> def controle_funcionarios(): <NEW_LINE> <INDENT> envia_pagina_arduino(usuarios=session['usuario_logado'], cargo=session['usuario_cargo'], pagina='Controle Funcionarios') <NEW_LINE> if(session['usuario_cargo'] == 'CCO'): <NEW_LINE> <INDENT> usuarios = usuario_dao.listar() <NEW_LINE> cargos = usuario_dao.cargos() <NEW_LINE> logs = usuario_dao.listar_logs() <NEW_LINE> status_dict_cargos = {} <NEW_LINE> for user in usuarios: <NEW_LINE> <INDENT> if user.cargo not in status_dict_cargos: <NEW_LINE> <INDENT> status_dict_cargos.setdefault(user.cargo, False) <NEW_LINE> <DEDENT> for i in range(len(cargos)): <NEW_LINE> <INDENT> if user.cargo == cargos[i] and user.status == "Online": <NEW_LINE> <INDENT> status_dict_cargos[cargos[i]] = True <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return render_template('controle_funcionariosCCO.html', usuarios = usuarios, cargos = status_dict_cargos, logs = logs) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> flash('Não foi possivel acessar essa página') <NEW_LINE> return redirect(url_for('index'))
|
Checks whether the user has the required access level, fetches the
logs and users from the database, and displays the information in an
organized way for the CCO: the logs, who is online, and the roles of
those who are online.
Receives two lists of objects from two different database tables:
logs_func and usuarios
|
625941bd29b78933be1e55a3
|
def _take_action(self, action_idx): <NEW_LINE> <INDENT> raise NotImplementedError("Implement within subclass.")
|
An environment dependent function that sends an action to the simulator.
:param action_idx: the action to perform on the environment
:return: None
|
625941bd71ff763f4b54957a
|
def deviceHeaterOn(self, siteId, deviceId, **kwargs): <NEW_LINE> <INDENT> allParams = ['siteId', 'deviceId'] <NEW_LINE> params = locals() <NEW_LINE> for (key, val) in params['kwargs'].iteritems(): <NEW_LINE> <INDENT> if key not in allParams: <NEW_LINE> <INDENT> raise TypeError("Got an unexpected keyword argument '%s' to method deviceHeaterOn" % key) <NEW_LINE> <DEDENT> params[key] = val <NEW_LINE> <DEDENT> del params['kwargs'] <NEW_LINE> resourcePath = '/site/{siteId}/device/{deviceId}/heater/eco' <NEW_LINE> resourcePath = resourcePath.replace('{format}', 'json') <NEW_LINE> method = 'POST' <NEW_LINE> queryParams = {} <NEW_LINE> headerParams = {} <NEW_LINE> if ('siteId' in params): <NEW_LINE> <INDENT> replacement = str(self.apiClient.toPathValue(params['siteId'])) <NEW_LINE> resourcePath = resourcePath.replace('{' + 'siteId' + '}', replacement) <NEW_LINE> <DEDENT> if ('deviceId' in params): <NEW_LINE> <INDENT> replacement = str(self.apiClient.toPathValue(params['deviceId'])) <NEW_LINE> resourcePath = resourcePath.replace('{' + 'deviceId' + '}', replacement) <NEW_LINE> <DEDENT> postData = (params['body'] if 'body' in params else None) <NEW_LINE> response = self.apiClient.callAPI(resourcePath, method, queryParams, postData, headerParams)
|
Set a heater to 'eco' mode
Args:
siteId, integer: ID of user's site (required)
deviceId, integer: ID of site's device (required)
Returns:
|
625941bdd53ae8145f87a167
|
def get_page(url): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> with closing(requests.get(url, stream=True)) as resp: <NEW_LINE> <INDENT> if is_good_response(resp): <NEW_LINE> <INDENT> return resp.content <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> except requests.exceptions.RequestException as e: <NEW_LINE> <INDENT> print(e) <NEW_LINE> return False
|
Fetch the raw content at the given URL if possible.
Returns the body bytes on success, None on a bad response, and False on a request error.
|
625941bd627d3e7fe0d68d41
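A quick usage sketch; is_good_response is a helper assumed to be defined alongside get_page, since it is referenced but not shown:

content = get_page("https://example.com")
if content:
    print("fetched %d bytes" % len(content))
elif content is None:
    print("server answered, but the response was not usable")
else:  # False: the request itself failed
    print("request error")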
|
def transform(self, x, labels=None, num_class=0, **kwargs): <NEW_LINE> <INDENT> if self.size == (0, 0): <NEW_LINE> <INDENT> print("Please set size of Resized Images. Return Original Images") <NEW_LINE> return x <NEW_LINE> <DEDENT> resized_images = None <NEW_LINE> batch_x, batch_size, original_size = self.check_x_dim(x.copy()) <NEW_LINE> resized_images = np.zeros( (batch_x.shape[0], self.size[0], self.size[1], batch_x.shape[3]), dtype=batch_x.dtype) <NEW_LINE> if batch_x.shape[-1] == 1: <NEW_LINE> <INDENT> for index, image in enumerate(batch_x.copy() / 255.): <NEW_LINE> <INDENT> if with_cv2: <NEW_LINE> <INDENT> resized_images[index] = cv2.resize(image[:, :, 0], (self.size[1], self.size[0]))[ :, :, np.newaxis] * 255. <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> resized_images[index] = ( rs(image, (self.size[0], self.size[1], batch_x.shape[3])) * 255).astype(batch_x.dtype) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> for index, image in enumerate(batch_x.copy() / 255.): <NEW_LINE> <INDENT> if with_cv2: <NEW_LINE> <INDENT> resized_images[index] = cv2.resize(image, (self.size[1], self.size[0])) * 255. <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> resized_images[index] = rs( image, (self.size[0], self.size[1], batch_x.shape[3])) * 255. <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if isinstance(labels, np.ndarray): <NEW_LINE> <INDENT> return resized_images, self._labels_transform(labels, num_class, batch_x.shape[1:3]) <NEW_LINE> <DEDENT> return resized_images
|
Performs a resize transformation of a Numpy Image x.
If x is a batch, the resize transform is applied to every image in the batch.
If labels are supplied, the corresponding label transformation is applied as well.
:param ndarray x: 3 or 4(batch) dimensional x
:param ndarray labels: rectangle labels(2-dimensional array)
ex:) np.array([[center x, center y, x_top_left, height, 0, 0, 0, 1, 0]])
:param int num_class: number of class of datasets
:return: Images(4 dimension) of resize transformed. If including labels, return with transformed labels
:rtype: ndarray
:Example:
>>> from renom.utility.image.data_augmentation.resize import Resize
>>> from PIL import Image as im
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> image = im.open(image_path) # ex:) "./image_folder/camera.jpg"
>>> image = np.array(image).astype(np.float32)
>>> rs = Resize(size=(300, 500))
>>> resize_image = rs.transform(image)
>>> fig, axes = plt.subplots(2, 1)
>>> axes[0].imshow(image/255); axes[0].set_title("Original Image")
>>> axes[1].imshow(resize_image[0] / 255); axes[1].set_title("Resize One Image")
>>> plt.show()
|
625941bd5e10d32532c5ee1a
|
def with_cb(func): <NEW_LINE> <INDENT> def wrapper(ax, *args, **kwargs): <NEW_LINE> <INDENT> ret = func(ax, *args, **kwargs) <NEW_LINE> colorbar(ax.get_figure(), ax, ret) <NEW_LINE> return ret <NEW_LINE> <DEDENT> return wrapper
|
Calls the wrapped function and generates a colorbar for the
plot (the axes object must be the first argument of the wrapped function).
|
625941bd4428ac0f6e5ba6e4
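A usage sketch, assuming the colorbar(fig, ax, mappable) helper that with_cb calls is defined elsewhere in the same module:

import numpy as np
import matplotlib.pyplot as plt

@with_cb
def heatmap(ax, data):
    # The returned mappable is what with_cb hands to the colorbar helper.
    return ax.imshow(data)

fig, ax = plt.subplots()
heatmap(ax, np.random.rand(10, 10))
plt.show()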
|
def display(filename): <NEW_LINE> <INDENT> segments = load_segments(filename) <NEW_LINE> g = Graph(segments) <NEW_LINE> tycat(g) <NEW_LINE> print("{}: nous avons {} segments".format(filename, len(segments))) <NEW_LINE> t1 = time.time() <NEW_LINE> g.reconnect(True) <NEW_LINE> t2 = time.time() <NEW_LINE> tps = t2 - t1 <NEW_LINE> print("Temps reconnect hash:" + str(tps)) <NEW_LINE> t1 = time.time() <NEW_LINE> g.even_degrees(True) <NEW_LINE> t2 = time.time() <NEW_LINE> tps = t2 - t1 <NEW_LINE> print("Temps degré pair hash:" + str(tps)) <NEW_LINE> g = Graph(segments) <NEW_LINE> t1 = time.time() <NEW_LINE> g.reconnect(False) <NEW_LINE> t2 = time.time() <NEW_LINE> tps = t2 - t1 <NEW_LINE> print("Temps reconnect quad:" + str(tps)) <NEW_LINE> t1 = time.time() <NEW_LINE> g.even_degrees(False) <NEW_LINE> t2 = time.time() <NEW_LINE> tps = t2 - t1 <NEW_LINE> print("Temps degré pair quad:" + str(tps)) <NEW_LINE> t1 = time.time() <NEW_LINE> g.eulerian_cycle() <NEW_LINE> t2 = time.time() <NEW_LINE> tps = t2 - t1 <NEW_LINE> print("Temps cycle eulérien:" + str(tps))
|
Load the segment file, restore connectivity, make all vertex degrees even, and compute the Eulerian cycle, timing each step with both the hash and quadtree strategies.
|
625941bd4c3428357757c21d
|
def __iadd__(self, ic_node: (FEICBlock,FEICNode)): <NEW_LINE> <INDENT> ic_node.set_parent( self._current ) <NEW_LINE> self._current += ic_node <NEW_LINE> if isinstance( ic_node, FEICBlock ): <NEW_LINE> <INDENT> self._current = ic_node <NEW_LINE> <DEDENT> return self
|
In-place appends a new block or node to this intermediate code tree.
Returns a reference to this IC tree.
|
625941bd01c39578d7e74d2e
|
def set_weight(self, weight) -> None: <NEW_LINE> <INDENT> from .data import dispatch_meta_backend <NEW_LINE> dispatch_meta_backend(self, weight, 'weight', 'float')
|
Set weight of each instance.
Parameters
----------
weight : array like
Weight for each data point
.. note:: For ranking task, weights are per-group.
In ranking task, one weight is assigned to each group (not each
data point). This is because we only care about the relative
ordering of data points within each group, so it doesn't make
sense to assign weights to individual data points.
|
625941bd23e79379d52ee459
|
def test_help(self): <NEW_LINE> <INDENT> output = self.run_command("sha1sum --help", exitcode=0) <NEW_LINE> self.assertIn("sha1sum", output) <NEW_LINE> self.assertIn("-h", output) <NEW_LINE> self.assertIn("-c", output)
|
test sha1sum --help
|
625941bd9c8ee82313fbb667
|
def save_results(self): <NEW_LINE> <INDENT> filepath, _ = QtWidgets.QFileDialog.getSaveFileName( self, SAVE_CHART_MESSAGE, f'{self._environment.id}-{self._environment.timestamp}-chart', "JPG (*.jpg);;PNG (*.png)", options=QtWidgets.QFileDialog.Options() ) <NEW_LINE> if filepath: <NEW_LINE> <INDENT> output = QtGui.QPixmap(self.percentage_bars_chartview.grab()) <NEW_LINE> output.save(filepath, quality=100) <NEW_LINE> if output: <NEW_LINE> <INDENT> QtWidgets.QMessageBox.information( self, SAVE_CHART_MESSAGE, SAVE_CHART_SUCCESS, QtWidgets.QMessageBox.Ok ) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> QtWidgets.QMessageBox.critical( self, SAVE_CHART_MESSAGE, 'Error', QtWidgets.QMessageBox.Ok )
|
Save the results chart in one of the available formats
|
625941bd8c3a8732951582aa
|
def numSquares(self, n): <NEW_LINE> <INDENT> i = 1 <NEW_LINE> sqlist = [] <NEW_LINE> while i**2 <= n: <NEW_LINE> <INDENT> sqlist.append(i**2) <NEW_LINE> i += 1 <NEW_LINE> <DEDENT> cnt = 0 <NEW_LINE> remain_list = {n} <NEW_LINE> while remain_list: <NEW_LINE> <INDENT> cnt += 1 <NEW_LINE> tmp = set() <NEW_LINE> for r in remain_list: <NEW_LINE> <INDENT> for sq in sqlist: <NEW_LINE> <INDENT> if r == sq: <NEW_LINE> <INDENT> return cnt <NEW_LINE> <DEDENT> elif r < sq: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> tmp.add(r-sq) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> remain_list = tmp
|
:type n: int
:rtype: int
|
625941bd4e4d5625662d42ce
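The method is a breadth-first search over remainders: level cnt holds every value reachable from n by subtracting cnt perfect squares, and the search stops when a remainder is itself a perfect square. Assuming the usual LeetCode-style Solution wrapper:

s = Solution()
print(s.numSquares(12))  # 3, since 12 = 4 + 4 + 4
print(s.numSquares(13))  # 2, since 13 = 4 + 9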
|