| code (string, lengths 4–4.48k) | docstring (string, lengths 1–6.45k) | _id (string, length 24) |
|---|---|---|
def read_hardware_id(self, retry: bool = True) -> int: <NEW_LINE> <INDENT> self.logger.info("Reading hardware ID") <NEW_LINE> try: <NEW_LINE> <INDENT> return int(self.i2c.read_register(0x20, retry=retry)) <NEW_LINE> <DEDENT> except I2CError as e: <NEW_LINE> <INDENT> raise exceptions.ReadRegisterError( message="hw id reg", logger=self.logger ) from e
|
Reads hardware ID from sensor.
|
625941bb24f1403a92600a1d
|
def create_web_config(new_dir, filename): <NEW_LINE> <INDENT> with open(new_dir + '/' + filename, 'wb') as f: <NEW_LINE> <INDENT> f.write(pkg_resources.resource_string(app_name, '/config/' + filename))
|
The function searches for the specified *filename* in the *config* directory of this module
and, if that file exists, copies it to the *new_dir* directory.
Args:
new_dir (str): Config file *filename* will be created in this directory.
filename (str): Config file to copy.
|
625941bba8370b7717052755
|
def connect(self, sanity_check=True): <NEW_LINE> <INDENT> logging.debug("Directory Mock: Connection")
|
Connect to AD server
|
625941bbfb3f5b602dac3543
|
def __init__(self, method: str, handler: Callable, price: int, call_flags: contracts.CallFlags): <NEW_LINE> <INDENT> self.method = method <NEW_LINE> self.hash: int = int.from_bytes(hashlib.sha256(self.method.encode()).digest()[:4], 'little', signed=False) <NEW_LINE> self.handler = handler <NEW_LINE> self.parameters = [] <NEW_LINE> for k, v in get_type_hints(handler).items(): <NEW_LINE> <INDENT> if k == 'return': <NEW_LINE> <INDENT> self.has_return_value = v != self.NONE_TYPE <NEW_LINE> continue <NEW_LINE> <DEDENT> self.parameters.append(v) <NEW_LINE> <DEDENT> self.parameters = self.parameters[1:] <NEW_LINE> self.price = price <NEW_LINE> self.required_call_flags = call_flags
|
Create an interoperability call descriptor.
These are the functions that can be called using the SYSCALL OpCode in the virtual machine.
Use the alternative constructor `create_with_price_calculator` if the price needs to be determined dynamically.
Args:
method: name of call.
handler: the function that will be executed when called.
price: the price of calling the handler.
call_flags: ExecutionContext rights needed.
|
625941bb956e5f7376d70d2c
|
def delete_users_of_blacklist(userId, accessToken=None): <NEW_LINE> <INDENT> if not isinstance(userId, list): <NEW_LINE> <INDENT> userId = [userId] <NEW_LINE> <DEDENT> data = {'openid_list': userId} <NEW_LINE> data = encode_send_dict(data) <NEW_LINE> r = requests.post('%s/cgi-bin/tags/members/batchunblacklist?access_token=%s' % (SERVER_URL, accessToken), data=data) <NEW_LINE> def _wrap_result(result): <NEW_LINE> <INDENT> return ReturnValue(result.json()) <NEW_LINE> <DEDENT> r._wrap_result = _wrap_result <NEW_LINE> return r
|
userId can be a single userId or a list of userIds
|
625941bb9b70327d1c4e0c88
|
def scrape_deputy(link_part): <NEW_LINE> <INDENT> url = urljoin(BASE_URL, link_part) <NEW_LINE> response = requests.get(url) <NEW_LINE> document = html.fromstring(response.content) <NEW_LINE> box1 = document.cssselect(".info-diputados-principal1").pop() <NEW_LINE> box2 = document.cssselect(".info-diputados-principal2").pop() <NEW_LINE> data = { 'source_url': url, 'name': box1.find('.//h2').text.strip(), 'party': box2.find('.//h3').text.strip(), 'email': box2.find('.//a').text.strip() } <NEW_LINE> pprint(data) <NEW_LINE> table = engine['deputies'] <NEW_LINE> table.upsert(data, ['source_url'])
|
Extract information from a deputy's page.
|
625941bb91af0d3eaac9b8c9
|
def index(request, *args, **kwargs): <NEW_LINE> <INDENT> article_type_list = models.Article.type_choices <NEW_LINE> if kwargs: <NEW_LINE> <INDENT> article_type_id = int(kwargs['article_type_id']) <NEW_LINE> base_url = reverse('index', kwargs=kwargs) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> article_type_id = None <NEW_LINE> base_url = '/' <NEW_LINE> <DEDENT> data_count = models.Article.objects.filter(**kwargs).count() <NEW_LINE> page_obj = Pagination(request.GET.get('page'), data_count) <NEW_LINE> article_list = models.Article.objects.filter(**kwargs).order_by('-nid')[page_obj.start:page_obj.end] <NEW_LINE> page_str = page_obj.page_str(base_url) <NEW_LINE> return render( request, 'index.html', { 'article_list': article_list, 'article_type_id': article_type_id, 'article_type_list': article_type_list, 'page_str': page_str, } )
|
Blog home page; displays all blog posts.
:param request:
:return:
|
625941bbbe7bc26dc91cd4b9
|
def schedule_cmd(self, command, channel, sched_time, function, user_id, event_type='message', args=None): <NEW_LINE> <INDENT> s = sched.scheduler(time.time, time.sleep) <NEW_LINE> task = s.enterabs( sched_time, 1, function, (command, channel, user_id, event_type, args) ) <NEW_LINE> t = threading.Thread(target=s.run) <NEW_LINE> t.daemon = True <NEW_LINE> t.start() <NEW_LINE> self.add_task(t.ident, t, task.time, task.action.__name__, task.argument) <NEW_LINE> print(task)
|
Creates a thread to execute a bot command at a specific time.
Args:
command (str): Name of the command to be executed
channel (str): Slack Channel ID to execute the command in
sched_time (float): Timestamp representation of when to execute the command
function (function): Reference to function used to process the command (usually Bot.handle_scheduled_cmd)
user_id (str): Slack User ID to use when executing the command (usually the ID of the Bot)
event_type (str): Slack event type (default: "message")
args (str): Arguments to be appended to the command when executed
(e.g. the "roll" command takes a valid die roll as an argument; "1d20" could be passed here)
|
625941bb44b2445a33931f54
|
def storeFileFromOffset(self, service_name, path, file_obj, offset = 0, timeout = 30): <NEW_LINE> <INDENT> if not self.sock: <NEW_LINE> <INDENT> raise NotConnectedError('Not connected to server') <NEW_LINE> <DEDENT> results = [ ] <NEW_LINE> def cb(r): <NEW_LINE> <INDENT> self.is_busy = False <NEW_LINE> results.append(r[1]) <NEW_LINE> <DEDENT> def eb(failure): <NEW_LINE> <INDENT> self.is_busy = False <NEW_LINE> raise failure <NEW_LINE> <DEDENT> self.is_busy = True <NEW_LINE> try: <NEW_LINE> <INDENT> self._storeFile(service_name, path, file_obj, cb, eb, timeout = timeout) <NEW_LINE> while self.is_busy: <NEW_LINE> <INDENT> self._pollForNetBIOSPacket(timeout) <NEW_LINE> <DEDENT> <DEDENT> finally: <NEW_LINE> <INDENT> self.is_busy = False <NEW_LINE> <DEDENT> return results[0]
|
Store the contents of the *file_obj* at *path* on the *service_name*.
:param string/unicode service_name: the name of the shared folder for the *path*
:param string/unicode path: Path of the file on the remote server. If the file at *path* does not exist, it will be created. Otherwise, it will be overwritten.
If the *path* refers to a folder or the file cannot be opened for writing, an :doc:`OperationFailure<smb_exceptions>` will be raised.
:param file_obj: A file-like object that has a *read* method. Data will be read continuously from *file_obj* until EOF. In Python3, this file-like object must have a *read* method which returns a bytes parameter.
:return: Number of bytes uploaded
|
625941bb4e4d5625662d4290
|
def store_scan_xml(self, fname, **kargs): <NEW_LINE> <INDENT> parser = xml.sax.make_parser() <NEW_LINE> self.start_store_hosts() <NEW_LINE> try: <NEW_LINE> <INDENT> content_handler = self.content_handler(fname, **kargs) <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> utils.LOGGER.warning('Exception (file %r)', fname, exc_info=True) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> parser.setContentHandler(content_handler) <NEW_LINE> parser.setEntityResolver(xmlnmap.NoExtResolver()) <NEW_LINE> parser.parse(utils.open_file(fname)) <NEW_LINE> if self.output_function is not None: <NEW_LINE> <INDENT> self.output_function(content_handler._db, out=self.output) <NEW_LINE> <DEDENT> self.stop_store_hosts() <NEW_LINE> return True <NEW_LINE> <DEDENT> self.stop_store_hosts() <NEW_LINE> return False
|
This method parses an XML scan result, displays a JSON
version of the result, and returns True if everything went
fine, False otherwise.
In backend-specific subclasses, this method stores the result
instead of displaying it, thanks to the `content_handler`
attribute.
|
625941bbaad79263cf3908f0
|
def insertion_sort(arr): <NEW_LINE> <INDENT> for i in range(1, len(arr)): <NEW_LINE> <INDENT> key = arr[i] <NEW_LINE> print("key", key) <NEW_LINE> j = i-1 <NEW_LINE> while j >= 0 and key < arr[j]: <NEW_LINE> <INDENT> arr[j+1] = arr[j] <NEW_LINE> print("check", arr[j+1]) <NEW_LINE> j -= 1 <NEW_LINE> print("j", j) <NEW_LINE> <DEDENT> arr[j+1] = key <NEW_LINE> print("change", arr[j+1]) <NEW_LINE> print(arr) <NEW_LINE> <DEDENT> return arr
|
Performs an Insertion Sort on the array arr.
|
625941bb63b5f9789fde6f9a
|
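A minimal, runnable check of the insertion sort in the row above, with the boundary condition evaluated before indexing (the fix applied in that row) and the debug prints omitted:

```python
def insertion_sort(arr):
    for i in range(1, len(arr)):
        key = arr[i]
        j = i - 1
        while j >= 0 and key < arr[j]:  # test j >= 0 before reading arr[j]
            arr[j + 1] = arr[j]         # shift larger elements one slot right
            j -= 1
        arr[j + 1] = key                # drop key into its final slot
    return arr

assert insertion_sort([5, 2, 4, 6, 1, 3]) == [1, 2, 3, 4, 5, 6]
```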
def rpcf_is_service_used(self, params): <NEW_LINE> <INDENT> if not self.urfa_call(0x10001): <NEW_LINE> <INDENT> raise Exception("urfa_call(0x10001) failed [rpcf_is_service_used]") <NEW_LINE> <DEDENT> self.pck.init(code = U_PKT_DATA) <NEW_LINE> self.pck.add_data(params['sid'], U_TP_I) <NEW_LINE> self.pck.send(self.sck) <NEW_LINE> ret = defaultdict(dict) <NEW_LINE> self.pck.recv(self.sck) <NEW_LINE> ret['links_count'] = self.pck.get_data(U_TP_I) <NEW_LINE> if self.pck.recv(self.sck): return ret <NEW_LINE> else: raise Exception("Failed to receive answer from server")
|
description
@params:
:(s) sid : (i) -
@returns:
:(s) links_count : (i) -
|
625941bb442bda511e8be2d9
|
def _makeD2PiMuMuCal(self, name, pionSel, muonSel, config): <NEW_LINE> <INDENT> return makeD2PiMuMuCal(name , pionSel , muonSel , DMAXDOCA = config['DMAXDOCA'] , DVCHI2DOF = config['DVCHI2DOF'] , DDIRA = config['DDIRA'] , DIPCHI2 = config['DIPCHI2'] , DMassWin = config['DMassWin'] , DMassLow = config['DMassLow'] , DimuonMass = config['DimuonMass'])
|
Handy interface for D2PiMuMuCal
|
625941bb96565a6dacc8f589
|
def _dflike_todf(self): <NEW_LINE> <INDENT> self._consolidate_names() <NEW_LINE> self._todf() <NEW_LINE> self._covert_data() <NEW_LINE> self._handle_value_labels() <NEW_LINE> self._handle_row_names()
|
This one is for objects that do not have the dim attribute set: dataframes
and atomic vectors. (Vectors can have the dim attribute; in that case
they go to the other method.)
|
625941bbb830903b967e97ca
|
def list_all( self, custom_headers=None, raw=False, **operation_config): <NEW_LINE> <INDENT> def internal_paging(next_link=None, raw=False): <NEW_LINE> <INDENT> if not next_link: <NEW_LINE> <INDENT> url = self.list_all.metadata['url'] <NEW_LINE> path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } <NEW_LINE> url = self._client.format_url(url, **path_format_arguments) <NEW_LINE> query_parameters = {} <NEW_LINE> query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> url = next_link <NEW_LINE> query_parameters = {} <NEW_LINE> <DEDENT> header_parameters = {} <NEW_LINE> header_parameters['Content-Type'] = 'application/json; charset=utf-8' <NEW_LINE> if self.config.generate_client_request_id: <NEW_LINE> <INDENT> header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) <NEW_LINE> <DEDENT> if custom_headers: <NEW_LINE> <INDENT> header_parameters.update(custom_headers) <NEW_LINE> <DEDENT> if self.config.accept_language is not None: <NEW_LINE> <INDENT> header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') <NEW_LINE> <DEDENT> request = self._client.get(url, query_parameters) <NEW_LINE> response = self._client.send( request, header_parameters, stream=False, **operation_config) <NEW_LINE> if response.status_code not in [200]: <NEW_LINE> <INDENT> exp = CloudError(response) <NEW_LINE> exp.request_id = response.headers.get('x-ms-request-id') <NEW_LINE> raise exp <NEW_LINE> <DEDENT> return response <NEW_LINE> <DEDENT> deserialized = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies) <NEW_LINE> if raw: <NEW_LINE> <INDENT> header_dict = {} <NEW_LINE> client_raw_response = models.VirtualNetworkPaged(internal_paging, self._deserialize.dependencies, header_dict) <NEW_LINE> return client_raw_response <NEW_LINE> <DEDENT> return deserialized
|
Gets all virtual networks in a subscription.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of VirtualNetwork
:rtype:
~azure.mgmt.network.v2016_12_01.models.VirtualNetworkPaged[~azure.mgmt.network.v2016_12_01.models.VirtualNetwork]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
|
625941bbcc40096d61595807
|
def extract_patches_loop(arr, patch_shape, sub_findings): <NEW_LINE> <INDENT> arr_row, arr_col = arr.shape <NEW_LINE> patch_row, patch_col = patch_shape <NEW_LINE> findings = sub_findings[:,:2] <NEW_LINE> patches = image.extract_patches_2d(arr, patch_shape) <NEW_LINE> ppc = arr_row - patch_row + 1 <NEW_LINE> ppr = arr_col - patch_col + 1 <NEW_LINE> i2c = lambda idx: (idx // ppr, idx % ppr) <NEW_LINE> c2i = lambda xs: xs[0] * ppr + xs[1] <NEW_LINE> iden = lambda j: c2i(i2c(j)) <NEW_LINE> funcx = lambda fx: np.arange(patch_row) + (fx - patch_row + 1) <NEW_LINE> funcy = lambda fy: np.arange(patch_col) + (fy - patch_col + 1) <NEW_LINE> idx = findings_2_idx(findings, c2i, funcx, funcy) <NEW_LINE> mask = ~np.ones(patches.shape[0], dtype = bool) <NEW_LINE> mask[idx] = True <NEW_LINE> patches_with_findings = filter_empty_patches(patches[mask]) <NEW_LINE> patches_without_findings = filter_empty_patches(patches[~mask]) <NEW_LINE> return patches_without_findings, patches_with_findings
|
Extract patches with potential findings.
|
625941bb0a50d4780f666d44
|
def count(ptype, **options): <NEW_LINE> <INDENT> if utils.is_infinite(ptype): <NEW_LINE> <INDENT> raise ValueError("count is not supported on infinite PType") <NEW_LINE> <DEDENT> scale = options.get('scale', 0.1) <NEW_LINE> size = options.get('output_size', None) <NEW_LINE> memory = options.get('memory_limit', -1) <NEW_LINE> cpu = options.get('cpu_limit', -1) <NEW_LINE> return ptype.aggregate( 0, aggregate_fn=lambda x, y: x + 1, combine_fn=lambda x, y: x + y, serde=serde.IntSerde(), scale=scale, output_size=size, memory_limit=memory, cpu_limit=cpu)
|
count() implementation
|
625941bb287bf620b61d3922
|
def kelvin_to_fahrenheit(temp_kelvin): <NEW_LINE> <INDENT> return temp_kelvin * 9/5 - 459.67
|
Converts a temperature in degrees Kelvin to degrees Fahrenheit,
and returns the result.
|
625941bb6e29344779a624c9
|
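A quick sanity check of the conversion in the row above: water's freezing and boiling points (273.15 K and 373.15 K) should map to 32 °F and 212 °F.

```python
def kelvin_to_fahrenheit(temp_kelvin):
    return temp_kelvin * 9/5 - 459.67

# 273.15 * 9/5 = 491.67; 491.67 - 459.67 = 32.0
assert round(kelvin_to_fahrenheit(273.15), 2) == 32.0
assert round(kelvin_to_fahrenheit(373.15), 2) == 212.0
```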
def CreateClosures(value): <NEW_LINE> <INDENT> context = copy.deepcopy(value) <NEW_LINE> def GetContext(): <NEW_LINE> <INDENT> return copy.deepcopy(context) <NEW_LINE> <DEDENT> def SetContext(value): <NEW_LINE> <INDENT> nonlocal context <NEW_LINE> context = copy.deepcopy(value) <NEW_LINE> <DEDENT> return GetContext, SetContext
|
Create two closures that share context.
|
625941bbcb5e8a47e48b7963
|
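A short demonstration of the closure pair above with the `nonlocal` fix applied: both closures read and write the same captured `context` variable.

```python
import copy

def CreateClosures(value):
    context = copy.deepcopy(value)
    def GetContext():
        return copy.deepcopy(context)
    def SetContext(value):
        nonlocal context              # without this, assignment creates a new local
        context = copy.deepcopy(value)
    return GetContext, SetContext

get_ctx, set_ctx = CreateClosures({'a': 1})
set_ctx({'b': 2})
assert get_ctx() == {'b': 2}          # SetContext updated the shared context
```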
def apply_dense_block(self, x, nb_layers: int): <NEW_LINE> <INDENT> for _ in range(nb_layers): <NEW_LINE> <INDENT> conv = self.apply_bn_relu_conv(x, self.growth_rate, (3, 3)) <NEW_LINE> x = Concatenate()([x, conv]) <NEW_LINE> self.nb_channels += self.growth_rate <NEW_LINE> <DEDENT> return x
|
apply a dense block to input tensor x
:param x: input tensor
:param nb_layers: number of layers in this block
:return: output tensor
|
625941bb5f7d997b87174950
|
def create_temporary_system_function(self, name: str, function: Union[UserDefinedFunctionWrapper, AggregateFunction]): <NEW_LINE> <INDENT> function = self._wrap_aggregate_function_if_needed(function) <NEW_LINE> java_function = function._java_user_defined_function() <NEW_LINE> self._j_tenv.createTemporarySystemFunction(name, java_function)
|
Registers a python user defined function class as a temporary system function.
Compared to :func:`create_temporary_function`, system functions are identified
by a global name that is independent of the current catalog and current database. Thus,
this method allows extending the set of built-in system functions like TRIM, ABS, etc.
Temporary functions can shadow permanent ones. If a permanent function under a given name
exists, it will be inaccessible in the current session. To make the permanent function
available again one can drop the corresponding temporary system function.
Example:
::
>>> table_env.create_temporary_system_function(
... "add_one", udf(lambda i: i + 1, result_type=DataTypes.BIGINT()))
>>> @udf(result_type=DataTypes.BIGINT())
... def add(i, j):
... return i + j
>>> table_env.create_temporary_system_function("add", add)
>>> class SubtractOne(ScalarFunction):
... def eval(self, i):
... return i - 1
>>> table_env.create_temporary_system_function(
... "subtract_one", udf(SubtractOne(), result_type=DataTypes.BIGINT()))
:param name: The name under which the function will be registered globally.
:param function: The function class containing the implementation. The function must have a
public no-argument constructor and can be found in the current Java
classloader.
.. versionadded:: 1.12.0
|
625941bbd164cc6175782c02
|
def parse_agency(abb, doc): <NEW_LINE> <INDENT> agency_name = doc.h1.text.strip() <NEW_LINE> description = agency_description(doc) <NEW_LINE> departments = [] <NEW_LINE> for option in doc("option")[1:]: <NEW_LINE> <INDENT> opt_id = option['value'] <NEW_LINE> elem = doc(id=opt_id)[0] <NEW_LINE> dept_name = option.string.strip().replace('?', '–') <NEW_LINE> departments.append(parse_department(elem, dept_name)) <NEW_LINE> <DEDENT> agency = {"abbreviation": abb, "name": agency_name, "description": description, "departments": departments} <NEW_LINE> return agency
|
Make sense of a block of HTML from FOIA.gov
|
625941bb8da39b475bd64e2c
|
def Cholesky(self, X): <NEW_LINE> <INDENT> chol_dim1 = tf.cholesky( self._Kcore(self.dim1, X2=None) + eye(tf.shape(self.dim1)[0]) * settings.numerics.jitter_level) <NEW_LINE> chol_dim2 = tf.cholesky( self._Kcore(self.dim2, X2=None) + eye(tf.shape(self.dim2)[0]) * settings.numerics.jitter_level) <NEW_LINE> chol = kronecker_product(chol_dim1, chol_dim2) <NEW_LINE> var = tf.tile(tf.expand_dims(tf.expand_dims( tf.sqrt(self.variance), 0),0), [tf.shape(chol)[0],tf.shape(chol)[1],1]) <NEW_LINE> return var * tf.tile(tf.expand_dims(chol, -1),[1,1,tf.shape(var)[2]])
|
Overrides the Cholesky computation for a speed-up.
X should be dim2*dim2
|
625941bbd268445f265b4d23
|
def construct(self): <NEW_LINE> <INDENT> out = "" <NEW_LINE> for i in self.rSet: <NEW_LINE> <INDENT> if i[1] is None: <NEW_LINE> <INDENT> out += i[0] + "+" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> out += i[0][:i[0].find("(") + 1] + construct(i[1]) + i[0][ i[0].find(")"):] + "+" <NEW_LINE> <DEDENT> <DEDENT> return out[:len(out) - 1].replace("+-","-")
|
Returns the string representation of the function provided.
|
625941bb38b623060ff0aca3
|
def p_external_declaration_1(p): <NEW_LINE> <INDENT> p[0] = Node("external_declaration", p[1:])
|
external_declaration : function_definition
| declaration
|
625941bb7047854f462a12c1
|
def addr(A, B, before): <NEW_LINE> <INDENT> return before[A] + before[B]
|
Reg C -> result of adding reg A and reg B
|
625941bbb5575c28eb68deb3
|
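A worked example for the `addr` instruction above: with register state [3, 0, 4, 0], addr(0, 2, ...) reads registers 0 and 2 and returns 3 + 4 = 7 (the caller then stores the result in register C).

```python
def addr(A, B, before):
    # opcode semantics: read registers A and B from the 'before' state and add
    return before[A] + before[B]

assert addr(0, 2, [3, 0, 4, 0]) == 7
```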
def _build_org_lookup(self, json_file): <NEW_LINE> <INDENT> document_list = [] <NEW_LINE> try: <NEW_LINE> <INDENT> json_data=open(json_file).read() <NEW_LINE> document_list = json.loads(json_data) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> raise IOError("Failed to load organization JSON file: " + str(e)) <NEW_LINE> <DEDENT> office = Office.objects.first() <NEW_LINE> org_lookup = {} <NEW_LINE> for document in document_list: <NEW_LINE> <INDENT> id = document["id"] <NEW_LINE> if id not in org_lookup: <NEW_LINE> <INDENT> ancestors = document["ancestor_relationships"] <NEW_LINE> depth = len(ancestors) - 1 <NEW_LINE> if depth == 0: <NEW_LINE> <INDENT> lab = Lab.objects.filter(Q(office = office) & Q(lab = document["name"]) & Q(abbreviation = document["abbrev"])).first() <NEW_LINE> org_lookup[id] = lab <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> for document in document_list: <NEW_LINE> <INDENT> id = document["id"] <NEW_LINE> if id not in org_lookup: <NEW_LINE> <INDENT> ancestors = document["ancestor_relationships"] <NEW_LINE> depth = len(ancestors) - 1 <NEW_LINE> if depth == 1: <NEW_LINE> <INDENT> lab = org_lookup[ancestors[depth]["ancestor"]["id"]] <NEW_LINE> division = Division.objects.filter(Q(office = office) & Q(lab = lab) & Q(division = document["name"]) & Q(abbreviation = document["abbrev"])).first() <NEW_LINE> org_lookup[id] = division <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> for document in document_list: <NEW_LINE> <INDENT> id = document["id"] <NEW_LINE> if id not in org_lookup: <NEW_LINE> <INDENT> ancestors = document["ancestor_relationships"] <NEW_LINE> depth = len(ancestors) - 1 <NEW_LINE> if depth == 2: <NEW_LINE> <INDENT> lab = org_lookup[ancestors[depth]["ancestor"]["id"]] <NEW_LINE> division = org_lookup[ancestors[depth-1]["ancestor"]["id"]] <NEW_LINE> branch = Branch.objects.filter(Q(office = office) & Q(lab = lab) & Q(division = division) & Q(branch = document["name"]) & Q(abbreviation = document["abbrev"])).first() <NEW_LINE> org_lookup[id] = branch <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return org_lookup
|
Build a lookup table from OSIM ids to QA track database objects
:param json_file: json file of organizations
:return:
|
625941bb66656f66f7cbc05f
|
def _set_all_normalisations_for_single_fit_mode(self, normalisations: list) -> None: <NEW_LINE> <INDENT> for index, normalisation in enumerate(normalisations): <NEW_LINE> <INDENT> self._set_normalisation_in_tf_asymmetry_single_fit_function(index, normalisation)
|
Sets the normalisations within the TF Asymmetry single fit functions.
|
625941bbbd1bec0571d904ee
|
def authorize(self, username=None, password=None): <NEW_LINE> <INDENT> if username is None: <NEW_LINE> <INDENT> username = raw_input("Username: ") <NEW_LINE> <DEDENT> if password is None: <NEW_LINE> <INDENT> password = getpass() <NEW_LINE> <DEDENT> self.cookies = self._auth(username, password)
|
Authorize this connection for personal SSC use
If either password or username are not provided, a prompt
is given for the missing value(s).
:type username: str|None
:type password: str|None
|
625941bbb57a9660fec33735
|
def testBaseConfigExists(self): <NEW_LINE> <INDENT> path_to_check = self.basetracini <NEW_LINE> result = os.path.exists(path_to_check) <NEW_LINE> self.assertTrue(result, "base_trac.ini not created")
|
Check that the base config file is created
|
625941bb3539df3088e2e200
|
def load_transforms(path: str): <NEW_LINE> <INDENT> if (os.path.isfile(path)): <NEW_LINE> <INDENT> archive_reader = tarfile.open(path) <NEW_LINE> filenames = archive_reader.getnames() <NEW_LINE> filedata = [archive_reader.extractfile(f).read() for f in filenames] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> filenames = glob.glob(path + "/Diffusion-*.txt") <NEW_LINE> filenames.sort() <NEW_LINE> filedata = [open(f, 'rb').read() for f in filenames] <NEW_LINE> <DEDENT> num_grads = len(filenames) <NEW_LINE> transforms = np.zeros((num_grads, 4, 4), dtype=np.float64) <NEW_LINE> for (i, xfm_txt) in enumerate(filedata): <NEW_LINE> <INDENT> xfm = np.genfromtxt(io.BytesIO(xfm_txt)) <NEW_LINE> assert xfm.shape == (4,4), "Unexpected transform shape {}, expected 4x4 matrix".format(xfm.shape) <NEW_LINE> transforms[i] = xfm <NEW_LINE> <DEDENT> return transforms
|
Load array of transforms from a directory, or tar/tgz/bz2 archive containing
Flirt 4x4 matrices, generally with the filename format 'Diffusion-G#.txt'.
|
625941bb38b623060ff0aca4
|
def setpos(self, x, y=None, z=None): <NEW_LINE> <INDENT> if y is None and z is None: <NEW_LINE> <INDENT> x, y, z = x <NEW_LINE> <DEDENT> if z is None: <NEW_LINE> <INDENT> z = 0 <NEW_LINE> <DEDENT> new_position = rs.CreatePoint(x, y, z) <NEW_LINE> if self._pen_down: <NEW_LINE> <INDENT> self._this_line.append(new_position) <NEW_LINE> <DEDENT> self._position = new_position
|
Sets the turtle's position to the given coordinates. If the pen is down, adds a line along the path moved.
If y and z are omitted, assumes x is a point.
If z is omitted, assumes z is 0.
|
625941bbb7558d58953c4dcf
|
def __init__(self, event_name, event_args=None): <NEW_LINE> <INDENT> self.event_name = event_name <NEW_LINE> self.event_args = event_args if event_args is not None else []
|
Constructor of the class.
@type event_name: String
@param event_name: The name of the event.
@type event_args: List
@param event_args: The arguments of the event.
|
625941bb009cb60464c63270
|
@pytest.mark.django_db <NEW_LINE> def test_accept_suggestion_update_wordcount(it_tutorial_po, system): <NEW_LINE> <INDENT> orig_translated = it_tutorial_po.data.translated_words <NEW_LINE> suggestions = review.get(Suggestion)() <NEW_LINE> untranslated_unit = it_tutorial_po.units[0] <NEW_LINE> suggestion_text = 'foo bar baz' <NEW_LINE> sugg, added = suggestions.add(untranslated_unit, suggestion_text) <NEW_LINE> assert sugg is not None <NEW_LINE> assert added <NEW_LINE> assert len(untranslated_unit.get_suggestions()) == 1 <NEW_LINE> assert untranslated_unit.state == UNTRANSLATED <NEW_LINE> review.get(Suggestion)([sugg], system).accept() <NEW_LINE> assert untranslated_unit.state == TRANSLATED <NEW_LINE> assert it_tutorial_po.data.translated_words > orig_translated
|
Tests that accepting a suggestion for an untranslated unit will
change the wordcount stats of the unit's store.
|
625941bb091ae35668666e19
|
def set_max_noutput_items(self, *args, **kwargs): <NEW_LINE> <INDENT> return _blocks_swig3.multiply_conjugate_cc_sptr_set_max_noutput_items(self, *args, **kwargs)
|
set_max_noutput_items(multiply_conjugate_cc_sptr self, int m)
|
625941bb3eb6a72ae02ec389
|
def __init__(self, *groups, **kwargs): <NEW_LINE> <INDENT> Sprite.__init__(self, *groups, **kwargs)
|
Initialize the Sprite in the given groups.
|
625941bb63d6d428bbe443a4
|
def initialize(self, src_sentence): <NEW_LINE> <INDENT> self.cur_trg_sentences = self.trg_sentences[self.current_sen_id] <NEW_LINE> self.history = []
|
Resets the history and loads the n-best list entries for the
next source sentence
Args:
src_sentence (list): Not used
|
625941bb6e29344779a624ca
|
def dot(v, w): <NEW_LINE> <INDENT> return sum(v_i * w_i for v_i, w_i in zip(v, w))
|
Dot product of two vectors.
|
625941bbd18da76e23532387
|
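A worked example for the dot product above: (1, 2, 3) · (4, 5, 6) = 1·4 + 2·5 + 3·6 = 32.

```python
def dot(v, w):
    return sum(v_i * w_i for v_i, w_i in zip(v, w))

assert dot([1, 2, 3], [4, 5, 6]) == 32
```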
def get_entry_point_abs_path(pack=None, entry_point=None): <NEW_LINE> <INDENT> if not entry_point: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> if os.path.isabs(entry_point): <NEW_LINE> <INDENT> pack_base_path = get_pack_base_path(pack_name=pack) <NEW_LINE> common_prefix = os.path.commonprefix([pack_base_path, entry_point]) <NEW_LINE> if common_prefix != pack_base_path: <NEW_LINE> <INDENT> raise ValueError('Entry point file "%s" is located outside of the pack directory' % (entry_point)) <NEW_LINE> <DEDENT> return entry_point <NEW_LINE> <DEDENT> entry_point_abs_path = get_pack_resource_file_abs_path(pack_name=pack, resource_type='action', file_path=entry_point) <NEW_LINE> return entry_point_abs_path
|
Return full absolute path of an action entry point in a pack.
:param pack: Content pack name.
:type pack: ``str``
:param entry_point: Action entry point.
:type entry_point: ``str``
:rtype: ``str``
|
625941bb3c8af77a43ae3652
|
def update_pho(self, pho): <NEW_LINE> <INDENT> self.pho.append(pho)
|
Append a new value to pho.
|
625941bbbf627c535bc1308b
|
def toTraceObj(self, df:pd.core.frame.DataFrame, traceName:str) -> Dict: <NEW_LINE> <INDENT> df['TIME_STAMP'] = df['TIME_STAMP'].astype('str') <NEW_LINE> xVals = df['TIME_STAMP'].values.tolist() <NEW_LINE> yVals= df['DEMAND_VALUE'].values.tolist() <NEW_LINE> traceObj = {'traceName': traceName, 'xVals':xVals, 'yVals': yVals} <NEW_LINE> return traceObj
|
Convert demand data to a trace object dictionary.
Args:
df (pd.core.frame.DataFrame): demand data dataframe
traceName (str): name of the legend
Returns:
Dict: {'traceName': , 'xVals': , 'yVals': }
|
625941bbd58c6744b4257b15
|
def r_squared(x_arr, y_arr): <NEW_LINE> <INDENT> y, m, x, c = line_fitting(x_arr, y_arr) <NEW_LINE> total_var_y = ([(i-y)**2 for i in y_arr]) <NEW_LINE> variation_not_by_line = float(sum(line_error(x_arr=x_arr, y_arr=y_arr, m=m, c=c)))/sum(total_var_y) <NEW_LINE> return 1 - variation_not_by_line
|
Computes R^2, i.e. the square of scipy.stats.pearsonr's value,
using functions in this module: linear_regression.py.
Also called the Coefficient of Determination.
It measures how well the best-fit line fits the scattered points:
what percentage of the total variation is explained by the fitted line.
A high value means a good fit.
High R^2 = good model, probably profitable;
low R^2 = bad model, probably dangerous.
|
625941bb5fc7496912cc383b
|
def __init__(self): <NEW_LINE> <INDENT> self.commands = [] <NEW_LINE> self.enums = [] <NEW_LINE> self.structs = [] <NEW_LINE> self.types = [] <NEW_LINE> self.generic_argument_lists = [] <NEW_LINE> self.generic_reply_field_lists = []
|
Construct an empty symbol table.
|
625941bb85dfad0860c3ad0e
|
def add_keywords_from_dict(self, keyword_dict): <NEW_LINE> <INDENT> for clean_name, keywords in keyword_dict.items(): <NEW_LINE> <INDENT> if not isinstance(keywords, list): <NEW_LINE> <INDENT> raise AttributeError("Value of key {} should be a list".format(clean_name)) <NEW_LINE> <DEDENT> for keyword in keywords: <NEW_LINE> <INDENT> self.add_keyword(keyword, clean_name)
|
To add keywords from a dictionary
Args:
keyword_dict (dict): A dictionary with `str` key and (list `str`) as value
Examples:
>>> keyword_dict = {
...     "java": ["java_2e", "java programing"],
...     "product management": ["PM", "product manager"]
... }
>>> keyword_processor.add_keywords_from_dict(keyword_dict)
Raises:
AttributeError: If value for a key in `keyword_dict` is not a list.
|
625941bb2c8b7c6e89b35678
|
@needs_active_game <NEW_LINE> def stand_callback(update, context): <NEW_LINE> <INDENT> chat = update.effective_chat <NEW_LINE> lang_id = Database().get_lang_id(chat.id) <NEW_LINE> game = GameStore().get_game(update.effective_chat.id) <NEW_LINE> if not is_button_affiliated(update, context, game, lang_id): <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> next_player(update, context)
|
CallbackQueryHandler callback for the 'stand' inline button. Prepares round for the next player.
|
625941bb76d4e153a657e9e5
|
def verify_dependency(executable_name, min_version=None, suffix=None): <NEW_LINE> <INDENT> executable = find_executable(executable_name) <NEW_LINE> if not executable: <NEW_LINE> <INDENT> sys.exit('You must install %s before installing ssh-ecs-run-task' % executable_name) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if min_version or suffix: <NEW_LINE> <INDENT> executable = os.path.abspath(executable) <NEW_LINE> try: <NEW_LINE> <INDENT> version_bytes = subprocess.check_output("%s --version" % executable, stderr=None, shell=True).strip() <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> sys.exit("Could not determine version of %s" % executable_name) <NEW_LINE> <DEDENT> version = version_bytes.decode() <NEW_LINE> if version == '': <NEW_LINE> <INDENT> sys.exit("Could not determine version of %s" % executable_name) <NEW_LINE> <DEDENT> m = re.match(r"[^0-9]*(?P<XYZ>\b[0-9]+\.[0-9\.]+)(?P<suffix>(\-[^\s]+|))\b", version) <NEW_LINE> if not m: <NEW_LINE> <INDENT> print("Unrecognized format for version string %s" % version) <NEW_LINE> <DEDENT> XYZ = m.group('XYZ') <NEW_LINE> if XYZ == '': <NEW_LINE> <INDENT> sys.exit("Could not determine version of %s" % executable_name) <NEW_LINE> <DEDENT> if min_version: <NEW_LINE> <INDENT> version_nums = str.split(XYZ, '.') <NEW_LINE> for idx, val in enumerate(str.split(min_version, '.')): <NEW_LINE> <INDENT> if idx >= len(version_nums) or int(val) > int(version_nums[idx]): <NEW_LINE> <INDENT> if exit: <NEW_LINE> <INDENT> sys.exit("You must upgrade %s to %s or higher" % (executable_name, min_version)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> print("WARNING: you should upgrade %s to %s or higher" % (executable_name, min_version)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> if suffix and version.find(suffix) == -1: <NEW_LINE> <INDENT> sys.exit("Suffix %s was not found in version %s" % (suffix, version))
|
Verify dependencies on non-python executables upon which some shell scripts in ssh-ecs-run-task rely
|
625941bbec188e330fd5a65a
|
def _eeprom_read(self, address, length): <NEW_LINE> <INDENT> if (self._check_eeprom_access_args(address=address, length=length)): <NEW_LINE> <INDENT> return self.send_cmd(cmds.NodeEEPROMAccess(cmd=cmds.CMD_PARAM_READ, address=address, length=length))
|
Reads 'length' values from EEPROM starting at 'address'
Args:
address (int): Address must be in [0 .. 16383]
length (int): Length must be in [1 .. 320] (i.e. fits in a 1400-byte packet)
Returns:
values (list of u8): List of u8 values received from the node
|
625941bb851cf427c661a3c7
|
def cache(*cache_control_items, **cache_control_kw): <NEW_LINE> <INDENT> cache_control_kw.update(cache_control_items) <NEW_LINE> def decorate_func(func): <NEW_LINE> <INDENT> @wraps(func) <NEW_LINE> def decorate_func_call(*a, **kw): <NEW_LINE> <INDENT> callback = SetCacheControlHeadersCallback(**cache_control_kw) <NEW_LINE> registry_provider = AfterThisRequestCallbackRegistryProvider() <NEW_LINE> registry = registry_provider.provide() <NEW_LINE> registry.add(callback) <NEW_LINE> return func(*a, **kw) <NEW_LINE> <DEDENT> return decorate_func_call <NEW_LINE> <DEDENT> return decorate_func
|
Set Cache-Control headers.
Expects keyword arguments and/or an item list.
Each pair is used to set Flask Response.cache_control attributes,
where the key is the attribute name and the value is its value.
Use True as value for attributes without values.
In case of an invalid attribute, CacheControlAttributeInvalidError
will be thrown.
|
625941bb23849d37ff7b2f46
|
def forward(inputs, masks, labels, model, arr): <NEW_LINE> <INDENT> batch_size = len(inputs[0]) <NEW_LINE> model.reset() <NEW_LINE> inputs = [Variable(arr.array(row, dtype='float32')) for row in inputs] <NEW_LINE> masks = [Variable(arr.array(row, dtype='float32')) for row in masks] <NEW_LINE> labels = Variable(arr.array(labels, dtype='int32')) <NEW_LINE> model.encode(inputs) <NEW_LINE> ys = model.predict(masks) <NEW_LINE> loss = functions.softmax_cross_entropy(ys, labels) <NEW_LINE> return loss
|
Computes the forward pass.
:params inputs: input vectors
:params masks: mask vectors
:params labels: ground-truth labels
:params model: instantiated model
:params arr: cupy or numpy
|
625941bb0fa83653e4656e72
|
@hook.command() <NEW_LINE> def slogan(text): <NEW_LINE> <INDENT> out = random.choice(slogans) <NEW_LINE> if text.lower() and out.startswith("<text>"): <NEW_LINE> <INDENT> text = formatting.capitalize_first(text) <NEW_LINE> <DEDENT> return out.replace('<text>', text)
|
slogan <word> -- Makes a slogan for <word>.
|
625941bb379a373c97cfaa00
|
def RPR_CSurf_OnSelectedChange(trackid,selected): <NEW_LINE> <INDENT> a=_ft['CSurf_OnSelectedChange'] <NEW_LINE> f=CFUNCTYPE(c_byte,c_uint64,c_int)(a) <NEW_LINE> t=(rpr_packp('MediaTrack*',trackid),c_int(selected)) <NEW_LINE> r=f(t[0],t[1]) <NEW_LINE> return r
|
Python: Boolean RPR_CSurf_OnSelectedChange(MediaTrack trackid, Int selected)
|
625941bbbe8e80087fb20afd
|
def _next_batch(self, data, batch_idxs, labels=None): <NEW_LINE> <INDENT> def _normalize_length(_data, max_length): <NEW_LINE> <INDENT> return _data + [self.PAD] * (max_length - len(_data)) <NEW_LINE> <DEDENT> seq1_data, seq1_lengths, seq2_pos_data, seq2_pos_lengths, seq2_neg_data, seq2_neg_lengths = [], [], [], [], [], [] <NEW_LINE> for idx in batch_idxs: <NEW_LINE> <INDENT> seq1, seq2_pos, seq2_neg = data[idx] <NEW_LINE> seq1_lengths.append(len(seq1)) <NEW_LINE> seq2_pos_lengths.append(len(seq2_pos)) <NEW_LINE> seq2_neg_lengths.append(len(seq2_neg)) <NEW_LINE> <DEDENT> seq1_max_length = max(seq1_lengths) <NEW_LINE> seq2_max_length = max(seq2_pos_lengths + seq2_neg_lengths) <NEW_LINE> for idx in batch_idxs: <NEW_LINE> <INDENT> seq1, seq2_pos, seq2_neg = data[idx] <NEW_LINE> seq1_data.append(_normalize_length(seq1, seq1_max_length)) <NEW_LINE> seq2_pos_data.append(_normalize_length(seq2_pos, seq2_max_length)) <NEW_LINE> seq2_neg_data.append(_normalize_length(seq2_neg, seq2_max_length)) <NEW_LINE> <DEDENT> batch_data_dict = { 'sentence1_inputs': np.asarray(seq1_data, dtype=np.int32), 'sentence1_lengths': np.asarray(seq1_lengths, dtype=np.int32), 'sentence2_pos_inputs': np.asarray(seq2_pos_data, dtype=np.int32), 'sentence2_pos_lengths': np.asarray(seq2_pos_lengths, dtype=np.int32), 'sentence2_neg_inputs': np.asarray(seq2_neg_data, dtype=np.int32), 'sentence2_neg_lengths': np.asarray(seq2_neg_lengths, dtype=np.int32), } <NEW_LINE> if labels: <NEW_LINE> <INDENT> batch_data_dict['labels'] = np.asarray( [labels[idx] for idx in batch_idxs], dtype=np.int32) <NEW_LINE> <DEDENT> return batch_data_dict
|
Generate next batch.
:param data: data list to process
:param batch_idxs: idxs to process
:param labels: additional labels to include
:return: dict containing the next batch_size amount of data
|
625941bbd7e4931a7ee9ddd1
|
def symlink_files(out_dir): <NEW_LINE> <INDENT> for fname in glob.glob(os.path.join(ROOT_DIR, '*')): <NEW_LINE> <INDENT> in_path = os.path.join(os.getcwd(), fname) <NEW_LINE> out_path = os.path.join(out_dir, os.path.basename(fname)) <NEW_LINE> os.symlink(in_path, out_path)
|
Symlink files from ROOT_DIR to the current |out_dir|.
|
625941bbbe383301e01b5341
|
def _pre_setup(self, *args, **kwargs): <NEW_LINE> <INDENT> super(LexiconPluginTest2, self)._pre_setup(*args, **kwargs) <NEW_LINE> defaults = { "markup": MARKUP_CREOLE, "is_public": True, } <NEW_LINE> self.entry_es = self.easy_create(LexiconEntry, defaults, term="Spanish", language=self.languages["es"], tags="shared, Spain, other", short_definition="Spanish is a language ;)", content="Spanish or Castilian (español or castellano) is a Romance language...", )
|
Create some lexicon entries.
|
625941bb099cdd3c635f0b12
|
def post(self): <NEW_LINE> <INDENT> data = json.loads(self.request.body.decode("utf-8")) <NEW_LINE> top_repo_path = data["top_repo_path"] <NEW_LINE> if data["add_all"]: <NEW_LINE> <INDENT> my_output = self.git.add_all(top_repo_path) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> filename = data["filename"] <NEW_LINE> my_output = self.git.add(filename, top_repo_path) <NEW_LINE> <DEDENT> self.finish(my_output)
|
POST request handler, adds one or all files into the staging area.
|
625941bb627d3e7fe0d68d04
|
def make_mass_quantile_plot(hist, quantile_pairs, color, savename): <NEW_LINE> <INDENT> y_range = [0, hist.GetMaximum() * 1.05] <NEW_LINE> set_range_hists([hist], y_range=y_range) <NEW_LINE> can = mkplot([hist], colors=[color], drawOpt='H') <NEW_LINE> leg = r.TLegend(0.1, 0.91, 0.9, 0.95) <NEW_LINE> leg.SetNColumns(len(quantile_pairs)) <NEW_LINE> leg.SetBorderSize(0) <NEW_LINE> line_styles = [2, 3, 5, 6] <NEW_LINE> lines = [] <NEW_LINE> for i, pair in enumerate(quantile_pairs): <NEW_LINE> <INDENT> quantiles = get_quantiles(hist, pair) <NEW_LINE> qline_1, qline_2 = get_quantile_lines(quantiles, y_range, default_colors()[0], line_styles[i]) <NEW_LINE> qline_2.Draw() <NEW_LINE> qline_1.Draw() <NEW_LINE> leg.AddEntry(qline_1, '[{}, {}]'.format(*pair), 'l') <NEW_LINE> lines.append(qline_1) <NEW_LINE> lines.append(qline_2) <NEW_LINE> <DEDENT> leg.Draw() <NEW_LINE> can.Draw() <NEW_LINE> can.SaveAs(savename)
|
Quantile plots for one state.
TODO:
|
625941bb0c0af96317bb809e
|
def floodFill(self, image, sr, sc, newColor): <NEW_LINE> <INDENT> def dfs(pos): <NEW_LINE> <INDENT> image[pos[0]][pos[1]] = newColor <NEW_LINE> for d in dirs: <NEW_LINE> <INDENT> x, y = pos[0] + d[0], pos[1] + d[1] <NEW_LINE> if x in (-1, height) or y in (-1, width) or image[x][y] != color: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> dfs((x, y)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> if image[sr][sc] == newColor: <NEW_LINE> <INDENT> return image <NEW_LINE> <DEDENT> dirs = [(0, 1), (0, -1), (1, 0), (-1, 0)] <NEW_LINE> width = len(image[0]) <NEW_LINE> height = len(image) <NEW_LINE> color = image[sr][sc] <NEW_LINE> dfs((sr, sc)) <NEW_LINE> return image
|
:type image: List[List[int]]
:type sr: int
:type sc: int
:type newColor: int
:rtype: List[List[int]]
|
625941bb57b8e32f52483356
|
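A standalone sketch of the DFS flood fill above, lifted out of its class so it runs by itself; the one-step out-of-range trick (`x in (-1, height)`) is replaced with an explicit bounds check, but the behavior is the same.

```python
from typing import List

def flood_fill(image: List[List[int]], sr: int, sc: int, new_color: int) -> List[List[int]]:
    if image[sr][sc] == new_color:
        return image
    height, width = len(image), len(image[0])
    color = image[sr][sc]
    def dfs(r: int, c: int) -> None:
        image[r][c] = new_color
        for dr, dc in ((0, 1), (0, -1), (1, 0), (-1, 0)):
            x, y = r + dr, c + dc
            if 0 <= x < height and 0 <= y < width and image[x][y] == color:
                dfs(x, y)
    dfs(sr, sc)
    return image

print(flood_fill([[1, 1, 1], [1, 1, 0], [1, 0, 1]], 1, 1, 2))
# [[2, 2, 2], [2, 2, 0], [2, 0, 1]]
```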
def lr_schedule(epoch): <NEW_LINE> <INDENT> lr = 1e-4 <NEW_LINE> if epoch > 40: <NEW_LINE> <INDENT> lr *= 1e-2 <NEW_LINE> <DEDENT> elif epoch > 20: <NEW_LINE> <INDENT> lr *= 1e-1 <NEW_LINE> <DEDENT> return lr
|
Learning Rate Schedule
Learning rate is scheduled to be reduced after 20 and 40 epochs.
Called automatically every epoch as part of callbacks during training.
# Arguments
epoch (int): The number of epochs
# Returns
lr (float32): learning rate
|
625941bb23e79379d52ee41d
|
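A quick check of the schedule above with the boundaries as written in the code: base 1e-4, times 0.1 after epoch 20, times 0.01 after epoch 40.

```python
def lr_schedule(epoch):
    lr = 1e-4
    if epoch > 40:
        lr *= 1e-2
    elif epoch > 20:
        lr *= 1e-1
    return lr

for epoch in (10, 30, 50):
    print(epoch, lr_schedule(epoch))  # 0.0001, ~1e-05, ~1e-06
```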
def t_NUMBER(t): <NEW_LINE> <INDENT> if t.value[-1] == 'h': <NEW_LINE> <INDENT> t.value = int(t.value[:-1],16) <NEW_LINE> <DEDENT> elif len(t.value) > 2 and t.value[0:2] == '0x': <NEW_LINE> <INDENT> t.value = int(t.value[2:],16) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> t.value = int(t.value) <NEW_LINE> <DEDENT> if t.lexer.placement: <NEW_LINE> <INDENT> t.lexer.placement = False <NEW_LINE> t.type = 'PLACEMENT' <NEW_LINE> <DEDENT> return t
|
0x[0-9a-fA-F]+|[0-9a-fA-F]+h|\d+
|
625941bbf548e778e58cd432
|
def _unload(self): <NEW_LINE> <INDENT> pass
|
Unload the USBIP daemon object from the layout and from the configuration
|
625941bbc432627299f04afa
|
def fit(self, X, Y): <NEW_LINE> <INDENT> self._kd_tree(X) <NEW_LINE> self.y = Y <NEW_LINE> return self
|
Training: builds and stores the kd-tree for later distance lookups.
:param X: array-like
|
625941bb3539df3088e2e201
|
def one_hot_nonoverlap(segmask_array, class_list): <NEW_LINE> <INDENT> returnSeg = [] <NEW_LINE> for i in range(0, len(class_list)): <NEW_LINE> <INDENT> returnSeg.append((segmask_array == class_list[i]).astype(np.uint8)) <NEW_LINE> <DEDENT> return np.stack(returnSeg, axis=0)
|
This function takes an N-D mask and a class list and returns a stacked array of one-hot encoded segmentations
|
625941bb8e71fb1e9831d663
|
def from_url(url: str) -> str: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> with requests.get(url=url, allow_redirects=True, stream=True, headers={"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) Gecko/20100101 Firefox/80.0"}) as r: <NEW_LINE> <INDENT> downloaded_file = os.path.join(mkdtemp(), get_filename(url)) <NEW_LINE> with open(downloaded_file, "wb") as f: <NEW_LINE> <INDENT> for chunk in r.iter_content(chunk_size=1024): <NEW_LINE> <INDENT> if chunk: <NEW_LINE> <INDENT> f.write(chunk) <NEW_LINE> f.flush() <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return downloaded_file <NEW_LINE> <DEDENT> <DEDENT> except RequestException as e: <NEW_LINE> <INDENT> print(e)
|
Download the file from its url.
:param url: The url of the file to download.
:type url: str
:return: The full path of the downloaded file.
:rtype: str
|
625941bb9f2886367277a746
|
def read_words(words): <NEW_LINE> <INDENT> with open(words) as f: <NEW_LINE> <INDENT> return f.read().split("\n")
|
Read a words file, return a list of all the words.
|
625941bbd99f1b3c44c6744c
|
def createComponentMenu(self,name,lambda1,lambda2): <NEW_LINE> <INDENT> aMenu = Menu(self.mainMenu, tearoff=0) <NEW_LINE> self.mainMenu.add_cascade(label=name, menu=aMenu) <NEW_LINE> aMenu.add_command(label="Open Window",command= lambda1) <NEW_LINE> aMenu.add_command(label="Close Window",command= lambda2)
|
Create the components of a specific menu
|
625941bba4f1c619b28afef6
|
def removeAllFiles(dires): <NEW_LINE> <INDENT> if not isinstance(dires, list): <NEW_LINE> <INDENT> raise ValueError <NEW_LINE> <DEDENT> for dire in dires: <NEW_LINE> <INDENT> for file in os.listdir(dire): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> os.remove(os.path.join(dire, file)) <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> pass
|
Delete all files in the given directories, skipping any that raise exceptions.
:param dires: list of directories
:return: None
|
625941bbcb5e8a47e48b7964
|
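A safe way to try the directory-clearing helper above (translated docstring in the row), using a throwaway temp directory:

```python
import os
import tempfile

def remove_all_files(dires):
    # same logic as the row above: delete every file, skip on any exception
    if not isinstance(dires, list):
        raise ValueError
    for dire in dires:
        for file in os.listdir(dire):
            try:
                os.remove(os.path.join(dire, file))
            except Exception:
                pass

d = tempfile.mkdtemp()
open(os.path.join(d, 'a.txt'), 'w').close()
remove_all_files([d])
assert os.listdir(d) == []
```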
def update_file( text, filename, return_different=False): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> with open( filename) as f: <NEW_LINE> <INDENT> text0 = f.read() <NEW_LINE> <DEDENT> <DEDENT> except OSError: <NEW_LINE> <INDENT> text0 = None <NEW_LINE> <DEDENT> if text != text0: <NEW_LINE> <INDENT> if return_different and text0 is not None: <NEW_LINE> <INDENT> return text <NEW_LINE> <DEDENT> log( 'Updating: ' + filename) <NEW_LINE> filename_temp = f'{filename}-jlib-temp' <NEW_LINE> with open( filename_temp, 'w') as f: <NEW_LINE> <INDENT> f.write( text) <NEW_LINE> <DEDENT> rename( filename_temp, filename)
|
Writes <text> to <filename>. Does nothing if contents of <filename> are
already <text>.
If <return_different> is true, we return existing contents if <filename>
already exists and differs from <text>.
|
625941bbd53ae8145f87a12b
|
def Ft(self): <NEW_LINE> <INDENT> N = self.N <NEW_LINE> F = (1-6/(N*np.pi)**2)*(-1)**(N+1) <NEW_LINE> return F
|
Calculate Fn function, for use in Rg calculation
|
625941bb1b99ca400220a967
|
def get(self, line_id: int, **options) -> Dict: <NEW_LINE> <INDENT> return self._call(f"{line_id}", **options)
|
Retrieve a line by its ID.
:see: https://developers.hubspot.com/docs/methods/line-items/get_line_item_by_id
|
625941bbd8ef3951e32433f3
|
def find_cliques(graph,min_clique_size=3): <NEW_LINE> <INDENT> cliques = nx.algorithms.clique.find_cliques(graph) <NEW_LINE> clique_list = [] <NEW_LINE> for c in cliques: <NEW_LINE> <INDENT> if len(c)>=min_clique_size: <NEW_LINE> <INDENT> clique_list.append(c) <NEW_LINE> <DEDENT> <DEDENT> return clique_list
|
Find maximal cliques in the graph.
Parameters
----------
graph : networkx graph
The graph to search for cliques.
min_clique_size : int
Minimum number of nodes for a clique to be kept.
Returns
-------
list(clique)
Maximal cliques with at least min_clique_size nodes.
|
625941bb5166f23b2e1a500f
|
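A small demonstration of the clique filter above, assuming networkx is installed: a K4 with a pendant edge has maximal cliques {0, 1, 2, 3} and {3, 4}, and only the first survives the size filter.

```python
import networkx as nx

def find_cliques(graph, min_clique_size=3):
    return [c for c in nx.algorithms.clique.find_cliques(graph)
            if len(c) >= min_clique_size]

g = nx.complete_graph(4)   # nodes 0..3, fully connected
g.add_edge(3, 4)           # pendant edge -> maximal clique {3, 4} of size 2
print(find_cliques(g))     # [[0, 1, 2, 3]] (node order may vary)
```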
def test_char_offset(): <NEW_LINE> <INDENT> skimmer = FileToSkim('/some/path', u'abc\r\nde\nfghi', 'dummy_tree') <NEW_LINE> eq_(skimmer.char_offset(1, 1), 1) <NEW_LINE> eq_(skimmer.char_offset(2, 1), 6) <NEW_LINE> eq_(skimmer.char_offset(3, 1), 9)
|
Make sure char_offset() deals with different kinds of line breaks and
handles the first and last lines correctly.
|
625941bba8370b7717052756
|
def analysis_describe(object_id, input_params={}, always_retry=True, **kwargs): <NEW_LINE> <INDENT> return DXHTTPRequest('/%s/describe' % object_id, input_params, always_retry=always_retry, **kwargs)
|
Invokes the /analysis-xxxx/describe API method.
For more info, see: https://documentation.dnanexus.com/developer/api/running-analyses/workflows-and-analyses#api-method-analysis-xxxx-describe
|
625941bb0a366e3fb873e6cd
|
def setUp(self): <NEW_LINE> <INDENT> self.repo = self.client.post(REPO_PATH, gen_repo()) <NEW_LINE> self.addCleanup(self.client.delete, self.repo['_href']) <NEW_LINE> for content in self.contents[:10]: <NEW_LINE> <INDENT> self.client.post( self.repo['_versions_href'], {'add_content_units': [content['_href']]} ) <NEW_LINE> sleep(1) <NEW_LINE> <DEDENT> self.repo = self.client.get(self.repo['_href'])
|
Create a repository and give it new versions.
|
625941bb16aa5153ce36232e
|
def __init__(self, item, children): <NEW_LINE> <INDENT> self.item = item <NEW_LINE> self.children = children <NEW_LINE> self.parent = None <NEW_LINE> self.timeStamps = _NodeSummaries()
|
Initializing the Node class
:param item: Storing the item of a node
:type item: int
:param children: To maintain the children of a node
:type children: dict
|
625941bb7d847024c06be16f
|
def splitAtLocation(self, point, location, node_id=-1, edge_f_id=-1, edge_t_id=-2): <NEW_LINE> <INDENT> num_points = self.geom.numPoints() <NEW_LINE> points = [self.geom.pointN(i) for i in range(num_points)] <NEW_LINE> N = int(num_points * location) or 1 <NEW_LINE> if N == num_points: <NEW_LINE> <INDENT> N -= 1 <NEW_LINE> <DEDENT> edge_f_points = points[:N] + [point] <NEW_LINE> edge_t_points = [point] + points[N:] <NEW_LINE> srs = self.geom.srs <NEW_LINE> edge_f_geom = geometry.LineString(points=edge_f_points, srs=srs) <NEW_LINE> edge_t_geom = geometry.LineString(points=edge_t_points, srs=srs) <NEW_LINE> RegionEdge = self.__class__ <NEW_LINE> edge_f = RegionEdge(id=edge_f_id, node_f_id=self.node_f_id, node_t_id=node_id, street_name=self.street_name, geom=edge_f_geom) <NEW_LINE> edge_t = RegionEdge(id=edge_t_id, node_f_id=node_id, node_t_id=self.node_t_id, street_name=self.street_name, geom=edge_t_geom) <NEW_LINE> RegionNode = self.node_f.__class__ <NEW_LINE> geom = self.geom <NEW_LINE> shared_node = RegionNode(id=node_id, geom=geom.pointN(N)) <NEW_LINE> edge_f.node_f = RegionNode(id=self.node_f_id, geom=geom.startPoint()) <NEW_LINE> edge_f.node_t = shared_node <NEW_LINE> edge_t.node_f = shared_node <NEW_LINE> edge_t.node_t = RegionNode(id=self.node_t_id, geom=geom.endPoint()) <NEW_LINE> return edge_f, edge_t
|
Split this edge at ``location`` and return two new edges.
The first edge is `node_f`=>``num``; the second is ``num``=>`node_t`.
Distribute attributes of original edge to the two new edges.
``location`` `float` -- Location in range [0, 1] to split at
``node_id`` -- Node ID to assign the node at the split
``edge_f_id`` -- Edge ID to assign the `node_f`=>``num`` edge
``edge_t_id`` -- Edge ID to assign the ``num``=>`node_t` edge
return `Edge`, `Edge` -- `node_f`=>``num``, ``num``=>`node_t` edges
Recipe:
- Determine location of num along edge; use .5 as default
- Get XY at location
- Get line geometry on either side of XY
- Transfer geometry and attributes to two new edges
- Return the two new edges
|
625941bb15baa723493c3e28
|
def clustering_performance_evaluation(X, y_pred, y_true): <NEW_LINE> <INDENT> result = {} <NEW_LINE> result['ARI'] = metrics.adjusted_rand_score(y_true, y_pred) <NEW_LINE> result['AMI'] = metrics.adjusted_mutual_info_score(y_true, y_pred) <NEW_LINE> result['NMI'] = metrics.normalized_mutual_info_score(y_true, y_pred) <NEW_LINE> h,c,v = metrics.homogeneity_completeness_v_measure(y_true, y_pred) <NEW_LINE> result['Homo'] = h <NEW_LINE> result['Comp'] = c <NEW_LINE> result['V'] = v <NEW_LINE> result['FM'] = metrics.fowlkes_mallows_score(y_true, y_pred) <NEW_LINE> result['Sil'] = metrics.silhouette_score(X[['entropy', 'joint_entropy']], y_pred, metric='euclidean') <NEW_LINE> return result
|
This function implements multiple evaluation metrics for clustering analysis.
It is used to assess the quality of a clustering solution based on multiple criteria.
:param X: input matrix
:param y_pred: predicted vector
:param y_true: ground truth - None if this knowledge is unavailable
:return: a dictionary with all measures
|
625941bbfb3f5b602dac3545
|
def startServer( self, persistFilename: str = "networktables.ini", listenAddress: str = "", port: int = constants.NT_DEFAULT_PORT, ): <NEW_LINE> <INDENT> return self._api.startServer(persistFilename, listenAddress, port)
|
Starts a server using the specified filename, listening address, and port.
:param persistFilename: the name of the persist file to use
:param listenAddress: the address to listen on, or empty to listen on any
address
:param port: port to communicate over
.. versionadded:: 2018.0.0
|
625941bb94891a1f4081b95e
|
def asymmetry(self, id_min=None, id_max=None): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> T = self.gyration_tensor(id_min, id_max) <NEW_LINE> l1, l2 = np.linalg.eigvals(T) <NEW_LINE> return - np.log(1 - (l1 - l2)**2 / (2 * (l1 + l2)**2)) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> return np.nan
|
Returns a feature which measures how asymmetric the random walk is.
May help to detect drift.
|
625941bb56b00c62f0f1450d
|
def get_pa_actions(self): <NEW_LINE> <INDENT> action = ['still', 'up', 'down', 'left', 'right'] <NEW_LINE> if self.pa_loc[0] == 0: <NEW_LINE> <INDENT> action.remove('up') <NEW_LINE> <DEDENT> if self.pa_loc[0] == self.row_num - 1: <NEW_LINE> <INDENT> action.remove('down') <NEW_LINE> <DEDENT> if self.pa_loc[1] == 0: <NEW_LINE> <INDENT> action.remove('left') <NEW_LINE> <DEDENT> if self.pa_loc[1] == self.column_num - 1: <NEW_LINE> <INDENT> action.remove('right') <NEW_LINE> <DEDENT> return action
|
Used when building the game tree.
|
625941bb956e5f7376d70d2e
|
def test_post_to_home_404_no_category(testapp): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> testapp.post('/', {'category': None}) <NEW_LINE> <DEDENT> except AppError as err: <NEW_LINE> <INDENT> assert '404 Not Found' in err.args[0]
|
A POST to the home page with no category should return a 404.
|
625941bb3d592f4c4ed1cf34
|
def lock(self, auth_no_user_interaction=None): <NEW_LINE> <INDENT> return self._I.Encrypted.method.Lock( '(a{sv})', filter_opt({ 'auth.no_user_interaction': ('b', auth_no_user_interaction), }) )
|
Lock the LUKS device.
|
625941bb15fb5d323cde09c1
|
def configerus_bootstrap(config: Config): <NEW_LINE> <INDENT> pass
|
Bootstrap a config object.
We don't actually do anything, so this bootstrapper is here only to ensure
that the above factory decorators are run
|
625941bb9b70327d1c4e0c8a
|
def encode_landm(matched, priors, variances): <NEW_LINE> <INDENT> matched = torch.reshape(matched, (matched.size(0), 5, 2)) <NEW_LINE> priors_cx = priors[:, 0].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) <NEW_LINE> priors_cy = priors[:, 1].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) <NEW_LINE> priors_w = priors[:, 2].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) <NEW_LINE> priors_h = priors[:, 3].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) <NEW_LINE> priors = torch.cat([priors_cx, priors_cy, priors_w, priors_h], dim=2) <NEW_LINE> g_cxcy = matched[:, :, :2] - priors[:, :, :2] <NEW_LINE> g_cxcy /= (variances[0] * priors[:, :, 2:]) <NEW_LINE> g_cxcy = g_cxcy.reshape(g_cxcy.size(0), -1) <NEW_LINE> return g_cxcy
|
Encode the variances from the priorbox layers into the ground truth boxes
we have matched (based on jaccard overlap) with the prior boxes.
Args:
matched: (tensor) Coords of ground truth for each prior in point-form
Shape: [num_priors, 10].
priors: (tensor) Prior boxes in center-offset form
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
encoded landm (tensor), Shape: [num_priors, 10]
|
625941bb44b2445a33931f56
|
def wallsAndGates(self, rooms: List[List[int]]) -> None: <NEW_LINE> <INDENT> if len(rooms) == 0 or len(rooms[0]) == 0: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> for i in range(len(rooms)): <NEW_LINE> <INDENT> for j in range(len(rooms[0])): <NEW_LINE> <INDENT> if rooms[i][j] == 0: <NEW_LINE> <INDENT> self.findPath(rooms, i, j)
|
Do not return anything, modify rooms in-place instead.
|
625941bb2ae34c7f2600cfe8
|
def plot_mean_execution_time(n, m): <NEW_LINE> <INDENT> print(mean_execution_time(int(n), int(m))) <NEW_LINE> mean_vectorized, mean_plain = mean_execution_time(int(n), int(m)) <NEW_LINE> p1 = plt.bar([0], mean_vectorized, color='g') <NEW_LINE> p2 = plt.bar([1], mean_plain, color='r') <NEW_LINE> plt.ylabel("Time spent") <NEW_LINE> plt.yticks(np.arange(0, mean_plain)) <NEW_LINE> plt.xticks(range(0, 1)) <NEW_LINE> plt.legend(("vectorized", "non - vectorized")) <NEW_LINE> plt.show()
|
Plots the mean execution time of forward_pass and vectorized_forward_pass.
|
625941bba79ad161976cbffb
|
def emitter_reputation(self, mx): <NEW_LINE> <INDENT> if self.is_blacklist_mx(mx): <NEW_LINE> <INDENT> return 'blacklisted' <NEW_LINE> <DEDENT> if self.is_whitelist_mx(mx): <NEW_LINE> <INDENT> return 'whitelisted' <NEW_LINE> <DEDENT> return 'unknown'
|
Return features about emitter.
|
625941bbaad79263cf3908f2
|
def rst_pat_level_find(endpoint, node, port, auth, query_map): <NEW_LINE> <INDENT> PATNAME = "00100010" <NEW_LINE> PATID = "00100020" <NEW_LINE> PATBDATE = "00100030" <NEW_LINE> PATSEX = "00100040" <NEW_LINE> accept = {'Accept': 'application/json'} <NEW_LINE> url = 'http://%s:%d/%s/studies/' % (node, port, endpoint) <NEW_LINE> with requests.Session() as s: <NEW_LINE> <INDENT> if auth is not None: <NEW_LINE> <INDENT> user, passwd = auth.split(':') <NEW_LINE> http_response = s.get(url, auth=(user, passwd), headers=accept, params=query_map) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> http_response = s.get(url, headers=accept, params=query_map) <NEW_LINE> <DEDENT> <DEDENT> matches = http_response.json() <NEW_LINE> patients = [ PatientLevelFields( str(match[PATNAME]['Value'][0]), str(match[PATID]['Value'][0]), str(match[PATBDATE]['Value'][0]), str(match[PATSEX]['Value'][0]), 0 ) for match in matches ] <NEW_LINE> return sorted(set(patients))
|
Patient level find using DICOM QIDO-RS rest API.
Behaves similarly to a DICOM Patient Level C-FIND
but uses the StudyRoot hierarchy of the QIDO-RS API.
Parameters
----------
endpoint : str
Root of QIDO-RS/WADO-RS in URL.
node : str
Hostname.
port: int
TCP port number
auth: str
Colon separated username:password combination
query_map: dict
QIDO-RS query parameters for the request
Returns
-------
list
Sorted list of matching PatientLevelFields structures
|
625941bb4e4d5625662d4292
|
def parse_environment_variable_strings(envvar_strings: Iterable[str]) -> Dict[str, str]: <NEW_LINE> <INDENT> environment_variables = {} <NEW_LINE> for envstr in envvar_strings: <NEW_LINE> <INDENT> key, _, value = envstr.partition('=') <NEW_LINE> key = key.strip() <NEW_LINE> if not key: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> environment_variables[key] = value.strip() <NEW_LINE> <DEDENT> return environment_variables
|
Parse a list of environment variable strings into a dict.
|
625941bbbe383301e01b5342
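For example (whitespace around keys and values is stripped, and entries with an empty key are skipped):

env = parse_environment_variable_strings(
    ['PATH=/usr/bin', ' DEBUG = 1 ', '=no-key', 'EMPTY='])
# {'PATH': '/usr/bin', 'DEBUG': '1', 'EMPTY': ''}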
|
def buf_getlastframedata(self): <NEW_LINE> <INDENT> return self.buf_getframedata(-1)
|
Return a NumPy buffer with the image data of the last updated frame.
Returns:
npBuf: NumPy buffer
False: an error occurred; lasterr() returns the DCAMERR value
|
625941bb8e05c05ec3eea228
|
def __init__(self, name=None, **kwargs): <NEW_LINE> <INDENT> self.name = name <NEW_LINE> for key in kwargs: <NEW_LINE> <INDENT> setattr(self, key, kwargs[key]) <NEW_LINE> <DEDENT> self.raw = self.__dict__
|
Initialize the object.
|
625941bb5510c4643540f2a6
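A self-contained re-creation of the pattern (the class name is assumed); note that self.raw aliases the live instance __dict__, so it also ends up containing the 'raw' key itself:

class AttrBag:
    # Same shape as the snippet above: stash kwargs as attributes.
    def __init__(self, name=None, **kwargs):
        self.name = name
        for key in kwargs:
            setattr(self, key, kwargs[key])
        self.raw = self.__dict__  # alias, not a copy

bag = AttrBag(name='sensor', rate=115200)
print(bag.rate)                    # 115200
print(bag.raw['name'])             # 'sensor'
print(bag.raw['raw'] is bag.raw)   # True: raw references the live __dict__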
|
def combine_masks(masks_paths, image_name): <NEW_LINE> <INDENT> masks = [] <NEW_LINE> for mask_path in tqdm(masks_paths, desc='Processing masks for image {}'.format(image_name), leave=False): <NEW_LINE> <INDENT> mask = preprocess_image(str(mask_path), is_mask=True) <NEW_LINE> mask = np.expand_dims(mask, axis=-1) <NEW_LINE> mask = mask.astype(int) <NEW_LINE> masks.append(mask) <NEW_LINE> <DEDENT> return np.maximum.reduce(masks)
|
Combine the different masks of a single image into one mask
|
625941bb30c21e258bdfa352
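The np.maximum.reduce call computes an element-wise union over the stack of binary masks; a tiny illustration (the snippet's preprocess_image is assumed to yield 0/1 arrays):

import numpy as np

m1 = np.array([[0, 1], [0, 0]])[..., np.newaxis]
m2 = np.array([[0, 0], [1, 0]])[..., np.newaxis]
union = np.maximum.reduce([m1, m2])
# union[..., 0] -> [[0, 1], [1, 0]]: a pixel is set if any mask sets it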
|
def test_is_valid_dm_int16_array_F(self): <NEW_LINE> <INDENT> D = np.zeros((5, 5), dtype='h') <NEW_LINE> self.assertFalse(is_valid_dm(D))
|
Tests is_valid_dm(*) on an int16 array. False expected.
|
625941bb9f2886367277a747
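The zero matrix is square, symmetric, and has a zero diagonal, so the expected False presumably comes from the dtype check (is_valid_dm requiring doubles); a sketch contrasting the two cases under that assumption:

import numpy as np
from scipy.spatial.distance import is_valid_dm

print(is_valid_dm(np.zeros((5, 5))))             # True: float64 zero matrix
print(is_valid_dm(np.zeros((5, 5), dtype='h')))  # False: integer dtype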
|
def compute_optimal_bb_area(self, proj_dict=None): <NEW_LINE> <INDENT> if proj_dict is None: <NEW_LINE> <INDENT> proj_dict = {} <NEW_LINE> <DEDENT> projection = proj_dict.setdefault('proj', 'omerc') <NEW_LINE> area_id = projection + '_otf' <NEW_LINE> description = 'On-the-fly ' + projection + ' area' <NEW_LINE> lines, cols = self.lons.shape <NEW_LINE> x_size = int(cols * 1.1) <NEW_LINE> y_size = int(lines * 1.1) <NEW_LINE> proj_dict = self.compute_bb_proj_params(proj_dict) <NEW_LINE> if projection == 'omerc': <NEW_LINE> <INDENT> x_size, y_size = y_size, x_size <NEW_LINE> <DEDENT> area = DynamicAreaDefinition(area_id, description, proj_dict) <NEW_LINE> lons, lats = self.get_edge_lonlats() <NEW_LINE> return area.freeze((lons, lats), size=(x_size, y_size))
|
Compute the "best" bounding box area for this swath with `proj_dict`.
By default, the projection is Oblique Mercator (`omerc` in proj.4), in
which case the right projection angle `alpha` is computed from the
swath centerline. For other projections, only the appropriate center of
projection and area extents are computed.
|
625941bba4f1c619b28afef7
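A hedged usage sketch; swath_def stands for whatever object exposes this method (a pyresample-style swath definition), and the projection dict is illustrative:

# swath_def is assumed to exist; build the on-the-fly oblique Mercator area.
area = swath_def.compute_optimal_bb_area({'proj': 'omerc', 'ellps': 'WGS84'})
print(area.area_id, area.shape)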
|
def test_add_columns(self): <NEW_LINE> <INDENT> db_metadata = MetaData() <NEW_LINE> table_infomatic = Table('infomatic', db_metadata, Column('id', Integer, primary_key=True), Column('info', Unicode(255)), Column('expectedoffset', Integer)) <NEW_LINE> self.assertTrue(len(table_infomatic.columns) == 3) <NEW_LINE> tzaware_datetime.helper.append_columns(table_infomatic, 'newdate') <NEW_LINE> self.assertTrue(len(table_infomatic.columns) == 6)
|
Test that helper.append_columns adds the expected Column objects to an existing table.
|
625941bbde87d2750b85fc45
|
def run(self): <NEW_LINE> <INDENT> working_directory = self.getWorkingDirectory() <NEW_LINE> if os.path.exists(working_directory): <NEW_LINE> <INDENT> shutil.rmtree(working_directory) <NEW_LINE> <DEDENT> os.makedirs(working_directory) <NEW_LINE> saved_pwd = os.getcwd() <NEW_LINE> os.chdir(working_directory) <NEW_LINE> try: <NEW_LINE> <INDENT> return self._doImport() <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> os.chdir(saved_pwd) <NEW_LINE> shutil.rmtree(working_directory)
|
Run the code import job.
This is the primary public interface to the `ImportWorker`. This
method:
1. Retrieves an up-to-date foreign tree to import.
2. Gets the Bazaar branch to import into.
3. Imports the foreign tree into the Bazaar branch. If we've
already imported this before, we synchronize the imported Bazaar
branch with the latest changes to the foreign tree.
4. Publishes the newly-updated Bazaar branch, making it available to
Launchpad users.
5. Archives the foreign tree, so that we can update it quickly next
time.
|
625941bb627d3e7fe0d68d05
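The temporary-directory bookkeeping in run can also be expressed as a context manager; a sketch of the same pattern using only the standard library, which restores the saved cwd before the directory is removed (safer on platforms where the current directory cannot be deleted):

import contextlib
import os
import tempfile

@contextlib.contextmanager
def scratch_directory():
    # Enter a fresh working directory; always restore the old cwd, after
    # which the TemporaryDirectory context removes the scratch space.
    saved = os.getcwd()
    with tempfile.TemporaryDirectory() as tmp:
        os.chdir(tmp)
        try:
            yield tmp
        finally:
            os.chdir(saved)

# Hypothetical usage:
# with scratch_directory():
#     do_import()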
|
def get_language_from_request(request): <NEW_LINE> <INDENT> language = request.GET.get('language', None) <NEW_LINE> if language: <NEW_LINE> <INDENT> return language <NEW_LINE> <DEDENT> if hasattr(request, 'LANGUAGE_CODE'): <NEW_LINE> <INDENT> lang = settings.PAGE_LANGUAGE_MAPPING(str(request.LANGUAGE_CODE)) <NEW_LINE> if lang not in LANGUAGE_KEYS: <NEW_LINE> <INDENT> return settings.PAGE_DEFAULT_LANGUAGE <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return lang <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> return settings.PAGE_DEFAULT_LANGUAGE
|
Return the most obvious language according the request.
|
625941bbd164cc6175782c04
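The function is a three-step fallback chain: explicit ?language= parameter, then the request's LANGUAGE_CODE mapped and validated against the known languages, then the configured default. A generic, framework-free sketch of the same idea:

def pick_language(explicit, detected, supported, default):
    # Fallback chain: explicit choice, then detected-and-supported, then default.
    if explicit:
        return explicit
    if detected in supported:
        return detected
    return default

print(pick_language(None, 'fr', {'en', 'fr'}, 'en'))  # 'fr'
print(pick_language(None, 'xx', {'en', 'fr'}, 'en'))  # 'en'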
|
def get(self, request): <NEW_LINE> <INDENT> api = twitter_setup() <NEW_LINE> latest_tweets = [] <NEW_LINE> startDate = datetime(datetime.now().year, datetime.now().month, datetime.now().day, 0, 0, 0) <NEW_LINE> try: <NEW_LINE> <INDENT> last_tweet_id = Tweet.objects.order_by("-id_str")[0].id_str <NEW_LINE> tweets = tweepy.Cursor(api.user_timeline, screen_name='@Yallakoranow', since_id=last_tweet_id).items() <NEW_LINE> <DEDENT> except IndexError: <NEW_LINE> <INDENT> tweets = tweepy.Cursor(api.user_timeline, screen_name='@Yallakoranow').items() <NEW_LINE> <DEDENT> for status in tweets: <NEW_LINE> <INDENT> if status.created_at < startDate: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> text = status.text[0:status.text.index('https')] <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> text = status.text <NEW_LINE> <DEDENT> tweet = {'text': text, 'id_str': status.id_str, 'created_at': status.created_at, 'links': []} <NEW_LINE> if 'media' in status.entities.keys(): <NEW_LINE> <INDENT> for curr_media in status.entities['media']: <NEW_LINE> <INDENT> tweet['links'].append({'link_url': curr_media['media_url_https']}) <NEW_LINE> <DEDENT> <DEDENT> latest_tweets.append(tweet) <NEW_LINE> <DEDENT> if latest_tweets: <NEW_LINE> <INDENT> Tweet.objects.mongo_insert_many(latest_tweets) <NEW_LINE> <DEDENT> search_text = request.GET.get('text') <NEW_LINE> res = Tweet.objects.filter(text__icontains=search_text) if search_text else {} <NEW_LINE> return render(request, self.template_name, {'res': res})
|
Fetch today's new tweets from the Yallakora account via the Twitter API,
insert any new tweets into MongoDB, and then return the tweets matching
the search text from MongoDB.
|
625941bb3346ee7daa2b2c20
|
def getData(self): <NEW_LINE> <INDENT> return (bool(), float(), float(), float())
|
Gets the data defining the point.
x : The output x coordinate of the point.
y : The output y coordinate of the point.
z : The output z coordinate of the point.
Returns true if successful.
|
625941bb91f36d47f21ac3a6
|
def get_expect_city_count(city="hz"): <NEW_LINE> <INDENT> url = "http://" + city + ".lianjia.com/ershoufang" <NEW_LINE> count_in_page = 0 <NEW_LINE> while True: <NEW_LINE> <INDENT> page = urllib2.urlopen(url) <NEW_LINE> soup = BeautifulSoup(page, "html.parser") <NEW_LINE> error = soup.title.text <NEW_LINE> if error == u"验证异常流量-链家网" or error == u"人机认证": <NEW_LINE> <INDENT> count_in_page = count_in_page + 1 <NEW_LINE> time.sleep(600) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> for link in soup.find_all('div', 'resultDes clear'): <NEW_LINE> <INDENT> context = link.get_text() <NEW_LINE> total_house = re.findall(r"\d+\.?\d*", context)[0] <NEW_LINE> return total_house
|
Given a city, query the db for the number of listings the site shows for that city in the given month.
:param city:
:return:
|
625941bb7047854f462a12c3
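The listing count is pulled out of the result banner with a regular expression; a small illustration with made-up banner text:

import re

banner = u"共找到 30678 套杭州二手房"  # sample text, not taken from the site
print(re.findall(r"\d+\.?\d*", banner)[0])  # '30678'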
|
def tweets_from_JSON_file_IT(tweetfile): <NEW_LINE> <INDENT> with open(tweetfile) as handle: <NEW_LINE> <INDENT> for line in handle: <NEW_LINE> <INDENT> yield loads(line.strip())
|
Return an iterator over the tweets in the given tweetfile. Each line of
the file is parsed as a JSON dict representing one tweet.
|
625941bbd8ef3951e32433f4
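The snippet relies on loads without importing it; a self-contained version of the same JSON-lines pattern (the filename and field are placeholders):

from json import loads  # the import the snippet assumes

def tweets_from_jsonl(path):
    # One JSON document per line, yielded lazily.
    with open(path) as handle:
        for line in handle:
            yield loads(line.strip())

# Hypothetical usage:
# for tweet in tweets_from_jsonl('tweets.jsonl'):
#     print(tweet.get('text', ''))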
|