blob_id (string) | directory_id (string) | path (string) | content_id (string) | detected_licenses (list) | license_type (string) | repo_name (string) | snapshot_id (string) | revision_id (string) | branch_name (string) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64) | star_events_count (int64) | fork_events_count (int64) | gha_license_id (string) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string) | src_encoding (string) | language (string) | is_vendor (bool) | is_generated (bool) | length_bytes (int64) | extension (string) | content (string) | authors (list) | author_id (string)
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
453b93fc1b6f3e0f8ee45cf3702ee4dbb7370a83 | 4d6975caece0acdc793a41e8bc6d700d8c2fec9a | /leetcode/963.three-equal-parts/963.three-equal-parts.py | ac79ab4d212a72437194a3c510f1cb0e9831a6e0 | [] | no_license | guiconti/workout | 36a3923f2381d6e7023e127100409b3a2e7e4ccb | 5162d14cd64b720351eb30161283e8727cfcf376 | refs/heads/master | 2021-08-03T10:32:02.108714 | 2021-07-26T04:38:14 | 2021-07-26T04:38:14 | 221,025,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | class Solution:
def threeEqualParts(self, arr: List[int]) -> List[int]:
| [
"guibasconti@gmail.com"
] | guibasconti@gmail.com |
2e61a75b1f3159a26f99c6856de297e1936462a3 | 8310622d9f504b6ffdac62b57727afbc0af9992e | /problems/problems@101~200/problem_162/Hexadecimal_numbers.py | 0ab26d0339b45f05f153ada5056add6219c8610c | [] | no_license | smsxgz/euler_project | aba61131682d04ee614167181e7d77e979db7e02 | df373a5cdf2c3c106763ee2c25671f85f9ec3a9b | refs/heads/master | 2023-02-22T12:00:24.801942 | 2023-02-04T13:23:27 | 2023-02-04T13:23:27 | 98,957,903 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | n = 0
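# Project Euler 162: count the k-digit hexadecimal numbers that contain each
# of the digits 0, 1 and A at least once, by inclusion-exclusion over the
# sets of numbers missing one, two or all three of those digits.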
for k in range(3, 17):
    m = 15 * 16**(k - 1) - (15**k + 2 * 14 * 15**(k - 1)) + (2 * 14**k + 13 * 14**(k - 1)) - 13**k
if k == 3:
print(m)
n += m
print(hex(n))
| [
"smsxgz@gmail.com"
] | smsxgz@gmail.com |
123ef74cbbbc9c71c0e72270501ad9436e4674cf | f8d3f814067415485bb439d7fe92dc2bbe22a048 | /models/research/swivel/wordsim.py | 439d093f11de75607754632fc84a7d4dea95747e | [
"Apache-2.0"
] | permissive | gmonkman/python | 2f9ab8f159c01f6235c86cb0cd52062cd3fdedd3 | 9123aa6baf538b662143b9098d963d55165e8409 | refs/heads/master | 2023-04-09T15:53:29.746676 | 2022-11-26T20:35:21 | 2022-11-26T20:35:21 | 60,254,898 | 0 | 2 | null | 2023-03-24T22:58:39 | 2016-06-02T10:25:27 | Python | UTF-8 | Python | false | false | 2,517 | py | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computes Spearman's rho with respect to human judgements.
Given a set of row (and potentially column) embeddings, this computes Spearman's
rho between the rank ordering of predicted word similarity and human judgements.
Usage:
  wordsim.py --embeddings=<binvecs> --vocab=<vocab> eval1.tab eval2.tab ...
Options:
--embeddings=<filename>: the vectors to test
--vocab=<filename>: the vocabulary file
Evaluation files are assumed to be tab-separated files with exactly three
columns. The first two columns contain the words, and the third column contains
the scored human judgement.
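For example, a line might read: "tiger<TAB>cat<TAB>7.35".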
"""
from __future__ import print_function
import scipy.stats
import sys
from getopt import GetoptError, getopt
from vecs import Vecs
try:
opts, args = getopt(sys.argv[1:], '', ['embeddings=', 'vocab='])
except GetoptError as e:
print(e, file=sys.stderr)
sys.exit(2)
opt_embeddings = None
opt_vocab = None
for o, a in opts:
if o == '--embeddings':
opt_embeddings = a
if o == '--vocab':
opt_vocab = a
if not opt_vocab:
print('please specify a vocabulary file with "--vocab"', file=sys.stderr)
sys.exit(2)
if not opt_embeddings:
print('please specify the embeddings with "--embeddings"', file=sys.stderr)
sys.exit(2)
try:
vecs = Vecs(opt_vocab, opt_embeddings)
except IOError as e:
print(e, file=sys.stderr)
sys.exit(1)
def evaluate(lines):
acts, preds = [], []
  for line in lines:
    w1, w2, act = line.strip().split('\t')
    pred = vecs.similarity(w1, w2)
    if pred is None:
      continue
    acts.append(float(act))
    preds.append(pred)
rho, _ = scipy.stats.spearmanr(acts, preds)
return rho
for filename in args:
with open(filename, 'r') as lines:
print('%0.3f %s' % (evaluate(lines), filename))
| [
"gmonkman@mistymountains.biz"
] | gmonkman@mistymountains.biz |
5f0f0571c66281c5a92ae6f76a226bf6109f2588 | 5515b79ab3dc12f9b5117bd9c3beb39fbad198cc | /middlewares/__init__.py | 5aad822c19c4ed2df42ad9f60452ce62891f688d | [] | no_license | Chenger1/SwipeTelegramBot | 17904eb44776052938da18a452594ef6ae9313eb | 1ac4e86339aec3ae17b22a4d2c06001cd4b01c99 | refs/heads/main | 2023-07-14T05:56:11.596278 | 2021-08-18T12:31:58 | 2021-08-18T12:31:58 | 388,511,755 | 0 | 0 | null | 2021-08-08T22:19:24 | 2021-07-22T15:32:36 | Python | UTF-8 | Python | false | false | 488 | py | from aiogram import Dispatcher
from data.config import I18N_DOMAIN, LOCALES_DIR
from loader import dp
from .throttling import ThrottlingMiddleware
from .language import ACLMiddleware
def setup_middleware():
    # Set up the middleware
i18n_ = ACLMiddleware(I18N_DOMAIN, LOCALES_DIR)
dp.middleware.setup(i18n_)
return i18n_
i18n = setup_middleware()
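# `_` is the conventional alias for the gettext lookup used in handlers.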
_ = i18n.gettext
if __name__ == "middlewares":
dp.middleware.setup(ThrottlingMiddleware())
| [
"exs2199@gmail.com"
] | exs2199@gmail.com |
c187a7a01c74659a57be0805011863c63eec89f1 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-deh/huaweicloudsdkdeh/v1/model/batch_delete_dedicated_host_tags_request.py | d10a3ef88aa3aa3ffda7173164316f9be54b17ed | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,519 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class BatchDeleteDedicatedHostTagsRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'dedicated_host_id': 'str',
'body': 'ReqSetOrDeleteTags'
}
attribute_map = {
'dedicated_host_id': 'dedicated_host_id',
'body': 'body'
}
def __init__(self, dedicated_host_id=None, body=None):
"""BatchDeleteDedicatedHostTagsRequest
The model defined in huaweicloud sdk
        :param dedicated_host_id: The dedicated host ID. It can be queried from the dedicated host console or obtained by calling the API that lists dedicated hosts.
:type dedicated_host_id: str
:param body: Body of the BatchDeleteDedicatedHostTagsRequest
:type body: :class:`huaweicloudsdkdeh.v1.ReqSetOrDeleteTags`
"""
self._dedicated_host_id = None
self._body = None
self.discriminator = None
self.dedicated_host_id = dedicated_host_id
if body is not None:
self.body = body
@property
def dedicated_host_id(self):
"""Gets the dedicated_host_id of this BatchDeleteDedicatedHostTagsRequest.
        The dedicated host ID. It can be queried from the dedicated host console or obtained by calling the API that lists dedicated hosts.
:return: The dedicated_host_id of this BatchDeleteDedicatedHostTagsRequest.
:rtype: str
"""
return self._dedicated_host_id
@dedicated_host_id.setter
def dedicated_host_id(self, dedicated_host_id):
"""Sets the dedicated_host_id of this BatchDeleteDedicatedHostTagsRequest.
        The dedicated host ID. It can be queried from the dedicated host console or obtained by calling the API that lists dedicated hosts.
:param dedicated_host_id: The dedicated_host_id of this BatchDeleteDedicatedHostTagsRequest.
:type dedicated_host_id: str
"""
self._dedicated_host_id = dedicated_host_id
@property
def body(self):
"""Gets the body of this BatchDeleteDedicatedHostTagsRequest.
:return: The body of this BatchDeleteDedicatedHostTagsRequest.
:rtype: :class:`huaweicloudsdkdeh.v1.ReqSetOrDeleteTags`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this BatchDeleteDedicatedHostTagsRequest.
:param body: The body of this BatchDeleteDedicatedHostTagsRequest.
:type body: :class:`huaweicloudsdkdeh.v1.ReqSetOrDeleteTags`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BatchDeleteDedicatedHostTagsRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
a054496745bb40792965f65968850a5df725cb90 | cf59d92614a3505aeed9455482ef327572578228 | /venv/bin/jupyter-troubleshoot | 2f72a7c774175ff127ffcad344d20be8894ef2f4 | [
"MIT"
] | permissive | slarkjm0803/autobets | e1d1a3b00cf94ee90fd1fed7464431677b4f9e11 | f92a5d999acaf5d7c83ca2768a260c2282eabbee | refs/heads/master | 2020-09-23T21:40:46.057648 | 2019-11-29T11:42:37 | 2019-11-29T11:42:37 | 225,591,526 | 1 | 0 | MIT | 2019-12-03T10:22:21 | 2019-12-03T10:22:20 | null | UTF-8 | Python | false | false | 262 | #!/Users/mac/PycharmProjects/autobets/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from jupyter_core.troubleshoot import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"mac@macs-MacBook-Pro.local"
] | mac@macs-MacBook-Pro.local | |
2bbf8c99ea9aaba3001ad14fe8ab365dc25d0f5d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_joshes.py | 899a037817f7722c99dc8c579c857e84046b438a | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
from xai.brain.wordbase.verbs._josh import _JOSH
#calss header
class _JOSHES(_JOSH, ):
def __init__(self,):
_JOSH.__init__(self)
self.name = "JOSHES"
self.specie = 'verbs'
self.basic = "josh"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
5795e4bfa392c6ec404a4e3cc63f74cba6c53480 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/aio/operations/_operation_results_operations.py | c5156f79a9bc2d390f074d3850fb41f3b4cff510 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 5,316 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class OperationResultsOperations:
"""OperationResultsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~video_analyzer.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
account_name: str,
name: str,
operation_id: str,
**kwargs: Any
) -> Optional["_models.PrivateEndpointConnection"]:
"""Get operation result.
Get private endpoint connection operation result.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param account_name: The Video Analyzer account name.
:type account_name: str
:param name: Private endpoint connection name.
:type name: str
:param operation_id: Operation Id.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~video_analyzer.models.PrivateEndpointConnection or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.PrivateEndpointConnection"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-11-01-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
'operationId': self._serialize.url("operation_id", operation_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/privateEndpointConnections/{name}/operationResults/{operationId}'} # type: ignore
| [
"noreply@github.com"
] | Azure.noreply@github.com |
1e6b0f2307a497e5e53ec48c5671ce7a0188ec39 | bef3a5af16d50e68db158a906fbfdb323d7d0733 | /scripts/do_scatters.py | c3f208da50821f2cd63dc4e2d4abde280aff3450 | [] | no_license | flaviovdf/competition-models | 413a29054ec77e5a093ca1eb4e6ec112b1b95728 | 6b52b082f93145921b5b17de718aa6316d2b1c40 | refs/heads/master | 2020-07-04T09:15:21.940045 | 2015-12-28T16:44:42 | 2015-12-28T16:44:42 | 20,268,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,591 | py | #!/usr/bin/env python
from __future__ import division, print_function
from sklearn import linear_model
from matplotlib import pyplot as plt
import numpy as np
import plac
def main(ids_fpath, rates_fpath, plays_fpath):
mb_to_name = {}
name_to_mb = {}
with open(ids_fpath) as f:
for line in f:
spl = line.split()
mbid = spl[0]
name = ' '.join(spl[1:])
mb_to_name[mbid] = name
name_to_mb[name] = mbid
rates = {}
with open(rates_fpath) as f:
for line in f:
spl = line.split()
mbid = spl[0]
rates[mbid] = np.array([float(x) for x in spl[1:]])
plays = {}
with open(plays_fpath) as f:
for line in f:
spl = line.split()
mbid = spl[0]
plays[mbid] = np.array([float(x) for x in spl[1:]])
i = 0
n_bins = 10
for artist in ['Ladytron', 'Britney Spears', 'Radiohead', \
'Metallica', 'Daft Punk', 'Yann Tiersen']:
rate = rates[name_to_mb[artist]]
play = plays[name_to_mb[artist]]
lifetime = (play / rate)
ols = linear_model.LinearRegression(fit_intercept=True)
ols.fit(np.array([lifetime]).T, play)
regr = ols.predict(np.array([sorted(lifetime)]).T)
idx_sorted = lifetime.argsort()
bin_size = int(idx_sorted.shape[0] / n_bins)
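        # Split artists into equal-size bins by lifetime and summarize each
        # bin with the mean (and, below, the median) play count.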
mean_lifetime = []
mean_plays = []
for j in range(0, idx_sorted.shape[0], bin_size):
idx = idx_sorted[j:j + bin_size]
mean_lifetime.append(lifetime[idx].mean())
mean_plays.append(play[idx].mean())
median_lifetime = []
median_plays = []
for j in range(0, idx_sorted.shape[0], bin_size):
idx = idx_sorted[j:j + bin_size]
median_lifetime.append(np.median(lifetime[idx]))
median_plays.append(np.median(play[idx]))
plt.subplot(2, 3, i + 1)
plt.title(artist)
plt.semilogy(lifetime, play, 'wo')
plt.semilogy(sorted(lifetime), regr, 'k-')
plt.semilogy(mean_lifetime, mean_plays, 'bo')
plt.semilogy(mean_lifetime, mean_plays, 'b-', label='Mean')
plt.semilogy(median_lifetime, median_plays, 'ro')
plt.semilogy(median_lifetime, median_plays, 'r-', label='Median')
plt.legend()
plt.xlabel('Lifetime')
plt.ylabel('Plays')
i += 1
plt.tight_layout(pad=0)
#plt.savefig('time_plays.pdf')
plt.show()
if __name__ == '__main__':
plac.call(main)
| [
"flaviovdf@gmail.com"
] | flaviovdf@gmail.com |
c1a23830661aae1169a40e347d789f2684bf1e48 | 34e8d8702a26e33622ec9c1cf21f55abf910bd8c | /bw2io/extractors/csv.py | 8bd22371f76b60dc65e6eea2ad02655143102574 | [] | no_license | PascalLesage/brightway2-io | 72775595f8964a21b8c571db4dcdffc2e7b88329 | 3076770e1fd8b38fef31fa0e547facbcb6650cd8 | refs/heads/master | 2022-12-04T07:09:35.633060 | 2020-07-01T10:39:56 | 2020-07-01T10:39:56 | 285,389,011 | 0 | 0 | null | 2020-08-05T19:47:10 | 2020-08-05T19:47:09 | null | UTF-8 | Python | false | false | 373 | py | # -*- coding: utf-8 -*-
import os
import csv
class CSVExtractor(object):
@classmethod
def extract(cls, filepath):
        assert os.path.exists(filepath), "Can't find file at path {}".format(filepath)
with open(filepath) as f:
reader = csv.reader(f)
data = [row for row in reader]
return [os.path.basename(filepath), data]
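# Example use (hypothetical path):
#   name, rows = CSVExtractor.extract("inventory/example.csv")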
| [
"cmutel@gmail.com"
] | cmutel@gmail.com |
40c830c44eb8d5a8f86678b190be3e83ca70d750 | 70ed0a22937378b923a77749df38a61b7c1f7add | /jagare/converter/commit.py | c81c5546394706c0bccdd70e64aa1a4f859a7cc8 | [] | no_license | tclh123/jagare-rpc | cffdee5e87b4ad00f0c33c58b5a00e3ad06a3eab | de12c8c5d540e90aeaa360fbe6254688ad4c08cd | refs/heads/master | 2021-01-15T09:37:28.232631 | 2014-06-04T16:22:15 | 2014-06-04T16:22:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | # coding: utf-8
"""
struct Commit {
1: required string type, # 'commit'
2: required string sha,
3: required list<string> parents, # shas
4: required string tree, # sha of the tree object attached to the commit
5: required Signature committer,
6: required Signature author,
7: required string email,
8: required i64 time,
9: required i16 offset,
10: required string commit,
11: required string message,
12: required string body, # commit message body
}
"""
from .base import Converter, Commit
from .signature import SignatureConverter
class CommitConverter(Converter):
target_type = Commit
def prepare(self):
self.drop('parent')
self.type = 'commit'
self.committer = SignatureConverter(**self.committer).convert()
self.author = SignatureConverter(**self.author).convert()
self.unicode_str('message')
self.unicode_str('body')
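# Typical use (assumed): CommitConverter(**raw_commit_dict).convert() yields a
# Commit struct matching the thrift definition quoted above.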
| [
"tclh123@gmail.com"
] | tclh123@gmail.com |
986fafde8562495ace1dfe67a159589060bca348 | 02338bb8111fc1aa88e830ac09a11664720eb2d4 | /tmp/azure_rm_restorabledroppeddatabase_info.py | 0d5b7fd4e73f119db9340d53142e7fc9283cfd6a | [] | no_license | Fred-sun/fred_yaml | a49977b0e8505c7447df23dd80c7fef1be70e6bc | 295ca4cd2b59b8d2758f06eb7fd79920327ea524 | refs/heads/master | 2023-04-28T05:51:56.599488 | 2023-04-25T13:52:10 | 2023-04-25T13:52:10 | 131,376,340 | 0 | 1 | null | 2020-07-06T14:22:46 | 2018-04-28T05:34:49 | TSQL | UTF-8 | Python | false | false | 9,467 | py | #!/usr/bin/python
#
# Copyright (c) 2020 GuopengLin, (@t-glin)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_restorabledroppeddatabase_info
version_added: '2.9'
short_description: Get RestorableDroppedDatabase info.
description:
- Get info of RestorableDroppedDatabase.
options:
resource_group_name:
description:
- >-
The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
required: true
type: str
server_name:
description:
- The name of the server.
required: true
type: str
restorable_droppeded_database_id:
description:
- >-
The id of the deleted database in the form of
databaseName,deletionTimeInFileTimeFormat
type: str
extends_documentation_fragment:
- azure
author:
- GuopengLin (@t-glin)
'''
EXAMPLES = '''
- name: Get a restorable dropped database
azure_rm_restorabledroppeddatabase_info:
resource_group_name: restorabledroppeddatabasetest-1257
restorable_droppeded_database_id: 'restorabledroppeddatabasetest-7654,131403269876900000'
server_name: restorabledroppeddatabasetest-2389
- name: Get list of restorable dropped databases
azure_rm_restorabledroppeddatabase_info:
resource_group_name: restorabledroppeddatabasetest-1349
server_name: restorabledroppeddatabasetest-1840
'''
RETURN = '''
restorable_dropped_databases:
description: >-
A list of dict results where the key is the name of the
RestorableDroppedDatabase and the values are the facts for that
RestorableDroppedDatabase.
returned: always
type: complex
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: null
name:
description:
- Resource name.
returned: always
type: str
sample: null
type:
description:
- Resource type.
returned: always
type: str
sample: null
location:
description:
- The geo-location where the resource lives
returned: always
type: str
sample: null
database_name:
description:
- The name of the database
returned: always
type: str
sample: null
edition:
description:
- The edition of the database
returned: always
type: str
sample: null
max_size_bytes:
description:
- The max size in bytes of the database
returned: always
type: str
sample: null
service_level_objective:
description:
- The service level objective name of the database
returned: always
type: str
sample: null
elastic_pool_name:
description:
- The elastic pool name of the database
returned: always
type: str
sample: null
creation_date:
description:
- The creation date of the database (ISO8601 format)
returned: always
type: str
sample: null
deletion_date:
description:
- The deletion date of the database (ISO8601 format)
returned: always
type: str
sample: null
earliest_restore_date:
description:
- The earliest restore date of the database (ISO8601 format)
returned: always
type: str
sample: null
value:
description:
- A list of restorable dropped databases
returned: always
type: list
sample: null
contains:
location:
description:
- The geo-location where the resource lives
returned: always
type: str
sample: null
database_name:
description:
- The name of the database
returned: always
type: str
sample: null
edition:
description:
- The edition of the database
returned: always
type: str
sample: null
max_size_bytes:
description:
- The max size in bytes of the database
returned: always
type: str
sample: null
service_level_objective:
description:
- The service level objective name of the database
returned: always
type: str
sample: null
elastic_pool_name:
description:
- The elastic pool name of the database
returned: always
type: str
sample: null
creation_date:
description:
- The creation date of the database (ISO8601 format)
returned: always
type: str
sample: null
deletion_date:
description:
- The deletion date of the database (ISO8601 format)
returned: always
type: str
sample: null
earliest_restore_date:
description:
- The earliest restore date of the database (ISO8601 format)
returned: always
type: str
sample: null
'''
import time
import json
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from copy import deepcopy
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.sql import SqlManagementClient
from msrestazure.azure_operation import AzureOperationPoller
from msrest.polling import LROPoller
except ImportError:
# This is handled in azure_rm_common
pass
class AzureRMRestorableDroppedDatabaseInfo(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
resource_group_name=dict(
type='str',
required=True
),
server_name=dict(
type='str',
required=True
),
restorable_droppeded_database_id=dict(
type='str'
)
)
self.resource_group_name = None
self.server_name = None
self.restorable_droppeded_database_id = None
self.results = dict(changed=False)
self.mgmt_client = None
self.state = None
self.url = None
self.status_code = [200]
self.query_parameters = {}
self.query_parameters['api-version'] = '2014-04-01'
self.header_parameters = {}
self.header_parameters['Content-Type'] = 'application/json; charset=utf-8'
self.mgmt_client = None
super(AzureRMRestorableDroppedDatabaseInfo, self).__init__(self.module_arg_spec, supports_tags=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
self.mgmt_client = self.get_mgmt_svc_client(SqlManagementClient,
base_url=self._cloud_environment.endpoints.resource_manager,
api_version='2014-04-01')
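        # Dispatch on the supplied arguments: fetch one dropped database when
        # an ID is given, otherwise list every restorable dropped database on
        # the server.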
if (self.resource_group_name is not None and
self.server_name is not None and
self.restorable_droppeded_database_id is not None):
self.results['restorable_dropped_databases'] = self.format_item(self.get())
elif (self.resource_group_name is not None and
self.server_name is not None):
self.results['restorable_dropped_databases'] = self.format_item(self.listbyserver())
return self.results
def get(self):
response = None
try:
response = self.mgmt_client.restorable_dropped_databases.get(resource_group_name=self.resource_group_name,
server_name=self.server_name,
restorable_droppeded_database_id=self.restorable_droppeded_database_id)
except CloudError as e:
self.log('Could not get info for @(Model.ModuleOperationNameUpper).')
return response
def listbyserver(self):
response = None
try:
response = self.mgmt_client.restorable_dropped_databases.list_by_server(resource_group_name=self.resource_group_name,
server_name=self.server_name)
except CloudError as e:
self.log('Could not get info for @(Model.ModuleOperationNameUpper).')
return response
def format_item(self, item):
if hasattr(item, 'as_dict'):
return [item.as_dict()]
else:
result = []
items = list(item)
for tmp in items:
result.append(tmp.as_dict())
return result
def main():
AzureRMRestorableDroppedDatabaseInfo()
if __name__ == '__main__':
main()
| [
"xiuxi.sun@qq.com"
] | xiuxi.sun@qq.com |
e8699382e8ae88507b27988b724c70ac96d54c2f | d57b51ec207002e333b8655a8f5832ed143aa28c | /.history/l3/dz/GameBot_20200620182301.py | 828c8db971f7acb419cae9d32967a89d315790f2 | [] | no_license | yevheniir/python_course_2020 | b42766c4278a08b8b79fec77e036a1b987accf51 | a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b | refs/heads/master | 2022-11-15T07:13:24.193173 | 2020-07-11T15:43:26 | 2020-07-11T15:43:26 | 278,890,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,293 | py | import discord
import random
TOKEN = 'NzIzNTQzNDk1OTc2OTQzNjI2.XuzKbw.sVfmaFwGyA7agglqtgDdoUBKiDA'
list1 = []
client = discord.Client()
bot_player = {}
win_bot = []
win_bot.append(0)
win_player = []
win_player.append(0)
bot_player = {"Бот": win_bot,
"Игрок": win_player
}
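# Running win totals; the last element of each list is the current score.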
first_comand = [
"##################################################",
" Начать игру - !start ",
"##################################################"
]
second_comand = [
"#################",
" Вы вышли из игры ",
"#################"
]
def player():
win_player.append(win_player[-1]+1)
bot_player = {"Бот": win_bot[-1],
"Игрок": win_player[-1]
}
def bot():
win_bot.append(win_player[-1]+1)
bot_player = {"Бот": win_bot[-1],
"Игрок": win_player[-1]
}
def draw():
bot_player = {"Бот": win_bot[-1],
"Игрок": win_player[-1]
}
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
@client.event
async def on_message(message):
# we do not want the bot to reply to itself
if message.author == client.user:
return
if message.content.startswith('!start'):
# ")
await message.channel.send("Введи !Камень,!Ножницы или !Бумага")
if message.content.startswith('!Камень'):
list1 = []
list1.append("Камень")
print(message.author.mention + "\n" +
"поставил\n"+list1[0].lower()+".\n\n")
list2 = ['Бумага', 'Камень', 'Ножницы']
rand = random.choice(list2)
if(list1[0] == rand):
await message.channel.send("Бот выбрал " + rand.lower()+".")
await message.channel.send("Ничья!")
            bot_player = {"Бот": win_bot[-1],
                          "Игрок": win_player[-1]
}
await message.channel.send("Бот - " + str(bot_player["Бот"]) + "\n")
await message.channel.send("Игрок - " + str(bot_player["Игрок"]) + "\n")
print("\n\nБот - " + str(bot_player["Бот"]) + "\n")
print("Игрок - " + str(bot_player["Игрок"]) + "\n")
if(list1[0] == 'Бумага'and rand == 'Камень'):
await message.channel.send("Бот выбрал " + rand.lower()+".")
await message.channel.send("Ты победил")
win_player.append(win_player[-1]+1)
bot_player = {"Бот": win_bot[-1],
"Игрок": win_player[-1]
}
await message.channel.send("Бот - " + str(bot_player["Бот"]) + "\n")
await message.channel.send("Игрок - " + str(bot_player["Игрок"]) + "\n")
print("\n\nБот - " + str(bot_player["Бот"]) + "\n")
print("Игрок - " + str(bot_player["Игрок"]) + "\n")
if(list1[0] == 'Камень'and rand == 'Бумага'):
await message.channel.send("Бот выбрал " + rand.lower()+".")
await message.channel.send("Ты проиграл(")
win_bot.append(win_bot[-1]+1)
bot_player = {"Бот": win_bot[-1],
"Игрок": win_player[-1]
"Бог":
}
await message.channel.send("Бот - " + str(bot_player["Бот"]) + "\n")
await message.channel.send("Игрок - " + str(bot_player["Игрок"]) + "\n")
print("\n\nБот - " + str(bot_player["Бот"]) + "\n")
print("Игрок - " + str(bot_player["Игрок"]) + "\n")
if(list1[0] == 'Ножницы'and rand == 'Камень'):
await message.channel.send("Бот поставил " + rand.lower()+".")
await message.channel.send("Вы проиграли!")
win_bot.append(win_bot[-1]+1)
bot_player = {"Бот": win_bot[-1],
"Игрок": win_player[-1]
}
await message.channel.send("Бот - " + str(bot_player["Бот"]) + "\n")
await message.channel.send("Игрок - " + str(bot_player["Игрок"]) + "\n")
print("\n\nБот - " + str(bot_player["Бот"]) + "\n")
print("Игрок - " + str(bot_player["Игрок"]) + "\n")
if(list1[0] == 'Камень'and rand == 'Ножницы'):
await message.channel.send("Бот поставил " + rand.lower()+".")
await message.channel.send("Вы победили!")
win_player.append(win_player[-1]+1)
bot_player = {"Бот": win_bot[-1],
"Игрок": win_player[-1]
}
await message.channel.send("Бот - " + str(bot_player["Бот"]) + "\n")
await message.channel.send("Игрок - " + str(bot_player["Игрок"]) + "\n")
print("\n\nБот - " + str(bot_player["Бот"]) + "\n")
print("Игрок - " + str(bot_player["Игрок"]) + "\n")
if(list1[0] == 'Бумага'and rand == 'Ножницы'):
await message.channel.send("Бот поставил " + rand.lower()+".")
await message.channel.send("Вы проиграли!")
win_bot.append(win_bot[-1]+1)
bot_player = {"Бот": win_bot[-1],
"Игрок": win_player[-1]
}
await message.channel.send("Бот - " + str(bot_player["Бот"]) + "\n")
await message.channel.send("Игрок - " + str(bot_player["Игрок"]) + "\n")
print("\n\nБот - " + str(bot_player["Бот"]) + "\n")
print("Игрок - " + str(bot_player["Игрок"]) + "\n")
if(list1[0] == 'Ножницы'and rand == 'Бумага'):
await message.channel.send("Бот поставил " + rand.lower()+".")
await message.channel.send("Вы победили!")
win_player.append(win_player[-1]+1)
bot_player = {"Бот": win_bot[-1],
"Игрок": win_player[-1]
}
await message.channel.send("Бот - " + str(bot_player["Бот"]) + "\n")
await message.channel.send("Игрок - " + str(bot_player["Игрок"]) + "\n")
print("\n\nБот - " + str(bot_player["Бот"]) + "\n")
print("Игрок - " + str(bot_player["Игрок"]) + "\n")
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
if message.content.startswith('!Бумага'):
list1 = []
list1.append("Бумага")
print(message.author.mention + "\n" +
"поставил\n"+list1[0].lower()+".\n\n")
list2 = ['Бумага', 'Камень', 'Ножницы']
rand = random.choice(list2)
        await check_win(message, list1, rand)
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
if message.content.startswith('!Ножницы'):
list1 = []
list1.append("Ножницы")
print(message.author.mention + "\n" +
"поставил\n"+list1[0].lower()+".\n\n")
list2 = ['Бумага', 'Камень', 'Ножницы']
rand = random.choice(list2)
        await check_win(message, list1, rand)
if message.content.startswith('!leave'):
await message.channel.send(second_comand[0]+'\n'+second_comand[1]+'\n'+second_comand[2])
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------------
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
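# Compare the player's choice (list1[0]) with the bot's choice (rand),
# announce the outcome in the channel and update the win counters.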
async def check_win(message, list1, rand):
if(list1[0] == rand):
await message.channel.send("Бот поставил " + rand.lower()+".")
await message.channel.send("Ничья!")
bot_player = {"Бот": win_bot[-1],
"Игрок": win_player[-1]
}
await message.channel.send("Бот - " + str(bot_player["Бот"]) + "\n")
await message.channel.send("Игрок - " + str(bot_player["Игрок"]) + "\n")
print("\n\nБот - " + str(bot_player["Бот"]) + "\n")
print("Игрок - " + str(bot_player["Игрок"]) + "\n")
if(list1[0] == 'Бумага'and rand == 'Камень'):
await message.channel.send("Бот поставил " + rand.lower()+".")
await message.channel.send("Вы победили!")
win_player.append(win_player[-1]+1)
bot_player = {"Бот": win_bot[-1],
"Игрок": win_player[-1]
}
await message.channel.send("Бот - " + str(bot_player["Бот"]) + "\n")
await message.channel.send("Игрок - " + str(bot_player["Игрок"]) + "\n")
print("\n\nБот - " + str(bot_player["Бот"]) + "\n")
print("Игрок - " + str(bot_player["Игрок"]) + "\n")
if(list1[0] == 'Камень'and rand == 'Бумага'):
await message.channel.send("Бот поставил " + rand.lower()+".")
await message.channel.send("Вы проиграли!")
win_bot.append(win_bot[-1]+1)
bot_player = {"Бот": win_bot[-1],
"Игрок": win_player[-1]
}
await message.channel.send("Бот - " + str(bot_player["Бот"]) + "\n")
await message.channel.send("Игрок - " + str(bot_player["Игрок"]) + "\n")
print("\n\nБот - " + str(bot_player["Бот"]) + "\n")
print("Игрок - " + str(bot_player["Игрок"]) + "\n")
if(list1[0] == 'Ножницы'and rand == 'Камень'):
await message.channel.send("Бот поставил " + rand.lower()+".")
await message.channel.send("Вы проиграли!")
win_bot.append(win_bot[-1]+1)
bot_player = {"Бот": win_bot[-1],
"Игрок": win_player[-1]
}
await message.channel.send("Бот - " + str(bot_player["Бот"]) + "\n")
await message.channel.send("Игрок - " + str(bot_player["Игрок"]) + "\n")
print("\n\nБот - " + str(bot_player["Бот"]) + "\n")
print("Игрок - " + str(bot_player["Игрок"]) + "\n")
if(list1[0] == 'Камень'and rand == 'Ножницы'):
await message.channel.send("Бот поставил " + rand.lower()+".")
await message.channel.send("Вы победили!")
win_player.append(win_player[-1]+1)
bot_player = {"Бот": win_bot[-1],
"Игрок": win_player[-1]
}
await message.channel.send("Бот - " + str(bot_player["Бот"]) + "\n")
await message.channel.send("Игрок - " + str(bot_player["Игрок"]) + "\n")
print("\n\nБот - " + str(bot_player["Бот"]) + "\n")
print("Игрок - " + str(bot_player["Игрок"]) + "\n")
if(list1[0] == 'Бумага'and rand == 'Ножницы'):
await message.channel.send("Бот поставил " + rand.lower()+".")
await message.channel.send("Вы проиграли!")
win_bot.append(win_bot[-1]+1)
bot_player = {"Бот": win_bot[-1],
"Игрок": win_player[-1]
}
await message.channel.send("Бот - " + str(bot_player["Бот"]) + "\n")
await message.channel.send("Игрок - " + str(bot_player["Игрок"]) + "\n")
print("\n\nБот - " + str(bot_player["Бот"]) + "\n")
print("Игрок - " + str(bot_player["Игрок"]) + "\n")
if(list1[0] == 'Ножницы'and rand == 'Бумага'):
await message.channel.send("Бот поставил " + rand.lower()+".")
await message.channel.send("Вы победили!")
win_player.append(win_player[-1]+1)
bot_player = {"Бот": win_bot[-1],
"Игрок": win_player[-1]
}
await message.channel.send("Бот - " + str(bot_player["Бот"]) + "\n")
await message.channel.send("Игрок - " + str(bot_player["Игрок"]) + "\n")
print("\n\nБот - " + str(bot_player["Бот"]) + "\n")
print("Игрок - " + str(bot_player["Игрок"]) + "\n")
def get_bot_player():
return {"Бот": win_bot[-1],
"Игрок": win_player[-1]
}
bot_player = get_bot_player()
client.run(TOKEN) | [
"yevheniira@intelink-ua.com"
] | yevheniira@intelink-ua.com |
a667310187aa30b7808931181ffd30a86d1be517 | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-sddp/aliyunsdksddp/request/v20190103/DescribeDataLimitDetailRequest.py | 66a2e93d3f972d45756dd03fad798e75f38d411c | [
"Apache-2.0"
] | permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 1,613 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeDataLimitDetailRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Sddp', '2019-01-03', 'DescribeDataLimitDetail','sddp')
def get_SourceIp(self):
return self.get_query_params().get('SourceIp')
def set_SourceIp(self,SourceIp):
self.add_query_param('SourceIp',SourceIp)
def get_id(self):
return self.get_query_params().get('id')
def set_id(self,id):
self.add_query_param('id',id)
def get_NetworkType(self):
return self.get_query_params().get('NetworkType')
def set_NetworkType(self,NetworkType):
self.add_query_param('NetworkType',NetworkType)
def get_Lang(self):
return self.get_query_params().get('Lang')
def set_Lang(self,Lang):
self.add_query_param('Lang',Lang) | [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
32037052f37c116da8781fdd7a71e71184b79a30 | 8015f1c62a2cb4efd21aa8938336913bf8117868 | /bamap/ba1481.pngMap.py | ae9d5d4a691b6f1fcb1c427248bf9a0e98856ee1 | [] | no_license | GamerNoTitle/Beepers-and-OLED | 675b5e3c179df0f0e27b42bf594c43860d03b9af | afe1340e5394ae96bda5f9022a8a66824368091e | refs/heads/master | 2020-04-20T00:09:47.122471 | 2019-04-29T04:59:35 | 2019-04-29T04:59:35 | 168,515,579 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,468 | py | ba1481.pngMap = [
'11111111110000000000000000111111111111111111111111111111110000000000000000000000000000000000000111111111111111100000000000000000',
'11111111110000000000000000111111111111111111111111111111110000000000000000000000000000000000001111111111111111100001000000000000',
'11111111110000000000000000111111111111111111111111111111000000000000000000000000000000000101001111111111111111100000110101110000',
'11111111110100000000000000111111111111111111111111111111000000000000000000000000000000000000001111111111111111110000111100000000',
'11111111111100000000000000111111111111111111111111111111000000000000000000000000000111111111111111111111111111111111111111111111',
'11111111111100000000001000111111111111111111111111111111000000000000000000000000001111111111111111111111111111111111111111111111',
'11111111111000000000000111111111111111111111111111111110000000000000000000001111111111111111111111111111111111111111111111111111',
'11111111111100000000000111111111111111111111111111111100000001000000000000001111111111111111111111111111111111111111111111111111',
'11111111111100000000000011111111111111111111111111111110000000000000000000010111111111111111111111111111111111111111111111111111',
'11111111111100000000000011111111111111111111111111111110000000000000000000001111111111111111111111111111111111111111111111111111',
'11111111111110000000000011111111111111111111111111111111000000000000000000000011111111111111111111111111111111111111111111111111',
'11111111111110000000000011111111111111111111111111111111000000000000000000000011111111111111111111111111111111111111111111111111',
'11111111111111000000000000111111111111111111111111111111110100000000000000000011111111111111111111111111111111111111111111111111',
'11111111111111000000000000111111111111111111111111111111111100000000000000000011111111111111111111111111111111111111111111111111',
'11111111111110100000000000111111111111111111111111111111111000000000000000000000111111111111111111111111111111111111111111111111',
'11111111111111000000000000111111111111111111111111111111110000000000000000000001111111111111111111111111111111111111111111111111',
'11111111111111100000000000111111111111111111111111111111111000000000000000000000001111111111111111111111111111111111111111111111',
'11111111111111110000000000111111111111111111111111111111100000000000000000000000001111111111111111111111111111111111111111111111',
'11111111111111111000000000001111111111111111111111111111000000000000000000000000001111111111111111111111111111111111111111111111',
'11111111111111110100000000101111111111111111111111111111000000000000000000000000000111111111111111111111111111111111111111111111',
'11111111111111111100000000011111111111111111111111111100000000000000000000000000000011111111111111111111111111111111111111111111',
'11111111111111111000000000011111111111111111111111111110000000000000000000000000000011111111111111111111111111111111111111111111',
'11111111111111111000000000111111111111111111111111111000000000000000000000000000001111111111111111111111111111111111111111111111',
'11111111111111111000000000111111111111111111111111111000000000000000000000000000000111111111111111111111111111111111111111111111',
'11111111111111111000000011111111111111111111111111110000000000000000000000000000001111111111111111111111111111111111111111111111',
'11111111111111110000000011111111111111111111111111100000000000000000000000000000000111111111111111111111111111111111111111111111',
'11111111111111111100000011111111111111111111111111000000000000000000000000000000000011111111111111111111111111111111111111111111',
'11111111111111111000000011111111111111111111111101000000000000000000000000000000000111111111111111111111111111111111111111111111',
'11111111111111111100000011111111111111111111111100000000000000000000000000000000000011111111111111111111111111111111111111111111',
'11111111111111111100000001111111111111111111110000000000000000000000000000000000000011111111111111111111111111111111111111111111',
'11111111111111111111000000111111111111111111100000000000000000000000000000000000000111111111111111111111111111111111111111111111',
'11111111111111111111000000011111111111111111000000000000000000000000000000000000000011111111111111111111111111111111111111111000',
'11111111111111111111100000000111111111111110000100000000000000000000000000000000000011111111111111111111111111111111000000000000',
'11111111111111111111110100000011111111111101010000000000000000000000000000000000001111111111111111111111111111111000000000000000',
'11111111111111111111111111111101111111111111000000000000000000000000000000000001001111111111111111111111110000000000000000000000',
'11111111111111111111111111111011111111111111100000000000000000000000000000000001001111111111111111111111000000000000000000000000',
'11111111111111111111111111111111111111111110000000000000000000000000000000000001011111111111111100000000000000000000000000000000',
'11111111111111111111111111111111111111111100000000000000000000000000000000000000011111111111010000000000000000000000000000000000',
'11111111111111111111111111111111111111110000000000000000000000000000000000000000010000010000000000000000000000000000000000000000',
'11111111111111111111111111111111111111100000000000000000000000000000000000000000100000000000000000000000000000000000000000000000',
'11111111111111111111111111111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'11111111111111111111111111111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'11111111111111111111111111111111111111100011111111110101000000000000000000000000000000000000000000000000000000000000000000000000',
'11111111111111111111111111111111111111111001100000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'11111111111111111111110001000111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'11111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
]
| [
"bili33@87ouo.top"
] | bili33@87ouo.top |
db59a9d35475b939c95421a85a469899fb221212 | 2aac5c508641e0e9f8f7de9f0f7833a9b9107b10 | /source1/bsp/lumps/model_lump.py | 7f490ce65fed7db66830de55c7a8ce83b00cbe47 | [
"MIT"
] | permissive | half5life/SourceIO | 1d2e90a05706d9dd5721afd6501327e0c16a1160 | f3dc6db92daa537acbb487ce09f371866f6e3e7f | refs/heads/master | 2023-03-22T08:27:15.792325 | 2021-03-15T11:26:10 | 2021-03-15T11:26:10 | 345,146,708 | 0 | 0 | MIT | 2021-03-15T11:26:11 | 2021-03-06T16:56:37 | Python | UTF-8 | Python | false | false | 577 | py | from typing import List
from .. import Lump, lump_tag
from ..datatypes.model import Model, RespawnModel
@lump_tag(14, 'LUMP_MODELS')
class ModelLump(Lump):
def __init__(self, bsp, lump_id):
super().__init__(bsp, lump_id)
self.models: List[Model] = []
def parse(self):
reader = self.reader
while reader:
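            # The reader is assumed to stay truthy while unread bytes remain;
            # BSP version 29+ switches to the Respawn (Titanfall) model layout.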
if self._bsp.version < 29:
self.models.append(Model(self, self._bsp).parse(reader))
else:
self.models.append(RespawnModel(self, self._bsp).parse(reader))
return self
| [
"med45c@gmail.com"
] | med45c@gmail.com |
164bef8814cf463fbd8347f8ed0b69148afade9a | d92b87b49ccda07c1523769ad91bd07d319eaab1 | /toqito/states/chessboard.py | b88eb0ce9554bb8fdf8725d1f3ea871bade9a724 | [
"MIT"
] | permissive | ayazskhan/toqito | a965720d4f896d3e6727a08e3ed0f7cde4dc5c8f | 0846fb13bc25e82dc602f6184b8d5ecfcfcf8218 | refs/heads/master | 2022-12-25T22:50:27.313455 | 2020-09-28T16:24:16 | 2020-09-28T16:24:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,040 | py | """Chessboard state."""
from typing import List
import numpy as np
def chessboard(
mat_params: List[float], s_param: float = None, t_param: float = None
) -> np.ndarray:
r"""
Produce a chessboard state [BP00]_.
Generates the chessboard state defined in [BP00]_. Note that, for certain choices of
:code:`s_param` and :code:`t_param`, this state will not have positive partial transpose, and
thus may not be bound entangled.
Examples
==========
The standard chessboard state can be invoked using :code:`toqito` as
>>> from toqito.states import chessboard
>>> chessboard([1, 2, 3, 4, 5, 6], 7, 8)
[[ 0.22592593, 0. , 0.12962963, 0. , 0. ,
0. , 0.17777778, 0. , 0. ],
[ 0. , 0.01851852, 0. , 0. , 0. ,
0.01111111, 0. , 0.02962963, 0. ],
[ 0.12962963, 0. , 0.18148148, 0. , 0.15555556,
0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0.01851852, 0. ,
0.02222222, 0. , -0.01481481, 0. ],
[ 0. , 0. , 0.15555556, 0. , 0.22592593,
0. , -0.14814815, 0. , 0. ],
[ 0. , 0.01111111, 0. , 0.02222222, 0. ,
0.03333333, 0. , 0. , 0. ],
[ 0.17777778, 0. , 0. , 0. , -0.14814815,
0. , 0.23703704, 0. , 0. ],
[ 0. , 0.02962963, 0. , -0.01481481, 0. ,
0. , 0. , 0.05925926, 0. ],
[ 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. ]]
References
==========
.. [BP00] Three qubits can be entangled in two inequivalent ways.
D. Bruss and A. Peres
Phys. Rev. A, 61:30301(R), 2000
arXiv: 991.1056
    :param mat_params: The six matrix parameters from which the four vectors
        defining the chessboard state are constructed.
    :param s_param: Optional; defaults to
        :code:`np.conj(mat_params[2]) / np.conj(mat_params[5])`.
    :param t_param: Optional; defaults to
        :code:`mat_params[0] * mat_params[3] / mat_params[4]`.
:return: A chessboard state.
"""
if s_param is None:
s_param = np.conj(mat_params[2]) / np.conj(mat_params[5])
if t_param is None:
t_param = mat_params[0] * mat_params[3] / mat_params[4]
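    # The four unnormalized vectors from [BP00]; the state below is the
    # normalized sum of their outer products.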
v_1 = np.array([[mat_params[4], 0, s_param, 0, mat_params[5], 0, 0, 0, 0]])
v_2 = np.array([[0, mat_params[0], 0, mat_params[1], 0, mat_params[2], 0, 0, 0]])
v_3 = np.array(
[[np.conj(mat_params[5]), 0, 0, 0, -np.conj(mat_params[4]), 0, t_param, 0, 0]]
)
v_4 = np.array(
[
[
0,
np.conj(mat_params[1]),
0,
-np.conj(mat_params[0]),
0,
0,
0,
mat_params[3],
0,
]
]
)
rho = (
v_1.conj().T * v_1
+ v_2.conj().T * v_2
+ v_3.conj().T * v_3
+ v_4.conj().T * v_4
)
return rho / np.trace(rho)
| [
"vincentrusso1@gmail.com"
] | vincentrusso1@gmail.com |
ae73832910611ed728d4352f954d63507b078cd3 | ae3e09f8aa3c54a91d29833bed26de032ee1f0d6 | /C. Yuhao and a Parenthesis.py | 430da23202adc5eb6b9c026f95a960ff60d967dd | [] | no_license | thecodearrow/100-Days-Of-Code | c0604c8922cb4b9c9655bcf7db8fba6fa0a629d2 | 790ef030375340bf7824380775ee239557034836 | refs/heads/master | 2021-11-24T18:56:45.611895 | 2021-10-29T00:03:38 | 2021-10-29T00:03:38 | 118,704,025 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | #http://codeforces.com/contest/1097/problem/C
import sys
import math
from collections import defaultdict
def getInputFromLine():
return [int(x) for x in input().split()]
try:
sys.stdin = open('input.txt', 'r')
sys.stdout = open('output.txt', 'w')
except:
pass
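# mirror(s) reverses s and swaps '(' with ')'. A sequence t can close a left
# half with surplus k exactly when mirror(t) is a valid prefix with balance k.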
def mirror(s):
s=s[::-1]
m=""
for c in s:
if(c==")"):
m+="("
else:
m+=")"
return m
def getScore(s):
score=0
for b in s:
if(b=="("):
score+=1
elif(b==")"):
score-=1
if(score<0):
return -1
return score
n=int(input())
brackets=[]
for i in range(n):
t=input()
brackets.append(t)
scores=[]
count=0
bal=defaultdict(lambda:0)
lead=0 #counting existing good brackets
for b in brackets:
balance=getScore(b)
if(balance!=-1):
bal[balance]+=1
if(balance==0):
lead+=1
for b in brackets:
m=mirror(b)
balance=getScore(m)
if(balance!=-1):
if(bal[balance]>0):
bal[balance]-=1
count+=1
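# The second loop counts every zero-balance sequence once (lead matches in
# total), but balanced sequences pair only with each other: lead // 2 pairs.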
print(count-lead+(lead//2)) | [
"you@example.com"
] | you@example.com |
65a607155c70bfe5aad3197898c156c38f26f59c | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/gui/impl/lobby/awards/__init__.py | 465f87ba720e314ee0cbbb36c279e990182e8066 | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 311 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/lobby/awards/__init__.py
from shared_utils import CONST_CONTAINER
class SupportedTokenTypes(CONST_CONTAINER):
BATTLE_TOKEN = 'battleToken'
TOKENS = 'tokens'
PROGRESSION_XP_TOKEN = 'progressionXPToken'
| [
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
67c3ed498ff90804fddcd1b5670e11105fde15f2 | 5acc20092ee93935594a7e0522924245a43e5531 | /feature_selection/plot_select_from_model_boston.py | 9f27a604d83a531bb95997cee1558fc059ba8a52 | [] | no_license | shengchaohua/sklearn-examples | aae2332c4382a57a70c1887777c125e6dc4579d6 | 1dac6a9b5e703185a8da1df7c724022fbd56a9e4 | refs/heads/master | 2020-05-05T01:19:20.037746 | 2019-10-18T08:55:01 | 2019-10-18T08:55:01 | 179,599,221 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,164 | py | import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_boston
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LassoCV
# Load the boston dataset.
boston = load_boston()
X, y = boston.data, boston.target
# We use the base estimator LassoCV since the L1 norm promotes sparsity of
# features.
clf = LassoCV(cv=5)
# Set a minimum threshold of 0.25
sfm = SelectFromModel(clf, threshold=0.25)
sfm.fit(X, y)
n_features = sfm.transform(X).shape[1]
# Reset the threshold till the number of features equals two.
# Note that the attribute can be set directly instead of repeatedly
# fitting the metatransformer.
while n_features > 2:
sfm.threshold += 0.1
X_transform = sfm.transform(X)
n_features = X_transform.shape[1]
# Plot the selected two features from X.
plt.title(
"Features selected from Boston using SelectFromModel with "
"threshold %0.3f." % sfm.threshold)
feature1 = X_transform[:, 0]
feature2 = X_transform[:, 1]
plt.plot(feature1, feature2, 'r.')
plt.xlabel("Feature number 1")
plt.ylabel("Feature number 2")
plt.ylim([np.min(feature2), np.max(feature2)])
plt.show()
| [
"shengchaohua163@163.com"
] | shengchaohua163@163.com |
70f621d07f3916ead89774507c510d3cdc1fe1bd | 67f3ef9af94ad92677ea772e3e671eae4934c10b | /Stepik/Python3_1/Lesson452.py | 9913cfcaf4181f210e8dc5e8c8e0310c7b507738 | [] | no_license | Ihar-Limitless/PythonWorkspace | a820a69c2a610bf3c209e783ac16962eb99f1751 | 4b684a209cd35a6ea8af349afc1a380aed84662e | refs/heads/master | 2020-07-17T14:56:32.552712 | 2020-05-20T08:47:40 | 2020-05-20T08:47:40 | 206,040,300 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | # import csv, collections
# with open(r'd:/PythonWorkspace/Stepik/crimes.csv') as f:
# data = csv.reader(f)
# for i in data:
# print(i)
# print(collections.Counter(row[5] for row in data if '2015' in row[2]))
#
# import csv
#
# with open("Crimes.csv") as fi:
# reader = csv.reader(fi)
# next(reader)
# crime_cnt = dict()
# for row in reader:
# year = row[2][6:10]
# if year == "2015":
# crime_type = row[5]
# if crime_type not in crime_cnt:
# crime_cnt[crime_type] = 0
# crime_cnt[crime_type] += 1
#
# a = list(map(lambda x: (crime_cnt[x], x), crime_cnt))
# a.sort(key=lambda x: -x[0])
#
# print(a[0][1])
import csv
from collections import Counter
with open(r'd:/PythonWorkspace/Stepik/crimes.csv', "r") as f:
reader = csv.DictReader(f)
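    # DictReader keys each row by the CSV header, so row['Primary Type'] works below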
crimes = []
for row in reader:
crimes.append(row['Primary Type'])
c = Counter(crimes).most_common(1)
print(c[0][0])
print(crimes) | [
"01codename01@gmail.com"
] | 01codename01@gmail.com |
781eef84dfbe21f5c7fd4840436c42051d0c3859 | 029aa4fa6217dbb239037dec8f2e64f5b94795d0 | /Python算法指南/栈_heapq_deque/200_逆波兰表达式求值_栈的应用.py | 41595da2b1b998c1229b1e528e91e30e1db5f53d | [] | no_license | tonyyo/algorithm | 5a3f0bd4395a75703f9ee84b01e42a74283a5de9 | 60dd5281e7ce4dfb603b795aa194a67ff867caf6 | refs/heads/master | 2022-12-14T16:04:46.723771 | 2020-09-23T06:59:33 | 2020-09-23T06:59:33 | 270,216,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,208 | py | class Solution:
def evalRPN2(self, tokens):
stack = []
for i in tokens:
if i not in ('+', '-', '*', '/'):
stack.append(int(i))
else:
op2 = stack.pop()
op1 = stack.pop()
if i == '+': stack.append(op1 + op2)
elif i == '-': stack.append(op1 - op2)
elif i == '*': stack.append(op1 * op2)
else: stack.append(int(op1 * 1.0 / op2))
return stack[0]
def evalRPN1(self, tokens):
size = len(tokens)
stack = []
for x in tokens:
if x not in ["+", "-", "*", "/"]:
stack.append(x)
else:
a = int(stack.pop())
b = int(stack.pop())
result = 0
if x == "+":
result = a + b
if x == "-":
result = a - b
if x == "*":
result = a * b
if x == "/":
result = a / b
stack.append(result)
return stack[-1]
def evalRPN(self, tokens):
stack = []
for i in range(len(tokens)):
if tokens[i] not in ['+', '-', '*', '/']:
                stack.append(tokens[i])  # push operands (non-operator tokens)
            else:
                if len(stack) == 1:  # we pop twice below, so guard a one-element stack
                    return stack[-1]
                temp2 = int(stack.pop())  # mind the order of the two operands
                temp1 = int(stack.pop())
                if tokens[i] == '+':
                    stack.append(temp1 + temp2)
                elif tokens[i] == '-':
                    stack.append(temp1 - temp2)
                elif tokens[i] == '*':
                    stack.append(temp1 * temp2)
                else:
                    stack.append(int(temp1 / temp2))  # truncate toward zero, not floor
return stack[-1] if stack else -1
#main function
if __name__=="__main__":
    tokens=["2", "1", "+", "3", "*"]
    #create the object
    solution=Solution()
    print("the input reverse Polish expression is:",tokens)
    print("the result of evaluating the reverse Polish expression is:", solution.evalRPN(tokens))
"1325338208@qq.com"
] | 1325338208@qq.com |
dfcfe43bab212cefb635a2746e2d5c8710e65661 | cb9281a34c3c5a36d4b3a846fb6ff22ede12f2f6 | /communities_CK_one_network_with_main_GC.py | 3b90ea26d0159d6b50b08a9cd6c7187a9378dcb5 | [] | no_license | juliettapc/CalorieKing | 9cb9f35ae9b239d2284175b0802cf2c60dc79d1d | 5f80bffb65fe4644a81ae2ab0b1738861e028331 | refs/heads/master | 2022-02-10T07:52:24.133379 | 2022-02-08T01:25:18 | 2022-02-08T01:25:18 | 153,174,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,612 | py | import subprocess as sp
import networkx as nx
def main():
file1 = open('summary_modularity_analysis_GC','wt') # one summary file for everything
print >> file1, "data: #_points time_scale GC_size <k> k(hub) modularity #_communities <community_size> max_community_size\n"
file1.close()
name_list=[] # list of names of the input files
scale_list=[10]
for scale in scale_list:
#name_list.append(str(scale)+'_points_network/data/friend_graph_all0')
name_list.append(str(scale)+'_points_network/data/friend_graph_all0')
    map (str, name_list) # remember that everything written out has to be a string!!!!
for name in name_list: # loop to go over files (== over networks)
calculations=[] # list of atributes for every network (modularity, number of communities, averages,...)
list_of_data_list=[] #list of atributes that are lists (5top hubs, communitiy sizes,...)
#print "\n\nfile: "+name
edge_data = open(name).readlines()
H=nx.read_edgelist(name) # create the network from the original input file
components=nx.connected_component_subgraphs(H)
G=components[0] # i take just the GC as a subgraph to perform the community ID algorithm
# G is a list of tuples: [(n1,n2),(n3,n4),(n2,n3),...]
calculations.append("\n") # just to separate from the next set of data
calculations.append(len(G))
new_edge_data = [] #this list is what i will pass to Roger's code
for e in G.edges(): # e is a list of two neighbors: [n1,n2]
#i have to convert e to str because it is in some other format and the algorithm may not recognise it
new_edge_data.append(" ".join(map(str,e))) # i join the two neighbors, separating them just by a space, so now they are just one element of the edge_list, which is: [n1 n2, n3 n4, n2 n3,...]
degree_values=sorted(nx.degree(G).values())
most_connected=[]
for i in range (1,11):
most_connected.append(degree_values[-i])
        list_of_data_list.append(most_connected) # save the connectivity values of the 10 most connected nodes
average_network_degree=int(round(sum(G.degree().values())/float(len(G)),0) )
calculations.append(average_network_degree)
calculations.append(degree_values[-1])
p = sp.Popen(["/opt/communityID"], stdin=sp.PIPE, stdout=sp.PIPE)
        output, error = p.communicate("\n".join(new_edge_data)) # note: only the GC is passed, one edge per line
community_lines = output.split("part")
modularity = float(community_lines[0])
partition_lines = community_lines[1].split("\n")
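        # each partition line is assumed to look like "<module_id> --- n1 n2 n3 ..."
        # (inferred from the "---" split below; the exact communityID output format is not documented here)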
modules = []
calculations.append(modularity)
max_max_degree=0
max_size=0
average_size=0
average_max_degree=0
size_list=[]
max_conect_list=[]
average_k_list=[]
for p in partition_lines:
this_module = p.split("---")
if len(this_module) > 1:
this_module = this_module[1] # 'this_module' is the list of nodes in the current module
this_module = map(int, this_module.split())
modules.append(this_module) # list of modules (list of lists)
size=0
conect_list=[]
averageK=0
for node in this_module: # loop over the nodes of the current module
node=str(node)
conect_list.append(G.degree(node)) #create a connectivity list for the nodes in the module
averageK=averageK+G.degree(node)
size=size+1
size_list.append(size)# list of community sizes
averageK=averageK/float(size)
average_k_list.append(int(round(averageK,0)))
if max_size < size:
max_size = size
if max_max_degree < max(conect_list):
max_max_degree = max(conect_list)
average_size=average_size+size
average_max_degree=average_max_degree+max(conect_list)
max_conect_list.append(max(conect_list))
#average over communities
average_size=average_size/len(modules)
average_max_degree=average_max_degree/len(modules)
        calculations.append(len(modules)) #number of communities
calculations.append(average_size) # average sizes of communities
calculations.append(max_size) # maximum size of communities
list_of_data_list.append(max_conect_list) # list of maximum conectivity per each community
list_of_data_list.append(average_k_list) # list of average conectivity per each community
list_of_data_list.append(size_list) # list of community sizes
#print the results
#print "number_of_communities_detected:"+str(len(modules))
#print "average_size:", average_size,"average_max_degree:",average_max_degree
#print "max_size:", max_size,"max_max_degree:",max_max_degree
output_string = "modularity:" + str(modularity) +"\n" #print modularity
for s in modules:
module_string = ",".join(map(str,s))
output_string += module_string + ";\n" # print the elements of every community
#print output_string
print modularity
# write the output files
file2 = open(name+'_list_modularity_analysis_GC','wt') #one output file per each input file
print >> file2, "data: list_10top_hubs list_max(k)_each_comm list_<k>_each_comm list_community_sizes\n"
for item in list_of_data_list:
print >> file2, item
print >> file2, "\n"
file2.close()
file1 = open('summary_modularity_analysis_GC','at') # one summary file for everything
for calculation in calculations:
print >> file1, calculation, # with a comma at the end, there is not \n between values
file1.close()
if __name__== "__main__":
main()
| [
"julia@chem-eng.northwestern.edu"
] | julia@chem-eng.northwestern.edu |
eb8fa5427f66f56ef70b366075195c87104efe23 | c5e86a17588fe447f5708c809334fa1ba0f55823 | /beacot/qa/rpc-tests/test_framework/test_framework.py | 3696906ab0d191f171983669d14dd9c40991fbac | [
"MIT"
] | permissive | ashwinshetttty/Beacot-Coin | bcc934e9d74df7c2e0c61c8d568414167636f665 | 3ab9a57ccefc5fc05448eff8d1bb6e1e6790f4e7 | refs/heads/master | 2023-08-30T18:01:54.913801 | 2021-10-14T07:13:38 | 2021-10-14T07:13:38 | 417,014,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,221 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Base class for RPC testing
import logging
import optparse
import os
import sys
import shutil
import tempfile
import traceback
from .util import (
initialize_chain,
start_nodes,
connect_nodes_bi,
sync_blocks,
sync_mempools,
stop_nodes,
stop_node,
enable_coverage,
check_json_precision,
initialize_chain_clean,
PortSeed,
)
from .authproxy import JSONRPCException
class BitcoinTestFramework(object):
def __init__(self):
self.num_nodes = 4
self.setup_clean_chain = False
self.nodes = None
def run_test(self):
raise NotImplementedError
def add_options(self, parser):
pass
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
if self.setup_clean_chain:
initialize_chain_clean(self.options.tmpdir, self.num_nodes)
else:
initialize_chain(self.options.tmpdir, self.num_nodes, self.options.cachedir)
def stop_node(self, num_node):
stop_node(self.nodes[num_node], num_node)
def setup_nodes(self):
return start_nodes(self.num_nodes, self.options.tmpdir)
def setup_network(self, split = False):
self.nodes = self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
# If we joined network halves, connect the nodes from the joint
# on outward. This ensures that chains are properly reorganised.
if not split:
connect_nodes_bi(self.nodes, 1, 2)
sync_blocks(self.nodes[1:3])
sync_mempools(self.nodes[1:3])
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 2, 3)
self.is_network_split = split
self.sync_all()
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
assert not self.is_network_split
stop_nodes(self.nodes)
self.setup_network(True)
def sync_all(self):
if self.is_network_split:
sync_blocks(self.nodes[:2])
sync_blocks(self.nodes[2:])
sync_mempools(self.nodes[:2])
sync_mempools(self.nodes[2:])
else:
sync_blocks(self.nodes)
sync_mempools(self.nodes)
def join_network(self):
"""
Join the (previously split) network halves together.
"""
assert self.is_network_split
stop_nodes(self.nodes)
self.setup_network(False)
def main(self):
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave beacotds and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop beacotds after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing beacotd/beacot-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
help="Root directory for datadirs")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
# backup dir variable for removal at cleanup
self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)
if self.options.trace_rpc:
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
if self.options.coveragedir:
enable_coverage(self.options.coveragedir)
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir+":"+self.options.srcdir+"/qt:"+os.environ['PATH']
check_json_precision()
success = False
try:
os.makedirs(self.options.tmpdir, exist_ok=False)
self.setup_chain()
self.setup_network()
self.run_test()
success = True
except JSONRPCException as e:
print("JSONRPC error: "+e.error['message'])
traceback.print_tb(sys.exc_info()[2])
except AssertionError as e:
print("Assertion failed: " + str(e))
traceback.print_tb(sys.exc_info()[2])
except KeyError as e:
print("key not found: "+ str(e))
traceback.print_tb(sys.exc_info()[2])
except Exception as e:
print("Unexpected exception caught during testing: " + repr(e))
traceback.print_tb(sys.exc_info()[2])
except KeyboardInterrupt as e:
print("Exiting after " + repr(e))
if not self.options.noshutdown:
print("Stopping nodes")
stop_nodes(self.nodes)
else:
print("Note: beacotds were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success:
print("Cleaning up")
shutil.rmtree(self.options.tmpdir)
if not os.listdir(self.options.root):
os.rmdir(self.options.root)
else:
print("Not cleaning up dir %s" % self.options.tmpdir)
if os.getenv("PYTHON_DEBUG", ""):
# Dump the end of the debug logs, to aid in debugging rare
# travis failures.
import glob
filenames = glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
MAX_LINES_TO_PRINT = 1000
for f in filenames:
print("From" , f, ":")
from collections import deque
print("".join(deque(open(f), MAX_LINES_TO_PRINT)))
if success:
print("Tests successful")
sys.exit(0)
else:
print("Failed")
sys.exit(1)
# Test framework for doing p2p comparison testing, which sets up some bitcoind
# binaries:
# 1 binary: test binary
# 2 binaries: 1 test binary, 1 ref binary
# n>2 binaries: 1 test binary, n-1 ref binaries
class ComparisonTestFramework(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("LITECOIND", "beacotd"),
help="beacotd binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("LITECOIND", "beacotd"),
help="beacotd binary to use for reference nodes (if any)")
def setup_network(self):
self.nodes = start_nodes(
self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1']] * self.num_nodes,
binary=[self.options.testbinary] +
[self.options.refbinary]*(self.num_nodes-1))
| [
"you@example.com"
] | you@example.com |
160289ff688bc965bd43eefb6191f2d312a43beb | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02694/s200861425.py | 4b469ff53f2bc90a3841359fd40ef2a55766bd47 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 90 | py | x = int(input())
a = 100
i = 0
while a < x:
i += 1
a += a // 100
print(i)
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
5be1057357ca33e65e6cafbd5328dd42ab0dd6cb | abeec076f89231c4dd589e84def8301e653d6e20 | /pages/views.py | 5052137eabc399ac2ce69d4c3ee03c3ff3a1008c | [] | no_license | gibil5/pcm_restaurant | 1cde6ee2780d3aa39dbc26dd9583f8465a1ff13a | a56ec01c533ed2b6e198de9813f9518a3eca2d14 | refs/heads/master | 2020-08-29T20:10:13.606229 | 2019-12-01T19:48:47 | 2019-12-01T19:48:47 | 218,160,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
# Create your views here.
# ------------------------------------------------ Home ---------------------
def home(request):
ctx = {}
output = render(request, 'pages/home.html', ctx)
return HttpResponse(output)
| [
"jrevilla55@gmail.com"
] | jrevilla55@gmail.com |
2f335708256187e77033287574a3fb0a96501daa | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Rasterio_osgeo_shapely_PIL_pyproj_numpy/source/docutils/languages/fa.py | 48aa588deb4066e1034b9e603893fded6779dba7 | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 2,044 | py | # -*- coding: utf-8 -*-
# $Id: fa.py 4564 2016-08-10 11:48:42Z
# Author: Shahin <me@5hah.in>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Persian-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
# fixed: language-dependent
u'author': u'نویسنده',
u'authors': u'نویسندگان',
u'organization': u'سازمان',
u'address': u'آدرس',
u'contact': u'تماس',
u'version': u'نسخه',
u'revision': u'بازبینی',
u'status': u'وضعیت',
u'date': u'تاریخ',
u'copyright': u'کپیرایت',
u'dedication': u'تخصیص',
u'abstract': u'چکیده',
u'attention': u'توجه!',
u'caution': u'احتیاط!',
u'danger': u'خطر!',
u'error': u'خطا',
u'hint': u'راهنما',
u'important': u'مهم',
u'note': u'یادداشت',
u'tip': u'نکته',
u'warning': u'اخطار',
u'contents': u'محتوا'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
# language-dependent: fixed
u'نویسنده': u'author',
u'نویسندگان': u'authors',
u'سازمان': u'organization',
u'آدرس': u'address',
u'تماس': u'contact',
u'نسخه': u'version',
u'بازبینی': u'revision',
u'وضعیت': u'status',
u'تاریخ': u'date',
u'کپیرایت': u'copyright',
u'تخصیص': u'dedication',
u'چکیده': u'abstract'}
"""Persian (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [u'؛', u'،']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
a8c82d26a3f03fe05f4ce33a1d93a3003463983e | 18b3d06a8a93839f7e7a1cf536a71bfc0adf8e20 | /devel/lib/python2.7/dist-packages/msgs_demo/msg/_GetMapFeedback.py | 446f7fdb8b177dd8f01c4c16e0931a2dccbd66a2 | [] | no_license | akingse/ros_tutorial_ws | dc52cbbf443f7823a0abd9223fef076cf959a24e | 7c776d2f62af0455a899c80e171d5210e0a8b382 | refs/heads/main | 2023-03-01T04:48:54.510004 | 2021-02-08T14:08:18 | 2021-02-08T14:09:30 | 337,094,532 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,118 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from msgs_demo/GetMapFeedback.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class GetMapFeedback(genpy.Message):
_md5sum = "d41d8cd98f00b204e9800998ecf8427e"
_type = "msgs_demo/GetMapFeedback"
_has_header = False # flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# no feedback fields
"""
__slots__ = []
_slot_types = []
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(GetMapFeedback, self).__init__(*args, **kwds)
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
pass
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
end = 0
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
| [
"akingse@qq.com"
] | akingse@qq.com |
a02a3e1aaed8310d24cd73055fd40d4605004c89 | b46594de5c173e891e04556d9a3866fa5bbbf32d | /python/argon/common.py | 0257923dd35bf5864ccb710efdbbc513cdf1659d | [] | no_license | ssorj/argon | 7e36cd64b7ac3b79a9b94a759347963673b05f3c | 1db9e74428ac0ade435cf36f5e3d3c82f460961d | refs/heads/master | 2021-05-11T03:19:02.661533 | 2019-01-06T14:48:31 | 2019-01-06T14:48:31 | 117,912,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,133 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys as _sys
_micropython = _sys.implementation.name == "micropython"
if _micropython:
import gc as _gc
import uos as _os
import urandom as _random
import uselect as _select
import usocket as _socket
import ustruct as _struct
import utime as _time
else:
_gc = None
import os as _os
import random as _random
import select as _select
import socket as _socket
import struct as _struct
import time as _time
try:
_DEBUG = _os.getenv("ARGON_DEBUG") is not None
except AttributeError:
_DEBUG = False
class Buffer:
def __init__(self):
self._octets = bytearray(256)
self._view = memoryview(self._octets)
def skip(self, offset, size):
return offset + size, offset
def read(self, offset, size):
end = offset + size
return end, self._view[offset:end]
def write(self, offset, octets):
end = offset + len(octets)
self.ensure(end)
self._octets[offset:end] = octets
return end
def unpack(self, offset, size, format_string):
assert len(self) > offset + size
values = _struct.unpack_from(format_string, self._view, offset)
return (offset + size,) + values
def __getitem__(self, index):
return self._view[index]
def __setitem__(self, index, value):
self._view[index] = value
def __len__(self):
return len(self._octets)
def ensure(self, size):
if len(self._octets) < size:
new_size = max(size, 2 * len(self._octets))
self._octets = self._octets + bytearray(new_size - len(self._octets))
self._view = memoryview(self._octets)
def pack(self, offset, size, format_string, *values):
self.ensure(offset + size)
_struct.pack_into(format_string, self._octets, offset, *values)
return offset + size
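# Example (sketch): pack a big-endian u32 at offset 0, then read it back
#   buf = Buffer()
#   end = buf.pack(0, 4, "!I", 0xdeadbeef)    # end == 4
#   end, value = buf.unpack(0, 4, "!I")       # value == 0xdeadbeef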
def _uuid_bytes():
_random.seed(round(_time.time() * 1000))
values = (
_random.getrandbits(32),
_random.getrandbits(32),
_random.getrandbits(32),
_random.getrandbits(32),
)
return _struct.pack("IIII", *values)
def _hex(data):
return "".join(["{:02x}".format(x) for x in data])
def _shorten(string, max_=20):
if string is None:
return string
return string[:min(max_, len(string))]
| [
"jross@apache.org"
] | jross@apache.org |
e6507dc8f2184041c78bf5342bf5f63ee0e6e5bd | 725abfa74e3800622837e60615dc15c6e91442c0 | /venv/Lib/site-packages/django/db/backends/utils.py | 1aefb0653aa9e254988d4018f9fc091650b27b84 | [] | no_license | Malak-Abdallah/TODOlist | 4840e2e0a27e6499ae6b37524bb3e58455d08bfb | fd35754e8eac9b262fae17ec16ad9fb510a12f5d | refs/heads/master | 2023-07-16T11:38:48.759232 | 2021-08-31T09:43:11 | 2021-08-31T09:43:11 | 401,600,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,652 | py | import datetime
import decimal
import functools
import hashlib
import logging
import time
from contextlib import contextmanager
from django.db import NotSupportedError
logger = logging.getLogger("django.db.backends")
class CursorWrapper:
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
WRAP_ERROR_ATTRS = frozenset(["fetchone", "fetchmany", "fetchall", "nextset"])
def __getattr__(self, attr):
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
with self.db.wrap_database_errors:
yield from self.cursor
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Close instead of passing through to avoid backend-specific behavior
# (#17671). Catch errors liberally because errors in cleanup code
# aren't useful.
try:
self.close()
except self.db.Database.Error:
pass
# The following methods cannot be implemented in __getattr__, because the
# code must run when the method is invoked, not just when it is accessed.
def callproc(self, procname, params=None, kparams=None):
# Keyword parameters for callproc aren't supported in PEP 249, but the
# database driver may support them (e.g. cx_Oracle).
if kparams is not None and not self.db.features.supports_callproc_kwargs:
raise NotSupportedError(
"Keyword parameters for callproc are not supported on this "
"database backend."
)
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None and kparams is None:
return self.cursor.callproc(procname)
elif kparams is None:
return self.cursor.callproc(procname, params)
else:
params = params or ()
return self.cursor.callproc(procname, params, kparams)
def execute(self, sql, params=None):
return self._execute_with_wrappers(
sql, params, many=False, executor=self._execute
)
def executemany(self, sql, param_list):
return self._execute_with_wrappers(
sql, param_list, many=True, executor=self._executemany
)
def _execute_with_wrappers(self, sql, params, many, executor):
context = {"connection": self.db, "cursor": self}
for wrapper in reversed(self.db.execute_wrappers):
executor = functools.partial(wrapper, executor)
return executor(sql, params, many, context)
def _execute(self, sql, params, *ignored_wrapper_args):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None:
# params default might be backend specific.
return self.cursor.execute(sql)
else:
return self.cursor.execute(sql, params)
def _executemany(self, sql, param_list, *ignored_wrapper_args):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
class CursorDebugWrapper(CursorWrapper):
# XXX callproc isn't instrumented at this time.
def execute(self, sql, params=None):
with self.debug_sql(sql, params, use_last_executed_query=True):
return super().execute(sql, params)
def executemany(self, sql, param_list):
with self.debug_sql(sql, param_list, many=True):
return super().executemany(sql, param_list)
@contextmanager
def debug_sql(
self, sql=None, params=None, use_last_executed_query=False, many=False
):
start = time.monotonic()
try:
yield
finally:
stop = time.monotonic()
duration = stop - start
if use_last_executed_query:
sql = self.db.ops.last_executed_query(self.cursor, sql, params)
try:
times = len(params) if many else ""
except TypeError:
# params could be an iterator.
times = "?"
self.db.queries_log.append(
{
"sql": "%s times: %s" % (times, sql) if many else sql,
"time": "%.3f" % duration,
}
)
logger.debug(
"(%.3f) %s; args=%s",
duration,
sql,
params,
extra={"duration": duration, "sql": sql, "params": params},
)
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
return (
datetime.date(*map(int, s.split("-"))) if s else None
) # return None if s is null
def typecast_time(s): # does NOT store time zone information
if not s:
return None
hour, minutes, seconds = s.split(":")
if "." in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split(".")
else:
microseconds = "0"
return datetime.time(
int(hour), int(minutes), int(seconds), int((microseconds + "000000")[:6])
)
def typecast_timestamp(s): # does NOT store time zone information
# "2005-07-29 15:48:00.590358-05"
# "2005-07-29 09:56:00-05"
if not s:
return None
if " " not in s:
return typecast_date(s)
d, t = s.split()
# Remove timezone information.
if "-" in t:
t, _ = t.split("-", 1)
elif "+" in t:
t, _ = t.split("+", 1)
dates = d.split("-")
times = t.split(":")
seconds = times[2]
if "." in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split(".")
else:
microseconds = "0"
return datetime.datetime(
int(dates[0]),
int(dates[1]),
int(dates[2]),
int(times[0]),
int(times[1]),
int(seconds),
int((microseconds + "000000")[:6]),
)
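# For example, typecast_timestamp("2005-07-29 09:56:00-05") returns
# datetime.datetime(2005, 7, 29, 9, 56); the timezone offset is discarded.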
###############################################
# Converters from Python to database (string) #
###############################################
def split_identifier(identifier):
"""
Split an SQL identifier into a two element tuple of (namespace, name).
The identifier could be a table, column, or sequence name might be prefixed
by a namespace.
"""
try:
namespace, name = identifier.split('"."')
except ValueError:
namespace, name = "", identifier
return namespace.strip('"'), name.strip('"')
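# For example, split_identifier('"USER"."TABLE"') returns ('USER', 'TABLE').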
def truncate_name(identifier, length=None, hash_len=4):
"""
Shorten an SQL identifier to a repeatable mangled version with the given
length.
If a quote stripped name contains a namespace, e.g. USERNAME"."TABLE,
truncate the table portion only.
"""
namespace, name = split_identifier(identifier)
if length is None or len(name) <= length:
return identifier
digest = names_digest(name, length=hash_len)
return "%s%s%s" % (
'%s"."' % namespace if namespace else "",
name[: length - hash_len],
digest,
)
def names_digest(*args, length):
"""
Generate a 32-bit digest of a set of arguments that can be used to shorten
identifying names.
"""
h = hashlib.md5()
for arg in args:
h.update(arg.encode())
return h.hexdigest()[:length]
def format_number(value, max_digits, decimal_places):
"""
Format a number into a string with the requisite number of digits and
decimal places.
"""
if value is None:
return None
context = decimal.getcontext().copy()
if max_digits is not None:
context.prec = max_digits
if decimal_places is not None:
value = value.quantize(
decimal.Decimal(1).scaleb(-decimal_places), context=context
)
else:
context.traps[decimal.Rounded] = 1
value = context.create_decimal(value)
return "{:f}".format(value)
def strip_quotes(table_name):
"""
Strip quotes off of quoted table names to make them safe for use in index
names, sequence names, etc. For example '"USER"."TABLE"' (an Oracle naming
scheme) becomes 'USER"."TABLE'.
"""
has_quotes = table_name.startswith('"') and table_name.endswith('"')
return table_name[1:-1] if has_quotes else table_name
| [
"malkobeidallah@gmail.com"
] | malkobeidallah@gmail.com |
b624987f59ba7fdd8d21977422b1355a8e3e5847 | c7792b5e5ae5e74d643518a5b0644020288fc6da | /whichbugs.py | e8aa2354b97b600b4c5a6a49f4954a3a80a38897 | [
"BSD-2-Clause"
] | permissive | agroce/fuzzgoattriage | 0dc99daf2d061aaa0f58ceef3657b6f9ff411613 | 173c585cc7e87bcb2b82ae22fde56935352cd597 | refs/heads/master | 2020-07-29T14:49:39.691056 | 2019-09-20T18:07:19 | 2019-09-20T18:07:19 | 209,830,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | import subprocess
import glob
import os
import sys
dnull = open(os.devnull, 'w')
ms = glob.glob("justbug*")
ms = map(lambda x: "./" +x, ms)
if True:
for t in glob.glob("out/crashes/id*"):
print t
i = 0
for m in ms:
r = subprocess.call(["ulimit -t 1;" + m + " " + t], shell=True, stdout=dnull, stderr=dnull)
if r != 0:
print m
sys.stdout.flush()
print
| [
"agroce@gmail.com"
] | agroce@gmail.com |
57b3d7204d3454cd3a8b66c7f36e2c1017b07255 | 2a54a1d9996778362421299a936bb0dadaace958 | /units/adms/mysite/iclock/models/model_devoperate.py | b05e500934966760202e07897b6ed3452ae1eb50 | [] | no_license | icprog/zktime_wlm | 6d0719b5210c4d3196b5958bccbb7e606785ece3 | 449c487ce4664dde734f8007a974ed883801d106 | refs/heads/master | 2021-03-21T10:20:54.157131 | 2018-11-24T04:10:42 | 2018-11-24T04:10:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,786 | py | # -*- coding: utf-8 -*-
#! /usr/bin/env python
import datetime
from django.db import models, connection
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.core.cache import cache
from base.operation import OperationBase, Operation, ModelOperation
from base.cached_model import CachingModel
from django.conf import settings
from mysite.utils import get_option
CMM_TYPE=(
    (1,_(u'Device automatic command')),
    (2,_(u'User issued command')),
)
SUCCESS_FLAG=(
    (0,_(u'Unprocessed')),
    (1,_(u'Success')),
    (2,_(u'Failure')),
)
CMM_SYSTEM=(
    (1,_(u'Access control')),
    (2,_(u'Time attendance')),
)
class OperateCmd(CachingModel):
Author=models.ForeignKey(User, null=True,blank=True)
    CmdContent = models.TextField(verbose_name=_(u'Command description'),max_length=2048)
    CmdCommitTime = models.DateTimeField( verbose_name=_(u'Command creation time'),default=datetime.datetime.now)
    commit_time = models.DateTimeField(verbose_name=_(u'Command completion time'), null=True, blank=True)
    CmdReturn = models.IntegerField(_(u'Return value'), null=True, blank=True)
    process_count=models.SmallIntegerField(verbose_name=_(u'Process count'),default=0)
    success_flag=models.SmallIntegerField(verbose_name=_(u'Process flag'),default=0,choices=SUCCESS_FLAG)
    receive_data = models.TextField(verbose_name=_(u'Command data'), null=True, blank=True)
    cmm_type=models.SmallIntegerField(verbose_name=_(u'Command type'),default=1,blank=False,null=False,choices=CMM_TYPE)
    cmm_system=models.SmallIntegerField(verbose_name=_(u'Command system'),default=1,blank=False,null=False,choices=CMM_SYSTEM)
class Admin(CachingModel.Admin):
list_display=('create_operator','CmdCommitTime','cmm_type','CmdContent','commit_time','process_count','success_flag',)
sort_fields =["create_operator","CmdCommitTime","commit_time","success_flag"]
search_fields = ["CmdContent"]
query_fields=('cmm_type','process_count','success_flag')
cache=False
read_only=True
log=False
        visible = get_option("DEVOPERATE_VISIBLE") # currently only used by the time-attendance system
disabled_perms=["add_operatecmd",'change_operatecmd','delete_operatecmd']
hide_perms=["dataexport_operatecmd",]
class Meta:
app_label='iclock'
db_table = 'operatecmds'
        verbose_name = _(u'Communication command details')
verbose_name_plural=verbose_name
def save(self, *args, **kwargs):
super(OperateCmd, self).save(log_msg=False)
@staticmethod
def clear():
OperateCmd.objects.all().delete()
class _delete(Operation):
        verbose_name=_(u'Delete')
visible=False
def action():
pass
class _add(Operation):
visible=False
        verbose_name=_(u'Add')
def action():
pass
class _change(Operation):
visible=False
        verbose_name=_(u'Edit')
def action():
pass
def get_process_status(self):
total=self.devcmd_set.all().count()
current=self.devcmd_set.filter(CmdOverTime__isnull=False).count()
from decimal import ROUND_HALF_UP,Decimal
if total>0:
return str(Decimal(str(float(current)/float(total)*100)).quantize(Decimal('0'),ROUND_HALF_UP))+"%"
else:
if self.success_flag==1:
return "100%"
else:
return "0%"
def limit_operatecmd_to(self,qs,user):
from django.db.models import Q
filter={'cmm_system':2}
if user.is_superuser:
pass
else:
filter['create_operator__exact']=u"%s"%user
return qs.filter(Q(**filter))
| [
"657984027@qq.com"
] | 657984027@qq.com |
88177735f8868cfda2b92a9a23a7e0b8b1b50b4e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/109/usersdata/224/63305/submittedfiles/av2_p3_civil.py | 74e486626635478f0d456283a47e71aba216271f | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | # -*- coding: utf-8 -*-
import numpy as np
def somatorioDaColuna(A,j):
soma2=0
    for i in range(0,A.shape[0],1):
        soma2=soma2+A[i,j]
    return(soma2)
def somatorioDaLinha(A,i):
soma=0
for j in range(0,A.shape[1],1):
soma=soma+A[i,j]
return (soma)
n=int(input('Digite a dimensão da Matriz A: '))
x=int(input('Digite o índice da linha: '))
y=int(input('Digite o índice da coluna: '))
A=np.zeros((n,n))
for i in range(0,A.shape[0],1):
for j in range(0,A.shape[1],1):
A[i,j]=int(input('Digite os pesos em cada posição: '))
pesoDaPosicao=somatorioDaColuna(A,y)+somatorioDaLinha(A,x)-(2*A[x,y])
print(pesoDaPosicao)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
49fe190f0f418cee0e3a2eede3469a3c263b4dd6 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/got-tyrion-web/flask/lib/python2.7/site-packages/pip/_vendor/cachecontrol/__init__.py | 612abcc11e161b614239ba31c3000f491bd9b5ce | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:bb3ecec6cf61426531b71a1dd56c95a148fa25a6fb35f88b1023c716e73edb65
size 302
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
279bccd834532c0a1dd881e625f22c20d4468ac6 | 55e28e35db5bf6a844df3fb47080500b115a893e | /day13/s7.py | 60b61e339ca0556100e396336b41b952c378ed49 | [] | no_license | pylarva/Python | 5743ffa4a69db42b642d51b62f9e9b69ddbc1a72 | 71b484950e6dbdcf708726a68a3386d0d6ddc07f | refs/heads/master | 2020-04-19T09:11:11.195393 | 2017-11-16T07:32:59 | 2017-11-16T07:32:59 | 67,507,687 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,045 | py | # !/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:pylarva
# bolg:www.lichengbing.com
from sqlalchemy import create_engine,and_,or_,func,Table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String,ForeignKey,UniqueConstraint,DateTime
from sqlalchemy.orm import sessionmaker,relationship
engine = create_engine("mysql+pymysql://root:123@10.0.0.111:3306/s13", max_overflow=5)
Base = declarative_base()
# application login users and host accounts: one person may own several host
# accounts, and one host account may be shared by several people
UserProfile2HostUser= Table('userprofile_2_hostuser',Base.metadata,
Column('userprofile_id',ForeignKey('user_profile.id'),primary_key=True),
Column('hostuser_id',ForeignKey('host_user.id'),primary_key=True),
)
class Host(Base):
__tablename__='host'
id = Column(Integer,primary_key=True,autoincrement=True)
hostname = Column(String(64),unique=True,nullable=False)
ip_addr = Column(String(128),unique=True,nullable=False)
port = Column(Integer,default=22)
def __repr__(self):
return "<id=%s,hostname=%s, ip_addr=%s>" %(self.id,
self.hostname,
self.ip_addr)
class HostUser(Base):
__tablename__ = 'host_user'
id = Column(Integer, primary_key=True)
AuthTypes = [
(u'ssh-passwd', u'SSH/Password'),
(u'ssh-key', u'SSH/KEY'),
]
# auth_type = Column(ChoiceType(AuthTypes))
auth_type = Column(String(64))
username = Column(String(64), unique=True, nullable=False)
password = Column(String(255))
host_id = Column(Integer, ForeignKey('host.id'))
__table_args__ = (UniqueConstraint('host_id', 'username', name='_host_username_uc'),)
class Group(Base):
__tablename__ = 'group'
id = Column(Integer,primary_key=True)
name = Column(String(64),unique=True,nullable=False)
class UserProfile(Base):
__tablename__ = 'user_profile'
id = Column(Integer,primary_key=True)
username = Column(String(64),unique=True,nullable=False)
password = Column(String(255),nullable=False)
    # a person can only belong to one group
group_id = Column(Integer, ForeignKey('group.id'))
    host_list = relationship('HostUser', secondary=UserProfile2HostUser, backref='userprofiles')
Session = sessionmaker(bind=engine)
session = Session()
obj = session.query(UserProfile).filter_by(username='entered username', password='entered password').first()
if obj:
    # print all machines belonging to this user
    for item in obj.host_list:
        # item is a HostUser object
        item.password, item.username,
        # item.host is a Host object
        item.host.hostname, item.host.port
class AuditLog(Base):
__tablename__ = 'audit_log'
id = Column(Integer,primary_key=True)
userprofile_id = Column(Integer,ForeignKey('user_profile.id'))
hostuser_id = Column(Integer,ForeignKey('host_user.id'))
cmd = Column(String(255))
date = Column(DateTime)
| [
"1326126359@qq.com"
] | 1326126359@qq.com |
bd37d5d53f0d15aec98b1f5d09c49c7ab1b8e1ac | 3be1ddf42236a1b33ec74ed3bfdd0f8918513733 | /coding-challenges/week12/day03/Q.2.py | 1a8fd269545bd4e6e303677b5387c464cca85f59 | [] | no_license | aabhishek-chaurasia-au17/MyCoding_Challenge | 84ef926b550b3f511f1c642fe35f4303c8abb949 | 419d02ad8740a2c00403fd30c661074266d2ba8f | refs/heads/main | 2023-08-29T09:52:36.796504 | 2021-11-07T07:32:09 | 2021-11-07T07:32:09 | 359,842,173 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | """
Q-2) Write the steps of the heapify/percolate-down method, and give its time
complexity and space complexity analysis. (5 marks)
(Super Easy)
"""
heap = [111, 22, 33, 4, 15, 64, 7, 8, 9]
def heapsort():
global heap
while len(heap) != 0:
print(heap[0])
heap[0], heap[-1] = heap[-1], heap[0]
heap.pop()
heapify(0)
def heapify(i):
global heap
left_idx = 2 * i + 1
right_idx = 2 * i + 2
if left_idx > len(heap) - 1 and right_idx > len(heap) - 1:
return
max_idx = i
if left_idx < len(heap) and heap[max_idx] < heap[left_idx]:
max_idx = left_idx
if right_idx < len(heap) and heap[max_idx] < heap[right_idx]:
max_idx = right_idx
if max_idx != i:
heap[max_idx], heap[i] = heap[i], heap[max_idx]
heapify(max_idx)
if __name__ == "__main__":
n = len(heap)
for i in range(n - 1, -1, -1):
heapify(i)
heapsort()
| [
"abhishekc838@gmail.com"
] | abhishekc838@gmail.com |
f5ba5767d75aa13b3c93663c3462dbf7a20eff7b | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/detection/SSD_for_PyTorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py | dd7eddfab7b40870df07c1e6045e9372de27c830 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 1,051 | py | # Copyright 2022 Huawei Technologies Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'
model = dict(roi_head=dict(bbox_head=dict(num_classes=1)))
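# single-class fine-tuning: shrink the box head to one class and restrict every
# dataset split below to 'person' annotations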
classes = ('person', )
data = dict(
train=dict(classes=classes),
val=dict(classes=classes),
test=dict(classes=classes))
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth' # noqa
| [
"chenyong84@huawei.com"
] | chenyong84@huawei.com |
e4820e819afe8515dcfc4c70add913784166710c | 70cbc3e6002ccc0e2bf570c90e675c34a84b7ce9 | /device_list_api/wsgi.py | 9b5fa97e8c000353672604ae680c71b2545301e3 | [] | no_license | shotaro0726/dvice_manager | 61d31da512b3401a5864d06ad1c9269f22bace87 | 431ea9c7098dfcea46a57404541a34847559ddba | refs/heads/master | 2022-09-21T03:25:34.702838 | 2020-05-30T17:27:40 | 2020-05-30T17:27:40 | 268,010,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
WSGI config for device_list_api project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'device_list_api.settings')
application = get_wsgi_application()
| [
"shoutaro0726@gmail.com"
] | shoutaro0726@gmail.com |
7c8d25b8c58f79d83c263c0664d30b7997f41902 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R1/benchmark/startQiskit_QC428.py | 65e336bc4f3e6fcbdf417489f4dbaa53551064de | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,508 | py | # qubit number=3
# total number=77
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
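# bitwise_dot returns the mod-2 inner product (parity) of s and t as '0' or '1'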
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.h(input_qubit[1]) # number=70
prog.rx(-0.09738937226128368,input_qubit[2]) # number=2
prog.h(input_qubit[1]) # number=33
prog.y(input_qubit[2]) # number=56
prog.cz(input_qubit[2],input_qubit[1]) # number=34
prog.h(input_qubit[1]) # number=35
prog.h(input_qubit[1]) # number=3
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
    sample_shot = 4000
writefile = open("../data/startQiskit_QC428.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_belem")
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
769fb486715718f376703c406914f71e8c68b786 | 29540b843fa1fc8e0fa5979c2b0029ec7f957b55 | /unit/migrations/0007_airman_phone_number.py | 90ca3ae6f04a044c2d9e21c2b87e37b693a15739 | [] | no_license | lopezjronald/asts-fitness-program-v2 | 7ae5282e03f124d912e834e43907af3af4608a4e | a6e111265e9d86dfb101b9df1d2629010cf9066d | refs/heads/main | 2023-03-07T12:11:18.704931 | 2021-02-17T11:04:18 | 2021-02-17T11:04:18 | 316,976,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | # Generated by Django 3.1.3 on 2021-02-17 07:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('unit', '0006_auto_20201201_2013'),
]
operations = [
migrations.AddField(
model_name='airman',
name='phone_number',
field=models.CharField(max_length=10, null=True),
),
]
| [
"lopez.j.ronald@gmail.com"
] | lopez.j.ronald@gmail.com |
885b6854611137eb3ef2001628a9f8a75b508036 | 5b52320d3cc707285390e02bec8b33c51229054d | /server/user_settings/migrations/0007_auto_20201214_1822.py | 7b18e97498a11c5e817f81fbcb340f637d8387e6 | [] | no_license | Aviemusca/bjj-digraph | abb541f81a72acb2020e480dfac2f85a98cbfe73 | 9e01ff8ab73f6d9d16606ec1c8b7c91cdfa9cd2c | refs/heads/main | 2023-03-05T02:08:17.260158 | 2021-02-19T14:22:50 | 2021-02-19T14:22:50 | 337,366,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | # Generated by Django 3.1.3 on 2020-12-14 18:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("user_settings", "0006_auto_20201211_1347"),
]
operations = [
migrations.AddField(
model_name="usergamenodesettings",
name="fill_opacity",
field=models.FloatField(default=1.0),
),
migrations.AddField(
model_name="usermetanodesettings",
name="fill_opacity",
field=models.FloatField(default=1.0),
),
]
| [
"yvan@metatech.ie"
] | yvan@metatech.ie |
c700637541affd3d5b62be7bfe9b067396960cf7 | c32d1463914c8fb3d597361ba5a4a17e499bd888 | /models/backbones/resnet.py | ba23b353a0533b4903a778a6504886b2f8e86fd2 | [
"MIT"
] | permissive | DonYum/face_recognition_framework | aee7d6a902f6c7c0ea8513fd0204b73b34d7994b | 924b74f868bb06295d21122e3926d37a5d419d6e | refs/heads/master | 2020-12-03T23:38:58.082336 | 2020-01-03T11:29:51 | 2020-01-03T11:29:51 | 231,524,940 | 0 | 0 | MIT | 2020-01-03T06:17:28 | 2020-01-03T06:17:28 | null | UTF-8 | Python | false | false | 5,551 | py | import torch.nn as nn
import math
__all__ = ['resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, feature_dim, spatial_size=224):
fc_map = {224: 12544, 112: 4096}
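        # flattened size of the 256-channel feature map fed to the linear
        # feature layer, keyed by supported input resolution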
self.inplanes = 64
super(ResNet, self).__init__()
self.feature_dim = feature_dim
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
# face
self.layer1x1 = nn.Sequential(
nn.Conv2d(512 * block.expansion, 256, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=False))
self.drop1 = nn.Dropout(0.5)
self.feature = nn.Linear(fc_map[spatial_size], feature_dim)
self.drop2 = nn.Dropout(0.5)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.Linear):
scale = math.sqrt(3. / m.in_features)
m.weight.data.uniform_(-scale, scale)
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
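        # project the residual with a 1x1 conv whenever the spatial size or
        # channel count changes, so it can be added to the block output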
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
# face
x = self.layer1x1(x)
x = self.drop1(x)
x = x.view(x.size(0), -1)
x = self.feature(x)
x = self.drop2(x)
return x
def resnet18(**kwargs):
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
def resnet34(**kwargs):
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
return model
def resnet50(**kwargs):
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
return model
def resnet101(**kwargs):
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
return model
def resnet152(**kwargs):
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
return model
| [
"xiaohangzhan@outlook.com"
] | xiaohangzhan@outlook.com |
b38317fd7e15bf95d4c1d380413c57ceab3a3398 | ad9bd58a3ec8fa08dfcc994d4101ee815a9f5bc0 | /02_algorithm/baekjoon/all_problem/1076.py | 67a85516fa5d7cb85edc31570f22d7c7fd99149f | [] | no_license | wally-wally/TIL | 93fc1d0e3bc7d030341ed54155294c68c48b4c7d | 936783bc86f563646c0398c24e2fcaa707f0ed23 | refs/heads/master | 2023-04-28T08:59:48.235747 | 2023-04-12T12:06:52 | 2023-04-12T12:06:52 | 195,918,111 | 40 | 7 | null | 2020-09-29T16:20:46 | 2019-07-09T02:31:02 | Python | UTF-8 | Python | false | false | 356 | py | import sys
sys.stdin = open('input_1076.txt', 'r')
color_code = {'black': 0, 'brown': 1, 'red': 2, 'orange': 3, 'yellow': 4,
'green': 5, 'blue': 6, 'violet': 7, 'grey': 8, 'white': 9}
result = 0
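# first two color bands form a two-digit value; the third band multiplies
# it by a power of ten (three-band resistor decoding)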
for i in range(3):
if i <= 1: result += color_code[input()] * (10 if not i else 1)
else: result *= 10 ** color_code[input()]
print(result) | [
"wallys0213@gmail.com"
] | wallys0213@gmail.com |
a46201db45bff3afbcf83488cf2e86acdbb5a26b | 327b5efff2b24d42f1b1c7d13b6788c240d3b8d4 | /sapi_app/urls.py | 9730a941d9b75507e9bf636135ee1db10c010c34 | [
"MIT"
] | permissive | calixo888/sapi | 523c35240d19faeed4d4673bdb4ca4ec210d5671 | a2ff327795a7ea088cb158f7738af9121e465a08 | refs/heads/master | 2022-04-30T14:06:35.474634 | 2020-10-25T23:18:49 | 2020-10-25T23:18:49 | 237,724,096 | 0 | 0 | MIT | 2022-04-22T23:03:11 | 2020-02-02T05:34:45 | JavaScript | UTF-8 | Python | false | false | 422 | py | from django.conf.urls import url
from . import views
urlpatterns = [
url("^$", views.index, name="index"),
url("^forgot-api-key/$", views.forgot_api_key, name="forgot_api_key"),
url("^documentation/$", views.documentation, name="documentation"),
url("^get-api-key/$", views.get_api_key, name="get_api_key"),
# API Routes
url("^api/personal/$", views.personal_storage, name="personal_storage"),
]
| [
"calix.huang1@gmail.com"
] | calix.huang1@gmail.com |
788c0a4dc0b12e463c564f5fcf938d4a40d4b387 | e5ae250c070a4e23717f25bdaa99b2e310b05cd2 | /sorting3.py | 95855664abeb1c26e873f0b51baeb2835c1fb52d | [
"MIT"
] | permissive | matthewmuccio/InterviewPrepKit | d3c95c1c2dfb58761f3a61f35ffc72be3c0aebb7 | 13dabeddc3c83866c88bef1c80498c313e4c233e | refs/heads/master | 2020-03-27T15:10:21.540776 | 2018-11-05T16:13:12 | 2018-11-05T16:13:12 | 146,702,058 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | #!/usr/bin/env python3
from functools import cmp_to_key
class Player:
def __init__(self, name, score):
self.name = name
self.score = score
def __repr__(self):
pass
def comparator(a, b):
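        # sort by score descending; break ties by name ascending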
val = b.score - a.score
if val == 0:
return -1 if a.name < b.name else 1
return val
if __name__ == "__main__":
n = int(input())
data = []
for i in range(n):
name, score = input().split()
score = int(score)
player = Player(name, score)
data.append(player)
data = sorted(data, key=cmp_to_key(Player.comparator))
for i in data:
print(i.name, i.score)
| [
"me@matthewmuccio.com"
] | me@matthewmuccio.com |
88f58ea4b1a9bc0da59f6bb65127b965dd49e239 | a900b91c2a5f901097a9f4a4d6a64df848f655dd | /onmt/train_utils/accent_gan_trainer.py | 2d3a2c220780c66ff4481823091cd9ecf625ba7d | [
"MIT"
] | permissive | thanhleha-kit/NMTGMinor | 3c2c89811a72cbc7955144a2f19e20a2f51c334c | 6db79a28634405df5aba19cc940290caaa21096e | refs/heads/master | 2021-06-12T08:55:24.977989 | 2021-06-01T13:45:44 | 2021-06-01T13:45:44 | 375,394,363 | 1 | 0 | NOASSERTION | 2021-06-09T14:59:10 | 2021-06-09T14:59:09 | null | UTF-8 | Python | false | false | 39,439 | py | from __future__ import division
import datetime
import gc
import inspect
import math
import os
import re
import time
import torch
from apex import amp
import onmt
import onmt.markdown
import onmt.modules
from onmt.data.data_iterator import DataIterator
from onmt.data.dataset import rewrap
from onmt.model_factory import build_model, build_language_model, optimize_model
from onmt.model_factory import init_model_parameters
from onmt.train_utils.stats import Logger
from onmt.utils import checkpoint_paths, normalize_gradients
from .trainer import BaseTrainer
def varname(p):
for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:
m = re.search(r'\bvarname\s*\(\s*([A-Za-z_][A-Za-z0-9_]*)\s*\)', line)
if m:
return m.group(1)
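# Best-effort helper: recovers the argument's variable name from the caller's
# source line, e.g. `x = 1; varname(x)` returns "x" (None when nothing matches).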
def generate_data_iterator(dataset, seed, num_workers=1, epoch=1., buffer_size=0):
# check if dataset is a list:
if isinstance(dataset, list):
# this is a multidataset
data_iterator = MultiDataIterator(dataset, seed=seed, num_workers=num_workers,
epoch=epoch, buffer_size=buffer_size)
else:
data_iterator = DataIterator(dataset, dataset.collater, dataset.batches, seed=seed,
num_workers=num_workers, epoch=epoch, buffer_size=buffer_size)
return data_iterator
class SpeechAETrainer(BaseTrainer):
def __init__(self, model, loss_function, train_data, valid_data, dicts, opt, setup_optimizer=True):
super().__init__(model, loss_function, train_data, valid_data, dicts, opt)
self.n_gpus = len(self.opt.gpus)
if self.cuda:
torch.cuda.set_device(self.opt.gpus[0])
if self.opt.seed >= 0:
torch.manual_seed(self.opt.seed)
self.loss_function = self.loss_function.cuda()
self.model = self.model.cuda()
if setup_optimizer:
self.optim = onmt.Optim(opt)
self.optim.set_parameters(self.model.parameters())
if not self.opt.fp16:
opt_level = "O0"
keep_batchnorm_fp32 = False
elif self.opt.fp16_mixed:
opt_level = "O1"
keep_batchnorm_fp32 = None
else:
opt_level = "O2"
keep_batchnorm_fp32 = False
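            # Apex amp levels: O0 = pure fp32, O1 = mixed precision (patched ops),
            # O2 = "almost fp16" with fp32 master weights.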
if self.cuda:
self.model, self.optim.optimizer = amp.initialize(self.model,
self.optim.optimizer,
opt_level=opt_level,
keep_batchnorm_fp32=keep_batchnorm_fp32,
loss_scale="dynamic",
verbosity=1 if self.opt.verbose else 0)
def warm_up(self):
"""
Warmup the memory allocator, by attempting to fit the largest batch
:return:
"""
print("Tacotron_warmup")
if self.opt.memory_profiling:
from pytorch_memlab import MemReporter
reporter = MemReporter()
batch = self.train_data[0].get_largest_batch() if isinstance(self.train_data, list) \
else self.train_data.get_largest_batch()
opt = self.opt
if self.cuda:
batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
self.model.train()
self.model.zero_grad()
oom = False
if self.opt.memory_profiling:
print("Input size: ")
print(batch.size, batch.src_size, batch.tgt_size)
if opt.streaming:
streaming_state = self.model.init_stream()
else:
streaming_state = None
try:
targets = batch.get('target_output')
tgt_mask = None
outputs = self.model(batch)
gate_padded = batch.get('gate_padded')
if self.opt.n_frames_per_step > 1:
slice = torch.arange(self.opt.n_frames_per_step - 1, gate_padded.size(1), self.opt.n_frames_per_step)
gate_padded = gate_padded[:, slice]
src_org = batch.get('source_org')
src_org = src_org.narrow(2, 1, src_org.size(2) - 1)
target = [src_org.permute(1,2,0).contiguous(), gate_padded]
loss = self.loss_function(outputs, target)
# loss_dict = self.loss_function(outputs, targets, model=self.model)
loss = loss # a little trick to avoid gradient overflow with fp16
full_loss = loss
optimizer = self.optim.optimizer
if self.opt.memory_profiling:
reporter.report(verbose=True)
if self.cuda:
with amp.scale_loss(full_loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.div_(batch.tgt_size).backward()
if self.opt.memory_profiling:
print('========= after backward =========')
reporter.report(verbose=True)
self.model.zero_grad()
self.optim.zero_grad()
except RuntimeError as e:
if 'out of memory' in str(e):
oom = True
else:
raise e
if oom:
print("* Warning: out-of-memory in warming up. This is due to the largest batch is too big for the GPU.")
else:
print("* Warming up successuflly.")
if self.opt.memory_profiling:
if hasattr(torch.cuda, 'memory_summary'):
print(torch.cuda.memory_summary())
exit()
def save(self, epoch, valid_ppl, itr=None):
opt = self.opt
model = self.model
dicts = self.dicts
model_state_dict = self.model.state_dict()
optim_state_dict = self.optim.state_dict()
if itr:
itr_state_dict = itr.state_dict()
else:
itr_state_dict = None
# drop a checkpoint
checkpoint = {
'model': model_state_dict,
'dicts': dicts,
'opt': opt,
'epoch': epoch,
'itr': itr_state_dict,
'optim': optim_state_dict,
'amp': amp.state_dict()
}
file_name = '%s_ppl_%.6f_e%.2f.pt' % (opt.save_model, valid_ppl, epoch)
print('Writing to %s' % file_name)
torch.save(checkpoint, file_name)
# check the save directory here
checkpoint_dir = os.path.dirname(opt.save_model)
existed_save_files = checkpoint_paths(checkpoint_dir)
for save_file in existed_save_files[opt.keep_save_files:]:
print(" * Deleting old save file %s ...." % save_file)
os.remove(save_file)
def run(self, checkpoint=None):
opt = self.opt
model = self.model
optim = self.optim
if checkpoint is not None:
self.model.load_state_dict(checkpoint['model'])
prec_opt = checkpoint['opt'] if 'opt' in checkpoint else None
if not opt.reset_optim:
print("* Loading optimizer states ... ")
self.optim.load_state_dict(checkpoint['optim'])
if prec_opt is not None and hasattr(prec_opt, "fp16_mixed"):
# Only load amp information if the mode is the same
# Maybe its better to change between optimization mode?
if opt.fp16_mixed == prec_opt.fp16_mixed and opt.fp16 == prec_opt.fp16:
if 'amp' in checkpoint:
amp.load_state_dict(checkpoint['amp'])
# Only load the progress when we use the same optimizer
if 'itr' in checkpoint:
itr_progress = checkpoint['itr']
else:
itr_progress = None
resume = True
start_epoch = checkpoint['epoch'] if 'epoch' in checkpoint else 1
if start_epoch is None:
start_epoch = 1
else:
itr_progress = None
resume = False
start_epoch = 1
del checkpoint['model']
del checkpoint['optim']
del checkpoint
else:
itr_progress = None
print('Initializing model parameters')
init_model_parameters(model, opt)
resume = False
start_epoch = 1
if opt.load_encoder_from:
self.load_encoder_weight(opt.load_encoder_from)
if opt.load_decoder_from:
self.load_decoder_weight(opt.load_decoder_from)
# if we are on a GPU: warm up the memory allocator
if self.cuda:
self.warm_up()
valid_loss = self.eval(self.valid_data)
print('Validation loss: %g' % valid_loss)
self.start_time = time.time()
for epoch in range(start_epoch, start_epoch + opt.epochs):
print('')
# (1) train for one epoch on the training set
train_loss = self.train_epoch(epoch, resume=resume, itr_progress=itr_progress)
print('Train loss: %g' % train_loss)
# (2) evaluate on the validation set
valid_loss = self.eval(self.valid_data)
print('Validation loss: %g' % valid_loss)
self.save(epoch, valid_loss)
itr_progress = None
resume = False
def eval(self, data):
total_loss = 0
total_tgt_frames = 0
total_sent = 0
opt = self.opt
self.model.eval()
self.loss_function.eval()
# self.model.reset_states()
# the data iterator creates an epoch iterator
data_iterator = generate_data_iterator(data, seed=self.opt.seed,
num_workers=opt.num_workers, epoch=1, buffer_size=opt.buffer_size)
epoch_iterator = data_iterator.next_epoch_itr(False, pin_memory=False)
if opt.streaming:
streaming_state = self.model.init_stream()
else:
streaming_state = None
""" PyTorch semantics: save space by not creating gradients """
data_size = len(epoch_iterator)
i = 0
with torch.no_grad():
# for i in range(len()):
while not data_iterator.end_of_epoch():
# batch = data.next()[0]
batch = next(epoch_iterator)
if isinstance(batch, list):
batch = batch[0]
batch = rewrap(batch)
if self.cuda:
batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
""" outputs can be either
hidden states from decoder or
prob distribution from decoder generator
"""
outputs = self.model(batch)
gate_padded = batch.get('gate_padded')
if self.opt.n_frames_per_step > 1:
slice = torch.arange(self.opt.n_frames_per_step - 1, gate_padded.size(1), self.opt.n_frames_per_step)
gate_padded = gate_padded[:, slice]
src_org = batch.get('source_org')
src_org = src_org.narrow(2, 1, src_org.size(2) - 1)
target = [src_org.permute(1, 2, 0).contiguous(), gate_padded]
loss = self.loss_function(outputs, target)
loss_data = loss.data.item()
total_loss += loss_data
total_tgt_frames += batch.src_size
total_sent += batch.size
i = i + 1
self.model.train()
self.loss_function.train()
return total_loss / data_size * 100
def train_epoch(self, epoch, resume=False, itr_progress=None):
global rec_ppl
opt = self.opt
train_data = self.train_data
streaming = opt.streaming
self.model.train()
self.loss_function.train()
# Clear the gradients of the model
# self.runner.zero_grad()
self.model.zero_grad()
dataset = train_data
data_iterator = generate_data_iterator(dataset, seed=self.opt.seed, num_workers=opt.num_workers,
epoch=epoch, buffer_size=opt.buffer_size)
if resume:
data_iterator.load_state_dict(itr_progress)
epoch_iterator = data_iterator.next_epoch_itr(not streaming, pin_memory=opt.pin_memory)
total_loss, total_frames = 0, 0
        report_loss, report_tgt_frames, report_sent = 0, 0, 0
start = time.time()
n_samples = len(epoch_iterator)
counter = 0
num_accumulated_sents = 0
grad_scaler = -1
nan = False
nan_counter = 0
i = data_iterator.iterations_in_epoch if not isinstance(train_data, list) else epoch_iterator.n_yielded
while not data_iterator.end_of_epoch():
curriculum = (epoch < opt.curriculum)
# this batch generator is not very clean atm
batch = next(epoch_iterator)
if isinstance(batch, list) and self.n_gpus == 1:
batch = batch[0]
batch = rewrap(batch)
if grad_scaler == -1:
grad_scaler = 1 # if self.opt.update_frequency > 1 else batch.tgt_size
if self.cuda:
batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
oom = False
try:
# outputs is a dictionary containing keys/values necessary for loss function
# can be flexibly controlled within models for easier extensibility
# targets = batch.get('target_output')
# tgt_mask = targets.ne(onmt.constants.PAD)
outputs = self.model(batch)
gate_padded = batch.get('gate_padded')
if self.opt.n_frames_per_step > 1:
slice = torch.arange(0, gate_padded.size(1), self.opt.n_frames_per_step)
gate_padded = gate_padded[:, slice]
src_org = batch.get('source_org')
src_org = src_org.narrow(2, 1, src_org.size(2) - 1)
target = [src_org.permute(1, 2, 0).contiguous(), gate_padded]
loss = self.loss_function(outputs, target)
batch_size = batch.size
loss_data = loss.data.item()
# a little trick to avoid gradient overflow with fp16
full_loss = loss
optimizer = self.optim.optimizer
# When the batch size is large, each gradient step is very easy to explode on fp16
# Normalizing the loss to grad scaler ensures this will not happen
full_loss.div_(grad_scaler)
if self.cuda:
with amp.scale_loss(full_loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
full_loss.backward()
del outputs
except RuntimeError as e:
if 'out of memory' in str(e):
                    print('| WARNING: ran out of memory on GPU, skipping batch')
oom = True
torch.cuda.empty_cache()
loss = 0
if opt.streaming: # reset stream in this case ...
streaming_state = self.model.init_stream()
else:
raise e
if loss != loss:
# catching NAN problem
oom = True
self.model.zero_grad()
self.optim.zero_grad()
nan_counter = nan_counter + 1
print("Warning!!! Loss is Nan")
if nan_counter >= 15:
raise ValueError("Training stopped because of multiple NaN occurence. "
"For ASR, using the Relative Transformer is more stable and recommended.")
else:
nan_counter = 0
if not oom:
src_size = batch.src_size
counter = counter + 1
# We only update the parameters after getting gradients from n mini-batches
update_flag = False
if counter >= opt.update_frequency > 0:
update_flag = True
elif i == n_samples: # update for the last minibatch
update_flag = True
if update_flag:
# accumulated gradient case, in this case the update frequency
if (counter == 1 and self.opt.update_frequency != 1) or counter > 1:
grad_denom = 1 / grad_scaler
# if self.opt.normalize_gradient:
# grad_denom = num_accumulated_words * grad_denom
else:
grad_denom = 1.0
# When we accumulate the gradients, each gradient is already normalized by a constant grad_scaler
normalize_gradients(amp.master_params(optimizer), grad_denom)
# Update the parameters.
if self.opt.max_grad_norm > 0:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), self.opt.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
self.model.zero_grad()
counter = 0
# num_accumulated_words = 0
grad_scaler = -1
num_updates = self.optim._step
if opt.save_every > 0 and num_updates % opt.save_every == -1 % opt.save_every:
valid_loss = self.eval(self.valid_data)
valid_ppl = math.exp(min(valid_loss, 100))
print('Validation perplexity: %g' % valid_ppl)
ep = float(epoch) - 1. + ((float(i) + 1.) / n_samples)
self.save(ep, valid_ppl, itr=data_iterator)
report_loss += loss_data
# report_tgt_words += num_words
num_accumulated_sents += batch_size
report_sent += batch_size
                total_frames += src_size
report_tgt_frames += src_size
total_loss += loss_data
optim = self.optim
# batch_efficiency = total_non_pads / total_tokens
if i == 0 or (i % opt.log_interval == -1 % opt.log_interval):
log_string = ("Epoch %2d, %5d/%5d; ; loss : %6.2f ; " %
(epoch, i + 1, len(data_iterator),
report_loss ))
log_string += ("lr: %.7f ; updates: %7d; " %
(optim.getLearningRate(),
optim._step))
#
log_string += ("%5.0f src tok/s " %
(report_tgt_frames / (time.time() - start)))
log_string += ("%s elapsed" %
str(datetime.timedelta(seconds=int(time.time() - self.start_time))))
print(log_string)
report_loss = 0
report_tgt_frames = 0
report_sent = 0
start = time.time()
i = i + 1
return total_loss / n_samples * 100
class XETrainer(BaseTrainer):
def __init__(self, model, loss_function, train_data, valid_data, dicts, opt, setup_optimizer=True):
super().__init__(model, loss_function, train_data, valid_data, dicts, opt)
if opt.lfv_multilingual or opt.lid_loss:
from onmt.models.speech_recognizer.lid_loss import CrossEntropyLIDLoss
lid_loss = CrossEntropyLIDLoss(opt.n_languages, opt.label_smoothing, opt.fast_xentropy)
self.loss_function.add_loss_function(lid_loss, 'lid_loss')
self.n_gpus = len(self.opt.gpus)
if self.cuda:
torch.cuda.set_device(self.opt.gpus[0])
if self.opt.seed >= 0:
torch.manual_seed(self.opt.seed)
self.loss_function = self.loss_function.cuda()
self.model = self.model.cuda()
if setup_optimizer:
self.optim = onmt.Optim(opt)
self.optim.set_parameters(self.model.parameters())
if not self.opt.fp16:
opt_level = "O0"
keep_batchnorm_fp32 = False
elif self.opt.fp16_mixed:
opt_level = "O1"
keep_batchnorm_fp32 = None
else:
opt_level = "O2"
keep_batchnorm_fp32 = False
if self.cuda:
# print(234)
self.model, self.optim.optimizer = amp.initialize(self.model,
self.optim.optimizer,
opt_level=opt_level,
keep_batchnorm_fp32=keep_batchnorm_fp32,
loss_scale="dynamic",
verbosity=1 if self.opt.verbose else 0)
# An ugly hack to switch between align right and align left
if hasattr(self.model, 'relative'):
if self.model.relative:
self.train_data.src_align_right = True
self.train_data.tgt_align_right = False
self.valid_data.src_align_right = True
self.valid_data.tgt_align_right = False
def save(self, epoch, valid_ppl, itr=None):
opt = self.opt
model = self.model
dicts = self.dicts
model_state_dict = self.model.state_dict()
optim_state_dict = self.optim.state_dict()
if itr:
itr_state_dict = itr.state_dict()
else:
itr_state_dict = None
# drop a checkpoint
checkpoint = {
'model': model_state_dict,
'dicts': dicts,
'opt': opt,
'epoch': epoch,
'itr': itr_state_dict,
'optim': optim_state_dict,
'amp': amp.state_dict()
}
file_name = '%s_ppl_%.6f_e%.2f.pt' % (opt.save_model, valid_ppl, epoch)
print('Writing to %s' % file_name)
torch.save(checkpoint, file_name)
# check the save directory here
checkpoint_dir = os.path.dirname(opt.save_model)
existed_save_files = checkpoint_paths(checkpoint_dir)
for save_file in existed_save_files[opt.keep_save_files:]:
print(" * Deleting old save file %s ...." % save_file)
os.remove(save_file)
def eval(self, data):
total_loss = 0
total_words = 0
opt = self.opt
self.model.eval()
self.loss_function.eval()
self.model.reset_states()
# the data iterator creates an epoch iterator
data_iterator = generate_data_iterator(data, seed=self.opt.seed,
num_workers=opt.num_workers, epoch=1, buffer_size=opt.buffer_size)
epoch_iterator = data_iterator.next_epoch_itr(False, pin_memory=False)
if opt.streaming:
streaming_state = self.model.init_stream()
else:
streaming_state = None
""" PyTorch semantics: save space by not creating gradients """
data_size = len(epoch_iterator)
i = 0
with torch.no_grad():
# for i in range(len()):
while not data_iterator.end_of_epoch():
# batch = data.next()[0]
batch = next(epoch_iterator)
if isinstance(batch, list):
batch = batch[0]
batch = rewrap(batch)
if self.cuda:
batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
""" outputs can be either
hidden states from decoder or
prob distribution from decoder generator
"""
targets = batch.get('target_output')
tgt_mask = targets.ne(onmt.constants.PAD)
outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
mirror=opt.mirror_loss, streaming_state=streaming_state, nce=opt.nce)
if opt.streaming:
streaming_state = outputs['streaming_state']
outputs['tgt_mask'] = tgt_mask
loss_dict = self.loss_function(outputs, targets, model=self.model, eval=True)
loss_data = loss_dict['data']
total_loss += loss_data
total_words += batch.tgt_size
i = i + 1
self.model.train()
self.loss_function.train()
return total_loss / total_words
def train_epoch(self, epoch, resume=False, itr_progress=None):
global rec_ppl
opt = self.opt
train_data = self.train_data
streaming = opt.streaming
self.model.train()
self.loss_function.train()
# Clear the gradients of the model
# self.runner.zero_grad()
self.model.zero_grad()
self.model.reset_states()
dataset = train_data
data_iterator = generate_data_iterator(dataset, seed=self.opt.seed, num_workers=opt.num_workers,
epoch=epoch, buffer_size=opt.buffer_size)
if resume:
data_iterator.load_state_dict(itr_progress)
epoch_iterator = data_iterator.next_epoch_itr(not streaming, pin_memory=opt.pin_memory)
total_tokens, total_loss, total_words = 0, 0, 0
total_non_pads = 0
report_loss, report_tgt_words = 0, 0
report_src_words = 0
report_rec_loss, report_rev_loss, report_mirror_loss = 0, 0, 0
start = time.time()
n_samples = len(epoch_iterator)
counter = 0
num_accumulated_words = 0
num_accumulated_sents = 0
grad_scaler = -1
nan = False
nan_counter = 0
if opt.streaming:
streaming_state = self.model.init_stream()
else:
streaming_state = None
i = data_iterator.iterations_in_epoch if not isinstance(train_data, list) else epoch_iterator.n_yielded
while not data_iterator.end_of_epoch():
curriculum = (epoch < opt.curriculum)
# this batch generator is not very clean atm
batch = next(epoch_iterator)
if isinstance(batch, list) and self.n_gpus == 1:
batch = batch[0]
batch = rewrap(batch)
if grad_scaler == -1:
grad_scaler = 1 # if self.opt.update_frequency > 1 else batch.tgt_size
if self.cuda:
batch.cuda(fp16=self.opt.fp16 and not self.opt.fp16_mixed)
# if opt.streaming:
# if train_data.is_new_stream():
# streaming_state = self.model.init_stream()
# else:
# streaming_state = None
oom = False
try:
# outputs is a dictionary containing keys/values necessary for loss function
# can be flexibly controlled within models for easier extensibility
targets = batch.get('target_output')
tgt_mask = targets.ne(onmt.constants.PAD)
outputs = self.model(batch, streaming=opt.streaming, target_mask=tgt_mask,
zero_encoder=opt.zero_encoder,
mirror=opt.mirror_loss, streaming_state=streaming_state,
nce=opt.nce)
# print("time " + str(time.time() - start_time_t))
batch_size = batch.size
outputs['tgt_mask'] = tgt_mask
loss_dict = self.loss_function(outputs, targets, model=self.model)
loss_data = loss_dict['data']
loss = loss_dict['loss'] # a little trick to avoid gradient overflow with fp16
full_loss = loss
if opt.mirror_loss:
rev_loss = loss_dict['rev_loss']
rev_loss_data = loss_dict['rev_loss_data']
mirror_loss = loss_dict['mirror_loss']
full_loss = full_loss + rev_loss + mirror_loss
mirror_loss_data = loss_dict['mirror_loss'].item()
else:
rev_loss_data = None
mirror_loss_data = 0
# reconstruction loss
if opt.reconstruct:
rec_loss = loss_dict['rec_loss']
rec_loss = rec_loss
full_loss = full_loss + rec_loss
rec_loss_data = loss_dict['rec_loss_data']
else:
rec_loss_data = None
if opt.lfv_multilingual or opt.lid_loss:
lid_logits = outputs['lid_logits']
lid_labels = batch.get('target_lang')
lid_loss_function = self.loss_function.get_loss_function('lid_loss')
                    lid_loss = lid_loss_function([lid_logits.unsqueeze(0)], lid_labels)
full_loss = full_loss + lid_loss
optimizer = self.optim.optimizer
# When the batch size is large, each gradient step is very easy to explode on fp16
# Normalizing the loss to grad scaler ensures this will not happen
full_loss.div_(grad_scaler)
if self.cuda:
with amp.scale_loss(full_loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
full_loss.backward()
del outputs
except RuntimeError as e:
if 'out of memory' in str(e):
                    print('| WARNING: ran out of memory on GPU, skipping batch')
oom = True
torch.cuda.empty_cache()
loss = 0
if opt.streaming: # reset stream in this case ...
streaming_state = self.model.init_stream()
else:
raise e
if loss != loss:
# catching NAN problem
oom = True
self.model.zero_grad()
self.optim.zero_grad()
num_accumulated_words = 0
num_accumulated_sents = 0
nan_counter = nan_counter + 1
print("Warning!!! Loss is Nan")
if nan_counter >= 15:
raise ValueError("Training stopped because of multiple NaN occurence. "
"For ASR, using the Relative Transformer is more stable and recommended.")
else:
nan_counter = 0
if not oom:
src_size = batch.src_size
tgt_size = batch.tgt_size
counter = counter + 1
num_accumulated_words += tgt_size
num_accumulated_sents += batch_size
# We only update the parameters after getting gradients from n mini-batches
update_flag = False
if counter >= opt.update_frequency > 0:
update_flag = True
elif 0 < opt.batch_size_update <= num_accumulated_words:
update_flag = True
elif i == n_samples: # update for the last minibatch
update_flag = True
if update_flag:
# accumulated gradient case, in this case the update frequency
if (counter == 1 and self.opt.update_frequency != 1) or counter > 1:
grad_denom = 1 / grad_scaler
if self.opt.normalize_gradient:
grad_denom = num_accumulated_words * grad_denom
else:
grad_denom = 1
# When we accumulate the gradients, each gradient is already normalized by a constant grad_scaler
normalize_gradients(amp.master_params(optimizer), grad_denom)
# Update the parameters.
if self.opt.max_grad_norm > 0:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), self.opt.max_grad_norm)
self.optim.step()
self.optim.zero_grad()
self.model.zero_grad()
counter = 0
num_accumulated_words = 0
num_accumulated_sents = 0
grad_scaler = -1
num_updates = self.optim._step
if opt.save_every > 0 and num_updates % opt.save_every == -1 % opt.save_every:
valid_loss = self.eval(self.valid_data)
valid_ppl = math.exp(min(valid_loss, 100))
print('Validation perplexity: %g' % valid_ppl)
ep = float(epoch) - 1. + ((float(i) + 1.) / n_samples)
self.save(ep, valid_ppl, itr=data_iterator)
num_words = tgt_size
report_loss += loss_data
report_tgt_words += num_words
report_src_words += src_size
total_loss += loss_data
total_words += num_words
total_tokens += batch.get('target_output').nelement()
total_non_pads += batch.get('target_output').ne(onmt.constants.PAD).sum().item()
optim = self.optim
batch_efficiency = total_non_pads / total_tokens
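                # fraction of non-padding tokens among all target positions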
if opt.reconstruct:
report_rec_loss += rec_loss_data
if opt.mirror_loss:
report_rev_loss += rev_loss_data
report_mirror_loss += mirror_loss_data
if i == 0 or (i % opt.log_interval == -1 % opt.log_interval):
log_string = ("Epoch %2d, %5d/%5d; ; ppl: %6.2f ; " %
(epoch, i + 1, len(data_iterator),
math.exp(report_loss / report_tgt_words)))
if opt.reconstruct:
rec_ppl = math.exp(report_rec_loss / report_src_words.item())
log_string += (" rec_ppl: %6.2f ; " % rec_ppl)
if opt.mirror_loss:
rev_ppl = math.exp(report_rev_loss / report_tgt_words)
log_string += (" rev_ppl: %6.2f ; " % rev_ppl)
# mirror loss per word
log_string += (" mir_loss: %6.2f ; " % (report_mirror_loss / report_tgt_words))
log_string += ("lr: %.7f ; updates: %7d; " %
(optim.getLearningRate(),
optim._step))
log_string += ("%5.0f src tok/s; %5.0f tgt tok/s; " %
(report_src_words / (time.time() - start),
report_tgt_words / (time.time() - start)))
log_string += ("%s elapsed" %
str(datetime.timedelta(seconds=int(time.time() - self.start_time))))
print(log_string)
report_loss = 0
report_tgt_words, report_src_words = 0, 0
report_rec_loss, report_rev_loss, report_mirror_loss = 0, 0, 0
start = time.time()
i = i + 1
return total_loss / total_words
# def run(self, save_file=None):
def run(self, checkpoint=None):
opt = self.opt
model = self.model
optim = self.optim
if checkpoint is not None:
self.model.load_state_dict(checkpoint['model'])
prec_opt = checkpoint['opt'] if 'opt' in checkpoint else None
if not opt.reset_optim:
print("* Loading optimizer states ... ")
self.optim.load_state_dict(checkpoint['optim'])
if prec_opt is not None and hasattr(prec_opt, "fp16_mixed"):
# Only load amp information if the mode is the same
# Maybe its better to change between optimization mode?
if opt.fp16_mixed == prec_opt.fp16_mixed and opt.fp16 == prec_opt.fp16:
if 'amp' in checkpoint:
amp.load_state_dict(checkpoint['amp'])
# Only load the progress when we use the same optimizer
if 'itr' in checkpoint:
itr_progress = checkpoint['itr']
else:
itr_progress = None
resume = True
start_epoch = checkpoint['epoch'] if 'epoch' in checkpoint else 1
if start_epoch is None:
start_epoch = 1
else:
itr_progress = None
resume = False
start_epoch = 1
del checkpoint['model']
del checkpoint['optim']
del checkpoint
else:
itr_progress = None
print('Initializing model parameters')
init_model_parameters(model, opt)
resume = False
start_epoch = 1
if opt.load_encoder_from:
self.load_encoder_weight(opt.load_encoder_from)
if opt.load_decoder_from:
self.load_decoder_weight(opt.load_decoder_from)
# if we are on a GPU: warm up the memory allocator
self.start_time = time.time()
if self.cuda:
self.warm_up()
valid_loss = self.eval(self.valid_data)
valid_ppl = math.exp(min(valid_loss, 100))
print('Validation perplexity: %g' % valid_ppl)
# valid_loss = self.train_epoch(0)
# valid_ppl = math.exp(min(valid_loss, 100))
#
# print('Validation perplexity: %g' % valid_ppl)
for epoch in range(start_epoch, start_epoch + opt.epochs):
print('')
# (1) train for one epoch on the training set
train_loss = self.train_epoch(epoch, resume=resume, itr_progress=itr_progress)
train_ppl = math.exp(min(train_loss, 100))
print('Train perplexity: %g' % train_ppl)
# (2) evaluate on the validation set
valid_loss = self.eval(self.valid_data)
valid_ppl = math.exp(min(valid_loss, 100))
print('Validation perplexity: %g' % valid_ppl)
self.save(epoch, valid_ppl)
itr_progress = None
resume = False
| [
"quanpn90@gmail.com"
] | quanpn90@gmail.com |
e47dcd7a4fc5c0e91cf776cb94d341b8a1c11633 | 3ededad93e7e3cbcea4baad101812187fc449d89 | /torch_geometric_temporal/data/discrete/static_graph_discrete_signal.py | 82751531b4259edae4213d0b08979778a0133f32 | [
"MIT"
] | permissive | LFrancesco/pytorch_geometric_temporal | 04612030d3ef3ef34f856dd2c03a57d006287e0d | 0964515a6041ce0cceb12e36ed640df22c046b4d | refs/heads/master | 2023-03-27T13:51:37.134564 | 2021-03-20T14:54:19 | 2021-03-20T14:54:19 | 349,710,635 | 0 | 0 | MIT | 2021-03-20T14:54:20 | 2021-03-20T11:51:32 | null | UTF-8 | Python | false | false | 3,227 | py | import torch
import numpy as np
from typing import List, Union
from torch_geometric.data import Data
Edge_Index = Union[np.ndarray, None]
Edge_Weight = Union[np.ndarray, None]
Features = List[Union[np.ndarray, None]]
Targets = List[Union[np.ndarray, None]]
class StaticGraphDiscreteSignal(object):
r""" A data iterator object to contain a static graph with a dynamically
changing discrete temporal feature set (multiple signals). The node labels
(target) are also temporal. The iterator returns a single discrete temporal
snapshot for a time period (e.g. day or week). This single temporal snapshot
is a Pytorch Geometric Data object. Between two temporal snapshots the feature
matrix and the target matrix might change. However, the underlying graph is
the same.
Args:
edge_index (Numpy array): Index tensor of edges.
edge_weight (Numpy array): Edge weight tensor.
features (List of Numpy arrays): List of node feature tensors.
targets (List of Numpy arrays): List of node label (target) tensors.
"""
def __init__(self, edge_index: Edge_Index, edge_weight: Edge_Weight,
features: Features, targets: Targets):
self.edge_index = edge_index
self.edge_weight = edge_weight
self.features = features
self.targets = targets
self._check_temporal_consistency()
self._set_snapshot_count()
def _check_temporal_consistency(self):
assert len(self.features) == len(self.targets), "Temporal dimension inconsistency."
def _set_snapshot_count(self):
self.snapshot_count = len(self.features)
def _get_edge_index(self):
if self.edge_index is None:
return self.edge_index
else:
return torch.LongTensor(self.edge_index)
def _get_edge_weight(self):
if self.edge_weight is None:
return self.edge_weight
else:
return torch.FloatTensor(self.edge_weight)
def _get_features(self):
if self.features[self.t] is None:
return self.features[self.t]
else:
return torch.FloatTensor(self.features[self.t])
def _get_target(self):
if self.targets[self.t] is None:
return self.targets[self.t]
else:
if self.targets[self.t].dtype.kind == 'i':
return torch.LongTensor(self.targets[self.t])
elif self.targets[self.t].dtype.kind == 'f':
return torch.FloatTensor(self.targets[self.t])
def _get_snapshot(self):
x = self._get_features()
edge_index = self._get_edge_index()
edge_weight = self._get_edge_weight()
y = self._get_target()
snapshot = Data(x = x,
edge_index = edge_index,
edge_attr = edge_weight,
y = y)
return snapshot
def __next__(self):
if self.t < len(self.features):
snapshot = self._get_snapshot()
self.t = self.t + 1
return snapshot
else:
self.t = 0
raise StopIteration
def __iter__(self):
self.t = 0
return self
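
# Minimal usage sketch (hypothetical model and pre-built tensors):
#   signal = StaticGraphDiscreteSignal(edge_index, edge_weight, features, targets)
#   for snapshot in signal:
#       out = model(snapshot.x, snapshot.edge_index, snapshot.edge_attr)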
| [
"benedek.rozemberczki@gmail.com"
] | benedek.rozemberczki@gmail.com |
702bc20295f20d7d22a54fc0691eb5d930eb225d | 73b21ee53c73f37f0295534e21da5a83e77b328d | /ML/day3/test6.py | 04ecbd0b6555569bc336692db2d58679c08ebdc1 | [] | no_license | tauovir/Nielit | 2f5e0d49a5c20e90de8a74644a8a0ed1da07dd0f | 225351b770b88b2655431ab3ec04533e30b36057 | refs/heads/master | 2020-04-05T08:00:45.199440 | 2019-05-29T10:43:54 | 2019-05-29T10:43:54 | 156,697,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score,confusion_matrix,classification_report
import matplotlib.pyplot as plt
iris = load_iris()
#======Load Data=======
#print iris.data
#Load Target Data======
#print iris.target
X = iris.data
y = iris.target
#print y
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
#print X_train.shape
#print X_test.shape
#Knn Classifier
knn = KNeighborsClassifier(n_neighbors = 1)
knn.fit(X_train,y_train)
p = knn.predict(X_test)
#==Print prediction
#print p
#===Print Confusion matrix
#print confusion_matrix(y_test,p)
#print accuracy
#print accuracy_score(y_test,p)
#=====Plot graph=======
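# 10-fold cross-validation of the 1-NN classifier on the full dataset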
score = cross_val_score(knn, X,y,cv = 10)
print"Report", classification_report(y_test, p)
print "score=",score
| [
"taukir707@gmail.com"
] | taukir707@gmail.com |
b3a53c0c17fa29b1ed7afe845c1b6db30deba156 | d0f2f7f220c825d827643ca81a08a23cfb871965 | /backend/code/alembic/versions/32038b09fa26_new_initial_commit.py | 0631ea23ef8f36803d341f6925285a39fc1171d9 | [] | no_license | socek/rankor | 7e5e73f8f13bc3d12bd1b18ef01bef04f8f38f0a | eaf5002dd1e852895670517a8cdcb07bf7c69f66 | refs/heads/master | 2021-04-12T07:52:20.341699 | 2018-06-03T20:07:17 | 2018-06-03T20:07:17 | 125,769,351 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,355 | py | """new initial commit
Revision ID: 32038b09fa26
Revises:
Create Date: 2018-05-13 20:36:23.880081
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '32038b09fa26'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('name', sa.String(), nullable=True),
sa.Column('email', sa.String(), nullable=False),
sa.Column('is_admin', sa.Boolean(), nullable=False),
sa.Column('password', sa.Binary(), nullable=True),
sa.PrimaryKeyConstraint('id', name=op.f('pk_users'))
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_table('contests',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('name', sa.String(), nullable=False),
sa.Column('owner_id', postgresql.UUID(), nullable=False),
sa.ForeignKeyConstraint(['owner_id'], ['users.id'], name=op.f('fk_contests_owner_id_users')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_contests'))
)
op.create_table('games',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('name', sa.String(), nullable=False),
sa.Column('contest_id', postgresql.UUID(), nullable=False),
sa.Column('owner_id', postgresql.UUID(), nullable=False),
sa.ForeignKeyConstraint(['contest_id'], ['contests.id'], name=op.f('fk_games_contest_id_contests')),
sa.ForeignKeyConstraint(['owner_id'], ['users.id'], name=op.f('fk_games_owner_id_users')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_games'))
)
op.create_table('questions',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('name', sa.String(), nullable=False),
sa.Column('description', sa.Text(), nullable=False),
sa.Column('category', sa.String(), nullable=True),
sa.Column('contest_id', postgresql.UUID(), nullable=False),
sa.ForeignKeyConstraint(['contest_id'], ['contests.id'], name=op.f('fk_questions_contest_id_contests')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_questions'))
)
op.create_index(op.f('ix_questions_category'), 'questions', ['category'], unique=False)
op.create_table('answers',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('name', sa.String(), nullable=False),
sa.Column('is_correct', sa.Boolean(), nullable=False),
sa.Column('question_id', postgresql.UUID(), nullable=False),
sa.ForeignKeyConstraint(['question_id'], ['questions.id'], name=op.f('fk_answers_question_id_questions')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_answers'))
)
op.create_table('teams',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('name', sa.String(), nullable=False),
sa.Column('game_id', postgresql.UUID(), nullable=False),
sa.ForeignKeyConstraint(['game_id'], ['games.id'], name=op.f('fk_teams_game_id_games')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_teams'))
)
op.create_table('game_answers',
sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('game_id', postgresql.UUID(), nullable=False),
sa.Column('question_id', postgresql.UUID(), nullable=False),
sa.Column('answer_id', postgresql.UUID(), nullable=True),
sa.Column('team_id', postgresql.UUID(), nullable=True),
sa.ForeignKeyConstraint(['answer_id'], ['answers.id'], name=op.f('fk_game_answers_answer_id_answers')),
sa.ForeignKeyConstraint(['game_id'], ['games.id'], name=op.f('fk_game_answers_game_id_games')),
sa.ForeignKeyConstraint(['question_id'], ['questions.id'], name=op.f('fk_game_answers_question_id_questions')),
sa.ForeignKeyConstraint(['team_id'], ['teams.id'], name=op.f('fk_game_answers_team_id_teams')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_game_answers'))
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('game_answers')
op.drop_table('teams')
op.drop_table('answers')
op.drop_index(op.f('ix_questions_category'), table_name='questions')
op.drop_table('questions')
op.drop_table('games')
op.drop_table('contests')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
# ### end Alembic commands ###
| [
"d.dlugajczyk@clearcode.cc"
] | d.dlugajczyk@clearcode.cc |
2fa9b51aec2e39ae9b67da9ffec162473eb20bb7 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/4496/codes/1712_2507.py | 1c645da33dc35399c4e2e0dc8179eaa9c5bcc4e3 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | qip = int(input("qtd inicial de pirarucus: "))
pc = int(input("percentual de creescimento: "))
t = 0
while(qip<8000 and qip>0):
qv=int(input("retirados para venda: "))
qip= qip+((qip*pc)/100)-qv
t=t+1
if(qip>8000):
print("MAXIMO")
else:
print("ZERO")
print(t) | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
a68dd26e9ee124b1f7779140374b4b4cc7cac486 | d83118503614bb83ad8edb72dda7f449a1226f8b | /src/dprj/platinumegg/app/cabaret/views/mgr/model_edit/card_level_exp.py | 00ad22f0b5e7b05bfe2992f08674a0fae99eae83 | [] | no_license | hitandaway100/caba | 686fe4390e182e158cd9714c90024a082deb8c69 | 492bf477ac00c380f2b2758c86b46aa7e58bbad9 | refs/heads/master | 2021-08-23T05:59:28.910129 | 2017-12-03T19:03:15 | 2017-12-03T19:03:15 | 112,512,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,516 | py | # -*- coding: utf-8 -*-
from platinumegg.app.cabaret.views.mgr.model_edit import AdminModelEditHandler,\
AppModelForm, ModelEditValidError
from platinumegg.app.cabaret.models.CardLevelExp import CardLevelExpMster
from defines import Defines
class Handler(AdminModelEditHandler):
"""マスターデータの操作.
"""
class Form(AppModelForm):
class Meta:
model = CardLevelExpMster
exclude = (
Defines.MASTER_EDITTIME_COLUMN,
)
def setting_property(self):
        self.MODEL_LABEL = u'Card EXP table'
def valid_write_end(self):
master_all = {}
for master in CardLevelExpMster.fetchValues():
master_all[master.level] = master
errors = []
for master in master_all.values():
if master.level == 1:
if master.exp != 0:
                    errors.append(u'Level 1 must have its exp set to 0, level=%d' % master.level)
continue
pre = master_all.get(master.level - 1)
if pre is None:
                errors.append(u'A level is missing, level=%d' % (master.level - 1))
elif master.exp <= pre.exp:
                errors.append(u'Exp is not greater than that of the previous level, level=%d' % master.level)
if errors:
raise ModelEditValidError('<br />'.join(errors))
def main(request):
return Handler.run(request)
| [
"shangye@mail.com"
] | shangye@mail.com |
f2da2236b2af5e8cc428d8bc6dac3e08b19e574c | 4f4776eb69cbea9ee1c87a22732c5d778855c83a | /leetcode/Set_Matrix_Zeroes.py | a92db433c9375325332006cef0655b1f3ede7dd5 | [] | no_license | k4u5h4L/algorithms | 4a0e694109b8aadd0e3b7a66d4c20692ecdef343 | b66f43354792b1a6facff90990a7685f5ed36a68 | refs/heads/main | 2023-08-19T13:13:14.931456 | 2021-10-05T13:01:58 | 2021-10-05T13:01:58 | 383,174,341 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | '''
Set Matrix Zeroes
Medium
Given an m x n integer matrix matrix, if an element is 0, set its entire row and column to 0's, and return the matrix.
You must do it in place.
Example 1:
Input: matrix = [[1,1,1],[1,0,1],[1,1,1]]
Output: [[1,0,1],[0,0,0],[1,0,1]]
Example 2:
Input: matrix = [[0,1,2,0],[3,4,5,2],[1,3,1,5]]
Output: [[0,0,0,0],[0,4,5,0],[0,3,1,0]]
'''
class Solution:
def setZeroes(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
res = []
for i in range(len(matrix)):
res.append(matrix[i].copy())
for i in range(len(res)):
for j in range(len(res[i])):
if res[i][j] == 0:
for k in range(len(res)):
matrix[k][j] = 0
for k in range(len(res[i])):
matrix[i][k] = 0
| [
"noreply@github.com"
] | k4u5h4L.noreply@github.com |
3777e1676ddc316177897ba2a27039f169052c00 | f3fdfdf714e23ef69c9ce6631c188f1ebc328546 | /spider/utilities/util_urlfilter.py | 6c9b9fbf5967dd5550b517c4295741067f36b2e4 | [
"BSD-2-Clause"
] | permissive | liujie40/PSpider | bf2a134812ce81357588b260cee9e3d039c73df0 | f1162c777ec87250edfd2532882eb15b8d712e6a | refs/heads/master | 2022-02-21T18:20:41.468852 | 2022-01-19T06:55:54 | 2022-01-19T06:56:00 | 112,547,656 | 1 | 0 | null | 2017-11-30T01:17:47 | 2017-11-30T01:17:47 | null | UTF-8 | Python | false | false | 1,175 | py | # _*_ coding: utf-8 _*_
"""
util_urlfilter.py by xianhu
"""
from .util_config import CONFIG_RE_URL_LEGAL, CONFIG_RE_URL_ILLEGAL
class UrlFilter(object):
"""
class of UrlFilter, to filter urls by regexs and set
"""
def __init__(self, black_patterns=(CONFIG_RE_URL_ILLEGAL,), white_patterns=(CONFIG_RE_URL_LEGAL,)):
"""
constructor
"""
self._url_set = set()
self._re_black_list = black_patterns
self._re_white_list = white_patterns
return
def check(self, url):
"""
check the url based on re_black_list and re_white_list
"""
for re_black in self._re_black_list:
if re_black.search(url):
return False
for re_white in self._re_white_list:
if re_white.search(url):
return True
return False if self._re_white_list else True
def check_and_add(self, url):
"""
check whether url is in set, and add url to it
"""
result = False
if self.check(url):
result = (url not in self._url_set)
self._url_set.add(url)
return result
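
# Usage sketch (results depend on the configured black/white regex patterns):
#   uf = UrlFilter()
#   uf.check_and_add("http://example.com/")  # True the first time the url is seen
#   uf.check_and_add("http://example.com/")  # False afterwards (already in the set)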
| [
"qixianhu@qq.com"
] | qixianhu@qq.com |
6a86f70dfd375605c250defd38f4fa8d093d11e7 | 0bb8296a1bfdba0c264ad7d764482dd3c724563a | /torcms/core/privilege.py | f0eff529668ff0889c7c2885bf5c165386453b1d | [
"MIT"
] | permissive | dlnan/TorCMS | c489a6ab573815f288d11efe4738f4c23323c9ea | 8bf71beb98d867cde5ef3aa749adae61f66356be | refs/heads/master | 2023-06-22T17:23:45.281281 | 2021-07-13T14:07:05 | 2021-07-13T14:07:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,998 | py | # -*- coding:utf-8 -*-
'''
Handle privileges for add, delete, modify and query (CRUD) operations.
'''
from config import ROLE_CFG
def is_prived(usr_rule, def_rule):
'''
    Compare a user's role string against a required role string.
'''
for iii in range(4):
if def_rule[iii] == '0':
continue
if usr_rule[iii] >= def_rule[iii]:
return True
return False
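# e.g. is_prived('2000', '1000') -> True  (position 0 meets the required level)
#      is_prived('0100', '1000') -> False (no non-zero requirement is satisfied)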
def auth_view(method):
'''
role for view.
'''
def wrapper(self, *args, **kwargs):
'''
wrapper.
'''
if ROLE_CFG['view'] == '':
return method(self, *args, **kwargs)
elif self.current_user:
if is_prived(self.userinfo.role, ROLE_CFG['view']):
return method(self, *args, **kwargs)
else:
kwd = {
'info': 'No role',
}
self.render('misc/html/404.html',
kwd=kwd,
userinfo=self.userinfo)
else:
kwd = {
'info': 'No role',
}
self.render('misc/html/404.html', kwd=kwd, userinfo=self.userinfo)
return wrapper
def auth_add(method):
'''
role for add.
'''
def wrapper(self, *args, **kwargs):
'''
wrapper.
'''
if self.current_user:
if is_prived(self.userinfo.role, ROLE_CFG['add']):
return method(self, *args, **kwargs)
else:
kwd = {
'info': 'No role',
}
self.render('misc/html/404.html',
kwd=kwd,
userinfo=self.userinfo)
else:
kwd = {
'info': 'No role',
}
self.render('misc/html/404.html', kwd=kwd, userinfo=self.userinfo)
return wrapper
def auth_edit(method):
'''
role for edit.
'''
def wrapper(self, *args, **kwargs):
'''
wrapper.
'''
if self.current_user:
if is_prived(self.userinfo.role, ROLE_CFG['edit']):
return method(self, *args, **kwargs)
else:
kwd = {
'info': 'No role',
}
self.render('misc/html/404.html',
kwd=kwd,
userinfo=self.userinfo)
else:
kwd = {
'info': 'No role',
}
self.render('misc/html/404.html', kwd=kwd, userinfo=self.userinfo)
return wrapper
def auth_delete(method):
'''
role for delete.
'''
def wrapper(self, *args, **kwargs):
'''
wrapper.
'''
if self.current_user:
if is_prived(self.userinfo.role, ROLE_CFG['delete']):
return method(self, *args, **kwargs)
else:
kwd = {
'info': 'No role',
}
self.render('misc/html/404.html',
kwd=kwd,
userinfo=self.userinfo)
else:
kwd = {
'info': 'No role',
}
self.render('misc/html/404.html', kwd=kwd, userinfo=self.userinfo)
return wrapper
def auth_admin(method):
'''
role for admin.
'''
def wrapper(self, *args, **kwargs):
'''
wrapper.
'''
if self.current_user:
if is_prived(self.userinfo.role, ROLE_CFG['admin']):
return method(self, *args, **kwargs)
else:
kwd = {
'info': 'No role',
}
self.render('misc/html/404.html',
kwd=kwd,
userinfo=self.userinfo)
else:
kwd = {
'info': 'No role',
}
self.render('misc/html/404.html', kwd=kwd, userinfo=self.userinfo)
return wrapper
| [
"bukun@osgeo.cn"
] | bukun@osgeo.cn |
af96329dc1386af2578b528f0c0b0d3626ce19a3 | cc1472e5c7409db30b3e17271d1bd123f1c8abb3 | /3.Lambda functions and error-handling/Error handling with try-except.py | c7fa20b1591f40f231ecbcbc56155681e66f5a24 | [] | no_license | Mat4wrk/Python-Data-Science-Toolbox-Part-1-Datacamp | 8fd4528397f6be1fdbca06be4b7d22630bbf2f7f | 5d8d1320263021e02341a392fd8766a201ad8a8b | refs/heads/main | 2023-03-04T16:18:56.631589 | 2021-02-13T11:29:12 | 2021-02-13T11:29:12 | 338,530,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | # Define shout_echo
def shout_echo(word1, echo=1):
"""Concatenate echo copies of word1 and three
exclamation marks at the end of the string."""
# Initialize empty strings: echo_word, shout_words
echo_word = ""
shout_words = ""
# Add exception handling with try-except
try:
# Concatenate echo copies of word1 using *: echo_word
echo_word = word1 * echo
# Concatenate '!!!' to echo_word: shout_words
shout_words = echo_word + '!!!'
except:
# Print error message
print("word1 must be a string and echo must be an integer.")
# Return shout_words
return shout_words
# Call shout_echo
shout_echo("particle", echo="accelerator")
| [
"noreply@github.com"
] | Mat4wrk.noreply@github.com |
efd5051555a1671ac149d0de3d0e37719c92eba1 | 60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24 | /IronPythonStubs/release/stubs.min/System/Windows/Media/Animation_parts/EasingMode.py | 21e313268898de24943c5fb9a0285de8c2aec25f | [
"MIT"
] | permissive | shnlmn/Rhino-Grasshopper-Scripts | a9411098c5d1bbc55feb782def565d535b27b709 | 0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823 | refs/heads/master | 2020-04-10T18:59:43.518140 | 2020-04-08T02:49:07 | 2020-04-08T02:49:07 | 161,219,695 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | class EasingMode(Enum,IComparable,IFormattable,IConvertible):
"""
Defines the modes in which classes derived from System.Windows.Media.Animation.EasingFunctionBase perform their easing.
enum EasingMode,values: EaseIn (0),EaseInOut (2),EaseOut (1)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
EaseIn=None
EaseInOut=None
EaseOut=None
value__=None
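# Illustrative only (not part of the generated stub): from IronPython/WPF code
# these values are typically assigned to an easing function, e.g.
#   ease = QuadraticEase()
#   ease.EasingMode = EasingMode.EaseInOut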
| [
"magnetscoil@gmail.com"
] | magnetscoil@gmail.com |
7082cde9cf2222c4bd04b2f2f4926e998a7aeb4b | e1d9fe469422519084fbe9d8cea4c75e6c0828a3 | /import.py | d3dc52f2f40bc4ff2f176db4003b5415f3c1e528 | [] | no_license | DerThorsten/theplantlist | 3c9a1b0d38d793aad3030c9ea6c4afbd7a6b7b71 | f8be85a3c0d0fb644d0049be5d998c3768ab50d1 | refs/heads/master | 2020-03-18T16:48:05.148593 | 2017-03-01T04:40:04 | 2017-03-01T04:40:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py |
import pandas as pd
import gc
import os
store = []
for file in os.listdir("."):
if file.endswith(".csv"):
data = pd.read_csv(file, dtype=pd.np.object)
store += data['Genus'].tolist()
gc.collect()
store = [x.lower() for x in store]  # materialize as a list so len() below works on Python 3
print(len(store), len(pd.np.unique(store)))
| [
"kislov@easydan.com"
] | kislov@easydan.com |
f04cda7b171c4dffe9a99f2306de25129b8f8c29 | c922252e52eea97b7557937a2558bbec664d2e07 | /newsfeed/wsgi.py | 8a3ec5a03277be826eece4cbf71cfac7dc213775 | [] | no_license | strar-buck/twitter_insta_news_feed | cfe1d4cd88b6dc938134d82ec0c960090390aee3 | 22858054ebf7821d4e5469163b14b542983fadff | refs/heads/master | 2021-06-11T01:26:30.671568 | 2017-02-01T09:11:09 | 2017-02-01T09:11:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | """
WSGI config for newsfeed project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "newsfeed.settings")
application = get_wsgi_application()
try:
from dj_static import Cling
application = Cling(get_wsgi_application())
except:
pass
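# Illustrative only: a WSGI server points at this module's `application`
# object, e.g. (hypothetical invocation): gunicorn newsfeed.wsgi:application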
| [
"djangopycon@gmail.com"
] | djangopycon@gmail.com |
4a8e1050ebad2ddf5f56ca78e92c3f67009533d6 | e0980f704a573894350e285f66f4cf390837238e | /.history/streams/blocks_20201019102609.py | 77ffd2756680b25d322e412add76538e7e19698c | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | from wagtail.core import blocks
class TitleBlock(blocks.StructBlock):
text = blocks.CharBlock(
required = True,
        help_text='Text to display',
)
class Meta:
template = 'streams/title_block.html'
        icon = 'edit'
        label = 'Title'
        help_text = 'Centered text to display on the page.'
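# Illustrative only (hypothetical page model): the block is typically mounted
# inside a StreamField, e.g.
#
# from wagtail.core.fields import StreamField
# class HomePage(Page):
#     body = StreamField([('title', TitleBlock())], blank=True)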
| [
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
0d97595c8e7c5dbc0fbc7767f79e518f3093d714 | 4970f0d662ca0d5d8c270b36e6858aa313c67dcc | /lk/classes/commands_config_keys.py | 51eeabf18415c7f2bd97d18edc0b53cbd5816584 | [] | no_license | eyalev/lk | 3168de19edf09f32b9277d3bf786d445855a6df1 | 59079e9071d7fbccc438e3ea3f9c8914f4767b78 | refs/heads/master | 2020-12-24T08:24:11.079329 | 2017-03-24T16:48:17 | 2017-03-24T16:48:17 | 40,901,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py |
commands_dir_key = 'commands_dir'
commands_key = 'commands'
relative_path_key = 'relative_path'
repo_url_key = 'repo_url'
file_name_key = 'file_name'
local_path_key = 'local_path'
local_repo_path_key = 'local_repo_path'
local_repo_command_path_key = 'local_repo_command_path'
last_push_timestamp_key = 'last_push_timestamp'
info_key = 'info'
| [
"eyalev@gmail.com"
] | eyalev@gmail.com |
e0b3f02bf9aab17200129623ef552108b30151b0 | a660f0674e816e7f97353c0eec7c9960eed36889 | /ipde/annular/modified_helmholtz.py | e46efe73a9c0e28e019350b7d5ce0fe3d4bfc96e | [
"Apache-2.0"
] | permissive | dbstein/ipde | da4642cbd26e4857c966123ed6654f38ddf5dff6 | a254bf128eba835284935290b8de09eb1374aa3f | refs/heads/master | 2022-07-22T14:29:47.420137 | 2022-07-13T18:30:10 | 2022-07-13T18:30:10 | 215,557,734 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,712 | py | import numpy as np
from ipde.utilities import fast_dot, concat, fast_LU_solve, mfft, mifft, fourier_multiply, fft, ifft, ffourier_multiply
import scipy as sp
import scipy.linalg
from personal_utilities.scipy_gmres import right_gmres, gmres
import numexpr as ne
from ipde.sparse_matvec import zSpMV_viaMKL
import numba
def scalar_laplacian(CO, AAG, RAG, uh):
R01 = CO.R01
R12 = CO.R12
D01 = CO.D01
D12 = CO.D12
iks = AAG.iks
psi1 = RAG.psi1
ipsi1 = RAG.inv_psi1
ipsi2 = RAG.inv_psi2
uh_t = R01.dot(uh*iks)
uh_tt = R12.dot(fourier_multiply(uh_t, ipsi1)*iks)
uh_rr = D12.dot(fourier_multiply(D01.dot(uh), psi1))
luh = fourier_multiply(uh_rr+uh_tt, ipsi2)
return luh
def fscalar_laplacian(CO, AAG, RAG, uh):
R01 = CO.R01
R12 = CO.R12
D01 = CO.D01
D12 = CO.D12
iks = AAG.iks
psi1 = RAG.psi1
ipsi1 = RAG.inv_psi1
ipsi2 = RAG.inv_psi2
uh_t = R01.dot(uh*iks)
uh_tt = R12.dot(ffourier_multiply(uh_t, ipsi1)*iks)
uh_rr = D12.dot(ffourier_multiply(D01.dot(uh), psi1))
luh = ffourier_multiply(uh_rr+uh_tt, ipsi2)
return luh
# custom numba function for preconditioner
# basically a batched matvec; but note the ordering for the input vector
# this conforms with how that vector is stored in the rest of the solve
@numba.njit(parallel=True, fastmath=True)
def batch_matvecT_par(A, x):
sh = (A.shape[0], A.shape[1])
out = np.zeros(sh, dtype=np.complex128)
for i in numba.prange(A.shape[0]):
for j in range(A.shape[1]):
for k in range(A.shape[2]):
out[i, j] += A[i, j, k] * x[k, i]
return out
@numba.njit(parallel=False, fastmath=True)
def batch_matvecT_ser(A, x):
sh = (A.shape[0], A.shape[1])
out = np.zeros(sh, dtype=np.complex128)
for i in range(A.shape[0]):
for j in range(A.shape[1]):
for k in range(A.shape[2]):
out[i, j] += A[i, j, k] * x[k, i]
return out
def batch_matvecT(A, x):
if A.shape[0]*A.shape[1] > 10000:
return batch_matvecT_par(A, x)
else:
return batch_matvecT_ser(A, x)
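# Shape note (sketch): A is stacked as (ns, M, M) and x arrives as (M, ns), so
# out[i] = A[i] @ x[:, i] -- one small dense matvec per azimuthal Fourier mode.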
@numba.njit(parallel=True, fastmath=True)
def optim_batch_matvecT_par(A, x, out):
for i in numba.prange(A.shape[0]):
for j in range(A.shape[1]):
kaccum = 0.0
for k in range(A.shape[2]):
kaccum += A[i, j, k] * x[i+k*A.shape[0]]
out[i+j*A.shape[0]] = kaccum
@numba.njit(parallel=False, fastmath=True)
def optim_batch_matvecT_ser(A, x, out):
    for i in range(A.shape[0]):  # serial variant: plain range (prange needs parallel=True)
for j in range(A.shape[1]):
kaccum = 0.0
for k in range(A.shape[2]):
kaccum += A[i, j, k] * x[i+k*A.shape[0]]
out[i+j*A.shape[0]] = kaccum
def optim_batch_matvecT(A, x, out):
if A.shape[0]*A.shape[1] > 10000:
optim_batch_matvecT_par(A, x, out)
else:
optim_batch_matvecT_ser(A, x, out)
class AnnularModifiedHelmholtzSolver(object):
"""
Spectrally accurate Modified Helmholtz solver on annular domain
Solves (k^2-L)u = f in the annulus described by the Annular Geometry AG
Subject to the Robin boundary condition:
ia*u(ri) + ib*u_r(ri) = ig (boundary condition at the inner radius)
oa*u(ro) + ob*u_r(ro) = og (boundary condition at the outer radius)
    On instantiation, a preconditioner is formed with ia, ib, oa, ob
    defining the boundary conditions
    These can be changed at solve time, but preconditioning may not work as well
"""
def __init__(self, AAG, k, ia=1.0, ib=0.0, oa=1.0, ob=0.0):
self.AAG = AAG
self.ia = ia
self.ib = ib
self.oa = oa
self.ob = ob
self.k = k
M = AAG.M
ns = AAG.ns
n = AAG.n
NB = M*ns
self.M = M
self.ns = ns
self.n = n
self.NB = NB
self.small_shape = (self.M, self.ns)
self.shape = (self.M, self.n)
self._construct()
self.APPLY = scipy.sparse.linalg.LinearOperator((self.NB, self.NB), dtype=complex, matvec=self._apply)
self.PREC = scipy.sparse.linalg.LinearOperator((self.NB, self.NB), dtype=complex, matvec=self._optim_preconditioner)
def _construct(self):
AAG = self.AAG
CO = AAG.CO
apsi1 = AAG.approx_psi1
aipsi1 = AAG.approx_inv_psi1
aipsi2 = AAG.approx_inv_psi2
ks = AAG.ks
D01 = CO.D01
D12 = CO.D12
R01 = CO.R01
R12 = CO.R12
R02 = CO.R02
ibcd = CO.ibc_dirichlet
ibcn = CO.ibc_neumann
obcd = CO.obc_dirichlet
obcn = CO.obc_neumann
ns = self.ns
M = self.M
self._KLUS = []
self._KINVS = []
for i in range(ns):
K = np.empty((M,M), dtype=complex)
LL = fast_dot(aipsi2, fast_dot(D12, fast_dot(apsi1, D01))) - \
fast_dot(np.ones(M-2)*ks[i]**2, fast_dot(R12, fast_dot(aipsi1, R01)))
K[:M-2] = self.k**2*R02 - LL
K[M-2:M-1] = self.ia*ibcd + self.ib*ibcn
K[M-1:M-0] = self.oa*obcd + self.ob*obcn
# self._KLUS.append(sp.linalg.lu_factor(K)) # for old preconditioner
self._KINVS.append(sp.linalg.inv(K.real))
self.KINV = sp.sparse.block_diag(self._KINVS, 'csr').astype('complex') # for SpMV preconditioner
self.Stacked_KINVS = np.stack(self._KINVS).copy()
self.prealloc = np.zeros(self.M*self.ns, dtype=complex)
def _preconditioner(self, fh):
return batch_matvecT(self.Stacked_KINVS, fh.reshape(self.small_shape)).ravel('F')
def _optim_preconditioner(self, fh):
optim_batch_matvecT(self.Stacked_KINVS, fh, self.prealloc)
return self.prealloc
def _SpMV_preconditioner(self, fh):
# could avoid these reshapes if self.KINV constructed differently
# however, they don't seem to take a huge amount of time
w1 = fh.reshape(self.small_shape).ravel('F')
w2 = zSpMV_viaMKL(self.KINV, w1)
return w2.reshape(self.small_shape, order='F').ravel()
def _old_preconditioner(self, fh):
fh = fh.reshape(self.small_shape)
fo = np.empty(self.small_shape, dtype=complex)
for i in range(self.ns):
fo[:,i] = fast_LU_solve(self._KLUS[i], fh[:,i])
return fo.ravel()
def _apply(self, uh):
AAG = self.AAG
RAG = self.RAG
CO = self.AAG.CO
ibcd = CO.ibc_dirichlet
ibcn = CO.ibc_neumann
obcd = CO.obc_dirichlet
obcn = CO.obc_neumann
R02 = CO.R02
uh = uh.reshape(self.small_shape)
luh = fscalar_laplacian(CO, AAG, RAG, uh)
fuh = self.k**2*R02.dot(uh) - luh
ibc = (self.ia*ibcd + self.ib*ibcn).dot(uh)
obc = (self.oa*obcd + self.ob*obcn).dot(uh)
return concat(fuh, ibc, obc)
def solve(self, RAG, f, ig, og, ia=None, ib=None, oa=None, ob=None,
verbose=False, **kwargs):
self.RAG = RAG
self.ia = ia if ia is not None else self.ia
self.ib = ib if ib is not None else self.ib
self.oa = oa if oa is not None else self.oa
self.ob = ob if ob is not None else self.ob
R02 = self.AAG.CO.R02
ff = concat(R02.dot(f), ig, og)
# ffh = mfft(ff.reshape(self.shape)).ravel()
ffh = fft(ff.reshape(self.shape)).ravel()
out = right_gmres(self.APPLY, ffh, M=self.PREC, verbose=verbose, **kwargs)
res = out[0]
if verbose:
print('GMRES took:', len(out[2]), 'iterations.')
self.iterations_last_call = len(out[2])
return ifft(res.reshape(self.small_shape)).real
| [
"dstein@flatironinstitute.org"
] | dstein@flatironinstitute.org |
760b723d21d604814cee51e8a214cc8cc7d6fd3c | 152ff2ef15245883b0b7cc3208fe71edcb4ba446 | /my_uu/__migrations/0008_auto__chg_field_unsubscribe_user__add_unique_unsubscribe_user.py | a347708702d7da7f84875413ebc6e8a1906f20a5 | [] | no_license | pvoytko/my-uu.ru | 302d947f48446ec8c86177867ef9607f692a4a0f | 00b77f02d230bf2f7adb5bd02eebbaf165f70d39 | refs/heads/master | 2021-06-10T08:11:09.282716 | 2021-04-08T10:21:55 | 2021-04-08T10:21:55 | 15,873,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,167 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Unsubscribe.user'
db.alter_column(u'my_uu_unsubscribe', 'user_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True))
# Adding unique constraint on 'Unsubscribe', fields ['user']
db.create_unique(u'my_uu_unsubscribe', ['user_id'])
def backwards(self, orm):
# Removing unique constraint on 'Unsubscribe', fields ['user']
db.delete_unique(u'my_uu_unsubscribe', ['user_id'])
# Changing field 'Unsubscribe.user'
db.alter_column(u'my_uu_unsubscribe', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User']))
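    # Illustrative only: under South this schema change would be applied from
    # the shell, e.g. `./manage.py migrate my_uu` (command form assumed for
    # South 0.8.x).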
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'my_uu.account': {
'Meta': {'object_name': 'Account'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'my_uu.category': {
'Meta': {'object_name': 'Category'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'my_uu.event': {
'Meta': {'object_name': 'Event'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'my_uu.eventlog': {
'Meta': {'object_name': 'EventLog'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['my_uu.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'my_uu.uchet': {
'Meta': {'object_name': 'Uchet'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['my_uu.Account']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['my_uu.Category']"}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sum': ('django.db.models.fields.DecimalField', [], {'max_digits': '11', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'utype': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['my_uu.UType']"})
},
u'my_uu.unsubscribe': {
'Meta': {'object_name': 'Unsubscribe'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'my_uu.utype': {
'Meta': {'object_name': 'UType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
}
}
complete_apps = ['my_uu'] | [
"devnull@localhost"
] | devnull@localhost |
eb4def72637372ee312f3439627f207f24dc0828 | 71f3ecb8fc4666fcf9a98d39caaffc2bcf1e865c | /.history/第3章/ling_20200607212959.py | b5ed0fb3001268d28f8627a5245565a97d0a52b5 | [
"MIT"
] | permissive | dltech-xyz/Alg_Py_Xiangjie | 03a9cac9bdb062ce7a0d5b28803b49b8da69dcf3 | 877c0f8c75bf44ef524f858a582922e9ca39bbde | refs/heads/master | 2022-10-15T02:30:21.696610 | 2020-06-10T02:35:36 | 2020-06-10T02:35:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,154 | py | #!/usr/bin/env python
# coding=utf-8
'''
@version:
@Author: steven
@Date: 2020-05-27 22:20:22
@LastEditors: steven
@LastEditTime: 2020-06-07 21:29:59
@Description: Give change to the customer using the fewest coins.
'''
# Check that an input string is a positive integer:
# https://www.quora.com/How-can-I-make-sure-the-user-inputs-a-positive-integer-in-Python
def pos_num(n, i):
    d = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0]
    while True:
        try:
            num = int(n)
            if num < 0:
                print(f"{d[i]}: {n} is not positive.\n")
                n = input("Please enter a positive integer: ")
                continue
            return num
        except ValueError:
            print(f"{d[i]}: {n} is not a positive integer.\n")
            n = input("Please enter a positive integer: ")


def check_money(need2pay, all_money):
    while need2pay > all_money:
        # The requested change exceeds the cashier's total, so it cannot be paid.
        print("Cannot pay that amount, please check the input.\n")
        # Offer to re-enter the amount.
        if_reinput = input("Re-enter? y for yes, n for no: ")
        if if_reinput in ('y', 'Y'):
            # TODO: validate this new input as well.
            need2pay = float(input("Please enter the change to give: "))
        elif if_reinput in ('n', 'N'):
            exit()
    return need2pay


def main():
    # Initialize the running total and the per-denomination coin counts:
    face_value = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0]  # coin denominations
    fval_num = []  # number of coins of each denomination
    all_money = 0  # total money starts at 0
    # Read the coins currently on hand:
    temp = input("Enter the number of coins of each kind, in the order [0.01, 0.02, 0.05, 0.10, 0.20, 0.50, 1.00], separated by spaces: ")
    fval_num0 = temp.split(" ")
    # Check that every entry in the sequence is a positive integer:
    for i, item in enumerate(fval_num0):
        fval_num0[i] = pos_num(item, i)
    # Total up the change on hand:
    for i in range(0, len(fval_num0)):
        fval_num.append(int(fval_num0[i]))
        all_money += face_value[i] * fval_num[i]  # the cashier's total
    need2pay = float(input("Please enter the change to give: "))
    need2pay = check_money(need2pay, all_money)
    # To use the fewest coins, spend the largest denominations first, so walk
    # the denomination array from its large end (the greedy choice).
    i = len(face_value) - 1
    while i >= 0:
        if need2pay >= face_value[i]:
            n = int((need2pay + 1e-9) / face_value[i])  # small tolerance guards float division
            if n > fval_num[i]:
                n = fval_num[i]  # pay out at most the coins actually on hand
            fval_num[i] -= n  # update the remaining coin count
            # Greedy key step: shrink the remainder (rounded to avoid float drift).
            need2pay = round(need2pay - n * face_value[i], 2)
            print(f'Used {n} coin(s) of {face_value[i]}')
        i -= 1


if __name__ == "__main__":
    main()
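# Worked example (illustrative): asking for 0.88 in change with plenty of every
# coin pays 1 x 0.50, 1 x 0.20, 1 x 0.10, 1 x 0.05, 1 x 0.02 and 1 x 0.01 --
# six coins, which is optimal for this canonical denomination system.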
| [
"a867907127@gmail.com"
] | a867907127@gmail.com |
10d126a84296968c3598e1dcecbc83d5f10b8b6a | bec99dad2e4d4bc893a317409d95a344d38756f5 | /store/management/commands/data/units.py | ef24804822f878fe8636a62b0a83c84e824710b9 | [] | no_license | solotony/go5 | bfde50028db37de0dc0f6342a46b0a2d693b9de1 | 3a7e47c81e1b485d5ef9e6911ab064baa4d63cf5 | refs/heads/master | 2023-03-03T06:08:22.706991 | 2021-02-14T10:53:06 | 2021-02-14T10:53:06 | 302,860,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,157 | py | from django.utils.translation import gettext_lazy as _
UNITS = [
(16, _('Length'), _('INCH'), _('Inch'), 2, None, _('"')),
(17, _('Weight'), _('KG'), _('kilogram'), 1, 1000, _('kg')),
(18, _('Frequency'), _('MHZ'), _('megahertz'), 1, 1e+6, _('MHz')),
(19, _('Byte size'), _('MB'), _('megabyte'), 1, 1e+6, _('MB')),
(20, _('Byte size'), _('GB'), _('gigabyte'), 1, 1e+9, _('GB')),
(22, _('Bit speed'), _('KBIT_S'), _('kilobit per second'),1, 1000, _('кбит/с')),
(23, _('Bit speed'), _('MBIT_S'), _('megabit per second'),1, 1e+6, _('Мбит/с')),
(24, _('Length'), _('MM'), _('millimetre'), 1, 1, _('mm')),
(27, _('Time'), _('MS'), _('millisecond'), 1, None, _('ms')),
(28, _('Frequency'), _('RPM'), _('revolutions per minute'), 0, 60, _('RPM')),
(31, _('Frequency'), _('KHZ'), _('kilohertz'), 1, 1000, _('kHz')),
(32, _('Corner'), _('DEG'), _('degree'), 0, None, _('°')),
    (34, _('Length'), _('CM'), _('centimetre'), 1, 10, _('cm')),
(35, _('Resolution'), _('DPI'), _('dot per inch'), 2, None, _('DPI')),
    (36, _('Frequency'), _('HZ'), _('hertz'), 1, 1, _('Hz')),
    (37, _('Frequency'), _('GHZ'), _('gigahertz'), 1, 1e+9, _('GHz')),
(38, _('Weight'), _('G'), _('gram'), 1, 1, _('g')),
(40, _('Byte speed'), _('MB_S'), _('megabyte per second'),1, None, _('MB/s')),
    (41, _('Electric current'), _('A'), _('ampere'), 0, None, _('A')),
    (42, _('Voltage'), _('V'), _('volt'), 0, None, _('V')),
    (43, _('Luminance'), _('CD_M2'), _('candela per square metre'), 1, None, _('cd/m²')),
(44, _('Power'), _('W'), _('watt'), 0, None, _('W')),
(45, _('Bit size'), _('MBIT'), _('megabit'), 0, None, _('Mbit')),
(46, _('Time'), _('HOUR'), _('hour'), 0, None, _('h')),
(47, _('Byte size'), _('KB'), _('kilobyte'), 0, 1000, _('KB')),
(50, _('Bit size'), _('BIT'), _('bit'), 0, 1, _('бит')),
(51, _('Electric charge'), _('MA_H'), _('milliampere hour'), 0, None, _('mAh')),
(54, _('Length'), _('M'), _('metre'), 0, 1000, _('m')),
(58, _(''), _('CPM'), _(''), 0, None, _('cpm')),
(59, _(''), _('PPM'), _(''), 0, None, _('ppm')),
(60, _('Time'), _('SECOND'), _('second'), 0, None, _('s')),
(64, _(''), _('PERCENT'), _(''), 0, None, _('%')),
(65, _('Temperature'), _('CELSIUS'), _('degree Celsius'), 0, None, _('°C')),
(66, _('Time'), _('MINUTE'), _('minute'), 0, None, _('min')),
(68, _(''), _('GBIT_S'), _(''), 0, None, _('Gbit/s')),
(70, _('Length'), _('MICRON'), _('micrometre'), 0, 1e-6, _('µm')),
(72, _(''), _('GEE'), _(''), 0, None, _('G')),
(73, _('Electric current'), _('MA'), _('milliampere'), 1, 1e-3, _('mA')),
(75, _(''), _(''), _(''), 0, None, _('лет')),
(76, _(''), _(''), _(''), 0, None, _('dB')),
(77, _(''), _(''), _(''), 0, None, _('стр/мес')),
(80, _(''), _(''), _(''), 0, None, _('VA')),
(81, _(''), _(''), _(''), 0, None, _('J')),
(82, _(''), _(''), _(''), 0, None, _('cpi')),
(83, _(''), _(''), _(''), 0, None, _('cps')),
(84, _(''), _(''), _(''), 0, None, _('lpm')),
(85, _(''), _(''), _(''), 0, None, _('fps')),
(87, _(''), _(''), _(''), 0, None, _('Ω')),
(90, _(''), _(''), _(''), 0, None, _('лм')),
(91, _(''), _(''), _(''), 0, None, _('°F')),
(98, _(''), _(''), _(''), 0, None, _('nm')),
(99, _(''), _(''), _(''), 0, None, _('dBi')),
(100, _('Volume'), _('L'), _('litre'), 1, 1000, _('L')),
(103, _(''), _(''), _(''), 0, None, _('кВт·ч')),
(105, _(''), _(''), _(''), 0, None, _('N')),
(107, _(''), _(''), _(''), 0, None, _('диск (ов)')),
(109, _(''), _(''), _(''), 0, None, _('BTU/ч')),
(112, _(''), _(''), _(''), 0, None, _('символов')),
(116, _(''), _(''), _(''), 0, None, _('с/стор')),
(120, _(''), _(''), _(''), 0, None, _('адресатов')),
(121, _(''), _(''), _(''), 0, None, _('ips')),
(122, _(''), _(''), _(''), 0, None, _('мм/с')),
(126, _(''), _(''), _(''), 0, None, _('млн. симв.')),
(127, _('Weight'), _('LBS'), _('pound'), 2, None, _('lbs')),
(132, _(''), _(''), _(''), 0, None, _('скоб')),
(138, _(''), _(''), _(''), 0, None, _('дюйм/мин')),
(142, _('Volume'), _('ML'), _('milliliter'), 1, None, _('ml')),
(143, _(''), _(''), _(''), 0, None, _('мес')),
(144, _('Count'), _('ITEM'), _(''), 0, None, _('шт')),
    (146, _('Volume'), _('CM_3'), _('centimetre³'), 1, None, _('cm³')),
(156, _(''), _(''), _(''), 0, None, _('g/m²')),
(159, _(''), _(''), _(''), 0, None, _('TB')),
(160, _(''), _(''), _(''), 0, None, _('GB/s')),
(165, _(''), _(''), _(''), 0, None, _('MP')),
(169, _(''), _(''), _(''), 0, None, _('кВт·ч/неделя')),
(175, _(''), _(''), _(''), 0, None, _('диск/ч')),
(176, _(''), _(''), _(''), 0, None, _('GT/s')),
(181, _(''), _(''), _(''), 0, None, _('dBmW')),
(185, _(''), _(''), _(''), 0, None, _('lps')),
(196, _(''), _(''), _(''), 0, None, _('Mpps')),
(206, _(''), _(''), _(''), 0, None, _('ipm')),
(210, _(''), _(''), _(''), 0, None, _('IOPS')),
(211, _(''), _(''), _(''), 0, None, _('Wh')),
(222, _(''), _(''), _(''), 0, None, _('точка')),
(224, _(''), _(''), _(''), 0, None, _('mmH2O')),
(228, _(''), _(''), _(''), 0, None, _('µs')),
(229, _(''), _(''), _(''), 0, None, _('mm²')),
(237, _(''), _(''), _(''), 0, None, _('pph')),
(243, _(''), _(''), _(''), 0, None, _('ppi')),
(299, _(''), _(''), _(''), 0, None, _('pps')),
(439, _(''), _(''), _(''), 0, None, _('лет 2')),
(443, _(''), _(''), _(''), 0, None, _('pl')),
(39, _('Count'), _(''), _(''), 0, None, _('пикселей')),
(55, _('Count'), _(''), _(''), 0, None, _('страниц')),
(56, _('Count'), _(''), _(''), 0, None, _('записей')),
(61, _('Count'), _(''), _(''), 0, None, _('копий')),
(62, _('Count'), _(''), _(''), 0, None, _('листов')),
(316, _('Count'), _(''), _(''), 0, None, _('скан')),
(349, _('Count'), _(''), _(''), 0, None, _('канала')),
(350, _('Count'), _(''), _(''), 0, None, _('лампы')),
(354, _('Count'), _(''), _(''), 0, None, _('клавиши')),
(362, _('Count'), _(''), _(''), 0, None, _('полка(и)')),
(363, _('Count'), _(''), _(''), 0, None, _('ящик(и)')),
(373, _('Count'), _(''), _(''), 0, None, _('символы')),
(375, _('Count'), _(''), _(''), 0, None, _('цвета')),
(378, _('Count'), _(''), _(''), 0, None, _('вентилятор(ы)')),
(384, _('Count'), _(''), _(''), 0, None, _('розетка(и)')),
(386, _('Count'), _(''), _(''), 0, None, _('колесо(а)')),
(429, _('Count'), _(''), _(''), 0, None, _('лицензия(и)')),
(69, _('Count'), _(''), _(''), 0, None, _('линий')),
(164, _('Count'), _(''), _(''), 0, None, _('млн. отрезаний')),
(444, _('Count'), _(''), _(''), 0, None, _('точки/строка')),
(86, _('Count'), _(''), _(''), 0, None, _('пользов.')),
(89, _('Count'), _(''), _(''), 0, None, _('снимков')),
(154, _('Count'), _(''), _(''), 0, None, _('этикетка (-ок)')),
(21, _('Count'), _(''), _(''), 0, None, _('x')),
(26, _('Count'), _(''), _(''), 0, None, _('None')),
(29, _('Count'), _(''), _(''), 0, None, _('None 2')),
(33, _('Count'), _(''), _(''), 0, None, _('x 2')),
(30, _('Count'), _(''), _(''), 0, None, _('M 2')),
]
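# Illustrative only (hypothetical consumer): a seeding command could unpack
# these 7-tuples, e.g.
#   for pk, kind, code, name, precision, factor, symbol in UNITS:
#       ...create or update the corresponding Unit record...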
| [
"as@solotony.com"
] | as@solotony.com |
8f339cb9c53e93b193dba8fd6b109b15ecaf18d3 | 694427fd2155fea664241f9e029f955d83fef2a2 | /deploy_tools/fabfile.py | 61536b3938dd326356a8d9cf17e7d5c1e2159022 | [] | no_license | fbidu/superlists | 2ea67f5405ac8237d135f8ad315f4bc53a06239f | 391f8fa8396f8c8890a30f3b18d95a47778c2505 | refs/heads/master | 2021-06-18T20:43:58.159824 | 2019-09-12T15:48:52 | 2019-09-12T15:48:52 | 172,391,723 | 0 | 0 | null | 2021-01-28T11:16:37 | 2019-02-24T21:20:57 | JavaScript | UTF-8 | Python | true | false | 1,182 | py | import random
from fabric.contrib.files import append, exists
from fabric.api import cd, env, local, run
REPO_URL = "https://github.com/fbidu/superlists.git"
def _get_latest_source():
if exists(".git"):
run("git fetch")
else:
run(f"git clone {REPO_URL} .")
current_commit = local("git log -n 1 --format=%H", capture=True)
run(f"git reset --hard {current_commit}")
def _update_pipenv():
if not exists(f"/home/{env.user}/miniconda3/bin/pipenv"):
run(f"pip install --user pipenv")
run(f"/home/{env.user}/miniconda3/bin/pipenv install")
def _update_dotenv():
pass
def _update_static_files():
run(f"/home/{env.user}/miniconda3/bin/pipenv run python manage.py collectstatic --noinput")
def _update_database():
run(f"/home/{env.user}/miniconda3/bin/pipenv run python manage.py migrate --noinput")
def deploy():
site_folder = f"/home/{env.user}/django-apps/{env.host}"
run(f"mkdir -p {site_folder}")
with cd(site_folder):
_get_latest_source()
with cd(f"{site_folder}/superlists"):
_update_pipenv()
_update_dotenv()
_update_static_files()
_update_database() | [
"felipe@felipevr.com"
] | felipe@felipevr.com |
e9bb9fa6c3131fe1c09815de84bbcae7570fe34c | bf2aa4eab14a6a5347fe4af65cc4a37f512a465d | /people/migrations/0108_auto_20200530_0602.py | 76b15ad9f9532587e2fa3e0edf18453d24e8c970 | [] | no_license | drdavidknott/betterstart | 0cda889f5cd6bb779f6d1fa75cb4f2ef08eb626c | 59e2f8282b34b7c75e1e19e1cfa276b787118adf | refs/heads/master | 2023-05-04T07:32:24.796488 | 2023-04-16T15:26:30 | 2023-04-16T15:26:30 | 173,626,906 | 0 | 0 | null | 2023-02-18T07:27:55 | 2019-03-03T20:37:01 | Python | UTF-8 | Python | false | false | 1,058 | py | # Generated by Django 3.0.3 on 2020-05-30 05:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0107_auto_20200530_0549'),
]
operations = [
migrations.AddField(
model_name='profile',
name='requested_resets',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='profile',
name='reset_code',
field=models.CharField(default=0, max_length=16),
),
migrations.AddField(
model_name='profile',
name='reset_timeout',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='profile',
name='successful_resets',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='site',
name='password_reset_timeout',
field=models.IntegerField(default=15),
),
]
| [
"dkoysta@gmail.com"
] | dkoysta@gmail.com |
f9aa2c51297ea2624dae2f4826d88086a7d086af | 07f815078189169cd2944105ef373b522aef8f33 | /Scrapy_spider/build_medals_table/build_medals_table/pipelines.py | 2c36fefd6e33f0cfbf1cb15f83f538e1451b98e2 | [
"MIT"
] | permissive | chrisjdavie/Olympics_redo | ecc5c0a593085ba5d3d9ce608362ccb1fece8eec | 43abe8d9bd9da4e9b15013b12bc3b5a740c55871 | refs/heads/master | 2021-01-20T10:11:05.994665 | 2015-01-23T14:58:10 | 2015-01-23T14:58:10 | 29,680,905 | 0 | 0 | null | 2015-01-22T16:51:11 | 2015-01-22T13:50:53 | OpenEdge ABL | UTF-8 | Python | false | false | 296 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class BuildMedalsTablePipeline(object):
def process_item(self, item, spider):
return item
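# Illustrative only: the pipeline takes effect once enabled in settings.py, e.g.
# ITEM_PIPELINES = {'build_medals_table.pipelines.BuildMedalsTablePipeline': 300}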
| [
"chris.d@theasi.co"
] | chris.d@theasi.co |
b329f311049bd7d44b6015d99b738c1211c60ac4 | ecf257614e183bda87f73ce8d77daa563ea66fa7 | /hebpipe/lib/xrenner/modules/xrenner_coref.py | fe1c2485719ecc353c3a6bc1eb0111d7b3bfdb23 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | amir-zeldes/HebPipe | 7aa9990d49f86ce97cb1e094791b480514e98d44 | 4f395349b58a055c778d4db46d8d9f54daa71794 | refs/heads/master | 2023-08-31T20:39:18.991834 | 2023-08-22T00:49:44 | 2023-08-22T00:49:44 | 148,917,890 | 31 | 8 | NOASSERTION | 2023-08-22T00:49:40 | 2018-09-15T16:10:05 | Lex | UTF-8 | Python | false | false | 11,367 | py | from .xrenner_marker import *
from .xrenner_compatible import *
from .xrenner_propagate import *
from .xrenner_rule import CorefRule, ConstraintMatcher
"""
Coreference resolution module. Iterates through markables to find possible matches based on rules.
Author: Amir Zeldes
"""
def find_antecedent(markable, previous_markables, lex, restrict_rule=""):
"""
Search for antecedents by cycling through coref rules for previous markables
:param markable: Markable object to find an antecedent for
:param previous_markables: Markables in all sentences up to and including current sentence
:param lex: the LexData object with gazetteer information and model settings
:param restrict_rule: a string specifying a subset of rules that should be checked (e.g. only rules with 'appos')
:return: candidate, matching_rule - the best antecedent and the rule that matched it
"""
# DEBUG point
if markable.text == lex.debug["ana"]:
a=5
candidate = None
matching_rule = None
for i, rule in enumerate(lex.coref_rules):
# If this call of find_antecedent is limited to certain rules, check that the restriction is in the rule
if restrict_rule == "" or restrict_rule in rule.ana_spec:
if coref_rule_applies(lex, rule.ana_constraints, markable):
candidate = search_prev_markables(markable, previous_markables, rule, lex)
if candidate is not None:
matching_rule = rule.propagation
break
return candidate, matching_rule
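# Illustrative call site (names hypothetical):
#   ante, rule = find_antecedent(mark, markables_so_far, lex)
#   if ante is not None:
#       mark.antecedent, mark.coref_rule = ante, rule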
def search_prev_markables(markable, previous_markables, rule, lex):
"""
Search for antecedent to specified markable using a specified rule
:param markable: The markable object to find an antecedent for
	:param previous_markables: The list of known markables up to and including the current sentence; markables beyond the current markable but in its sentence are included for cataphora.
	:param rule: the CorefRule to check; it bundles the antecedent ConstraintMatcher list, the antecedent specification string, the maximum sentence distance for the search (0 for within-sentence), and the feature propagation directive
	:param lex: the LexData object with gazetteer information and model settings
:return: the selected candidate Markable object
"""
ante_constraints, ante_spec, rule_num, max_dist, propagate, clf_name = rule.ante_constraints, rule.ante_spec, rule.rule_num, rule.max_distance, rule.propagation, rule.clf_name
candidate_set = set([])
if ante_spec.find("lookahead") > -1:
referents_to_loop = previous_markables
else:
referents_to_loop = reversed(previous_markables)
	for candidate in referents_to_loop: # loop through previous markables (backwards, unless this is a lookahead rule)
#DEBUG breakpoint:
if markable.text == lex.debug["ana"]:
a = 5
if candidate.text == lex.debug["ante"]:
b=6
if markable.sentence.sent_num - candidate.sentence.sent_num <= max_dist:
if ((int(markable.head.id) > int(candidate.head.id) and
ante_spec.find("lookahead") == -1) or (int(markable.head.id) < int(candidate.head.id) and ante_spec.find("lookahead") > -1)):
if candidate.group not in markable.non_antecdent_groups:
if coref_rule_applies(lex, ante_constraints, candidate, markable):
if not markables_overlap(markable, candidate, lex):
if markable.form == "pronoun":
if agree_compatible(markable, candidate, lex) or (ante_spec.find("anyagree") > -1 and group_agree_compatible(markable,candidate,previous_markables,lex)):
if entities_compatible(markable, candidate, lex) and cardinality_compatible(markable, candidate, lex):
candidate_set.add(candidate)
elif markable.text == candidate.text or (len(markable.text) > 4 and (candidate.text.lower() == markable.text.lower())):
#propagate_entity(markable, candidate, propagate)
candidate_set.add(candidate)
#return candidate
elif markable.text + "|" + candidate.text in lex.coref and entities_compatible(
markable, candidate, lex) and agree_compatible(markable, candidate, lex):
candidate_set.add(candidate)
#return candidate
elif markable.core_text + "|" + candidate.core_text in lex.coref and entities_compatible(
markable, candidate, lex) and agree_compatible(markable, candidate, lex):
candidate_set.add(candidate)
#return candidate
elif markable.entity == candidate.entity and agree_compatible(markable, candidate, lex) and (markable.head.text == candidate.head.text or
(len(markable.head.text) > 3 and (candidate.head.text.lower() == markable.head.text.lower())) or
(markable.core_text.count(" ") > 2 and (markable.core_text.lower() == candidate.core_text.lower())) or
(markable.head.lemma == candidate.head.lemma and lex.filters["lemma_match_pos"].match(markable.head.pos) is not None
and lex.filters["lemma_match_pos"].match(candidate.head.pos) is not None)):
if modifiers_compatible(markable, candidate, lex) and modifiers_compatible(candidate, markable, lex):
candidate_set.add(candidate)
elif (markable.entity == candidate.entity or len(set(markable.alt_entities) & set(candidate.alt_entities))>0) and isa(markable, candidate, lex):
candidate.isa = True # This is an 'isa' candidate
candidate_set.add(candidate)
elif agree_compatible(markable,candidate,lex) and ((markable.head.text == candidate.head.text) or (markable.head.lemma == candidate.head.lemma and
lex.filters["lemma_match_pos"].match(markable.head.pos) is not None and lex.filters["lemma_match_pos"].match(candidate.head.pos) is not None)):
if merge_entities(markable, candidate, previous_markables, lex):
candidate_set.add(candidate)
elif entities_compatible(markable, candidate, lex) and isa(markable, candidate, lex):
if merge_entities(markable, candidate, previous_markables, lex):
candidate.isa = True # This is an 'isa' candidate
candidate_set.add(candidate)
elif lex.filters["match_acronyms"] and markable.head.text.isupper() or candidate.head.text.isupper():
if acronym_match(markable, candidate, lex) or acronym_match(candidate, markable, lex):
if modifiers_compatible(markable, candidate, lex) and modifiers_compatible(candidate, markable, lex):
if merge_entities(markable, candidate, previous_markables, lex):
candidate_set.add(candidate)
if ante_spec.find("anytext") > -1:
if (ante_spec.find("anyagree") > -1 and group_agree_compatible(markable,candidate,previous_markables,lex)) or agree_compatible(markable, candidate, lex):
if (ante_spec.find("anycardinality") > -1 or cardinality_compatible(markable,candidate,lex)):
if (ante_spec.find("anyentity") > -1 or entities_compatible(markable,candidate,lex)):
candidate_set.add(candidate)
elif ante_spec.find("lookahead") == -1:
# Reached back too far according to max_dist, stop looking
break
if len(candidate_set) > 0:
candidates_to_remove = set([])
for candidate_item in candidate_set:
# Remove items that are prohibited by entity agree mapping
for agree, ent in iteritems(lex.filters["agree_entity_mapping"]):
if markable.agree == agree and candidate_item.entity != ent:
candidates_to_remove.add(candidate_item)
if candidate_item.entity == lex.filters["person_def_entity"] and (candidate_item.form != "pronoun" or markable.entity_certainty == "certain") and lex.filters["no_person_agree"].match(markable.agree) is not None:
candidates_to_remove.add(candidate_item)
elif markable.entity == lex.filters["person_def_entity"] and (markable.form != "pronoun" or markable.entity_certainty == "certain") and lex.filters["no_person_agree"].match(candidate_item.agree) is not None:
candidates_to_remove.add(candidate_item)
for removal in candidates_to_remove:
candidate_set.remove(removal)
if len(candidate_set) > 0:
take_first = True if ante_spec.find("takefirst") > -1 else False
best = best_candidate(markable, candidate_set, lex, rule, take_first=take_first)
if best is not None:
if markable.text + "|" + best.text in lex.coref:
markable.coref_type = lex.coref[markable.text + "|" + best.text]
propagate_entity(markable, best, propagate)
propagate_entity(markable, best)
elif markable.core_text + "|" + best.core_text in lex.coref:
markable.coref_type = lex.coref[markable.core_text + "|" + best.core_text]
propagate_entity(markable, candidate_item)
elif propagate.startswith("propagate"):
propagate_entity(markable, best, propagate)
if hasattr(best,"isa"):
if hasattr(best,"isa_dir"):
if best.isa_dir == "markable":
markable.isa_partner_head = best.lemma
else:
best.isa_partner_head = markable.lemma
delattr(best,"isa_dir")
delattr(best,"isa")
return best
else:
return None
else:
return None
def coref_rule_applies(lex, constraints, mark, anaphor=None):
"""
Check whether a markable definition from a coref rule applies to this markable
:param lex: the LexData object with gazetteer information and model settings
:param constraints: the constraints defining the relevant Markable
:param mark: the Markable object to check constraints against
:param anaphor: if this is an antecedent check, the anaphor is passed for $1-style constraint checks
:return: bool: True if 'mark' fits all constraints, False if any of them fail
"""
for constraint in constraints:
if not constraint.match(mark,lex,anaphor):
return False
return True
def antecedent_prohibited(markable, conll_tokens, lex):
"""
Check whether a Markable object is prohibited from having an antecedent
:param markable: The Markable object to check
:param conll_tokens: The list of ParsedToken objects up to and including the current sentence
:param lex: the LexData object with gazetteer information and model settings
:return: bool
"""
mismatch = True
if "/" in lex.filters["no_antecedent"]:
constraints = lex.filters["no_antecedent"].split(";")
for constraint in constraints:
if not mismatch:
return True
descriptions = constraint.split("&")
mismatch = False
for token_description in descriptions:
if token_description.startswith("^"):
test_token = conll_tokens[markable.start]
elif token_description.startswith("$"):
test_token = conll_tokens[markable.end]
elif token_description.startswith("@"):
test_token = markable.head
else:
# Invalid token description
return False
token_description = token_description[1:]
pos, word = token_description.split("/")
if pos.startswith("!"):
pos = pos[1:]
negative_pos = True
else:
negative_pos = False
if word.startswith("!"):
word = word[1:]
negative_word = True
else:
negative_word = False
pos_matcher = re.compile(pos)
word_matcher = re.compile(word)
if (pos_matcher.match(test_token.pos) is None and not negative_pos) or (pos_matcher.match(test_token.pos) is not None and negative_pos) or \
(word_matcher.match(test_token.text) is None and not negative_word) or (word_matcher.match(test_token.text) is not None and negative_word):
mismatch = True
break
if mismatch:
return False
else:
return True
| [
"amir.zeldes@georgetown.edu"
] | amir.zeldes@georgetown.edu |
410a8345d5cf16c2a42d45dd39d76a69d0927350 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2241/60658/264268.py | fe070c1a30602ace785b1942e5e375a843dd86ec | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | target = int(input())
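# Sketch of intent: this counts the ways `target` can be written as a sum of
# one or more consecutive positive integers (LeetCode 829-style logic).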
ans = 0
i = 1
while target>0:
ans+=(target%i==0)
target-=i
i+=1
print(ans) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
349c0c0214a8062a6fed23e1e2968bfb83a10f1e | d838bed08a00114c92b73982a74d96c15166a49e | /docs/data/learn/Bioinformatics/input/ch4_code/src/helpers/DnaUtils.py | 7e027494e7ddf73aff79ccb6bbbd85e722d9f961 | [] | no_license | offbynull/offbynull.github.io | 4911f53d77f6c59e7a453ee271b1e04e613862bc | 754a85f43159738b89dd2bde1ad6ba0d75f34b98 | refs/heads/master | 2023-07-04T00:39:50.013571 | 2023-06-17T20:27:05 | 2023-06-17T23:27:00 | 308,482,936 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,486 | py | from random import Random
from typing import Optional, List
def generate_random_genome(size: int, r: Optional[Random] = None) -> str:
if r is None:
r = Random()
return ''.join([r.choice(['A', 'C', 'T', 'G']) for i in range(size)])
def generate_random_cyclic_genome(size: int, copies: int, r: Optional[Random] = None) -> List[str]:
if r is None:
r = Random()
copies = [''.join([r.choice(['A', 'C', 'T', 'G']) for i in range(size)])] * copies
for i, copy in enumerate(copies):
offset = r.randint(0, size)
        copies[i] = copy[offset:] + copy[:offset]  # rotate without dropping a base of the circular genome
return copies
def dna_reverse_complement(dna: str):
return dna_complement(dna)[::-1]
def dna_complement(dna: str):
ret = ''
for ch in dna:
if ch == 'A':
ret += 'T'
elif ch == 'C':
ret += 'G'
elif ch == 'T':
ret += 'A'
elif ch == 'G':
ret += 'C'
else:
raise
return ret
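# Illustrative check (not in the original): 'ACGT' is its own reverse
# complement, so dna_reverse_complement('ACGT') == 'ACGT'.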
# MARKDOWN_DNA_TO_RNA
def dna_to_rna(dna: str):
ret = ''
for ch in dna:
if ch == 'A' or ch == 'C' or ch == 'G':
ret += ch
elif ch == 'T':
ret += 'U'
else:
raise
return ret
# MARKDOWN_DNA_TO_RNA
def rna_to_dna(rna: str):
ret = ''
for ch in rna:
if ch == 'A' or ch == 'C' or ch == 'G':
ret += ch
elif ch == 'U':
ret += 'T'
else:
raise
return ret | [
"offbynull@gmail.com"
] | offbynull@gmail.com |
3643aaa66a11bc0bb9a6cbd437878ad7d0c62ef8 | 64c8d431c751b1b7a7cb7224107ee40f67fbc982 | /code/python/echomesh/command/Broadcast.py | f9eb8a40450ff4d2509558545ac4f074fcf98b99 | [
"MIT"
] | permissive | silky/echomesh | 6ac4755e4ff5ea3aa2b2b671c0979068c7605116 | 2fe5a00a79c215b4aca4083e5252fcdcbd0507aa | refs/heads/master | 2021-01-12T20:26:59.294649 | 2013-11-16T23:29:05 | 2013-11-16T23:29:05 | 14,458,268 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | from __future__ import absolute_import, division, print_function, unicode_literals
from echomesh.command import Show
from echomesh.util import Log
LOGGER = Log.logger(__name__)
def broadcast(echomesh_instance, on_or_off=None):
if on_or_off is None:
Show.broadcast(echomesh_instance)
else:
on_or_off = on_or_off.lower()
b_on = on_or_off in ['on', 'true']
if not (b_on or on_or_off in ['off', 'false']):
raise Exception('You can only turn broadcast mode "on" or "off".')
name = 'ON' if b_on else 'off'
if b_on == echomesh_instance.broadcasting():
message = 'was already'
else:
echomesh_instance.set_broadcasting(b_on)
message = 'is now'
LOGGER.info('broadcast mode %s %s.', message, name)
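# Illustrative only: broadcast(instance, 'on') enables forwarding of start and
# pause commands to all nodes, while broadcast(instance) with no argument just
# reports the current mode via Show.broadcast.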
HELP = """
Set the broadcast mode on or off.
When broadcast mode is on, all start and pause commands are sent to all echomesh
nodes; when broadcast mode is off, start and pause only go to this node.
"""
SEE_ALSO = ['show broadcast']
| [
"tom@swirly.com"
] | tom@swirly.com |
733d2b44064406a6ec1bfc170f23600b23a3d6b0 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_4945.py | 0761af1c78b2bcc7bb0e79dc2d58cf94ac6c73e6 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,846 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
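# The placements below register one Chimera marker per labeled Cog2-Cog8
# subunit position (GFP-tagged termini and anchor points), each with its own
# color and radius.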
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((478.695, 491.618, 534.974), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((450.728, 438.992, 571.113), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((420.944, 371.937, 607.18), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((499.981, 362.186, 493.433), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((338.529, 244.809, 727.359), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((453.438, 458.283, 555.029), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((453.487, 459.378, 554.207), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((426.44, 464.904, 559.516), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((412.763, 488.784, 553.622), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((404.654, 502.651, 530.337), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((405.526, 488.842, 505.397), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((385.844, 472.309, 492.679), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((469.191, 481.848, 558.095), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((298.112, 462.255, 432.399), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((259.758, 343.869, 591.668), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((259.758, 343.869, 591.668), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((283.204, 352.518, 603.959), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((300.243, 370.994, 590.963), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((322.038, 388.645, 595.983), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((345.128, 405.471, 598.879), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((369.728, 420.439, 597.168), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((395.865, 431.223, 590.333), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((205.981, 333.036, 445.315), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((598.405, 534.572, 716.502), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((403.357, 414.699, 621.968), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((403.357, 414.699, 621.968), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((427.759, 399.706, 624.797), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((450.3, 385.471, 613.269), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((450.333, 364.906, 592.291), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((513.337, 446.467, 521.88), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((386.052, 276.451, 653.213), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((473.752, 433.46, 555.492), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((474.214, 433.381, 555.26), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((459.17, 410.898, 550.528), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((443.415, 421.138, 529.463), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((421.133, 435.907, 538.626), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((398.334, 450.855, 531.667), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((373.485, 463.226, 528.482), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((375.177, 489.518, 519.092), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((404.183, 491.253, 600.111), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((353.742, 484.147, 434.656), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((435.58, 469.772, 626.287), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((442.451, 448.375, 614.267), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((458.895, 402.98, 585.995), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((476.676, 353.682, 558.606), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((544.667, 395.707, 545.915), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((449.222, 255.85, 531.598), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((481.179, 393.997, 519.567), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((481.722, 384.38, 545.965), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((484.816, 383.104, 574.091), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((482.386, 374.729, 600.64), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((477.453, 370.516, 627.842), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((458.852, 370.638, 648.655), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((462.48, 436.145, 600.066), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((454.734, 305.751, 697.003), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"batxes@gmail.com"
] | batxes@gmail.com |
ccae95d130e10c420a1123ca43de367cbe9d79fe | 6bc4160d9f9e59df4f019cd0979b9c1266e6feec | /src/swarm_worker.py | 31359438848bc9742fbb7f01fde8dd622ff9b8ba | [
"MIT"
] | permissive | raymondlwb/docker-pygen | f00c3f17fa8ffde4426e55150775767f945d3038 | a7d1e70daba58c8bf44949b56453bdf017a56b4d | refs/heads/master | 2021-05-01T21:13:57.990953 | 2018-01-25T22:52:24 | 2018-01-25T22:52:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,646 | py | import re
import sys
import json
import signal
import argparse
import six
import requests
from actions import Action
from api import DockerApi
from http_server import HttpServer
from metrics import MetricsServer, Counter
from utils import get_logger, set_log_level
logger = get_logger('pygen-worker')
request_counter = Counter(
'pygen_worker_request_count', 'Number of requests handled by the Swarm worker',
labelnames=('client',)
)
send_counter = Counter(
'pygen_worker_send_count', 'Number of requests sent by the Swarm worker',
labelnames=('target',)
)
class Worker(HttpServer):
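    """PyGen worker running on a Swarm node.
    Serves HTTP requests from the manager instances (executing the
    requested actions locally) and watches the local Docker event
    stream, notifying every configured manager on port 9411 when a
    watched event occurs.
    """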
manager_port = 9411
worker_port = 9412
DEFAULT_EVENTS = ['start', 'stop', 'die', 'health_status']
EMPTY_DICT = dict()
def __init__(self, managers, retries=0, events=None, metrics_port=9414):
super(Worker, self).__init__(self.worker_port)
        if isinstance(managers, six.string_types):
self.managers = [managers]
else:
self.managers = managers
self.retries = retries
self.events = events or self.DEFAULT_EVENTS
self.metrics = MetricsServer(metrics_port)
self.api = DockerApi()
def start(self):
super(Worker, self).start()
if self.metrics:
self.metrics.start()
def _handle_request(self, request):
request_counter.labels(request.address_string()).inc()
length = int(request.headers['Content-Length'])
data = json.loads(request.rfile.read(length).decode('utf-8'))
self.handle_action(data.get('action'), *data.get('args', list()))
def handle_action(self, action_name, *args):
action_type = Action.by_name(action_name)
self.api.run_action(action_type, *args)
def watch_events(self):
for event in self.api.events(decode=True):
if self.is_watched(event):
logger.info('Received %s event from %s',
event.get('status'),
event.get('Actor', self.EMPTY_DICT).get('Attributes', self.EMPTY_DICT).get('name', '<?>'))
self.send_update(event.get('status'))
def is_watched(self, event):
if event.get('status') in self.events:
return True
# health_status comes as 'health_status: healthy' for example
if any(re.match(r'%s:.+' % item, event.get('status', '')) for item in self.events):
return True
return False
def send_update(self, status):
for manager in self.managers:
for _ in range(self.retries + 1):
try:
response = requests.post('http://%s:%d/' % (manager, self.manager_port), timeout=(5, 30))
logger.info('Update (%s) sent to http://%s:%d/ : HTTP %s : %s',
status, manager, self.manager_port, response.status_code, response.text.strip())
send_counter.labels(manager).inc()
break
except Exception as ex:
logger.error('Failed to send update to http://%s:%d/: %s',
manager, self.manager_port, ex, exc_info=1)
def shutdown(self):
super(Worker, self).shutdown()
if self.metrics:
self.metrics.shutdown()
def parse_arguments(args=sys.argv[1:]):
parser = argparse.ArgumentParser(description='PyGen cli to send HTTP updates on Docker events')
parser.add_argument('--manager',
metavar='<HOSTNAME>', required=True, nargs='+',
help='The target hostnames of the PyGen manager instances listening on port 9411')
parser.add_argument('--retries',
required=False, type=int, default=0,
help='Number of retries for sending an update to the manager')
parser.add_argument('--events',
metavar='<EVENT>', required=False, nargs='+',
default=['start', 'stop', 'die', 'health_status'],
help='Docker events to watch and trigger updates for '
'(default: start, stop, die, health_status)')
parser.add_argument('--metrics',
metavar='<PORT>', required=False, type=int, default=9414,
help='HTTP port number for exposing Prometheus metrics (default: 9414)')
parser.add_argument('--debug',
required=False, action='store_true',
help='Enable debug log messages')
return parser.parse_args(args)
def setup_signals(worker): # pragma: no cover
    def exit_signal(signum, *args):
        logger.info('Exiting ...')
        # exit cleanly on SIGTERM, signal an error for anything else
        sys.exit(0 if signum == signal.SIGTERM else 1)
signal.signal(signal.SIGTERM, exit_signal)
signal.signal(signal.SIGINT, exit_signal)
    def update_signal(*args):
        worker.send_update('sighup')  # send_update() needs a status string; it is only used for logging
signal.signal(signal.SIGHUP, update_signal)
if __name__ == '__main__': # pragma: no cover
set_log_level('INFO')
arguments = parse_arguments()
if arguments.debug:
set_log_level('DEBUG')
worker = Worker(arguments.manager, arguments.retries, arguments.events, arguments.metrics)
setup_signals(worker)
logger.debug('Signal handlers set up for SIGTERM, SIGINT and SIGHUP')
try:
worker.start()
logger.info('Starting event watch loop')
worker.watch_events()
except SystemExit:
logger.info('Exiting...')
worker.shutdown()
raise
except Exception:
worker.shutdown()
raise
| [
"rycus86@gmail.com"
] | rycus86@gmail.com |
20cefe9ac9bcbd200cb13ed15b3144b20d500662 | 928b064b76b4218bfddd8c648a07709d0c7d1b93 | /htmlthingy/_converter.py | 5b0fb9224e185eb7d23a3da48e1738e1c896f735 | [
"MIT"
] | permissive | Akuli/htmlthingy | ebc6ebda2e0421f526f973992da34485d6c9a939 | 9f554e9d8e9bc11acb4aeb43e14b76ee1b164c59 | refs/heads/master | 2022-12-09T02:52:09.431346 | 2020-09-07T15:23:14 | 2020-09-07T15:23:19 | 107,789,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,542 | py | import operator
import re
import textwrap
from htmlthingy import tags
def _match_after(regex, the_string, startpos):
for match in regex.finditer(the_string):
if match.start() >= startpos:
return match
return None
class MarkupConverter:
"""Convert markup into HTML.
This class uses regular expressions and functions from
:mod:`htmlthingy.tags`, but you can easily customize it with custom
regexes and callbacks.
Example:
>>> markup = '''
... # Hello World!
...
... ## Subtitle
...
... **Bold**, *italic*, _underline_, ``some code``
...
... (some-id)
... Blah blah blah.
... '''
>>> print(''.join(MarkupConverter().convert(markup)))
<h1 id="hello-world">Hello World!</h1>
<h2 id="subtitle">Subtitle</h2>
<p><b>Bold</b>, <i>italic</i>, <u>underline</u>, <code>some code</code>
</p>
<p id="some-id">Blah blah blah.
</p>
"""
def __init__(self):
self.pygments_style = 'default'
self._inliners = {}
self._multiliners = {}
self._add_basic_stuff()
def convert(self, string, filename='<string>'):
"""Produce output from an input string.
The *filename* will be used as an argument to callbacks, so they
can do different things depending on which file is processed.
Usually it's a relative path to a ``.txt`` input file with
:data:`os.sep` replaced by ``'/'``.
This yields pieces of the final string that are supposed to be
joined together. You can do e.g. this::
with open('input.rst', 'r', encoding='utf-8') as file:
markup = file.read()
with open('output.html', 'w', encoding='utf-8') as file:
                for chunk in MarkupConverter().convert(markup):
file.write(chunk)
Use ``''.join(converter.convert(contents))`` if you want the
output as a string.
"""
for chunk in re.split(r'\n\n(?=\S)', string):
matches = {regex.search(chunk.strip('\n') + '\n')
for regex in self._multiliners} - {None}
if len(matches) > 1:
# TODO: better error
raise ValueError("ambiguous markup:\n\n" + chunk)
if matches:
[match] = matches
yield from self._multiliners[match.re](match, filename)
elif chunk.strip():
yield '<p>'
yield from self.convert_chunk(chunk, filename)
yield '</p>\n\n'
def convert_chunk(self, chunk, filename):
"""
Like :meth:`convert`, but doesn't handle multi-line things (e.g.
titles and code blocks).
Use this instead of :meth:`convert` if you want to parse nested
markup, like ``**a [link](something) inside bold**``.
"""
index = 0
while index < len(chunk):
matches = {_match_after(regex, chunk, index)
for regex in self._inliners} - {None}
if not matches:
yield chunk[index:]
break
firstmatch = min(matches, key=operator.methodcaller('start'))
yield chunk[index:firstmatch.start()]
yield self._inliners[firstmatch.re](firstmatch, filename)
chunk = chunk[firstmatch.end():]
def add_inliner(self, regex):
"""Add a new non-multiline processor function.
Use this as a decorator, like this::
@converter.add_inliner(r'``(.+?)``')
def code(match, filename):
return '<code>' + match.group(1) + '</code>'
The regex can be a string or a compiled regex from
:func:`re.compile`. Use a compiled regex if you want to use
regex flags.
The ``filename`` is a name of the HTML output file, with
:data:`os.sep` replaced with ``'/'``.
"""
if isinstance(regex, str):
regex = re.compile(regex)
def inner(function):
self._inliners[regex] = function
return function
return inner
def add_multiliner(self, regex):
"""Add a new multi-line processor function.
        Use it as a decorator, just like :meth:`add_inliner`. The
        decorated function receives the regex match and the output
        filename, and should be a generator that yields chunks of HTML
        for the whole blank-line-separated chunk that matched.
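        For example, a custom block handler could look roughly like
        this (the ``note:`` marker is made up for illustration)::
            @converter.add_multiliner(r'^note:\n')
            def note_handler(match, filename):
                yield '<div class="note">'
                yield from converter.convert(match.string[match.end():], filename)
                yield '</div>'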
"""
if isinstance(regex, str):
regex = re.compile(regex)
def inner(function):
self._multiliners[regex] = function
return function
return inner
def _add_basic_stuff(self):
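        # register the default handlers: titles, (id) markers, indent/box/image blocks, code blocks, lists, and inline styles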
@self.add_multiliner(r'^(#{1,5})\s*(.*)$')
def title_handler(match, filename):
content = ''.join(self.convert_chunk(match.group(2), filename))
yield tags.title(content, len(match.group(1)))
@self.add_multiliner(r'^\(([\w-]+)\)\n')
def id_adder(match, filename):
markup = match.string[match.end():]
assert markup, "blank line after (...)"
content = ''.join(self.convert(markup, filename)).lstrip()
            regex = re.compile(r'^<(\w+)')  # matches the name of the first opening tag
assert regex.search(content) is not None, "cannot use (...) here"
yield regex.sub(r'<\1 id="%s"' % match.group(1), content, count=1)
@self.add_multiliner(r'^indent:\n')
def indent_handler(match, filename):
markup = textwrap.dedent(match.string[match.end():])
assert markup, "blank line after 'indent:'"
yield '<div class="indent">'
yield from self.convert(markup, filename)
yield '</div>'
# prevent adding a <p> tag
@self.add_multiliner(r'^noparagraph:\n')
def no_paragraph_handler(match, filename):
markup = textwrap.dedent(match.string[match.end():])
assert markup, "blank line after 'noparagraph:'"
yield from self.convert(markup, filename)
@self.add_multiliner(r'^(gray|red)box:(.*)\n')
def box_handler(match, filename):
content = textwrap.dedent(match.string[match.end():])
yield '<div class="box %sbox">' % match.group(1)
if match.group(2).strip():
yield '<h2>'
yield from self.convert_chunk(match.group(2), filename)
yield '</h2>'
yield from self.convert(content, filename)
yield '</div>'
@self.add_multiliner(r'^floatingbox:(.*)\n')
def floating_box_handler(match, filename):
content = textwrap.dedent(match.string[match.end():])
yield '<div class="floatingbox">'
if match.group(1).strip():
yield '<h2>'
yield from self.convert_chunk(match.group(1), filename)
yield '</h2>'
yield from self.convert(content, filename)
yield '</div>'
@self.add_multiliner(r'^image:\s*(\S.*)\n')
def image_handler(match, filename):
css = match.string[match.end():]
yield tags.image(match.group(1), css.replace('\n', ' '))
@self.add_multiliner(r'^comment:')
def do_nothing(match, filename):
if False:
yield
@self.add_multiliner(r'^code:(.*)\n')
def code_handler(match, filename):
code = textwrap.dedent(match.string[match.end():])
yield tags.multiline_code(code, match.group(1).strip() or 'text',
self.pygments_style)
@self.add_multiliner(r'^\* ')
def list_handler(match, filename):
yield '<ul>'
for item in re.split(r'\n\* ', match.string[match.end():]):
yield '<li>'
yield from self.convert_chunk(item, filename)
yield '</li>'
yield '</ul>'
@self.add_multiliner(r'^1\. ')
def numbered_list_handler(match, filename):
yield '<ol>'
            for item in re.split(r'\n\d+\. ', match.string[match.end():]):  # \d+ so items past 9 still split
yield '<li>'
yield from self.convert_chunk(item, filename)
yield '</li>'
yield '</ol>'
@self.add_inliner(r'\B\*\*(.+?)\*\*\B')
def bold_handler(match, filename):
content = ''.join(self.convert_chunk(match.group(1), filename))
return tags.bold(content)
@self.add_inliner(r'\B\*([^\*].*?)\*\B')
def italic_handler(match, filename):
content = ''.join(self.convert_chunk(match.group(1), filename))
return tags.italic(content)
@self.add_inliner(r'\b_(.+?)_\b')
def underline_handler(match, filename):
content = ''.join(self.convert_chunk(match.group(1), filename))
return tags.underline(content)
@self.add_inliner(r'``(.+?)``')
def inline_code_handler(match, filename):
return tags.inline_code(match.group(1))
@self.add_inliner(r'\[([\S\s]+?)\]\((.+?)\)')
def link_handler(match, filename):
content = ''.join(self.convert_chunk(match.group(1), filename))
return tags.link(content, match.group(2))
@self.add_inliner(r'\s--\s')
def en_dash(match, filename):
return ' \N{EN DASH} '
if __name__ == '__main__':
import doctest
print(doctest.testmod())
| [
"akuviljanen17@gmail.com"
] | akuviljanen17@gmail.com |
c94c93a473f20902ced88bade76c7e8d1ae31b1f | d5e94042ac2b248b7701117a6ea941bcc862067a | /upvote/gae/modules/santa_api/main_test.py | cacbfc13901e6c3eb8f97c8ec22633bb4a553014 | [
"Apache-2.0"
] | permissive | codegrande/upvote | f373105203a0595f76c29e138a18a95dc24a63df | e05d477bb13e470127b109eb8905a66a06eed5ac | refs/heads/master | 2020-03-07T19:40:47.185833 | 2019-06-20T14:35:20 | 2019-06-20T14:35:20 | 127,677,753 | 0 | 0 | null | 2018-04-01T22:49:28 | 2018-04-01T22:49:27 | null | UTF-8 | Python | false | false | 957 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for main.py."""
from upvote.gae.shared.common import basetest
class RouteTest(basetest.UpvoteTestCase):
def testImport(self):
# pylint: disable=g-import-not-at-top, unused-variable
from upvote.gae.modules.santa_api import main
# pylint: enable=g-import-not-at-top, unused-variable
if __name__ == '__main__':
basetest.main()
| [
"msuozzo@google.com"
] | msuozzo@google.com |
d66de0e407ab4f4ed1c8b9e8493cb21c10148fc4 | a9d5033858bf54768dbed97e5bac85af2eb7ce2c | /models/engine/file_storage.py | 270bc1b30472aed3e76b3a4c9308f5d64a2195e4 | [] | no_license | merryta/AirBnB_clone | 71942d65e91fd61029e4619ccdeebbb6ed13f9eb | 4e476d2a3565798f60cab82961146210a6170bf9 | refs/heads/main | 2023-07-18T14:30:49.227401 | 2021-09-01T13:15:53 | 2021-09-01T13:15:53 | 390,437,596 | 0 | 1 | null | 2021-07-28T17:14:06 | 2021-07-28T17:14:05 | null | UTF-8 | Python | false | false | 1,489 | py | #!/usr/bin/python3
"""
File storage module
"""
import json
from models.base_model import BaseModel
from models.user import User
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.place import Place
from models.review import Review
class FileStorage:
"""serializes instances to a JSON file and deserializes
JSON file to instances"""
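    # Typical usage (sketch):
    #     storage = FileStorage()
    #     storage.reload()   # load previously saved objects, if any
    #     storage.new(obj)   # start tracking a new object
    #     storage.save()     # persist everything to file.json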
__file_path = "file.json"
__objects = {}
def all(self):
"""returns the dictionary __objects"""
return FileStorage.__objects
def new(self, obj):
""" sets in __objects the obj with key
<obj class name>.id"""
FileStorage.__objects["{}.{}".format(obj.__class__.__name__,
obj.id)] = obj
def save(self):
"""serializes __objects to the JSON file"""
dict_o = FileStorage.__objects
obj_dict = {obj: dict_o[obj].to_dict() for obj in dict_o.keys()}
with open(FileStorage.__file_path, "w") as f:
json.dump(obj_dict, f)
def reload(self):
"""deserializes the JSON file to __objects"""
try:
with open(FileStorage.__file_path) as f:
obj_dict = json.load(f)
for item in obj_dict.values():
class_name = item["__class__"]
del item["__class__"]
self.new(eval(class_name)(**item))
except FileNotFoundError:
return
| [
"shikandadennis07@gmail.com"
] | shikandadennis07@gmail.com |
561e2326627bbb9767ed6374fef7c9e77c6c0ae9 | b7fc15a7aa3596facb7373bae9ea12124729cec3 | /Anotações de aula/Unidade20 - Hashing/criar.py | 8412b3adaae8d9a438e0593c13ca051b54b66acd | [] | no_license | gigennari/mc202 | 0801d081eb347b1106626dfa80c9f2a3e09e49ad | 22c88873028204fe86138bc241c547042417889e | refs/heads/master | 2023-03-30T17:02:32.569080 | 2021-03-18T18:03:15 | 2021-03-18T18:03:15 | 354,131,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | """
Test case 1: 10 100
Test case 2: 100000 72000
Test case 3: 100000 720000
"""
import random
numero_palavras = 10000
numero_ocorrencias = 72000
palavras = list()
num = 0
while len(palavras) < numero_palavras:
palavras.append('palavra' + str(num))
num += 1
num = 0
while num < numero_ocorrencias:
print(random.choice(palavras))
num += 1
| [
"g198010@dac.unicamp.br"
] | g198010@dac.unicamp.br |
ab8fb83994515cf9e89d97044d98deb791d77949 | f75632bafa7d9771655a2030cbf009e73682d0f2 | /woot/apps/distribution/tasks.py | 3f68728a4931983442732570a6c290a6a7202037 | [] | no_license | NicholasPiano/arktic | 563acab350e9d4f1ef5375d9237cba6d2df04295 | f28d868373f00aa8a817239f5bc167dadb50b053 | refs/heads/master | 2016-09-08T01:20:06.885034 | 2015-02-28T16:28:57 | 2015-02-28T16:28:57 | 19,547,469 | 0 | 1 | null | 2014-09-23T11:27:00 | 2014-05-07T19:31:26 | JavaScript | UTF-8 | Python | false | false | 3,292 | py | #apps.distribution.tasks
#django
from django.conf import settings
#local
from apps.distribution.models import Client, Project
from apps.transcription.models import Grammar
from apps.transcription.models import Transcription, CSVFile, WavFile
from libs.utils import generate_id_token
#util
import os
#third party
from celery import task
#from apps.distribution.tasks import scan_data; scan_data();
@task()
def scan_data():
'''
Walks through data directory and finds new grammars, creating them and adding them to the right clients and projects.
'''
#1. get all filenames+paths in project dir
#2. get all filenames from all csv files in project dir -> dictionary
    #3. create grammar records and attach the matching wav files listed in each csv row
data_dir = os.path.join(settings.DJANGO_ROOT, 'data')
for name in os.listdir(data_dir):
client, created = Client.objects.get_or_create(name=name)
if created: #scan directory for grammars
client.client_path = os.path.join(data_dir, name)
client.save()
print('created client: ' + str(client))
for project_name in [dir_i for dir_i in os.listdir(client.client_path) if os.path.isdir(os.path.join(client.client_path, dir_i))]:
project, created = client.projects.get_or_create(name=project_name)
if created:
project.id_token = generate_id_token(Project)
project.project_path = os.path.join(client.client_path, project_name)
project.save()
print('created project: ' + str(project))
#generate list of .csv files and list of .wav files
csv_file_list = []
wav_file_dictionary = {}
for sup, subs, file_list in os.walk(project.project_path):
for file_name in file_list:
if '.csv' in file_name and 'Unsorted' not in sup and 'save' not in sup:
csv_file_list.append(file_name)
root, ext = os.path.splitext(file_name)
project.csv_files.get_or_create(client=client, name=root, file_name=file_name, path=sup)
elif '.wav' in file_name:
wav_file_dictionary[file_name] = os.path.join(sup, file_name)
for i, csv_file in enumerate(project.csv_files.all()):
grammar, created = project.grammars.get_or_create(client=client, name=csv_file.name)
if created:
grammar.csv_file = csv_file
grammar.id_token = generate_id_token(Grammar)
print('created grammar ' + str(grammar))
with open(os.path.join(csv_file.path, csv_file.file_name)) as open_rel_file:
lines = open_rel_file.readlines()
for j, line in enumerate(lines):
                        tokens = line.split('|')  # this can be part of a relfile parser object with delimiter '|'
transcription_audio_file_name = os.path.basename(tokens[0])
grammar.wav_files.get_or_create(client=client, project=project, path=wav_file_dictionary[transcription_audio_file_name], file_name=transcription_audio_file_name)
print('grammar %d/%d, wav %d/%d'%(i+1,project.csv_files.count(),j+1,len(lines)), end='\r' if j<len(lines)-1 else '\n')
grammar.save()
csv_file.save()
@task()
def process_grammar(grammar_id_token):
grammar = Grammar.objects.get(id_token=grammar_id_token)
grammar.process()
for transcription in grammar.transcriptions.all():
transcription.process()
| [
"nicholas.d.piano@gmail.com"
] | nicholas.d.piano@gmail.com |
38993334e2ac97d9cadceb588d4434dc6502e8b1 | 2c84afdb7d80fd482738e2f8f715b717fecd0332 | /setup.py | 8367fcb7679105852081e44bb26831582a9372c6 | [
"Apache-2.0"
] | permissive | templeblock/g2pM | 16c36da17121a4f249f64d7895de38e50c459bda | e5b4f903364e496beebf03af7b40d4b5e8b2419f | refs/heads/master | 2022-11-23T13:46:50.952224 | 2020-07-27T03:07:28 | 2020-07-27T03:07:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 943 | py | import setuptools
setuptools.setup(
name="g2pM",
version="0.1.2.4",
license='Apache License 2.0',
author="Seanie Lee",
author_email="lsnfamily02@gmail.com",
description="g2pM: A Neural Grapheme-to-Phoneme Conversion Package for MandarinChinese",
long_description=open('README.md').read(),
long_description_content_type="text/markdown",
url="https://github.com/kakaobrain/g2pM",
packages=setuptools.find_packages(),
python_requires=">=3.6",
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
| [
"lsnfamily02@naver.com"
] | lsnfamily02@naver.com |
f822112fc9a971124348558fda6eb04497c71c8e | be2185ca694c8f476b1425128669b1fa086c7107 | /tests/sandbox/handlers/test.py | 619f8afb89036a055c9b4e52d23b928650860100 | [] | no_license | scorphus/cow | 51983b13cd81d40a938762c1da4217a5d8584601 | bd633305f8bbdc4f5773685a8533edf76503e8e6 | refs/heads/master | 2021-01-16T21:58:38.097609 | 2014-07-15T18:00:09 | 2014-07-15T18:00:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from tornado.web import RequestHandler
class TestHandler(RequestHandler):
def get(self):
self.write(self.application.config.TESTCONF)
| [
"heynemann@gmail.com"
] | heynemann@gmail.com |
f636a2d707f57716c0f778ca500d9a5f8ff46b41 | 8e69eee9b474587925e22413717eb82e4b024360 | /v1.0.0.test/toontown/coghq/DistributedCountryClubBattleAI.py | be61dbe9e49a5cd8aa2e40b365c680060ab6f387 | [
"MIT"
] | permissive | TTOFFLINE-LEAK/ttoffline | afaef613c36dc3b70514ccee7030ba73c3b5045b | bb0e91704a755d34983e94288d50288e46b68380 | refs/heads/master | 2020-06-12T15:41:59.411795 | 2020-04-17T08:22:55 | 2020-04-17T08:22:55 | 194,348,185 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,284 | py | from toontown.toonbase import ToontownGlobals
from toontown.coghq import DistributedLevelBattleAI
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import State
from direct.fsm import ClassicFSM, State
from toontown.battle.BattleBase import *
import CogDisguiseGlobals
from toontown.toonbase.ToontownBattleGlobals import getCountryClubCreditMultiplier
from direct.showbase.PythonUtil import addListsByValue
class DistributedCountryClubBattleAI(DistributedLevelBattleAI.DistributedLevelBattleAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedCountryClubBattleAI')
def __init__(self, air, battleMgr, pos, suit, toonId, zoneId, level, battleCellId, roundCallback=None, finishCallback=None, maxSuits=4):
DistributedLevelBattleAI.DistributedLevelBattleAI.__init__(self, air, battleMgr, pos, suit, toonId, zoneId, level, battleCellId, 'CountryClubReward', roundCallback, finishCallback, maxSuits)
self.battleCalc.setSkillCreditMultiplier(1)
if self.bossBattle:
self.level.d_setBossConfronted(toonId)
self.fsm.addState(State.State('CountryClubReward', self.enterCountryClubReward, self.exitCountryClubReward, ['Resume']))
playMovieState = self.fsm.getStateNamed('PlayMovie')
playMovieState.addTransition('CountryClubReward')
def getTaskZoneId(self):
return self.level.countryClubId
def handleToonsWon(self, toons):
        extraMerits = [0, 0, 0, 0]
amount = ToontownGlobals.CountryClubCogBuckRewards[self.level.countryClubId]
index = ToontownGlobals.cogHQZoneId2deptIndex(self.level.countryClubId)
extraMerits[index] = amount
for toon in toons:
recovered, notRecovered = self.air.questManager.recoverItems(toon, self.suitsKilled, self.getTaskZoneId())
self.toonItems[toon.doId][0].extend(recovered)
self.toonItems[toon.doId][1].extend(notRecovered)
meritArray = self.air.promotionMgr.recoverMerits(toon, self.suitsKilled, self.getTaskZoneId(), getCountryClubCreditMultiplier(self.getTaskZoneId()), extraMerits=extraMerits)
if toon.doId in self.helpfulToons:
self.toonMerits[toon.doId] = addListsByValue(self.toonMerits[toon.doId], meritArray)
else:
self.notify.debug('toon %d not helpful list, skipping merits' % toon.doId)
def enterCountryClubReward(self):
self.joinableFsm.request('Unjoinable')
self.runableFsm.request('Unrunable')
self.resetResponses()
self.assignRewards()
self.bossDefeated = 1
self.level.setVictors(self.activeToons[:])
self.timer.startCallback(BUILDING_REWARD_TIMEOUT, self.serverRewardDone)
return
def exitCountryClubReward(self):
return
def enterResume(self):
DistributedLevelBattleAI.DistributedLevelBattleAI.enterResume(self)
if self.bossBattle and self.bossDefeated:
self.battleMgr.level.b_setDefeated()
def enterReward(self):
DistributedLevelBattleAI.DistributedLevelBattleAI.enterReward(self)
roomDoId = self.getLevelDoId()
room = simbase.air.doId2do.get(roomDoId)
if room:
room.challengeDefeated() | [
"s0mberdemise@protonmail.com"
] | s0mberdemise@protonmail.com |
3c7e3656ee88fd41d3feeb0279d5c585f3780f0f | b08f5367ffd3bdd1463de2ddc05d34cbfba6796e | /arrays/enumerate_primes.py | 6c7038972e282198034637fed64a0a7c27849707 | [] | no_license | uohzxela/fundamentals | cb611fa6c820dc8643a43fd045efe96bc43ba4ed | 6bbbd489c3854fa4bf2fe73e1a2dfb2efe4aeb94 | refs/heads/master | 2020-04-04T03:56:44.145222 | 2018-04-05T01:08:14 | 2018-04-05T01:08:14 | 54,199,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | def enum_primes(n):
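    """Sieve of Eratosthenes: return all primes <= n, in increasing order."""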
primes = []
is_primes = [True for i in xrange(n+1)]
for i in xrange(2, n+1):
if is_primes[i]:
for j in xrange(i*i, n+1, i):
is_primes[j] = False
primes.append(i)
return primes
assert enum_primes(200) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,
61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157,
163, 167, 173, 179, 181, 191, 193, 197, 199]
assert enum_primes(18) == [2, 3, 5, 7, 11, 13, 17]
| [
"uohzxela@gmail.com"
] | uohzxela@gmail.com |
07c6192b7b075ee10dcfa5d5494b1986e19a39f6 | bc441bb06b8948288f110af63feda4e798f30225 | /topology_sdk/model/ops_automation/job_details_pb2.py | d2cba7c54d957a7db62940c518ec57ca9f04de40 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 10,214 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: job_details.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from topology_sdk.model.ops_automation import bind_resource_pb2 as topology__sdk_dot_model_dot_ops__automation_dot_bind__resource__pb2
from topology_sdk.model.ops_automation import mail_info_pb2 as topology__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='job_details.proto',
package='ops_automation',
syntax='proto3',
serialized_options=_b('ZHgo.easyops.local/contracts/protorepo-models/easyops/model/ops_automation'),
serialized_pb=_b('\n\x11job_details.proto\x12\x0eops_automation\x1a\x35topology_sdk/model/ops_automation/bind_resource.proto\x1a\x31topology_sdk/model/ops_automation/mail_info.proto\"\x87\x03\n\nJobDetails\x12\x0f\n\x07version\x18\x01 \x01(\x05\x12\x12\n\ncreateTime\x18\x02 \x01(\t\x12\x12\n\nupdateTime\x18\x03 \x01(\t\x12\x0f\n\x07\x63reator\x18\x04 \x01(\t\x12\x0b\n\x03org\x18\x05 \x01(\x05\x12\x37\n\tscheduler\x18\x06 \x01(\x0b\x32$.ops_automation.JobDetails.Scheduler\x12\x0c\n\x04name\x18\x07 \x01(\t\x12\x10\n\x08\x63\x61tegory\x18\x08 \x01(\t\x12\x0e\n\x06menuId\x18\t \x01(\t\x12\x32\n\x0c\x62indResource\x18\n \x01(\x0b\x32\x1c.ops_automation.BindResource\x12\x0c\n\x04\x64\x65sc\x18\x0b \x01(\t\x12\x13\n\x0b\x61llowModify\x18\x0c \x01(\x08\x12&\n\x04mail\x18\r \x01(\x0b\x32\x18.ops_automation.MailInfo\x12\n\n\x02id\x18\x0e \x01(\t\x1a.\n\tScheduler\x12\x0f\n\x07isBound\x18\x01 \x01(\x08\x12\x10\n\x08isActive\x18\x02 \x01(\x08\x42JZHgo.easyops.local/contracts/protorepo-models/easyops/model/ops_automationb\x06proto3')
,
dependencies=[topology__sdk_dot_model_dot_ops__automation_dot_bind__resource__pb2.DESCRIPTOR,topology__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2.DESCRIPTOR,])
_JOBDETAILS_SCHEDULER = _descriptor.Descriptor(
name='Scheduler',
full_name='ops_automation.JobDetails.Scheduler',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='isBound', full_name='ops_automation.JobDetails.Scheduler.isBound', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='isActive', full_name='ops_automation.JobDetails.Scheduler.isActive', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=489,
serialized_end=535,
)
_JOBDETAILS = _descriptor.Descriptor(
name='JobDetails',
full_name='ops_automation.JobDetails',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='version', full_name='ops_automation.JobDetails.version', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='createTime', full_name='ops_automation.JobDetails.createTime', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='updateTime', full_name='ops_automation.JobDetails.updateTime', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='ops_automation.JobDetails.creator', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='org', full_name='ops_automation.JobDetails.org', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='scheduler', full_name='ops_automation.JobDetails.scheduler', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='ops_automation.JobDetails.name', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='category', full_name='ops_automation.JobDetails.category', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='menuId', full_name='ops_automation.JobDetails.menuId', index=8,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='bindResource', full_name='ops_automation.JobDetails.bindResource', index=9,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='desc', full_name='ops_automation.JobDetails.desc', index=10,
number=11, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allowModify', full_name='ops_automation.JobDetails.allowModify', index=11,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='mail', full_name='ops_automation.JobDetails.mail', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='ops_automation.JobDetails.id', index=13,
number=14, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_JOBDETAILS_SCHEDULER, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=144,
serialized_end=535,
)
_JOBDETAILS_SCHEDULER.containing_type = _JOBDETAILS
_JOBDETAILS.fields_by_name['scheduler'].message_type = _JOBDETAILS_SCHEDULER
_JOBDETAILS.fields_by_name['bindResource'].message_type = topology__sdk_dot_model_dot_ops__automation_dot_bind__resource__pb2._BINDRESOURCE
_JOBDETAILS.fields_by_name['mail'].message_type = topology__sdk_dot_model_dot_ops__automation_dot_mail__info__pb2._MAILINFO
DESCRIPTOR.message_types_by_name['JobDetails'] = _JOBDETAILS
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
JobDetails = _reflection.GeneratedProtocolMessageType('JobDetails', (_message.Message,), {
'Scheduler' : _reflection.GeneratedProtocolMessageType('Scheduler', (_message.Message,), {
'DESCRIPTOR' : _JOBDETAILS_SCHEDULER,
'__module__' : 'job_details_pb2'
# @@protoc_insertion_point(class_scope:ops_automation.JobDetails.Scheduler)
})
,
'DESCRIPTOR' : _JOBDETAILS,
'__module__' : 'job_details_pb2'
# @@protoc_insertion_point(class_scope:ops_automation.JobDetails)
})
_sym_db.RegisterMessage(JobDetails)
_sym_db.RegisterMessage(JobDetails.Scheduler)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"service@easyops.cn"
] | service@easyops.cn |
e6634981904e2b25a3fc4a233753b07e3e20c60b | fa032ddde94e7e397b15940159c706dadc89559c | /packages/std/nodes/std___If0/std___If0___METACODE.py | 861f76d269249d5ad29ed8b79a4fe738116bed05 | [
"MIT"
] | permissive | rkoschmitzky/pyScript | dd97ccb5d196c610de5df982ea006fa58652b82d | b60c6d3cdc3856e3b59843feaa7bdd2461f10158 | refs/heads/master | 2022-11-09T12:52:54.055020 | 2020-06-13T09:17:01 | 2020-06-13T09:17:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,504 | py | from custom_src.NodeInstance import NodeInstance
from custom_src.Node import Node
# USEFUL
# self.input(index) <- access to input data
# self.outputs[index].set_val(val) <- set output data port value
# self.main_widget <- access to main widget
# self.exec_output(index) <- executes an execution output
# self.create_new_input(type_, label, append=True, widget_type='', widget_name='', widget_pos='under', pos=-1)
# self.delete_input(input or index)
# self.create_new_output(type_, label, append=True, pos=-1)
# self.delete_output(output or index)
# self.update_shape()
class %NODE_TITLE%_NodeInstance(NodeInstance):
def __init__(self, parent_node: Node, flow, configuration=None):
super(%NODE_TITLE%_NodeInstance, self).__init__(parent_node, flow, configuration)
self.special_actions['add else if'] = {'method': self.action_add_else_if}
self.else_if_enlargement_state = 0
self.initialized()
def action_add_else_if(self):
self.create_new_input('data', 'condition '+str(self.else_if_enlargement_state+1), widget_type='std line edit', widget_pos='under')
self.create_new_output('exec', 'elif '+str(self.else_if_enlargement_state+1), append=False, pos=len(self.outputs)-1)
self.else_if_enlargement_state += 1
self.special_actions['remove else if'] = {'method': self.action_remove_else_if}
self.update_shape()
def action_remove_else_if(self):
self.delete_input(self.inputs[-1])
self.delete_output(self.outputs[-2])
self.else_if_enlargement_state -= 1
if self.else_if_enlargement_state == 0:
del self.special_actions['remove else if']
self.update_shape()
def update_event(self, input_called=-1):
if input_called == 0:
self.do_if(0, self.else_if_enlargement_state)
    def do_if(self, if_cnt, current_enlargement_state):
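        # fire the branch of the first true condition; otherwise recurse to the next one, falling through to the final 'else' output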
if self.input(1+if_cnt):
self.exec_output(if_cnt)
        elif if_cnt < current_enlargement_state:
            self.do_if(if_cnt+1, current_enlargement_state)
else:
self.exec_output(len(self.outputs)-1)
def get_data(self):
data = {'else if enlargment state': self.else_if_enlargement_state}
return data
def set_data(self, data):
self.else_if_enlargement_state = data['else if enlargment state']
# optional - important for threading - stop everything here
def removing(self):
pass
| [
"leon.thomm@gmx.de"
] | leon.thomm@gmx.de |
2b19d579f3da6bd7b7e1259c5e69e68e9e083a04 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-roma/huaweicloudsdkroma/v2/model/env_info.py | b4922f230a4b405600fc3104026de9308661d152 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,936 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class EnvInfo:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'create_time': 'datetime',
'name': 'str',
'remark': 'str',
'id': 'str'
}
attribute_map = {
'create_time': 'create_time',
'name': 'name',
'remark': 'remark',
'id': 'id'
}
def __init__(self, create_time=None, name=None, remark=None, id=None):
"""EnvInfo
The model defined in huaweicloud sdk
:param create_time: 创建时间
:type create_time: datetime
:param name: 环境名称
:type name: str
:param remark: 描述信息
:type remark: str
:param id: 环境编号
:type id: str
"""
self._create_time = None
self._name = None
self._remark = None
self._id = None
self.discriminator = None
if create_time is not None:
self.create_time = create_time
if name is not None:
self.name = name
if remark is not None:
self.remark = remark
if id is not None:
self.id = id
@property
def create_time(self):
"""Gets the create_time of this EnvInfo.
创建时间
:return: The create_time of this EnvInfo.
:rtype: datetime
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""Sets the create_time of this EnvInfo.
创建时间
:param create_time: The create_time of this EnvInfo.
:type create_time: datetime
"""
self._create_time = create_time
@property
def name(self):
"""Gets the name of this EnvInfo.
环境名称
:return: The name of this EnvInfo.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this EnvInfo.
环境名称
:param name: The name of this EnvInfo.
:type name: str
"""
self._name = name
@property
def remark(self):
"""Gets the remark of this EnvInfo.
描述信息
:return: The remark of this EnvInfo.
:rtype: str
"""
return self._remark
@remark.setter
def remark(self, remark):
"""Sets the remark of this EnvInfo.
描述信息
:param remark: The remark of this EnvInfo.
:type remark: str
"""
self._remark = remark
@property
def id(self):
"""Gets the id of this EnvInfo.
环境编号
:return: The id of this EnvInfo.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this EnvInfo.
环境编号
:param id: The id of this EnvInfo.
:type id: str
"""
self._id = id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EnvInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
6db96b1402f7b74098c653be32e69a153e0e21db | a660f0674e816e7f97353c0eec7c9960eed36889 | /examples/embedded_boundary.py | 97e535165514712553cbcd0b5270a719c6cb82eb | [
"Apache-2.0"
] | permissive | dbstein/ipde | da4642cbd26e4857c966123ed6654f38ddf5dff6 | a254bf128eba835284935290b8de09eb1374aa3f | refs/heads/master | 2022-07-22T14:29:47.420137 | 2022-07-13T18:30:10 | 2022-07-13T18:30:10 | 215,557,734 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,258 | py | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pybie2d
from ipde.embedded_boundary import EmbeddedBoundary
from ipde.heavisides import SlepianMollifier
from ipde.derivatives import fd_x_4, fd_y_4, fourier
from personal_utilities.arc_length_reparametrization import arc_length_parameterize
star = pybie2d.misc.curve_descriptions.star
GSB = pybie2d.boundaries.global_smooth_boundary.global_smooth_boundary.Global_Smooth_Boundary
Grid = pybie2d.grid.Grid
nb = 200
ng = int(nb/2)
M = 16
pad_zone = 4
interior = False
slepian_r = 20
reparametrize = False
# get heaviside function
MOL = SlepianMollifier(slepian_r)
# construct boundary
bdy = GSB(c=star(nb, a=0.1, f=5))
# optionally reparametrize the boundary by arc length
if reparametrize:
bdy = GSB(*arc_length_parameterize(bdy.x, bdy.y))
# construct a grid
grid = Grid([-1.5, 1.5], ng, [-1.5, 1.5], ng, x_endpoints=[True, False], y_endpoints=[True, False])
# construct embedded boundary
ebdy = EmbeddedBoundary(bdy, interior, M, grid.xh*0.75, pad_zone, MOL.step)
# register the grid
print('\nRegistering the grid')
ebdy.register_grid(grid, verbose=True)
################################################################################
# Make basic plots
fig, ax = plt.subplots()
ax.pcolormesh(grid.xg, grid.yg, ebdy.phys)
ax.scatter(bdy.x, bdy.y, color='white', s=20)
ax.set_title('Phys')
fig, ax = plt.subplots()
ax.pcolormesh(grid.xg, grid.yg, ebdy.grid_in_annulus)
ax.scatter(bdy.x, bdy.y, color='white', s=20)
ax.set_title('In Annulus')
fig, ax = plt.subplots()
ax.pcolormesh(grid.xg, grid.yg, ebdy.grid_step)
ax.scatter(bdy.x, bdy.y, color='white', s=20)
ax.set_title('Heaviside')
fig, ax = plt.subplots()
ax.scatter(ebdy.radial_x, ebdy.radial_y, color='blue', s=10, label='special coordinates')
ax.scatter(ebdy.bdy.x, ebdy.bdy.y, color='black', s=10, label='boundary')
ax.scatter(ebdy.interface.x, ebdy.interface.y, color='gray', s=10, label='interface')
ax.legend()
ax.set_title('Special Coordinates')
################################################################################
# Test interpolation operations
k = 2*np.pi/3
test_func = lambda x, y: np.exp(np.sin(k*x))*np.sin(k*y)
test_func_x = lambda x, y: k*np.exp(np.sin(k*x))*np.cos(k*x)*np.sin(k*y)
test_func_y = lambda x, y: k*np.exp(np.sin(k*x))*np.cos(k*y)
# Interpolation of a globally smooth function on grid to radial
f = test_func(grid.xg, grid.yg)
fr = test_func(ebdy.radial_x, ebdy.radial_y)
fe = ebdy.interpolate_grid_to_radial(f, order=5)
err = np.abs(fe-fr).max()
print('Error in grid --> radial interpolation: {:0.2e}'.format(err))
# Interpolation of a function to the interface
fr = test_func(ebdy.interface.x, ebdy.interface.y)
fe = ebdy.interpolate_grid_to_interface(f, order=5)
err = np.abs(fe-fr).max()
print('Error in grid --> interface interpolation: {:0.2e}'.format(err))
# Interpolation of a function from radial to grid
fr = test_func(ebdy.radial_x, ebdy.radial_y)
ft = ebdy.interpolate_radial_to_grid(fr)
fe = test_func(ebdy.grid_ia_x, ebdy.grid_ia_y)
err = np.abs(fe-ft).max()
print('Error in radial --> grid interpolation: {:0.2e}'.format(err))
################################################################################
# Test derivatives
# radial gradient
frxe, frye = ebdy.radial_grid_derivatives(fr)
frxt = test_func_x(ebdy.radial_x, ebdy.radial_y)
fryt = test_func_y(ebdy.radial_x, ebdy.radial_y)
err_x = np.abs(frxt-frxe).max()
err_y = np.abs(fryt-frye).max()
err = max(err_x, err_y)
print('Error in radial grid differentiation: {:0.2e}'.format(err))
# fourth order accurate gradient on whole domain
dx = lambda x: fd_x_4(x, grid.xh, periodic_fix=not interior)
dy = lambda x: fd_y_4(x, grid.yh, periodic_fix=not interior)
fxe, fye, fxre, fyre = ebdy.gradient(f, fr, dx, dy)
fxt = test_func_x(grid.xg, grid.yg)
fyt = test_func_y(grid.xg, grid.yg)
err_x = np.abs(fxt-fxe)[ebdy.phys].max()
err_y = np.abs(fyt-fye)[ebdy.phys].max()
err = max(err_x, err_y)
print('Error in gradient, 4th order FD: {:0.2e}'.format(err))
# spectrally accurate gradient on whole domain
kxv = np.fft.fftfreq(grid.Nx, grid.xh/(2*np.pi))
kyv = np.fft.fftfreq(grid.Ny, grid.yh/(2*np.pi))
kx, ky = np.meshgrid(kxv, kyv, indexing='ij')
ikx, iky = 1j*kx, 1j*ky
dx = lambda x: fourier(x, ikx)
dy = lambda x: fourier(x, iky)
fxe, fye, fxre, fyre = ebdy.gradient(f, fr, dx, dy)
err_x = np.abs(fxt-fxe)[ebdy.phys].max()
err_y = np.abs(fyt-fye)[ebdy.phys].max()
err = max(err_x, err_y)
print('Error in gradient, Fourier: {:0.2e}'.format(err))
################################################################################
# Plot QFS Boundaries
fig, ax = plt.subplots()
ax.scatter(ebdy.bdy.x, ebdy.bdy.y, color='black', s=10, label='boundary')
ax.scatter(ebdy.interface.x, ebdy.interface.y, color='gray', s=10, label='interface')
bb = ebdy.bdy_qfs.interior_source_bdy if interior else ebdy.bdy_qfs.exterior_source_bdy
ax.scatter(bb.x, bb.y, color='blue', s=10, label='boundary effective')
bb = ebdy.interface_qfs.exterior_source_bdy
ax.scatter(bb.x, bb.y, color='red', s=10, label='interface effective 1')
bb = ebdy.interface_qfs.interior_source_bdy
ax.scatter(bb.x, bb.y, color='pink', s=10, label='interface effective 2')
ax.legend()
ax.set_title('QFS Boundaries')
| [
"dstein@flatironinstitute.org"
] | dstein@flatironinstitute.org |
43bf04948b4c8cc7dce04c4cf69f8c9dca6edab9 | 3c4e5c4ccb10b6aa529de50c61839e9207aa6306 | /tests/models/stats_result.py | db8128a1e1c54029e9af1a5e637e2e84a178a974 | [
"MIT"
] | permissive | stefaneutu/Jelly-Bot | 8ccde6482f767e8dfe85fdd1684760e64c86d227 | e3463760c8325c160acbe87c1374c2bab73e2731 | refs/heads/master | 2022-09-10T06:02:51.557389 | 2020-05-22T00:36:03 | 2020-05-22T00:36:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44,537 | py | from datetime import datetime, timezone, date, timedelta
from time import gmtime, strftime
from bson import ObjectId
from django.test import TestCase
from pymongo.collection import Collection
from extutils.dt import TimeRange
from flags import MessageType, BotFeature
from tests.base import TestDatabaseMixin
from models import (
HourlyResult, DailyResult, HourlyIntervalAverageMessageResult, DailyMessageResult, MemberMessageByCategoryResult,
MeanMessageResultGenerator, MemberDailyMessageResult, CountBeforeTimeResult, MemberMessageCountResult,
MemberMessageCountEntry, BotFeatureUsageResult, BotFeaturePerUserUsageResult, BotFeatureHourlyAvgResult
)
from strnames.models import StatsResults
__all__ = ["TestDailyResult", "TestHourlyResult", "TestHourlyIntervalAverageMessageResult", "TestDailyMessageResult",
"TestMeanMessageResultGenerator", "TestMemberDailyMessageResult", "TestMemberMessageCountResult",
"TestMemberMessageByCategoryResult", "TestBotFeatureUsageResult", "TestBotFeaturePerUserUsageResult",
"TestBotFeatureHourlyAvgResult"]
class TestHourlyResult(TestDatabaseMixin):
class TestSample1(HourlyResult):
pass
def test_no_days_collected(self):
result = TestHourlyResult.TestSample1(0)
data = (
(self.assertFalse, (result.avg_calculatable,)),
(self.assertEqual, (result.denom, []))
)
for method, args in data:
with self.subTest(method=method, args=args):
method(*args)
def test_has_days_collected(self):
expected_denoms = {
0: [6] * 1 + [5] * 11 + [6] * 12,
1: [6] * 2 + [5] * 11 + [6] * 11,
2: [6] * 3 + [5] * 11 + [6] * 10,
3: [6] * 4 + [5] * 11 + [6] * 9,
4: [6] * 5 + [5] * 11 + [6] * 8,
5: [6] * 6 + [5] * 11 + [6] * 7,
6: [6] * 7 + [5] * 11 + [6] * 6,
7: [6] * 8 + [5] * 11 + [6] * 5,
8: [6] * 9 + [5] * 11 + [6] * 4,
9: [6] * 10 + [5] * 11 + [6] * 3,
10: [6] * 11 + [5] * 11 + [6] * 2,
11: [6] * 12 + [5] * 11 + [6] * 1,
12: [6] * 13 + [5] * 11,
13: [5] * 1 + [6] * 13 + [5] * 10,
14: [5] * 2 + [6] * 13 + [5] * 9,
15: [5] * 3 + [6] * 13 + [5] * 8,
16: [5] * 4 + [6] * 13 + [5] * 7,
17: [5] * 5 + [6] * 13 + [5] * 6,
18: [5] * 6 + [6] * 13 + [5] * 5,
19: [5] * 7 + [6] * 13 + [5] * 4,
20: [5] * 8 + [6] * 13 + [5] * 3,
21: [5] * 9 + [6] * 13 + [5] * 2,
22: [5] * 10 + [6] * 13 + [5] * 1,
23: [5] * 11 + [6] * 13
}
for hr in range(24):
result = TestHourlyResult.TestSample1(5.5, end_time=datetime(2020, 5, 7, hr))
data = (
(self.assertTrue, (result.avg_calculatable,)),
(self.assertEqual, (result.denom, expected_denoms[hr])) # Incorrect
)
for method, args in data:
with self.subTest(hr=hr, method=method.__name__, args=args):
method(*args)
now = datetime.utcnow()
result = TestHourlyResult.TestSample1(5.5)
data = (
(self.assertTrue, (result.avg_calculatable,)),
(self.assertEqual, (result.denom, expected_denoms[now.hour])) # Incorrect
)
for method, args in data:
with self.subTest(hr=now.hour, method=method.__name__, args=args):
method(*args)
def prepare_data(self) -> Collection:
col = self.get_collection("testcol")
col.insert_one({"_id": ObjectId.from_datetime(datetime(2020, 5, 1))})
return col
def test_data_days_collected_has_data(self):
col = self.prepare_data()
days_past_from_oldest = (datetime.utcnow() - datetime(2020, 5, 1)).total_seconds() / 86400
data = (
(30, {"end": datetime(2020, 5, 31)}),
(30, {"end": datetime(2020, 5, 31).replace(tzinfo=timezone.utc)}),
(HourlyResult.DAYS_NONE, {"end": datetime(2020, 4, 1)}),
(HourlyResult.DAYS_NONE, {"end": datetime(2020, 4, 1).replace(tzinfo=timezone.utc)}),
(HourlyResult.DAYS_NONE, {"start": datetime(2090, 5, 31)}),
(HourlyResult.DAYS_NONE, {"start": datetime(2090, 5, 31).replace(tzinfo=timezone.utc)}),
(days_past_from_oldest, {"start": datetime(2020, 4, 1)}),
(days_past_from_oldest, {"start": datetime(2020, 4, 1).replace(tzinfo=timezone.utc)}),
(days_past_from_oldest, {}),
(15, {"start": datetime(2020, 4, 1), "end": datetime(2020, 4, 16), "hr_range": 5})
)
for expected_value, kwargs in data:
with self.subTest(expected_value=expected_value, kwargs=kwargs):
self.assertAlmostEqual(expected_value,
TestHourlyResult.TestSample1.data_days_collected(col, {}, **kwargs),
0)
def test_data_days_collected_no_data(self):
col = self.get_collection("AAAAA")
dc = TestHourlyResult.TestSample1.data_days_collected(col, {}, end=datetime(2090, 5, 31))
self.assertEqual(dc, 0)
class TestDailyResult(TestDatabaseMixin):
def test_date_list(self):
data = (
(5, {},
[datetime.utcnow().date() - timedelta(days=i) for i in range(5, -1, -1)]),
(5, {"start": datetime(2020, 5, 1)},
[date(2020, 5, 1), date(2020, 5, 2), date(2020, 5, 3),
date(2020, 5, 4), date(2020, 5, 5), date(2020, 5, 6)]),
(5.5, {"start": datetime(2020, 5, 1)},
[date(2020, 5, 1), date(2020, 5, 2), date(2020, 5, 3),
date(2020, 5, 4), date(2020, 5, 5), date(2020, 5, 6)]),
(5, {"end": datetime(2020, 5, 1)},
[date(2020, 4, 26), date(2020, 4, 27), date(2020, 4, 28),
date(2020, 4, 29), date(2020, 4, 30), date(2020, 5, 1)]),
(5, {"start": datetime(2020, 5, 1), "end": datetime(2020, 5, 3)},
[date(2020, 5, 1), date(2020, 5, 2), date(2020, 5, 3)]),
(5, {"start": datetime(2020, 5, 1), "end": datetime(2020, 5, 3),
"trange": TimeRange(start=datetime(2020, 5, 3), end=datetime(2020, 5, 6))},
[date(2020, 5, 3), date(2020, 5, 4), date(2020, 5, 5), date(2020, 5, 6)]),
(5, {"trange": TimeRange(start=datetime(2020, 5, 3), end=datetime(2020, 5, 6))},
[date(2020, 5, 3), date(2020, 5, 4), date(2020, 5, 5), date(2020, 5, 6)])
)
for days_collected, kwargs, expected_datelist in data:
actual_datelist = DailyResult.date_list(days_collected, timezone.utc, **kwargs)
with self.subTest(days_collected=days_collected, kwargs=kwargs):
self.assertEqual(actual_datelist, expected_datelist)
def test_date_list_str(self):
data = (
(5, {},
[(datetime.utcnow().date() - timedelta(days=i)).strftime(DailyResult.FMT_DATE)
for i in range(5, -1, -1)]),
(5, {"start": datetime(2020, 5, 1)},
["2020-05-01", "2020-05-02", "2020-05-03", "2020-05-04", "2020-05-05", "2020-05-06"]),
(5.5, {"start": datetime(2020, 5, 1)},
["2020-05-01", "2020-05-02", "2020-05-03", "2020-05-04", "2020-05-05", "2020-05-06"]),
(5, {"end": datetime(2020, 5, 1)},
["2020-04-26", "2020-04-27", "2020-04-28", "2020-04-29", "2020-04-30", "2020-05-01"]),
(5, {"start": datetime(2020, 5, 1), "end": datetime(2020, 5, 3)},
["2020-05-01", "2020-05-02", "2020-05-03"]),
(5, {"start": datetime(2020, 5, 1), "end": datetime(2020, 5, 3, 14, 0)},
["2020-05-01", "2020-05-02", "2020-05-03"]),
(5, {"start": datetime(2020, 5, 1), "end": datetime(2020, 5, 3),
"trange": TimeRange(start=datetime(2020, 5, 3), end=datetime(2020, 5, 6))},
["2020-05-03", "2020-05-04", "2020-05-05", "2020-05-06"]),
(5, {"trange": TimeRange(start=datetime(2020, 5, 3), end=datetime(2020, 5, 6))},
["2020-05-03", "2020-05-04", "2020-05-05", "2020-05-06"])
)
for days_collected, kwargs, expected_datelist in data:
actual_datelist = DailyResult.date_list_str(days_collected, timezone.utc, **kwargs)
with self.subTest(days_collected=days_collected, kwargs=kwargs):
self.assertEqual(actual_datelist, expected_datelist)
def test_trange_not_inf(self):
data = (
(3, TimeRange(start=None, end=None, end_autofill_now=False), timezone.utc),
(0, TimeRange(start=None, end=None, end_autofill_now=False), timezone.utc),
(3, TimeRange(start=None, end=None, end_autofill_now=True), timezone.utc),
(0, TimeRange(start=None, end=None, end_autofill_now=True), timezone.utc)
)
for args in data:
new_trange = DailyResult.trange_ensure_not_inf(*args)
with self.subTest(args):
self.assertFalse(new_trange.is_inf)
class TestHourlyIntervalAverageMessageResult(TestCase):
@staticmethod
def get_cursor():
return [
{
"_id": {
HourlyIntervalAverageMessageResult.KEY_HR: 3,
HourlyIntervalAverageMessageResult.KEY_CATEGORY: MessageType.TEXT
},
HourlyIntervalAverageMessageResult.KEY_COUNT: 150
},
{
"_id": {
HourlyIntervalAverageMessageResult.KEY_HR: 3,
HourlyIntervalAverageMessageResult.KEY_CATEGORY: MessageType.IMAGE
},
HourlyIntervalAverageMessageResult.KEY_COUNT: 100
},
{
"_id": {
HourlyIntervalAverageMessageResult.KEY_HR: 4,
HourlyIntervalAverageMessageResult.KEY_CATEGORY: MessageType.TEXT
},
HourlyIntervalAverageMessageResult.KEY_COUNT: 50
},
{
"_id": {
HourlyIntervalAverageMessageResult.KEY_HR: 4,
HourlyIntervalAverageMessageResult.KEY_CATEGORY: MessageType.IMAGE
},
HourlyIntervalAverageMessageResult.KEY_COUNT: 1
}
]
def test_empty_data(self):
result = HourlyIntervalAverageMessageResult([], 2)
self.assertEqual(result.label_hr, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])
self.assertEqual(result.hr_range, 48)
self.assertEqual(result.data, [
(StatsResults.CATEGORY_TOTAL, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"#323232", "false")
])
def test_data(self):
result = HourlyIntervalAverageMessageResult(self.get_cursor(), 2)
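        # With days_collected=2, each hourly count from get_cursor() is
        # averaged over the two days: hour 3 -> 150/2 + 100/2 = 125 total,
        # hour 4 -> 50/2 + 1/2 = 25.5 total.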
self.assertEqual(result.label_hr, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])
self.assertEqual(result.hr_range, 48)
self.assertEqual(result.data, [
(StatsResults.CATEGORY_TOTAL, [0, 0, 0, 125, 25.5, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"#323232", "false"),
(MessageType.TEXT.key, [0, 0, 0, 75, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"#777777", "true"),
(MessageType.IMAGE.key, [0, 0, 0, 50, 0.5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"#777777", "true")
])
class TestDailyMessageResult(TestCase):
@staticmethod
def get_cursor():
return [
{
"_id": {
DailyMessageResult.KEY_DATE: "2020-05-07",
DailyMessageResult.KEY_HOUR: 0
},
DailyMessageResult.KEY_COUNT: 10
},
{
"_id": {
DailyMessageResult.KEY_DATE: "2020-05-07",
DailyMessageResult.KEY_HOUR: 1
},
DailyMessageResult.KEY_COUNT: 20
},
{
"_id": {
DailyMessageResult.KEY_DATE: "2020-05-08",
DailyMessageResult.KEY_HOUR: 0
},
DailyMessageResult.KEY_COUNT: 30
},
{
"_id": {
DailyMessageResult.KEY_DATE: "2020-05-08",
DailyMessageResult.KEY_HOUR: 1
},
DailyMessageResult.KEY_COUNT: 40
}
]
def test_data(self):
result = DailyMessageResult(self.get_cursor(), 2, timezone.utc, start=datetime(2020, 5, 7))
self.assertEqual(result.label_hr, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])
self.assertEqual(result.label_date, ["2020-05-07", "2020-05-08", "2020-05-09"])
self.assertEqual(result.data_sum, [30, 70, 0])
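        # Each per-hour entry below is presumably (count, percent of that
        # day's total, is-daily-peak): e.g. hour 1 on 2020-05-07 gives
        # (20, 2/3 * 100, True) since 20 of the day's 30 messages fall there.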
data = (
(
"2020-05-07",
[(10, 1 / 3 * 100, False), (20, 2 / 3 * 100, True), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False)]
),
(
"2020-05-08",
[(30, 3 / 7 * 100, False), (40, 4 / 7 * 100, True), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False)]
),
(
"2020-05-09",
[(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False)]
)
)
for idx, d in enumerate(data):
with self.subTest(idx):
self.assertEqual(result.data[idx], d)
def test_empty_data(self):
result = DailyMessageResult([], 2, timezone.utc, start=datetime(2020, 5, 7))
self.assertEqual(result.label_hr, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])
self.assertEqual(result.label_date, ["2020-05-07", "2020-05-08", "2020-05-09"])
self.assertEqual(result.data_sum, [0, 0, 0])
data = (
(
"2020-05-07",
[(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False)]
),
(
"2020-05-08",
[(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False)]
),
(
"2020-05-09",
[(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False),
(0, 0, False), (0, 0, False), (0, 0, False), (0, 0, False)]
)
)
for idx, d in enumerate(data):
with self.subTest(idx):
self.assertEqual(result.data[idx], d)
class TestMeanMessageResultGenerator(TestCase):
@staticmethod
def get_cursor():
return [
{
"_id": {MeanMessageResultGenerator.KEY_DATE: "2020-05-07"},
MeanMessageResultGenerator.KEY_COUNT: 500
},
{
"_id": {MeanMessageResultGenerator.KEY_DATE: "2020-05-08"},
MeanMessageResultGenerator.KEY_COUNT: 600
},
{
"_id": {MeanMessageResultGenerator.KEY_DATE: "2020-05-09"},
MeanMessageResultGenerator.KEY_COUNT: 700
},
]
def get_result(self):
return MeanMessageResultGenerator(
self.get_cursor(), 2, timezone.utc,
trange=TimeRange(start=datetime(2020, 5, 7), end=datetime(2020, 5, 9)), max_mean_days=2)
@staticmethod
def get_result_empty():
return MeanMessageResultGenerator(
[], 0, timezone.utc,
trange=TimeRange(start=datetime(2020, 5, 7), end=datetime(2020, 5, 9)), max_mean_days=2)
def test_empty_generator(self):
result = self.get_result_empty()
self.assertEqual(result.max_madays, 2)
self.assertEqual(result.trange, TimeRange(start=datetime(2020, 5, 7), end=datetime(2020, 5, 9)))
self.assertEqual(result.dates, [date(2020, 5, 7), date(2020, 5, 8), date(2020, 5, 9)])
self.assertEqual(result.data, {date(2020, 5, 7): 0,
date(2020, 5, 8): 0,
date(2020, 5, 9): 0})
def test_base_generator(self):
result = self.get_result()
self.assertEqual(result.max_madays, 2)
self.assertEqual(result.trange, TimeRange(start=datetime(2020, 5, 7), end=datetime(2020, 5, 9)))
self.assertEqual(result.dates, [date(2020, 5, 7), date(2020, 5, 8), date(2020, 5, 9)])
self.assertEqual(result.data, {date(2020, 5, 7): 500,
date(2020, 5, 8): 600,
date(2020, 5, 9): 700})
def test_generate_out_of_range(self):
result = self.get_result()
with self.assertRaises(ValueError):
result.generate_result(3)
with self.assertRaises(ValueError):
result.generate_result(0)
with self.assertRaises(ValueError):
result.generate_result(-1)
def test_generate_2_empty(self):
result = self.get_result_empty()
rst = result.generate_result(2)
self.assertEqual(rst.date_list, [date(2020, 5, 7), date(2020, 5, 8), date(2020, 5, 9)])
self.assertEqual(rst.data_list, [0, 0, 0])
self.assertEqual(rst.label, StatsResults.DAYS_MEAN.format(2))
def test_generate_2(self):
result = self.get_result()
rst = result.generate_result(2)
self.assertEqual(rst.date_list, [date(2020, 5, 7), date(2020, 5, 8), date(2020, 5, 9)])
self.assertEqual(rst.data_list, [250, 550, 650])
self.assertEqual(rst.label, StatsResults.DAYS_MEAN.format(2))
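    # The 2-day rolling mean presumably zero-pads before the range:
    # (0+500)/2 = 250, (500+600)/2 = 550, (600+700)/2 = 650.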
def test_generate_1_empty(self):
result = self.get_result_empty()
rst = result.generate_result(1)
self.assertEqual(rst.date_list, [date(2020, 5, 7), date(2020, 5, 8), date(2020, 5, 9)])
self.assertEqual(rst.data_list, [0, 0, 0])
self.assertEqual(rst.label, StatsResults.DAYS_MEAN.format(1))
def test_generate_1(self):
result = self.get_result()
rst = result.generate_result(1)
self.assertEqual(rst.date_list, [date(2020, 5, 7), date(2020, 5, 8), date(2020, 5, 9)])
self.assertEqual(rst.data_list, [500, 600, 700])
self.assertEqual(rst.label, StatsResults.DAYS_MEAN.format(1))
class TestMemberDailyMessageResult(TestCase):
MEMBER_1 = ObjectId()
MEMBER_2 = ObjectId()
@staticmethod
def get_cursor():
return [
{
"_id": {
MemberDailyMessageResult.KEY_DATE: "2020-05-07",
MemberDailyMessageResult.KEY_MEMBER: TestMemberDailyMessageResult.MEMBER_1
},
MemberDailyMessageResult.KEY_COUNT: 10
},
{
"_id": {
MemberDailyMessageResult.KEY_DATE: "2020-05-08",
MemberDailyMessageResult.KEY_MEMBER: TestMemberDailyMessageResult.MEMBER_1
},
MemberDailyMessageResult.KEY_COUNT: 20
},
{
"_id": {
MemberDailyMessageResult.KEY_DATE: "2020-05-07",
MemberDailyMessageResult.KEY_MEMBER: TestMemberDailyMessageResult.MEMBER_2
},
MemberDailyMessageResult.KEY_COUNT: 30
},
{
"_id": {
MemberDailyMessageResult.KEY_DATE: "2020-05-08",
MemberDailyMessageResult.KEY_MEMBER: TestMemberDailyMessageResult.MEMBER_2
},
MemberDailyMessageResult.KEY_COUNT: 40
}
]
def test_data(self):
result = MemberDailyMessageResult(self.get_cursor(), 2, timezone.utc,
trange=TimeRange(start=datetime(2020, 5, 7), end=datetime(2020, 5, 8)))
self.assertEqual(result.trange, TimeRange(start=datetime(2020, 5, 7), end=datetime(2020, 5, 8)))
self.assertEqual(result.dates, ["2020-05-07", "2020-05-08"])
self.assertEqual(result.data_count,
{
"2020-05-07": {
TestMemberDailyMessageResult.MEMBER_1: 10,
TestMemberDailyMessageResult.MEMBER_2: 30
},
"2020-05-08": {
TestMemberDailyMessageResult.MEMBER_1: 20,
TestMemberDailyMessageResult.MEMBER_2: 40
}
})
def test_empty_data(self):
result = MemberDailyMessageResult([], 2, timezone.utc,
trange=TimeRange(start=datetime(2020, 5, 7), end=datetime(2020, 5, 8)))
self.assertEqual(result.trange, TimeRange(start=datetime(2020, 5, 7), end=datetime(2020, 5, 8)))
self.assertEqual(result.dates, ["2020-05-07", "2020-05-08"])
self.assertEqual(result.data_count, {"2020-05-07": {}, "2020-05-08": {}})
class TestCountBeforeTimeResult(TestCase):
@staticmethod
def get_cursor():
return [
{
"_id": {
CountBeforeTimeResult.KEY_DATE: "2020-05-07"
},
CountBeforeTimeResult.KEY_COUNT: 100
},
{
"_id": {
CountBeforeTimeResult.KEY_DATE: "2020-05-08"
},
CountBeforeTimeResult.KEY_COUNT: 200
},
{
"_id": {
CountBeforeTimeResult.KEY_DATE: "2020-05-09"
},
CountBeforeTimeResult.KEY_COUNT: 300
},
{
"_id": {
CountBeforeTimeResult.KEY_DATE: "2020-05-10"
},
CountBeforeTimeResult.KEY_COUNT: 400
}
]
def test_data(self):
result = CountBeforeTimeResult(self.get_cursor(), 4, timezone.utc,
trange=TimeRange(start=datetime(2020, 5, 7), end=datetime(2020, 5, 10)))
self.assertEqual(result.trange, TimeRange(start=datetime(2020, 5, 7), end=datetime(2020, 5, 10)))
self.assertEqual(result.dates, ["2020-05-07", "2020-05-08", "2020-05-09", "2020-05-10"])
self.assertEqual(result.data_count, [100, 200, 300, 400])
self.assertEqual(result.title,
StatsResults.COUNT_BEFORE.format(
strftime("%I:%M:%S %p", gmtime(result.trange.end_time_seconds))))
def test_empty_data(self):
result = CountBeforeTimeResult([], 4, timezone.utc,
trange=TimeRange(start=datetime(2020, 5, 7), end=datetime(2020, 5, 10)))
self.assertEqual(result.trange, TimeRange(start=datetime(2020, 5, 7), end=datetime(2020, 5, 10)))
self.assertEqual(result.dates, ["2020-05-07", "2020-05-08", "2020-05-09", "2020-05-10"])
self.assertEqual(result.data_count, [0, 0, 0, 0])
self.assertEqual(result.title,
StatsResults.COUNT_BEFORE.format(
strftime("%I:%M:%S %p", gmtime(result.trange.end_time_seconds))))
class TestMemberMessageCountResult(TestCase):
MEMBER_1 = ObjectId()
MEMBER_2 = ObjectId()
@staticmethod
def get_cursor_1_interval():
return [
{
"_id": {
MemberMessageCountResult.KEY_MEMBER_ID: TestMemberMessageCountResult.MEMBER_1,
MemberMessageCountResult.KEY_INTERVAL_IDX: 0
},
MemberMessageCountResult.KEY_COUNT: 100
},
{
"_id": {
MemberMessageCountResult.KEY_MEMBER_ID: TestMemberMessageCountResult.MEMBER_2,
MemberMessageCountResult.KEY_INTERVAL_IDX: 0
},
MemberMessageCountResult.KEY_COUNT: 200
}
]
@staticmethod
def get_cursor_2_intervals():
return [
{
"_id": {
MemberMessageCountResult.KEY_MEMBER_ID: TestMemberMessageCountResult.MEMBER_1,
MemberMessageCountResult.KEY_INTERVAL_IDX: 0
},
MemberMessageCountResult.KEY_COUNT: 100
},
{
"_id": {
MemberMessageCountResult.KEY_MEMBER_ID: TestMemberMessageCountResult.MEMBER_1,
MemberMessageCountResult.KEY_INTERVAL_IDX: 1
},
MemberMessageCountResult.KEY_COUNT: 200
},
{
"_id": {
MemberMessageCountResult.KEY_MEMBER_ID: TestMemberMessageCountResult.MEMBER_2,
MemberMessageCountResult.KEY_INTERVAL_IDX: 0
},
MemberMessageCountResult.KEY_COUNT: 300
},
{
"_id": {
MemberMessageCountResult.KEY_MEMBER_ID: TestMemberMessageCountResult.MEMBER_2,
MemberMessageCountResult.KEY_INTERVAL_IDX: 1
},
MemberMessageCountResult.KEY_COUNT: 400
}
]
def test_data_empty(self):
result = MemberMessageCountResult([], 3,
trange=TimeRange(start=datetime(2020, 5, 7), end=datetime(2020, 5, 10)))
self.assertEqual(result.trange, TimeRange(start=datetime(2020, 5, 7), end=datetime(2020, 5, 10)))
self.assertEqual(result.interval, 3)
self.assertEqual(result.data, {})
def test_data_1_interval(self):
result = MemberMessageCountResult(self.get_cursor_1_interval(), 1,
trange=TimeRange(start=datetime(2020, 5, 7), end=datetime(2020, 5, 10)))
self.assertEqual(result.trange, TimeRange(start=datetime(2020, 5, 7), end=datetime(2020, 5, 10)))
self.assertEqual(result.interval, 1)
entry = MemberMessageCountEntry(intervals=1)
entry.count[0] = 100
self.assertEqual(result.data[TestMemberMessageCountResult.MEMBER_1], entry)
entry = MemberMessageCountEntry(intervals=1)
entry.count[0] = 200
self.assertEqual(result.data[TestMemberMessageCountResult.MEMBER_2], entry)
def test_data_2_intervals(self):
result = MemberMessageCountResult(self.get_cursor_2_intervals(), 2,
trange=TimeRange(start=datetime(2020, 5, 7), end=datetime(2020, 5, 10)))
self.assertEqual(result.trange, TimeRange(start=datetime(2020, 5, 7), end=datetime(2020, 5, 10)))
self.assertEqual(result.interval, 2)
entry = MemberMessageCountEntry(intervals=2)
entry.count[0] = 100
entry.count[1] = 200
self.assertEqual(result.data[TestMemberMessageCountResult.MEMBER_1], entry)
entry = MemberMessageCountEntry(intervals=2)
entry.count[0] = 300
entry.count[1] = 400
self.assertEqual(result.data[TestMemberMessageCountResult.MEMBER_2], entry)
class TestMemberMessageByCategoryResult(TestCase):
MEMBER_1 = ObjectId()
MEMBER_2 = ObjectId()
@staticmethod
def get_cursor():
return [
{
"_id": {
MemberMessageByCategoryResult.KEY_MEMBER_ID: TestMemberMessageByCategoryResult.MEMBER_1,
MemberMessageByCategoryResult.KEY_CATEGORY: MessageType.TEXT,
},
MemberMessageByCategoryResult.KEY_COUNT: 100
},
{
"_id": {
MemberMessageByCategoryResult.KEY_MEMBER_ID: TestMemberMessageByCategoryResult.MEMBER_1,
MemberMessageByCategoryResult.KEY_CATEGORY: MessageType.IMAGE,
},
MemberMessageByCategoryResult.KEY_COUNT: 200
},
{
"_id": {
MemberMessageByCategoryResult.KEY_MEMBER_ID: TestMemberMessageByCategoryResult.MEMBER_2,
MemberMessageByCategoryResult.KEY_CATEGORY: MessageType.TEXT,
},
MemberMessageByCategoryResult.KEY_COUNT: 300
},
{
"_id": {
MemberMessageByCategoryResult.KEY_MEMBER_ID: TestMemberMessageByCategoryResult.MEMBER_2,
MemberMessageByCategoryResult.KEY_CATEGORY: MessageType.IMAGE,
},
MemberMessageByCategoryResult.KEY_COUNT: 400
}
]
def get_result(self):
return MemberMessageByCategoryResult(self.get_cursor())
@staticmethod
def get_result_empty():
return MemberMessageByCategoryResult([])
def test_data(self):
result = self.get_result()
self.assertEqual(result.label_category, [
MessageType.TEXT, MessageType.LINE_STICKER, MessageType.IMAGE, MessageType.VIDEO,
MessageType.AUDIO, MessageType.LOCATION, MessageType.FILE
])
def test_empty_data(self):
result = self.get_result_empty()
self.assertEqual(result.label_category, [
MessageType.TEXT, MessageType.LINE_STICKER, MessageType.IMAGE, MessageType.VIDEO,
MessageType.AUDIO, MessageType.LOCATION, MessageType.FILE
])
def test_member_empty(self):
result = self.get_result_empty()
self.assertEqual(result.data, {})
def test_member1(self):
result = self.get_result()
mbr = result.data[TestMemberMessageByCategoryResult.MEMBER_1]
self.assertEqual(mbr.data, {MessageType.TEXT: 100, MessageType.IMAGE: 200, MessageType.LINE_STICKER: 0,
MessageType.LOCATION: 0, MessageType.FILE: 0, MessageType.AUDIO: 0,
MessageType.VIDEO: 0})
self.assertEqual(mbr.total, 300)
self.assertEqual(mbr.get_count(MessageType.TEXT), 100)
self.assertEqual(mbr.get_count(MessageType.IMAGE), 200)
self.assertEqual(mbr.get_count(MessageType.LINE_STICKER), 0)
def test_member2(self):
result = self.get_result()
mbr = result.data[TestMemberMessageByCategoryResult.MEMBER_2]
self.assertEqual(mbr.data, {MessageType.TEXT: 300, MessageType.IMAGE: 400, MessageType.LINE_STICKER: 0,
MessageType.LOCATION: 0, MessageType.FILE: 0, MessageType.AUDIO: 0,
MessageType.VIDEO: 0})
self.assertEqual(mbr.total, 700)
self.assertEqual(mbr.get_count(MessageType.TEXT), 300)
self.assertEqual(mbr.get_count(MessageType.IMAGE), 400)
self.assertEqual(mbr.get_count(MessageType.LINE_STICKER), 0)
class TestBotFeatureUsageResult(TestCase):
@staticmethod
def get_cursor():
return [
{"_id": BotFeature.TXT_AR_ADD, BotFeatureUsageResult.KEY: 100},
{"_id": BotFeature.TXT_AR_INFO, BotFeatureUsageResult.KEY: 20},
{"_id": BotFeature.TXT_AR_DEL, BotFeatureUsageResult.KEY: 20},
{"_id": BotFeature.TXT_CALCULATOR, BotFeatureUsageResult.KEY: 1},
]
def test_data_empty(self):
result = BotFeatureUsageResult([], True)
expected = (
(BotFeature.TXT_AR_ADD.key, 0, "T1"),
(BotFeature.TXT_AR_INFO.key, 0, "T1"),
(BotFeature.TXT_AR_DEL.key, 0, "T1"),
(BotFeature.TXT_CALCULATOR.key, 0, "T1"),
(BotFeature.TXT_AR_ADD_EXECODE.key, 0, "T1"),
)
for e in expected:
with self.subTest(e):
self.assertTrue(e in result.data)
self.assertEqual(result.chart_label, [d.feature_name for d in result.data])
self.assertEqual(result.chart_data, [d.count for d in result.data])
def test_data_incl_not_used(self):
result = BotFeatureUsageResult(self.get_cursor(), True)
expected = (
(BotFeature.TXT_AR_ADD.key, 100, "1"),
(BotFeature.TXT_AR_INFO.key, 20, "T2"),
(BotFeature.TXT_AR_DEL.key, 20, "T2"),
(BotFeature.TXT_CALCULATOR.key, 1, "4"),
(BotFeature.TXT_AR_ADD_EXECODE.key, 0, "T5"),
)
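        # The third tuple element is the usage rank; a "T" prefix appears to
        # mark ties (TXT_AR_INFO and TXT_AR_DEL share rank 2 with count 20).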
for e in expected:
with self.subTest(e):
self.assertTrue(e in result.data)
self.assertEqual(result.chart_label, [d.feature_name for d in result.data])
self.assertEqual(result.chart_data, [d.count for d in result.data])
def test_data_not_incl_not_used(self):
result = BotFeatureUsageResult(self.get_cursor(), False)
expected = (
(BotFeature.TXT_AR_ADD.key, 100, "1"),
(BotFeature.TXT_AR_INFO.key, 20, "T2"),
(BotFeature.TXT_AR_DEL.key, 20, "T2"),
(BotFeature.TXT_CALCULATOR.key, 1, "4")
)
for e in expected:
with self.subTest(e):
self.assertTrue(e in result.data)
self.assertFalse((BotFeature.TXT_AR_ADD_EXECODE.key, 0, "5") in result.data)
self.assertEqual(result.chart_label, [d.feature_name for d in result.data])
self.assertEqual(result.chart_data, [d.count for d in result.data])
class TestBotFeaturePerUserUsageResult(TestCase):
MEMBER_1 = ObjectId()
MEMBER_2 = ObjectId()
@staticmethod
def get_cursor():
return [
{
"_id": {
BotFeaturePerUserUsageResult.KEY_FEATURE: BotFeature.TXT_AR_ADD,
BotFeaturePerUserUsageResult.KEY_UID: TestBotFeaturePerUserUsageResult.MEMBER_1
},
BotFeaturePerUserUsageResult.KEY_COUNT: 10
},
{
"_id": {
BotFeaturePerUserUsageResult.KEY_FEATURE: BotFeature.TXT_AR_INFO,
BotFeaturePerUserUsageResult.KEY_UID: TestBotFeaturePerUserUsageResult.MEMBER_1
},
BotFeaturePerUserUsageResult.KEY_COUNT: 20
},
{
"_id": {
BotFeaturePerUserUsageResult.KEY_FEATURE: BotFeature.TXT_AR_ADD,
BotFeaturePerUserUsageResult.KEY_UID: TestBotFeaturePerUserUsageResult.MEMBER_2
},
BotFeaturePerUserUsageResult.KEY_COUNT: 30
},
{
"_id": {
BotFeaturePerUserUsageResult.KEY_FEATURE: BotFeature.TXT_AR_INFO,
BotFeaturePerUserUsageResult.KEY_UID: TestBotFeaturePerUserUsageResult.MEMBER_2
},
BotFeaturePerUserUsageResult.KEY_COUNT: 40
}
]
def test_empty_data(self):
result = BotFeaturePerUserUsageResult([])
self.assertEqual(result.data, {})
def test_data(self):
result = BotFeaturePerUserUsageResult(self.get_cursor())
dict_ = {ft: 0 for ft in BotFeature}
dict_.update({BotFeature.TXT_AR_ADD: 10, BotFeature.TXT_AR_INFO: 20})
self.assertEqual(result.data[TestBotFeaturePerUserUsageResult.MEMBER_1], dict_)
dict_ = {ft: 0 for ft in BotFeature}
dict_.update({BotFeature.TXT_AR_ADD: 30, BotFeature.TXT_AR_INFO: 40})
self.assertEqual(result.data[TestBotFeaturePerUserUsageResult.MEMBER_2], dict_)
class TestBotFeatureHourlyAvgResult(TestCase):
@staticmethod
def get_cursor():
return [
{
"_id": {
BotFeatureHourlyAvgResult.KEY_FEATURE: BotFeature.TXT_AR_INFO,
BotFeatureHourlyAvgResult.KEY_HR: 0
},
BotFeatureHourlyAvgResult.KEY_COUNT: 100
},
{
"_id": {
BotFeatureHourlyAvgResult.KEY_FEATURE: BotFeature.TXT_AR_INFO,
BotFeatureHourlyAvgResult.KEY_HR: 1
},
BotFeatureHourlyAvgResult.KEY_COUNT: 200
},
{
"_id": {
BotFeatureHourlyAvgResult.KEY_FEATURE: BotFeature.TXT_AR_ADD,
BotFeatureHourlyAvgResult.KEY_HR: 0
},
BotFeatureHourlyAvgResult.KEY_COUNT: 300
},
{
"_id": {
BotFeatureHourlyAvgResult.KEY_FEATURE: BotFeature.TXT_AR_ADD,
BotFeatureHourlyAvgResult.KEY_HR: 1
},
BotFeatureHourlyAvgResult.KEY_COUNT: 400
},
{
"_id": {
BotFeatureHourlyAvgResult.KEY_FEATURE: BotFeature.TXT_CALCULATOR,
BotFeatureHourlyAvgResult.KEY_HR: 8
},
BotFeatureHourlyAvgResult.KEY_COUNT: 300
},
]
def test_empty(self):
result = BotFeatureHourlyAvgResult([], True, 2.25, end_time=datetime(2020, 5, 7, 12, 0))
self.assertEqual(result.hr_range, 54)
self.assertEqual(result.label_hr, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])
self.assertTrue(result.avg_calculatable)
expected = (
(BotFeature.TXT_AR_ADD, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "#9C0000", "true"),
(BotFeature.TXT_AR_INFO, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "#9C0000", "true"),
(BotFeature.TXT_CALCULATOR, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "#9C0000", "true"),
(BotFeature.TXT_AR_ADD_EXECODE, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "#9C0000", "true"),
(StatsResults.CATEGORY_TOTAL, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "#323232", "false")
)
for e in expected:
with self.subTest(e):
self.assertTrue(e in result.data)
def test_empty_not_incl_not_used(self):
result = BotFeatureHourlyAvgResult([], False, 2.25, end_time=datetime(2020, 5, 7, 12, 0))
self.assertEqual(result.hr_range, 54)
self.assertEqual(result.label_hr, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])
self.assertTrue(result.avg_calculatable)
self.assertEqual(result.data,
[(StatsResults.CATEGORY_TOTAL,
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
"#323232", "false")])
def test_incl_not_used(self):
result = BotFeatureHourlyAvgResult(self.get_cursor(), True, 2.25, end_time=datetime(2020, 5, 7, 12, 0))
self.assertEqual(result.hr_range, 54)
self.assertEqual(result.label_hr, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])
self.assertTrue(result.avg_calculatable)
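        # Counts are presumably divided by the number of times each
        # hour-of-day falls in the 54 h window ending 2020-05-07 12:00:
        # hours 0-1 occur twice (May 6-7) -> 300/2=150, 400/2=200;
        # hour 8 occurs three times (May 5-7) -> 300/3=100.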
expected = (
(BotFeature.TXT_AR_ADD, [150, 200, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "#00A14B", "true"),
(BotFeature.TXT_AR_INFO, [50, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "#00A14B", "true"),
(BotFeature.TXT_CALCULATOR, [0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "#00A14B", "true"),
(BotFeature.TXT_AR_ADD_EXECODE, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "#9C0000", "true"),
(StatsResults.CATEGORY_TOTAL, [200, 300, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "#323232", "false")
)
for e in expected:
with self.subTest(e):
self.assertTrue(e in result.data)
def test_not_incl_not_used(self):
result = BotFeatureHourlyAvgResult(self.get_cursor(), False, 2.25, end_time=datetime(2020, 5, 7, 12, 0))
self.assertEqual(result.hr_range, 54)
self.assertEqual(result.label_hr, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23])
self.assertTrue(result.avg_calculatable)
expected = (
(BotFeature.TXT_AR_ADD, [150, 200, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "#00A14B", "true"),
(BotFeature.TXT_AR_INFO, [50, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "#00A14B", "true"),
(BotFeature.TXT_CALCULATOR, [0, 0, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "#00A14B", "true"),
(StatsResults.CATEGORY_TOTAL, [200, 300, 0, 0, 0, 0, 0, 0, 100, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "#323232", "false")
)
for e in expected:
with self.subTest(e):
self.assertTrue(e in result.data)
self.assertFalse((BotFeature.TXT_AR_ADD_EXECODE,
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], "#9C0000", "true")
in result.data)
| [
"raenonx0710@gmail.com"
] | raenonx0710@gmail.com |
c6cd2fc9dc2c9064c937f58a6dbfe5b5065a343e | 5da5473ff3026165a47f98744bac82903cf008e0 | /scripts/configure_release_please/configure_release_please.py | 6d457a63a8297a9d6910a911b66280d01abbfeeb | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 5,416 | py | import json
from pathlib import Path
from typing import Union, Dict, List, Tuple
import re
SCRIPT_DIR = Path(__file__).resolve().parent
ROOT_DIR = Path(SCRIPT_DIR / ".." / "..").resolve()
PACKAGES_DIR = ROOT_DIR / "packages"
def get_version_for_package(version_path: Path) -> Tuple[int]:
"""
Given a `version_path` to a `gapic_version.py` file,
    return a Tuple[int] which contains the version.
Args:
version_path(pathlib.Path): Path to the gapic_version.py file
Returns:
Tuple[int] in the format (<major>, <minor>, <patch>)
"""
VERSION_REGEX = r"__version__\s=\s\"(?P<major_version>\d+)\.(?P<minor_version>\d+)\.(?P<patch_version>\d+)\""
match = re.search(VERSION_REGEX, version_path.read_text())
if match is None:
raise Exception("Could not detect version")
major_version = int(match.group("major_version"))
minor_version = int(match.group("minor_version"))
patch_version = int(match.group("patch_version"))
if any(elem is None for elem in [major_version, minor_version, patch_version]):
raise Exception("could not detect version")
return (major_version, minor_version, patch_version)
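# Example (hypothetical file): a gapic_version.py containing
#   __version__ = "1.2.3"
# yields (1, 2, 3).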
def get_packages_with_owlbot_yaml(packages_dir: Path = PACKAGES_DIR) -> List[Path]:
"""
Walks through all API packages in the specified `packages_dir` path.
Args:
packages_dir(pathlib.Path): Path to the directory which contains packages.
Returns:
List[pathlib.Path] where each entry corresponds to a package within the
specified `packages_dir`, which has a corresponding .OwlBot.yaml file.
"""
if not Path(packages_dir).exists():
raise FileNotFoundError(f"Directory {packages_dir} not found")
return [obj.parents[0].resolve() for obj in packages_dir.rglob("**/.OwlBot.yaml")]
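# Example (hypothetical layout): packages/google-cloud-foo/.OwlBot.yaml
# resolves to <root>/packages/google-cloud-foo.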
def configure_release_please_manifest(
package_dirs: List[Path], root_dir: Path = ROOT_DIR
) -> None:
"""
This method updates the `.release-please-manifest.json` file in the directory
`root_dir`.
Args:
package_dirs(List[pathlib.Path]): A list of Paths, one for each package in the
`packages/` folder whose entry will be updated in the release-please manifest.
root_dir(pathlib.Path): The directory to update the `.release-please-manifest.json`
Returns:
None
"""
release_please_manifest = root_dir / ".release-please-manifest.json"
with open(release_please_manifest, "r") as f:
manifest_json = json.load(f)
for package_dir in package_dirs:
if f"packages/{package_dir.name}" not in manifest_json:
manifest_json[f"packages/{package_dir.name}"] = "0.0.0"
gapic_version_file = next(package_dir.rglob("**/gapic_version.py"), None)
if gapic_version_file is None:
raise Exception("Failed to find gapic_version.py")
version = get_version_for_package(gapic_version_file)
# check the version in gapic_version.py and update if newer than the default which is
# 0.0.0 or 0.1.0.
if version != (0, 0, 0) and version != (0, 1, 0):
manifest_json[
f"packages/{package_dir.name}"
] = f"{version[0]}.{version[1]}.{version[2]}"
with open(release_please_manifest, "w") as f:
json.dump(manifest_json, f, indent=4, sort_keys=True)
f.write("\n")
def configure_release_please_config(
package_dirs: List[Path], root_dir: Path = ROOT_DIR
) -> None:
"""
This method updates the `release-please-config.json` file in the directory
`root_dir`. If `root_dir` is not provided, `google-cloud-python` will be used as the root.
Args:
package_dirs(List[pathlib.Path]): A list of Paths, one for each package in
the `packages/` folder whose entry will be updated in the release-please config.
root_dir(pathlib.Path): The directory to update the `release-please-config.json`
Returns:
None
"""
release_please_config = root_dir / "release-please-config.json"
config_json = {"packages": {}}
for package_dir in package_dirs:
extra_files: List[Union[str, Dict[str, str]]] = [
str(file.relative_to(package_dir))
for file in sorted(package_dir.rglob("**/gapic_version.py"))
]
if len(extra_files) < 1:
raise Exception("Failed to find gapic_version.py")
for json_file in sorted(package_dir.glob("samples/**/*.json")):
sample_json = {}
sample_json["jsonpath"] = "$.clientLibrary.version"
sample_json["path"] = str(json_file.relative_to(package_dir))
sample_json["type"] = "json"
extra_files.append(sample_json)
config_json["packages"][f"packages/{package_dir.name}"] = {
"component": f"{package_dir.name}",
"release-type": "python",
"extra-files": extra_files,
"bump-minor-pre-major": True,
"bump-patch-for-minor-pre-major": True,
}
with open(release_please_config, "w") as f:
json.dump(config_json, f, indent=4, sort_keys=True)
f.write("\n")
if __name__ == "__main__":
owlbot_dirs = get_packages_with_owlbot_yaml()
configure_release_please_manifest(owlbot_dirs)
configure_release_please_config(owlbot_dirs)
| [
"noreply@github.com"
] | googleapis.noreply@github.com |
3159244fd02b2ff10c466b4c0a2f67ecdf62d672 | 92a619c043e0c26fb65e58619a0e1c5090a9efe0 | /Grokking_the_Coding_Interviews/p125_k_pairs_with_largest_sum.py | d4cd0c92088a92b85a86448e9706fa355f351ac0 | [] | no_license | curieshicy/My_Utilities_Code | 39150171f8e0aa4971cfc3d7adb32db7f45e6733 | 8b14a5c1112794d3451486c317d5e3c73efcd3b5 | refs/heads/master | 2022-06-22T06:06:39.901008 | 2022-06-20T16:00:51 | 2022-06-20T16:00:51 | 177,379,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py | import heapq
def find_k_largest_pairs(nums1, nums2, k):
result = []
max_heap = []
i = 0
j = 0
while len(max_heap) < k:
if i + 1 < len(nums1) and j + 1 < len(nums2):
heapq.heappush(max_heap, (-nums1[i] - nums2[j], (nums1[i], nums2[j])))
heapq.heappush(max_heap, (-nums1[i + 1] - nums2[j], (nums1[i + 1], nums2[j])))
heapq.heappush(max_heap, (-nums1[i] - nums2[j + 1], (nums1[i], nums2[j + 1])))
heapq.heappush(max_heap, (-nums1[i + 1] - nums2[j + 1], (nums1[i + 1], nums2[j + 1])))
i += 1
j += 1
while max_heap and k:
_, pair = heapq.heappop(max_heap)
result.append(list(pair))
k -= 1
return result
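# Second approach (this definition shadows the one above): keep a min-heap
# of the k largest sums over the top-left k x k block; the early break
# assumes both lists are sorted in descending order, as in main().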
def find_k_largest_pairs(nums1, nums2, k):
min_heap = []
for i in range(min(len(nums1), k)):
for j in range(min(len(nums2), k)):
if len(min_heap) < k:
heapq.heappush(min_heap, (nums1[i] + nums2[j], (nums1[i], nums2[j])))
else:
if nums1[i] + nums2[j] < min_heap[0][0]:
break
else:
heapq.heappushpop(min_heap, (nums1[i] + nums2[j], (nums1[i], nums2[j])))
result = []
while min_heap:
_, pair = heapq.heappop(min_heap)
result.append(list(pair))
return result
def main():
print("Pairs with largest sum are: " + str(find_k_largest_pairs([9, 8, 2], [6, 3, 1], 3)))
print("Pairs with largest sum are: " + str(find_k_largest_pairs([5, 2, 1], [2, -1], 3)))
main()
| [
"noreply@github.com"
] | curieshicy.noreply@github.com |
bc6c4a4c0bd8880ce526d203d61b4232b89dc9e9 | 4c8295990aad62b0edbf5c31ee8db9ee485fbea7 | /tests/test_room.py | d39e4d8fb46d5a586d11bf0e1d9b7a7704a1d81e | [
"MIT"
] | permissive | frostburn/image-source-reverb | 7b72e2d215f4f2dcedafb1cf2ecddd6ffca71c2f | 84c467249fb5fb1e978009510889a0447a625cda | refs/heads/master | 2021-04-16T18:54:36.948737 | 2020-03-23T14:57:13 | 2020-03-23T14:57:13 | 249,377,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | import numpy as np
from image_source_reverb.room import Plane, Room
def test_mirrors_origin():
p = Plane([1, 0], distance=1.1)
x = p.mirror_point([0, 0])
assert np.allclose(x, [-2.2, 0])
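    # Consistent with a plane defined by x.n + d = 0: the mirror formula
    # x' = x - 2*(x.n + d)*n presumably sends the origin to [-2.2, 0].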
def test_ignores_backside():
p = Plane([2, 1])
assert p.mirror_point([-2, -2]) is None
def test_triangle_reflections():
r = Room([[0, 1], [1, -1], [-1, -1]])
reflections = r.reflect_points([[0, 0]])
assert np.allclose(reflections, [
[0, -2],
[-np.sqrt(2), np.sqrt(2)],
[np.sqrt(2), np.sqrt(2)]
])
| [
"lumi.pakkanen@gmail.com"
] | lumi.pakkanen@gmail.com |
4c9ff60522b7965fb8c10f42157fec743e28aaee | 135999da46a46d95c5e8aa3e3dc840a589b46dc7 | /ch01Text/1.3re/FindingPatterns.py | 94e03f06bfbeb0fcb531ddb8957afcd53fcfdb52 | [
"MIT"
] | permissive | eroicaleo/ThePythonStandardLibraryByExample | 53e6b32f6b2c3125a0bf4023d5df8705756a756d | b9c48c026e6966eee32cd51b0c49a79a5cbdceb9 | refs/heads/master | 2016-09-05T18:48:30.325386 | 2014-09-09T06:58:21 | 2014-09-09T06:58:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | #!/usr/bin/env python3
import re
pattern = 'this'
text = 'Does this text match the pattern?'
match = re.search(pattern, text)
start = match.start()
end = match.end()
print('Found "%s"\nin "%s"\nfrom %d to %d ("%s")' % (match.re.pattern, match.string, start, end, text[start:end]))
| [
"eroicaleo@gmail.com"
] | eroicaleo@gmail.com |
bc8ec7fa1aecc078fe811fbfae170f535f47fa6a | f22ca9aecda111a019502b462ce6772cb22d9425 | /test/test_order_abandoned.py | 02747404be9cb2f354e82036be14d89526568537 | [] | no_license | sivanv-unbxd/a2c-sdk-pim | cac05bc6335ddc3c4121d43e2dc476a6fec14965 | 51a07a0b7f90d74569ad14b47b174da7ac1fc374 | refs/heads/main | 2023-05-29T05:45:32.279821 | 2021-06-09T03:52:11 | 2021-06-09T03:52:11 | 375,218,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | # coding: utf-8
"""
Swagger API2Cart
API2Cart # noqa: E501
OpenAPI spec version: 1.1
Contact: contact@api2cart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.order_abandoned import OrderAbandoned # noqa: E501
from swagger_client.rest import ApiException
class TestOrderAbandoned(unittest.TestCase):
"""OrderAbandoned unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testOrderAbandoned(self):
"""Test OrderAbandoned"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.order_abandoned.OrderAbandoned() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"sivanv@unbxd.com"
] | sivanv@unbxd.com |
56df43e391086e06e386cee6ba4bd80b14b84822 | 921aa8e9f63ddd9e671b33e01fd18af1dafaffae | /keras/dtensor/__init__.py | bcc8b6e2c8db4d14482bec1d0bd5f90dfc108152 | [
"Apache-2.0"
] | permissive | Vishu26/keras | d116f875afb6c4ce5b7fbc783918463abfd9405a | c6fcaf79242ead384709973124069281f704b41d | refs/heads/master | 2022-05-29T06:24:56.299097 | 2022-03-11T10:37:09 | 2022-03-11T10:37:09 | 119,832,678 | 0 | 0 | null | 2018-02-01T12:25:54 | 2018-02-01T12:25:54 | null | UTF-8 | Python | false | false | 1,284 | py | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras' DTensor library."""
_DTENSOR_API_ENABLED = False
# Conditional import the dtensor API, since it is currently broken in OSS.
if _DTENSOR_API_ENABLED:
try:
# pylint: disable=g-direct-tensorflow-import, g-import-not-at-top
from tensorflow.dtensor import python as dtensor_api
except ImportError:
# TODO(b/222341036): Remove this conditional import after dtensor have a
# trimmed target that can be used by Keras.
dtensor_api = None
else:
# Leave it with a placeholder, so that the import line from other python file
# will not break.
dtensor_api = None
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
5fd9bfc0c389d764ee513da806b8852833cdbb96 | 07b75ac7273a92ddb617023a90667f9609250419 | /A0/slam_03_c_find_cylinders_question.py | a9a1842270878fddf91edb9a6d633df61b8b12ec | [] | no_license | khaledgabr77/SLAM-Learn | be151e7ec01b39d2ff894403f29705c3258354f5 | a6fc208dbbcabb1b8fd8164dcebc5a5b8d212424 | refs/heads/master | 2022-11-07T05:34:39.433870 | 2020-06-29T22:47:25 | 2020-06-29T22:47:25 | 275,672,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,054 | py | # For each cylinder in the scan, find its ray and depth.
# 03_c_find_cylinders
# Claus Brenner, 09 NOV 2012
from pylab import *
from lego_robot import *
# Find the derivative in scan data, ignoring invalid measurements.
def compute_derivative(scan, min_dist):
jumps = [ 0 ]
for i in range(1, len(scan) - 1):
l = scan[i-1]
r = scan[i+1]
if l > min_dist and r > min_dist:
derivative = (r - l) / 2.0
jumps.append(derivative)
else:
jumps.append(0)
jumps.append(0)
return jumps
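# Worked example: compute_derivative([20, 100, 200, 100, 20], 10)
# returns [0, 90, 0, -90, 0] -- central differences, zero-padded at both ends.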
# For each area between a left falling edge and a right rising edge,
# determine the average ray number and the average depth.
def find_cylinders(scan, scan_derivative, jump, min_dist):
cylinder_list = []
on_cylinder = False
sum_ray, sum_depth, rays = 0.0, 0.0, 0
discard = False
direction = "Left"
for i in range(len(scan_derivative)):
        # A cylinder shows up as a falling edge (strong negative derivative)
        # followed by a rising edge; the rays in between are accumulated and
        # averaged into a single (average_ray, average_depth) tuple.
current_der = scan_derivative[i]
if abs(current_der) > jump:
if on_cylinder and direction == 'Left':
if current_der < 0: # Left again
discard = True
else:
on_cylinder = False
average_ray = sum_ray / rays
average_depth = sum_depth / rays
cylinder_list.append((average_ray, average_depth))
sum_ray, sum_depth, rays = 0.0, 0.0, 0
if not on_cylinder and current_der < 0:
on_cylinder = True
# if current_der > 0:
# direction = 'Right'
# elif current_der < 0:
# direction = 'Left'
direction = 'Left'
if scan[i] <= min_dist and not on_cylinder:
discard = True
if scan[i] <= min_dist and on_cylinder:
continue
if on_cylinder and scan[i] > min_dist:
rays += 1
sum_ray += i
sum_depth += scan[i]
if discard:
sum_ray, sum_depth, rays = 0.0, 0.0, 0
discard = False
    # Return one (average_ray, average_depth) tuple per detected cylinder.
    return cylinder_list
if __name__ == '__main__':
minimum_valid_distance = 20.0
depth_jump = 100.0
# Read the logfile which contains all scans.
logfile = LegoLogfile()
logfile.read("robot4_scan.txt")
# Pick one scan.
scan = logfile.scan_data[8]
# Find cylinders.
der = compute_derivative(scan, minimum_valid_distance)
cylinders = find_cylinders(scan, der, depth_jump,
minimum_valid_distance)
# Plot results.
plot(scan)
scatter([c[0] for c in cylinders], [c[1] for c in cylinders],
c='r', s=200)
show()
| [
"khaledgabr77@gmail.com"
] | khaledgabr77@gmail.com |
7c1452fbcea938785de3c562f1d098d900b97550 | 8a166a0ad64efccb6231b2a351a65a54d6e0fa8e | /tests/test_quadApprox.py | eb8a76a20b9a819098ae632305cf43e3785c57d4 | [] | no_license | frossie-shadow/astshim | 6c29d584b5e6522e319f81672eb9512f0f62d8d0 | f6b1fd7af3e7147e4266d1bb44b9a8be5df29870 | refs/heads/master | 2021-01-19T17:50:13.479819 | 2017-08-03T17:56:29 | 2017-08-03T17:56:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | from __future__ import absolute_import, division, print_function
import unittest
import numpy as np
from numpy.testing import assert_allclose
import astshim
from astshim.test import MappingTestCase
class TestQuadApprox(MappingTestCase):
def test_QuadApprox(self):
# simple parabola
coeff_f = np.array([
[0.5, 1, 2, 0],
[0.5, 1, 0, 2],
], dtype=float)
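        # Each coefficient row is presumably [value, output index, power of x,
        # power of y]; both rows feed output 1, so the mapping is
        # f(x, y) = 0.5*x**2 + 0.5*y**2 -- already quadratic, hence the
        # quadratic fit below recovers the two 0.5 terms exactly (rms 0).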
polymap = astshim.PolyMap(coeff_f, 1)
qa = astshim.QuadApprox(polymap, [-1, -1], [1, 1], 3, 3)
self.assertAlmostEqual(qa.rms, 0)
self.assertEqual(len(qa.fit), 6)
assert_allclose(qa.fit, [0, 0, 0, 0, 0.5, 0.5])
if __name__ == "__main__":
unittest.main()
| [
"rowen@uw.edu"
] | rowen@uw.edu |
079c41f794ad86e3d9b2fc122f2516e676cdd849 | b059c2cf1e19932abb179ca3de74ced2759f6754 | /S20/用协程实现TCP server端/server端.py | 7b4194d0d3f898fdcf9dfcf1c9dce3b34b1472bd | [] | no_license | Lwk1071373366/zdh | a16e9cad478a64c36227419d324454dfb9c43fd9 | d41032b0edd7d96e147573a26d0e70f3d209dd84 | refs/heads/master | 2020-06-18T02:11:22.740239 | 2019-07-10T08:55:14 | 2019-07-10T08:55:14 | 196,130,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | # from gevent import monkey;monkey.patch_all()
# from socket import *
# import gevent
# def sever(ipport):
# s = socket(AF_INET,SOCK_STREAM)
# s.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
# s.bind(ipport)
# s.listen(5)
# while True:
# cnn, addr = s.accept()
# print('%s is from %s'%(cnn, addr))
# gevent.spawn(talk, cnn,addr)
# s.close()
# def talk(cnn,addr):
# while True:
# try:
# res = cnn.recv(1024).decode('utf-8')
# cnn.send(res.upper().encode('utf-8'))
# except Exception:break
# cnn.close()
#
# if __name__ == '__main__':
# ipport = ('127.0.0.1', 8080,)
# sever(ipport)
#
# sever
| [
"1071373366@qq.com"
] | 1071373366@qq.com |
33ab547fa528dc26b3e4296150c3b317e78c2b17 | a4ad068e96b772786e5eeb0bec027b759924cd12 | /chatapp/migrations/0001_initial.py | f939dede1ad22b16638cd88ed590b1da3951dcc8 | [] | no_license | kolamor/chatrest | 54f04459ec323df7d8a1603a3a91432d360b1b1d | f48084b14ecc516cff7acf80349113afca18ecb9 | refs/heads/master | 2020-04-03T18:04:58.808642 | 2018-11-04T23:35:53 | 2018-11-04T23:35:53 | 155,470,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,054 | py | # Generated by Django 2.1.2 on 2018-10-31 13:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Chat',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(max_length=500, verbose_name='Сообщения')),
('date', models.DateTimeField(auto_now_add=True, verbose_name='Дата отправки')),
],
options={
'verbose_name': 'Сообщении чата',
'verbose_name_plural': 'Сообщения чатов',
},
),
migrations.CreateModel(
name='Room',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
('creater', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Комната чата')),
('invited', models.ManyToManyField(related_name='invated_user', to=settings.AUTH_USER_MODEL, verbose_name='Участники')),
],
),
migrations.AddField(
model_name='chat',
name='room',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='chatapp.Room', verbose_name='Комната чата'),
),
migrations.AddField(
model_name='chat',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Пользователь'),
),
]
| [
"kolamorev@mail.ru"
] | kolamorev@mail.ru |
41f5c58f77649619cdd25701bcfe33c729f7cb27 | c237dfae82e07e606ba9385b336af8173d01b251 | /lib/python/Products/ZCTextIndex/PipelineFactory.py | db26faffb6f4c1f06f8114d6fa52e1212d487bbf | [
"ZPL-2.0"
] | permissive | OS2World/APP-SERVER-Zope | 242e0eec294bfb1ac4e6fa715ed423dd2b3ea6ff | dedc799bd7eda913ffc45da43507abe2fa5113be | refs/heads/master | 2020-05-09T18:29:47.818789 | 2014-11-07T01:48:29 | 2014-11-07T01:48:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,755 | py | ##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
from Products.ZCTextIndex.IPipelineElementFactory \
import IPipelineElementFactory
class PipelineElementFactory:
__implements__ = IPipelineElementFactory
def __init__(self):
self._groups = {}
def registerFactory(self, group, name, factory):
if self._groups.has_key(group) and \
self._groups[group].has_key(name):
raise ValueError('ZCTextIndex lexicon element "%s" '
'already registered in group "%s"'
% (name, group))
elements = self._groups.get(group)
if elements is None:
elements = self._groups[group] = {}
elements[name] = factory
def getFactoryGroups(self):
groups = self._groups.keys()
groups.sort()
return groups
def getFactoryNames(self, group):
names = self._groups[group].keys()
names.sort()
return names
def instantiate(self, group, name):
factory = self._groups[group][name]
if factory is not None:
return factory()
element_factory = PipelineElementFactory()
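# Example usage (hypothetical group/element names):
#   element_factory.registerFactory('splitter', 'whitespace', WhitespaceSplitter)
#   splitter = element_factory.instantiate('splitter', 'whitespace')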
| [
"martin@os2world.com"
] | martin@os2world.com |
f9708f992615e8edde8247e26bb3fbfa8054cbe0 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_19413.py | dc23e47f7fa018a4eed46872ce144ce63316e29c | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54 | py | # parsing .xml blast output with re
Bio.Blast.NCBIXML
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
77433b38a0a5abc355c4e2066d00ae7b3f9227dc | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/hashSet_20200803081841.py | 6a89f7864e8b0daabfa670dc0ec35c942841797a | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | class myHashSet:
def __init__(self):
def add(self,key:int):
def remove() | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |