blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
63ea14a37d667590081bac94e233095bdca136b6 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc028/A/4966845.py | 77d8954fc41b4806aa8730cdc1c626dcc260484e | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 145 | py | N = int(input())
# Map the score N (read from stdin above) onto the contest's rating bands.
if N <= 59:
    print('Bad')        # 0-59
elif N <= 89:
    print('Good')       # 60-89
elif N <= 99:
    print('Great')      # 90-99
else:
    print('Perfect')    # 100 and above
"kwnafi@yahoo.com"
] | kwnafi@yahoo.com |
2e61a75b1f3159a26f99c6856de297e1936462a3 | 8310622d9f504b6ffdac62b57727afbc0af9992e | /problems/problems@101~200/problem_162/Hexadecimal_numbers.py | 0ab26d0339b45f05f153ada5056add6219c8610c | [] | no_license | smsxgz/euler_project | aba61131682d04ee614167181e7d77e979db7e02 | df373a5cdf2c3c106763ee2c25671f85f9ec3a9b | refs/heads/master | 2023-02-22T12:00:24.801942 | 2023-02-04T13:23:27 | 2023-02-04T13:23:27 | 98,957,903 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | n = 0
# Sum a closed-form count for each hexadecimal length k from 3 to 16
# digits, accumulating into n (initialised above).
# NOTE(review): the inclusion-exclusion formula is taken as given here;
# verify it against the Project Euler 162 statement.
for k in range(3, 17):
    m = 15 * 16**(k - 1) - (15**k + 2 * 14 * 15**(k - 1)) + (2 * 14**k + 13 * 14**(k - 1)) - 13**k
    if k == 3:
        # Debug print of the count for the shortest length.
        print(m)
    n += m
print(hex(n))
| [
"smsxgz@gmail.com"
] | smsxgz@gmail.com |
123ef74cbbbc9c71c0e72270501ad9436e4674cf | f8d3f814067415485bb439d7fe92dc2bbe22a048 | /models/research/swivel/wordsim.py | 439d093f11de75607754632fc84a7d4dea95747e | [
"Apache-2.0"
] | permissive | gmonkman/python | 2f9ab8f159c01f6235c86cb0cd52062cd3fdedd3 | 9123aa6baf538b662143b9098d963d55165e8409 | refs/heads/master | 2023-04-09T15:53:29.746676 | 2022-11-26T20:35:21 | 2022-11-26T20:35:21 | 60,254,898 | 0 | 2 | null | 2023-03-24T22:58:39 | 2016-06-02T10:25:27 | Python | UTF-8 | Python | false | false | 2,517 | py | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Computes Spearman's rho with respect to human judgements.
Given a set of row (and potentially column) embeddings, this computes Spearman's
rho between the rank ordering of predicted word similarity and human judgements.
Usage:
wordim.py --embeddings=<binvecs> --vocab=<vocab> eval1.tab eval2.tab ...
Options:
--embeddings=<filename>: the vectors to test
--vocab=<filename>: the vocabulary file
Evaluation files are assumed to be tab-separated files with exactly three
columns. The first two columns contain the words, and the third column contains
the scored human judgement.
"""
from __future__ import print_function
import scipy.stats
import sys
from getopt import GetoptError, getopt
from vecs import Vecs
try:
    # Long options only: --embeddings=<file> --vocab=<file>.
    opts, args = getopt(sys.argv[1:], '', ['embeddings=', 'vocab='])
except GetoptError as e:
    print(e, file=sys.stderr)
    sys.exit(2)

opt_embeddings = None
opt_vocab = None

for o, a in opts:
    if o == '--embeddings':
        opt_embeddings = a
    if o == '--vocab':
        opt_vocab = a

# Both options are mandatory; exit code 2 signals a usage error.
if not opt_vocab:
    print('please specify a vocabulary file with "--vocab"', file=sys.stderr)
    sys.exit(2)

if not opt_embeddings:
    print('please specify the embeddings with "--embeddings"', file=sys.stderr)
    sys.exit(2)

try:
    # Load the embedding matrix together with its vocabulary.
    vecs = Vecs(opt_vocab, opt_embeddings)
except IOError as e:
    print(e, file=sys.stderr)
    sys.exit(1)
def evaluate(lines):
    """Return Spearman's rho between human judgements and model similarity.

    ``lines`` is an iterable of tab-separated rows: word1, word2, human
    score.  Word pairs for which the model has no prediction are skipped.
    """
    acts, preds = [], []
    # Bug fix: iterate over the lines the caller already opened.  The
    # original shadowed the ``lines`` parameter by re-opening the global
    # ``filename``, reading every evaluation file twice.
    for line in lines:
        w1, w2, act = line.strip().split('\t')
        pred = vecs.similarity(w1, w2)
        if pred is None:
            # At least one word is out of vocabulary; skip the pair.
            continue
        acts.append(float(act))
        preds.append(pred)
    rho, _ = scipy.stats.spearmanr(acts, preds)
    return rho
# Each remaining command-line argument is one evaluation file; print its
# Spearman's rho next to its name.
for filename in args:
    with open(filename, 'r') as lines:
        print('%0.3f %s' % (evaluate(lines), filename))
| [
"gmonkman@mistymountains.biz"
] | gmonkman@mistymountains.biz |
5f0f0571c66281c5a92ae6f76a226bf6109f2588 | 5515b79ab3dc12f9b5117bd9c3beb39fbad198cc | /middlewares/__init__.py | 5aad822c19c4ed2df42ad9f60452ce62891f688d | [] | no_license | Chenger1/SwipeTelegramBot | 17904eb44776052938da18a452594ef6ae9313eb | 1ac4e86339aec3ae17b22a4d2c06001cd4b01c99 | refs/heads/main | 2023-07-14T05:56:11.596278 | 2021-08-18T12:31:58 | 2021-08-18T12:31:58 | 388,511,755 | 0 | 0 | null | 2021-08-08T22:19:24 | 2021-07-22T15:32:36 | Python | UTF-8 | Python | false | false | 488 | py | from aiogram import Dispatcher
from data.config import I18N_DOMAIN, LOCALES_DIR
from loader import dp
from .throttling import ThrottlingMiddleware
from .language import ACLMiddleware
def setup_middleware():
    """Create the i18n middleware, register it on the dispatcher and return it."""
    middleware = ACLMiddleware(I18N_DOMAIN, LOCALES_DIR)
    dp.middleware.setup(middleware)
    return middleware
# Module-level i18n instance; ``_`` is the conventional gettext alias.
i18n = setup_middleware()
_ = i18n.gettext

if __name__ == "middlewares":
    # Register throttling only when this package is imported as "middlewares".
    dp.middleware.setup(ThrottlingMiddleware())
| [
"exs2199@gmail.com"
] | exs2199@gmail.com |
c187a7a01c74659a57be0805011863c63eec89f1 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-deh/huaweicloudsdkdeh/v1/model/batch_delete_dedicated_host_tags_request.py | d10a3ef88aa3aa3ffda7173164316f9be54b17ed | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,519 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class BatchDeleteDedicatedHostTagsRequest:
    """Generated request model for the DeH batch-delete-tags API call.

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """

    # Attribute names listed here are masked as "****" by to_dict().
    sensitive_list = []

    openapi_types = {
        'dedicated_host_id': 'str',
        'body': 'ReqSetOrDeleteTags'
    }

    attribute_map = {
        'dedicated_host_id': 'dedicated_host_id',
        'body': 'body'
    }

    def __init__(self, dedicated_host_id=None, body=None):
        """BatchDeleteDedicatedHostTagsRequest

        The model defined in huaweicloud sdk

        :param dedicated_host_id: Dedicated host ID. It can be obtained from
            the dedicated-host console or by calling the list-dedicated-hosts API.
        :type dedicated_host_id: str
        :param body: Body of the BatchDeleteDedicatedHostTagsRequest
        :type body: :class:`huaweicloudsdkdeh.v1.ReqSetOrDeleteTags`
        """

        self._dedicated_host_id = None
        self._body = None
        self.discriminator = None

        self.dedicated_host_id = dedicated_host_id
        if body is not None:
            self.body = body

    @property
    def dedicated_host_id(self):
        """Gets the dedicated_host_id of this BatchDeleteDedicatedHostTagsRequest.

        Dedicated host ID. It can be obtained from the dedicated-host console
        or by calling the list-dedicated-hosts API.

        :return: The dedicated_host_id of this BatchDeleteDedicatedHostTagsRequest.
        :rtype: str
        """
        return self._dedicated_host_id

    @dedicated_host_id.setter
    def dedicated_host_id(self, dedicated_host_id):
        """Sets the dedicated_host_id of this BatchDeleteDedicatedHostTagsRequest.

        Dedicated host ID. It can be obtained from the dedicated-host console
        or by calling the list-dedicated-hosts API.

        :param dedicated_host_id: The dedicated_host_id of this BatchDeleteDedicatedHostTagsRequest.
        :type dedicated_host_id: str
        """
        self._dedicated_host_id = dedicated_host_id

    @property
    def body(self):
        """Gets the body of this BatchDeleteDedicatedHostTagsRequest.

        :return: The body of this BatchDeleteDedicatedHostTagsRequest.
        :rtype: :class:`huaweicloudsdkdeh.v1.ReqSetOrDeleteTags`
        """
        return self._body

    @body.setter
    def body(self, body):
        """Sets the body of this BatchDeleteDedicatedHostTagsRequest.

        :param body: The body of this BatchDeleteDedicatedHostTagsRequest.
        :type body: :class:`huaweicloudsdkdeh.v1.ReqSetOrDeleteTags`
        """
        self._body = body

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models, lists and dicts; mask
        # attributes named in ``sensitive_list``.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        # NOTE(review): relies on the third-party ``simplejson`` package;
        # the Python 2 branch mutates the interpreter's default encoding.
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, BatchDeleteDedicatedHostTagsRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
2bbf8c99ea9aaba3001ad14fe8ab365dc25d0f5d | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_joshes.py | 899a037817f7722c99dc8c579c857e84046b438a | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
from xai.brain.wordbase.verbs._josh import _JOSH
# class header
class _JOSHES(_JOSH, ):
    """Word entry for "joshes", an inflected form of the verb "josh"."""

    def __init__(self,):
        # Initialise the shared machinery from the base word first.
        _JOSH.__init__(self)
        # Identity of this word-form entry.
        self.name, self.basic = "JOSHES", "josh"
        self.specie = 'verbs'
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
5795e4bfa392c6ec404a4e3cc63f74cba6c53480 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/aio/operations/_operation_results_operations.py | c5156f79a9bc2d390f074d3850fb41f3b4cff510 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 5,316 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class OperationResultsOperations:
    """OperationResultsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~video_analyzer.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def get(
        self,
        resource_group_name: str,
        account_name: str,
        name: str,
        operation_id: str,
        **kwargs: Any
    ) -> Optional["_models.PrivateEndpointConnection"]:
        """Get operation result.

        Get private endpoint connection operation result.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
        :type resource_group_name: str
        :param account_name: The Video Analyzer account name.
        :type account_name: str
        :param name: Private endpoint connection name.
        :type name: str
        :param operation_id: Operation Id.
        :type operation_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateEndpointConnection, or the result of cls(response)
        :rtype: ~video_analyzer.models.PrivateEndpointConnection or None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.PrivateEndpointConnection"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-11-01-preview"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'operationId': self._serialize.url("operation_id", operation_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 and 202 are expected; anything else is mapped to an error.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        # Only a 200 response carries a deserializable connection body.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Media/videoAnalyzers/{accountName}/privateEndpointConnections/{name}/operationResults/{operationId}'}  # type: ignore
| [
"noreply@github.com"
] | Azure.noreply@github.com |
1e6b0f2307a497e5e53ec48c5671ce7a0188ec39 | bef3a5af16d50e68db158a906fbfdb323d7d0733 | /scripts/do_scatters.py | c3f208da50821f2cd63dc4e2d4abde280aff3450 | [] | no_license | flaviovdf/competition-models | 413a29054ec77e5a093ca1eb4e6ec112b1b95728 | 6b52b082f93145921b5b17de718aa6316d2b1c40 | refs/heads/master | 2020-07-04T09:15:21.940045 | 2015-12-28T16:44:42 | 2015-12-28T16:44:42 | 20,268,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,591 | py | #!/usr/bin/env python
from __future__ import division, print_function
from sklearn import linear_model
from matplotlib import pyplot as plt
import numpy as np
import plac
def main(ids_fpath, rates_fpath, plays_fpath):
    """Plot plays-vs-lifetime scatter panels for six hand-picked artists.

    :param ids_fpath: file of ``<id> <artist name>`` lines
    :param rates_fpath: file of ``<id>`` followed by numeric rate columns
    :param plays_fpath: file of ``<id>`` followed by numeric play columns
    """
    # Bidirectional id <-> name lookup tables.
    mb_to_name = {}
    name_to_mb = {}
    with open(ids_fpath) as f:
        for line in f:
            spl = line.split()
            mbid = spl[0]
            name = ' '.join(spl[1:])
            mb_to_name[mbid] = name
            name_to_mb[name] = mbid

    # Per-artist numeric vectors keyed by id.
    rates = {}
    with open(rates_fpath) as f:
        for line in f:
            spl = line.split()
            mbid = spl[0]
            rates[mbid] = np.array([float(x) for x in spl[1:]])

    plays = {}
    with open(plays_fpath) as f:
        for line in f:
            spl = line.split()
            mbid = spl[0]
            plays[mbid] = np.array([float(x) for x in spl[1:]])

    i = 0
    n_bins = 10
    for artist in ['Ladytron', 'Britney Spears', 'Radiohead', \
            'Metallica', 'Daft Punk', 'Yann Tiersen']:
        rate = rates[name_to_mb[artist]]
        play = plays[name_to_mb[artist]]
        # Elementwise lifetime estimate: plays divided by rate.
        lifetime = (play / rate)

        # OLS fit of plays against lifetime, predicted on sorted x so the
        # regression line plots smoothly.
        ols = linear_model.LinearRegression(fit_intercept=True)
        ols.fit(np.array([lifetime]).T, play)
        regr = ols.predict(np.array([sorted(lifetime)]).T)

        # Equal-count bins over sorted lifetime; summarise each bin by mean...
        idx_sorted = lifetime.argsort()
        bin_size = int(idx_sorted.shape[0] / n_bins)
        mean_lifetime = []
        mean_plays = []
        for j in range(0, idx_sorted.shape[0], bin_size):
            idx = idx_sorted[j:j + bin_size]
            mean_lifetime.append(lifetime[idx].mean())
            mean_plays.append(play[idx].mean())

        # ...and by median, for a robustness comparison.
        median_lifetime = []
        median_plays = []
        for j in range(0, idx_sorted.shape[0], bin_size):
            idx = idx_sorted[j:j + bin_size]
            median_lifetime.append(np.median(lifetime[idx]))
            median_plays.append(np.median(play[idx]))

        # One panel per artist in a 2x3 grid; y (plays) on a log scale.
        plt.subplot(2, 3, i + 1)
        plt.title(artist)
        plt.semilogy(lifetime, play, 'wo')
        plt.semilogy(sorted(lifetime), regr, 'k-')
        plt.semilogy(mean_lifetime, mean_plays, 'bo')
        plt.semilogy(mean_lifetime, mean_plays, 'b-', label='Mean')
        plt.semilogy(median_lifetime, median_plays, 'ro')
        plt.semilogy(median_lifetime, median_plays, 'r-', label='Median')
        plt.legend()
        plt.xlabel('Lifetime')
        plt.ylabel('Plays')
        i += 1

    plt.tight_layout(pad=0)
    #plt.savefig('time_plays.pdf')
    plt.show()


if __name__ == '__main__':
    plac.call(main)
| [
"flaviovdf@gmail.com"
] | flaviovdf@gmail.com |
c1a23830661aae1169a40e347d789f2684bf1e48 | 34e8d8702a26e33622ec9c1cf21f55abf910bd8c | /bw2io/extractors/csv.py | 8bd22371f76b60dc65e6eea2ad02655143102574 | [] | no_license | PascalLesage/brightway2-io | 72775595f8964a21b8c571db4dcdffc2e7b88329 | 3076770e1fd8b38fef31fa0e547facbcb6650cd8 | refs/heads/master | 2022-12-04T07:09:35.633060 | 2020-07-01T10:39:56 | 2020-07-01T10:39:56 | 285,389,011 | 0 | 0 | null | 2020-08-05T19:47:10 | 2020-08-05T19:47:09 | null | UTF-8 | Python | false | false | 373 | py | # -*- coding: utf-8 -*-
import os
import csv
class CSVExtractor(object):
    """Extract a CSV file into ``[basename, rows]``."""

    @classmethod
    def extract(cls, filepath):
        """Read ``filepath`` and return ``[basename, list_of_row_lists]``.

        Raises AssertionError if the file does not exist (preserved from the
        original interface).
        """
        assert os.path.exists(filepath), "Can't file file at path {}".format(filepath)
        # newline='' is the csv-module documented way to open input files;
        # it keeps embedded newlines inside quoted fields intact.
        with open(filepath, newline='') as f:
            data = list(csv.reader(f))
        return [os.path.basename(filepath), data]
| [
"cmutel@gmail.com"
] | cmutel@gmail.com |
40c830c44eb8d5a8f86678b190be3e83ca70d750 | 70ed0a22937378b923a77749df38a61b7c1f7add | /jagare/converter/commit.py | c81c5546394706c0bccdd70e64aa1a4f859a7cc8 | [] | no_license | tclh123/jagare-rpc | cffdee5e87b4ad00f0c33c58b5a00e3ad06a3eab | de12c8c5d540e90aeaa360fbe6254688ad4c08cd | refs/heads/master | 2021-01-15T09:37:28.232631 | 2014-06-04T16:22:15 | 2014-06-04T16:22:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | # coding: utf-8
"""
struct Commit {
1: required string type, # 'commit'
2: required string sha,
3: required list<string> parents, # shas
4: required string tree, # sha of the tree object attached to the commit
5: required Signature committer,
6: required Signature author,
7: required string email,
8: required i64 time,
9: required i16 offset,
10: required string commit,
11: required string message,
12: required string body, # commit message body
}
"""
from .base import Converter, Commit
from .signature import SignatureConverter
class CommitConverter(Converter):
    """Convert a raw commit dict into the thrift ``Commit`` struct described
    in the module docstring."""

    target_type = Commit

    def prepare(self):
        # The struct carries a 'parents' list, so the singular key is dropped.
        self.drop('parent')
        self.type = 'commit'
        # Nested signature dicts are converted with their own converter.
        self.committer = SignatureConverter(**self.committer).convert()
        self.author = SignatureConverter(**self.author).convert()
        # NOTE(review): assumes Converter.unicode_str decodes the named
        # field in place — confirm in the base class.
        self.unicode_str('message')
        self.unicode_str('body')
| [
"tclh123@gmail.com"
] | tclh123@gmail.com |
a667310187aa30b7808931181ffd30a86d1be517 | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-sddp/aliyunsdksddp/request/v20190103/DescribeDataLimitDetailRequest.py | 66a2e93d3f972d45756dd03fad798e75f38d411c | [
"Apache-2.0"
] | permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 1,613 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeDataLimitDetailRequest(RpcRequest):
    """Generated RPC request for SDDP ``DescribeDataLimitDetail`` (API version 2019-01-03)."""

    def __init__(self):
        RpcRequest.__init__(self, 'Sddp', '2019-01-03', 'DescribeDataLimitDetail','sddp')

    # Generated accessor pairs: each getter/setter maps one query parameter.
    def get_SourceIp(self):
        return self.get_query_params().get('SourceIp')

    def set_SourceIp(self,SourceIp):
        self.add_query_param('SourceIp',SourceIp)

    def get_id(self):
        return self.get_query_params().get('id')

    def set_id(self,id):
        self.add_query_param('id',id)

    def get_NetworkType(self):
        return self.get_query_params().get('NetworkType')

    def set_NetworkType(self,NetworkType):
        self.add_query_param('NetworkType',NetworkType)

    def get_Lang(self):
        return self.get_query_params().get('Lang')

    def set_Lang(self,Lang):
        self.add_query_param('Lang',Lang)
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
db59a9d35475b939c95421a85a469899fb221212 | 2aac5c508641e0e9f8f7de9f0f7833a9b9107b10 | /source1/bsp/lumps/model_lump.py | 7f490ce65fed7db66830de55c7a8ce83b00cbe47 | [
"MIT"
] | permissive | half5life/SourceIO | 1d2e90a05706d9dd5721afd6501327e0c16a1160 | f3dc6db92daa537acbb487ce09f371866f6e3e7f | refs/heads/master | 2023-03-22T08:27:15.792325 | 2021-03-15T11:26:10 | 2021-03-15T11:26:10 | 345,146,708 | 0 | 0 | MIT | 2021-03-15T11:26:11 | 2021-03-06T16:56:37 | Python | UTF-8 | Python | false | false | 577 | py | from typing import List
from .. import Lump, lump_tag
from ..datatypes.model import Model, RespawnModel
@lump_tag(14, 'LUMP_MODELS')
class ModelLump(Lump):
    """BSP lump 14 (LUMP_MODELS): the list of model records in the map."""

    def __init__(self, bsp, lump_id):
        super().__init__(bsp, lump_id)
        self.models: List[Model] = []

    def parse(self):
        reader = self.reader
        # NOTE(review): assumes the reader becomes falsy once exhausted —
        # confirm against the reader implementation.
        while reader:
            if self._bsp.version < 29:
                self.models.append(Model(self, self._bsp).parse(reader))
            else:
                # Version 29+ lumps use the RespawnModel record layout.
                self.models.append(RespawnModel(self, self._bsp).parse(reader))
        return self
| [
"med45c@gmail.com"
] | med45c@gmail.com |
164bef8814cf463fbd8347f8ed0b69148afade9a | d92b87b49ccda07c1523769ad91bd07d319eaab1 | /toqito/states/chessboard.py | b88eb0ce9554bb8fdf8725d1f3ea871bade9a724 | [
"MIT"
] | permissive | ayazskhan/toqito | a965720d4f896d3e6727a08e3ed0f7cde4dc5c8f | 0846fb13bc25e82dc602f6184b8d5ecfcfcf8218 | refs/heads/master | 2022-12-25T22:50:27.313455 | 2020-09-28T16:24:16 | 2020-09-28T16:24:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,040 | py | """Chessboard state."""
from typing import List
import numpy as np
def chessboard(
    mat_params: List[float], s_param: float = None, t_param: float = None
) -> np.ndarray:
    r"""
    Produce a chessboard state [BP00]_.

    Builds the chessboard state of [BP00]_ from six matrix parameters and
    two optional parameters. When :code:`s_param` or :code:`t_param` is
    omitted, it is derived from :code:`mat_params` as in the reference.
    Note that, for certain choices of :code:`s_param` and :code:`t_param`,
    the state need not have positive partial transpose, and thus may not
    be bound entangled.

    Examples
    ==========

    >>> from toqito.states import chessboard
    >>> chessboard([1, 2, 3, 4, 5, 6], 7, 8)  # doctest: +SKIP

    References
    ==========
    .. [BP00] Three qubits can be entangled in two inequivalent ways.
        D. Bruss and A. Peres
        Phys. Rev. A, 61:30301(R), 2000
        arXiv: 991.1056

    :param mat_params: Six parameters that populate the defining vectors.
    :param s_param: Optional; defaults to conj(m[2]) / conj(m[5]).
    :param t_param: Optional; defaults to m[0] * m[3] / m[4].
    :return: A chessboard state (9x9 density matrix with unit trace).
    """
    if s_param is None:
        s_param = np.conj(mat_params[2]) / np.conj(mat_params[5])
    if t_param is None:
        t_param = mat_params[0] * mat_params[3] / mat_params[4]

    m = mat_params
    # The four (unnormalized) defining row vectors of the state.
    vectors = [
        np.array([[m[4], 0, s_param, 0, m[5], 0, 0, 0, 0]]),
        np.array([[0, m[0], 0, m[1], 0, m[2], 0, 0, 0]]),
        np.array([[np.conj(m[5]), 0, 0, 0, -np.conj(m[4]), 0, t_param, 0, 0]]),
        np.array([[0, np.conj(m[1]), 0, -np.conj(m[0]), 0, 0, 0, m[3], 0]]),
    ]

    # Sum of the outer products v^dagger v, then normalize to unit trace.
    rho = sum(vec.conj().T * vec for vec in vectors)
    return rho / np.trace(rho)
| [
"vincentrusso1@gmail.com"
] | vincentrusso1@gmail.com |
ae73832910611ed728d4352f954d63507b078cd3 | ae3e09f8aa3c54a91d29833bed26de032ee1f0d6 | /C. Yuhao and a Parenthesis.py | 430da23202adc5eb6b9c026f95a960ff60d967dd | [] | no_license | thecodearrow/100-Days-Of-Code | c0604c8922cb4b9c9655bcf7db8fba6fa0a629d2 | 790ef030375340bf7824380775ee239557034836 | refs/heads/master | 2021-11-24T18:56:45.611895 | 2021-10-29T00:03:38 | 2021-10-29T00:03:38 | 118,704,025 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | #http://codeforces.com/contest/1097/problem/C
import sys
import math
from collections import defaultdict
def getInputFromLine():
    """Read one line from stdin and return its whitespace-separated ints."""
    return list(map(int, input().split()))
try:
    # Local-testing convenience: redirect stdio to files when present.
    sys.stdin = open('input.txt', 'r')
    sys.stdout = open('output.txt', 'w')
except:
    # On the judge the files are absent; fall back to normal stdio.
    pass
def mirror(s):
    """Return the bracket mirror of ``s``: reverse the string, then map
    every ')' to '(' and every other character to ')'."""
    return "".join("(" if ch == ")" else ")" for ch in reversed(s))
def getScore(s):
    """Return the final open-bracket balance of ``s``, or -1 as soon as the
    running balance goes negative (an unmatched ')')."""
    deltas = {"(": 1, ")": -1}
    balance = 0
    for ch in s:
        balance += deltas.get(ch, 0)
        if balance < 0:
            return -1
    return balance
# Read the n bracket sequences.
n = int(input())
brackets = []
for i in range(n):
    t = input()
    brackets.append(t)

scores = []  # NOTE(review): unused; kept for fidelity.
count = 0
bal = defaultdict(lambda: 0)
lead = 0  # number of already-balanced sequences
# First pass: bucket prefix-valid sequences by their final balance.
for b in brackets:
    balance = getScore(b)
    if balance != -1:
        bal[balance] += 1
        if balance == 0:
            lead += 1

# Second pass: a sequence can pair with any available sequence whose
# balance matches its mirror's balance.
for b in brackets:
    m = mirror(b)
    balance = getScore(m)
    if balance != -1:
        if bal[balance] > 0:
            bal[balance] -= 1
            count += 1

# Balanced sequences matched themselves above, so correct the count:
# subtract the lead self-matches and add back floor(lead / 2) real pairs.
print(count - lead + (lead // 2))
"you@example.com"
] | you@example.com |
65a607155c70bfe5aad3197898c156c38f26f59c | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/gui/impl/lobby/awards/__init__.py | 465f87ba720e314ee0cbbb36c279e990182e8066 | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 311 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/impl/lobby/awards/__init__.py
from shared_utils import CONST_CONTAINER
class SupportedTokenTypes(CONST_CONTAINER):
    # Constant identifiers for the token types this module supports.
    BATTLE_TOKEN = 'battleToken'
    TOKENS = 'tokens'
    PROGRESSION_XP_TOKEN = 'progressionXPToken'
| [
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
67c3ed498ff90804fddcd1b5670e11105fde15f2 | 5acc20092ee93935594a7e0522924245a43e5531 | /feature_selection/plot_select_from_model_boston.py | 9f27a604d83a531bb95997cee1558fc059ba8a52 | [] | no_license | shengchaohua/sklearn-examples | aae2332c4382a57a70c1887777c125e6dc4579d6 | 1dac6a9b5e703185a8da1df7c724022fbd56a9e4 | refs/heads/master | 2020-05-05T01:19:20.037746 | 2019-10-18T08:55:01 | 2019-10-18T08:55:01 | 179,599,221 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,164 | py | import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_boston
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LassoCV
# Load the boston dataset.
# NOTE(review): load_boston was removed from recent scikit-learn releases;
# this script targets an older version.
boston = load_boston()
X, y = boston.data, boston.target

# We use the base estimator LassoCV since the L1 norm promotes sparsity of
# features.
clf = LassoCV(cv=5)

# Set a minimum threshold of 0.25
sfm = SelectFromModel(clf, threshold=0.25)
sfm.fit(X, y)
n_features = sfm.transform(X).shape[1]

# Reset the threshold till the number of features equals two.
# Note that the attribute can be set directly instead of repeatedly
# fitting the metatransformer.
while n_features > 2:
    sfm.threshold += 0.1
    X_transform = sfm.transform(X)
    n_features = X_transform.shape[1]

# Plot the selected two features from X.
plt.title(
    "Features selected from Boston using SelectFromModel with "
    "threshold %0.3f." % sfm.threshold)
feature1 = X_transform[:, 0]
feature2 = X_transform[:, 1]
plt.plot(feature1, feature2, 'r.')
plt.xlabel("Feature number 1")
plt.ylabel("Feature number 2")
plt.ylim([np.min(feature2), np.max(feature2)])
plt.show()
| [
"shengchaohua163@163.com"
] | shengchaohua163@163.com |
70f621d07f3916ead89774507c510d3cdc1fe1bd | 67f3ef9af94ad92677ea772e3e671eae4934c10b | /Stepik/Python3_1/Lesson452.py | 9913cfcaf4181f210e8dc5e8c8e0310c7b507738 | [] | no_license | Ihar-Limitless/PythonWorkspace | a820a69c2a610bf3c209e783ac16962eb99f1751 | 4b684a209cd35a6ea8af349afc1a380aed84662e | refs/heads/master | 2020-07-17T14:56:32.552712 | 2020-05-20T08:47:40 | 2020-05-20T08:47:40 | 206,040,300 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 997 | py | # import csv, collections
# with open(r'd:/PythonWorkspace/Stepik/crimes.csv') as f:
# data = csv.reader(f)
# for i in data:
# print(i)
# print(collections.Counter(row[5] for row in data if '2015' in row[2]))
#
# import csv
#
# with open("Crimes.csv") as fi:
# reader = csv.reader(fi)
# next(reader)
# crime_cnt = dict()
# for row in reader:
# year = row[2][6:10]
# if year == "2015":
# crime_type = row[5]
# if crime_type not in crime_cnt:
# crime_cnt[crime_type] = 0
# crime_cnt[crime_type] += 1
#
# a = list(map(lambda x: (crime_cnt[x], x), crime_cnt))
# a.sort(key=lambda x: -x[0])
#
# print(a[0][1])
import csv
from collections import Counter

# Tally the 'Primary Type' column of the crimes CSV and print the single
# most frequent crime category.
with open(r'd:/PythonWorkspace/Stepik/crimes.csv', "r") as f:
    rows = csv.DictReader(f)
    crimes = [record['Primary Type'] for record in rows]
    c = Counter(crimes).most_common(1)
    print(c[0][0])
print(crimes) | [
"01codename01@gmail.com"
] | 01codename01@gmail.com |
781eef84dfbe21f5c7fd4840436c42051d0c3859 | 029aa4fa6217dbb239037dec8f2e64f5b94795d0 | /Python算法指南/栈_heapq_deque/200_逆波兰表达式求值_栈的应用.py | 41595da2b1b998c1229b1e528e91e30e1db5f53d | [] | no_license | tonyyo/algorithm | 5a3f0bd4395a75703f9ee84b01e42a74283a5de9 | 60dd5281e7ce4dfb603b795aa194a67ff867caf6 | refs/heads/master | 2022-12-14T16:04:46.723771 | 2020-09-23T06:59:33 | 2020-09-23T06:59:33 | 270,216,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,208 | py | class Solution:
def evalRPN2(self, tokens):
stack = []
for i in tokens:
if i not in ('+', '-', '*', '/'):
stack.append(int(i))
else:
op2 = stack.pop()
op1 = stack.pop()
if i == '+': stack.append(op1 + op2)
elif i == '-': stack.append(op1 - op2)
elif i == '*': stack.append(op1 * op2)
else: stack.append(int(op1 * 1.0 / op2))
return stack[0]
def evalRPN1(self, tokens):
size = len(tokens)
stack = []
for x in tokens:
if x not in ["+", "-", "*", "/"]:
stack.append(x)
else:
a = int(stack.pop())
b = int(stack.pop())
result = 0
if x == "+":
result = a + b
if x == "-":
result = a - b
if x == "*":
result = a * b
if x == "/":
result = a / b
stack.append(result)
return stack[-1]
def evalRPN(self, tokens):
stack = []
for i in range(len(tokens)):
if tokens[i] not in ['+', '-', '*', '/']:
stack.append(tokens[i]) # 存储非数字
else:
if len(stack) == 1: # 因为要弹出两次,最好做一次判断
return stack[-1]
temp2 = int(stack.pop()) #两个操作数的顺序不要搞错了
temp1 = int(stack.pop())
if tokens[i] == '+':
stack.append(temp1 + temp2)
elif tokens[i] == '_':
stack.append(temp1 - temp2)
elif tokens[i] == '*':
stack.append(temp1 * temp2)
else:
stack.append(temp1 // temp2)
return stack[-1] if stack else -1
# Main entry point: evaluate a sample RPN expression and print the result.
if __name__=="__main__":
    tokens=["2", "1", "+", "3", "*"]
    # Instantiate the solver.
    solution=Solution()
    print("输入的逆波兰表达式是:",tokens)
print("计算逆波兰表达式的结果是:", solution.evalRPN(tokens)) | [
"1325338208@qq.com"
] | 1325338208@qq.com |
dfcfe43bab212cefb635a2746e2d5c8710e65661 | cb9281a34c3c5a36d4b3a846fb6ff22ede12f2f6 | /communities_CK_one_network_with_main_GC.py | 3b90ea26d0159d6b50b08a9cd6c7187a9378dcb5 | [] | no_license | juliettapc/CalorieKing | 9cb9f35ae9b239d2284175b0802cf2c60dc79d1d | 5f80bffb65fe4644a81ae2ab0b1738861e028331 | refs/heads/master | 2022-02-10T07:52:24.133379 | 2022-02-08T01:25:18 | 2022-02-08T01:25:18 | 153,174,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,612 | py | import subprocess as sp
import networkx as nx
def main():
    """Run the community-identification pipeline over each input network.

    For every edge-list file in name_list: load the graph with networkx,
    keep only its giant component, pipe the component's edges to the
    external /opt/communityID binary, parse the modularity/partition
    output, and append summary statistics to
    'summary_modularity_analysis_GC' plus a per-network
    '<name>_list_modularity_analysis_GC' file.
    (Python 2 code: relies on `print >>` statements and list-returning map().)
    """
    file1 = open('summary_modularity_analysis_GC','wt') # one summary file for everything
    print >> file1, "data: #_points time_scale GC_size <k> k(hub) modularity #_communities <community_size> max_community_size\n"
    file1.close()
    name_list=[] # list of names of the input files
    scale_list=[10]
    for scale in scale_list:
        #name_list.append(str(scale)+'_points_network/data/friend_graph_all0')
        name_list.append(str(scale)+'_points_network/data/friend_graph_all0')
    # NOTE(review): map() result is discarded, so this line has no effect.
    map (str, name_list) # remember: everything written out must be a string!!!!
    for name in name_list: # loop to go over files (== over networks)
        calculations=[] # list of attributes for every network (modularity, number of communities, averages,...)
        list_of_data_list=[] #list of attributes that are lists (top hubs, community sizes,...)
        #print "\n\nfile: "+name
        # NOTE(review): edge_data is never used afterwards and the file handle
        # is never closed.
        edge_data = open(name).readlines()
        H=nx.read_edgelist(name) # create the network from the original input file
        components=nx.connected_component_subgraphs(H)
        G=components[0] # i take just the GC as a subgraph to perform the community ID algorithm
        # G is a list of tuples: [(n1,n2),(n3,n4),(n2,n3),...]
        calculations.append("\n") # just to separate from the next set of data
        calculations.append(len(G))
        new_edge_data = [] #this list is what i will pass to Roger's code
        for e in G.edges(): # e is a list of two neighbors: [n1,n2]
            #i have to convert e to str because it is in some other format and the algorithm may not recognise it
            new_edge_data.append(" ".join(map(str,e))) # i join the two neighbors, separating them just by a space, so now they are just one element of the edge_list, which is: [n1 n2, n3 n4, n2 n3,...]
        degree_values=sorted(nx.degree(G).values())
        most_connected=[]
        for i in range (1,11):
            most_connected.append(degree_values[-i])
        list_of_data_list.append(most_connected) # save the connectivity values of the 10 top highest connected nodes
        average_network_degree=int(round(sum(G.degree().values())/float(len(G)),0) )
        calculations.append(average_network_degree)
        calculations.append(degree_values[-1])
        p = sp.Popen(["/opt/communityID"], stdin=sp.PIPE, stdout=sp.PIPE)
        # NOTE(review): "".join concatenates the edge strings with NO separator
        # between edges -- confirm /opt/communityID really expects that rather
        # than a newline-separated edge list.
        output, error = p.communicate("".join(new_edge_data)) # careful: only the giant component is passed
        community_lines = output.split("part")
        modularity = float(community_lines[0])
        partition_lines = community_lines[1].split("\n")
        modules = []
        calculations.append(modularity)
        max_max_degree=0
        max_size=0
        average_size=0
        average_max_degree=0
        size_list=[]
        max_conect_list=[]
        average_k_list=[]
        for p in partition_lines:
            this_module = p.split("---")
            if len(this_module) > 1:
                this_module = this_module[1] # 'this_module' is the list of nodes in the current module
                this_module = map(int, this_module.split())
                modules.append(this_module) # list of modules (list of lists)
                size=0
                conect_list=[]
                averageK=0
                for node in this_module: # loop over the nodes of the current module
                    node=str(node)
                    conect_list.append(G.degree(node)) #create a connectivity list for the nodes in the module
                    averageK=averageK+G.degree(node)
                    size=size+1
                size_list.append(size)# list of community sizes
                averageK=averageK/float(size)
                average_k_list.append(int(round(averageK,0)))
                if max_size < size:
                    max_size = size
                if max_max_degree < max(conect_list):
                    max_max_degree = max(conect_list)
                average_size=average_size+size
                average_max_degree=average_max_degree+max(conect_list)
                max_conect_list.append(max(conect_list))
        #average over communities
        average_size=average_size/len(modules)
        average_max_degree=average_max_degree/len(modules)
        calculations.append(len(modules)) #number of communities
        calculations.append(average_size) # average sizes of communities
        calculations.append(max_size) # maximum size of communities
        list_of_data_list.append(max_conect_list) # list of maximum connectivity per each community
        list_of_data_list.append(average_k_list) # list of average connectivity per each community
        list_of_data_list.append(size_list) # list of community sizes
        #print the results
        #print "number_of_communities_detected:"+str(len(modules))
        #print "average_size:", average_size,"average_max_degree:",average_max_degree
        #print "max_size:", max_size,"max_max_degree:",max_max_degree
        output_string = "modularity:" + str(modularity) +"\n" #print modularity
        for s in modules:
            module_string = ",".join(map(str,s))
            output_string += module_string + ";\n" # print the elements of every community
        #print output_string
        print modularity
        # write the output files
        file2 = open(name+'_list_modularity_analysis_GC','wt') #one output file per each input file
        print >> file2, "data: list_10top_hubs list_max(k)_each_comm list_<k>_each_comm list_community_sizes\n"
        for item in list_of_data_list:
            print >> file2, item
        print >> file2, "\n"
        file2.close()
        file1 = open('summary_modularity_analysis_GC','at') # one summary file for everything (append mode)
        for calculation in calculations:
            print >> file1, calculation, # trailing comma: keep the values on one line (no newline)
        file1.close()
# Script entry point.
if __name__== "__main__":
    main()
| [
"julia@chem-eng.northwestern.edu"
] | julia@chem-eng.northwestern.edu |
5be1057357ca33e65e6cafbd5328dd42ab0dd6cb | abeec076f89231c4dd589e84def8301e653d6e20 | /pages/views.py | 5052137eabc399ac2ce69d4c3ee03c3ff3a1008c | [] | no_license | gibil5/pcm_restaurant | 1cde6ee2780d3aa39dbc26dd9583f8465a1ff13a | a56ec01c533ed2b6e198de9813f9518a3eca2d14 | refs/heads/master | 2020-08-29T20:10:13.606229 | 2019-12-01T19:48:47 | 2019-12-01T19:48:47 | 218,160,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
# Create your views here.
# ------------------------------------------------ Home ---------------------
def home(request):
    """Render the site home page.

    Bug fix: the previous version wrapped the result of ``render`` in a
    second ``HttpResponse``.  ``render`` already returns an
    ``HttpResponse``; re-wrapping it discards the original status code
    and headers, so the rendered response is now returned directly.
    """
    ctx = {}
    return render(request, 'pages/home.html', ctx)
| [
"jrevilla55@gmail.com"
] | jrevilla55@gmail.com |
2f335708256187e77033287574a3fb0a96501daa | fbbe424559f64e9a94116a07eaaa555a01b0a7bb | /Rasterio_osgeo_shapely_PIL_pyproj_numpy/source/docutils/languages/fa.py | 48aa588deb4066e1034b9e603893fded6779dba7 | [
"MIT"
] | permissive | ryfeus/lambda-packs | 6544adb4dec19b8e71d75c24d8ed789b785b0369 | cabf6e4f1970dc14302f87414f170de19944bac2 | refs/heads/master | 2022-12-07T16:18:52.475504 | 2022-11-29T13:35:35 | 2022-11-29T13:35:35 | 71,386,735 | 1,283 | 263 | MIT | 2022-11-26T05:02:14 | 2016-10-19T18:22:39 | Python | UTF-8 | Python | false | false | 2,044 | py | # -*- coding: utf-8 -*-
# $Id: fa.py 4564 2016-08-10 11:48:42Z
# Author: Shahin <me@5hah.in>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Persian-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
# fixed: language-dependent
u'author': u'نویسنده',
u'authors': u'نویسندگان',
u'organization': u'سازمان',
u'address': u'آدرس',
u'contact': u'تماس',
u'version': u'نسخه',
u'revision': u'بازبینی',
u'status': u'وضعیت',
u'date': u'تاریخ',
u'copyright': u'کپیرایت',
u'dedication': u'تخصیص',
u'abstract': u'چکیده',
u'attention': u'توجه!',
u'caution': u'احتیاط!',
u'danger': u'خطر!',
u'error': u'خطا',
u'hint': u'راهنما',
u'important': u'مهم',
u'note': u'یادداشت',
u'tip': u'نکته',
u'warning': u'اخطار',
u'contents': u'محتوا'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
# language-dependent: fixed
u'نویسنده': u'author',
u'نویسندگان': u'authors',
u'سازمان': u'organization',
u'آدرس': u'address',
u'تماس': u'contact',
u'نسخه': u'version',
u'بازبینی': u'revision',
u'وضعیت': u'status',
u'تاریخ': u'date',
u'کپیرایت': u'copyright',
u'تخصیص': u'dedication',
u'چکیده': u'abstract'}
"""Persian (lowcased) to canonical name mapping for bibliographic fields."""
# Candidate separators for splitting the 'Authors' field: Arabic semicolon
# (U+061B) followed by Arabic comma (U+060C).
author_separators = [u'؛', u'،']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| [
"ryfeus@gmail.com"
] | ryfeus@gmail.com |
a8c82d26a3f03fe05f4ce33a1d93a3003463983e | 18b3d06a8a93839f7e7a1cf536a71bfc0adf8e20 | /devel/lib/python2.7/dist-packages/msgs_demo/msg/_GetMapFeedback.py | 446f7fdb8b177dd8f01c4c16e0931a2dccbd66a2 | [] | no_license | akingse/ros_tutorial_ws | dc52cbbf443f7823a0abd9223fef076cf959a24e | 7c776d2f62af0455a899c80e171d5210e0a8b382 | refs/heads/main | 2023-03-01T04:48:54.510004 | 2021-02-08T14:08:18 | 2021-02-08T14:09:30 | 337,094,532 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,118 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from msgs_demo/GetMapFeedback.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
# Autogenerated genpy message class for the GetMapFeedback action feedback.
# The message declares no fields, so the (de)serializers below are no-ops
# that only keep the standard genpy structure.
class GetMapFeedback(genpy.Message):
  _md5sum = "d41d8cd98f00b204e9800998ecf8427e"
  _type = "msgs_demo/GetMapFeedback"
  _has_header = False # flag to mark the presence of a Header object
  _full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
#无返回部分
"""
  __slots__ = []
  _slot_types = []
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(GetMapFeedback, self).__init__(*args, **kwds)
  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    # No fields to write; only the generated error handling remains.
    try:
      pass
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    # NOTE(review): generated parameter name `str` shadows the builtin.
    codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      end = 0
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    # No fields to write; only the generated error handling remains.
    try:
      pass
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      end = 0
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
    """Return the module-level cached genpy struct for 'I'-format fields."""
    # Read-only access: the former `global` declaration was unnecessary.
    return _struct_I
| [
"akingse@qq.com"
] | akingse@qq.com |
a02a3e1aaed8310d24cd73055fd40d4605004c89 | b46594de5c173e891e04556d9a3866fa5bbbf32d | /python/argon/common.py | 0257923dd35bf5864ccb710efdbbc513cdf1659d | [] | no_license | ssorj/argon | 7e36cd64b7ac3b79a9b94a759347963673b05f3c | 1db9e74428ac0ade435cf36f5e3d3c82f460961d | refs/heads/master | 2021-05-11T03:19:02.661533 | 2019-01-06T14:48:31 | 2019-01-06T14:48:31 | 117,912,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,133 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Platform shim: on MicroPython the stdlib modules carry a 'u' prefix
# (uos, urandom, ...).  Import whichever set exists under common private
# aliases so the rest of the module stays platform-agnostic.
import sys as _sys
_micropython = _sys.implementation.name == "micropython"
if _micropython:
    import gc as _gc
    import uos as _os
    import urandom as _random
    import uselect as _select
    import usocket as _socket
    import ustruct as _struct
    import utime as _time
else:
    _gc = None  # only the MicroPython path uses an explicit gc module
    import os as _os
    import random as _random
    import select as _select
    import socket as _socket
    import struct as _struct
    import time as _time
try:
    _DEBUG = _os.getenv("ARGON_DEBUG") is not None
except AttributeError:
    # assumes some MicroPython ports lack uos.getenv -- default debug off
    _DEBUG = False
class Buffer:
    """Growable byte buffer addressed by explicit offsets.

    Mutating and reading operations return the offset just past the bytes
    they touched, so calls chain naturally:
    ``offset = buf.write(offset, data)``.
    """

    def __init__(self):
        self._octets = bytearray(256)
        self._view = memoryview(self._octets)

    def skip(self, offset, size):
        """Advance past `size` bytes; returns (new_offset, old_offset)."""
        return offset + size, offset

    def read(self, offset, size):
        """Return (new_offset, memoryview of the `size` bytes at `offset`)."""
        stop = offset + size
        return stop, self._view[offset:stop]

    def write(self, offset, octets):
        """Copy `octets` in at `offset`, growing if needed; returns new offset."""
        stop = offset + len(octets)
        self.ensure(stop)
        self._octets[offset:stop] = octets
        return stop

    def unpack(self, offset, size, format_string):
        """Unpack `format_string` at `offset`; returns (new_offset, *fields)."""
        assert len(self) > offset + size
        fields = _struct.unpack_from(format_string, self._view, offset)
        return (offset + size,) + fields

    def __getitem__(self, index):
        return self._view[index]

    def __setitem__(self, index, value):
        self._view[index] = value

    def __len__(self):
        return len(self._octets)

    def ensure(self, size):
        """Grow storage (at least doubling) so the buffer holds `size` bytes."""
        if len(self._octets) >= size:
            return
        grown = max(size, 2 * len(self._octets))
        # Building a new bytearray (instead of resizing in place) sidesteps
        # the BufferError raised when resizing storage exported by a
        # memoryview.
        self._octets = self._octets + bytearray(grown - len(self._octets))
        self._view = memoryview(self._octets)

    def pack(self, offset, size, format_string, *values):
        """Pack `values` at `offset` per `format_string`; returns new offset."""
        self.ensure(offset + size)
        _struct.pack_into(format_string, self._octets, offset, *values)
        return offset + size
def _uuid_bytes():
_random.seed(round(_time.time() * 1000))
values = (
_random.getrandbits(32),
_random.getrandbits(32),
_random.getrandbits(32),
_random.getrandbits(32),
)
return _struct.pack("IIII", *values)
def _hex(data):
return "".join(["{:02x}".format(x) for x in data])
def _shorten(string, max_=20):
if string is None:
return string
return string[:min(max_, len(string))]
| [
"jross@apache.org"
] | jross@apache.org |
b624987f59ba7fdd8d21977422b1355a8e3e5847 | c7792b5e5ae5e74d643518a5b0644020288fc6da | /whichbugs.py | e8aa2354b97b600b4c5a6a49f4954a3a80a38897 | [
"BSD-2-Clause"
] | permissive | agroce/fuzzgoattriage | 0dc99daf2d061aaa0f58ceef3657b6f9ff411613 | 173c585cc7e87bcb2b82ae22fde56935352cd597 | refs/heads/master | 2020-07-29T14:49:39.691056 | 2019-09-20T18:07:19 | 2019-09-20T18:07:19 | 209,830,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | import subprocess
import glob
import os
import sys
# Triage script (Python 2): run every fuzzer crash input (out/crashes/id*)
# against each single-bug binary (justbug*).  A nonzero exit status (crash
# or the 1-second ulimit timeout) is taken to mean that binary's bug fired,
# so its name is printed under the input's path.
dnull = open(os.devnull, 'w')
ms = glob.glob("justbug*")
ms = map(lambda x: "./" +x, ms)  # prefix ./ so the shell runs them from cwd
if True:
    for t in glob.glob("out/crashes/id*"):
        print t
        i = 0  # NOTE(review): never used after initialisation
        for m in ms:
            r = subprocess.call(["ulimit -t 1;" + m + " " + t], shell=True, stdout=dnull, stderr=dnull)
            if r != 0:
                print m
                sys.stdout.flush()
        print
| [
"agroce@gmail.com"
] | agroce@gmail.com |
57b3d7204d3454cd3a8b66c7f36e2c1017b07255 | 2a54a1d9996778362421299a936bb0dadaace958 | /units/adms/mysite/iclock/models/model_devoperate.py | b05e500934966760202e07897b6ed3452ae1eb50 | [] | no_license | icprog/zktime_wlm | 6d0719b5210c4d3196b5958bccbb7e606785ece3 | 449c487ce4664dde734f8007a974ed883801d106 | refs/heads/master | 2021-03-21T10:20:54.157131 | 2018-11-24T04:10:42 | 2018-11-24T04:10:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,786 | py | # -*- coding: utf-8 -*-
#! /usr/bin/env python
import datetime
from django.db import models, connection
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.core.cache import cache
from base.operation import OperationBase, Operation, ModelOperation
from base.cached_model import CachingModel
from django.conf import settings
from mysite.utils import get_option
# Django choices tuples (value, localized label) for OperateCmd fields.
CMM_TYPE=(
    (1,_(u'设备自动命令')),  # device-issued (automatic) command
    (2,_(u'用户下发命令')),  # user-issued command
)
SUCCESS_FLAG=(
    (0,_(u'未处理')),  # unprocessed
    (1,_(u'成功')),  # succeeded
    (2,_(u'失败')),  # failed
)
CMM_SYSTEM=(
    (1,_(u'门禁')),  # access-control system
    (2,_(u'考勤')),  # attendance system
)
class OperateCmd(CachingModel):
    """A device-communication command and its processing state.

    Rows record both device-generated and user-issued commands together
    with creation time, how many times they were processed, and whether
    processing succeeded.
    """
    Author=models.ForeignKey(User, null=True,blank=True)
    CmdContent = models.TextField(verbose_name=_(u'命令描述'),max_length=2048)
    # Bug fix: the default used to be datetime.datetime.now() -- evaluated
    # once at import time, stamping every row with the server start-up time.
    # Passing the callable makes Django evaluate it per INSERT.
    CmdCommitTime = models.DateTimeField( verbose_name=_(u'命令创建时间'),default=datetime.datetime.now)
    commit_time = models.DateTimeField(verbose_name=_(u'命令处理完成时间'), null=True, blank=True)
    CmdReturn = models.IntegerField(_(u'返回值'), null=True, blank=True)
    process_count=models.SmallIntegerField(verbose_name=_(u'处理次数'),default=0)
    success_flag=models.SmallIntegerField(verbose_name=_(u'处理标志'),default=0,choices=SUCCESS_FLAG)
    receive_data = models.TextField(verbose_name=_(u'命令数据'), null=True, blank=True)
    cmm_type=models.SmallIntegerField(verbose_name=_(u'命令类型'),default=1,blank=False,null=False,choices=CMM_TYPE)
    cmm_system=models.SmallIntegerField(verbose_name=_(u'命令系统'),default=1,blank=False,null=False,choices=CMM_SYSTEM)
    class Admin(CachingModel.Admin):
        # Read-only admin view: add/change/delete are disabled below because
        # rows are produced by the device-communication layer.
        list_display=('create_operator','CmdCommitTime','cmm_type','CmdContent','commit_time','process_count','success_flag',)
        sort_fields =["create_operator","CmdCommitTime","commit_time","success_flag"]
        search_fields = ["CmdContent"]
        query_fields=('cmm_type','process_count','success_flag')
        cache=False
        read_only=True
        log=False
        visible = get_option("DEVOPERATE_VISIBLE")  # currently only used by the attendance system
        disabled_perms=["add_operatecmd",'change_operatecmd','delete_operatecmd']
        hide_perms=["dataexport_operatecmd",]
    class Meta:
        app_label='iclock'
        db_table = 'operatecmds'
        verbose_name = _(u'通信命令详情')
        verbose_name_plural=verbose_name
    def save(self, *args, **kwargs):
        # Persist without writing an operation-log entry.
        super(OperateCmd, self).save(log_msg=False)
    @staticmethod
    def clear():
        """Delete every stored command."""
        OperateCmd.objects.all().delete()
    class _delete(Operation):
        verbose_name=_(u'删除')
        visible=False
        def action():
            pass
    class _add(Operation):
        visible=False
        verbose_name=_(u'新增')
        def action():
            pass
    class _change(Operation):
        visible=False
        verbose_name=_(u'编辑')
        def action():
            pass
    def get_process_status(self):
        """Return this command's completion percentage as a string like '75%'."""
        total=self.devcmd_set.all().count()
        current=self.devcmd_set.filter(CmdOverTime__isnull=False).count()
        from decimal import ROUND_HALF_UP,Decimal
        if total>0:
            return str(Decimal(str(float(current)/float(total)*100)).quantize(Decimal('0'),ROUND_HALF_UP))+"%"
        else:
            if self.success_flag==1:
                return "100%"
            else:
                return "0%"
    def limit_operatecmd_to(self,qs,user):
        """Restrict `qs` to attendance-system commands; non-superusers see
        only commands they created themselves."""
        from django.db.models import Q
        # Renamed from `filter` to avoid shadowing the builtin.
        conditions={'cmm_system':2}
        if not user.is_superuser:
            conditions['create_operator__exact']=u"%s"%user
        return qs.filter(Q(**conditions))
| [
"657984027@qq.com"
] | 657984027@qq.com |
88177735f8868cfda2b92a9a23a7e0b8b1b50b4e | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/109/usersdata/224/63305/submittedfiles/av2_p3_civil.py | 74e486626635478f0d456283a47e71aba216271f | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 668 | py | # -*- coding: utf-8 -*-
import numpy as np
def somatorioDaColuna(A, j):
    """Return the sum of column ``j`` of matrix ``A``.

    Bug fix: the original accumulated ``1`` per row (returning the row
    count) and never used ``A[i, j]``; it now sums the column entries.
    """
    soma2 = 0
    for i in range(A.shape[0]):
        soma2 = soma2 + A[i, j]
    return soma2
def somatorioDaLinha(A, i):
    """Return the sum of row ``i`` of matrix ``A``."""
    soma = 0
    for coluna in range(A.shape[1]):
        soma += A[i, coluna]
    return soma
# Read the matrix size, a target position (x, y) and the matrix entries,
# then report the "weight" of the position: the sum of its column plus the
# sum of its row, excluding the entry at (x, y) itself (hence the -2*A[x,y]).
n=int(input('Digite a dimensão da Matriz A: '))
x=int(input('Digite o índice da linha: '))
y=int(input('Digite o índice da coluna: '))
A=np.zeros((n,n))
for i in range(0,A.shape[0],1):
    for j in range(0,A.shape[1],1):
        A[i,j]=int(input('Digite os pesos em cada posição: '))
# Bug fix: the original line was missing a closing parenthesis (SyntaxError).
pesoDaPosicao=somatorioDaColuna(A,y)+somatorioDaLinha(A,x)-(2*(A[x,y]))
print(pesoDaPosicao)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
49fe190f0f418cee0e3a2eede3469a3c263b4dd6 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Games/got-tyrion-web/flask/lib/python2.7/site-packages/pip/_vendor/cachecontrol/__init__.py | 612abcc11e161b614239ba31c3000f491bd9b5ce | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:bb3ecec6cf61426531b71a1dd56c95a148fa25a6fb35f88b1023c716e73edb65
size 302
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
279bccd834532c0a1dd881e625f22c20d4468ac6 | 55e28e35db5bf6a844df3fb47080500b115a893e | /day13/s7.py | 60b61e339ca0556100e396336b41b952c378ed49 | [] | no_license | pylarva/Python | 5743ffa4a69db42b642d51b62f9e9b69ddbc1a72 | 71b484950e6dbdcf708726a68a3386d0d6ddc07f | refs/heads/master | 2020-04-19T09:11:11.195393 | 2017-11-16T07:32:59 | 2017-11-16T07:32:59 | 67,507,687 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,045 | py | # !/usr/bin/env python
# -*- coding:utf-8 -*-
# Author:pylarva
# bolg:www.lichengbing.com
from sqlalchemy import create_engine,and_,or_,func,Table
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String,ForeignKey,UniqueConstraint,DateTime
from sqlalchemy.orm import sessionmaker,relationship
# MySQL connection (pool overflow capped at 5) and the declarative base.
# NOTE(review): database credentials are embedded in the URI.
engine = create_engine("mysql+pymysql://root:123@10.0.0.111:3306/s13", max_overflow=5)
Base = declarative_base()
# Association table: program login users <-> server accounts.  One person
# can hold several server accounts and one server account can be shared by
# several people (many-to-many).
UserProfile2HostUser= Table('userprofile_2_hostuser',Base.metadata,
    Column('userprofile_id',ForeignKey('user_profile.id'),primary_key=True),
    Column('hostuser_id',ForeignKey('host_user.id'),primary_key=True),
)
class Host(Base):
    """A managed server reachable over SSH."""
    __tablename__='host'
    id = Column(Integer,primary_key=True,autoincrement=True)
    hostname = Column(String(64),unique=True,nullable=False)
    ip_addr = Column(String(128),unique=True,nullable=False)
    port = Column(Integer,default=22)  # default SSH port
    def __repr__(self):
        return "<id=%s,hostname=%s, ip_addr=%s>" %(self.id,
                                                   self.hostname,
                                                   self.ip_addr)
class HostUser(Base):
    """A login account on a Host (password- or key-based SSH)."""
    __tablename__ = 'host_user'
    id = Column(Integer, primary_key=True)
    AuthTypes = [
        (u'ssh-passwd', u'SSH/Password'),
        (u'ssh-key', u'SSH/KEY'),
    ]
    # auth_type = Column(ChoiceType(AuthTypes))
    auth_type = Column(String(64))
    # NOTE(review): unique=True here makes usernames globally unique, which
    # conflicts with the per-host uniqueness constraint below -- confirm intent.
    username = Column(String(64), unique=True, nullable=False)
    password = Column(String(255))
    host_id = Column(Integer, ForeignKey('host.id'))
    # Bug fix: the login demo dereferences item.host, but no ORM relationship
    # to Host was declared; add it (backward-compatible addition).
    host = relationship('Host', backref='host_users')
    __table_args__ = (UniqueConstraint('host_id', 'username', name='_host_username_uc'),)
class Group(Base):
    """User group; each UserProfile belongs to at most one group."""
    __tablename__ = 'group'
    id = Column(Integer,primary_key=True)
    name = Column(String(64),unique=True,nullable=False)
class UserProfile(Base):
    """A program login user (the person signing in to this tool)."""
    __tablename__ = 'user_profile'
    id = Column(Integer,primary_key=True)
    username = Column(String(64),unique=True,nullable=False)
    password = Column(String(255),nullable=False)
    # One person can only be in one group.
    group_id = Column(Integer, ForeignKey('group.id'))
    # Many-to-many: the server accounts this person may use.
    host_list =relationship('HostUser', secondary=UserProfile2HostUser, backref='userprofiles')
Session = sessionmaker(bind=engine)
session = Session()

# Login check: look up the profile matching the submitted credentials.
# Fixes: Query.filter() does not accept keyword arguments (filter_by() does),
# the column name was misspelled ('usename'), and the condition was inverted
# (obj.host_list was dereferenced exactly when obj was None).
obj = session.query(UserProfile).filter_by(username='输入的用户名', password='输入的密码').first()
if obj:
    # Walk every server account this person owns.
    for item in obj.host_list:
        # item is a HostUser object
        item.password, item.username,
        # item.host is the related Host object
        item.host.hostname,item.host.port
class AuditLog(Base):
    """One executed command: who ran what on which server account, and when."""
    __tablename__ = 'audit_log'
    id = Column(Integer,primary_key=True)
    userprofile_id = Column(Integer,ForeignKey('user_profile.id'))
    hostuser_id = Column(Integer,ForeignKey('host_user.id'))
    cmd = Column(String(255))
    date = Column(DateTime)
| [
"1326126359@qq.com"
] | 1326126359@qq.com |
bd37d5d53f0d15aec98b1f5d09c49c7ab1b8e1ac | 3be1ddf42236a1b33ec74ed3bfdd0f8918513733 | /coding-challenges/week12/day03/Q.2.py | 1a8fd269545bd4e6e303677b5387c464cca85f59 | [] | no_license | aabhishek-chaurasia-au17/MyCoding_Challenge | 84ef926b550b3f511f1c642fe35f4303c8abb949 | 419d02ad8740a2c00403fd30c661074266d2ba8f | refs/heads/main | 2023-08-29T09:52:36.796504 | 2021-11-07T07:32:09 | 2021-11-07T07:32:09 | 359,842,173 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | """
Q-2 )Write steps in heapify/percolate down method, and write time
complexity and space complexity analysis.(5 marks)
(Super Easy)
"""
# Module-level array shared by heapify()/heapsort(); it is heapified
# bottom-up in the __main__ block and then consumed destructively.
heap = [111, 22, 33, 4, 15, 64, 7, 8, 9]
def heapsort():
    """Print the elements of the global max-heap in descending order.

    Destructive: repeatedly swaps the root with the last slot, pops the
    old root off the end, and restores the heap property at index 0.
    """
    global heap
    while len(heap) > 0:
        print(heap[0])
        heap[0], heap[-1] = heap[-1], heap[0]
        heap.pop()
        heapify(0)
def heapify(i):
    """Sift the element at index ``i`` down the global max-heap.

    Recursively swaps heap[i] with its largest child until the max-heap
    property holds on the subtree rooted at ``i``.
    """
    global heap
    size = len(heap)
    largest = i
    for child in (2 * i + 1, 2 * i + 2):
        if child < size and heap[largest] < heap[child]:
            largest = child
    if largest != i:
        heap[i], heap[largest] = heap[largest], heap[i]
        heapify(largest)
if __name__ == "__main__":
    # Build the max-heap bottom-up (Floyd's construction: sift down every
    # index from the last toward the root), then sort-print it.
    n = len(heap)
    for i in range(n - 1, -1, -1):
        heapify(i)
    heapsort()
| [
"abhishekc838@gmail.com"
] | abhishekc838@gmail.com |
f5ba5767d75aa13b3c93663c3462dbf7a20eff7b | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/cv/detection/SSD_for_PyTorch/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py | dd7eddfab7b40870df07c1e6045e9372de27c830 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 1,051 | py | # Copyright 2022 Huawei Technologies Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Person-only variant of the multi-scale Faster R-CNN R50-caffe baseline:
# shrink the bbox head to a single class and restrict every dataset split
# to the 'person' category.
_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'
model = dict(roi_head=dict(bbox_head=dict(num_classes=1)))
classes = ('person', )
data = dict(
    train=dict(classes=classes),
    val=dict(classes=classes),
    test=dict(classes=classes))
# NOTE(review): checkpoint was trained on the full 80-class COCO head --
# confirm the intended weight-loading behaviour for the 1-class head.
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth' # noqa
| [
"chenyong84@huawei.com"
] | chenyong84@huawei.com |
e4820e819afe8515dcfc4c70add913784166710c | 70cbc3e6002ccc0e2bf570c90e675c34a84b7ce9 | /device_list_api/wsgi.py | 9b5fa97e8c000353672604ae680c71b2545301e3 | [] | no_license | shotaro0726/dvice_manager | 61d31da512b3401a5864d06ad1c9269f22bace87 | 431ea9c7098dfcea46a57404541a34847559ddba | refs/heads/master | 2022-09-21T03:25:34.702838 | 2020-05-30T17:27:40 | 2020-05-30T17:27:40 | 268,010,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
WSGI config for device_list_api project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings (unless already set) before
# constructing the module-level WSGI callable servers look for.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'device_list_api.settings')
application = get_wsgi_application()
| [
"shoutaro0726@gmail.com"
] | shoutaro0726@gmail.com |
7c8d25b8c58f79d83c263c0664d30b7997f41902 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p3BR/R1/benchmark/startQiskit_QC428.py | 65e336bc4f3e6fcbdf417489f4dbaa53551064de | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,508 | py | # qubit number=3
# total number=77
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
# Filename prefix for circuit drawings (referenced only by the commented-out
# draw() call in build_oracle below).
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings position by position.

    The per-position results are returned in reversed order, matching the
    original implementation's ``res[::-1]``.
    """
    bits = [str(int(s[k]) ^ int(t[k])) for k in range(len(s))]
    return ''.join(reversed(bits))
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two bit strings modulo 2, returned as '0' or '1'."""
    total = sum(int(s[k]) * int(t[k]) for k in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Build the oracle O_f as an (n controls + 1 target)-qubit circuit.

    For every n-bit pattern with f(pattern) == "1", the zero-valued control
    positions are X-conjugated around a multi-controlled Toffoli so the
    Toffoli fires exactly on that pattern ('noancilla' MCT mode).
    """
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")

    def _flip_zero_controls(pattern):
        # An X on each '0' position turns "control on |0>" into "control on |1>".
        for idx, bit in enumerate(pattern):
            if bit == "0":
                oracle.x(controls[idx])

    for value in range(2 ** n):
        pattern = np.binary_repr(value, n)
        if f(pattern) != "1":
            continue
        _flip_zero_controls(pattern)
        oracle.mct(controls, target[0], None, mode='noancilla')
        _flip_zero_controls(pattern)
    # oracle.barrier()
    # oracle.draw('mpl', filename=(kernel + '-oracle.png'))
    return oracle
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
    """Assemble the Bernstein-Vazirani circuit on n input qubits + 1 ancilla.

    The numbered gates below (# number=...) appear to be generator-inserted
    benchmark mutations; their order is significant, so they are untouched.
    """
    # implement the Bernstein-Vazirani circuit
    zero = np.binary_repr(0, n)
    b = f(zero)
    # initial n + 1 bits
    input_qubit = QuantumRegister(n+1, "qc")
    classicals = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classicals)
    # inverse last one (can be omitted if using O_f^\pm)
    prog.x(input_qubit[n])
    # circuit begin
    prog.h(input_qubit[1]) # number=1
    prog.h(input_qubit[1]) # number=70
    prog.rx(-0.09738937226128368,input_qubit[2]) # number=2
    prog.h(input_qubit[1]) # number=33
    prog.y(input_qubit[2]) # number=56
    prog.cz(input_qubit[2],input_qubit[1]) # number=34
    prog.h(input_qubit[1]) # number=35
    prog.h(input_qubit[1]) # number=3
    # apply H to get superposition
    for i in range(n):
        prog.h(input_qubit[i])
    prog.h(input_qubit[n])
    prog.barrier()
    # apply oracle O_f
    oracle = build_oracle(n, f)
    prog.append(
        oracle.to_gate(),
        [input_qubit[i] for i in range(n)] + [input_qubit[n]])
    # apply H back (QFT on Z_2^n)
    for i in range(n):
        prog.h(input_qubit[i])
    prog.barrier()
    # measure
    return prog
def get_statevector(prog: QuantumCircuit) -> Any:
    """Simulate `prog` and return {"|bitstring>": amplitude} for each basis state."""
    backend = Aer.get_backend('statevector_simulator')
    amplitudes = execute(prog, backend).result().get_statevector()
    n_qubits = round(log2(len(amplitudes)))
    labelled = {}
    for idx in range(2 ** n_qubits):
        labelled["|" + np.binary_repr(idx, n_qubits) + ">"] = amplitudes[idx]
    return labelled
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
    """Simulate `prog` on the named Aer backend and bundle up the results.

    Returns the raw measurement counts, the labelled statevector, the most
    frequent measured bitstring `a` (bit-reversed) and the expected `b`.
    """
    # Q: which backend should we use?
    # get state vector
    quantum_state = get_statevector(prog)
    # get simulate results
    # provider = IBMQ.load_account()
    # backend = provider.get_backend(backend_str)
    # qobj = compile(prog, backend, shots)
    # job = backend.run(qobj)
    # job.result()
    backend = Aer.get_backend(backend_str)
    # transpile/schedule -> assemble -> backend.run
    results = execute(prog, backend, shots=shots).result()
    counts = results.get_counts()
    # Most common bitstring, reversed to undo qiskit's little-endian ordering.
    a = Counter(counts).most_common(1)[0][0][::-1]
    return {
        "measurements": counts,
        # "state": statevec,
        "quantum_state": quantum_state,
        "a": a,
        "b": b
    }
def bernstein_test_1(rep: str):
    """011 . x + 1"""
    return bitwise_xor(bitwise_dot("011", rep), "1")
def bernstein_test_2(rep: str):
    """000 . x + 0"""
    return bitwise_xor(bitwise_dot("000", rep), "0")
def bernstein_test_3(rep: str):
    """111 . x + 1"""
    return bitwise_xor(bitwise_dot("111", rep), "1")
if __name__ == "__main__":
    # Bernstein-Vazirani instance: hidden string a = "11", bias b = "1".
    n = 2
    a = "11"
    b = "1"
    f = lambda rep: \
        bitwise_xor(bitwise_dot(a, rep), b)
    prog = build_circuit(n, f)
    sample_shot =4000
    writefile = open("../data/startQiskit_QC428.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    # Requires stored IBM Quantum credentials and submits to the real
    # ibmq_belem device -- this block performs network I/O.
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = provider.get_backend("ibmq_belem")
    # Transpile against a fake 5-qubit device, then append extra gates
    # and measure every qubit.
    circuit1 = transpile(prog, FakeYorktown())
    circuit1.h(qubit=2)
    circuit1.x(qubit=3)
    circuit1.measure_all()
    info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
769fb486715718f376703c406914f71e8c68b786 | 29540b843fa1fc8e0fa5979c2b0029ec7f957b55 | /unit/migrations/0007_airman_phone_number.py | 90ca3ae6f04a044c2d9e21c2b87e37b693a15739 | [] | no_license | lopezjronald/asts-fitness-program-v2 | 7ae5282e03f124d912e834e43907af3af4608a4e | a6e111265e9d86dfb101b9df1d2629010cf9066d | refs/heads/main | 2023-03-07T12:11:18.704931 | 2021-02-17T11:04:18 | 2021-02-17T11:04:18 | 316,976,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | # Generated by Django 3.1.3 on 2021-02-17 07:35
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds an optional 10-character
    # phone_number column to the Airman model.
    dependencies = [
        ('unit', '0006_auto_20201201_2013'),
    ]
    operations = [
        migrations.AddField(
            model_name='airman',
            name='phone_number',
            # null=True lets existing rows migrate without a default value.
            field=models.CharField(max_length=10, null=True),
        ),
    ]
| [
"lopez.j.ronald@gmail.com"
] | lopez.j.ronald@gmail.com |
885b6854611137eb3ef2001628a9f8a75b508036 | 5b52320d3cc707285390e02bec8b33c51229054d | /server/user_settings/migrations/0007_auto_20201214_1822.py | 7b18e97498a11c5e817f81fbcb340f637d8387e6 | [] | no_license | Aviemusca/bjj-digraph | abb541f81a72acb2020e480dfac2f85a98cbfe73 | 9e01ff8ab73f6d9d16606ec1c8b7c91cdfa9cd2c | refs/heads/main | 2023-03-05T02:08:17.260158 | 2021-02-19T14:22:50 | 2021-02-19T14:22:50 | 337,366,200 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | # Generated by Django 3.1.3 on 2020-12-14 18:22
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds a fill_opacity float
    # (default fully opaque) to both node-settings models.
    dependencies = [
        ("user_settings", "0006_auto_20201211_1347"),
    ]
    operations = [
        migrations.AddField(
            model_name="usergamenodesettings",
            name="fill_opacity",
            field=models.FloatField(default=1.0),
        ),
        migrations.AddField(
            model_name="usermetanodesettings",
            name="fill_opacity",
            field=models.FloatField(default=1.0),
        ),
    ]
| [
"yvan@metatech.ie"
] | yvan@metatech.ie |
c700637541affd3d5b62be7bfe9b067396960cf7 | c32d1463914c8fb3d597361ba5a4a17e499bd888 | /models/backbones/resnet.py | ba23b353a0533b4903a778a6504886b2f8e86fd2 | [
"MIT"
] | permissive | DonYum/face_recognition_framework | aee7d6a902f6c7c0ea8513fd0204b73b34d7994b | 924b74f868bb06295d21122e3926d37a5d419d6e | refs/heads/master | 2020-12-03T23:38:58.082336 | 2020-01-03T11:29:51 | 2020-01-03T11:29:51 | 231,524,940 | 0 | 0 | MIT | 2020-01-03T06:17:28 | 2020-01-03T06:17:28 | null | UTF-8 | Python | false | false | 5,551 | py | import torch.nn as nn
import math
__all__ = ['resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
def conv3x3(in_planes, out_planes, stride=1):
    """Build a 3x3 convolution with padding 1 and no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two 3x3 convs with BN/ReLU and an identity (or downsampled) shortcut."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: pass x through the optional downsample module.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + shortcut
        return self.relu(out)
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck with BN/ReLU and a residual shortcut."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut branch: pass x through the optional downsample module.
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out = out + shortcut
        return self.relu(out)
class ResNet(nn.Module):
    """ResNet trunk with a face-embedding head.

    After the four residual stages, a 1x1 conv reduces channels to 256, the
    map is flattened, and dropout + a linear layer project it to
    `feature_dim`. `fc_map` hard-codes the flattened size for the two
    supported input resolutions, so `spatial_size` must be 224 or 112.
    """
    def __init__(self, block, layers, feature_dim, spatial_size=224):
        # Flattened size of the 256-channel map for each input resolution.
        fc_map = {224: 12544, 112: 4096}
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.feature_dim = feature_dim
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # face
        self.layer1x1 = nn.Sequential(
            nn.Conv2d(512 * block.expansion, 256, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=False))
        self.drop1 = nn.Dropout(0.5)
        self.feature = nn.Linear(fc_map[spatial_size], feature_dim)
        self.drop2 = nn.Dropout(0.5)
        # Kaiming-style init for convs, uniform for the linear head,
        # identity-scale for batch norms.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.Linear):
                scale = math.sqrt(3. / m.in_features)
                m.weight.data.uniform_(-scale, scale)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        # Build one residual stage; the first block may downsample so the
        # shortcut matches the new channel count / spatial size.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        # face
        x = self.layer1x1(x)
        x = self.drop1(x)
        x = x.view(x.size(0), -1)
        x = self.feature(x)
        x = self.drop2(x)
        return x
def resnet18(**kwargs):
    """ResNet-18: BasicBlock with [2, 2, 2, 2] layers per stage."""
    return ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
def resnet34(**kwargs):
    """ResNet-34: BasicBlock with [3, 4, 6, 3] layers per stage."""
    return ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
def resnet50(**kwargs):
    """ResNet-50: Bottleneck with [3, 4, 6, 3] layers per stage."""
    return ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
def resnet101(**kwargs):
    """ResNet-101: Bottleneck with [3, 4, 23, 3] layers per stage."""
    return ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
def resnet152(**kwargs):
    """ResNet-152: Bottleneck with [3, 8, 36, 3] layers per stage."""
    return ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
| [
"xiaohangzhan@outlook.com"
] | xiaohangzhan@outlook.com |
b38317fd7e15bf95d4c1d380413c57ceab3a3398 | ad9bd58a3ec8fa08dfcc994d4101ee815a9f5bc0 | /02_algorithm/baekjoon/all_problem/1076.py | 67a85516fa5d7cb85edc31570f22d7c7fd99149f | [] | no_license | wally-wally/TIL | 93fc1d0e3bc7d030341ed54155294c68c48b4c7d | 936783bc86f563646c0398c24e2fcaa707f0ed23 | refs/heads/master | 2023-04-28T08:59:48.235747 | 2023-04-12T12:06:52 | 2023-04-12T12:06:52 | 195,918,111 | 40 | 7 | null | 2020-09-29T16:20:46 | 2019-07-09T02:31:02 | Python | UTF-8 | Python | false | false | 356 | py | import sys
# Redirect stdin so input() reads the bundled sample file.
sys.stdin = open('input_1076.txt', 'r')
# Resistor colour-code lookup: colour name -> digit / multiplier exponent.
color_code = {'black': 0, 'brown': 1, 'red': 2, 'orange': 3, 'yellow': 4,
              'green': 5, 'blue': 6, 'violet': 7, 'grey': 8, 'white': 9}
result = 0
# Three colour lines: the first two form the two-digit base value, the
# third scales it by a power of ten.
for i in range(3):
    if i <= 1: result += color_code[input()] * (10 if not i else 1)
    else: result *= 10 ** color_code[input()]
print(result) | [
"wallys0213@gmail.com"
] | wallys0213@gmail.com |
a46201db45bff3afbcf83488cf2e86acdbb5a26b | 327b5efff2b24d42f1b1c7d13b6788c240d3b8d4 | /sapi_app/urls.py | 9730a941d9b75507e9bf636135ee1db10c010c34 | [
"MIT"
] | permissive | calixo888/sapi | 523c35240d19faeed4d4673bdb4ca4ec210d5671 | a2ff327795a7ea088cb158f7738af9121e465a08 | refs/heads/master | 2022-04-30T14:06:35.474634 | 2020-10-25T23:18:49 | 2020-10-25T23:18:49 | 237,724,096 | 0 | 0 | MIT | 2022-04-22T23:03:11 | 2020-02-02T05:34:45 | JavaScript | UTF-8 | Python | false | false | 422 | py | from django.conf.urls import url
from . import views
# URL routes for the sapi app: HTML pages first, then the JSON API.
urlpatterns = [
    url("^$", views.index, name="index"),
    url("^forgot-api-key/$", views.forgot_api_key, name="forgot_api_key"),
    url("^documentation/$", views.documentation, name="documentation"),
    url("^get-api-key/$", views.get_api_key, name="get_api_key"),
    # API Routes
    url("^api/personal/$", views.personal_storage, name="personal_storage"),
]
| [
"calix.huang1@gmail.com"
] | calix.huang1@gmail.com |
788c0a4dc0b12e463c564f5fcf938d4a40d4b387 | e5ae250c070a4e23717f25bdaa99b2e310b05cd2 | /sorting3.py | 95855664abeb1c26e873f0b51baeb2835c1fb52d | [
"MIT"
] | permissive | matthewmuccio/InterviewPrepKit | d3c95c1c2dfb58761f3a61f35ffc72be3c0aebb7 | 13dabeddc3c83866c88bef1c80498c313e4c233e | refs/heads/master | 2020-03-27T15:10:21.540776 | 2018-11-05T16:13:12 | 2018-11-05T16:13:12 | 146,702,058 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | #!/usr/bin/env python3
from functools import cmp_to_key
class Player:
    """A contestant with a display name and an integer score."""

    def __init__(self, name, score):
        self.name = name
        self.score = score

    def __repr__(self):
        # Bug fix: the original body was `pass`, so __repr__ returned None
        # and repr(player) raised TypeError. Return the "name score" form
        # the program prints for each player.
        return f"{self.name} {self.score}"
def comparator(a, b):
    """Order players by descending score, breaking ties by ascending name."""
    if a.score != b.score:
        return b.score - a.score
    return -1 if a.name < b.name else 1
if __name__ == "__main__":
    # Read n players as "name score" lines, then print them ordered by
    # descending score with ties broken alphabetically by name.
    n = int(input())
    data = []
    for _ in range(n):
        name, score = input().split()
        data.append(Player(name, int(score)))
    # Bug fix: `comparator` is a module-level function in this file, so
    # `Player.comparator` raised AttributeError; reference it directly.
    data = sorted(data, key=cmp_to_key(comparator))
    for player in data:
        print(player.name, player.score)
| [
"me@matthewmuccio.com"
] | me@matthewmuccio.com |
e47dcd7a4fc5c0e91cf776cb94d341b8a1c11633 | 3ededad93e7e3cbcea4baad101812187fc449d89 | /torch_geometric_temporal/data/discrete/static_graph_discrete_signal.py | 82751531b4259edae4213d0b08979778a0133f32 | [
"MIT"
] | permissive | LFrancesco/pytorch_geometric_temporal | 04612030d3ef3ef34f856dd2c03a57d006287e0d | 0964515a6041ce0cceb12e36ed640df22c046b4d | refs/heads/master | 2023-03-27T13:51:37.134564 | 2021-03-20T14:54:19 | 2021-03-20T14:54:19 | 349,710,635 | 0 | 0 | MIT | 2021-03-20T14:54:20 | 2021-03-20T11:51:32 | null | UTF-8 | Python | false | false | 3,227 | py | import torch
import numpy as np
from typing import List, Union
from torch_geometric.data import Data
Edge_Index = Union[np.ndarray, None]
Edge_Weight = Union[np.ndarray, None]
Features = List[Union[np.ndarray, None]]
Targets = List[Union[np.ndarray, None]]
class StaticGraphDiscreteSignal(object):
    r"""Iterator over discrete temporal snapshots of a static graph.

    The underlying graph (edge index and edge weights) is fixed, while the
    node feature matrix and the node targets change from snapshot to
    snapshot. Each iteration yields one PyTorch Geometric ``Data`` object
    for a single time period (e.g. a day or a week).

    Args:
        edge_index (Numpy array): Index tensor of edges.
        edge_weight (Numpy array): Edge weight tensor.
        features (List of Numpy arrays): List of node feature tensors.
        targets (List of Numpy arrays): List of node label (target) tensors.
    """
    def __init__(self, edge_index: Edge_Index, edge_weight: Edge_Weight,
                 features: Features, targets: Targets):
        self.edge_index = edge_index
        self.edge_weight = edge_weight
        self.features = features
        self.targets = targets
        self._check_temporal_consistency()
        self._set_snapshot_count()

    def _check_temporal_consistency(self):
        # Features and targets must cover the same number of time steps.
        assert len(self.features) == len(self.targets), "Temporal dimension inconsistency."

    def _set_snapshot_count(self):
        self.snapshot_count = len(self.features)

    def _get_edge_index(self):
        # None passes through; otherwise convert to a LongTensor.
        if self.edge_index is None:
            return None
        return torch.LongTensor(self.edge_index)

    def _get_edge_weight(self):
        if self.edge_weight is None:
            return None
        return torch.FloatTensor(self.edge_weight)

    def _get_features(self):
        feature = self.features[self.t]
        if feature is None:
            return None
        return torch.FloatTensor(feature)

    def _get_target(self):
        target = self.targets[self.t]
        if target is None:
            return None
        # Integer targets -> LongTensor (classification); float targets ->
        # FloatTensor (regression). Any other dtype kind falls through and
        # yields None, matching the original behaviour.
        if target.dtype.kind == 'i':
            return torch.LongTensor(target)
        if target.dtype.kind == 'f':
            return torch.FloatTensor(target)

    def _get_snapshot(self):
        # Bundle the current time step into a single PyG Data object.
        return Data(x = self._get_features(),
                    edge_index = self._get_edge_index(),
                    edge_attr = self._get_edge_weight(),
                    y = self._get_target())

    def __next__(self):
        if self.t >= len(self.features):
            # Reset so the signal can be iterated again from the start.
            self.t = 0
            raise StopIteration
        snapshot = self._get_snapshot()
        self.t = self.t + 1
        return snapshot

    def __iter__(self):
        self.t = 0
        return self
| [
"benedek.rozemberczki@gmail.com"
] | benedek.rozemberczki@gmail.com |
702bc20295f20d7d22a54fc0691eb5d930eb225d | 73b21ee53c73f37f0295534e21da5a83e77b328d | /ML/day3/test6.py | 04ecbd0b6555569bc336692db2d58679c08ebdc1 | [] | no_license | tauovir/Nielit | 2f5e0d49a5c20e90de8a74644a8a0ed1da07dd0f | 225351b770b88b2655431ab3ec04533e30b36057 | refs/heads/master | 2020-04-05T08:00:45.199440 | 2019-05-29T10:43:54 | 2019-05-29T10:43:54 | 156,697,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 952 | py | from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score,confusion_matrix,classification_report
import matplotlib.pyplot as plt
# Python 2 script (statement-form prints below): 1-NN on the iris dataset.
iris = load_iris()
#======Load Data=======
#print iris.data
#Load Target Data======
#print iris.target
# X: iris feature matrix, y: integer class labels.
X = iris.data
y = iris.target
#print y
# Hold out 20% of the samples for testing.
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.2)
#print X_train.shape
#print X_test.shape
#Knn Classifier
knn = KNeighborsClassifier(n_neighbors = 1)
knn.fit(X_train,y_train)
p = knn.predict(X_test)
#==Print prediction
#print p
#===Print Confusion matrix
#print confusion_matrix(y_test,p)
#print accuracy
#print accuracy_score(y_test,p)
#=====Plot graph=======
# 10-fold cross-validation accuracy over the full dataset.
score = cross_val_score(knn, X,y,cv = 10)
print"Report", classification_report(y_test, p)
print "score=",score
| [
"taukir707@gmail.com"
] | taukir707@gmail.com |
b3a53c0c17fa29b1ed7afe845c1b6db30deba156 | d0f2f7f220c825d827643ca81a08a23cfb871965 | /backend/code/alembic/versions/32038b09fa26_new_initial_commit.py | 0631ea23ef8f36803d341f6925285a39fc1171d9 | [] | no_license | socek/rankor | 7e5e73f8f13bc3d12bd1b18ef01bef04f8f38f0a | eaf5002dd1e852895670517a8cdcb07bf7c69f66 | refs/heads/master | 2021-04-12T07:52:20.341699 | 2018-06-03T20:07:17 | 2018-06-03T20:07:17 | 125,769,351 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,355 | py | """new initial commit
Revision ID: 32038b09fa26
Revises:
Create Date: 2018-05-13 20:36:23.880081
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '32038b09fa26'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: users, contests, games, questions,
    answers, teams and game_answers, plus the email/category indexes.

    Tables are created parents-first so every foreign key target exists.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('users',
    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('name', sa.String(), nullable=True),
    sa.Column('email', sa.String(), nullable=False),
    sa.Column('is_admin', sa.Boolean(), nullable=False),
    sa.Column('password', sa.Binary(), nullable=True),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_users'))
    )
    # E-mail addresses double as login identifiers, hence the unique index.
    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
    op.create_table('contests',
    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('name', sa.String(), nullable=False),
    sa.Column('owner_id', postgresql.UUID(), nullable=False),
    sa.ForeignKeyConstraint(['owner_id'], ['users.id'], name=op.f('fk_contests_owner_id_users')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_contests'))
    )
    op.create_table('games',
    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('name', sa.String(), nullable=False),
    sa.Column('contest_id', postgresql.UUID(), nullable=False),
    sa.Column('owner_id', postgresql.UUID(), nullable=False),
    sa.ForeignKeyConstraint(['contest_id'], ['contests.id'], name=op.f('fk_games_contest_id_contests')),
    sa.ForeignKeyConstraint(['owner_id'], ['users.id'], name=op.f('fk_games_owner_id_users')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_games'))
    )
    op.create_table('questions',
    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('name', sa.String(), nullable=False),
    sa.Column('description', sa.Text(), nullable=False),
    sa.Column('category', sa.String(), nullable=True),
    sa.Column('contest_id', postgresql.UUID(), nullable=False),
    sa.ForeignKeyConstraint(['contest_id'], ['contests.id'], name=op.f('fk_questions_contest_id_contests')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_questions'))
    )
    op.create_index(op.f('ix_questions_category'), 'questions', ['category'], unique=False)
    op.create_table('answers',
    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('name', sa.String(), nullable=False),
    sa.Column('is_correct', sa.Boolean(), nullable=False),
    sa.Column('question_id', postgresql.UUID(), nullable=False),
    sa.ForeignKeyConstraint(['question_id'], ['questions.id'], name=op.f('fk_answers_question_id_questions')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_answers'))
    )
    op.create_table('teams',
    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('name', sa.String(), nullable=False),
    sa.Column('game_id', postgresql.UUID(), nullable=False),
    sa.ForeignKeyConstraint(['game_id'], ['games.id'], name=op.f('fk_teams_game_id_games')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_teams'))
    )
    # game_answers links a game/question to the answer a team gave; the
    # answer and team may be absent, so those columns are nullable.
    op.create_table('game_answers',
    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('game_id', postgresql.UUID(), nullable=False),
    sa.Column('question_id', postgresql.UUID(), nullable=False),
    sa.Column('answer_id', postgresql.UUID(), nullable=True),
    sa.Column('team_id', postgresql.UUID(), nullable=True),
    sa.ForeignKeyConstraint(['answer_id'], ['answers.id'], name=op.f('fk_game_answers_answer_id_answers')),
    sa.ForeignKeyConstraint(['game_id'], ['games.id'], name=op.f('fk_game_answers_game_id_games')),
    sa.ForeignKeyConstraint(['question_id'], ['questions.id'], name=op.f('fk_game_answers_question_id_questions')),
    sa.ForeignKeyConstraint(['team_id'], ['teams.id'], name=op.f('fk_game_answers_team_id_teams')),
    sa.PrimaryKeyConstraint('id', name=op.f('pk_game_answers'))
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the whole schema, children-first so foreign keys never dangle."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('game_answers')
    op.drop_table('teams')
    op.drop_table('answers')
    op.drop_index(op.f('ix_questions_category'), table_name='questions')
    op.drop_table('questions')
    op.drop_table('games')
    op.drop_table('contests')
    op.drop_index(op.f('ix_users_email'), table_name='users')
    op.drop_table('users')
    # ### end Alembic commands ###
| [
"d.dlugajczyk@clearcode.cc"
] | d.dlugajczyk@clearcode.cc |
2fa9b51aec2e39ae9b67da9ffec162473eb20bb7 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/4496/codes/1712_2507.py | 1c645da33dc35399c4e2e0dc8179eaa9c5bcc4e3 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | qip = int(input("qtd inicial de pirarucus: "))
pc = int(input("percentual de creescimento: "))
t = 0
# Grow the stock by pc% each cycle, minus the fish sold, until it either
# reaches 8000 or dies out; t counts the elapsed cycles.
while(qip<8000 and qip>0):
	qv=int(input("retirados para venda: "))
	qip= qip+((qip*pc)/100)-qv
	t=t+1
# NOTE(review): a stock of exactly 8000 prints "ZERO" here -- confirm the
# grader expects ">" rather than ">=".
if(qip>8000):
	print("MAXIMO")
else:
	print("ZERO")
print(t) | [
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
a68dd26e9ee124b1f7779140374b4b4cc7cac486 | d83118503614bb83ad8edb72dda7f449a1226f8b | /src/dprj/platinumegg/app/cabaret/views/mgr/model_edit/card_level_exp.py | 00ad22f0b5e7b05bfe2992f08674a0fae99eae83 | [] | no_license | hitandaway100/caba | 686fe4390e182e158cd9714c90024a082deb8c69 | 492bf477ac00c380f2b2758c86b46aa7e58bbad9 | refs/heads/master | 2021-08-23T05:59:28.910129 | 2017-12-03T19:03:15 | 2017-12-03T19:03:15 | 112,512,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,516 | py | # -*- coding: utf-8 -*-
from platinumegg.app.cabaret.views.mgr.model_edit import AdminModelEditHandler,\
AppModelForm, ModelEditValidError
from platinumegg.app.cabaret.models.CardLevelExp import CardLevelExpMster
from defines import Defines
class Handler(AdminModelEditHandler):
    """Admin handler for editing the card level-experience master data.
    """
    class Form(AppModelForm):
        # Model form bound to the master table; the edit-timestamp column
        # is framework-managed and excluded from the form.
        class Meta:
            model = CardLevelExpMster
            exclude = (
                Defines.MASTER_EDITTIME_COLUMN,
            )
    def setting_property(self):
        # Label shown in the admin UI ("card experience table").
        self.MODEL_LABEL = u'カード経験値テーブル'
    def valid_write_end(self):
        # Validate the whole table after a write: level 1 must have exp 0,
        # levels must be contiguous, and exp must be strictly increasing.
        master_all = {}
        for master in CardLevelExpMster.fetchValues():
            master_all[master.level] = master
        errors = []
        for master in master_all.values():
            if master.level == 1:
                if master.exp != 0:
                    # "Set level 1's experience to 0"
                    errors.append(u'レベル1は経験値を0に設定してください, level=%d' % master.level)
                continue
            pre = master_all.get(master.level - 1)
            if pre is None:
                # "A level is missing"
                errors.append(u'レベルが抜けています, level=%d' % (master.level - 1))
            elif master.exp <= pre.exp:
                # "Not greater than the previous level's experience"
                errors.append(u'前のレベルの経験値よりも大きくありません, level=%d' % master.level)
        if errors:
            raise ModelEditValidError('<br />'.join(errors))
def main(request):
    # Entry point: delegate the admin request to the Handler view.
    return Handler.run(request)
| [
"shangye@mail.com"
] | shangye@mail.com |
f2da2236b2af5e8cc428d8bc6dac3e08b19e574c | 4f4776eb69cbea9ee1c87a22732c5d778855c83a | /leetcode/Set_Matrix_Zeroes.py | a92db433c9375325332006cef0655b1f3ede7dd5 | [] | no_license | k4u5h4L/algorithms | 4a0e694109b8aadd0e3b7a66d4c20692ecdef343 | b66f43354792b1a6facff90990a7685f5ed36a68 | refs/heads/main | 2023-08-19T13:13:14.931456 | 2021-10-05T13:01:58 | 2021-10-05T13:01:58 | 383,174,341 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 926 | py | '''
Set Matrix Zeroes
Medium
Given an m x n integer matrix matrix, if an element is 0, set its entire row and column to 0's, and return the matrix.
You must do it in place.
Example 1:
Input: matrix = [[1,1,1],[1,0,1],[1,1,1]]
Output: [[1,0,1],[0,0,0],[1,0,1]]
Example 2:
Input: matrix = [[0,1,2,0],[3,4,5,2],[1,3,1,5]]
Output: [[0,0,0,0],[0,4,5,0],[0,3,1,0]]
'''
class Solution:
    def setZeroes(self, matrix: List[List[int]]) -> None:
        """
        Zero out every row and column containing a 0, modifying `matrix`
        in place (nothing is returned).

        One O(m*n) scan records the affected row/column indices, then a
        second pass writes the zeros. This replaces the original approach,
        which copied the whole matrix and re-zeroed a full row and column
        for every 0 found (O(m*n*(m+n)) time, O(m*n) extra space).
        """
        zero_rows = set()
        zero_cols = set()
        for i, row in enumerate(matrix):
            for j, value in enumerate(row):
                if value == 0:
                    zero_rows.add(i)
                    zero_cols.add(j)
        for i, row in enumerate(matrix):
            for j in range(len(row)):
                if i in zero_rows or j in zero_cols:
                    row[j] = 0
| [
"noreply@github.com"
] | k4u5h4L.noreply@github.com |
3777e1676ddc316177897ba2a27039f169052c00 | f3fdfdf714e23ef69c9ce6631c188f1ebc328546 | /spider/utilities/util_urlfilter.py | 6c9b9fbf5967dd5550b517c4295741067f36b2e4 | [
"BSD-2-Clause"
] | permissive | liujie40/PSpider | bf2a134812ce81357588b260cee9e3d039c73df0 | f1162c777ec87250edfd2532882eb15b8d712e6a | refs/heads/master | 2022-02-21T18:20:41.468852 | 2022-01-19T06:55:54 | 2022-01-19T06:56:00 | 112,547,656 | 1 | 0 | null | 2017-11-30T01:17:47 | 2017-11-30T01:17:47 | null | UTF-8 | Python | false | false | 1,175 | py | # _*_ coding: utf-8 _*_
"""
util_urlfilter.py by xianhu
"""
from .util_config import CONFIG_RE_URL_LEGAL, CONFIG_RE_URL_ILLEGAL
class UrlFilter(object):
    """
    Filter urls through black/white regex lists, with a seen-set for dedup.
    """
    def __init__(self, black_patterns=(CONFIG_RE_URL_ILLEGAL,), white_patterns=(CONFIG_RE_URL_LEGAL,)):
        """
        constructor
        """
        self._url_set = set()
        self._re_black_list = black_patterns
        self._re_white_list = white_patterns

    def check(self, url):
        """
        Reject urls matching any black pattern; accept urls matching a white
        pattern; with a non-empty white list everything else is rejected.
        """
        if any(pattern.search(url) for pattern in self._re_black_list):
            return False
        if any(pattern.search(url) for pattern in self._re_white_list):
            return True
        # No white list configured means "anything not blacklisted passes".
        return not self._re_white_list

    def check_and_add(self, url):
        """
        Return True for a url that passes check() and has not been seen
        before; every url that passes check() is remembered.
        """
        if not self.check(url):
            return False
        is_new = url not in self._url_set
        self._url_set.add(url)
        return is_new
| [
"qixianhu@qq.com"
] | qixianhu@qq.com |
6a86f70dfd375605c250defd38f4fa8d093d11e7 | 0bb8296a1bfdba0c264ad7d764482dd3c724563a | /torcms/core/privilege.py | f0eff529668ff0889c7c2885bf5c165386453b1d | [
"MIT"
] | permissive | dlnan/TorCMS | c489a6ab573815f288d11efe4738f4c23323c9ea | 8bf71beb98d867cde5ef3aa749adae61f66356be | refs/heads/master | 2023-06-22T17:23:45.281281 | 2021-07-13T14:07:05 | 2021-07-13T14:07:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,998 | py | # -*- coding:utf-8 -*-
'''
针对增删改查的权限进行处理。
'''
from config import ROLE_CFG
def is_prived(usr_rule, def_rule):
    '''
    Compare a user's 4-character role string against a required one.

    Positions whose required digit is '0' are ignored; the user qualifies
    as soon as any remaining position meets or exceeds the requirement.
    '''
    for pos in range(4):
        required = def_rule[pos]
        if required == '0':
            continue
        if usr_rule[pos] >= required:
            return True
    return False
def auth_view(method):
    '''
    Role guard for viewing.
    '''
    def wrapper(self, *args, **kwargs):
        '''
        Run `method` when viewing is public (empty role config) or the
        logged-in user holds the view role; otherwise render the 404 page.
        '''
        if ROLE_CFG['view'] == '':
            return method(self, *args, **kwargs)
        if self.current_user and is_prived(self.userinfo.role, ROLE_CFG['view']):
            return method(self, *args, **kwargs)
        self.render('misc/html/404.html',
                    kwd={'info': 'No role'},
                    userinfo=self.userinfo)
    return wrapper
def auth_add(method):
    '''
    Role guard for adding.
    '''
    def wrapper(self, *args, **kwargs):
        '''
        Run `method` only when the logged-in user holds the add role;
        otherwise render the 404 page.
        '''
        if self.current_user and is_prived(self.userinfo.role, ROLE_CFG['add']):
            return method(self, *args, **kwargs)
        self.render('misc/html/404.html',
                    kwd={'info': 'No role'},
                    userinfo=self.userinfo)
    return wrapper
def auth_edit(method):
    '''
    Role guard for editing.
    '''
    def wrapper(self, *args, **kwargs):
        '''
        Run `method` only when the logged-in user holds the edit role;
        otherwise render the 404 page.
        '''
        if self.current_user and is_prived(self.userinfo.role, ROLE_CFG['edit']):
            return method(self, *args, **kwargs)
        self.render('misc/html/404.html',
                    kwd={'info': 'No role'},
                    userinfo=self.userinfo)
    return wrapper
def auth_delete(method):
    '''
    Role guard for deleting.
    '''
    def wrapper(self, *args, **kwargs):
        '''
        Run `method` only when the logged-in user holds the delete role;
        otherwise render the 404 page.
        '''
        if self.current_user and is_prived(self.userinfo.role, ROLE_CFG['delete']):
            return method(self, *args, **kwargs)
        self.render('misc/html/404.html',
                    kwd={'info': 'No role'},
                    userinfo=self.userinfo)
    return wrapper
def auth_admin(method):
'''
role for admin.
'''
def wrapper(self, *args, **kwargs):
'''
wrapper.
'''
if self.current_user:
if is_prived(self.userinfo.role, ROLE_CFG['admin']):
return method(self, *args, **kwargs)
else:
kwd = {
'info': 'No role',
}
self.render('misc/html/404.html',
kwd=kwd,
userinfo=self.userinfo)
else:
kwd = {
'info': 'No role',
}
self.render('misc/html/404.html', kwd=kwd, userinfo=self.userinfo)
return wrapper
| [
"bukun@osgeo.cn"
] | bukun@osgeo.cn |
af96329dc1386af2578b528f0c0b0d3626ce19a3 | cc1472e5c7409db30b3e17271d1bd123f1c8abb3 | /3.Lambda functions and error-handling/Error handling with try-except.py | c7fa20b1591f40f231ecbcbc56155681e66f5a24 | [] | no_license | Mat4wrk/Python-Data-Science-Toolbox-Part-1-Datacamp | 8fd4528397f6be1fdbca06be4b7d22630bbf2f7f | 5d8d1320263021e02341a392fd8766a201ad8a8b | refs/heads/main | 2023-03-04T16:18:56.631589 | 2021-02-13T11:29:12 | 2021-02-13T11:29:12 | 338,530,762 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | # Define shout_echo
def shout_echo(word1, echo=1):
    """Concatenate echo copies of word1 and three
    exclamation marks at the end of the string."""
    # Fall back to the empty string when the inputs cannot be combined.
    shouted = ""
    try:
        shouted = word1 * echo + '!!!'
    except:
        print("word1 must be a string and echo must be an integer.")
    return shouted
# Exercise the error path: a string `echo` cannot multiply a string.
shout_echo("particle", echo="accelerator")
| [
"noreply@github.com"
] | Mat4wrk.noreply@github.com |
efd5051555a1671ac149d0de3d0e37719c92eba1 | 60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24 | /IronPythonStubs/release/stubs.min/System/Windows/Media/Animation_parts/EasingMode.py | 21e313268898de24943c5fb9a0285de8c2aec25f | [
"MIT"
] | permissive | shnlmn/Rhino-Grasshopper-Scripts | a9411098c5d1bbc55feb782def565d535b27b709 | 0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823 | refs/heads/master | 2020-04-10T18:59:43.518140 | 2020-04-08T02:49:07 | 2020-04-08T02:49:07 | 161,219,695 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | class EasingMode(Enum,IComparable,IFormattable,IConvertible):
"""
Defines the modes in which classes derived from System.Windows.Media.Animation.EasingFunctionBase perform their easing.
enum EasingMode,values: EaseIn (0),EaseInOut (2),EaseOut (1)
"""
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self,*args):
""" __format__(formattable: IFormattable,format: str) -> str """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __ne__(self,*args):
pass
def __reduce_ex__(self,*args):
pass
def __str__(self,*args):
pass
EaseIn=None
EaseInOut=None
EaseOut=None
value__=None
| [
"magnetscoil@gmail.com"
] | magnetscoil@gmail.com |
7082cde9cf2222c4bd04b2f2f4926e998a7aeb4b | e1d9fe469422519084fbe9d8cea4c75e6c0828a3 | /import.py | d3dc52f2f40bc4ff2f176db4003b5415f3c1e528 | [] | no_license | DerThorsten/theplantlist | 3c9a1b0d38d793aad3030c9ea6c4afbd7a6b7b71 | f8be85a3c0d0fb644d0049be5d998c3768ab50d1 | refs/heads/master | 2020-03-18T16:48:05.148593 | 2017-03-01T04:40:04 | 2017-03-01T04:40:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py |
import pandas as pd
import gc
import os
# Collect every value of the 'Genus' column across all CSV files in the
# current directory, then report total vs. unique genus names.
store = []
for fname in os.listdir("."):
    if fname.endswith(".csv"):
        # dtype=object keeps columns untyped; `pd.np` was removed from
        # modern pandas, so use the plain builtin instead.
        data = pd.read_csv(fname, dtype=object)
        store += data['Genus'].tolist()
        gc.collect()
# Normalise case with a list comprehension: in Python 3 `map` returns a lazy
# iterator, so the original `len(store)` call crashed with a TypeError.
store = [name.lower() for name in store]
print(len(store), len(set(store)))
| [
"kislov@easydan.com"
] | kislov@easydan.com |
f04cda7b171c4dffe9a99f2306de25129b8f8c29 | c922252e52eea97b7557937a2558bbec664d2e07 | /newsfeed/wsgi.py | 8a3ec5a03277be826eece4cbf71cfac7dc213775 | [] | no_license | strar-buck/twitter_insta_news_feed | cfe1d4cd88b6dc938134d82ec0c960090390aee3 | 22858054ebf7821d4e5469163b14b542983fadff | refs/heads/master | 2021-06-11T01:26:30.671568 | 2017-02-01T09:11:09 | 2017-02-01T09:11:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | """
WSGI config for newsfeed project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "newsfeed.settings")
# Build the WSGI callable once and reuse it below (the original constructed
# a second application just to wrap it).
application = get_wsgi_application()
try:
    # Optionally serve static files through dj-static when it is installed.
    from dj_static import Cling
    application = Cling(application)
except ImportError:
    # dj-static is optional; the bare `except:` previously hid genuine
    # configuration errors raised while wrapping the application.
    pass
| [
"djangopycon@gmail.com"
] | djangopycon@gmail.com |
4a8e1050ebad2ddf5f56ca78e92c3f67009533d6 | e0980f704a573894350e285f66f4cf390837238e | /.history/streams/blocks_20201019102609.py | 77ffd2756680b25d322e412add76538e7e19698c | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | from wagtail.core import blocks
class TitleBlock(blocks.StructBlock):
    """StreamField block rendering a centred title."""

    text = blocks.CharBlock(
        required=True,
        help_text='Tekst do wyświetlenia',  # fixed typo: was `elp_text`, an invalid kwarg
    )

    class Meta:
        template = 'streams/title_block.html'
        icon = 'edycja'  # NOTE(review): not a stock Wagtail icon name — confirm it is registered
        label = 'Tytuł'
        help_text = 'Wyśrodkowany tekst do wyświetlenia na stronie.'
| [
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
0d97595c8e7c5dbc0fbc7767f79e518f3093d714 | 4970f0d662ca0d5d8c270b36e6858aa313c67dcc | /lk/classes/commands_config_keys.py | 51eeabf18415c7f2bd97d18edc0b53cbd5816584 | [] | no_license | eyalev/lk | 3168de19edf09f32b9277d3bf786d445855a6df1 | 59079e9071d7fbccc438e3ea3f9c8914f4767b78 | refs/heads/master | 2020-12-24T08:24:11.079329 | 2017-03-24T16:48:17 | 2017-03-24T16:48:17 | 40,901,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py |
# Dictionary keys used throughout lk when reading/writing the commands
# configuration; kept in one module so callers never hard-code the strings.
commands_dir_key = 'commands_dir'
commands_key = 'commands'
relative_path_key = 'relative_path'
repo_url_key = 'repo_url'
file_name_key = 'file_name'
local_path_key = 'local_path'
local_repo_path_key = 'local_repo_path'
local_repo_command_path_key = 'local_repo_command_path'
last_push_timestamp_key = 'last_push_timestamp'
info_key = 'info'
| [
"eyalev@gmail.com"
] | eyalev@gmail.com |
e0b3f02bf9aab17200129623ef552108b30151b0 | a660f0674e816e7f97353c0eec7c9960eed36889 | /ipde/annular/modified_helmholtz.py | e46efe73a9c0e28e019350b7d5ce0fe3d4bfc96e | [
"Apache-2.0"
] | permissive | dbstein/ipde | da4642cbd26e4857c966123ed6654f38ddf5dff6 | a254bf128eba835284935290b8de09eb1374aa3f | refs/heads/master | 2022-07-22T14:29:47.420137 | 2022-07-13T18:30:10 | 2022-07-13T18:30:10 | 215,557,734 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,712 | py | import numpy as np
from ipde.utilities import fast_dot, concat, fast_LU_solve, mfft, mifft, fourier_multiply, fft, ifft, ffourier_multiply
import scipy as sp
import scipy.linalg
from personal_utilities.scipy_gmres import right_gmres, gmres
import numexpr as ne
from ipde.sparse_matvec import zSpMV_viaMKL
import numba
def scalar_laplacian(CO, AAG, RAG, uh):
    # Apply the scalar Laplacian in annular coordinates to the coefficient
    # array uh, using the collocation/restriction operators bundled in CO
    # (D* differentiate, R* restrict between the three radial grids) and the
    # metric terms psi from the real annular geometry RAG.
    # NOTE(review): uh appears to hold per-mode Fourier data (it is scaled by
    # AAG.iks for tangential derivatives) — confirm against the geometry class.
    R01 = CO.R01
    R12 = CO.R12
    D01 = CO.D01
    D12 = CO.D12
    iks = AAG.iks
    psi1 = RAG.psi1
    ipsi1 = RAG.inv_psi1
    ipsi2 = RAG.inv_psi2
    # First and second tangential derivatives (multiplication by iks).
    uh_t = R01.dot(uh*iks)
    uh_tt = R12.dot(fourier_multiply(uh_t, ipsi1)*iks)
    # Second radial derivative with the psi1 metric weight.
    uh_rr = D12.dot(fourier_multiply(D01.dot(uh), psi1))
    luh = fourier_multiply(uh_rr+uh_tt, ipsi2)
    return luh
def fscalar_laplacian(CO, AAG, RAG, uh):
    # Fast variant of scalar_laplacian: identical operator sequence, but uses
    # the f-prefixed helper (ffourier_multiply) from ipde.utilities.
    R01 = CO.R01
    R12 = CO.R12
    D01 = CO.D01
    D12 = CO.D12
    iks = AAG.iks
    psi1 = RAG.psi1
    ipsi1 = RAG.inv_psi1
    ipsi2 = RAG.inv_psi2
    uh_t = R01.dot(uh*iks)
    uh_tt = R12.dot(ffourier_multiply(uh_t, ipsi1)*iks)
    uh_rr = D12.dot(ffourier_multiply(D01.dot(uh), psi1))
    luh = ffourier_multiply(uh_rr+uh_tt, ipsi2)
    return luh
# custom numba function for preconditioner
# basically a batched matvec; but note the ordering for the input vector
# this conforms with how that vector is stored in the rest of the solve
@numba.njit(parallel=True, fastmath=True)
def batch_matvecT_par(A, x):
    # Threaded batched "transposed" matvec: out[i] = A[i] @ x[:, i].
    # x is indexed as x[k, i], i.e. one column of x per batch entry i.
    sh = (A.shape[0], A.shape[1])
    out = np.zeros(sh, dtype=np.complex128)
    for i in numba.prange(A.shape[0]):
        for j in range(A.shape[1]):
            for k in range(A.shape[2]):
                out[i, j] += A[i, j, k] * x[k, i]
    return out
@numba.njit(parallel=False, fastmath=True)
def batch_matvecT_ser(A, x):
    # Serial twin of batch_matvecT_par: same result, no threading overhead.
    sh = (A.shape[0], A.shape[1])
    out = np.zeros(sh, dtype=np.complex128)
    for i in range(A.shape[0]):
        for j in range(A.shape[1]):
            for k in range(A.shape[2]):
                out[i, j] += A[i, j, k] * x[k, i]
    return out
def batch_matvecT(A, x):
    # Heuristic dispatch: threads only pay off once the output
    # (batch * rows) is large enough to amortize the parallel launch.
    if A.shape[0]*A.shape[1] > 10000:
        return batch_matvecT_par(A, x)
    else:
        return batch_matvecT_ser(A, x)
@numba.njit(parallel=True, fastmath=True)
def optim_batch_matvecT_par(A, x, out):
    # In-place batched transposed matvec on flat vectors:
    # out[i + j*nb] = sum_k A[i, j, k] * x[i + k*nb], where nb = A.shape[0].
    # Writing into a caller-provided buffer avoids a per-call allocation.
    for i in numba.prange(A.shape[0]):
        for j in range(A.shape[1]):
            kaccum = 0.0
            for k in range(A.shape[2]):
                kaccum += A[i, j, k] * x[i+k*A.shape[0]]
            out[i+j*A.shape[0]] = kaccum
@numba.njit(parallel=False, fastmath=True)
def optim_batch_matvecT_ser(A, x, out):
    # Serial twin; numba.prange degenerates to range when parallel=False.
    for i in numba.prange(A.shape[0]):
        for j in range(A.shape[1]):
            kaccum = 0.0
            for k in range(A.shape[2]):
                kaccum += A[i, j, k] * x[i+k*A.shape[0]]
            out[i+j*A.shape[0]] = kaccum
def optim_batch_matvecT(A, x, out):
    # Same large-problem dispatch heuristic as batch_matvecT.
    if A.shape[0]*A.shape[1] > 10000:
        optim_batch_matvecT_par(A, x, out)
    else:
        optim_batch_matvecT_ser(A, x, out)
class AnnularModifiedHelmholtzSolver(object):
    """
    Spectrally accurate Modified Helmholtz solver on annular domain
    Solves (k^2-L)u = f in the annulus described by the Annular Geometry AG
    Subject to the Robin boundary condition:
    ia*u(ri) + ib*u_r(ri) = ig (boundary condition at the inner radius)
    oa*u(ro) + ob*u_r(ro) = og (boundary condition at the outer radius)
    On instantiation, a preconditioner is formed with ia, ib, oa, ob
    defining the boundary conditions
    These can be changed at solvetime, but preconditioning may not work so well
    """
    def __init__(self, AAG, k, ia=1.0, ib=0.0, oa=1.0, ob=0.0):
        # AAG: approximate annular geometry; k: modified-Helmholtz parameter;
        # (ia, ib)/(oa, ob): Robin coefficients at the inner/outer radius.
        self.AAG = AAG
        self.ia = ia
        self.ib = ib
        self.oa = oa
        self.ob = ob
        self.k = k
        # M radial points, ns stored modes, n physical tangential points.
        M = AAG.M
        ns = AAG.ns
        n = AAG.n
        NB = M*ns
        self.M = M
        self.ns = ns
        self.n = n
        self.NB = NB
        self.small_shape = (self.M, self.ns)
        self.shape = (self.M, self.n)
        self._construct()
        # Linear operators for GMRES: APPLY is the system, PREC its preconditioner.
        self.APPLY = scipy.sparse.linalg.LinearOperator((self.NB, self.NB), dtype=complex, matvec=self._apply)
        self.PREC = scipy.sparse.linalg.LinearOperator((self.NB, self.NB), dtype=complex, matvec=self._optim_preconditioner)
    def _construct(self):
        # Build, for every tangential mode, the dense M x M one-dimensional
        # operator (k^2 - L_mode) with the Robin rows appended, and store its
        # explicit inverse for use as a block-diagonal preconditioner.
        AAG = self.AAG
        CO = AAG.CO
        apsi1 = AAG.approx_psi1
        aipsi1 = AAG.approx_inv_psi1
        aipsi2 = AAG.approx_inv_psi2
        ks = AAG.ks
        D01 = CO.D01
        D12 = CO.D12
        R01 = CO.R01
        R12 = CO.R12
        R02 = CO.R02
        ibcd = CO.ibc_dirichlet
        ibcn = CO.ibc_neumann
        obcd = CO.obc_dirichlet
        obcn = CO.obc_neumann
        ns = self.ns
        M = self.M
        self._KLUS = []
        self._KINVS = []
        for i in range(ns):
            K = np.empty((M,M), dtype=complex)
            # Per-mode 1D Laplacian: radial part minus ks[i]^2 tangential part.
            LL = fast_dot(aipsi2, fast_dot(D12, fast_dot(apsi1, D01))) - \
                 fast_dot(np.ones(M-2)*ks[i]**2, fast_dot(R12, fast_dot(aipsi1, R01)))
            K[:M-2] = self.k**2*R02 - LL
            # Last two rows enforce the Robin conditions at each boundary.
            K[M-2:M-1] = self.ia*ibcd + self.ib*ibcn
            K[M-1:M-0] = self.oa*obcd + self.ob*obcn
            # self._KLUS.append(sp.linalg.lu_factor(K)) # for old preconditioner
            self._KINVS.append(sp.linalg.inv(K.real))
        self.KINV = sp.sparse.block_diag(self._KINVS, 'csr').astype('complex') # for SpMV preconditioner
        self.Stacked_KINVS = np.stack(self._KINVS).copy()
        # Reusable output buffer for the in-place preconditioner kernel.
        self.prealloc = np.zeros(self.M*self.ns, dtype=complex)
    def _preconditioner(self, fh):
        # Batched-matvec preconditioner (allocates a fresh output each call).
        return batch_matvecT(self.Stacked_KINVS, fh.reshape(self.small_shape)).ravel('F')
    def _optim_preconditioner(self, fh):
        # Allocation-free variant used by self.PREC; writes into self.prealloc.
        optim_batch_matvecT(self.Stacked_KINVS, fh, self.prealloc)
        return self.prealloc
    def _SpMV_preconditioner(self, fh):
        # could avoid these reshapes if self.KINV constructed differently
        # however, they don't seem to take a huge amount of time
        w1 = fh.reshape(self.small_shape).ravel('F')
        w2 = zSpMV_viaMKL(self.KINV, w1)
        return w2.reshape(self.small_shape, order='F').ravel()
    def _old_preconditioner(self, fh):
        # Legacy per-mode LU solve; kept for reference (requires self._KLUS).
        fh = fh.reshape(self.small_shape)
        fo = np.empty(self.small_shape, dtype=complex)
        for i in range(self.ns):
            fo[:,i] = fast_LU_solve(self._KLUS[i], fh[:,i])
        return fo.ravel()
    def _apply(self, uh):
        # Matvec for GMRES: (k^2 - L)uh on the interior rows, plus the two
        # Robin boundary rows, matching the layout produced in solve().
        AAG = self.AAG
        RAG = self.RAG
        CO = self.AAG.CO
        ibcd = CO.ibc_dirichlet
        ibcn = CO.ibc_neumann
        obcd = CO.obc_dirichlet
        obcn = CO.obc_neumann
        R02 = CO.R02
        uh = uh.reshape(self.small_shape)
        luh = fscalar_laplacian(CO, AAG, RAG, uh)
        fuh = self.k**2*R02.dot(uh) - luh
        ibc = (self.ia*ibcd + self.ib*ibcn).dot(uh)
        obc = (self.oa*obcd + self.ob*obcn).dot(uh)
        return concat(fuh, ibc, obc)
    def solve(self, RAG, f, ig, og, ia=None, ib=None, oa=None, ob=None,
              verbose=False, **kwargs):
        # Solve (k^2 - L)u = f on the real annular geometry RAG with boundary
        # data ig (inner) and og (outer); Robin coefficients may be overridden
        # here, though the preconditioner was built with the originals.
        self.RAG = RAG
        self.ia = ia if ia is not None else self.ia
        self.ib = ib if ib is not None else self.ib
        self.oa = oa if oa is not None else self.oa
        self.ob = ob if ob is not None else self.ob
        R02 = self.AAG.CO.R02
        # Right-hand side: restricted interior f with boundary rows appended.
        ff = concat(R02.dot(f), ig, og)
        # ffh = mfft(ff.reshape(self.shape)).ravel()
        ffh = fft(ff.reshape(self.shape)).ravel()
        out = right_gmres(self.APPLY, ffh, M=self.PREC, verbose=verbose, **kwargs)
        res = out[0]
        if verbose:
            print('GMRES took:', len(out[2]), 'iterations.')
        self.iterations_last_call = len(out[2])
        # Back to physical space; solution of this real problem is real.
        return ifft(res.reshape(self.small_shape)).real
| [
"dstein@flatironinstitute.org"
] | dstein@flatironinstitute.org |
760b723d21d604814cee51e8a214cc8cc7d6fd3c | 152ff2ef15245883b0b7cc3208fe71edcb4ba446 | /my_uu/__migrations/0008_auto__chg_field_unsubscribe_user__add_unique_unsubscribe_user.py | a347708702d7da7f84875413ebc6e8a1906f20a5 | [] | no_license | pvoytko/my-uu.ru | 302d947f48446ec8c86177867ef9607f692a4a0f | 00b77f02d230bf2f7adb5bd02eebbaf165f70d39 | refs/heads/master | 2021-06-10T08:11:09.282716 | 2021-04-08T10:21:55 | 2021-04-08T10:21:55 | 15,873,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,167 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Promote Unsubscribe.user from a ForeignKey to a OneToOneField and
        # back it with a database-level unique constraint.
        # Changing field 'Unsubscribe.user'
        db.alter_column(u'my_uu_unsubscribe', 'user_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True))
        # Adding unique constraint on 'Unsubscribe', fields ['user']
        db.create_unique(u'my_uu_unsubscribe', ['user_id'])
    def backwards(self, orm):
        # Reverse of forwards(): drop the unique constraint first, then
        # demote the field back to a plain ForeignKey.
        # Removing unique constraint on 'Unsubscribe', fields ['user']
        db.delete_unique(u'my_uu_unsubscribe', ['user_id'])
        # Changing field 'Unsubscribe.user'
        db.alter_column(u'my_uu_unsubscribe', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'my_uu.account': {
'Meta': {'object_name': 'Account'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'my_uu.category': {
'Meta': {'object_name': 'Category'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'my_uu.event': {
'Meta': {'object_name': 'Event'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
u'my_uu.eventlog': {
'Meta': {'object_name': 'EventLog'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['my_uu.Event']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'my_uu.uchet': {
'Meta': {'object_name': 'Uchet'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['my_uu.Account']"}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['my_uu.Category']"}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sum': ('django.db.models.fields.DecimalField', [], {'max_digits': '11', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'utype': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['my_uu.UType']"})
},
u'my_uu.unsubscribe': {
'Meta': {'object_name': 'Unsubscribe'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'my_uu.utype': {
'Meta': {'object_name': 'UType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
}
}
complete_apps = ['my_uu'] | [
"devnull@localhost"
] | devnull@localhost |
eb4def72637372ee312f3439627f207f24dc0828 | 71f3ecb8fc4666fcf9a98d39caaffc2bcf1e865c | /.history/第3章/ling_20200607212959.py | b5ed0fb3001268d28f8627a5245565a97d0a52b5 | [
"MIT"
] | permissive | dltech-xyz/Alg_Py_Xiangjie | 03a9cac9bdb062ce7a0d5b28803b49b8da69dcf3 | 877c0f8c75bf44ef524f858a582922e9ca39bbde | refs/heads/master | 2022-10-15T02:30:21.696610 | 2020-06-10T02:35:36 | 2020-06-10T02:35:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,154 | py | #!/usr/bin/env python
# coding=utf-8
'''
@version:
@Author: steven
@Date: 2020-05-27 22:20:22
@LastEditors: steven
@LastEditTime: 2020-06-07 21:29:59
@Description:最少的零钱数给顾客。
'''
# 检验一个输入数是否为正整数:#https://www.quora.com/How-can-I-make-sure-the-user-inputs-a-positive-integer-in-Python
def pos_num(n, i):
    """Validate that string ``n`` is a non-negative integer coin count.

    ``i`` indexes the coin denomination (used only in the error message).
    Returns the parsed int, or None when the input is invalid.

    The original looped forever on non-numeric input and crashed with a
    TypeError on negative input (it re-called ``pos_num()`` with no args).
    """
    d = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0]
    try:
        num = int(n)
    except ValueError:
        print(f"{d[i]}:{n} is not positive.\n")
        return None
    if num < 0:
        print(f"{d[i]}:{n} is not positive.\n")
        return None
    return num
def check_money(need2pay, all_money):
    """Ensure the requested change does not exceed the cashier's total.

    Prompts the user to re-enter the amount while it is larger than
    ``all_money``; returns a payable amount.  Fixes two bugs in the
    original: ``if_reinput == 'y' or 'Y'`` was always truthy, and the
    recursive retry discarded its result.
    """
    while need2pay > all_money:
        # Cannot make change when the requested amount exceeds the till.
        print("付不起,请检查输入的钱是否有误。\n")
        if_reinput = input("是否重新输入,y表示是,n表示否:")
        if if_reinput in ('y', 'Y'):
            try:
                need2pay = float(input("请输入需要找的零钱:"))
            except ValueError:
                print("输入的不是数字,请重新输入")
        elif if_reinput in ('n', 'N'):
            exit()
    return need2pay
def main():
    """Interactively make change with the fewest coins (greedy strategy)."""
    # Coin face values, smallest to largest; fval_num holds how many of each
    # coin the cashier owns.  The original referenced an undefined name `d`
    # here (NameError) — it is `face_value` throughout now.
    face_value = [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1.0]
    fval_num = []
    all_money = 0
    temp = input("请按【1分,2分,5分,1角,2角,5角,1元】的顺序,来输入每种零钱的数量,用空格来分割:")
    fval_num0 = temp.split(" ")
    # Validate every entered count (the original called the *list* as a
    # function: `fval_num0(item)` — a TypeError).
    for idx, item in enumerate(fval_num0):
        if pos_num(item, idx) is None:
            return
    for i in range(0, len(fval_num0)):
        fval_num.append(int(fval_num0[i]))
        all_money += face_value[i] * fval_num[i]
    need2pay = float(input("请输入需要找的零钱:"))
    # Use the validated amount when check_money returns one.
    need2pay = check_money(need2pay, all_money) or need2pay
    # Greedy step: spend the largest denominations first.
    i = len(face_value) - 1
    while i >= 0:
        if need2pay >= face_value[i]:
            n = int(need2pay / face_value[i])
            if n > fval_num[i]:
                n = fval_num[i]  # cannot use more coins than we own
            fval_num[i] -= n
            need2pay -= n * face_value[i]
            print(f'用了{n}个{face_value[i]}元硬币')
        i -= 1
if __name__ == "__main__":
    main()
| [
"a867907127@gmail.com"
] | a867907127@gmail.com |
8f339cb9c53e93b193dba8fd6b109b15ecaf18d3 | 694427fd2155fea664241f9e029f955d83fef2a2 | /deploy_tools/fabfile.py | 61536b3938dd326356a8d9cf17e7d5c1e2159022 | [] | no_license | fbidu/superlists | 2ea67f5405ac8237d135f8ad315f4bc53a06239f | 391f8fa8396f8c8890a30f3b18d95a47778c2505 | refs/heads/master | 2021-06-18T20:43:58.159824 | 2019-09-12T15:48:52 | 2019-09-12T15:48:52 | 172,391,723 | 0 | 0 | null | 2021-01-28T11:16:37 | 2019-02-24T21:20:57 | JavaScript | UTF-8 | Python | true | false | 1,182 | py | import random
from fabric.contrib.files import append, exists
from fabric.api import cd, env, local, run
# Repository cloned onto the server during deploys.
REPO_URL = "https://github.com/fbidu/superlists.git"
def _get_latest_source():
    # Clone on first deploy, otherwise fetch; then pin the server checkout to
    # the commit currently checked out on the deploying machine.
    if exists(".git"):
        run("git fetch")
    else:
        run(f"git clone {REPO_URL} .")
    current_commit = local("git log -n 1 --format=%H", capture=True)
    run(f"git reset --hard {current_commit}")
def _update_pipenv():
    # Install pipenv for the deploy user if it is missing, then sync deps.
    if not exists(f"/home/{env.user}/miniconda3/bin/pipenv"):
        run(f"pip install --user pipenv")
    run(f"/home/{env.user}/miniconda3/bin/pipenv install")
def _update_dotenv():
    # Placeholder: environment-file handling is not implemented yet.
    pass
def _update_static_files():
    # Collect Django static assets without prompting.
    run(f"/home/{env.user}/miniconda3/bin/pipenv run python manage.py collectstatic --noinput")
def _update_database():
    # Apply pending Django migrations without prompting.
    run(f"/home/{env.user}/miniconda3/bin/pipenv run python manage.py migrate --noinput")
def deploy():
    # Fabric entry point: provision the site folder, pull the latest source,
    # and run every update step from inside the project directory.
    site_folder = f"/home/{env.user}/django-apps/{env.host}"
    run(f"mkdir -p {site_folder}")
    with cd(site_folder):
        _get_latest_source()
    with cd(f"{site_folder}/superlists"):
        _update_pipenv()
        _update_dotenv()
        _update_static_files()
        _update_database()
"felipe@felipevr.com"
] | felipe@felipevr.com |
e9bb9fa6c3131fe1c09815de84bbcae7570fe34c | bf2aa4eab14a6a5347fe4af65cc4a37f512a465d | /people/migrations/0108_auto_20200530_0602.py | 76b15ad9f9532587e2fa3e0edf18453d24e8c970 | [] | no_license | drdavidknott/betterstart | 0cda889f5cd6bb779f6d1fa75cb4f2ef08eb626c | 59e2f8282b34b7c75e1e19e1cfa276b787118adf | refs/heads/master | 2023-05-04T07:32:24.796488 | 2023-04-16T15:26:30 | 2023-04-16T15:26:30 | 173,626,906 | 0 | 0 | null | 2023-02-18T07:27:55 | 2019-03-03T20:37:01 | Python | UTF-8 | Python | false | false | 1,058 | py | # Generated by Django 3.0.3 on 2020-05-30 05:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0107_auto_20200530_0549'),
]
operations = [
migrations.AddField(
model_name='profile',
name='requested_resets',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='profile',
name='reset_code',
field=models.CharField(default=0, max_length=16),
),
migrations.AddField(
model_name='profile',
name='reset_timeout',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='profile',
name='successful_resets',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='site',
name='password_reset_timeout',
field=models.IntegerField(default=15),
),
]
| [
"dkoysta@gmail.com"
] | dkoysta@gmail.com |
f9aa2c51297ea2624dae2f4826d88086a7d086af | 07f815078189169cd2944105ef373b522aef8f33 | /Scrapy_spider/build_medals_table/build_medals_table/pipelines.py | 2c36fefd6e33f0cfbf1cb15f83f538e1451b98e2 | [
"MIT"
] | permissive | chrisjdavie/Olympics_redo | ecc5c0a593085ba5d3d9ce608362ccb1fece8eec | 43abe8d9bd9da4e9b15013b12bc3b5a740c55871 | refs/heads/master | 2021-01-20T10:11:05.994665 | 2015-01-23T14:58:10 | 2015-01-23T14:58:10 | 29,680,905 | 0 | 0 | null | 2015-01-22T16:51:11 | 2015-01-22T13:50:53 | OpenEdge ABL | UTF-8 | Python | false | false | 296 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class BuildMedalsTablePipeline(object):
    """Scrapy item pipeline stub: passes every scraped item through unchanged."""
    def process_item(self, item, spider):
        # Returning the item keeps it flowing to later pipeline stages.
        return item
| [
"chris.d@theasi.co"
] | chris.d@theasi.co |
349c0c0214a8062a6fed23e1e2968bfb83a10f1e | d838bed08a00114c92b73982a74d96c15166a49e | /docs/data/learn/Bioinformatics/input/ch4_code/src/helpers/DnaUtils.py | 7e027494e7ddf73aff79ccb6bbbd85e722d9f961 | [] | no_license | offbynull/offbynull.github.io | 4911f53d77f6c59e7a453ee271b1e04e613862bc | 754a85f43159738b89dd2bde1ad6ba0d75f34b98 | refs/heads/master | 2023-07-04T00:39:50.013571 | 2023-06-17T20:27:05 | 2023-06-17T23:27:00 | 308,482,936 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,486 | py | from random import Random
from typing import Optional, List
def generate_random_genome(size: int, r: Optional[Random] = None) -> str:
    """Return a random DNA string of ``size`` bases drawn uniformly from A/C/T/G."""
    rng = Random() if r is None else r
    bases = []
    for _ in range(size):
        bases.append(rng.choice(['A', 'C', 'T', 'G']))
    return ''.join(bases)
def generate_random_cyclic_genome(size: int, copies: int, r: Optional[Random] = None) -> List[str]:
    """Return ``copies`` random rotations of one random cyclic genome.

    Every copy is the same circular sequence cut at a random offset, so each
    has length ``size`` and the same base multiset.  The original sliced
    ``copy[offset+1:] + copy[:offset]``, silently dropping the base at
    ``offset`` (and ``randint(0, size)`` made the lengths inconsistent).
    """
    if r is None:
        r = Random()
    if size == 0:
        return [''] * copies
    genome = ''.join([r.choice(['A', 'C', 'T', 'G']) for i in range(size)])
    rotations = []
    for _ in range(copies):
        offset = r.randint(0, size - 1)
        rotations.append(genome[offset:] + genome[:offset])
    return rotations
def dna_reverse_complement(dna: str):
    """Return the reverse complement of ``dna`` (complement, then reverse)."""
    return dna_complement(dna)[::-1]
def dna_complement(dna: str):
    """Return the base-wise complement of ``dna`` (A<->T, C<->G)."""
    pairs = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
    out = []
    for base in dna:
        if base not in pairs:
            # Same failure mode as before: a bare raise outside any handler.
            raise
        out.append(pairs[base])
    return ''.join(out)
# MARKDOWN_DNA_TO_RNA
def dna_to_rna(dna: str):
    """Transcribe DNA to RNA: T becomes U; A/C/G pass through unchanged."""
    out = ''
    for base in dna:
        if base == 'T':
            out += 'U'
        elif base in ('A', 'C', 'G'):
            out += base
        else:
            # Reject anything outside the DNA alphabet.
            raise
    return out
# MARKDOWN_DNA_TO_RNA
def rna_to_dna(rna: str):
    """Reverse-transcribe RNA to DNA: U becomes T; A/C/G pass through."""
    ret = ''
    for ch in rna:
        if ch == 'A' or ch == 'C' or ch == 'G':
            ret += ch
        elif ch == 'U':
            ret += 'T'
        else:
            # Any character outside A/C/G/U is rejected (bare raise).
            raise
return ret | [
"offbynull@gmail.com"
] | offbynull@gmail.com |
3643aaa66a11bc0bb9a6cbd437878ad7d0c62ef8 | 64c8d431c751b1b7a7cb7224107ee40f67fbc982 | /code/python/echomesh/command/Broadcast.py | f9eb8a40450ff4d2509558545ac4f074fcf98b99 | [
"MIT"
] | permissive | silky/echomesh | 6ac4755e4ff5ea3aa2b2b671c0979068c7605116 | 2fe5a00a79c215b4aca4083e5252fcdcbd0507aa | refs/heads/master | 2021-01-12T20:26:59.294649 | 2013-11-16T23:29:05 | 2013-11-16T23:29:05 | 14,458,268 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | from __future__ import absolute_import, division, print_function, unicode_literals
from echomesh.command import Show
from echomesh.util import Log
LOGGER = Log.logger(__name__)
def broadcast(echomesh_instance, on_or_off=None):
    """Show or set the broadcast mode on the given echomesh instance.

    With no argument, delegates to Show.broadcast to display the current
    state; otherwise accepts on/true or off/false (case-insensitive).
    """
    if on_or_off is None:
        Show.broadcast(echomesh_instance)
    else:
        on_or_off = on_or_off.lower()
        b_on = on_or_off in ['on', 'true']
        if not (b_on or on_or_off in ['off', 'false']):
            raise Exception('You can only turn broadcast mode "on" or "off".')
        name = 'ON' if b_on else 'off'
        # Only flip the flag when the requested state differs from current.
        if b_on == echomesh_instance.broadcasting():
            message = 'was already'
        else:
            echomesh_instance.set_broadcasting(b_on)
            message = 'is now'
        LOGGER.info('broadcast mode %s %s.', message, name)
# User-facing help text and cross-references for the `broadcast` command.
HELP = """
Set the broadcast mode on or off.
When broadcast mode is on, all start and pause commands are sent to all echomesh
nodes; when broadcast mode is off, start and pause only go to this node.
"""
SEE_ALSO = ['show broadcast']
| [
"tom@swirly.com"
] | tom@swirly.com |
ccae95d130e10c420a1123ca43de367cbe9d79fe | 6bc4160d9f9e59df4f019cd0979b9c1266e6feec | /src/swarm_worker.py | 31359438848bc9742fbb7f01fde8dd622ff9b8ba | [
"MIT"
] | permissive | raymondlwb/docker-pygen | f00c3f17fa8ffde4426e55150775767f945d3038 | a7d1e70daba58c8bf44949b56453bdf017a56b4d | refs/heads/master | 2021-05-01T21:13:57.990953 | 2018-01-25T22:52:24 | 2018-01-25T22:52:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,646 | py | import re
import sys
import json
import signal
import argparse
import six
import requests
from actions import Action
from api import DockerApi
from http_server import HttpServer
from metrics import MetricsServer, Counter
from utils import get_logger, set_log_level
# Module-wide logger plus Prometheus counters, labelled by the peer host.
logger = get_logger('pygen-worker')
request_counter = Counter(
    'pygen_worker_request_count', 'Number of requests handled by the Swarm worker',
    labelnames=('client',)
)
send_counter = Counter(
    'pygen_worker_send_count', 'Number of requests sent by the Swarm worker',
    labelnames=('target',)
)
class Worker(HttpServer):
    """Swarm-node worker: serves action requests on ``worker_port`` and
    notifies each PyGen manager (on ``manager_port``) about watched
    Docker events."""
    manager_port = 9411
    worker_port = 9412
    DEFAULT_EVENTS = ['start', 'stop', 'die', 'health_status']
    EMPTY_DICT = dict()
    def __init__(self, managers, retries=0, events=None, metrics_port=9414):
        """``managers`` may be one hostname or a list of hostnames."""
        super(Worker, self).__init__(self.worker_port)
        if any(isinstance(managers, string_type) for string_type in six.string_types):
            self.managers = [managers]
        else:
            self.managers = managers
        self.retries = retries
        self.events = events or self.DEFAULT_EVENTS
        self.metrics = MetricsServer(metrics_port)
        self.api = DockerApi()
    def start(self):
        """Start the HTTP server and the Prometheus metrics endpoint."""
        super(Worker, self).start()
        if self.metrics:
            self.metrics.start()
    def _handle_request(self, request):
        """Decode a JSON action request from a manager and execute it."""
        request_counter.labels(request.address_string()).inc()
        length = int(request.headers['Content-Length'])
        data = json.loads(request.rfile.read(length).decode('utf-8'))
        self.handle_action(data.get('action'), *data.get('args', list()))
    def handle_action(self, action_name, *args):
        """Look up the Action class by name and run it via the Docker API."""
        action_type = Action.by_name(action_name)
        self.api.run_action(action_type, *args)
    def watch_events(self):
        """Stream Docker events forever, forwarding the watched ones."""
        for event in self.api.events(decode=True):
            if self.is_watched(event):
                logger.info('Received %s event from %s',
                            event.get('status'),
                            event.get('Actor', self.EMPTY_DICT).get('Attributes', self.EMPTY_DICT).get('name', '<?>'))
                self.send_update(event.get('status'))
    def is_watched(self, event):
        """Return True when the event status matches a configured event."""
        if event.get('status') in self.events:
            return True
        # health_status comes as 'health_status: healthy' for example
        if any(re.match(r'%s:.+' % item, event.get('status', '')) for item in self.events):
            return True
        return False
    def send_update(self, status):
        """POST a notification to every manager, retrying up to self.retries times."""
        for manager in self.managers:
            for _ in range(self.retries + 1):
                try:
                    response = requests.post('http://%s:%d/' % (manager, self.manager_port), timeout=(5, 30))
                    logger.info('Update (%s) sent to http://%s:%d/ : HTTP %s : %s',
                                status, manager, self.manager_port, response.status_code, response.text.strip())
                    send_counter.labels(manager).inc()
                    break
                except Exception as ex:
                    logger.error('Failed to send update to http://%s:%d/: %s',
                                 manager, self.manager_port, ex, exc_info=1)
    def shutdown(self):
        """Stop the HTTP server and the metrics endpoint."""
        super(Worker, self).shutdown()
        if self.metrics:
            self.metrics.shutdown()
def parse_arguments(args=sys.argv[1:]):
    """Parse the PyGen worker's command-line arguments.

    NOTE: the default for *args* is captured once at import time
    (sys.argv[1:]), which matches the original behaviour.

    :param args: argument list to parse (defaults to the process argv)
    :return: argparse.Namespace with manager, retries, events, metrics, debug
    """
    arg_parser = argparse.ArgumentParser(
        description='PyGen cli to send HTTP updates on Docker events')

    arg_parser.add_argument(
        '--manager', metavar='<HOSTNAME>', required=True, nargs='+',
        help='The target hostnames of the PyGen manager instances listening on port 9411')
    arg_parser.add_argument(
        '--retries', required=False, type=int, default=0,
        help='Number of retries for sending an update to the manager')
    arg_parser.add_argument(
        '--events', metavar='<EVENT>', required=False, nargs='+',
        default=['start', 'stop', 'die', 'health_status'],
        help='Docker events to watch and trigger updates for '
             '(default: start, stop, die, health_status)')
    arg_parser.add_argument(
        '--metrics', metavar='<PORT>', required=False, type=int, default=9414,
        help='HTTP port number for exposing Prometheus metrics (default: 9414)')
    arg_parser.add_argument(
        '--debug', required=False, action='store_true',
        help='Enable debug log messages')

    return arg_parser.parse_args(args)
def setup_signals(worker):  # pragma: no cover
    """Install signal handlers for the worker process.

    - SIGTERM: log and exit with status 0
    - SIGINT:  log and exit with status 1
    - SIGHUP:  trigger a manual update push to the managers

    :param worker: the Worker instance whose send_update() is invoked on SIGHUP
    """

    def exit_signal(signum, *args):
        logger.info('Exiting ...')
        # Exit 0 only for SIGTERM. The original compared the constant
        # signal.SIGTERM (always truthy), so it unconditionally exited 0;
        # use the signal number actually delivered instead.
        exit(0 if signum == signal.SIGTERM else 1)

    signal.signal(signal.SIGTERM, exit_signal)
    signal.signal(signal.SIGINT, exit_signal)

    def update_signal(*args):
        # Worker.send_update(status) requires a status argument; the
        # original zero-argument call raised TypeError on every SIGHUP.
        worker.send_update('sighup')

    signal.signal(signal.SIGHUP, update_signal)
if __name__ == '__main__':  # pragma: no cover
    # Default to INFO logging; --debug raises verbosity after parsing.
    set_log_level('INFO')

    arguments = parse_arguments()

    if arguments.debug:
        set_log_level('DEBUG')

    worker = Worker(arguments.manager, arguments.retries, arguments.events, arguments.metrics)

    setup_signals(worker)

    logger.debug('Signal handlers set up for SIGTERM, SIGINT and SIGHUP')

    try:
        worker.start()

        logger.info('Starting event watch loop')

        # Blocks forever, forwarding watched Docker events to the managers.
        worker.watch_events()

    except SystemExit:
        # Raised by the SIGTERM/SIGINT handlers: shut down cleanly, then
        # re-raise so the chosen exit code is preserved.
        logger.info('Exiting...')
        worker.shutdown()
        raise

    except Exception:
        # Any unexpected failure: release server resources before crashing.
        worker.shutdown()
        raise
| [
"rycus86@gmail.com"
] | rycus86@gmail.com |
c94c93a473f20902ced88bade76c7e8d1ae31b1f | d5e94042ac2b248b7701117a6ea941bcc862067a | /upvote/gae/modules/santa_api/main_test.py | cacbfc13901e6c3eb8f97c8ec22633bb4a553014 | [
"Apache-2.0"
] | permissive | codegrande/upvote | f373105203a0595f76c29e138a18a95dc24a63df | e05d477bb13e470127b109eb8905a66a06eed5ac | refs/heads/master | 2020-03-07T19:40:47.185833 | 2019-06-20T14:35:20 | 2019-06-20T14:35:20 | 127,677,753 | 0 | 0 | null | 2018-04-01T22:49:28 | 2018-04-01T22:49:27 | null | UTF-8 | Python | false | false | 957 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for main.py."""
from upvote.gae.shared.common import basetest
class RouteTest(basetest.UpvoteTestCase):
    """Smoke test: the santa_api main module must be importable."""

    def testImport(self):
        """Importing the module exercises its route/WSGI setup at load time."""
        # pylint: disable=g-import-not-at-top, unused-variable
        from upvote.gae.modules.santa_api import main
        # pylint: enable=g-import-not-at-top, unused-variable
if __name__ == '__main__':
basetest.main()
| [
"msuozzo@google.com"
] | msuozzo@google.com |
d66de0e407ab4f4ed1c8b9e8493cb21c10148fc4 | a9d5033858bf54768dbed97e5bac85af2eb7ce2c | /models/engine/file_storage.py | 270bc1b30472aed3e76b3a4c9308f5d64a2195e4 | [] | no_license | merryta/AirBnB_clone | 71942d65e91fd61029e4619ccdeebbb6ed13f9eb | 4e476d2a3565798f60cab82961146210a6170bf9 | refs/heads/main | 2023-07-18T14:30:49.227401 | 2021-09-01T13:15:53 | 2021-09-01T13:15:53 | 390,437,596 | 0 | 1 | null | 2021-07-28T17:14:06 | 2021-07-28T17:14:05 | null | UTF-8 | Python | false | false | 1,489 | py | #!/usr/bin/python3
"""
File storage module
"""
import json
from models.base_model import BaseModel
from models.user import User
from models.state import State
from models.city import City
from models.amenity import Amenity
from models.place import Place
from models.review import Review
class FileStorage:
    """Serializes model instances to a JSON file and deserializes the
    JSON file back to instances.

    Keys in the in-memory store take the form "<class name>.<id>".
    """

    # Path of the JSON file used for persistence.
    __file_path = "file.json"
    # In-memory cache of all objects, shared across instances.
    __objects = {}

    def all(self):
        """returns the dictionary __objects"""
        return FileStorage.__objects

    def new(self, obj):
        """ sets in __objects the obj with key
        <obj class name>.id"""
        FileStorage.__objects["{}.{}".format(obj.__class__.__name__,
                                             obj.id)] = obj

    def save(self):
        """serializes __objects to the JSON file"""
        dict_o = FileStorage.__objects
        # Convert every stored instance to its dict representation first.
        obj_dict = {obj: dict_o[obj].to_dict() for obj in dict_o.keys()}
        with open(FileStorage.__file_path, "w") as f:
            json.dump(obj_dict, f)

    def reload(self):
        """deserializes the JSON file to __objects"""
        try:
            with open(FileStorage.__file_path) as f:
                obj_dict = json.load(f)
            for item in obj_dict.values():
                class_name = item["__class__"]
                del item["__class__"]
                # eval() maps the stored class name back to an imported model
                # class; acceptable only because the file is produced by
                # save() itself, never by untrusted input.
                self.new(eval(class_name)(**item))
        except FileNotFoundError:
            # No persistence file yet: nothing to load.
            return
| [
"shikandadennis07@gmail.com"
] | shikandadennis07@gmail.com |
561e2326627bbb9767ed6374fef7c9e77c6c0ae9 | b7fc15a7aa3596facb7373bae9ea12124729cec3 | /Anotações de aula/Unidade20 - Hashing/criar.py | 8412b3adaae8d9a438e0593c13ca051b54b66acd | [] | no_license | gigennari/mc202 | 0801d081eb347b1106626dfa80c9f2a3e09e49ad | 22c88873028204fe86138bc241c547042417889e | refs/heads/master | 2023-03-30T17:02:32.569080 | 2021-03-18T18:03:15 | 2021-03-18T18:03:15 | 354,131,206 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | """
1º caso de teste: 10 100
2º caso de teste: 100000 72000
3º caso de teste: 100000 720000
"""
import random
# Test-data generator: emits `numero_ocorrencias` random word occurrences
# drawn from a vocabulary of `numero_palavras` synthetic words.
numero_palavras = 10000
numero_ocorrencias = 72000

# Vocabulary: palavra0 .. palavra<numero_palavras - 1>.
palavras = ['palavra' + str(i) for i in range(numero_palavras)]

# Print one randomly chosen word per line.
for _ in range(numero_ocorrencias):
    print(random.choice(palavras))
| [
"g198010@dac.unicamp.br"
] | g198010@dac.unicamp.br |
ab8fb83994515cf9e89d97044d98deb791d77949 | f75632bafa7d9771655a2030cbf009e73682d0f2 | /woot/apps/distribution/tasks.py | 3f68728a4931983442732570a6c290a6a7202037 | [] | no_license | NicholasPiano/arktic | 563acab350e9d4f1ef5375d9237cba6d2df04295 | f28d868373f00aa8a817239f5bc167dadb50b053 | refs/heads/master | 2016-09-08T01:20:06.885034 | 2015-02-28T16:28:57 | 2015-02-28T16:28:57 | 19,547,469 | 0 | 1 | null | 2014-09-23T11:27:00 | 2014-05-07T19:31:26 | JavaScript | UTF-8 | Python | false | false | 3,292 | py | #apps.distribution.tasks
#django
from django.conf import settings
#local
from apps.distribution.models import Client, Project
from apps.transcription.models import Grammar
from apps.transcription.models import Transcription, CSVFile, WavFile
from libs.utils import generate_id_token
#util
import os
#third party
from celery import task
#from apps.distribution.tasks import scan_data; scan_data();
@task()
def scan_data():
    '''
    Walks through data directory and finds new grammars, creating them and adding them to the right clients and projects.

    Directory layout assumed: data/<client>/<project>/... containing .csv
    relation files and .wav audio files. Only newly created clients/projects
    are scanned further (get_or_create `created` flag gates the work).
    '''
    #1. get all filenames+paths in project dir
    #2. get all filenames from all csv files in project dir -> dictionary
    #3.
    data_dir = os.path.join(settings.DJANGO_ROOT, 'data')
    for name in os.listdir(data_dir):
        client, created = Client.objects.get_or_create(name=name)
        if created:  #scan directory for grammars
            client.client_path = os.path.join(data_dir, name)
            client.save()
            print('created client: ' + str(client))
        # Every sub-directory of the client directory is treated as a project.
        for project_name in [dir_i for dir_i in os.listdir(client.client_path) if os.path.isdir(os.path.join(client.client_path, dir_i))]:
            project, created = client.projects.get_or_create(name=project_name)
            if created:
                project.id_token = generate_id_token(Project)
                project.project_path = os.path.join(client.client_path, project_name)
                project.save()
                print('created project: ' + str(project))
                #generate list of .csv files and list of .wav files
                csv_file_list = []
                wav_file_dictionary = {}
                for sup, subs, file_list in os.walk(project.project_path):
                    for file_name in file_list:
                        # Skip csv files inside 'Unsorted' or 'save' folders.
                        if '.csv' in file_name and 'Unsorted' not in sup and 'save' not in sup:
                            csv_file_list.append(file_name)
                            root, ext = os.path.splitext(file_name)
                            project.csv_files.get_or_create(client=client, name=root, file_name=file_name, path=sup)
                        elif '.wav' in file_name:
                            # Map bare wav filename -> full path for lookup below.
                            wav_file_dictionary[file_name] = os.path.join(sup, file_name)
                # One grammar per csv relation file; link each referenced wav.
                for i, csv_file in enumerate(project.csv_files.all()):
                    grammar, created = project.grammars.get_or_create(client=client, name=csv_file.name)
                    if created:
                        grammar.csv_file = csv_file
                        grammar.id_token = generate_id_token(Grammar)
                        print('created grammar ' + str(grammar))
                        with open(os.path.join(csv_file.path, csv_file.file_name)) as open_rel_file:
                            lines = open_rel_file.readlines()
                            for j, line in enumerate(lines):
                                tokens = line.split('|')  #this can be part of a relfile parser object with delimeter '|'
                                # First token is the path to the audio file.
                                transcription_audio_file_name = os.path.basename(tokens[0])
                                grammar.wav_files.get_or_create(client=client, project=project, path=wav_file_dictionary[transcription_audio_file_name], file_name=transcription_audio_file_name)
                                # Progress indicator; '\r' keeps it on one line.
                                print('grammar %d/%d, wav %d/%d'%(i+1,project.csv_files.count(),j+1,len(lines)), end='\r' if j<len(lines)-1 else '\n')
                        grammar.save()
                    csv_file.save()
@task()
def process_grammar(grammar_id_token):
    """Celery task: process one grammar and all of its transcriptions.

    :param grammar_id_token: id_token identifying the Grammar to process
    """
    grammar = Grammar.objects.get(id_token=grammar_id_token)
    grammar.process()
    for transcription in grammar.transcriptions.all():
        transcription.process()
| [
"nicholas.d.piano@gmail.com"
] | nicholas.d.piano@gmail.com |
38993334e2ac97d9cadceb588d4434dc6502e8b1 | 2c84afdb7d80fd482738e2f8f715b717fecd0332 | /setup.py | 8367fcb7679105852081e44bb26831582a9372c6 | [
"Apache-2.0"
] | permissive | templeblock/g2pM | 16c36da17121a4f249f64d7895de38e50c459bda | e5b4f903364e496beebf03af7b40d4b5e8b2419f | refs/heads/master | 2022-11-23T13:46:50.952224 | 2020-07-27T03:07:28 | 2020-07-27T03:07:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 943 | py | import setuptools
# Packaging metadata for g2pM (Mandarin grapheme-to-phoneme converter),
# consumed by `pip` / PyPI via setuptools.
setuptools.setup(
    name="g2pM",
    version="0.1.2.4",
    license='Apache License 2.0',
    author="Seanie Lee",
    author_email="lsnfamily02@gmail.com",
    description="g2pM: A Neural Grapheme-to-Phoneme Conversion Package for MandarinChinese",
    long_description=open('README.md').read(),
    long_description_content_type="text/markdown",
    url="https://github.com/kakaobrain/g2pM",
    packages=setuptools.find_packages(),
    python_requires=">=3.6",
    # Ship non-Python files declared in MANIFEST.in (e.g. model weights).
    include_package_data=True,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
)
| [
"lsnfamily02@naver.com"
] | lsnfamily02@naver.com |
f822112fc9a971124348558fda6eb04497c71c8e | be2185ca694c8f476b1425128669b1fa086c7107 | /tests/sandbox/handlers/test.py | 619f8afb89036a055c9b4e52d23b928650860100 | [] | no_license | scorphus/cow | 51983b13cd81d40a938762c1da4217a5d8584601 | bd633305f8bbdc4f5773685a8533edf76503e8e6 | refs/heads/master | 2021-01-16T21:58:38.097609 | 2014-07-15T18:00:09 | 2014-07-15T18:00:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 191 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from tornado.web import RequestHandler
class TestHandler(RequestHandler):
    """Sandbox handler that echoes the application's TESTCONF setting."""

    def get(self):
        # Write the config value loaded into the application at startup.
        self.write(self.application.config.TESTCONF)
| [
"heynemann@gmail.com"
] | heynemann@gmail.com |
f636a2d707f57716c0f778ca500d9a5f8ff46b41 | 8e69eee9b474587925e22413717eb82e4b024360 | /v1.0.0.test/toontown/coghq/DistributedCountryClubBattleAI.py | be61dbe9e49a5cd8aa2e40b365c680060ab6f387 | [
"MIT"
] | permissive | TTOFFLINE-LEAK/ttoffline | afaef613c36dc3b70514ccee7030ba73c3b5045b | bb0e91704a755d34983e94288d50288e46b68380 | refs/heads/master | 2020-06-12T15:41:59.411795 | 2020-04-17T08:22:55 | 2020-04-17T08:22:55 | 194,348,185 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,284 | py | from toontown.toonbase import ToontownGlobals
from toontown.coghq import DistributedLevelBattleAI
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import State
from direct.fsm import ClassicFSM, State
from toontown.battle.BattleBase import *
import CogDisguiseGlobals
from toontown.toonbase.ToontownBattleGlobals import getCountryClubCreditMultiplier
from direct.showbase.PythonUtil import addListsByValue
class DistributedCountryClubBattleAI(DistributedLevelBattleAI.DistributedLevelBattleAI):
    """Server-side battle for Country Club levels.

    Extends the generic level battle with a 'CountryClubReward' FSM state,
    Country-Club-specific merit/cog-buck rewards, and boss-defeat bookkeeping.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedCountryClubBattleAI')

    def __init__(self, air, battleMgr, pos, suit, toonId, zoneId, level, battleCellId, roundCallback=None, finishCallback=None, maxSuits=4):
        """Set up the battle and wire the CountryClubReward state into the FSM."""
        DistributedLevelBattleAI.DistributedLevelBattleAI.__init__(self, air, battleMgr, pos, suit, toonId, zoneId, level, battleCellId, 'CountryClubReward', roundCallback, finishCallback, maxSuits)
        self.battleCalc.setSkillCreditMultiplier(1)
        if self.bossBattle:
            # Record that this toon reached the boss encounter.
            self.level.d_setBossConfronted(toonId)
        self.fsm.addState(State.State('CountryClubReward', self.enterCountryClubReward, self.exitCountryClubReward, ['Resume']))
        playMovieState = self.fsm.getStateNamed('PlayMovie')
        playMovieState.addTransition('CountryClubReward')

    def getTaskZoneId(self):
        """Zone id used for quest/merit attribution: the country club id."""
        return self.level.countryClubId

    def handleToonsWon(self, toons):
        """Award quest items and merits to each winning toon.

        Extra cog bucks are granted in the department matching this
        country club's zone; only toons on the helpful list receive merits.
        """
        # One slot per cog department; only the matching department gets extra.
        extraMerits = [0, 0, 0, 0]
        amount = ToontownGlobals.CountryClubCogBuckRewards[self.level.countryClubId]
        index = ToontownGlobals.cogHQZoneId2deptIndex(self.level.countryClubId)
        extraMerits[index] = amount
        for toon in toons:
            recovered, notRecovered = self.air.questManager.recoverItems(toon, self.suitsKilled, self.getTaskZoneId())
            self.toonItems[toon.doId][0].extend(recovered)
            self.toonItems[toon.doId][1].extend(notRecovered)
            meritArray = self.air.promotionMgr.recoverMerits(toon, self.suitsKilled, self.getTaskZoneId(), getCountryClubCreditMultiplier(self.getTaskZoneId()), extraMerits=extraMerits)
            if toon.doId in self.helpfulToons:
                self.toonMerits[toon.doId] = addListsByValue(self.toonMerits[toon.doId], meritArray)
            else:
                self.notify.debug('toon %d not helpful list, skipping merits' % toon.doId)

    def enterCountryClubReward(self):
        """FSM state: distribute rewards, mark the boss defeated, start timeout."""
        self.joinableFsm.request('Unjoinable')
        self.runableFsm.request('Unrunable')
        self.resetResponses()
        self.assignRewards()
        self.bossDefeated = 1
        self.level.setVictors(self.activeToons[:])
        # If clients never acknowledge, force completion after the timeout.
        self.timer.startCallback(BUILDING_REWARD_TIMEOUT, self.serverRewardDone)
        return

    def exitCountryClubReward(self):
        """FSM state exit hook (no cleanup required)."""
        return

    def enterResume(self):
        """On resume, propagate a boss defeat to the level if applicable."""
        DistributedLevelBattleAI.DistributedLevelBattleAI.enterResume(self)
        if self.bossBattle and self.bossDefeated:
            self.battleMgr.level.b_setDefeated()

    def enterReward(self):
        """Notify the containing room (if any) that its challenge was beaten."""
        DistributedLevelBattleAI.DistributedLevelBattleAI.enterReward(self)
        roomDoId = self.getLevelDoId()
        room = simbase.air.doId2do.get(roomDoId)
        if room:
            room.challengeDefeated()
"s0mberdemise@protonmail.com"
] | s0mberdemise@protonmail.com |
3c7e3656ee88fd41d3feeb0279d5c585f3780f0f | b08f5367ffd3bdd1463de2ddc05d34cbfba6796e | /arrays/enumerate_primes.py | 6c7038972e282198034637fed64a0a7c27849707 | [] | no_license | uohzxela/fundamentals | cb611fa6c820dc8643a43fd045efe96bc43ba4ed | 6bbbd489c3854fa4bf2fe73e1a2dfb2efe4aeb94 | refs/heads/master | 2020-04-04T03:56:44.145222 | 2018-04-05T01:08:14 | 2018-04-05T01:08:14 | 54,199,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | def enum_primes(n):
primes = []
is_primes = [True for i in xrange(n+1)]
for i in xrange(2, n+1):
if is_primes[i]:
for j in xrange(i*i, n+1, i):
is_primes[j] = False
primes.append(i)
return primes
assert enum_primes(200) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,
61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157,
163, 167, 173, 179, 181, 191, 193, 197, 199]
assert enum_primes(18) == [2, 3, 5, 7, 11, 13, 17]
| [
"uohzxela@gmail.com"
] | uohzxela@gmail.com |
e6634981904e2b25a3fc4a233753b07e3e20c60b | fa032ddde94e7e397b15940159c706dadc89559c | /packages/std/nodes/std___If0/std___If0___METACODE.py | 861f76d269249d5ad29ed8b79a4fe738116bed05 | [
"MIT"
] | permissive | rkoschmitzky/pyScript | dd97ccb5d196c610de5df982ea006fa58652b82d | b60c6d3cdc3856e3b59843feaa7bdd2461f10158 | refs/heads/master | 2022-11-09T12:52:54.055020 | 2020-06-13T09:17:01 | 2020-06-13T09:17:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,504 | py | from custom_src.NodeInstance import NodeInstance
from custom_src.Node import Node
# USEFUL
# self.input(index) <- access to input data
# self.outputs[index].set_val(val) <- set output data port value
# self.main_widget <- access to main widget
# self.exec_output(index) <- executes an execution output
# self.create_new_input(type_, label, append=True, widget_type='', widget_name='', widget_pos='under', pos=-1)
# self.delete_input(input or index)
# self.create_new_output(type_, label, append=True, pos=-1)
# self.delete_output(output or index)
# self.update_shape()
# NOTE: this is METACODE — %NODE_TITLE% is substituted by the node generator
# before the file becomes valid Python.
class %NODE_TITLE%_NodeInstance(NodeInstance):
    """An 'if / else if / else' node: one exec output per condition plus a
    final 'else' output; conditions can be added/removed at runtime via
    special actions."""

    def __init__(self, parent_node: Node, flow, configuration=None):
        super(%NODE_TITLE%_NodeInstance, self).__init__(parent_node, flow, configuration)

        self.special_actions['add else if'] = {'method': self.action_add_else_if}
        # Number of 'else if' branches currently added beyond the base 'if'.
        self.else_if_enlargement_state = 0

        self.initialized()

    def action_add_else_if(self):
        """Add one 'else if' branch: a new condition input and an exec output
        inserted just before the trailing 'else' output."""
        self.create_new_input('data', 'condition '+str(self.else_if_enlargement_state+1), widget_type='std line edit', widget_pos='under')
        self.create_new_output('exec', 'elif '+str(self.else_if_enlargement_state+1), append=False, pos=len(self.outputs)-1)
        self.else_if_enlargement_state += 1
        # Removal becomes available once at least one branch exists.
        self.special_actions['remove else if'] = {'method': self.action_remove_else_if}
        self.update_shape()

    def action_remove_else_if(self):
        """Remove the most recently added 'else if' branch."""
        self.delete_input(self.inputs[-1])
        # -2 because the last output is always the 'else' exec output.
        self.delete_output(self.outputs[-2])
        self.else_if_enlargement_state -= 1
        if self.else_if_enlargement_state == 0:
            del self.special_actions['remove else if']
        self.update_shape()

    def update_event(self, input_called=-1):
        """Triggered on exec input (index 0): evaluate the branch chain."""
        if input_called == 0:
            self.do_if(0, self.else_if_enlargement_state)

    def do_if(self, if_cnt, current_enlarment_state):
        """Recursively test condition `if_cnt`; fire its exec output on the
        first truthy condition, otherwise fall through to the 'else' output."""
        # Data inputs start at index 1 (index 0 is the exec input).
        if self.input(1+if_cnt):
            self.exec_output(if_cnt)
        elif if_cnt < current_enlarment_state:
            self.do_if(if_cnt+1, current_enlarment_state)
        else:
            # No condition matched: fire the trailing 'else' output.
            self.exec_output(len(self.outputs)-1)

    def get_data(self):
        """Serialize the node's dynamic state for saving."""
        # Key keeps the historical 'enlargment' spelling for file compatibility.
        data = {'else if enlargment state': self.else_if_enlargement_state}
        return data

    def set_data(self, data):
        """Restore the node's dynamic state from saved data."""
        self.else_if_enlargement_state = data['else if enlargment state']

    # optional - important for threading - stop everything here
    def removing(self):
        pass
| [
"leon.thomm@gmx.de"
] | leon.thomm@gmx.de |
2b19d579f3da6bd7b7e1259c5e69e68e9e083a04 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-roma/huaweicloudsdkroma/v2/model/env_info.py | b4922f230a4b405600fc3104026de9308661d152 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,936 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class EnvInfo:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'create_time': 'datetime',
'name': 'str',
'remark': 'str',
'id': 'str'
}
attribute_map = {
'create_time': 'create_time',
'name': 'name',
'remark': 'remark',
'id': 'id'
}
def __init__(self, create_time=None, name=None, remark=None, id=None):
"""EnvInfo
The model defined in huaweicloud sdk
:param create_time: 创建时间
:type create_time: datetime
:param name: 环境名称
:type name: str
:param remark: 描述信息
:type remark: str
:param id: 环境编号
:type id: str
"""
self._create_time = None
self._name = None
self._remark = None
self._id = None
self.discriminator = None
if create_time is not None:
self.create_time = create_time
if name is not None:
self.name = name
if remark is not None:
self.remark = remark
if id is not None:
self.id = id
@property
def create_time(self):
"""Gets the create_time of this EnvInfo.
创建时间
:return: The create_time of this EnvInfo.
:rtype: datetime
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""Sets the create_time of this EnvInfo.
创建时间
:param create_time: The create_time of this EnvInfo.
:type create_time: datetime
"""
self._create_time = create_time
@property
def name(self):
"""Gets the name of this EnvInfo.
环境名称
:return: The name of this EnvInfo.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this EnvInfo.
环境名称
:param name: The name of this EnvInfo.
:type name: str
"""
self._name = name
@property
def remark(self):
"""Gets the remark of this EnvInfo.
描述信息
:return: The remark of this EnvInfo.
:rtype: str
"""
return self._remark
@remark.setter
def remark(self, remark):
"""Sets the remark of this EnvInfo.
描述信息
:param remark: The remark of this EnvInfo.
:type remark: str
"""
self._remark = remark
@property
def id(self):
"""Gets the id of this EnvInfo.
环境编号
:return: The id of this EnvInfo.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this EnvInfo.
环境编号
:param id: The id of this EnvInfo.
:type id: str
"""
self._id = id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EnvInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
6db96b1402f7b74098c653be32e69a153e0e21db | a660f0674e816e7f97353c0eec7c9960eed36889 | /examples/embedded_boundary.py | 97e535165514712553cbcd0b5270a719c6cb82eb | [
"Apache-2.0"
] | permissive | dbstein/ipde | da4642cbd26e4857c966123ed6654f38ddf5dff6 | a254bf128eba835284935290b8de09eb1374aa3f | refs/heads/master | 2022-07-22T14:29:47.420137 | 2022-07-13T18:30:10 | 2022-07-13T18:30:10 | 215,557,734 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,258 | py | import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pybie2d
from ipde.embedded_boundary import EmbeddedBoundary
from ipde.heavisides import SlepianMollifier
from ipde.derivatives import fd_x_4, fd_y_4, fourier
from personal_utilities.arc_length_reparametrization import arc_length_parameterize
star = pybie2d.misc.curve_descriptions.star
GSB = pybie2d.boundaries.global_smooth_boundary.global_smooth_boundary.Global_Smooth_Boundary
Grid = pybie2d.grid.Grid
nb = 200
ng = int(nb/2)
M = 16
pad_zone = 4
interior = False
slepian_r = 20
reparametrize = False
# get heaviside function
MOL = SlepianMollifier(slepian_r)
# construct boundary
bdy = GSB(c=star(nb, a=0.1, f=5))
# reparametrize if reparametrizing
if reparametrize:
bdy = GSB(*arc_length_parameterize(bdy.x, bdy.y))
# construct a grid
grid = Grid([-1.5, 1.5], ng, [-1.5, 1.5], ng, x_endpoints=[True, False], y_endpoints=[True, False])
# construct embedded boundary
ebdy = EmbeddedBoundary(bdy, interior, M, grid.xh*0.75, pad_zone, MOL.step)
# register the grid
print('\nRegistering the grid')
ebdy.register_grid(grid, verbose=True)
################################################################################
# Make basic plots
fig, ax = plt.subplots()
ax.pcolormesh(grid.xg, grid.yg, ebdy.phys)
ax.scatter(bdy.x, bdy.y, color='white', s=20)
ax.set_title('Phys')
fig, ax = plt.subplots()
ax.pcolormesh(grid.xg, grid.yg, ebdy.grid_in_annulus)
ax.scatter(bdy.x, bdy.y, color='white', s=20)
ax.set_title('In Annulus')
fig, ax = plt.subplots()
ax.pcolormesh(grid.xg, grid.yg, ebdy.grid_step)
ax.scatter(bdy.x, bdy.y, color='white', s=20)
ax.set_title('Heaviside')
fig, ax = plt.subplots()
ax.scatter(ebdy.radial_x, ebdy.radial_y, color='blue', s=10, label='special coordinates')
ax.scatter(ebdy.bdy.x, ebdy.bdy.y, color='black', s=10, label='boundary')
ax.scatter(ebdy.interface.x, ebdy.interface.y, color='gray', s=10, label='interface')
ax.legend()
ax.set_title('Special Coordinates')
################################################################################
# Test interpolation operations
k = 2*np.pi/3
test_func = lambda x, y: np.exp(np.sin(k*x))*np.sin(k*y)
test_func_x = lambda x, y: k*np.exp(np.sin(k*x))*np.cos(k*x)*np.sin(k*y)
test_func_y = lambda x, y: k*np.exp(np.sin(k*x))*np.cos(k*y)
# Interpolation of a globally smooth function on grid to radial
f = test_func(grid.xg, grid.yg)
fr = test_func(ebdy.radial_x, ebdy.radial_y)
fe = ebdy.interpolate_grid_to_radial(f, order=5)
err = np.abs(fe-fr).max()
print('Error in grid --> radial interpolation: {:0.2e}'.format(err))
# Interpolation of a function to the interface
fr = test_func(ebdy.interface.x, ebdy.interface.y)
fe = ebdy.interpolate_grid_to_interface(f, order=5)
err = np.abs(fe-fr).max()
print('Error in grid --> interface interpolation: {:0.2e}'.format(err))
# Interpolation of a function from radial to grid
fr = test_func(ebdy.radial_x, ebdy.radial_y)
ft = ebdy.interpolate_radial_to_grid(fr)
fe = test_func(ebdy.grid_ia_x, ebdy.grid_ia_y)
err = np.abs(fe-ft).max()
print('Error in radial --> grid interpolation: {:0.2e}'.format(err))
################################################################################
# Test derivatives
# radial gradient
frxe, frye = ebdy.radial_grid_derivatives(fr)
frxt = test_func_x(ebdy.radial_x, ebdy.radial_y)
fryt = test_func_y(ebdy.radial_x, ebdy.radial_y)
err_x = np.abs(frxt-frxe).max()
err_y = np.abs(fryt-frye).max()
err = max(err_x, err_y)
print('Error in radial grid differentiation: {:0.2e}'.format(err))
# fourth order accurate gradient on whole domain
dx = lambda x: fd_x_4(x, grid.xh, periodic_fix=not interior)
dy = lambda x: fd_y_4(x, grid.yh, periodic_fix=not interior)
fxe, fye, fxre, fyre = ebdy.gradient(f, fr, dx, dy)
fxt = test_func_x(grid.xg, grid.yg)
fyt = test_func_y(grid.xg, grid.yg)
err_x = np.abs(fxt-fxe)[ebdy.phys].max()
err_y = np.abs(fyt-fye)[ebdy.phys].max()
err = max(err_x, err_y)
print('Error in gradient, 4th order FD: {:0.2e}'.format(err))
# spectrally accurate gradient on whole domain
kxv = np.fft.fftfreq(grid.Nx, grid.xh/(2*np.pi))
kyv = np.fft.fftfreq(grid.Ny, grid.yh/(2*np.pi))
kx, ky = np.meshgrid(kxv, kyv, indexing='ij')
ikx, iky = 1j*kx, 1j*ky
dx = lambda x: fourier(x, ikx)
dy = lambda x: fourier(x, iky)
fxe, fye, fxre, fyre = ebdy.gradient(f, fr, dx, dy)
err_x = np.abs(fxt-fxe)[ebdy.phys].max()
err_y = np.abs(fyt-fye)[ebdy.phys].max()
err = max(err_x, err_y)
print('Error in gradient, Fourier: {:0.2e}'.format(err))
################################################################################
# Plot QFS Boundaries
fig, ax = plt.subplots()
ax.scatter(ebdy.bdy.x, ebdy.bdy.y, color='black', s=10, label='boundary')
ax.scatter(ebdy.interface.x, ebdy.interface.y, color='gray', s=10, label='interface')
bb = ebdy.bdy_qfs.interior_source_bdy if interior else ebdy.bdy_qfs.exterior_source_bdy
ax.scatter(bb.x, bb.y, color='blue', s=10, label='boundary effective')
bb = ebdy.interface_qfs.exterior_source_bdy
ax.scatter(bb.x, bb.y, color='red', s=10, label='interface effective 1')
bb = ebdy.interface_qfs.interior_source_bdy
ax.scatter(bb.x, bb.y, color='pink', s=10, label='interface effective 2')
ax.legend()
ax.set_title('QFS Boundaries')
| [
"dstein@flatironinstitute.org"
] | dstein@flatironinstitute.org |
c6cd2fc9dc2c9064c937f58a6dbfe5b5065a343e | 5da5473ff3026165a47f98744bac82903cf008e0 | /scripts/configure_release_please/configure_release_please.py | 6d457a63a8297a9d6910a911b66280d01abbfeeb | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 5,416 | py | import json
from pathlib import Path
from typing import Union, Dict, List, Tuple
import re
SCRIPT_DIR = Path(__file__).resolve().parent
ROOT_DIR = Path(SCRIPT_DIR / ".." / "..").resolve()
PACKAGES_DIR = ROOT_DIR / "packages"
def get_version_for_package(version_path: Path) -> Tuple[int]:
    """
    Given a `version_path` to a `gapic_version.py` file,
    return Tuple<int> which contains the version.

    Args:
        version_path(pathlib.Path): Path to the gapic_version.py file

    Returns:
        Tuple[int] in the format (<major>, <minor>, <patch>)

    Raises:
        Exception: if no `__version__ = "X.Y.Z"` assignment is found.
    """
    VERSION_REGEX = r"__version__\s=\s\"(?P<major_version>\d+)\.(?P<minor_version>\d+)\.(?P<patch_version>\d+)\""
    match = re.search(VERSION_REGEX, version_path.read_text())
    if match is None:
        raise Exception("Could not detect version")
    # A successful match guarantees all three named groups are digit strings,
    # so the int() conversions cannot fail or yield None (the original's
    # post-conversion None-check was unreachable dead code and is removed).
    return (
        int(match.group("major_version")),
        int(match.group("minor_version")),
        int(match.group("patch_version")),
    )
def get_packages_with_owlbot_yaml(packages_dir: Path = PACKAGES_DIR) -> List[Path]:
    """
    Walks through all API packages in the specified `packages_dir` path.

    Args:
        packages_dir(pathlib.Path): Path to the directory which contains packages.

    Returns:
        List[pathlib.Path] where each entry corresponds to a package within the
        specified `packages_dir`, which has a corresponding .OwlBot.yaml file.

    Raises:
        FileNotFoundError: if `packages_dir` does not exist.
    """
    if not Path(packages_dir).exists():
        raise FileNotFoundError(f"Directory {packages_dir} not found")
    # A package qualifies iff it directly contains a .OwlBot.yaml file;
    # return the (resolved) directory holding each such file.
    return [
        yaml_path.parent.resolve()
        for yaml_path in packages_dir.rglob("**/.OwlBot.yaml")
    ]
def configure_release_please_manifest(
package_dirs: List[Path], root_dir: Path = ROOT_DIR
) -> None:
"""
This method updates the `.release-please-manifest.json` file in the directory
`root_dir`.
Args:
package_dirs(List[pathlib.Path]): A list of Paths, one for each package in the
`packages/` folder whose entry will be updated in the release-please manifest.
root_dir(pathlib.Path): The directory to update the `.release-please-manifest.json`
Returns:
None
"""
release_please_manifest = root_dir / ".release-please-manifest.json"
with open(release_please_manifest, "r") as f:
manifest_json = json.load(f)
for package_dir in package_dirs:
if f"packages/{package_dir.name}" not in manifest_json:
manifest_json[f"packages/{package_dir.name}"] = "0.0.0"
gapic_version_file = next(package_dir.rglob("**/gapic_version.py"), None)
if gapic_version_file is None:
raise Exception("Failed to find gapic_version.py")
version = get_version_for_package(gapic_version_file)
# check the version in gapic_version.py and update if newer than the default which is
# 0.0.0 or 0.1.0.
if version != (0, 0, 0) and version != (0, 1, 0):
manifest_json[
f"packages/{package_dir.name}"
] = f"{version[0]}.{version[1]}.{version[2]}"
with open(release_please_manifest, "w") as f:
json.dump(manifest_json, f, indent=4, sort_keys=True)
f.write("\n")
def configure_release_please_config(
package_dirs: List[Path], root_dir: Path = ROOT_DIR
) -> None:
"""
This method updates the `release-please-config.json` file in the directory
`root_dir`. If `root_dir` is not provided, `google-cloud-python` will be used as the root.
Args:
package_dirs(List[pathlib.Path]): A list of Paths, one for each package in
the `packages/` folder whose entry will be updated in the release-please config.
root_dir(pathlib.Path): The directory to update the `release-please-config.json`
Returns:
None
"""
release_please_config = root_dir / "release-please-config.json"
config_json = {"packages": {}}
for package_dir in package_dirs:
extra_files: List[Union[str, Dict[str, str]]] = [
str(file.relative_to(package_dir))
for file in sorted(package_dir.rglob("**/gapic_version.py"))
]
if len(extra_files) < 1:
raise Exception("Failed to find gapic_version.py")
for json_file in sorted(package_dir.glob("samples/**/*.json")):
sample_json = {}
sample_json["jsonpath"] = "$.clientLibrary.version"
sample_json["path"] = str(json_file.relative_to(package_dir))
sample_json["type"] = "json"
extra_files.append(sample_json)
config_json["packages"][f"packages/{package_dir.name}"] = {
"component": f"{package_dir.name}",
"release-type": "python",
"extra-files": extra_files,
"bump-minor-pre-major": True,
"bump-patch-for-minor-pre-major": True,
}
with open(release_please_config, "w") as f:
json.dump(config_json, f, indent=4, sort_keys=True)
f.write("\n")
if __name__ == "__main__":
owlbot_dirs = get_packages_with_owlbot_yaml()
configure_release_please_manifest(owlbot_dirs)
configure_release_please_config(owlbot_dirs)
| [
"noreply@github.com"
] | googleapis.noreply@github.com |
3159244fd02b2ff10c466b4c0a2f67ecdf62d672 | 92a619c043e0c26fb65e58619a0e1c5090a9efe0 | /Grokking_the_Coding_Interviews/p125_k_pairs_with_largest_sum.py | d4cd0c92088a92b85a86448e9706fa355f351ac0 | [] | no_license | curieshicy/My_Utilities_Code | 39150171f8e0aa4971cfc3d7adb32db7f45e6733 | 8b14a5c1112794d3451486c317d5e3c73efcd3b5 | refs/heads/master | 2022-06-22T06:06:39.901008 | 2022-06-20T16:00:51 | 2022-06-20T16:00:51 | 177,379,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py | import heapq
def find_k_largest_pairs(nums1, nums2, k):
result = []
max_heap = []
i = 0
j = 0
while len(max_heap) < k:
if i + 1 < len(nums1) and j + 1 < len(nums2):
heapq.heappush(max_heap, (-nums1[i] - nums2[j], (nums1[i], nums2[j])))
heapq.heappush(max_heap, (-nums1[i + 1] - nums2[j], (nums1[i + 1], nums2[j])))
heapq.heappush(max_heap, (-nums1[i] - nums2[j + 1], (nums1[i], nums2[j + 1])))
heapq.heappush(max_heap, (-nums1[i + 1] - nums2[j + 1], (nums1[i + 1], nums2[j + 1])))
i += 1
j += 1
while max_heap and k:
_, pair = heapq.heappop(max_heap)
result.append(list(pair))
k -= 1
return result
def find_k_largest_pairs(nums1, nums2, k):
min_heap = []
for i in range(min(len(nums1), k)):
for j in range(min(len(nums2), k)):
if len(min_heap) < k:
heapq.heappush(min_heap, (nums1[i] + nums2[j], (nums1[i], nums2[j])))
else:
if nums1[i] + nums2[j] < min_heap[0][0]:
break
else:
heapq.heappushpop(min_heap, (nums1[i] + nums2[j], (nums1[i], nums2[j])))
result = []
while min_heap:
_, pair = heapq.heappop(min_heap)
result.append(list(pair))
return result
def main():
print("Pairs with largest sum are: " + str(find_k_largest_pairs([9, 8, 2], [6, 3, 1], 3)))
print("Pairs with largest sum are: " + str(find_k_largest_pairs([5, 2, 1], [2, -1], 3)))
main()
| [
"noreply@github.com"
] | curieshicy.noreply@github.com |
bc6c4a4c0bd8880ce526d203d61b4232b89dc9e9 | 4c8295990aad62b0edbf5c31ee8db9ee485fbea7 | /tests/test_room.py | d39e4d8fb46d5a586d11bf0e1d9b7a7704a1d81e | [
"MIT"
] | permissive | frostburn/image-source-reverb | 7b72e2d215f4f2dcedafb1cf2ecddd6ffca71c2f | 84c467249fb5fb1e978009510889a0447a625cda | refs/heads/master | 2021-04-16T18:54:36.948737 | 2020-03-23T14:57:13 | 2020-03-23T14:57:13 | 249,377,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | import numpy as np
from image_source_reverb.room import Plane, Room
def test_mirrors_origin():
p = Plane([1, 0], distance=1.1)
x = p.mirror_point([0, 0])
assert np.allclose(x, [-2.2, 0])
def test_ignores_backside():
p = Plane([2, 1])
assert p.mirror_point([-2, -2]) is None
def test_triangle_reflections():
r = Room([[0, 1], [1, -1], [-1, -1]])
reflections = r.reflect_points([[0, 0]])
assert np.allclose(reflections, [
[0, -2],
[-np.sqrt(2), np.sqrt(2)],
[np.sqrt(2), np.sqrt(2)]
])
| [
"lumi.pakkanen@gmail.com"
] | lumi.pakkanen@gmail.com |
4c9ff60522b7965fb8c10f42157fec743e28aaee | 135999da46a46d95c5e8aa3e3dc840a589b46dc7 | /ch01Text/1.3re/FindingPatterns.py | 94e03f06bfbeb0fcb531ddb8957afcd53fcfdb52 | [
"MIT"
] | permissive | eroicaleo/ThePythonStandardLibraryByExample | 53e6b32f6b2c3125a0bf4023d5df8705756a756d | b9c48c026e6966eee32cd51b0c49a79a5cbdceb9 | refs/heads/master | 2016-09-05T18:48:30.325386 | 2014-09-09T06:58:21 | 2014-09-09T06:58:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | #!/usr/bin/env python3
import re
pattern = 'this'
text = 'Does this text match the pattern?'
match = re.search(pattern, text)
start = match.start()
end = match.end()
print('Found "%s"\nin "%s"\nfrom %d to %d ("%s")' % (match.re.pattern, match.string, start, end, text[start:end]))
| [
"eroicaleo@gmail.com"
] | eroicaleo@gmail.com |
bc8ec7fa1aecc078fe811fbfae170f535f47fa6a | f22ca9aecda111a019502b462ce6772cb22d9425 | /test/test_order_abandoned.py | 02747404be9cb2f354e82036be14d89526568537 | [] | no_license | sivanv-unbxd/a2c-sdk-pim | cac05bc6335ddc3c4121d43e2dc476a6fec14965 | 51a07a0b7f90d74569ad14b47b174da7ac1fc374 | refs/heads/main | 2023-05-29T05:45:32.279821 | 2021-06-09T03:52:11 | 2021-06-09T03:52:11 | 375,218,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | # coding: utf-8
"""
Swagger API2Cart
API2Cart # noqa: E501
OpenAPI spec version: 1.1
Contact: contact@api2cart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.order_abandoned import OrderAbandoned # noqa: E501
from swagger_client.rest import ApiException
class TestOrderAbandoned(unittest.TestCase):
"""OrderAbandoned unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testOrderAbandoned(self):
"""Test OrderAbandoned"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.order_abandoned.OrderAbandoned() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"sivanv@unbxd.com"
] | sivanv@unbxd.com |
56df43e391086e06e386cee6ba4bd80b14b84822 | 921aa8e9f63ddd9e671b33e01fd18af1dafaffae | /keras/dtensor/__init__.py | bcc8b6e2c8db4d14482bec1d0bd5f90dfc108152 | [
"Apache-2.0"
] | permissive | Vishu26/keras | d116f875afb6c4ce5b7fbc783918463abfd9405a | c6fcaf79242ead384709973124069281f704b41d | refs/heads/master | 2022-05-29T06:24:56.299097 | 2022-03-11T10:37:09 | 2022-03-11T10:37:09 | 119,832,678 | 0 | 0 | null | 2018-02-01T12:25:54 | 2018-02-01T12:25:54 | null | UTF-8 | Python | false | false | 1,284 | py | # Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras' DTensor library."""
_DTENSOR_API_ENABLED = False
# Conditional import the dtensor API, since it is currently broken in OSS.
if _DTENSOR_API_ENABLED:
try:
# pylint: disable=g-direct-tensorflow-import, g-import-not-at-top
from tensorflow.dtensor import python as dtensor_api
except ImportError:
# TODO(b/222341036): Remove this conditional import after dtensor have a
# trimmed target that can be used by Keras.
dtensor_api = None
else:
# Leave it with a placeholder, so that the import line from other python file
# will not break.
dtensor_api = None
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
5fd9bfc0c389d764ee513da806b8852833cdbb96 | 07b75ac7273a92ddb617023a90667f9609250419 | /A0/slam_03_c_find_cylinders_question.py | a9a1842270878fddf91edb9a6d633df61b8b12ec | [] | no_license | khaledgabr77/SLAM-Learn | be151e7ec01b39d2ff894403f29705c3258354f5 | a6fc208dbbcabb1b8fd8164dcebc5a5b8d212424 | refs/heads/master | 2022-11-07T05:34:39.433870 | 2020-06-29T22:47:25 | 2020-06-29T22:47:25 | 275,672,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,054 | py | # For each cylinder in the scan, find its ray and depth.
# 03_c_find_cylinders
# Claus Brenner, 09 NOV 2012
from pylab import *
from lego_robot import *
# Find the derivative in scan data, ignoring invalid measurements.
def compute_derivative(scan, min_dist):
jumps = [ 0 ]
for i in range(1, len(scan) - 1):
l = scan[i-1]
r = scan[i+1]
if l > min_dist and r > min_dist:
derivative = (r - l) / 2.0
jumps.append(derivative)
else:
jumps.append(0)
jumps.append(0)
return jumps
# For each area between a left falling edge and a right rising edge,
# determine the average ray number and the average depth.
def find_cylinders(scan, scan_derivative, jump, min_dist):
cylinder_list = []
on_cylinder = False
sum_ray, sum_depth, rays = 0.0, 0.0, 0
discard = False
direction = "Left"
for i in range(len(scan_derivative)):
# --->>> Insert your cylinder code here.
# Whenever you find a cylinder, add a tuple
# (average_ray, average_depth) to the cylinder_list.
current_der = scan_derivative[i]
if abs(current_der) > jump:
if on_cylinder and direction == 'Left':
if current_der < 0: # Left again
discard = True
else:
on_cylinder = False
average_ray = sum_ray / rays
average_depth = sum_depth / rays
cylinder_list.append((average_ray, average_depth))
sum_ray, sum_depth, rays = 0.0, 0.0, 0
if not on_cylinder and current_der < 0:
on_cylinder = True
# if current_der > 0:
# direction = 'Right'
# elif current_der < 0:
# direction = 'Left'
direction = 'Left'
if scan[i] <= min_dist and not on_cylinder:
discard = True
if scan[i] <= min_dist and on_cylinder:
continue
if on_cylinder and scan[i] > min_dist:
rays += 1
sum_ray += i
sum_depth += scan[i]
if discard:
sum_ray, sum_depth, rays = 0.0, 0.0, 0
discard = False
# Just for fun, I'll output some cylinders.
# Replace this by your code.
return cylinder_list
if __name__ == '__main__':
minimum_valid_distance = 20.0
depth_jump = 100.0
# Read the logfile which contains all scans.
logfile = LegoLogfile()
logfile.read("robot4_scan.txt")
# Pick one scan.
scan = logfile.scan_data[8]
# Find cylinders.
der = compute_derivative(scan, minimum_valid_distance)
cylinders = find_cylinders(scan, der, depth_jump,
minimum_valid_distance)
# Plot results.
plot(scan)
scatter([c[0] for c in cylinders], [c[1] for c in cylinders],
c='r', s=200)
show()
| [
"khaledgabr77@gmail.com"
] | khaledgabr77@gmail.com |
7c1452fbcea938785de3c562f1d098d900b97550 | 8a166a0ad64efccb6231b2a351a65a54d6e0fa8e | /tests/test_quadApprox.py | eb8a76a20b9a819098ae632305cf43e3785c57d4 | [] | no_license | frossie-shadow/astshim | 6c29d584b5e6522e319f81672eb9512f0f62d8d0 | f6b1fd7af3e7147e4266d1bb44b9a8be5df29870 | refs/heads/master | 2021-01-19T17:50:13.479819 | 2017-08-03T17:56:29 | 2017-08-03T17:56:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | from __future__ import absolute_import, division, print_function
import unittest
import numpy as np
from numpy.testing import assert_allclose
import astshim
from astshim.test import MappingTestCase
class TestQuadApprox(MappingTestCase):
def test_QuadApprox(self):
# simple parabola
coeff_f = np.array([
[0.5, 1, 2, 0],
[0.5, 1, 0, 2],
], dtype=float)
polymap = astshim.PolyMap(coeff_f, 1)
qa = astshim.QuadApprox(polymap, [-1, -1], [1, 1], 3, 3)
self.assertAlmostEqual(qa.rms, 0)
self.assertEqual(len(qa.fit), 6)
assert_allclose(qa.fit, [0, 0, 0, 0, 0.5, 0.5])
if __name__ == "__main__":
unittest.main()
| [
"rowen@uw.edu"
] | rowen@uw.edu |
079c41f794ad86e3d9b2fc122f2516e676cdd849 | b059c2cf1e19932abb179ca3de74ced2759f6754 | /S20/用协程实现TCP server端/server端.py | 7b4194d0d3f898fdcf9dfcf1c9dce3b34b1472bd | [] | no_license | Lwk1071373366/zdh | a16e9cad478a64c36227419d324454dfb9c43fd9 | d41032b0edd7d96e147573a26d0e70f3d209dd84 | refs/heads/master | 2020-06-18T02:11:22.740239 | 2019-07-10T08:55:14 | 2019-07-10T08:55:14 | 196,130,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | # from gevent import monkey;monkey.patch_all()
# from socket import *
# import gevent
# def sever(ipport):
# s = socket(AF_INET,SOCK_STREAM)
# s.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
# s.bind(ipport)
# s.listen(5)
# while True:
# cnn, addr = s.accept()
# print('%s is from %s'%(cnn, addr))
# gevent.spawn(talk, cnn,addr)
# s.close()
# def talk(cnn,addr):
# while True:
# try:
# res = cnn.recv(1024).decode('utf-8')
# cnn.send(res.upper().encode('utf-8'))
# except Exception:break
# cnn.close()
#
# if __name__ == '__main__':
# ipport = ('127.0.0.1', 8080,)
# sever(ipport)
#
# sever
| [
"1071373366@qq.com"
] | 1071373366@qq.com |
33ab547fa528dc26b3e4296150c3b317e78c2b17 | a4ad068e96b772786e5eeb0bec027b759924cd12 | /chatapp/migrations/0001_initial.py | f939dede1ad22b16638cd88ed590b1da3951dcc8 | [] | no_license | kolamor/chatrest | 54f04459ec323df7d8a1603a3a91432d360b1b1d | f48084b14ecc516cff7acf80349113afca18ecb9 | refs/heads/master | 2020-04-03T18:04:58.808642 | 2018-11-04T23:35:53 | 2018-11-04T23:35:53 | 155,470,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,054 | py | # Generated by Django 2.1.2 on 2018-10-31 13:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Chat',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(max_length=500, verbose_name='Сообщения')),
('date', models.DateTimeField(auto_now_add=True, verbose_name='Дата отправки')),
],
options={
'verbose_name': 'Сообщении чата',
'verbose_name_plural': 'Сообщения чатов',
},
),
migrations.CreateModel(
name='Room',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(auto_now_add=True, verbose_name='Дата создания')),
('creater', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Комната чата')),
('invited', models.ManyToManyField(related_name='invated_user', to=settings.AUTH_USER_MODEL, verbose_name='Участники')),
],
),
migrations.AddField(
model_name='chat',
name='room',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='chatapp.Room', verbose_name='Комната чата'),
),
migrations.AddField(
model_name='chat',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Пользователь'),
),
]
| [
"kolamorev@mail.ru"
] | kolamorev@mail.ru |
41f5c58f77649619cdd25701bcfe33c729f7cb27 | c237dfae82e07e606ba9385b336af8173d01b251 | /lib/python/Products/ZCTextIndex/PipelineFactory.py | db26faffb6f4c1f06f8114d6fa52e1212d487bbf | [
"ZPL-2.0"
] | permissive | OS2World/APP-SERVER-Zope | 242e0eec294bfb1ac4e6fa715ed423dd2b3ea6ff | dedc799bd7eda913ffc45da43507abe2fa5113be | refs/heads/master | 2020-05-09T18:29:47.818789 | 2014-11-07T01:48:29 | 2014-11-07T01:48:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,755 | py | ##############################################################################
#
# Copyright (c) 2002 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
from Products.ZCTextIndex.IPipelineElementFactory \
import IPipelineElementFactory
class PipelineElementFactory:
__implements__ = IPipelineElementFactory
def __init__(self):
self._groups = {}
def registerFactory(self, group, name, factory):
if self._groups.has_key(group) and \
self._groups[group].has_key(name):
raise ValueError('ZCTextIndex lexicon element "%s" '
'already registered in group "%s"'
% (name, group))
elements = self._groups.get(group)
if elements is None:
elements = self._groups[group] = {}
elements[name] = factory
def getFactoryGroups(self):
groups = self._groups.keys()
groups.sort()
return groups
def getFactoryNames(self, group):
names = self._groups[group].keys()
names.sort()
return names
def instantiate(self, group, name):
factory = self._groups[group][name]
if factory is not None:
return factory()
element_factory = PipelineElementFactory()
| [
"martin@os2world.com"
] | martin@os2world.com |
d870899b9adaefd930cc8e8c6db22b73f7aedb2f | 73a0f661f1423d63e86489d4b2673f0103698aab | /python/oneflow/test/modules/test_global_atleast.py | 72a326ed1d97d4e680e99bd754eb71ec05aa9f58 | [
"Apache-2.0"
] | permissive | Oneflow-Inc/oneflow | 4fc3e081e45db0242a465c4330d8bcc8b21ee924 | 0aab78ea24d4b1c784c30c57d33ec69fe5605e4a | refs/heads/master | 2023-08-25T16:58:30.576596 | 2023-08-22T14:15:46 | 2023-08-22T14:15:46 | 81,634,683 | 5,495 | 786 | Apache-2.0 | 2023-09-14T09:44:31 | 2017-02-11T06:09:53 | C++ | UTF-8 | Python | false | false | 2,566 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
@autotest(n=1, check_graph=True)
def _test_atleast1d_with_random_data(test_case, placement, sbp):
x = random_tensor(ndim=1, dim0=8).to_global(placement, sbp)
y = random_tensor(ndim=2, dim0=8).to_global(placement, sbp)
out = torch.atleast_1d([x, y])
return out
@autotest(n=1, check_graph=True)
def _test_atleast2d_with_random_data(test_case, placement, sbp):
x = random_tensor(ndim=1, dim0=8).to_global(placement, sbp)
y = random_tensor(ndim=2, dim0=8).to_global(placement, sbp)
z = random_tensor(ndim=3, dim0=8).to_global(placement, sbp)
out = torch.atleast_2d([x, y, z])
return out
@autotest(n=1, check_graph=True)
def _test_atleast3d_with_random_data(test_case, placement, sbp):
x = random_tensor(ndim=1, dim0=8).to_global(placement, sbp)
y = random_tensor(ndim=2, dim0=8).to_global(placement, sbp)
z = random_tensor(ndim=3, dim0=8).to_global(placement, sbp)
p = random_tensor(ndim=4, dim0=8).to_global(placement, sbp)
out = torch.atleast_3d([x, y, z, p])
return out
class TestAtLeastModule(flow.unittest.TestCase):
@globaltest
def test_atleast1d_with_random_data(test_case):
for placement in all_placement():
for sbp in all_sbp(placement, max_dim=1):
_test_atleast1d_with_random_data(test_case, placement, sbp)
@globaltest
def test_atleast2d_with_random_data(test_case):
for placement in all_placement():
for sbp in all_sbp(placement, max_dim=1):
_test_atleast2d_with_random_data(test_case, placement, sbp)
@globaltest
def test_atleast3d_with_random_data(test_case):
for placement in all_placement():
for sbp in all_sbp(placement, max_dim=1):
_test_atleast3d_with_random_data(test_case, placement, sbp)
if __name__ == "__main__":
unittest.main()
| [
"noreply@github.com"
] | Oneflow-Inc.noreply@github.com |
24b9244f975fc9f704b84fc1daaf22d63692e52f | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_5/mznsha002/question4.py | 77e703fb5f51759144b184f439d7d5c00edb3207 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,537 | py | # 16 April 2014
# Shaun Muzenda
# Drawing a text based graph based on a user inputed function
import math
def main():
function = input("Enter a function f(x):\n") #asks the user for a given function
x = 0 #initial vaule of x set to 0
y = 0 #initial vaule of x set to 0
for rows in range(10,-11,-1): #the range for the y-axis
for column in range(-10,11,1): #the range for the x-axis
x = column
round_fx = round(eval(function)) #rounds the value of the given value
if round_fx == rows:
print("o", end="") #prints the plotted values as " o's "
if rows == 0 and column == 0 and not rows == round_fx:
print("+", end="")
if column == 0 and not rows == 0 and not rows == round_fx:
print("|", end="") #prints the y-axis using " |'s "
if rows == 0 and not column == 0 and not rows == round_fx:
print("-", end="") #prints the x-axis using " -'s "
else:
if not rows == 0:
if not column == 0:
if not rows == round_fx:
print(" ", end="") #leaves the unplotted parts of the screen blank
print() #prints the graph
main() | [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
1ac1cec482db208913214198ef20d3d5f9820694 | 88cdfe0809655f9e3fcd460558e861cc01d639a3 | /Scripts/split_data_frontal_ap.py | bb85d686c40632cf01e4bab0fde090fb795f263a | [] | no_license | aguilarmg/cs231n-finalproject | 36bc3d7e03512d64e20c84e3b8bc538894a826f1 | 8d13a7ce59938bde5f46b0006690157ba88305d8 | refs/heads/master | 2020-05-23T19:38:12.810170 | 2020-03-28T12:40:11 | 2020-03-28T12:40:11 | 186,917,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,373 | py | import csv
train_frontal_ap_labels = []
with open('../Data/csv_files/train_frontal_ap.csv', 'r') as f:
train_frontal_ap_labels = list(csv.reader(f))
num_batches = 5
size_of_batches = (len(train_frontal_ap_labels)-1) // 5
for idx in range(num_batches):
with open('../Data/csv_files/train_frontal_ap_'+str(idx)+'.csv', 'w') as fp:
writer = csv.writer(fp)
writer.writerow(train_frontal_ap_labels[0])
#print(len(train_frontal_ap_labels))
train_frontal_ap_labels.pop(0)
#print(len(train_frontal_ap_labels))
for idx, patient in enumerate(train_frontal_ap_labels):
if idx < size_of_batches:
with open('../Data/csv_files/train_frontal_ap_0.csv', 'a') as fp_0:
writer = csv.writer(fp_0)
writer.writerow(patient)
elif idx < size_of_batches*2:
with open('../Data/csv_files/train_frontal_ap_1.csv', 'a') as fp_1:
writer = csv.writer(fp_1)
writer.writerow(patient)
elif idx < size_of_batches*3:
with open('../Data/csv_files/train_frontal_ap_2.csv', 'a') as fp_2:
writer = csv.writer(fp_2)
writer.writerow(patient)
elif idx < size_of_batches*4:
with open('../Data/csv_files/train_frontal_ap_3.csv', 'a') as fp_3:
writer = csv.writer(fp_3)
writer.writerow(patient)
elif idx < size_of_batches*5:
with open('../Data/csv_files/train_frontal_ap_4.csv', 'a') as fp_4:
writer = csv.writer(fp_4)
writer.writerow(patient)
| [
"google-dl-platform@googlegroups.com"
] | google-dl-platform@googlegroups.com |
39d31f29a3f55b5250f1df043bf9e09c2ea6007e | 8a96b57301ae04d40a32aa194c7680dc853ff767 | /carts/migrations/0001_initial.py | 111ebb7816b3e38d018bc59c388b83fca28fe413 | [] | no_license | katalyzator/internetShop | 14b39f2b8402d30beeeb0b26f4fa108dfa3ddb09 | 3f740b63e481bd620c23124a973e657fd35a447f | refs/heads/master | 2020-09-26T09:09:29.995231 | 2016-08-22T09:00:32 | 2016-08-22T09:00:32 | 66,252,948 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 933 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-07-29 14:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('products', '0007_auto_20160728_1439'),
]
operations = [
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('total', models.DecimalField(decimal_places=2, default=0.0, max_digits=100)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('active', models.BooleanField(default=True)),
('products', models.ManyToManyField(blank=True, null=True, to='products.Product')),
],
),
]
| [
"web.coder96@gmail.com"
] | web.coder96@gmail.com |
154a721d1bf5981c9018b42e71a3432e7e4613b7 | 1ebe5a07e7f6260c2c2ceb6ca00dcf2a0341e544 | /op_impl/built-in/ai_core/tbe/impl/squared_difference.py | 3c61e8f9c8388ecf1d55dedb175f39176f47270c | [] | no_license | gekowa/ascend-opp | f5e09905336d85f9974d555d03d37a75cb8185c1 | 5c28a2faf9d2a117ea6f0923efe35fcd53904dd2 | refs/heads/master | 2023-04-09T12:14:40.337104 | 2021-04-19T23:00:59 | 2021-04-19T23:00:59 | 359,620,865 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,923 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
squared_difference
"""
import te.lang.cce
from te import tvm
from topi import generic
from topi.cce import util
from te.utils.op_utils import refine_shapes_for_broadcast
from te.utils.op_utils import *
SHAPE_SIZE_LIMIT = 2147483648
# pylint: disable=locally-disabled,too-many-locals,invalid-name
@check_op_params(REQUIRED_INPUT, REQUIRED_INPUT, REQUIRED_OUTPUT, KERNEL_NAME)
def squared_difference(x1, x2, y, kernel_name="squared_difference"):
"""
algorithm: squared_difference
calculating data's tf_squared_difference,y= (x - y) * (x - y)
Parameters
----------
x2 : dict
shape and dtype of y input, only support float16, float32
input_dy : dict
shape and dtype of dy input, only support float16, float32
output_x: dict
shape and dtype of output, should be same shape and type as input
kernel_name : str
cce kernel name, default value is squared_difference
Returns
-------
None
"""
shape_x = x1.get("shape")
shape_y = x2.get("shape")
check_shape(shape_x, param_name="x1")
check_shape(shape_y, param_name="x2")
check_list = ["float16", "float32", "int32"]
dtype = x1.get("dtype").lower()
if not dtype in check_list:
raise RuntimeError(
"tf_squared_difference_cce only support float16, float32, int32")
shape_x, shape_y, shape_max = broadcast_shapes(shape_x, shape_y, param_name_input1="x1", param_name_input2="x2")
shape_x, shape_y = refine_shapes_for_broadcast(shape_x, shape_y)
data_x = tvm.placeholder(shape_x, dtype=dtype, name="data_x")
data_y = tvm.placeholder(shape_y, dtype=dtype, name="data_y")
with tvm.target.cce():
shape_x, shape_y, shape_max = broadcast_shapes(shape_x, shape_y, param_name_input1="x1", param_name_input2="x2")
data_x_tmp = te.lang.cce.broadcast(data_x, shape_max)
data_y_tmp = te.lang.cce.broadcast(data_y, shape_max)
data_sub = te.lang.cce.vsub(data_x_tmp, data_y_tmp)
res = te.lang.cce.vmul(data_sub, data_sub)
sch = generic.auto_schedule(res)
config = {"print_ir": False,
"name": kernel_name,
"tensor_list": [data_x, data_y, res]}
te.lang.cce.cce_build_code(sch, config)
| [
"gekowa@gmail.com"
] | gekowa@gmail.com |
8b6857e0576004fd1ba9bedb211c06e86d3844e6 | c0631f0c4f02f2ed750ec4c48341f87885fbe4ff | /GRACE_Loading/grace_loading_driver.py | d906a0bf0d752aa5b3086ef63dbe64ad78016336 | [] | no_license | whigg/GRACE_loading | bdac4ee5a0087d219ef47813f87170d90bde446d | a5ef26c35854d30a3a0e61fd471675ac519d2e3d | refs/heads/master | 2023-03-21T11:00:34.187732 | 2021-03-11T23:00:46 | 2021-03-11T23:00:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,402 | py | #!/usr/bin/env python
"""
EVALUATE GRACE LOADS ON GPS STATIONS
Written by Kathryn Materna, 2017
This program takes all GRACE gravity loads (MASCON or TELLUS) within a certain distance from station,
and computes the loading effect from each cell.
You can choose:
-to compute loads on a PREM spherical earth structure.
-to use which GRACE solution (JPL, GFZ, CSR, Mascons).
-to use the scaling grid (1) or not use scaling grid (0).
Read in station information for your network: name, lon, lat, T1, T2.
For each station,
For each timestep from T1 to T2,
compute 3D loading displacement
"""
import argparse
from GRACE_Loading_Code import parse_configfile
from GRACE_Loading_Code import prem_earth
def welcome_and_parse():
print("\n\nWelcome to a forward modeling tool for calculating GRACE loading at GPS points. ");
parser = argparse.ArgumentParser(description='Run GRACE load models in Python',
epilog='\U0001f600 \U0001f600 \U0001f600 ');
parser.add_argument('configfile', type=str, help='name of config file for calculation. Required.')
args = parser.parse_args()
print("Config file:", args.configfile);
return args;
if __name__ == "__main__":
# The main driver
args = welcome_and_parse();
params = parse_configfile.configure_calc(args.configfile);
prem_earth.prem_earth_grace_timeseries(params);
| [
"kathrynmaterna@gmail.com"
] | kathrynmaterna@gmail.com |
d1c6dd3c5995200c255e74a3e1ba38467121f7af | d2cffc0a371f9e4d587951755c0eb370ca491d2a | /mod_repair_extended/_build_auto.py | af7c6c36ef4f8b58c68390bccfeaf7ed8a06e153 | [
"WTFPL"
] | permissive | Havenard/spoter-mods | 633d06fa6202a8bd390f642f4847f321b7d3bbb8 | a8c8c143d744d4fe5838fbd92e2f188f5dbffe62 | refs/heads/master | 2020-05-01T08:16:12.138415 | 2019-03-21T17:50:08 | 2019-03-21T17:50:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,245 | py | # -*- coding: utf-8 -*-
import codecs
import datetime
import glob
import json
import os
import re
import shutil
import subprocess
import base64
CLIENT_VERSION = '1.4.1.0'  # game-client version this package is built for (printed in the build summary)
BUILD = 'auto'  # source sub-directory under source/ to build from
NAME = 'spoter.repair_extended_auto'  # base name of the produced .wotmod archive
ADD_LICENSE = True  # if True, embed the decoded LICENSE file into the archive
class Build(object):
    """Builds the mod's .wotmod package (Python 2, Windows-only).

    Pipeline (all driven from __init__): clean the staging dir, read the
    VERSION json, stamp the version string into sources/meta/config/i18n,
    byte-compile the source, stage a res/ tree, then zip everything with
    7-Zip through a generated PowerShell script.
    """
    OUT_PATH = '.out'  # staging directory, recreated for every build
    PYC_PATH = os.path.join(OUT_PATH, 'res', 'scripts', 'client', 'gui', 'mods')
    BUILD_PATH = os.path.join('source', BUILD)
    VERSION = None  # dict loaded from source/<BUILD>/VERSION by readVersion()
    RELEASE = '%s.wotmod' % NAME  # archive name; version is appended in packWotmod()
    DATE = datetime.datetime.now().strftime("%Y-%m-%d")
    CONFIG_NAME = None
    def __init__(self):
        # The whole build runs as a side effect of construction.
        self.clear()
        if not os.path.exists('release'): subprocess.check_call(['powershell', 'mkdir', 'release'])
        self.readVersion()
        self.createFileDict()
        self.packWotmod()
        self.clear()
        print 'created: %s v%s (%s) to %s' % (self.RELEASE, self.VERSION["version"], self.DATE, CLIENT_VERSION)
    def clear(self):
        # Remove the staging directory; ignore_errors=True plus the except
        # makes this safe when it does not exist.
        try:
            shutil.rmtree(self.OUT_PATH, True)
        except OSError:
            pass
    def readVersion(self):
        # Load the VERSION file (JSON) describing source/meta/config/i18n paths.
        # NOTE(review): codecs.open already returns decoded text, so the extra
        # .decode('utf-8') is redundant (works on Py2 via implicit coercion);
        # the explicit close() inside the with-block is also redundant.
        filePath = os.path.join(self.BUILD_PATH, 'VERSION')
        with codecs.open(filePath, 'r', encoding='utf-8') as versionFile:
            data = versionFile.read().decode('utf-8')
            versionFile.close()
        self.VERSION = json.loads(data)
    def createFileDict(self):
        # Build a list of (file, marker, replacement-text) triples and apply
        # them, stamping the two-decimal version into every configured file.
        version = '{:.2f}'.format(float(self.VERSION["version"]))
        files = []
        if self.VERSION["source"]:
            files.append((os.path.join(self.BUILD_PATH, self.VERSION["source"]), 'self.version = ', "'v%s (%s)'" % (version, self.DATE)))
            files.append((os.path.join(self.BUILD_PATH, self.VERSION["source"]), 'self.version_id = ', re.sub('[.\s]', '', '%s' % version)))
        if self.VERSION["meta"]:
            files.append((os.path.join(self.BUILD_PATH, self.VERSION["meta"]), '<version>', '%s</version>' % version))
        if self.VERSION["config"]:
            files.append((os.path.join(self.BUILD_PATH, self.VERSION["config"]), '"version": ', re.sub('[.\s]', '', '%s' % version)))
        if self.VERSION["i18n"]:
            for path in glob.glob(os.path.join(self.BUILD_PATH, self.VERSION["i18n"], "*.json")):
                files.append((path, '"version": ', re.sub('[.\s]', '', '%s' % version)))
        for path in files:
            self.updateFiles(*path)
    def updateFiles(self, path, string, text):
        # Rewrite `path`: every line containing `string` is replaced by
        # "<prefix><string><text>"; all other lines are kept verbatim.
        # NOTE(review): the file is opened 'a+' for reading -- on some
        # platforms the position starts at EOF, so readlines() may return
        # nothing; confirm this behaves as intended on the build machine.
        with open(path, 'a+') as xfile:
            data = xfile.readlines()
            newData = []
            for line in data:
                if 'self.ids = ' in line:
                    self.configName = re.split('self.ids = ', line)[1]
                if string in line:
                    newData.append('%s%s%s\n' % (re.split(string, line)[0], string, text))
                    continue
                newData.append(line)
            xfile.close()
        with open(path, 'w') as xfile:
            xfile.writelines(newData)
            xfile.close()
    def packWotmod(self):
        # Stage res/, byte-compile the source, then write and run a
        # PowerShell script that zips everything with 7-Zip (-mx0 = store).
        self.RELEASE = '%s_%s.wotmod' % (NAME, '{:.2f}'.format(float(self.VERSION["version"])))
        subprocess.check_call(['powershell', 'mkdir', self.PYC_PATH])
        py = '%s' % os.path.join(self.BUILD_PATH, self.VERSION["source"])
        pyc = '%sc' % self.VERSION["source"]
        # Hand-built Windows path ('\\' separator) for the generated script.
        ps = '%s\%s' % (os.path.realpath(self.OUT_PATH), 'create-7zip.ps1')
        metaPath = '%s' % os.path.join(self.BUILD_PATH, os.path.dirname(self.VERSION["meta"]))
        metaFile = os.path.basename(self.VERSION["meta"])
        subprocess.check_call(['python', '-m', 'compileall', py])
        # robocopy /MOV moves the compiled .pyc into the staging res/ tree.
        subprocess.call('powershell robocopy %s %s %s /COPYALL /MOV' % (os.path.realpath(self.BUILD_PATH), os.path.realpath(self.PYC_PATH), pyc))
        subprocess.call('powershell robocopy %s %s %s /COPYALL' % (os.path.realpath(metaPath), os.path.realpath(self.OUT_PATH), metaFile))
        if self.VERSION["resources"]:
            for directory in self.VERSION["resources"]:
                if os.path.exists(os.path.join(self.BUILD_PATH, directory)):
                    subprocess.call('powershell robocopy %s %s /COPYALL /E' % (os.path.realpath(os.path.join(self.BUILD_PATH, directory)), os.path.realpath(os.path.join(self.OUT_PATH, directory))))
        with open(ps, 'w') as xfile:
            xfile.write('function create-7zip([String] $aDirectory, [String] $aZipfile){ [string]$pathToZipExe = "C:\Program Files\\7-zip\\7z.exe"; [Array]$arguments = "a", "-tzip", "-ssw", "-mx0", "$aZipfile", "$aDirectory"; & $pathToZipExe $arguments; }\n'
                        'create-7zip "%s" "%s"\n'
                        'create-7zip "%s" "%s"\n' % (os.path.realpath(os.path.join(self.OUT_PATH, 'res')), os.path.realpath(os.path.join('release', self.RELEASE)),
                                                     os.path.realpath(os.path.join(self.OUT_PATH, metaFile)), os.path.realpath(os.path.join('release', self.RELEASE))))
            if ADD_LICENSE:
                xfile.write('create-7zip "%s" "%s"\n' % (self.createLicense(), os.path.realpath(os.path.join('release', self.RELEASE))))
            xfile.close()
        subprocess.call('powershell -executionpolicy bypass -command "& {Set-ExecutionPolicy AllSigned; %s; Set-ExecutionPolicy Undefined}"' % ps)
    def createLicense(self):
        # Decode the embedded base64 WTFPL text into .out/LICENSE and return
        # its path so packWotmod() can add it to the archive.
        b64 = "DQogICAgICAgIERPIFdIQVQgVEhFIEZVQ0sgWU9VIFdBTlQgVE8gUFVCTElDIExJQ0VOU0UgDQogICAgICAgICAgICAgICAgICAgIFZlcnNpb24gMiwgRGVjZW1iZXIgMjAwNCANCg0KIENvcHlyaWdodCAoQykgMjAwNCBTYW0gSG9jZXZhciA8c2FtQGhvY2V2YXIubmV0PiANCg0KIEV2ZXJ5b25lIGlzIHBlcm1pdHRlZCB0byBjb3B5IGFuZCBkaXN0cmlidXRlIHZlcmJhdGltIG9yIG1vZGlmaWVkIA0KIGNvcGllcyBvZiB0aGlzIGxpY2Vuc2UgZG9jdW1lbnQsIGFuZCBjaGFuZ2luZyBpdCBpcyBhbGxvd2VkIGFzIGxvbmcgDQogYXMgdGhlIG5hbWUgaXMgY2hhbmdlZC4gDQoNCiAgICAgICAgICAgIERPIFdIQVQgVEhFIEZVQ0sgWU9VIFdBTlQgVE8gUFVCTElDIExJQ0VOU0UgDQogICBURVJNUyBBTkQgQ09ORElUSU9OUyBGT1IgQ09QWUlORywgRElTVFJJQlVUSU9OIEFORCBNT0RJRklDQVRJT04gDQoNCiAgMC4gWW91IGp1c3QgRE8gV0hBVCBUSEUgRlVDSyBZT1UgV0FOVCBUTy4NCg=="
        output_name = os.path.realpath(os.path.join(self.OUT_PATH, 'LICENSE'))
        data = base64.b64decode(b64)
        with open(output_name, "wb") as output_file:
            output_file.write(data)
            output_file.close()
        return output_name
# The build runs at import time: constructing Build() executes the pipeline.
build = Build()
| [
"spoter@mail.ru"
] | spoter@mail.ru |
59473fb7b22ba85c6e7dd56a89249f45908c5ce3 | b4ed708779cab2dc344ca9601ec0d879ab1b6f04 | /indra/tests/test_sif_assembler.py | 911892a93fd959cc6d35cc60252c4792374117b2 | [
"BSD-2-Clause"
] | permissive | budakn/INDRA | e360e17c3de9f2cf9e49f11f003fd2b18ae2cbfc | 393958b2ca7bc1ca5d054885c0634f434ff7496e | refs/heads/master | 2020-03-27T13:15:05.897555 | 2018-08-28T05:24:09 | 2018-08-28T16:31:16 | 146,599,146 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.assemblers import SifAssembler
from indra.statements import *
def test_simple_assembly():
    """Two statements over three agents assemble to three nodes, two edges."""
    statements = [
        Activation(Agent('a'), Agent('b')),
        Inhibition(Agent('a'), Agent('c')),
    ]
    assembler = SifAssembler(statements)
    assembler.make_model()
    assert len(assembler.graph.nodes()) == 3
    assert len(assembler.graph.edges()) == 2
def test_evidence_assembly():
    """Statements carrying Evidence still assemble; edge weights can then be
    derived from the supporting PMIDs."""
    ev1 = Evidence(pmid='1')
    ev2 = Evidence(pmid='2')
    ev3 = Evidence(pmid='3')
    # NOTE(review): this Evidence is constructed and immediately discarded in
    # the original test; kept for parity, but it exercises nothing.
    Evidence(pmid='4')
    statements = [
        Activation(Agent('a'), Agent('b'), evidence=[ev1]),
        Inhibition(Agent('a'), Agent('c'), evidence=[ev1, ev2, ev3]),
    ]
    assembler = SifAssembler(statements)
    assembler.make_model()
    assert len(assembler.graph.nodes()) == 3
    assert len(assembler.graph.edges()) == 2
    assembler.set_edge_weights('support_pmid')
def test_modification():
    """A phosphorylation statement yields one edge and a readable SIF file."""
    stmt = Phosphorylation(Agent('BRAF'), Agent('MAP2K1'), 'S', '222')
    assembler = SifAssembler([stmt])
    assembler.make_model(True, True, True)
    assert len(assembler.graph.nodes()) == 2
    assert len(assembler.graph.edges()) == 1
    assembler.save_model('test_sif.sif', True)
    with open('test_sif.sif', 'rb') as fh:
        txt = fh.read().decode('utf-8')
    assert txt == 'BRAF 0 MAP2K1\n', txt
| [
"ben.gyori@gmail.com"
] | ben.gyori@gmail.com |
91a2a9ce99746758276cc7f8bafd303e8bdff102 | 1539f86f91ce0ee6150fba7363976d32cd37ece2 | /codes_auto/1544.count-good-nodes-in-binary-tree.py | 4510f4a60ef07892c81e8453597a7c8dca9a27db | [] | no_license | zhpbo/LeetCode_By_Python | fdee0a8b7ea7ed1f61a99f0041e1c748e50f138c | 0017b9db891d36789116f7299d32510a373e68da | refs/heads/master | 2023-07-09T15:38:45.003002 | 2020-08-18T07:04:51 | 2020-08-18T07:04:51 | 281,598,190 | 0 | 0 | null | 2021-08-18T04:58:39 | 2020-07-22T06:47:05 | null | UTF-8 | Python | false | false | 1,169 | py | #
# @lc app=leetcode.cn id=1544 lang=python3
#
# [1544] count-good-nodes-in-binary-tree
#
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def goodNodes(self, root: 'TreeNode') -> int:
        """Count "good" nodes in a binary tree.

        A node is good when no node on the path from the root down to it has
        a strictly greater value (i.e. node.val >= max of the path).

        Fixes/improvements over the original:
        - the eager annotation ``root: TreeNode`` referenced a name that only
          exists in a comment, raising NameError at import; it is now a
          string (lazy) annotation;
        - the original collected every root-to-leaf path and rescanned it,
          deduplicating nodes in a set (O(n*h) time, O(n*h) memory); a single
          DFS carrying the running path maximum is O(n) time, O(h) stack.
        """
        def dfs(node, path_max):
            # path_max: largest value seen on the path root..parent.
            if node is None:
                return 0
            good = 1 if node.val >= path_max else 0
            new_max = max(path_max, node.val)
            return good + dfs(node.left, new_max) + dfs(node.right, new_max)

        if root is None:
            return 0
        # -inf makes the root itself always count as good.
        return dfs(root, float('-inf'))
# @lc code=end | [
"liuyang0001@outlook.com"
] | liuyang0001@outlook.com |
c9d6c6348c39a58c856ae3ef6191dcefa82ea589 | bc5dd7be84a43ec53f8e4215761badb9b61a13ad | /kurs_2/newadvito/advito/backend/gallery/models.py | e61ab108c73a146128033a86a8f56c4c15ceb878 | [] | no_license | MaximMak/DL_Academy_Lessons | ef4758be02e43954748031ac95c970077f71cd7e | 427576859657e88fd81683494397af3df920c674 | refs/heads/master | 2023-01-29T19:53:11.650096 | 2020-12-13T21:40:58 | 2020-12-13T21:40:58 | 276,397,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,070 | py | from django.db import models
import os
from PIL import Image
from django.db import models
from django.utils import timezone
def get_path_upload_image(file, now=None):
    """Build the storage path for an upload: photos/<YYYY-MM-DD>/<name>_<HH-MM-SS><ext>

    The base name is truncated to 10 characters to keep paths short.

    Fixes: the original ``file.split('.')[1]`` raised IndexError for names
    without a dot and silently dropped everything after the second dot;
    ``os.path.splitext`` handles both cases.

    :param file: original file name, e.g. "photo.jpg".
    :param now: optional datetime for the date/time parts; defaults to the
        current Django timezone-aware time. Exposed for testing.
    """
    if now is None:
        now = timezone.now()
    date = now.strftime("%Y-%m-%d")
    time = now.strftime("%H-%M-%S")
    # splitext keeps the last extension (with its dot); ext is "" if none.
    root, ext = os.path.splitext(file)
    if len(root) > 10:
        root = root[:10]
    file_name = root + '_' + time + ext
    return os.path.join('photos', date, file_name)
class Photo(models.Model):
    """A single uploaded photo, stored under a timestamped path."""
    name = models.CharField("Имя", max_length=50)
    image = models.ImageField("Фото", upload_to="gallery/")
    created = models.DateTimeField("Дата создания", auto_now_add=True)
    slug = models.SlugField("url", max_length=50, unique=True)

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        # Re-point the upload to the timestamped photos/<date>/ path before
        # the file is written.
        # NOTE(review): this renames on *every* save, so re-saving an
        # existing Photo re-prefixes the path again -- confirm whether
        # renaming should be limited to the first save.
        self.image.name = get_path_upload_image(self.image.name)
        super().save(*args, **kwargs)

    # NOTE(review): the original class defined a second, earlier save() that
    # tried to thumbnail the image, but it was immediately shadowed by the
    # save() above (duplicate method name) and referenced non-existent
    # attributes (image.get_path_upload_image(), self.avatar). It never ran
    # and has been removed; runtime behaviour is unchanged.

    class Meta:
        verbose_name = "Изображение"
        verbose_name_plural = "Изображения"
class Gallery(models.Model):
    """A named collection of Photo objects."""
    name = models.CharField("Имя", max_length=50)
    photos = models.ManyToManyField(Photo, verbose_name="Фотографии")
    created = models.DateTimeField("Дата создания", auto_now_add=True)
    slug = models.SlugField("url", max_length=50, unique=True)
    def __str__(self):
        return self.name
    class Meta:
        verbose_name = "Галерея"
        verbose_name_plural = "Галереи"
| [
"54116778+MaximMak@users.noreply.github.com"
] | 54116778+MaximMak@users.noreply.github.com |
ef9859b8ef969a947eaf6ee0be55db1b4e92c210 | f3fb672cee2919f5032fc8d1ac2e3444c6404ed2 | /Algorithms/GALE_EAST_WEST/Utilities/to_generate_data.py | fca25d77c60a36ae5c6efde884eaad539424a02b | [] | no_license | vivekaxl/Parallel | 62c1b642d3c0e653b8beff90308538f78d3c1900 | 0df147ac941c39dde9bbab05e07fc6342b46e84e | refs/heads/master | 2021-01-19T21:54:46.643017 | 2017-05-05T17:20:37 | 2017-05-05T17:20:37 | 88,720,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | def generate_data(problem, number_of_points):
dataset = []
while len(dataset) < number_of_points:
print "# ", len(dataset),
import sys
sys.stdout.flush()
temp_dataset = []
for run in xrange(number_of_points):
temp_dataset.append(problem.generateInput())
import itertools
dataset.sort()
dataset.extend(list(temp_dataset for temp_dataset,_ in itertools.groupby(temp_dataset)))
from random import shuffle
shuffle(dataset)
return dataset[:number_of_points]
| [
"vivekaxl@gmail.com"
] | vivekaxl@gmail.com |
87dd7aaa81fbd84def91cd66a834b7ce2e50409a | f8ffa8ff257266df3de9d20d95b291e393f88434 | /Python - advanced/zajecia15/02_plecak_req/api.py | 15d755d2cb022e1ee4d319b992aca8ee57999179 | [] | no_license | janiszewskibartlomiej/Python_Code_Me_Gda | c0583c068ef08b6130398ddf93c3a3d1a843b487 | 7568de2a9acf80bab1429bb55bafd89daad9b729 | refs/heads/master | 2020-03-30T05:06:26.757033 | 2020-03-02T08:53:28 | 2020-03-02T08:53:28 | 150,781,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | import json
from flask import Blueprint, request
from db import get_connection
# Blueprint grouping the backpack ("plecak") API endpoints.
api_bp = Blueprint('api_endpoints', __name__)
def _policz_sume(przedmioty):
    """Total backpack weight: sum of ilosc * waga over the given items,
    skipping items whose waga is missing/zero (falsy)."""
    return sum(p['ilosc'] * p['waga'] for p in przedmioty if p['waga'])
@api_bp.route('/przedmioty', methods=['GET'])
def przedmioty():
    """GET /przedmioty: return all backpack items plus their total weight as JSON."""
    conn = get_connection()
    c = conn.cursor()
    # NOTE(review): the route only allows GET, so this branch always runs; if
    # the check ever failed the view would implicitly return None. The
    # connection is also never closed -- confirm that is acceptable here.
    if request.method == 'GET':
        result = c.execute('SELECT * FROM plecak')
        przedmioty = result.fetchall()
        # repack row objects into plain dicts so they can be JSON-serialised
        przedmioty = [dict(p) for p in przedmioty]
        suma = _policz_sume(przedmioty)
        wynik = {'przedmioty': przedmioty,
                 'suma': suma}
        return json.dumps(wynik)
| [
"janiszewski.bartlomiej@gmail.com"
] | janiszewski.bartlomiej@gmail.com |
1afa40c15c84ae40bfde8729c259f3e988e61bdd | 55940b1d627768de8ac11387f60559bbb42047a0 | /stoploss.py | c9ac0fa624ee863e89d83f938e65ce3adb9968fe | [] | no_license | fengmm521/bitmextrade | 8920a28d03f406db18d2a5d3fd806b72fb319c2f | 400e3fcd6d1b70eaccad01eab6df2b3e8f674877 | refs/heads/master | 2021-05-03T04:26:10.993003 | 2018-05-26T13:17:22 | 2018-05-26T13:17:22 | 120,615,689 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,751 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# encoding: utf-8
#客户端调用,用于查看API返回结果
from OkcoinSpotAPI import OKCoinSpot
from OkcoinFutureAPI import OKCoinFuture
from magetool import urltool
import json
import sys
import os
import time
# Load the OKEx API credentials from a JSON file kept outside the repository.
f = open('../../btc/okexapikey/okexapikey.txt','r')
tmpstr = f.read()
f.close()
apikeydic = json.loads(tmpstr)
# Initialise apikey, secretkey and the REST endpoint.
apikey = apikeydic['apikey']
secretkey = apikeydic['secretkey']
okcoinRESTURL = 'www.okex.com'#'www.okcoin.com' # NOTE: mainland-China accounts should use www.okcoin.cn instead
def sayMsg(msg):
    # Speak the message aloud via the `say` shell command, then echo it.
    # NOTE(review): msg is interpolated into a shell command unquoted, so a
    # message containing shell metacharacters would be executed -- only call
    # this with trusted strings.
    cmd = 'say %s'%(msg)
    os.system(cmd)
    print msg
class TradeTool(object):
    """Thin wrapper around the OKEx quarterly LTC/USD futures REST API."""
    def __init__(self):
        self.okcoinFuture = OKCoinFuture(okcoinRESTURL,apikey,secretkey)
        self.depthSells = []
        self.depthBuys = []
    def getDepth(self):
        # Fetch the top-20 order book of the quarterly contract.
        # Returns (bids, asks) as lists of [price, amount] levels.
        turl = 'https://www.okex.com/api/v1/future_depth.do?symbol=ltc_usd&contract_type=quarter&size=20'
        data = urltool.getUrl(turl)
        ddic = json.loads(data)
        buys = ddic['bids']
        sells = ddic['asks']
        return buys,sells
    # Trade type codes below: 1 = open long, 2 = open short, 3 = close long,
    # 4 = close short. The trailing '0','10' are presumably the match-price
    # flag and 10x leverage -- confirm against OKCoinFuture.future_trade.
    def openShort(self,pprice,pamount):
        print ('期货开空')
        print time.ctime()
        print self.okcoinFuture.future_trade('ltc_usd','quarter',str(pprice),str(pamount),'2','0','10')
    def closeShort(self,pprice,pamount):
        print ('期货平空')
        print time.ctime()
        print self.okcoinFuture.future_trade('ltc_usd','quarter',str(pprice),str(pamount),'4','0','10')
    def openLong(self,pprice,pamount):
        print ('期货开多')
        print time.ctime()
        print self.okcoinFuture.future_trade('ltc_usd','quarter',str(pprice),str(pamount),'1','0','10')
    def closeLong(self,pprice,pamount):
        # NOTE(review): unlike the other three methods, no timestamp is
        # printed here -- probably an oversight.
        print ('期货平多')
        print self.okcoinFuture.future_trade('ltc_usd','quarter',str(pprice),str(pamount),'3','0','10')
def getBuyAndSell(tradetool):
    # Return (best bid price, best ask price), or (None, None) if the HTTP
    # call or parsing fails. Asks are reversed so that, assuming the API
    # lists them best-last, ss[0][0] is the lowest ask -- confirm ordering.
    try:
        bs,ss = tradetool.getDepth()
        ss = ss[::-1]
        return bs[0][0],ss[0][0]
    except Exception as e:
        return None,None
def main(ptype,cprice,amount):
    # Poll the order book every 5 minutes and fire a stop order once the
    # market crosses cprice: ptype 'cl' closes a long when the best bid rises
    # above cprice; 'cs' closes a short when the best ask falls below cprice.
    # Loops until the order has been placed.
    # NOTE(review): cprice must be numeric -- under Python 2 a string would
    # compare with the float quotes by type, not value; confirm the caller
    # converts it.
    tradetool = TradeTool()
    print 'is run'
    while True:
        b = None
        s = None
        if ptype == 'cl':
            b,s = getBuyAndSell(tradetool)
            if b and b > cprice:
                try:
                    tradetool.closeLong(cprice,amount)
                    break
                except Exception as e:
                    print 'closelong erro'
        elif ptype == 'cs':
            b,s = getBuyAndSell(tradetool)
            if s and s < cprice:
                try:
                    tradetool.closeShort(cprice,amount)
                    break
                except Exception as e:
                    print 'closeshort erro'
        else:
            # Unknown ptype: just log the quotes.
            # NOTE(review): time.ctime is printed without being called.
            print 'b=',b,',s=',s,',time=',time.ctime
        time.sleep(300) # check the stop price every 5 minutes
def test():
    # Read-only smoke test: print the 20-level book (asks reversed, then
    # bids) followed by the parsed (best bid, best ask) pair.
    tradetool = TradeTool()
    bs,ss = tradetool.getDepth()
    ss = ss[::-1]
    for s in ss:
        print s
    print '------'
    for b in bs:
        print b
    # NOTE(review): getBuyAndSell() fetches the depth a second time.
    print getBuyAndSell(tradetool)
if __name__ == '__main__':
args = sys.argv
if len(args) == 4:
ptype = args[1]
cprice = args[2]
camount = args[3]
print(ptype,cprice,camount)
if ptype and cprice and camount:
main(ptype,cprice,amount)
else:
test()
print '参数错误,要输入止损类型,数量和价格'
else:
test()
print '参数错误,要输入止损类型,数量和价格'
| [
"fengmm521@gmail.com"
] | fengmm521@gmail.com |
8d9f43f1f26d66d8a307fbfd9842cfa46d348dc8 | 685038d4be188fa72e9dba1d2213a47ee3aa00a2 | /ECOS2021/Sizing and Finding Critical Point/Outputs/BSA/Critical Point NPC.py | 65490fd14590236bdfce02ebdf623eb1a2105682 | [] | no_license | CIE-UMSS/Tradeoff-between-Installed-Capacity-and-Unserved-Energy | e5599e4e4ac60b97f0c4c57c5de95e493b1b5ac4 | 459f31552e3ab57a2e52167ab82f8f48558e173c | refs/heads/master | 2023-06-01T18:09:29.839747 | 2021-06-19T15:56:26 | 2021-06-19T15:56:26 | 343,720,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,368 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 1 20:49:49 2021
@author: alejandrosoto
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Load the sizing results, keep the LLC column (relabelled LLP) and the NPC
# column, and rescale NPC to thousands of USD.
BSA=pd.read_csv('BSA.csv', sep=',', decimal='.', encoding='latin1')
df=pd.concat([BSA['LLC'], BSA['NPC (USD)']], axis=1, keys=['LLP', 'NPC'])
df['NPC']=df['NPC']/1000
from scipy.optimize import curve_fit
def funcbsa(x, a, b, c, d, e):
    """Rational model fitted to the NPC-vs-LLP data: (a*x - b) / (c*x**e + d)."""
    numerator = a * x - b
    denominator = c * x ** e + d
    return numerator / denominator
# --- Fit the NPC(LLP) curve -------------------------------------------------
xdata = df['LLP']
ydata=df['NPC']
popt, pcov = curve_fit(funcbsa, xdata, ydata)
# Coefficient of determination (R^2) of the fit.
residuals = ydata- funcbsa(xdata, *popt)
ss_res = np.sum(residuals**2)
ss_tot = np.sum((ydata-np.mean(ydata))**2)
r_squared = 1 - (ss_res / ss_tot)
print(r_squared)
x_1 = df['LLP']
y_1 = df['NPC']
# NOTE(review): Symbol/Eq/nsolve/solve below come from sympy, but no sympy
# import appears in this file -- presumably a `from sympy import *` was lost.
a = Symbol('a')
j = Symbol('j')
x = Symbol('x')
from numpy import ones,vstack
from numpy.linalg import lstsq
# Secant line through the first and the 21st data points (least-squares fit).
points = [df.iloc[0],df.iloc[20]]
x_coords, y_coords = zip(*points)
A = vstack([x_coords,ones(len(x_coords))]).T
m, c = lstsq(A, y_coords)[0]
#print("Line Solution is y = {m}x + {c}".format(m=m,c=c))
y1=m*x+c
z1=np.array([m, c])
p1= np.poly1d(z1)
# Slope of the family of lines perpendicular to the secant.
m1=-1/m
#plt.show()
# Symbolic form of the fitted curve.
f=(popt[0]*x-popt[1])/(popt[2]*x**popt[4]+popt[3])
#eq1=Eq(l1-j)
eq3=Eq(f-j)
# critical points: intercepts of the perpendicular through the end points
print(df.NPC.iloc[20]-m1*df.LLP.iloc[20])
print(df.NPC.iloc[0]-m1*df.LLP.iloc[0])
# Iterative solver of the non-linear system: intersection of each
# perpendicular line with the fitted curve.
liminf=-0.5
limsup=1.8
r=list()
for a in np.arange(liminf,limsup,0.01):
    l1=m1*x+a
    z2=np.array([m1, a])
    p2=np.poly1d(z2)
    eq1=Eq(l1-j)
    eq3=Eq(f-j)
    sol1 = nsolve((eq1, eq3), (x,j), (0.0005, 1.1))
    r.append([sol1])
r=pd.DataFrame(r)
# Parse sympy's "[x, y]" string output back into float columns x1/y1.
r['sol'] = r[0].astype(str)
r[['x','y']] = r.sol.str.split(",",expand=True)
r[['g','g1','x1']] = r.x.str.split("[",expand=True)
del r['g']
del r['g1']
r[['x1','g1']] = r.x1.str.split("]",expand=True)
del r['g1']
r[['y1','g','g1']] = r.y.str.split("]",expand=True)
del r['g1']
del r['g']
r[['g','y2']] = r.y1.str.split("[",expand=True)
del r['g']
del r['y1']
del r['x']
del r['y']
del r[0]
del r['sol']
r = r.rename(columns={'y2': 'y1'})
r['x1'] = r['x1'].astype(float)
r['y1'] = r['y1'].astype(float)
r1=r
# Same secant line rebuilt (identical to the computation above).
points = [df.iloc[0],df.iloc[20]]
x_coords, y_coords = zip(*points)
A = vstack([x_coords,ones(len(x_coords))]).T
m, c = lstsq(A, y_coords)[0]
#print("Line Solution is y = {m}x + {c}".format(m=m,c=c))
y1=m*x+c
z1=np.array([m, c])
p1= np.poly1d(z1)
# Iterative solver of the linear system: intersection of each perpendicular
# line with the secant.
r=list()
for a in np.arange(liminf,limsup,0.01):
    l1=m1*x+a
    z2=np.array([m1, a])
    p2=np.poly1d(z2)
    eq1=Eq(l1-j)
    sol = solve((l1-j, y1-j),(x, j))
    x1_1=float(sol[x])
    y1_1=float(sol[j])
    r.append([sol])
r=pd.DataFrame(r)
# Parse the solver's "{x: ..., j: ...}" dict-string into float columns x/y.
r['sol'] = r[0].astype(str)
r[['x','y']] = r.sol.str.split(",",expand=True)
r[['g','x1']] = r.x.str.split(":",expand=True)
del r['g']
r[['g1','y1']] = r.y.str.split(":",expand=True)
del r['g1']
r[['y1','g2']] = r.y1.str.split("}",expand=True)
del r['g2']
del r['sol']
del r[0]
del r['x']
del r['y']
r = r.rename(columns={'x1': 'x', 'y1': 'y'})
r['x'] = r['x'].astype(float)
r['y'] = r['y'].astype(float)
#print(r)
# Distance between the two intersection points of each perpendicular; the
# maximum distance marks the knee ("critical point") of the NPC-LLP curve.
rt = pd.concat([r, r1], axis=1, join='inner')
rt['step']=np.arange(liminf,limsup,0.01)
rt['d']=((rt['x']-rt['x1'])**2+(rt['y']-rt['y1'])**2)**0.5
print('x de d max:',rt['x1'].iloc[rt['d'].idxmax()])
print('y de d max:',rt['y1'].iloc[rt['d'].idxmax()])
print('Distancia Máxima',rt['d'].max())
a=rt['step'].iloc[rt['d'].idxmax()]
l1=m1*x+a
z2=np.array([m1, a])
p2=np.poly1d(z2)
#plt.show()
BSAf=popt
BSAr2=r_squared
BSAx=rt['x1'].iloc[rt['d'].idxmax()]
BSAy=rt['y1'].iloc[rt['d'].idxmax()]
# --- Plot: data, fitted curve, secant, max-distance perpendicular ----------
plt.figure(figsize=(10,6.7))
xp = np.linspace(0,1, 100)
_ = plt.plot(x_1, y_1, '.',label='data', color='blue')
o= plt.plot(xp, funcbsa(xp,*popt), '--', label='fit', color='green')
o1=plt.plot(xp, p1(xp), '-', label='secant', color='red')
_=plt.plot(xp, p2(xp), '-', label='distance', color='black')
plt.plot(rt['x1'].iloc[rt['d'].idxmax()], rt['y1'].iloc[rt['d'].idxmax()], marker='o', markersize=3, color="green")
#plt.plot(x_1, y_1, '-')
plt.plot(BSAx,BSAy, marker='o', markersize=5, color="red", label='critical point')
# real scale
plt.ylabel('NPC [Thousand USD]')
plt.xlabel('LLP')
plt.axis('scaled')
plt.legend()
#plt.savefig('critical point1.png',dpi=600,bbox_inches="tight")
#plt.show()
plt.show()
# Results
print('R2=',r_squared)
print('parameters=',popt)
print('critical point=',BSAx)
print('critical point=',BSAx)
| [
"asm19971997@gmail.com"
] | asm19971997@gmail.com |
d8b77b98859c658e649402c87b8854c6c7db676d | e14360f5001f865824206e54de6294b57b4ada48 | /vendor/migrations/0003_userdetails.py | 22c9de593da9d4692db694d28274dd69682660e2 | [] | no_license | adnankattekaden/thatsmestore | cb2b038f7b7e5b0f1487a6cce94b76c354e60073 | 034c05285370594f34b8ae87069029dfc5041765 | refs/heads/master | 2023-02-27T06:56:35.923684 | 2021-01-31T13:51:04 | 2021-01-31T13:51:04 | 311,295,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | # Generated by Django 3.1.2 on 2020-11-10 11:15
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vendor', '0002_product_image'),
]
operations = [
migrations.CreateModel(
name='Userdetails',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, null=True, upload_to='userdata/images')),
],
),
]
| [
"adnankattekaden2020@gmail.com"
] | adnankattekaden2020@gmail.com |
3076a77160acd99a7f01575554874a512bd08b22 | 3fbd28e72606e5358328bfe4b99eb0349ca6a54f | /.history/a_expressions_20210608021929.py | 35da28d21e88e58f0ca2df1b44a62dda7901f6a1 | [] | no_license | Tarun1001/codeforces | f0a2ef618fbd45e3cdda3fa961e249248ca56fdb | 576b505d4b8b8652a3f116f32d8d7cda4a6644a1 | refs/heads/master | 2023-05-13T04:50:01.780931 | 2021-06-07T21:35:26 | 2021-06-07T21:35:26 | 374,399,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | a= int(input())
b= int(input())
c= int(input())
if(a==1 and c==1):
return 2+b
elif(a==1):
return (a+b)*c
elif(b==1):
return max((a+b)*c,a*(b+c))
elif(c==1):
return a*(b+c)
else:
return a*b*c | [
"tarunsivasai8@gmail.com"
] | tarunsivasai8@gmail.com |
34555dc9dc68a57a4729abaf8e0a07d35f25ae21 | 6a8d047b4502507c67120a0a32640c6a3e60d8a5 | /apps/accounts/factories.py | 89d2759f6901b775f25b65129ebcea660fc7e0f4 | [] | no_license | dwebdevcore/BoardDirector_dashboard | 320f110d7581c065920b7607ef06a457851c4bb4 | 7cd2b2abe1c660531a805d84930c8a6183b863b6 | refs/heads/master | 2020-05-26T05:32:37.501642 | 2019-05-22T22:33:25 | 2019-05-22T22:33:25 | 188,122,429 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | # -*- coding: utf-8 -*-
import factory
from django.template.defaultfilters import slugify
from accounts.models import Account
from billing.models import Plan
class AccountFactory(factory.DjangoModelFactory):
    """Factory producing Account fixtures with a unique name/url per sequence."""
    class Meta:
        model = Account
    @factory.lazy_attribute_sequence
    def name(self, n):
        # Unique company name per factory sequence number.
        return 'test company {0}'.format(n)
    @factory.lazy_attribute
    def url(self):
        # Slug of the name, truncated to its last 25 characters to fit the field.
        return slugify(self.name)[-25:]
    @factory.lazy_attribute
    def plan(self):
        # Every account starts on the default billing plan.
        return Plan.objects.get(name=Plan.DEFAULT_PLAN)
| [
"dwebdevcore@gmail.com"
] | dwebdevcore@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.