hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
248
| max_stars_repo_name
stringlengths 5
125
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
248
| max_issues_repo_name
stringlengths 5
125
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
248
| max_forks_repo_name
stringlengths 5
125
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 5
2.06M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.03M
| alphanum_fraction
float64 0
1
| count_classes
int64 0
1.6M
| score_classes
float64 0
1
| count_generators
int64 0
651k
| score_generators
float64 0
1
| count_decorators
int64 0
990k
| score_decorators
float64 0
1
| count_async_functions
int64 0
235k
| score_async_functions
float64 0
1
| count_documentation
int64 0
1.04M
| score_documentation
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f718b4fadc70811185014ceea7a2ac977f84aa08
| 1,472
|
py
|
Python
|
src/server/core/tests/test_config.py
|
Freshia/masakhane-web
|
acf5eaef7ab8109d6f10f212765572a1dc893cd5
|
[
"MIT"
] | 20
|
2021-04-09T09:08:53.000Z
|
2022-03-16T09:45:36.000Z
|
src/server/core/tests/test_config.py
|
Freshia/masakhane-web
|
acf5eaef7ab8109d6f10f212765572a1dc893cd5
|
[
"MIT"
] | 15
|
2021-04-19T07:04:56.000Z
|
2022-03-12T00:57:44.000Z
|
src/server/core/tests/test_config.py
|
Freshia/masakhane-web
|
acf5eaef7ab8109d6f10f212765572a1dc893cd5
|
[
"MIT"
] | 14
|
2021-04-19T04:39:04.000Z
|
2021-10-08T22:19:58.000Z
|
import os
import unittest
from flask import current_app
from flask_testing import TestCase
from core import masakhane
class TestDevelopmentConfig(TestCase):
    """Checks the settings loaded from core.config.DevelopmentConfig."""

    def create_app(self):
        # flask-testing hook: configure the shared app object and return it.
        masakhane.config.from_object('core.config.DevelopmentConfig')
        return masakhane

    def test_app_is_development(self):
        # assertEqual reports both values on failure, unlike assertTrue(a == b).
        self.assertEqual(masakhane.config['SECRET_KEY'], "super-secret-key")
        self.assertIsNotNone(current_app)
        self.assertEqual(
            masakhane.config['SQLALCHEMY_DATABASE_URI'],
            os.getenv('DATABASE_TEST_URL', "sqlite:///masakhane.db"))
class TestTestingConfig(TestCase):
    """Checks the settings used for the testing environment.

    NOTE(review): this loads core.config.StagingConfig even though the class
    and assertions are about "testing" — looks deliberate in this project,
    but worth confirming against core/config.py.
    """

    def create_app(self):
        masakhane.config.from_object('core.config.StagingConfig')
        return masakhane

    def test_app_is_testing(self):
        # assertEqual reports both values on failure, unlike assertTrue(a == b).
        self.assertEqual(masakhane.config['SECRET_KEY'], "key_testing")
        self.assertTrue(masakhane.config['TESTING'])
        self.assertEqual(
            masakhane.config['SQLALCHEMY_DATABASE_URI'],
            os.getenv('DATABASE_TEST_URL', "sqlite:///masakhane.db"))
class TestProductionConfig(TestCase):
    """Checks the settings loaded from core.config.ProductionConfig."""

    def create_app(self):
        masakhane.config.from_object('core.config.ProductionConfig')
        return masakhane

    def test_app_is_production(self):
        # assertEqual reports both values on failure, unlike assertTrue(a == b).
        self.assertEqual(masakhane.config['SECRET_KEY'], "key_production")
        self.assertFalse(masakhane.config['TESTING'])
# Allow running this test module directly with `python test_config.py`.
if __name__ == '__main__':
    unittest.main()
| 32
| 77
| 0.688179
| 1,298
| 0.881793
| 0
| 0
| 0
| 0
| 0
| 0
| 335
| 0.227582
|
f719bed52604d78cd372c38b0ba41bc4f013d7b2
| 311
|
py
|
Python
|
routes/show_bp.py
|
Silve1ra/fyyur
|
580562cc592d587c9bed4f080b856664abb9f70d
|
[
"MIT"
] | 1
|
2021-09-17T11:56:38.000Z
|
2021-09-17T11:56:38.000Z
|
routes/show_bp.py
|
Silve1ra/fyyur
|
580562cc592d587c9bed4f080b856664abb9f70d
|
[
"MIT"
] | null | null | null |
routes/show_bp.py
|
Silve1ra/fyyur
|
580562cc592d587c9bed4f080b856664abb9f70d
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
from controllers.show import shows, create_shows, create_show_submission
# Blueprint for the "shows" pages; view callables live in controllers.show.
show_bp = Blueprint('show_bp', __name__)

# Register each (rule, methods, view) triple.  Calling route(...)(view) is
# exactly equivalent to stacking the decorator on the controller function.
for _rule, _methods, _view in (
        ('/', ['GET'], shows),
        ('/create', ['GET'], create_shows),
        ('/create', ['POST'], create_show_submission),
):
    show_bp.route(_rule, methods=_methods)(_view)
| 31.1
| 72
| 0.762058
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 46
| 0.14791
|
f719f32c0de53ae35c0223c63678dbad415c2f11
| 22
|
py
|
Python
|
__init__.py
|
andy-96/GFPGAN
|
0ed1214760170cc27fdfd60da1f64a0699a28cf4
|
[
"BSD-3-Clause"
] | null | null | null |
__init__.py
|
andy-96/GFPGAN
|
0ed1214760170cc27fdfd60da1f64a0699a28cf4
|
[
"BSD-3-Clause"
] | null | null | null |
__init__.py
|
andy-96/GFPGAN
|
0ed1214760170cc27fdfd60da1f64a0699a28cf4
|
[
"BSD-3-Clause"
] | null | null | null |
from .gfpgan import *
| 11
| 21
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f71a808666b13ce290442e22bb59d1788d36b370
| 1,950
|
py
|
Python
|
tools/find_run_binary.py
|
pospx/external_skia
|
7a135275c9fc2a4b3cbdcf9a96e7102724752234
|
[
"BSD-3-Clause"
] | 6,304
|
2015-01-05T23:45:12.000Z
|
2022-03-31T09:48:13.000Z
|
third_party/skia/tools/find_run_binary.py
|
w4454962/miniblink49
|
b294b6eacb3333659bf7b94d670d96edeeba14c0
|
[
"Apache-2.0"
] | 459
|
2016-09-29T00:51:38.000Z
|
2022-03-07T14:37:46.000Z
|
third_party/skia/tools/find_run_binary.py
|
w4454962/miniblink49
|
b294b6eacb3333659bf7b94d670d96edeeba14c0
|
[
"Apache-2.0"
] | 1,231
|
2015-01-05T03:17:39.000Z
|
2022-03-31T22:54:58.000Z
|
#!/usr/bin/python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module that finds and runs a binary by looking in the likely locations."""
import os
import subprocess
import sys
def run_command(args):
    """Runs a program from the command line and returns stdout.

    Args:
      args: Command line to run, as a list of string parameters. args[0] is the
        binary to run.

    Returns:
      stdout from the program, as a single (bytes) string.

    Raises:
      Exception: the program exited with a nonzero return code.
    """
    proc = subprocess.Popen(args,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    (stdout, stderr) = proc.communicate()
    # Bug fix: the original used `is not 0`, which compares object identity
    # rather than value; it only worked via CPython's small-int caching and
    # is a SyntaxWarning on Python 3.8+.
    if proc.returncode != 0:
        raise Exception('command "%s" failed: %s' % (args, stderr))
    return stdout
def find_path_to_program(program):
    """Returns path to an existing program binary.

    Args:
      program: Basename of the program to find (e.g., 'render_pictures').

    Returns:
      Absolute path to the program binary, as a string.

    Raises:
      Exception: unable to find the program binary.
    """
    # The trunk is the parent of the directory containing this script.
    trunk_path = os.path.abspath(
        os.path.join(os.path.dirname(__file__), os.pardir))
    # Search order matches the original: bare names in Release then Debug,
    # followed by the Windows '.exe' variants in the same pair of dirs.
    possible_paths = []
    for suffix in ('', '.exe'):
        for build_type in ('Release', 'Debug'):
            possible_paths.append(
                os.path.join(trunk_path, 'out', build_type, program + suffix))
    for candidate in possible_paths:
        if os.path.isfile(candidate):
            return candidate
    raise Exception('cannot find %s in paths %s; maybe you need to '
                    'build %s?' % (program, possible_paths, program))
| 31.451613
| 77
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 976
| 0.500513
|
f71b20c6a58525d0ad6e5a5b0ad92dbbdf9f5849
| 1,599
|
py
|
Python
|
user/tests.py
|
Vr3n/django_react_cart_system
|
f6d2572b640f711ff9c7020641051e3f92c3dd59
|
[
"MIT"
] | null | null | null |
user/tests.py
|
Vr3n/django_react_cart_system
|
f6d2572b640f711ff9c7020641051e3f92c3dd59
|
[
"MIT"
] | 3
|
2021-06-18T15:13:46.000Z
|
2021-06-18T18:24:43.000Z
|
user/tests.py
|
Vr3n/django_react_cart_system
|
f6d2572b640f711ff9c7020641051e3f92c3dd59
|
[
"MIT"
] | null | null | null |
from django.contrib.auth import get_user_model
from django.test import TestCase
# Create your tests here.
class UserManagersTests(TestCase):
    """Exercises the custom user manager's create_user / create_superuser."""

    def test_create_user(self):
        user_model = get_user_model()
        regular = user_model.objects.create_user(
            email="normal@user.com", password="testing@123")
        self.assertEqual(regular.email, 'normal@user.com')
        # A plain user is active but has no staff/superuser rights.
        self.assertTrue(regular.is_active)
        self.assertFalse(regular.is_staff)
        self.assertFalse(regular.is_superuser)
        try:
            # Some custom user models drop `username`; tolerate its absence.
            self.assertIsNotNone(regular.username)
            self.assertIsNotNone(regular.email)
        except AttributeError:
            pass
        with self.assertRaises(TypeError):
            user_model.objects.create_user()
        with self.assertRaises(TypeError):
            user_model.objects.create_user(email='')
        with self.assertRaises(ValueError):
            user_model.objects.create_user(email='', password="testing@123")

    def test_create_superuser(self):
        user_model = get_user_model()
        root = user_model.objects.create_superuser(
            email="admin@user.com", password="testing@123")
        self.assertEqual(root.email, 'admin@user.com')
        # A superuser carries every privilege flag.
        self.assertTrue(root.is_active)
        self.assertTrue(root.is_staff)
        self.assertTrue(root.is_superuser)
        try:
            self.assertIsNotNone(root.username)
            self.assertIsNotNone(root.email)
        except AttributeError:
            pass
        with self.assertRaises(ValueError):
            user_model.objects.create_user(
                email='', password="testing@123", is_superuser=False)
| 34.76087
| 70
| 0.642276
| 1,489
| 0.931207
| 0
| 0
| 0
| 0
| 0
| 0
| 149
| 0.093183
|
f71b9e37908dd5da30752301903bfc85504aa496
| 728
|
py
|
Python
|
Examples/AcceptAllRevisions.py
|
aspose-words-cloud/aspose-words-cloud-python
|
65c7b55fa4aac69b60d41e7f54aed231df285479
|
[
"MIT"
] | 14
|
2018-07-15T17:01:52.000Z
|
2018-11-29T06:15:33.000Z
|
Examples/AcceptAllRevisions.py
|
aspose-words-cloud/aspose-words-cloud-python
|
65c7b55fa4aac69b60d41e7f54aed231df285479
|
[
"MIT"
] | 1
|
2018-09-28T12:59:34.000Z
|
2019-10-08T08:42:59.000Z
|
Examples/AcceptAllRevisions.py
|
aspose-words-cloud/aspose-words-cloud-python
|
65c7b55fa4aac69b60d41e7f54aed231df285479
|
[
"MIT"
] | 2
|
2020-12-21T07:59:17.000Z
|
2022-02-16T21:41:25.000Z
|
import os
import asposewordscloud
import asposewordscloud.models.requests
from asposewordscloud.rest import ApiException
from shutil import copyfile
# Bug fix: WordsApi lives in the asposewordscloud package namespace and was
# never imported by name, so the bare reference raised NameError.
words_api = asposewordscloud.WordsApi(
    client_id='####-####-####-####-####',
    client_secret='##################')
file_name = 'test_doc.docx'

# Upload original document to cloud storage.
# `with` closes the handle even if the upload fails (the original leaked it).
with open(file_name, 'rb') as document_stream:
    upload_file_request = asposewordscloud.models.requests.UploadFileRequest(
        file_content=document_stream, path=file_name)
    words_api.upload_file(upload_file_request)

# Calls AcceptAllRevisions method for document in cloud.
request = asposewordscloud.models.requests.AcceptAllRevisionsRequest(
    name=file_name)
words_api.accept_all_revisions(request)
| 38.315789
| 108
| 0.787088
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 165
| 0.226648
|
f71c77d1c0f627d4c0d8120689ae89c7e1a43d86
| 2,577
|
py
|
Python
|
agogosml_cli/cli/templates/{{cookiecutter.PROJECT_NAME_SLUG}}/e2e/testgen/main.py
|
cicorias/agogosml
|
60e0b52c2fc721bdd965aadaf8c1afd1ddb9b7d1
|
[
"MIT"
] | 13
|
2018-12-07T21:02:20.000Z
|
2019-02-22T14:36:31.000Z
|
agogosml_cli/cli/templates/{{cookiecutter.PROJECT_NAME_SLUG}}/e2e/testgen/main.py
|
cicorias/agogosml
|
60e0b52c2fc721bdd965aadaf8c1afd1ddb9b7d1
|
[
"MIT"
] | 43
|
2018-11-30T11:31:43.000Z
|
2019-04-03T16:09:06.000Z
|
agogosml_cli/cli/templates/{{cookiecutter.PROJECT_NAME_SLUG}}/e2e/testgen/main.py
|
cicorias/agogosml
|
60e0b52c2fc721bdd965aadaf8c1afd1ddb9b7d1
|
[
"MIT"
] | 13
|
2018-11-29T00:31:29.000Z
|
2019-02-22T18:50:28.000Z
|
import json
import os
import sys
import time
from agogosml.common.abstract_streaming_client import find_streaming_clients
from agogosml.tools.sender import send
from agogosml.tools.receiver import receive
# Shared Event Hub connection settings; the *_INPUT env vars point at the
# input-side hub used to inject test messages.
eh_base_config = {
    "EVENT_HUB_NAMESPACE": os.getenv("EVENT_HUB_NAMESPACE"),
    "EVENT_HUB_NAME": os.getenv("EVENT_HUB_NAME_INPUT"),
    "EVENT_HUB_SAS_POLICY": os.getenv("EVENT_HUB_SAS_POLICY"),
    "EVENT_HUB_SAS_KEY": os.getenv("EVENT_HUB_SAS_KEY_INPUT"),
}
# Sender additionally names the input-side lease container.
eh_send_config = {
    **eh_base_config,
    'LEASE_CONTAINER_NAME': os.getenv('LEASE_CONTAINER_NAME_INPUT')
}
# Receiver needs storage-account checkpointing, a consumer group, and a
# fixed 10s timeout so the e2e run terminates.
eh_receive_config = {
    **eh_base_config,
    "AZURE_STORAGE_ACCOUNT": os.getenv("AZURE_STORAGE_ACCOUNT"),
    "AZURE_STORAGE_ACCESS_KEY": os.getenv("AZURE_STORAGE_ACCESS_KEY"),
    "LEASE_CONTAINER_NAME": os.getenv("LEASE_CONTAINER_NAME_OUTPUT"),
    "EVENT_HUB_CONSUMER_GROUP": os.getenv("EVENT_HUB_CONSUMER_GROUP"),
    "TIMEOUT": 10,
}
# Kafka settings; the last two entries only apply when Kafka is fronted by
# the Event-Hub-for-Kafka head.
kafka_base_config = {
    'KAFKA_ADDRESS': os.getenv("KAFKA_ADDRESS"),
    'TIMEOUT': os.getenv('KAFKA_TIMEOUT'),
    # These configs are specific to Event Hub Head for Kafka
    'EVENTHUB_KAFKA_CONNECTION_STRING': os.getenv('EVENTHUB_KAFKA_CONNECTION_STRING'),
    'SSL_CERT_LOCATION': os.getenv('SSL_CERT_LOCATION')  # /usr/local/etc/openssl/cert.pem
}
kafka_receive_config = {
    **kafka_base_config,
    'KAFKA_CONSUMER_GROUP': os.getenv('KAFKA_CONSUMER_GROUP'),
}
kafka_send_config = {
    **kafka_base_config,
    'KAFKA_TOPIC': os.getenv('KAFKA_TOPIC_INPUT')
}
def put_messages_on_input_queue(msg_type: str):
    """Load test_messages.json and send it via the client for `msg_type`."""
    with open('test_messages.json', encoding='utf-8') as handle:
        payloads = json.load(handle)
    client = find_streaming_clients()[msg_type]
    # Both EH and Kafka configs are merged regardless of msg_type, matching
    # the original behavior; unused keys are presumably ignored downstream.
    send(payloads, client, dict(eh_send_config, **kafka_send_config))
def receive_messages_on_queue(kafka_topic: str, msg_type: str):
    """Drain the topic named by env var `kafka_topic`; echo to stdout and return."""
    client = find_streaming_clients()[msg_type]
    merged_config = {**eh_receive_config, **kafka_receive_config,
                     'KAFKA_TOPIC': os.getenv(kafka_topic)}
    return receive(sys.stdout, client, merged_config)
def cli():
    """End-to-end check: inject test messages, then verify output appears.

    Exits 0 when the output topic yields anything, 1 when it is empty.
    """
    msg_type = os.getenv("MESSAGING_TYPE")
    put_messages_on_input_queue(msg_type)
    time.sleep(3)
    print(receive_messages_on_queue('KAFKA_TOPIC_INPUT', msg_type))
    # Give the pipeline time to process before polling the output topic.
    time.sleep(20)
    output_received = receive_messages_on_queue('KAFKA_TOPIC_OUTPUT', msg_type)
    print(output_received)
    sys.exit(1 if output_received == "[]" else 0)
# Entry point when run as a script inside the e2e test container.
if __name__ == "__main__":
    cli()
| 28.955056
| 109
| 0.73962
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 862
| 0.334497
|
f71c9a76519602baf175d90363655dc76c65ea28
| 512
|
py
|
Python
|
MobileRevelator/python/postbank_finanzassistent_decrypt.py
|
ohunecker/MR
|
b0c93436c7964d87a0b8154f8b7662b1731124b9
|
[
"MIT"
] | 98
|
2019-02-03T22:50:24.000Z
|
2022-03-17T12:50:56.000Z
|
MobileRevelator/python/postbank_finanzassistent_decrypt.py
|
cewatkins/MR
|
5ba553fd0eb4c1d80842074a553119486f005822
|
[
"MIT"
] | 10
|
2019-03-14T20:12:10.000Z
|
2020-05-23T10:37:54.000Z
|
MobileRevelator/python/postbank_finanzassistent_decrypt.py
|
cewatkins/MR
|
5ba553fd0eb4c1d80842074a553119486f005822
|
[
"MIT"
] | 30
|
2019-02-03T22:50:27.000Z
|
2022-03-30T12:37:30.000Z
|
#Filename="finanzassistent"
#Type=Prerun
import os
def main():
    """Decrypt the Postbank Finanzassistent SQLCipher database.

    Relies on the `ctx` and `filename` globals injected by the host tool's
    scripting environment — TODO confirm against the host API.
    """
    ctx.gui_setMainLabel("Postbank Finanzassistent: Extracting key")
    # Static key; no per-device extraction is performed here.
    dbkey = "73839EC3A528910B235859947CC8424543D7B686"
    ctx.gui_setMainLabel("Postbank: Key extracted: " + dbkey)
    if ctx.fs_sqlcipher_decrypt(filename, filename + ".dec", dbkey):
        return "Postbank Finanzassistent: Decryption of database successful."
    # NOTE(review): on failure the original also returned "" (its local error
    # message was never surfaced); empty string appears to signal failure.
    return ""
| 34.133333
| 78
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 260
| 0.507813
|
f71d87a97f28b6912c291299e2155b00941ed654
| 1,615
|
py
|
Python
|
imcsdk/__init__.py
|
kenrusse/imcsdk
|
c35ec5d41072c3ea82c64b1b66e0650d1d873657
|
[
"Apache-2.0"
] | null | null | null |
imcsdk/__init__.py
|
kenrusse/imcsdk
|
c35ec5d41072c3ea82c64b1b66e0650d1d873657
|
[
"Apache-2.0"
] | null | null | null |
imcsdk/__init__.py
|
kenrusse/imcsdk
|
c35ec5d41072c3ea82c64b1b66e0650d1d873657
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import logging.handlers
# SDK-wide logger; the console handler is attached at the bottom of this
# module, after set_log_level() has configured verbosity.
log = logging.getLogger('imc')
console = logging.StreamHandler()
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console.setFormatter(formatter)
def enable_file_logging(filename="imcsdk.log"):
    """Attach a size-rotated file handler (10 MB x 5 backups) to the SDK logger."""
    rotating_handler = logging.handlers.RotatingFileHandler(
        filename, maxBytes=10 * 1024 * 1024, backupCount=5)
    log.addHandler(rotating_handler)
def set_log_level(level=logging.DEBUG):
    """Set the verbosity of the SDK logger and its console handler together.

    Args:
        level: a level constant from the ``logging`` module
            (e.g. logging.INFO, logging.DEBUG, logging.ERROR).

    Returns:
        None

    Example:
        from imcsdk import set_log_level
        import logging
        set_log_level(logging.INFO)
    """
    for sink in (log, console):
        sink.setLevel(level)
# Default to verbose logging; applications can lower it via set_log_level().
set_log_level(logging.DEBUG)
log.addHandler(console)
# Opt-in file logging: create /tmp/imcsdk_debug to capture logs to imcsdk.log.
if os.path.exists('/tmp/imcsdk_debug'):
    enable_file_logging()
__author__ = 'Cisco Systems'
__email__ = 'ucs-python@cisco.com'
__version__ = '0.9.11'
| 26.47541
| 92
| 0.721981
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 987
| 0.611146
|
f71d9c7d4d1edc3d1f3f51bfea2e872d5c549f44
| 1,047
|
py
|
Python
|
64-minimum-path-sum/64-minimum-path-sum.py
|
jurayev/data-structures-algorithms-solutions
|
7103294bafb60117fc77efe4913edcffbeb1ac7a
|
[
"MIT"
] | null | null | null |
64-minimum-path-sum/64-minimum-path-sum.py
|
jurayev/data-structures-algorithms-solutions
|
7103294bafb60117fc77efe4913edcffbeb1ac7a
|
[
"MIT"
] | null | null | null |
64-minimum-path-sum/64-minimum-path-sum.py
|
jurayev/data-structures-algorithms-solutions
|
7103294bafb60117fc77efe4913edcffbeb1ac7a
|
[
"MIT"
] | null | null | null |
class Solution:
    def minPathSum(self, grid: List[List[int]]) -> int:
        """Return the minimum path sum from top-left to bottom-right.

        Moves are restricted to right/down (LeetCode 64).  DP over a
        (rows+1) x (cols+1) table padded with +inf sentinels:

            sums[r][c] = min(sums[r-1][c], sums[r][c-1]) + grid[r-1][c-1]

        Time O(rows*cols), space O(rows*cols).  Returns -1 for an empty
        grid, where no path exists.
        """
        return self.find_min_path_sum(grid)

    def find_min_path_sum(self, grid):
        # Bug fix: guard emptiness *before* touching grid[0] — the original
        # evaluated len(grid[0]) first and raised IndexError on [].
        if not grid or not grid[0]:
            return -1
        rows, cols = len(grid), len(grid[0])
        sums = [[float("inf")] * (cols + 1) for _ in range(rows + 1)]
        for r in range(1, rows + 1):
            for c in range(1, cols + 1):
                if r == 1 and c == 1:
                    # Seed cell: no predecessor to take a minimum over.
                    sums[r][c] = grid[0][0]
                else:
                    sums[r][c] = min(sums[r - 1][c], sums[r][c - 1]) + grid[r - 1][c - 1]
        return sums[rows][cols]
| 31.727273
| 84
| 0.455587
| 1,054
| 0.999052
| 0
| 0
| 0
| 0
| 0
| 0
| 416
| 0.394313
|
f71e495c79f4bb1a1505cad9bdde64d7e37c7ba1
| 1,293
|
py
|
Python
|
proxy/core/tls/certificate.py
|
fisabiliyusri/proxy
|
29934503251b704813ef3e7ed8c2a5ae69448c8a
|
[
"BSD-3-Clause"
] | null | null | null |
proxy/core/tls/certificate.py
|
fisabiliyusri/proxy
|
29934503251b704813ef3e7ed8c2a5ae69448c8a
|
[
"BSD-3-Clause"
] | 8
|
2022-01-23T10:51:59.000Z
|
2022-03-29T22:11:57.000Z
|
proxy/core/tls/certificate.py
|
fisabiliyusri/proxy
|
29934503251b704813ef3e7ed8c2a5ae69448c8a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
from typing import Tuple, Optional
class TlsCertificate:
    """TLS Certificate handshake message, stored verbatim (no field parsing)."""

    def __init__(self) -> None:
        # Raw message bytes captured by parse(); None until parsed.
        self.data: Optional[bytes] = None

    def parse(self, raw: bytes) -> Tuple[bool, bytes]:
        """Record the raw payload and report success."""
        self.data = raw
        return (True, self.data)

    def build(self) -> bytes:
        """Serialize back to the bytes previously recorded by parse()."""
        assert self.data
        return self.data
class TlsCertificateRequest:
    """TLS CertificateRequest handshake message (parsing not implemented)."""

    def __init__(self) -> None:
        # Payload emitted by build(); parse() never populates it.
        self.data: Optional[bytes] = None

    def parse(self, raw: bytes) -> Tuple[bool, bytes]:
        """Always report failure: this message type is not parsed yet."""
        return (False, raw)

    def build(self) -> bytes:
        """Emit the payload; `data` must have been set by the caller."""
        assert self.data
        return self.data
class TlsCertificateVerify:
    """TLS CertificateVerify handshake message (parsing not implemented)."""

    def __init__(self) -> None:
        # Payload emitted by build(); parse() never populates it.
        self.data: Optional[bytes] = None

    def parse(self, raw: bytes) -> Tuple[bool, bytes]:
        """Always report failure: this message type is not parsed yet."""
        return (False, raw)

    def build(self) -> bytes:
        """Emit the payload; `data` must have been set by the caller."""
        assert self.data
        return self.data
| 23.509091
| 86
| 0.622583
| 906
| 0.69746
| 0
| 0
| 0
| 0
| 0
| 0
| 425
| 0.327175
|
f71f18898c8292f215084d67a0492fc48f5a9d6c
| 8,974
|
py
|
Python
|
main.py
|
PabloEmidio/Know-Weather-GTK
|
797f25cbd0c8e1a2f124a5328d9decf2f3829252
|
[
"MIT"
] | 4
|
2021-05-06T02:07:02.000Z
|
2021-05-06T17:48:08.000Z
|
main.py
|
PabloEmidio/Know-Weather-GTK
|
797f25cbd0c8e1a2f124a5328d9decf2f3829252
|
[
"MIT"
] | null | null | null |
main.py
|
PabloEmidio/Know-Weather-GTK
|
797f25cbd0c8e1a2f124a5328d9decf2f3829252
|
[
"MIT"
] | null | null | null |
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from datetime import datetime
from api_request import Weather
# Load the Glade-designed UI; all widgets are looked up from this builder.
builder = Gtk.Builder()
builder.add_from_file('./glade/main.glade')
class Handler:
    """Signal handler for the main window.

    Looks up all widgets from the module-level Glade ``builder`` and, on
    search, fills the current/hourly/daily forecast labels from the
    Weather API response.
    """

    def __init__(self, *args, **kwargs):
        super(Handler, self).__init__(*args, **kwargs)
        self.weather_instance = Weather()

        # Header widgets.
        self.entry = builder.get_object('entry')
        self.btn_search = builder.get_object('btn_search')
        self.city_name = builder.get_object('city_name')
        self.city_text = builder.get_object('city_text')
        self.main_temp = builder.get_object('main_temp')
        self.which_temp_simbol_is = 'Celsius'
        self.weekday_name = builder.get_object('weekday_name')
        self.weekday_name_today = builder.get_object('weekday_name_today')
        self.temp_today_max = builder.get_object('today_max')
        self.temp_today_min = builder.get_object('today_min')

        # Hourly slot widgets.  Slot 1 shows "Now" instead of a clock label,
        # hence its different widget id.
        self.hour_1_now = builder.get_object('hour_1_now')
        for slot in range(2, 6):
            setattr(self, f'hour_{slot}_clock',
                    builder.get_object(f'hour_{slot}_clock'))
        for slot in range(1, 6):
            for part in ('chance_of_rain', 'icon', 'temp'):
                attr = f'hour_{slot}_{part}'
                setattr(self, attr, builder.get_object(attr))

        # Daily forecast widgets (tomorrow and the day after).
        for day in (1, 2):
            for part in ('name', 'icon', 'temp_max', 'temp_min'):
                attr = f'day_{day}_{part}'
                setattr(self, attr, builder.get_object(attr))

    def onDestroy(self, *args):
        Gtk.main_quit()

    def on_button_search_clicked(self, widget):
        """Fetch the forecast for the typed city and refresh every label."""
        import re, unicodedata

        # Strip diacritics so the API lookup works with plain ASCII names.
        word = unicodedata.normalize('NFD', self.entry.get_text())
        word = re.sub('[\u0300-\u036f]', '', word)
        try:
            now = datetime.now()
            current_hour = int(now.strftime('%H'))
            current_search = self.weather_instance.get_weather_info(
                word, current_hour=current_hour)

            # Current conditions.
            self.city_name.set_text(current_search['location']['name'] + '/' +
                                    current_search['location']['region'])
            self.city_text.set_text(
                current_search['current']['condition']['text'])
            self.main_temp.set_text(
                str(int(current_search['current']['temp_c'])) + '°')
            self.weekday_name.set_text(now.strftime('%A'))
            self.weekday_name_today.set_text('Today')
            today = current_search['forecast']['forecastday'][0]['day']
            self.temp_today_max.set_text(str(int(today['maxtemp_c'])))
            self.temp_today_min.set_text(str(int(today['mintemp_c'])))

            # Hourly slots (now .. now+4).
            for slot in range(1, 6):
                self._update_hour_slot(current_search, now, current_hour, slot)

            # Daily rows (tomorrow, day after).
            for day in (1, 2):
                self._update_day_row(current_search, day)
        except Exception as error:
            # Keep the UI alive on network/parse errors; just log to stdout.
            print(f'error {error}')

    def _update_hour_slot(self, search, now, current_hour, slot):
        """Fill one hourly slot (1-based); slot N shows hour now+N-1.

        Deduplicating the five copy-pasted blocks also fixes two bugs in the
        original: slot 2 updated slot 1's chance-of-rain label, and slot 5
        read slot 4's chance-of-rain data (current_hour+3 instead of +4).
        """
        offset = slot - 1
        hour_index = current_hour + offset
        label = self.hour_1_now if slot == 1 else getattr(self, f'hour_{slot}_clock')
        if hour_index <= 23:
            if slot == 1:
                label.set_text('Now')
            else:
                label.set_text(str(int(now.strftime('%I')) + offset) +
                               now.strftime('%p'))
            hour_data = search['forecast']['forecastday'][0]['hour'][hour_index]
            if int(hour_data['chance_of_rain']) > 0:
                getattr(self, f'hour_{slot}_chance_of_rain').set_text(
                    str(hour_data['chance_of_rain']) + '%')
            getattr(self, f'hour_{slot}_temp').set_text(
                str(int(hour_data['temp_c'])))
        else:
            # Requested hour falls past midnight; today's data has no entry.
            label.set_text('unavailable')
            getattr(self, f'hour_{slot}_temp').set_text('tomorrow')
        getattr(self, f'hour_{slot}_icon').set_from_file(
            f'./images/hour_icon/{slot}.png')

    def _update_day_row(self, search, day):
        """Fill one daily forecast row from forecastday[day]."""
        forecast = search['forecast']['forecastday'][day]
        getattr(self, f'day_{day}_name').set_text(
            datetime.fromisoformat(forecast['date']).strftime('%A'))
        getattr(self, f'day_{day}_icon').set_from_file(
            f'./images/days_icon/{day}.png')
        getattr(self, f'day_{day}_temp_max').set_text(
            str(int(forecast['day']['maxtemp_c'])))
        getattr(self, f'day_{day}_temp_min').set_text(
            str(int(forecast['day']['mintemp_c'])))
# Wire the Glade-declared signals to Handler methods, then start the GTK loop.
builder.connect_signals(Handler())
window = builder.get_object('window')
window.show_all()
Gtk.main()
| 53.736527
| 131
| 0.622131
| 8,657
| 0.964568
| 0
| 0
| 0
| 0
| 0
| 0
| 1,974
| 0.219944
|
f71f497fb7582513c2d45b7633de0c7c9d7f7303
| 3,186
|
py
|
Python
|
talk_lib/tests/testtalk.py
|
allankellynet/mimas
|
10025d43bba9e84f502a266760786842e7158a05
|
[
"MIT"
] | null | null | null |
talk_lib/tests/testtalk.py
|
allankellynet/mimas
|
10025d43bba9e84f502a266760786842e7158a05
|
[
"MIT"
] | 1
|
2020-02-05T13:00:29.000Z
|
2020-02-05T13:00:29.000Z
|
talk_lib/tests/testtalk.py
|
allankellynet/mimas
|
10025d43bba9e84f502a266760786842e7158a05
|
[
"MIT"
] | null | null | null |
#-----------------------------------------------------
# Mimas: conference submission and review system
# (c) Allan Kelly 2016-2020 http://www.allankelly.net
# Licensed under MIT License, see LICENSE file
# -----------------------------------------------------
import unittest
from google.appengine.ext import testbed
from speaker_lib import speaker
from talk_lib import talk
class TestTalk(unittest.TestCase):
    """Datastore tests for talk storage, per-speaker lookup and listing flags.

    Uses assertEqual throughout: assertEquals is a deprecated alias that was
    removed in Python 3.12.
    """

    def setUp(self):
        # Fresh in-memory App Engine datastore/memcache stubs for each test.
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()

    def tearDown(self):
        self.testbed.deactivate()

    def _put_talk(self, spk, title):
        # Helper: persist and return a new talk owned by speaker `spk`.
        t = talk.Talk(parent=spk.key)
        t.title = title
        t.put()
        return t

    def test_field_access(self):
        t = talk.Talk()
        self.assertEqual(t.title, "")
        t.title = "Wonderful"
        self.assertEqual(t.title, "Wonderful")
        self.assertEqual(t.title, "Wonderful".encode('ascii', 'ignore'))

    def test_talk_fields(self):
        t = talk.Talk()
        self.assertEqual(t.title, "")
        t.title = "Great talk"
        self.assertEqual(t.title, "Great talk")

    def test_store_retrieve(self):
        spk1 = speaker.make_new_speaker("who@email")
        spk1.put()
        self._put_talk(spk1, "Wonderful")
        t2 = self._put_talk(spk1, "Great")
        self.assertEqual(len(talk.all_user_talks_by_email(spk1.email)), 2)

        spk2 = speaker.make_new_speaker("nobody@email")
        spk2.put()
        self._put_talk(spk2, "Smashing")
        self.assertEqual(len(talk.all_user_talks_by_email(spk2.email)), 1)

        # Deleting a talk removes it from its owner's listing only.
        t2.key.delete()
        self.assertEqual(len(talk.all_user_talks_by_email(spk1.email)), 1)

    def test_store_retrieve_by_key(self):
        spk1 = speaker.make_new_speaker("who@email")
        spk1.put()
        self._put_talk(spk1, "Wonderful")
        t2 = self._put_talk(spk1, "Great")
        self.assertEqual(len(talk.speaker_talks_by_key(spk1.key)), 2)

        spk2 = speaker.make_new_speaker("nobody@email")
        spk2.put()
        self._put_talk(spk2, "Smashing")
        self.assertEqual(len(talk.speaker_talks_by_key(spk2.key)), 1)

        t2.key.delete()
        self.assertEqual(len(talk.all_user_talks_by_email(spk1.email)), 1)

    def test_no_such_speaker(self):
        talks = talk.all_user_talks_by_email("nosuch@nowhere")
        self.assertEqual(len(talks), 0)

    def test_directory_listing(self):
        spk1 = speaker.make_new_speaker("who@email")
        spk1.put()
        t1 = talk.mk_talk(spk1.key, "Wonderful").get()
        # New talks are listed by default; the flag toggles both ways.
        self.assertTrue(t1.is_listed())
        t1.hide_listing()
        self.assertFalse(t1.is_listed())
        t1.show_listing()
        self.assertTrue(t1.is_listed())
| 29.775701
| 73
| 0.605775
| 2,804
| 0.8801
| 0
| 0
| 0
| 0
| 0
| 0
| 476
| 0.149404
|
f71f6972720d1f87a308457a99c2da6ef6fe19d9
| 63,620
|
py
|
Python
|
LeetCode/contest-2018-11-26/fair_candy_swap.py
|
Max-PJB/python-learning2
|
e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd
|
[
"MIT"
] | null | null | null |
LeetCode/contest-2018-11-26/fair_candy_swap.py
|
Max-PJB/python-learning2
|
e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd
|
[
"MIT"
] | null | null | null |
LeetCode/contest-2018-11-26/fair_candy_swap.py
|
Max-PJB/python-learning2
|
e8b05bef1574ee9abf8c90497e94ef20a7f4e3bd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : pengj
@ date : 2018/11/26 19:28
@ IDE : PyCharm
@ GitHub : https://github.com/JackyPJB
@ Contact : pengjianbiao@hotmail.com
-------------------------------------------------
Description : 888. 公平的糖果交换
虚拟 用户通过次数 0
虚拟 用户尝试次数 1
虚拟 通过次数 0
虚拟 提交次数 1
题目难度 Easy
爱丽丝和鲍勃有不同大小的糖果棒:A[i] 是爱丽丝拥有的第 i 块糖的大小,B[j] 是鲍勃拥有的第 j 块糖的大小。
因为他们是朋友,所以他们想交换一个糖果棒,这样交换后,他们都有相同的糖果总量。(一个人拥有的糖果总量是他们拥有的糖果棒大小的总和。)
返回一个整数数组 ans,其中 ans[0] 是爱丽丝必须交换的糖果棒的大小,ans[1] 是 Bob 必须交换的糖果棒的大小。
如果有多个答案,你可以返回其中任何一个。保证答案存在。
示例 1:
输入:A = [1,1], B = [2,2]
输出:[1,2]
示例 2:
输入:A = [1,2], B = [2,3]
输出:[1,2]
示例 3:
输入:A = [2], B = [1,3]
输出:[2,3]
示例 4:
输入:A = [1,2,5], B = [2,4]
输出:[5,4]
提示:
1 <= A.length <= 10000
1 <= B.length <= 10000
1 <= A[i] <= 100000
1 <= B[i] <= 100000
保证爱丽丝与鲍勃的糖果总量不同。
答案肯定存在。
-------------------------------------------------
"""
import time
__author__ = 'Max_Pengjb'
start = time.time()
# 下面写上代码块
class Solution(object):
    """LeetCode 888: Fair Candy Swap."""

    def fairCandySwap(self, A, B):
        """Return ``[a, b]`` where trading Alice's bar ``a`` for Bob's
        bar ``b`` leaves both with equal candy totals.

        :type A: List[int]  -- Alice's candy bar sizes
        :type B: List[int]  -- Bob's candy bar sizes
        :rtype: List[int] (or None when no valid swap exists; the
                problem statement guarantees one does)
        """
        # After swapping a for b, totals are equal iff a - b == (sum(A) - sum(B)) / 2.
        # The guaranteed-answer premise implies the difference is even.
        diff = (sum(A) - sum(B)) // 2
        # A set gives O(1) membership; the original built a dict of dummy 1s.
        bob_sizes = set(B)
        for a in A:
            if a - diff in bob_sizes:
                return [a, a - diff]
        return None  # unreachable for valid inputs, explicit for clarity
A = [1, 2, 5]  # Alice's candy sizes (sample case 4 from the problem)
B = [2, 4]  # Bob's candy sizes; expected swap is [5, 4]
# Stress-test input for fairCandySwap: every odd size from 1 to 9999
# followed by a single even bar (4982). Generated instead of the
# original ~260-line hard-coded literal — same values, same order.
a1 = list(range(1, 10000, 2)) + [4982]
# Bob's candy sizes for the "Fair Candy Swap" stress test: every even
# number from 2 through 10002 inclusive (5001 values), expressed as a
# range instead of a 250+ line literal.
b1 = list(range(2, 10004, 2))
# Run the solver on the generated inputs and print the answer.
res = Solution().fairCandySwap(a1, b1)
print(res)
# Place the code under test above this line (translated from Chinese).
end = time.time()
# `start` is captured earlier in the script, before the inputs are built.
print('Running time: %s Seconds' % (end - start))
| 104.638158
| 120
| 0.623546
| 326
| 0.005077
| 0
| 0
| 0
| 0
| 0
| 0
| 1,786
| 0.027817
|
f7216012bdabcc6a4f76ac1521c5236c58f42c7a
| 393
|
py
|
Python
|
bookitoBackend/User/urls.py
|
mazdakdev/Bookito
|
38e18fee22aafea95429da01e9769acf2748f676
|
[
"MIT"
] | 10
|
2021-12-09T04:39:03.000Z
|
2022-02-07T05:42:29.000Z
|
bookitoBackend/User/urls.py
|
mazdakdev/Bookito
|
38e18fee22aafea95429da01e9769acf2748f676
|
[
"MIT"
] | 2
|
2022-02-07T18:12:54.000Z
|
2022-02-10T10:27:37.000Z
|
bookitoBackend/User/urls.py
|
mazdakdev/Bookito
|
38e18fee22aafea95429da01e9769acf2748f676
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .api import *
from knox import views as knox_views
# URL routes for the User app, mounted under domain.dn/api/v1/.
urlpatterns = [
    # domain.dn/api/v1/register/ | POST — create a new account
    path('register/', SignUpAPI.as_view(), name='register'),
    # domain.dn/api/v1/login/ | POST — authenticate and obtain a token
    path('login/', SignInAPI.as_view(), name='login'),
    # domain.dn/api/v1/user | GET — current user details
    path('user/', MainUser.as_view(), name='user'),
]
| 21.833333
| 62
| 0.64631
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 145
| 0.368957
|
f725220f95e7ed6a18489ee1563dd48ce5f224d6
| 2,985
|
py
|
Python
|
solutions/day18.py
|
nitekat1124/advent-of-code-2021
|
74501b84f0a08b33f48b4e5a2d66b8293c854150
|
[
"WTFPL"
] | 3
|
2021-12-22T17:44:39.000Z
|
2022-01-14T17:18:15.000Z
|
solutions/day18.py
|
nitekat1124/advent-of-code-2021
|
74501b84f0a08b33f48b4e5a2d66b8293c854150
|
[
"WTFPL"
] | null | null | null |
solutions/day18.py
|
nitekat1124/advent-of-code-2021
|
74501b84f0a08b33f48b4e5a2d66b8293c854150
|
[
"WTFPL"
] | null | null | null |
import re
from itertools import combinations
from utils.solution_base import SolutionBase
class Solution(SolutionBase):
def solve(self, part_num: int):
self.test_runner(part_num)
func = getattr(self, f"part{part_num}")
result = func(self.data)
return result
def test_runner(self, part_num):
test_inputs = self.get_test_input()
test_results = self.get_test_result(part_num)
test_counter = 1
func = getattr(self, f"part{part_num}")
for i, r in zip(test_inputs, test_results):
if len(r):
if func(i) == int(r[0]):
print(f"test {test_counter} passed")
else:
print(func(i))
print(r[0])
print(f"test {test_counter} NOT passed")
test_counter += 1
print()
def part1(self, data):
addition = data[0]
for i in data[1:]:
addition = f"[{addition},{i}]"
while (t := self.reduction(addition)) != addition:
addition = t
return self.calc_magnitude(addition)
def reduction(self, s: str):
# explode
depth = 0
for i, v in enumerate(s):
if v.isnumeric() and depth > 4:
pair_close_pos = s[i:].index("]")
before_pair, pair, after_pair = s[: i - 1], s[i : i + pair_close_pos], s[i + pair_close_pos + 1 :]
pair = [*map(int, pair.split(","))]
before_pair = self.add_exploded_pair(before_pair, pair, 0)
after_pair = self.add_exploded_pair(after_pair, pair, 1)
return before_pair + "0" + after_pair
else:
depth += [1, -1]["[]".index(v)] if v in "[]" else 0
# split
large_regulars = [i for i in re.findall(r"\d+", s) if int(i) > 9]
if len(large_regulars):
reg = large_regulars[0]
reg_pos = s.index(reg)
before_reg, after_reg = s[:reg_pos], s[reg_pos + len(reg) :]
reg = int(reg)
elem_left = reg // 2
elem_right = reg - elem_left
s = before_reg + f"[{elem_left},{elem_right}]" + after_reg
return s
def add_exploded_pair(self, line, pair, pair_index):
all_regulars = re.findall(r"\d+", line)
if len(all_regulars):
reg = all_regulars[pair_index - 1]
reg_pos = [line.rindex, line.index][pair_index](reg)
line = line[:reg_pos] + str(int(reg) + pair[pair_index]) + line[reg_pos + len(reg) :]
return line
def calc_magnitude(self, s: str):
while s.count("["):
pairs = re.findall(r"\[(\d+),(\d+)\]", s)
for a, b in pairs:
s = s.replace(f"[{a},{b}]", str(int(a) * 3 + int(b) * 2))
return int(s)
def part2(self, data):
    """Return the largest magnitude obtainable by adding any two numbers
    (addition is not commutative, so both orders are tried)."""
    scores = []
    for a, b in combinations(data, 2):
        scores.append(self.part1((a, b)))
        scores.append(self.part1((b, a)))
    return max(scores)
| 34.310345
| 114
| 0.524958
| 2,891
| 0.968509
| 0
| 0
| 0
| 0
| 0
| 0
| 222
| 0.074372
|
f725e0913b22178375a220d288839fa6706545f3
| 520
|
py
|
Python
|
backend/utils/management/commands/generate_dummy_skills.py
|
NumanIbnMazid/numanibnmazid.com
|
905e3afab285316d88bafa30dc080dfbb0611731
|
[
"MIT"
] | 1
|
2022-01-28T18:20:19.000Z
|
2022-01-28T18:20:19.000Z
|
backend/utils/management/commands/generate_dummy_skills.py
|
NumanIbnMazid/numanibnmazid.com
|
905e3afab285316d88bafa30dc080dfbb0611731
|
[
"MIT"
] | null | null | null |
backend/utils/management/commands/generate_dummy_skills.py
|
NumanIbnMazid/numanibnmazid.com
|
905e3afab285316d88bafa30dc080dfbb0611731
|
[
"MIT"
] | null | null | null |
from portfolios.factories.skill_factory import create_skills_with_factory
from django.db import transaction
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Django management command that seeds the database with dummy Skill rows."""
    help = "Generates dummy data"

    def _generate_dummy_data(self):
        # Create 7 dummy skills via the factory; existing rows are kept.
        create_skills_with_factory(
            num_of_data=7,
            delete_old_data=False
        )

    @transaction.atomic
    def handle(self, *args, **kwargs):
        # Entry point; atomic so a failure rolls back every generated row.
        self._generate_dummy_data()
| 26
| 73
| 0.698077
| 357
| 0.686538
| 0
| 0
| 118
| 0.226923
| 0
| 0
| 56
| 0.107692
|
f726670921d44f21aa09f17d795a742ee0c1fa0c
| 8,397
|
py
|
Python
|
test/bitfinex_test.py
|
laisee/bitfinex
|
6a3e7cd412f186eca0039602d32c65938a392747
|
[
"MIT"
] | null | null | null |
test/bitfinex_test.py
|
laisee/bitfinex
|
6a3e7cd412f186eca0039602d32c65938a392747
|
[
"MIT"
] | null | null | null |
test/bitfinex_test.py
|
laisee/bitfinex
|
6a3e7cd412f186eca0039602d32c65938a392747
|
[
"MIT"
] | null | null | null |
import unittest
import mock
import requests
import httpretty
import settings
from bitfinex.client import Client, TradeClient
API_KEY = settings.API_KEY
API_SECRET = settings.API_SECRET
class BitfinexTest(unittest.TestCase):
    """Unit tests for the public (unauthenticated) bitfinex Client.

    Network access is stubbed with httpretty: each endpoint test registers
    a canned JSON body for the URL under test and asserts that the client
    parses it into native Python numbers/booleans.
    """

    def setUp(self):
        # Fresh unauthenticated client for every test.
        self.client = Client()

    def test_should_have_server(self):
        self.assertEqual("https://api.bitfinex.com/v1", self.client.server())

    def test_should_have_url_for_foo(self):
        expected = "https://api.bitfinex.com/v1/foo"
        self.assertEqual(expected, self.client.url_for("foo"))

    def test_should_have_url_for_path_arg(self):
        expected = "https://api.bitfinex.com/v1/foo/bar"
        actual = self.client.url_for('foo/%s', path_arg="bar")
        self.assertEqual(expected, actual)

    def test_should_have_url_with_parameters(self):
        expected = "https://api.bitfinex.com/v1/foo?a=1&b=2"
        actual = self.client.url_for('foo', parameters={'a': 1, 'b': 2})
        self.assertEqual(expected, actual)

    def test_should_have_url_for(self):
        expected = self.client.url_for("foo")
        self.assertEqual("https://api.bitfinex.com/v1/foo", expected)

    def test_should_have_url_for_with_path_arg(self):
        # url_for accepts the path argument positionally or by keyword.
        expected = "https://api.bitfinex.com/v1/foo/bar"
        path = "foo/%s"
        self.assertEqual(expected, self.client.url_for(path, path_arg='bar'))
        self.assertEqual(expected, self.client.url_for(path, 'bar'))

    def test_should_have_url_for_with_parameters(self):
        expected = "https://api.bitfinex.com/v1/foo?a=1"
        self.assertEqual(expected, self.client.url_for("foo", parameters={'a': 1}))
        self.assertEqual(expected, self.client.url_for("foo", None, {'a': 1}))

    def test_should_have_url_for_with_path_arg_and_parameters(self):
        expected = "https://api.bitfinex.com/v1/foo/bar?a=1"
        path = "foo/%s"
        self.assertEqual(expected, self.client.url_for(path, path_arg='bar', parameters={'a': 1}))
        self.assertEqual(expected, self.client.url_for(path, 'bar', {'a': 1}))

    @httpretty.activate
    def test_should_have_symbols(self):
        # mock out the request
        mock_body = '["btcusd","ltcusd","ltcbtc"]'
        url = self.client.url_for('symbols')
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        expected = ["btcusd","ltcusd","ltcbtc"]
        self.assertEqual(expected, self.client.symbols())

    @httpretty.activate
    def test_should_have_ticker(self):
        # mock out the request; string fields are parsed into floats
        mock_body = '{"mid":"562.56495","bid":"562.15","ask":"562.9799","last_price":"562.25","timestamp":"1395552658.339936691"}'
        url = self.client.url_for('ticker/%s', path_arg='btcusd')
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        expected = {
            "mid": 562.56495,
            "bid": 562.15,
            "ask": 562.9799,
            "last_price": 562.25,
            "timestamp": 1395552658.339936691
        }
        self.assertEqual(expected, self.client.ticker('btcusd'))

    @httpretty.activate
    def test_should_have_today(self):
        # mock out the request
        mock_body = '{"low":"550.09","high":"572.2398","volume":"7305.33119836"}'
        url = self.client.url_for('today/%s', path_arg='btcusd')
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        expected = {
            "low": 550.09,
            "high": 572.2398,
            "volume": 7305.33119836
        }
        self.assertEqual(expected, self.client.today('btcusd'))

    @httpretty.activate
    def test_should_have_stats(self):
        # mock out the request
        mock_body = '[{"period":1,"volume":"7410.27250155"},{"period":7,"volume":"52251.37118006"},{"period":30,"volume":"464505.07753251"}]'
        url = self.client.url_for('stats/%s', path_arg='btcusd')
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        expected = [
            {"period": 1, "volume": 7410.27250155},
            {"period": 7, "volume": 52251.37118006},
            {"period": 30,"volume": 464505.07753251}
        ]
        self.assertEqual(expected, self.client.stats('btcusd'))

    @httpretty.activate
    def test_should_have_lendbook(self):
        # mock out the request; "frr" Yes/No strings become booleans
        mock_body = '{"bids":[{"rate":"5.475","amount":"15.03894663","period":30,"timestamp":"1395112149.0","frr":"No"},{"rate":"2.409","amount":"14.5121868","period":7,"timestamp":"1395497599.0","frr":"No"}],"asks":[{"rate":"6.351","amount":"15.5180735","period":5,"timestamp":"1395549996.0","frr":"No"},{"rate":"6.3588","amount":"626.94808249","period":30,"timestamp":"1395400654.0","frr":"Yes"}]}'
        url = self.client.url_for('lendbook/%s', 'btc')
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        expected = {
            "bids": [
                {"rate": 5.475, "amount": 15.03894663, "period": 30, "timestamp": 1395112149.0, "frr": False},
                {"rate": 2.409, "amount": 14.5121868, "period": 7, "timestamp": 1395497599.0, "frr": False}
            ],
            "asks": [
                {"rate": 6.351, "amount": 15.5180735, "period": 5, "timestamp": 1395549996.0, "frr": False},
                {"rate": 6.3588, "amount": 626.94808249, "period": 30, "timestamp": 1395400654.0, "frr": True}
            ]
        }
        self.assertEqual(expected, self.client.lendbook('btc'))

    @httpretty.activate
    def test_should_have_lendbook_with_parameters(self):
        # mock out the request
        mock_body = '{"bids":[{"rate":"5.475","amount":"15.03894663","period":30,"timestamp":"1395112149.0","frr":"No"},{"rate":"2.409","amount":"14.5121868","period":7,"timestamp":"1395497599.0","frr":"No"}],"asks":[]}'
        parameters = {'limit_bids': 2, 'limit_asks': 0}
        url = self.client.url_for('lendbook/%s', 'btc', parameters)
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        expected = {
            "bids": [
                {"rate": 5.475, "amount": 15.03894663, "period": 30, "timestamp": 1395112149.0, "frr": False},
                {"rate": 2.409, "amount": 14.5121868, "period": 7, "timestamp": 1395497599.0, "frr": False}
            ],
            "asks": [
            ]
        }
        self.assertEqual(expected, self.client.lendbook('btc', parameters))

    @httpretty.activate
    def test_should_have_order_book(self):
        # mock out the request
        mock_body = '{"bids":[{"price":"562.2601","amount":"0.985","timestamp":"1395567556.0"}],"asks":[{"price":"563.001","amount":"0.3","timestamp":"1395532200.0"}]}'
        url = self.client.url_for('book/%s', 'btcusd')
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        expected = {
            "bids": [
                {"price": 562.2601, "amount": 0.985, "timestamp": 1395567556.0}
            ],
            "asks": [
                {"price": 563.001, "amount": 0.3, "timestamp": 1395532200.0}
            ]
        }
        self.assertEqual(expected, self.client.order_book('btcusd'))

    @httpretty.activate
    def test_should_have_order_book_with_parameters(self):
        # mock out the request
        mock_body = '{"bids":[{"price":"562.2601","amount":"0.985","timestamp":"1395567556.0"}],"asks":[]}'
        parameters = {'limit_asks': 0}
        url = self.client.url_for('book/%s', 'btcusd', parameters)
        httpretty.register_uri(httpretty.GET, url, body=mock_body, status=200)
        expected = {
            "bids": [
                {"price": 562.2601, "amount": 0.985, "timestamp": 1395567556.0}
            ],
            "asks": []
        }
        self.assertEqual(expected, self.client.order_book('btcusd', parameters))
class TestTradeClient(unittest.TestCase):
    """Integration-style tests for the authenticated TradeClient.

    NOTE(review): unlike BitfinexTest these are NOT mocked -- they hit the
    live API using the credentials from settings, so they require network
    access and valid keys.
    """

    def setUp(self):
        self.tc = TradeClient(API_KEY, API_SECRET)

    def test_instantiate_tradeclient(self):
        self.assertIsInstance(self.tc, TradeClient)

    def test_get_active_orders_returns_json(self):
        ao = self.tc.active_orders()
        self.assertIsInstance(ao, list)

    def test_get_active_positions_returns_json(self):
        ap = self.tc.active_positions()
        self.assertIsInstance(ap, list)

    def test_get_full_history(self):
        # NOTE(review): calls active_positions(), not a history endpoint --
        # appears to be a copy/paste of the previous test; confirm intent.
        ap = self.tc.active_positions()
        self.assertIsInstance(ap, list)
| 37.995475
| 400
| 0.609265
| 8,205
| 0.977135
| 0
| 0
| 5,686
| 0.677147
| 0
| 0
| 2,396
| 0.28534
|
f728ea2eb644fbcc81d1cbb2f7e623c7f87f0380
| 834
|
py
|
Python
|
src/bokeh_app/graph_view.py
|
avbatchelor/insight-articles-project
|
852b338b786cb5b9c281fcec2e378aed8d3dc617
|
[
"MIT"
] | null | null | null |
src/bokeh_app/graph_view.py
|
avbatchelor/insight-articles-project
|
852b338b786cb5b9c281fcec2e378aed8d3dc617
|
[
"MIT"
] | null | null | null |
src/bokeh_app/graph_view.py
|
avbatchelor/insight-articles-project
|
852b338b786cb5b9c281fcec2e378aed8d3dc617
|
[
"MIT"
] | null | null | null |
import networkx as nx
import pickle
from bokeh.io import show, output_file
from bokeh.plotting import figure
from bokeh.models.graphs import from_networkx
processed_data_folder = 'C:\\Users\\Alex\\Documents\\GitHub\\insight-articles-project\\data\\processed\\'
filename = processed_data_folder + 'graph_and_labels'

# Load the adjacency matrix and the node-id -> topic-label mapping.
with open(filename, 'rb') as fp:
    graph_mat, topic_labels = pickle.load(fp)

G = nx.from_numpy_matrix(graph_mat)
pos = nx.spring_layout(G)
# Bug fix: the original called nx.relabel_nodes(G, topic_labels) and
# discarded its return value -- relabel_nodes returns a relabelled COPY by
# default, so the call was a no-op.  Labels are rendered for display via
# draw_networkx_labels below, so the dead call is simply removed.
nx.draw(G, pos)
nx.draw_networkx_labels(G, pos, topic_labels, font_size=16)

plot = figure(title="Blog Curator Demo", x_range=(-2.1, 2.1), y_range=(-2.1, 2.1),
              tools="", toolbar_location=None)

graph = from_networkx(G, nx.spring_layout, scale=2, center=(0, 0))
plot.renderers.append(graph)

output_file("networkx_graph.html")
show(plot)
| 29.785714
| 105
| 0.758993
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 145
| 0.173861
|
f72b30581d8ef30df8d3b88fde755c65a6390087
| 15,737
|
py
|
Python
|
dssm/data_input.py
|
nlpming/tensorflow-DSMM
|
dc982cc49bf03f474da2895e4dd4fb37061c0271
|
[
"MIT"
] | null | null | null |
dssm/data_input.py
|
nlpming/tensorflow-DSMM
|
dc982cc49bf03f474da2895e4dd4fb37061c0271
|
[
"MIT"
] | null | null | null |
dssm/data_input.py
|
nlpming/tensorflow-DSMM
|
dc982cc49bf03f474da2895e4dd4fb37061c0271
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding=utf-8
from inspect import getblock
import json
import os
from os import read
from numpy.core.fromnumeric import mean
import numpy as np
import paddlehub as hub
import six
import math
import random
import sys
from util import read_file
from config import Config
# 配置文件
conf = Config()
class Vocabulary(object):
    """Token vocabulary loaded from a one-token-per-line meta file.

    Maps tokens to integer ids (the 0-based file line number) and back, and
    provides helpers to encode character sequences for plain models and
    for BERT-style inputs ([CLS]/[SEP] insertion, attention mask and
    segment ids).
    """

    def __init__(self, meta_file, max_len, allow_unk=0, unk="$UNK$", pad="$PAD$",):
        """Load the vocabulary from *meta_file*.

        :param meta_file: UTF-8 file, one token per line; the line number
            becomes the token id.
        :param max_len: truncation/padding length for encoded sequences
            (falsy disables both).
        :param allow_unk: when truthy, unknown tokens map to *unk* instead
            of raising ValueError.
        """
        self.voc2id = {}
        self.id2voc = {}
        self.unk = unk
        self.pad = pad
        self.max_len = max_len
        self.allow_unk = allow_unk
        # Bug fix: fit() reads self.max_num_word but nothing ever set it,
        # so fit() always raised AttributeError.  Negative means "no cap".
        self.max_num_word = -1
        with open(meta_file, encoding='utf-8') as f:
            for i, line in enumerate(f):
                line = convert_to_unicode(line.strip("\n"))
                self.voc2id[line] = i
                self.id2voc[i] = line
        self.size = len(self.voc2id)
        self.oov_num = self.size + 1

    def fit(self, words_list):
        """Rebuild voc2id from raw token lists, most frequent tokens first.

        :param words_list: [[w11, w12, ...], [w21, w22, ...], ...]
        :return: self
        """
        # Bug fix: Counter was referenced but never imported in this module.
        from collections import Counter
        word_lst = []
        word_lst_append = word_lst.append
        for words in words_list:
            if not isinstance(words, list):
                print(words)
                continue
            for word in words:
                word = convert_to_unicode(word)
                word_lst_append(word)
        word_counts = Counter(word_lst)
        if self.max_num_word < 0:
            self.max_num_word = len(word_counts)
        sorted_voc = [w for w, c in word_counts.most_common(self.max_num_word)]
        self.max_num_word = len(sorted_voc)
        self.oov_index = self.max_num_word + 1
        # ids start at 1; 0 is implicitly reserved for padding
        self.voc2id = dict(zip(sorted_voc, range(1, self.max_num_word + 1)))
        return self

    def _transform2id(self, word):
        """Return the id of *word*; fall back to UNK when allowed, else raise."""
        word = convert_to_unicode(word)
        if word in self.voc2id:
            return self.voc2id[word]
        elif self.allow_unk:
            return self.voc2id[self.unk]
        else:
            print(word)
            raise ValueError("word:{} Not in voc2id, please check".format(word))

    def _transform_seq2id(self, words, padding=0):
        """Encode a character sequence, truncated to max_len and optionally
        right-padded with id 0."""
        out_ids = []
        words = convert_to_unicode(words)
        if self.max_len:
            words = words[:self.max_len]
        for w in words:
            out_ids.append(self._transform2id(w))
        if padding and self.max_len:
            while len(out_ids) < self.max_len:
                out_ids.append(0)
        return out_ids

    def _transform_intent2ont_hot(self, words, padding=0):
        # Convert a multi-label intent set to a multi-hot vector of size |vocab|.
        out_ids = np.zeros(self.size, dtype=np.float32)
        words = convert_to_unicode(words)
        for w in words:
            out_ids[self._transform2id(w)] = 1.0
        return out_ids

    def _transform_seq2bert_id(self, words, padding=0):
        """Encode one sequence for BERT: prepend [CLS], then the tokens.

        :return: (token_ids, attention_mask, segment_ids, original_seq_len)
        """
        out_ids, seq_len = [], 0
        words = convert_to_unicode(words)
        if self.max_len:
            words = words[:self.max_len]
        seq_len = len(words)
        # insert [CLS]
        out_ids.append(self._transform2id("[CLS]"))
        for w in words:
            out_ids.append(self._transform2id(w))
        mask_ids = [1 for _ in out_ids]
        if padding and self.max_len:
            # +1 accounts for the leading [CLS] token
            while len(out_ids) < self.max_len + 1:
                out_ids.append(0)
                mask_ids.append(0)
        seg_ids = [0 for _ in out_ids]
        return out_ids, mask_ids, seg_ids, seq_len

    @staticmethod
    def _truncate_seq_pair(tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length,
        always trimming the currently longer sequence."""
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()

    def _transform_2seq2bert_id(self, seq1, seq2, padding=0):
        """Encode a sentence pair for BERT as [CLS] seq1 [SEP] seq2 with
        segment ids 0 for the first sentence and 1 for the second.

        :return: (token_ids, attention_mask, segment_ids, seq_len)
        """
        # NOTE(review): seg_ids starts as [1], which leaves it one element
        # longer than out_ids before padding -- confirm this is intended.
        out_ids, seg_ids, seq_len = [], [1], 0
        seq1 = [x for x in convert_to_unicode(seq1)]
        seq2 = [x for x in convert_to_unicode(seq2)]
        # truncate (leave room for [CLS] and [SEP])
        self._truncate_seq_pair(seq1, seq2, self.max_len - 2)
        # insert [CLS], [SEP]
        out_ids.append(self._transform2id("[CLS]"))
        for w in seq1:
            out_ids.append(self._transform2id(w))
            seg_ids.append(0)
        out_ids.append(self._transform2id("[SEP]"))
        seg_ids.append(0)
        for w in seq2:
            out_ids.append(self._transform2id(w))
            seg_ids.append(1)
        mask_ids = [1 for _ in out_ids]
        if padding and self.max_len:
            while len(out_ids) < self.max_len + 1:
                out_ids.append(0)
                mask_ids.append(0)
                seg_ids.append(0)
        return out_ids, mask_ids, seg_ids, seq_len

    def transform(self, seq_list, is_bert=0):
        """Encode every sequence in *seq_list* (BERT-style when is_bert)."""
        if is_bert:
            return [self._transform_seq2bert_id(seq) for seq in seq_list]
        else:
            return [self._transform_seq2id(seq) for seq in seq_list]

    def __len__(self):
        return len(self.voc2id)
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    if six.PY2:
        if isinstance(text, str):
            return text.decode("utf-8", "ignore")
        if isinstance(text, unicode):
            return text
        raise ValueError("Unsupported string type: %s" % (type(text)))
    raise ValueError("Not running on Python2 or Python 3?")
def gen_word_set(file_path, out_path='./data/words.txt'):
    """Collect the character vocabulary of a query-prediction dataset.

    Reads tab-separated lines of (prefix, query_pred_json, title, tag,
    label), skips negatives (label == '0') and malformed lines, gathers
    every character of the prefix and of each predicted query, and writes
    the unique characters to *out_path*, one per line.
    """
    word_set = set()
    with open(file_path, encoding='utf-8') as f:
        for line in f.readlines():
            spline = line.strip().split('\t')
            # Bug fix: the guard was `< 4` but the unpack below needs
            # exactly 5 fields, so 4-field lines raised ValueError.
            if len(spline) != 5:
                continue
            prefix, query_pred, title, tag, label = spline
            if label == '0':
                continue
            query_pred = json.loads(query_pred)
            for w in prefix:
                word_set.add(w)
            for each in query_pred:
                for w in each:
                    word_set.add(w)
    # Bug fix: the output file was opened as `open(word_set, ...)` -- the
    # set object itself -- instead of the out_path argument (TypeError).
    with open(out_path, 'w', encoding='utf-8') as o:
        for w in word_set:
            o.write(w + '\n')
def convert_word2id(query, vocab_map):
    """Encode the characters of *query* as ids from *vocab_map*.

    Unknown characters map to the UNK id; the result is right-padded with
    the PAD id and clipped to conf.max_seq_len.
    """
    ids = []
    for token in query:
        try:
            ids.append(vocab_map[token])
        except KeyError:
            ids.append(vocab_map[conf.unk])
    pad_needed = conf.max_seq_len - len(ids)
    if pad_needed > 0:
        ids.extend([vocab_map[conf.pad]] * pad_needed)
    return ids[:conf.max_seq_len]
def convert_seq2bow(query, vocab_map):
    """Bag-of-characters count vector of length conf.nwords; unknown
    characters are accumulated in the UNK bucket."""
    bow = np.zeros(conf.nwords)
    for token in query:
        idx = vocab_map[token] if token in vocab_map else vocab_map[conf.unk]
        bow[idx] += 1
    return bow
def get_data(file_path):
    """
    gen datasets, convert word into word ids.

    Each usable line has 5 tab-separated fields:
    (prefix, query_pred_json, title, tag, label).  Positive rows
    (label != '0') that yield at least 4 negative candidates contribute
    one query/doc_pos entry and exactly 4 doc_neg entries.

    :param file_path:
    :return: dict with keys query/query_len/doc_pos/doc_pos_len/
        doc_neg/doc_neg_len (doc_neg holds 4 rows per query)
    """
    data_map = {'query': [], 'query_len': [], 'doc_pos': [], 'doc_pos_len': [], 'doc_neg': [], 'doc_neg_len': []}
    with open(file_path, encoding='utf8') as f:
        for line in f.readlines():
            spline = line.strip().split('\t')
            # Bug fix: guard was `< 4`, but the unpack below needs exactly
            # 5 fields; 4-field lines raised ValueError.
            if len(spline) != 5:
                continue
            prefix, query_pred, title, tag, label = spline
            if label == '0':
                continue
            cur_arr, cur_len = [], []
            query_pred = json.loads(query_pred)
            # only 4 negative sample
            for each in query_pred:
                if each == title:
                    continue
                cur_arr.append(convert_word2id(each, conf.vocab_map))
                each_len = len(each) if len(each) < conf.max_seq_len else conf.max_seq_len
                cur_len.append(each_len)
            if len(cur_arr) >= 4:
                data_map['query'].append(convert_word2id(prefix, conf.vocab_map))
                data_map['query_len'].append(len(prefix) if len(prefix) < conf.max_seq_len else conf.max_seq_len)
                data_map['doc_pos'].append(convert_word2id(title, conf.vocab_map))
                data_map['doc_pos_len'].append(len(title) if len(title) < conf.max_seq_len else conf.max_seq_len)
                data_map['doc_neg'].extend(cur_arr[:4])
                data_map['doc_neg_len'].extend(cur_len[:4])
    return data_map
def get_data_siamese_rnn(file_path):
    """
    gen datasets, convert word into word ids.

    Returns one [prefix_ids, title_ids, label] row per usable line of
    (prefix, query_pred, title, tag, label).

    :param file_path:
    :return: [[prefix_ids, title_ids, label]], shape = [n, 3]
    """
    data_arr = []
    with open(file_path, encoding='utf8') as f:
        for line in f.readlines():
            spline = line.strip().split('\t')
            # Bug fix: guard was `< 4`, but the unpack below needs exactly
            # 5 fields; 4-field lines raised ValueError.
            if len(spline) != 5:
                continue
            prefix, _, title, tag, label = spline
            prefix_seq = convert_word2id(prefix, conf.vocab_map)
            title_seq = convert_word2id(title, conf.vocab_map)
            data_arr.append([prefix_seq, title_seq, int(label)])
    return data_arr
def get_data_bow(file_path):
    """
    gen datasets, convert word into word ids.

    Same file format as get_data_siamese_rnn, but each query/title is
    encoded as a bag-of-characters vector instead of an id sequence.

    :param file_path:
    :return: [[prefix_bow, title_bow, label]], shape = [n, 3]
    """
    data_arr = []
    with open(file_path, encoding='utf8') as f:
        for line in f.readlines():
            spline = line.strip().split('\t')
            # Bug fix: guard was `< 4`, but the unpack below needs exactly
            # 5 fields; 4-field lines raised ValueError.
            if len(spline) != 5:
                continue
            prefix, _, title, tag, label = spline
            prefix_ids = convert_seq2bow(prefix, conf.vocab_map)
            title_ids = convert_seq2bow(title, conf.vocab_map)
            data_arr.append([prefix_ids, title_ids, int(label)])
    return data_arr
def trans_lcqmc(dataset):
    """Encode LCQMC examples as [ids1, len1, ids2, len2, label] rows.

    Lengths are clipped to conf.max_seq_len.  Also prints corpus length
    statistics and the coverage rate of the configured max length.
    """
    out_arr, text_len = [], []
    for each in dataset:
        t1, t2, label = each.text_a, each.text_b, int(each.label)
        t1_ids = convert_word2id(t1, conf.vocab_map)
        t1_len = conf.max_seq_len if len(t1) > conf.max_seq_len else len(t1)
        t2_ids = convert_word2id(t2, conf.vocab_map)
        t2_len = conf.max_seq_len if len(t2) > conf.max_seq_len else len(t2)
        # t2_len = len(t2)
        out_arr.append([t1_ids, t1_len, t2_ids, t2_len, label])
        # out_arr.append([t1_ids, t1_len, t2_ids, t2_len, label, t1, t2])
        text_len.extend([len(t1), len(t2)])
        pass
    print("max len", max(text_len), "avg len", mean(text_len), "cover rate:", np.mean([x <= conf.max_seq_len for x in text_len]))
    return out_arr
def get_lcqmc():
    """Load the LCQMC dataset via PaddleHub and encode each split to word ids.

    :return: (train_set, dev_set, test_set) as produced by trans_lcqmc.
    """
    dataset = hub.dataset.LCQMC()
    train_set = trans_lcqmc(dataset.train_examples)
    dev_set = trans_lcqmc(dataset.dev_examples)
    test_set = trans_lcqmc(dataset.test_examples)
    return train_set, dev_set, test_set
    # return test_set, test_set, test_set
def trans_lcqmc_bert(dataset:list, vocab:Vocabulary, is_merge=0):
    """Encode LCQMC examples as BERT inputs.

    With *is_merge* the pair is packed into one [CLS] a [SEP] b sequence;
    otherwise each sentence is encoded independently.  Also prints corpus
    length statistics for the configured max length.
    """
    out_arr, text_len = [], []
    for each in dataset:
        t1, t2, label = each.text_a, each.text_b, int(each.label)
        if is_merge:
            out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_2seq2bert_id(t1, t2, padding=1)
            out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, label])
            text_len.extend([len(t1) + len(t2)])
        else:
            out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_seq2bert_id(t1, padding=1)
            out_ids2, mask_ids2, seg_ids2, seq_len2 = vocab._transform_seq2bert_id(t2, padding=1)
            out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, out_ids2, mask_ids2, seg_ids2, seq_len2, label])
            text_len.extend([len(t1), len(t2)])
        pass
    print("max len", max(text_len), "avg len", mean(text_len), "cover rate:", np.mean([x <= conf.max_seq_len for x in text_len]))
    return out_arr
def get_lcqmc_bert(vocab:Vocabulary, is_merge=0):
    """Load LCQMC via PaddleHub and BERT-encode each split with *vocab*.

    :return: (train_set, dev_set, test_set) as produced by trans_lcqmc_bert.
    """
    dataset = hub.dataset.LCQMC()
    train_set = trans_lcqmc_bert(dataset.train_examples, vocab, is_merge)
    dev_set = trans_lcqmc_bert(dataset.dev_examples, vocab, is_merge)
    test_set = trans_lcqmc_bert(dataset.test_examples, vocab, is_merge)
    return train_set, dev_set, test_set
    # test_set = test_set[:100]
    # return test_set, test_set, test_set
def get_test(file_:str, vocab:Vocabulary):
    """Encode a tab-separated file of query pairs for the plain models.

    :return: (encoded_rows, raw_rows) where each encoded row is
        [t1_ids, t1_len, t2_ids, t2_len].
    """
    test_arr = read_file(file_, '\t')  # [[q1, q2],...]
    out_arr = []
    for line in test_arr:
        if len(line) != 2:
            # NOTE(review): this only warns; the unpack below still raises
            # on malformed lines -- confirm whether they should be skipped.
            print('wrong line size=', len(line))
        t1, t2 = line  # [t1_ids, t1_len, t2_ids, t2_len, label]
        t1_ids = vocab._transform_seq2id(t1, padding=1)
        t1_len = vocab.max_len if len(t1) > vocab.max_len else len(t1)
        t2_ids = vocab._transform_seq2id(t2, padding=1)
        t2_len = vocab.max_len if len(t2) > vocab.max_len else len(t2)
        out_arr.append([t1_ids, t1_len, t2_ids, t2_len])
    return out_arr, test_arr
def get_test_bert(file_:str, vocab:Vocabulary, is_merge=0):
    """Read tab-separated query pairs from *file_* and BERT-encode them.

    :return: (encoded_rows, raw_rows)
    """
    pairs = read_file(file_, '\t')  # [[q1, q2],...]
    encoded, _ = get_test_bert_by_arr(pairs, vocab, is_merge)
    return encoded, pairs
def get_test_bert_by_arr(test_arr:list, vocab:Vocabulary, is_merge=0):
    """BERT-encode an in-memory list of query pairs.

    :param test_arr: [[q1, q2], ...]
    :return: (encoded_rows, test_arr); merged rows hold one encoded
        sequence, unmerged rows hold both sequences' encodings.
    """
    out_arr = []
    for line in test_arr:
        if len(line) != 2:
            # NOTE(review): warns but does not skip; the unpack below still
            # raises on malformed rows -- confirm intended behaviour.
            print('wrong line size=', len(line))
        t1, t2 = line  # [t1_ids, t1_len, t2_ids, t2_len, label]
        if is_merge:
            out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_2seq2bert_id(t1, t2, padding=1)
            out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1])
        else:
            out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_seq2bert_id(t1, padding=1)
            out_ids2, mask_ids2, seg_ids2, seq_len2 = vocab._transform_seq2bert_id(t2, padding=1)
            out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, out_ids2, mask_ids2, seg_ids2, seq_len2])
    return out_arr, test_arr
def get_test_bert_single(file_:str, vocab:Vocabulary, is_merge=0):
    """BERT-encode a file of single queries (one per line).

    :return: (encoded_rows, raw_rows); is_merge is accepted for signature
        parity with get_test_bert but is not used here.
    """
    test_arr = read_file(file_)  # [q1,...]
    out_arr = []
    for line in test_arr:
        t1 = line  # [t1_ids, t1_len, t2_ids, t2_len, label]
        out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_seq2bert_id(t1, padding=1)
        out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1])
    return out_arr, test_arr
def get_batch(dataset, batch_size=None, is_test=0):
    """Yield mini-batches of *dataset*, column-wise zipped.

    Each element of *dataset* is one example's feature list; each yielded
    batch is a zip of per-feature tuples.  Training data (is_test falsy)
    is shuffled in place first.
    """
    batch_size = batch_size or 32
    if not is_test:
        random.shuffle(dataset)
    n_steps = int(math.ceil(float(len(dataset)) / batch_size))
    for step in range(n_steps):
        start = step * batch_size
        yield zip(*dataset[start:start + batch_size])
if __name__ == '__main__':
    # Dataset fields: prefix, query_prediction, title, tag, label
    # query_prediction is JSON-encoded.
    file_train = './data/oppo_round1_train_20180929.txt'
    file_vali = './data/oppo_round1_vali_20180929.txt'
    # data_train = get_data(file_train)
    # data_train = get_data(file_vali)
    # print(len(data_train['query']), len(data_train['doc_pos']), len(data_train['doc_neg']))
    dataset = get_lcqmc()
    print(dataset[1][:3])
    # Smoke-test batching on the first three dev examples.
    for each in get_batch(dataset[1][:3], batch_size=2):
        t1_ids, t1_len, t2_ids, t2_len, label = each
        print(each)
        pass
| 37.20331
| 129
| 0.599797
| 4,761
| 0.299114
| 612
| 0.038449
| 413
| 0.025947
| 0
| 0
| 2,457
| 0.154363
|
f72d2d7694c02f9baefa28ab714fa7d648759fe9
| 8,778
|
py
|
Python
|
groupbunk.py
|
shine-jayakumar/groupbunk-fb
|
ddf3d66cd902343e419dd2cf0c86f42850315f08
|
[
"MIT"
] | 1
|
2022-02-11T05:31:48.000Z
|
2022-02-11T05:31:48.000Z
|
groupbunk.py
|
shine-jayakumar/groupbunk-fb
|
ddf3d66cd902343e419dd2cf0c86f42850315f08
|
[
"MIT"
] | null | null | null |
groupbunk.py
|
shine-jayakumar/groupbunk-fb
|
ddf3d66cd902343e419dd2cf0c86f42850315f08
|
[
"MIT"
] | null | null | null |
"""
GroupBunk v.1.2
Leave your Facebook groups quietly
Author: Shine Jayakumar
Github: https://github.com/shine-jayakumar
LICENSE: MIT
"""
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import StaleElementReferenceException
from webdriver_manager.chrome import ChromeDriverManager
import argparse
import logging
import sys
from datetime import datetime
import time
from groupfuncs import *
import os
# suppress webdriver manager logs
os.environ['WDM_LOG_LEVEL'] = '0'
IGNORE_DIV = ['your feed', 'discover', 'your notifications']
FB_GROUP_URL = 'https://www.facebook.com/groups/feed/'
def display_intro():
    '''
    Displays intro of the script
    '''
    # Banner printed once at startup; leading newline separates it from the prompt.
    intro = """
    GroupBunk v.1.2
    Leave your Facebook groups quietly
    Author: Shine Jayakumar
    Github: https://github.com/shine-jayakumar
    """
    print(intro)
def time_taken(start_time, logger):
    '''
    Logs the elapsed wall-clock time (seconds) since start_time
    '''
    elapsed = round(time.time() - start_time, 4)
    logger.info(f"Total time taken: {elapsed} seconds")
def cleanup_and_quit(driver):
    '''
    Shuts down the webdriver session (when one exists) and exits the script
    '''
    if driver:
        driver.quit()
    sys.exit()
# Wall-clock start; reported by time_taken() on exit.
start_time = time.time()

# ====================================================
# Argument parsing
# ====================================================
description = "Leave your Facebook groups quietly"
usage = "groupbunk.py username password [-h] [-eg FILE] [-et TIMEOUT] [-sw WAIT] [-gr RETRYCOUNT] [-dg FILE]"
examples="""
Examples:
groupbunk.py bob101@email.com bobspassword101
groupbunk.py bob101@email.com bobspassword101 -eg keepgroups.txt
groupbunk.py bob101@email.com bobspassword101 -et 60 --scrollwait 10 -gr 7
groupbunk.py bob101@email.com bobspassword101 --dumpgroups mygroup.txt --groupretry 5
"""
parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description=description,
    usage=usage,
    epilog=examples,
    prog='groupbunk')

# required arguments
parser.add_argument('username', type=str, help='Facebook username')
parser.add_argument('password', type=str, help='Facebook password')

# optional arguments
parser.add_argument('-eg', '--exgroups', type=str, metavar='', help='file with group names to exclude (one group per line)')
parser.add_argument('-et', '--eltimeout', type=int, metavar='', help='max timeout for elements to be loaded', default=30)
parser.add_argument('-sw', '--scrollwait', type=int, metavar='', help='time to wait after each scroll', default=4)
parser.add_argument('-gr', '--groupretry', type=int, metavar='', help='retry count while recapturing group names', default=5)
parser.add_argument('-dg', '--dumpgroups', type=str, metavar='', help='do not leave groups; only dump group names to a file')
parser.add_argument('-v', '--version', action='version', version='%(prog)s v.1.2')

args = parser.parse_args()

# ====================================================
# Setting up logger
# =====================================================
# Two sinks: a timestamped UTF-8 file log and a terse "[*] =>" stdout echo.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

formatter = logging.Formatter("%(asctime)s:%(name)s:%(lineno)d:%(levelname)s:%(message)s")
file_handler = logging.FileHandler(f'groupbunk_{datetime.now().strftime("%d_%m_%Y__%H_%M_%S")}.log', 'w', 'utf-8')
file_handler.setFormatter(formatter)

stdout_formatter = logging.Formatter("[*] => %(message)s")
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(stdout_formatter)

logger.addHandler(file_handler)
logger.addHandler(stdout_handler)
#=======================================================
# Bug fix: pre-bind so the `finally` block below can never hit a NameError
# when an exception fires before the webdriver is created.
driver = None

try:
    display_intro()
    logger.info("script started")

    # loading group names to be excluded
    if args.exgroups:
        logger.info("Loading group names to be excluded")
        excluded_group_names = get_excluded_group_names(args.exgroups)
        IGNORE_DIV.extend(excluded_group_names)

    options = Options()
    # supresses notifications
    options.add_argument("--disable-notifications")
    options.add_experimental_option('excludeSwitches', ['enable-logging'])
    options.add_argument("--log-level=3")

    logger.info("Downloading latest chrome webdriver")
    # UNCOMMENT TO SPECIFY DRIVER LOCATION
    # driver = webdriver.Chrome("D:/chromedriver/98/chromedriver.exe", options=options)
    driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
    if not driver:
        raise Exception('Unable to download chrome webdriver for your version of Chrome browser')
    logger.info("Successfully downloaded chrome webdriver")

    wait = WebDriverWait(driver, args.eltimeout)

    logger.info(f"Opening FB GROUPS URL: {FB_GROUP_URL}")
    driver.get(FB_GROUP_URL)

    # Log in with the supplied credentials.
    logger.info("Sending username")
    wait.until(EC.visibility_of_element_located((By.ID, 'email'))).send_keys(args.username)
    logger.info("Sending password")
    driver.find_element(By.ID, 'pass').send_keys(args.password)
    logger.info("Clicking on Log In")
    wait.until(EC.presence_of_element_located((By.ID, 'loginbutton'))).click()

    # get all the links inside divs representing group names
    group_links = get_group_link_elements(driver, wait)
    if not group_links:
        raise Exception("Unable to find links")

    no_of_currently_loaded_links = 0
    logger.info(f"Initial link count: {len(group_links)-3}")
    logger.info("Scrolling down to capture all the links")

    # scroll until no new group links are loaded
    while len(group_links) > no_of_currently_loaded_links:
        no_of_currently_loaded_links = len(group_links)
        logger.info(f"Updated link count: {no_of_currently_loaded_links-3}")
        scroll_into_view(driver, group_links[no_of_currently_loaded_links-1])
        time.sleep(args.scrollwait)
        # re-capturing
        group_links = get_group_link_elements(driver, wait)

    logger.info(f"Total number of links found: {len(group_links)-3}")

    # only show the group names and exit
    if args.dumpgroups:
        logger.info('Only dumping group names to file. Not leaving groups')
        logger.info(f"Dumping group names to: {args.dumpgroups}")
        dump_groups(group_links, args.dumpgroups)
        time_taken(start_time, logger)
        cleanup_and_quit(driver)

    # first 3 links are for: Your feed, Discover, Your notifications
    i = 0
    save_state = 0
    no_of_retries = 0
    failed_groups = []
    total_groups = len(group_links)
    # Bug fix: pre-bind so the generic `except` logging below cannot hit a
    # NameError when the failure happens before group_name is assigned.
    group_name = ''

    while i < total_groups:
        try:
            # need only the group name and not Last Active
            group_name = group_links[i].text.split('\n')[0]
            # if group name not in ignore list
            if group_name.lower() not in IGNORE_DIV:
                logger.info(f"Leaving group: {group_name}")
                link = group_links[i].get_attribute('href')
                logger.info(f"Opening group link: {link}")
                # open the group in a new tab, attempt to leave, close tab
                switch_tab(driver, open_new_tab(driver))
                driver.get(link)
                if not leave_group(wait):
                    logger.info('Unable to leave the group. You might not be a member of this group.')
                driver.close()
                switch_tab(driver, driver.window_handles[0])
            else:
                if group_name.lower() not in ['your feed', 'discover', 'your notifications']:
                    logger.info(f"Skipping group : {group_name}")
            i += 1
        except StaleElementReferenceException:
            # DOM changed under us; recapture the link elements and resume.
            logger.error('Captured group elements gone stale. Recapturing...')
            if no_of_retries > args.groupretry:
                logger.error('Reached max number of retry attempts')
                break
            save_state = i
            group_links = get_group_link_elements(driver, wait)
            no_of_retries += 1
        except Exception as ex:
            logger.error(f"Unable to leave group {group_name}. Error: {ex}")
            failed_groups.append(group_name)
            i += 1

    total_no_of_groups = len(group_links)-3
    total_no_failed_groups = len(failed_groups)
    logger.info(f"Total groups: {total_no_of_groups}")
    logger.info(f"No. of groups failed to leave: {total_no_failed_groups}")
    logger.info(f"Success percentage: {((total_no_of_groups - total_no_failed_groups)/total_no_of_groups) * 100} %")
    if failed_groups:
        failed_group_names = ", ".join(failed_groups)
        logger.info(f"Failed groups: \n{failed_group_names}")

except Exception as ex:
    logger.error(f"Script ended with exception: {ex}")

finally:
    time_taken(start_time, logger)
    cleanup_and_quit(driver)
| 35.97541
| 127
| 0.670084
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,822
| 0.435407
|
f72d8677c20fa3e3a54169d4eb48cb7ca7458055
| 11,575
|
py
|
Python
|
OneSpanAnalysis_Mdl.py
|
Ivanfdezr/CentralSoftware
|
8681fedd4814dc60deb527a370411350b40c994c
|
[
"MIT"
] | null | null | null |
OneSpanAnalysis_Mdl.py
|
Ivanfdezr/CentralSoftware
|
8681fedd4814dc60deb527a370411350b40c994c
|
[
"MIT"
] | 44
|
2021-02-10T23:58:28.000Z
|
2021-12-14T02:38:21.000Z
|
OneSpanAnalysis_Mdl.py
|
Ivanfdezr/CentralSoftware
|
8681fedd4814dc60deb527a370411350b40c994c
|
[
"MIT"
] | null | null | null |
import numpy as np
import numpy.linalg as la
from MdlUtilities import Field, FieldList
import MdlUtilities as mdl
def get_osaCasing_fields():
	"""Assemble the FieldList describing the casing: OD, ID, weight, density and E."""
	casing_fields = FieldList()
	# Field codes, in display order: OD, ID, Weight, Density, E.
	for field_code in (2030, 2031, 2032, 2039, 2040):
		casing_fields.append(Field(field_code))
	return casing_fields
def get_osaCent_fields():
	"""Assemble the FieldList describing a centralizer and its restoring-force data."""
	centralizer_type = Field(2049)
	inner_pipe_od    = Field(2009)
	centralizer_od   = Field(2011)
	# Field(2012) (centralizer ID) is intentionally left out of the list.
	force_at_so67    = Field(2018)
	minimum_force    = Field(2017)
	so_at_min_force  = Field(2019)
	# Override the default display names of the force-related fields.
	force_at_so67.set_representation('Res. Force @ SO=67%')
	minimum_force.set_representation('minimum Res. Force')
	so_at_min_force.set_representation('StandOff @ min. Res. F.')
	cent_fields = FieldList()
	for field in (centralizer_type, inner_pipe_od, centralizer_od,
	              force_at_so67, minimum_force, so_at_min_force):
		cent_fields.append(field)
	return cent_fields
def get_osaWellbore_fields():
	"""Assemble the FieldList describing the wellbore geometry and mud densities."""
	# NOTE(review): both mud densities share field code 2077 — presumably a
	# common density template distinguished only by name; confirm intent.
	specs = (
		(2010, 'Hole ID',         'HoleID'),
		(2061, 'Max span',        'MaxSpan'),
		(2077, 'Mud inside pipe', 'MudIPDensity'),
		(2077, 'Mud in annulus',  'MudOPDensity'),
	)
	wellbore_fields = FieldList()
	for code, representation, abbreviation in specs:
		field = Field(code)
		field.set_representation(representation)
		field.set_abbreviation(abbreviation)
		wellbore_fields.append(field)
	return wellbore_fields
def get_osaOutputdata1_fields():
	"""Assemble the output FieldList for clearance, side force and standoff at
	centralizers A/B and at mid span (all rendered with alternate colors)."""
	# (field code, display name, abbreviation) for the nine output cells.
	specs = (
		(2073, 'Annular clearance @ cent. A',  'ClearanceA'),
		(2073, 'Annular clearance @ cent. B',  'ClearanceB'),
		(2073, 'Annular clearance @ mid span', 'ClearanceM'),
		(2074, 'Side force @ cent. A',  'SideForceA'),
		(2074, 'Side force @ cent. B',  'SideForceB'),
		(2074, 'Side force @ mid span', 'SideForceM'),
		(2078, 'Standoff @ cent. A',  'StandoffA'),
		(2078, 'Standoff @ cent. B',  'StandoffB'),
		(2078, 'Standoff @ mid span', 'StandoffM'),
	)
	output_fields = FieldList()
	for code, representation, abbreviation in specs:
		field = Field(code, altBg=True, altFg=True)
		field.set_representation(representation)
		field.set_abbreviation(abbreviation)
		output_fields.append(field)
	return output_fields
def get_osaOutputdata2_fields():
	"""Assemble the output FieldList for axial force, deflection and the
	wellbore-averaged clearance/standoff (all rendered with alternate colors)."""
	specs = (
		(2075, 'Axial extra force @ top', 'AxialForce'),
		(2076, 'Max. pipe deflection',    'MaxDeflection'),
		(2073, 'Mean wellbore clearance', 'WellboreClearance'),
		(2078, 'Mean wellbore standoff',  'WellboreStandoff'),
	)
	output_fields = FieldList()
	for code, representation, abbreviation in specs:
		field = Field(code, altBg=True, altFg=True)
		field.set_representation(representation)
		field.set_abbreviation(abbreviation)
		output_fields.append(field)
	return output_fields
def get_casingDeflectionCurve(self):
	"""Compute the deflected casing curve between two centralizers.

	Reads the casing, centralizer A/B and wellbore FieldLists plus the
	inclination/spacing sliders from ``self``, evaluates the beam-deflection
	model from the SPE references below, fills both output FieldLists and
	returns the curves required for plotting.

	:return: OHc, OHp, OHm (open-hole center/upper/lower curves),
	         XYc, XYp, XYm (casing center/upper/lower curves),
	         lim (plot half-span limit), and rA, rB, rM (effective radii at
	         centralizer A, centralizer B and mid span).
	:raises mdl.LogicalError: if an input is negative or the selected devices
	         are not size-consistent with the casing/hole.
	"""
	# Equation(s) Reference 1:
	# 	Hans C. Juvkam-Wold, Jiang Wu. Casing Deflection and Centralizer Spacing Calculations.
	# 	SPE Drilling Engineering (December 1992).
	# Equation(s) Reference 2:
	# 	Hans C. Juvkam-Wold, Richard L. Baxter. Discussion of Optimal Spacing for Casing Centralizers.
	# 	SPE Drilling Engineering (December 1988).
	# Equation(s) Reference 3:
	# 	Carlos F. H. Fonseca, Jacques Braile. Optimizing of Centralizer Distribution.
	# 	SPE Latin American Petroleum Engineering Conference (October 1990).

	# Convert all inputs to reference units; the inverse conversions at the
	# end restore the user-facing units.
	self.osaCasing_fields.referenceUnitConvert_fields()
	self.osaCentA_fields.referenceUnitConvert_fields()
	self.osaCentB_fields.referenceUnitConvert_fields()
	self.osaWellbore_fields.referenceUnitConvert_fields()

	# 2D rotation matrix for angle φ.
	Rot = lambda φ: np.array( [[np.cos(φ),-np.sin(φ)],[np.sin(φ),np.cos(φ)]] )

	dH = self.osaWellbore_fields.HoleID[0]
	L  = self.osaWellbore_fields.MaxSpan[0]*self.osaSpacing_slider.sliderPosition()/100
	ρe = self.osaWellbore_fields.MudOPDensity[0]
	ρi = self.osaWellbore_fields.MudIPDensity[0]
	ρs = self.osaCasing_fields.Density[0]
	E  = self.osaCasing_fields.E[0]
	w  = self.osaCasing_fields.PW[0]
	D  = self.osaCasing_fields.OD[0]
	d  = self.osaCasing_fields.ID[0]
	Type_A    = self.osaCentA_fields.Type[0]
	F_So67_A  = self.osaCentA_fields.ResF_SO67[0]
	minF_A    = self.osaCentA_fields.minResF[0]
	So_minF_A = self.osaCentA_fields.SO_minResF[0]
	DA        = self.osaCentA_fields.COD[0]
	dA        = self.osaCentA_fields.IPOD[0]
	Type_B    = self.osaCentB_fields.Type[0]
	F_So67_B  = self.osaCentB_fields.ResF_SO67[0]
	minF_B    = self.osaCentB_fields.minResF[0]
	So_minF_B = self.osaCentB_fields.SO_minResF[0]
	DB        = self.osaCentB_fields.COD[0]
	dB        = self.osaCentB_fields.IPOD[0]

	#kA = ResFA/(DA/2-0.335*(DA-D)) # These lines would compute the spring coefficients ( 0.335=0.67/2 )
	#kB = ResFB/(DB/2-0.335*(DB-D))

	# Input validation. NOTE(review): the check rejects only negatives, so a
	# zero value passes despite the message — confirm intent.
	for field in self.osaWellbore_fields:
		if field[0]<0:
			raise mdl.LogicalError('Every parameter should be greater than zero.')
	for field in self.osaCasing_fields:
		if field[0]<0:
			raise mdl.LogicalError('Every parameter should be greater than zero.')
	for field in self.osaCentA_fields[1:]:
		if field[0]<0:
			raise mdl.LogicalError('Every parameter should be greater than zero.')
	for field in self.osaCentB_fields[1:]:
		if field[0]<0:
			raise mdl.LogicalError('Every parameter should be greater than zero.')
	# Centralizer bores must match the casing OD and the casing must fit the hole.
	if dA!=D or dB!=D or dH<=D:
		raise mdl.LogicalError('The selected devices are not size-consistent.')

	θ = np.pi*self.osaInclination_slider.sliderPosition()/180
	I = np.pi/64*(D**4-d**4) # [Ref.3] Moment of inertia (not the polar moment of inertia).
	F = 30000 # [Ref.1]
	Radio = L*1e6  # presumably a quasi-infinite curvature radius (straight hole) — confirm
	aspr = L*0.02  # scale factor that exaggerates radial distances for plotting
	buoyancyFactor = mdl.calculate_buoyancyFactor( OD=D, ID=d, ρs=ρs, ρe=ρe, ρi=ρi ) # [Ref.2]
	w *= buoyancyFactor
	fC = w*L*np.sin(θ)/2  # lateral load taken by each span end (feeds the SideForce outputs)

	# Centralizer compression: resin types have no spring action; otherwise a
	# linear stiffness is derived from the two catalog points.
	if Type_A=='Resin': #mdl.isNoneEntry(ResFA):
		yA = 0
		dA = d
	else:
		kA = 2*(F_So67_A-minF_A)/(So_minF_A-0.67)/(DA-dA)
		yA = fC/kA if (DA<dH) else fC/kA/2
	if Type_B=='Resin': #mdl.isNoneEntry(ResFB):
		yB = 0
		dB = d
	else:
		kB = 2*(F_So67_B-minF_B)/(So_minF_B-0.67)/(DB-dB)
		yB = fC/kB if (DB<dH) else fC/kB/2

	R  = D/2
	rH = dH/2
	# Floor: a compressed centralizer keeps at least 10% of its blade height.
	rA_min = R+(DA/2-R)*0.1
	rB_min = R+(DB/2-R)*0.1
	rA = (DA/2-yA) if (DA<dH) else (rH-yA)
	rB = (DB/2-yB) if (DB<dH) else (rH-yB)
	rA = rA_min if (rA<=rA_min) else rA
	rB = rB_min if (rB<=rB_min) else rB

	# Tilt of the chord joining the two centralizer contact points.
	α  = np.arctan( (rB-rA)/L )
	Lα = L/np.cos(α)
	x = np.linspace( 0, Lα, 101 )
	K = np.sqrt(F/E/I)
	# Beam deflection under axial force F and distributed lateral load.
	y = (Lα/2/Radio/K + w*Lα*np.sin(θ)/2/K/F)*( (np.cosh(K*x)-1)/np.tanh(K*Lα/2) + K*x - np.sinh(K*x) ) - w*np.sin(θ)/2/F*x**2 # [Ref.1]

	# Rotate the curve into the chord frame and shift it to the hole wall.
	Rα = Rot(α)
	xy = np.array([x,y])
	x,y = np.dot(Rα,xy)
	Δy = rH-rB
	y += Δy
	cH = rH-R
	cA = rA-R
	cB = rB-R
	# Clip the deflection to the annular clearance (pipe cannot cross the wall).
	indexes = y>cH
	y[indexes] = cH
	indexes = y<-cH
	y[indexes] =-cH
	cy = cH-y
	rM = rH-y[50]  # mid-span value (sample 50 of 101)
	# If the pipe touches the wall at mid span, the side load moves there.
	if y[50]==cH:
		fM = fC
		fC = 0
	else:
		fM = 0
	cM = rM-R

	# Center the span around x=0 and build the plot curves (radial offsets
	# exaggerated by `aspr`), then rotate them by the hole inclination.
	x -= L/2
	yoh = y*0
	ohc = np.array([x, yoh])
	ohp = np.array([x, (yoh+rH)*aspr])
	ohm = np.array([x, (yoh-rH)*aspr])
	xyc = np.array([x, y*aspr])
	xyp = np.array([x, (y+R)*aspr])
	xym = np.array([x, (y-R)*aspr])
	φ = θ + np.pi/2
	Rφ = Rot(φ)
	OHc = np.dot(Rφ,ohc)
	OHp = np.dot(Rφ,ohp)
	OHm = np.dot(Rφ,ohm)
	XYc = np.dot(Rφ,xyc)
	XYp = np.dot(Rφ,xyp)
	XYm = np.dot(Rφ,xym)

	# Standoff ratios: 1 = perfectly centered, 0 = touching the wall.
	SA = cA/cH
	SB = cB/cH
	SM = cM/cH
	Sy = cy/cH
	δ = (cA+cB)/2-cM  # mid-span sag relative to the centralizer line

	self.osaOutputdata1_fields.clear_content()
	self.osaOutputdata2_fields.clear_content()
	self.osaOutputdata1_fields.ClearanceA.append( mdl.physicalValue( cA, self.osaOutputdata1_fields.ClearanceA.referenceUnit ) )
	self.osaOutputdata1_fields.ClearanceB.append( mdl.physicalValue( cB, self.osaOutputdata1_fields.ClearanceB.referenceUnit ) )
	self.osaOutputdata1_fields.ClearanceM.append( mdl.physicalValue( cM, self.osaOutputdata1_fields.ClearanceM.referenceUnit ) )
	self.osaOutputdata1_fields.SideForceA.append( mdl.physicalValue( fC, self.osaOutputdata1_fields.SideForceA.referenceUnit ) )
	self.osaOutputdata1_fields.SideForceB.append( mdl.physicalValue( fC, self.osaOutputdata1_fields.SideForceB.referenceUnit ) )
	self.osaOutputdata1_fields.SideForceM.append( mdl.physicalValue( fM, self.osaOutputdata1_fields.SideForceM.referenceUnit ) )
	self.osaOutputdata1_fields.StandoffA.append( mdl.physicalValue( SA, self.osaOutputdata1_fields.StandoffA.referenceUnit ) )
	self.osaOutputdata1_fields.StandoffB.append( mdl.physicalValue( SB, self.osaOutputdata1_fields.StandoffB.referenceUnit ) )
	self.osaOutputdata1_fields.StandoffM.append( mdl.physicalValue( SM, self.osaOutputdata1_fields.StandoffM.referenceUnit ) )
	self.osaOutputdata2_fields.AxialForce.append( mdl.physicalValue( w*L*np.cos(θ), self.osaOutputdata2_fields.AxialForce.referenceUnit ) )
	self.osaOutputdata2_fields.MaxDeflection.append( mdl.physicalValue( δ, self.osaOutputdata2_fields.MaxDeflection.referenceUnit ) )
	self.osaOutputdata2_fields.WellboreClearance.append( mdl.physicalValue( np.mean(cy), self.osaOutputdata2_fields.WellboreClearance.referenceUnit ) )
	self.osaOutputdata2_fields.WellboreStandoff.append( mdl.physicalValue( np.mean(Sy), self.osaOutputdata2_fields.WellboreStandoff.referenceUnit ) )

	# Restore user-facing units on every FieldList.
	self.osaCasing_fields.inverseReferenceUnitConvert_fields()
	self.osaCentA_fields.inverseReferenceUnitConvert_fields()
	self.osaCentB_fields.inverseReferenceUnitConvert_fields()
	self.osaWellbore_fields.inverseReferenceUnitConvert_fields()
	self.osaOutputdata1_fields.inverseReferenceUnitConvert_fields()
	self.osaOutputdata2_fields.inverseReferenceUnitConvert_fields()

	lim = L/2*1.05
	return OHc, OHp, OHm, XYc, XYp, XYm, lim, rA, rB, rM
| 35.506135
| 149
| 0.723629
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,749
| 0.150542
|
f72d949d658d47131c4a502292aadd093d90b245
| 212
|
py
|
Python
|
test-examples/million_points.py
|
tlambert03/image-demos
|
a2974bcc7f040fd4d14e659c4cbfeabcf726c707
|
[
"BSD-3-Clause"
] | null | null | null |
test-examples/million_points.py
|
tlambert03/image-demos
|
a2974bcc7f040fd4d14e659c4cbfeabcf726c707
|
[
"BSD-3-Clause"
] | null | null | null |
test-examples/million_points.py
|
tlambert03/image-demos
|
a2974bcc7f040fd4d14e659c4cbfeabcf726c707
|
[
"BSD-3-Clause"
] | null | null | null |
"""Test converting an image to a pyramid.
"""
import numpy as np
import napari
points = np.random.randint(100, size=(50_000, 2))
with napari.gui_qt():
viewer = napari.view_points(points, face_color='red')
| 19.272727
| 57
| 0.712264
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 50
| 0.235849
|
f72ddd7241194452b55a3968e1f8f4807cdc48eb
| 1,166
|
py
|
Python
|
pact/test/test_constants.py
|
dwang7/pact-python
|
da03551e812508652e062fc4ba6071f1119e5bf2
|
[
"MIT"
] | null | null | null |
pact/test/test_constants.py
|
dwang7/pact-python
|
da03551e812508652e062fc4ba6071f1119e5bf2
|
[
"MIT"
] | null | null | null |
pact/test/test_constants.py
|
dwang7/pact-python
|
da03551e812508652e062fc4ba6071f1119e5bf2
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from mock import patch
from .. import constants
class mock_service_exeTestCase(TestCase):
    """Verify constants.mock_service_exe picks the right executable per platform."""

    def setUp(self):
        super(mock_service_exeTestCase, self).setUp()
        self.addCleanup(patch.stopall)
        os_patcher = patch.object(constants, 'os', autospec=True)
        self.mock_os = os_patcher.start()

    def test_other(self):
        """Non-Windows platforms get the plain script name."""
        self.mock_os.name = 'posix'
        result = constants.mock_service_exe()
        self.assertEqual(result, 'pact-mock-service')

    def test_windows(self):
        """Windows gets the .bat wrapper."""
        self.mock_os.name = 'nt'
        result = constants.mock_service_exe()
        self.assertEqual(result, 'pact-mock-service.bat')
class provider_verifier_exeTestCase(TestCase):
    """Verify constants.provider_verifier_exe picks the right executable per platform."""

    def setUp(self):
        super(provider_verifier_exeTestCase, self).setUp()
        self.addCleanup(patch.stopall)
        os_patcher = patch.object(constants, 'os', autospec=True)
        self.mock_os = os_patcher.start()

    def test_other(self):
        """Non-Windows platforms get the plain script name."""
        self.mock_os.name = 'posix'
        result = constants.provider_verifier_exe()
        self.assertEqual(result, 'pact-provider-verifier')

    def test_windows(self):
        """Windows gets the .bat wrapper."""
        self.mock_os.name = 'nt'
        result = constants.provider_verifier_exe()
        self.assertEqual(result, 'pact-provider-verifier.bat')
| 30.684211
| 79
| 0.679245
| 1,080
| 0.926244
| 0
| 0
| 0
| 0
| 0
| 0
| 124
| 0.106346
|
f72eb585890bafe8941f0c78a9d950477be13230
| 2,555
|
py
|
Python
|
backtrader/backtrader/indicators/__init__.py
|
harshabakku/live-back-testing-trader
|
1fd69c7598dc15bea740f160eed886f396bcba2c
|
[
"MIT"
] | 1
|
2021-07-14T22:04:08.000Z
|
2021-07-14T22:04:08.000Z
|
backtrader/backtrader/indicators/__init__.py
|
ajmal017/LiveBackTestingTrader
|
8b4f5804c0aa6046128f6706582f9cde78a0519a
|
[
"MIT"
] | null | null | null |
backtrader/backtrader/indicators/__init__.py
|
ajmal017/LiveBackTestingTrader
|
8b4f5804c0aa6046128f6706582f9cde78a0519a
|
[
"MIT"
] | 3
|
2021-03-07T16:29:40.000Z
|
2022-03-17T21:42:38.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015, 2016, 2017 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from backtrader import Indicator
from backtrader.functions import *
# The modules below should/must define __all__ with the Indicator objects
# of prepend an "_" (underscore) to private classes/variables
from .basicops import *
# base for moving averages
from .mabase import *
# moving averages (so envelope and oscillators can be auto-generated)
from .sma import *
from .ema import *
from .smma import *
from .wma import *
from .dema import *
from .kama import *
from .zlema import *
from .hma import *
from .zlind import *
from .dma import *
# depends on moving averages
from .deviation import *
# depend on basicops, moving averages and deviations
from .atr import *
from .aroon import *
from .bollinger import *
from .cci import *
from .crossover import *
from .dpo import *
from .directionalmove import *
from .envelope import *
from .heikinashi import *
from .lrsi import *
from .macd import *
from .momentum import *
from .oscillator import *
from .percentchange import *
from .percentrank import *
from .pivotpoint import *
from .prettygoodoscillator import *
from .priceoscillator import *
from .psar import *
from .rsi import *
from .stochastic import *
from .trix import *
from .tsi import *
from .ultimateoscillator import *
from .williams import *
from .rmi import *
from .awesomeoscillator import *
from .accdecoscillator import *
from .dv2 import * # depends on percentrank
# Depends on Momentum
from .kst import *
from .ichimoku import *
from .hurst import *
from .ols import *
from .hadelta import *
| 28.076923
| 79
| 0.699413
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,250
| 0.489237
|
f72f3f991d29cfcde8c404665347a2b2067bd01a
| 3,145
|
py
|
Python
|
tests/test_game_map.py
|
brittleshinpass/mossbread
|
6a225e5d11fdf1957d1bfe74c5a76d105561e12e
|
[
"MIT"
] | 1
|
2020-05-30T19:45:58.000Z
|
2020-05-30T19:45:58.000Z
|
tests/test_game_map.py
|
brittleshinpass/mossbread
|
6a225e5d11fdf1957d1bfe74c5a76d105561e12e
|
[
"MIT"
] | null | null | null |
tests/test_game_map.py
|
brittleshinpass/mossbread
|
6a225e5d11fdf1957d1bfe74c5a76d105561e12e
|
[
"MIT"
] | null | null | null |
import pytest
from array import array
from game_map import GameMap
from tests.conftest import get_relative_path
# Expected contents of the 21x21 fixture map. The rows are written here
# top-down for readability and then reversed, so row index 0 is the bottom
# of the map — matching the bottom-up indexing exercised by
# test_game_map_from_file (get(16, 2) == 0 vs get(16, 18) == 1).
# Per the traversability tests below, tile value 0 is open and 1 is not.
sample_map_data = tuple(
    reversed(
        (
            array("I", (0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0)),
            array("I", (0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0)),
            array("I", (1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1)),
            array("I", (1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1)),
            array("I", (1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
            array("I", (1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
            array("I", (1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
            array("I", (1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
            array("I", (1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
            array("I", (1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
            array("I", (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)),
            array("I", (1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
            array("I", (1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
            array("I", (1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
            array("I", (1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
            array("I", (1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
            array("I", (1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
            array("I", (1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)),
            array("I", (1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1)),
            array("I", (0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0)),
            array("I", (0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0)),
        )
    )
)
def test_game_map_from_file(sample_game_map, sample_tiles):
    """Loading the fixture map yields the expected dimensions, tiles and data."""
    game_map = sample_game_map
    assert (game_map.width, game_map.height) == (21, 21)
    assert game_map.map_data == sample_map_data
    assert game_map.tile_data == sample_tiles
    # The map is read bottom-up: the same column holds different tiles at
    # low and high y.
    assert game_map.get(16, 2) == 0
    assert game_map.get(16, 18) == 1
def test_game_map_get_out_of_bounds(sample_game_map):
    """Every out-of-bounds coordinate on the 21x21 map must raise AssertionError.

    Each coordinate gets its own ``pytest.raises`` block: in the previous
    version all six calls shared one block, so only the first call ever
    executed — the raised AssertionError exits the ``with`` immediately and
    the remaining five calls were dead code.
    """
    out_of_bounds = ((-1, 0), (0, -1), (-1, -1), (21, 0), (0, 21), (21, 21))
    for x, y in out_of_bounds:
        with pytest.raises(AssertionError):
            sample_game_map.get(x, y)
def test_game_map_load_mapfile_nonrectangular():
    """A map file whose rows differ in length must be rejected with AssertionError."""
    with pytest.raises(AssertionError):
        GameMap.load_mapfile(get_relative_path("fixtures/map_nonrectangular.csv"))
def test_game_map_traversable(sample_game_map):
    """Traversability follows the tile data: open tiles pass, wall tiles don't."""
    for x, y in ((2, 2), (16, 2)):
        assert sample_game_map.traversable(x, y)
    for x, y in ((1, 1), (16, 18)):
        assert not sample_game_map.traversable(x, y)
| 46.25
| 88
| 0.475994
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 125
| 0.039746
|
f7300289bf48754135726dad8a8c684a9ab7d495
| 14,855
|
py
|
Python
|
queryable_properties/managers.py
|
W1ldPo1nter/django-queryable-properties
|
9bb4ecb4fbdd7a9e0f610f937c8101a643027fb1
|
[
"BSD-3-Clause"
] | 36
|
2019-10-22T11:44:37.000Z
|
2022-03-15T21:27:03.000Z
|
queryable_properties/managers.py
|
W1ldPo1nter/django-queryable-properties
|
9bb4ecb4fbdd7a9e0f610f937c8101a643027fb1
|
[
"BSD-3-Clause"
] | 6
|
2020-10-03T15:13:26.000Z
|
2021-09-25T14:05:50.000Z
|
queryable_properties/managers.py
|
W1ldPo1nter/django-queryable-properties
|
9bb4ecb4fbdd7a9e0f610f937c8101a643027fb1
|
[
"BSD-3-Clause"
] | 3
|
2021-04-26T08:30:46.000Z
|
2021-08-18T09:04:49.000Z
|
# encoding: utf-8
from __future__ import unicode_literals
import six
from django.db.models import Manager
from django.db.models.query import QuerySet
from .compat import (ANNOTATION_SELECT_CACHE_NAME, ANNOTATION_TO_AGGREGATE_ATTRIBUTES_MAP, chain_query, chain_queryset,
ModelIterable, ValuesQuerySet)
from .exceptions import QueryablePropertyDoesNotExist, QueryablePropertyError
from .query import QueryablePropertiesQueryMixin
from .utils import get_queryable_property
from .utils.internal import InjectableMixin, QueryPath, QueryablePropertyReference
class QueryablePropertiesIterable(InjectableMixin):
    """
    An iterable that yields the actual results of a queryset while correctly
    processing columns of queryable properties. It is closely related to
    Django's BaseIterable and will be used as a mixin for its subclasses in all
    (recent) Django versions that have it. In all other (older) versions, this
    class will be used as a standalone iterable instead.
    """

    def __init__(self, queryset, *args, **kwargs):
        """
        Initialize a new iterable for the given queryset. If an iterable is
        given it will be used to retrieve the model instances before applying
        queryable properties logic (standalone usage for older Django
        versions). Otherwise, the __iter__ implementation of the base class
        is used to get the model instances (usage as mixin).

        :param QuerySet queryset: The queryset to perform the database query
                                  for.
        :param collections.Iterable iterable: The optional iterable to use for
                                              standalone usage.
        :param args: Positional arguments to pass through to the base class
                     initialization when used as a mixin.
        :param kwargs: Keyword arguments to pass through to the base class
                       initialization when used as a mixin.
        :keyword collections.Iterable iterable: The optional iterable to use
                                                for standalone usage.
        """
        self.queryset = queryset
        # Only perform the super call if the class is used as a mixin
        if self.__class__.__bases__ != (InjectableMixin,):
            super(QueryablePropertiesIterable, self).__init__(queryset, *args, **kwargs)
        # Standalone usage supplies 'iterable' explicitly; mixin usage falls
        # back to the base class' __iter__.
        self.iterable = kwargs.get('iterable') or super(QueryablePropertiesIterable, self).__iter__()
        # Attribute renaming is only needed when model instances are yielded;
        # .values()/.values_list() rows have no property descriptors to guard.
        self.yields_model_instances = ((ModelIterable is not None and isinstance(self, ModelIterable)) or
                                       (ValuesQuerySet is not None and not isinstance(self.queryset, ValuesQuerySet)))

    def __iter__(self):
        """
        Yield the model objects for the queryset associated with this iterator
        with their correctly processed selected queryable properties.

        :return: A generator that yields the model objects.
        """
        original_query = self.queryset.query
        try:
            # Work on a clone of the query so the alias renaming performed
            # below never leaks into the queryset's own query object.
            self.queryset.query = chain_query(original_query)
            final_aliases = self._setup_queryable_properties()

            for obj in self.iterable:
                if self.yields_model_instances:
                    # Retrieve the annotation values from each renamed
                    # attribute and use it to populate the cache for the
                    # corresponding queryable property on each object while
                    # removing the weird, renamed attributes.
                    for changed_name, property_ref in six.iteritems(final_aliases):
                        value = getattr(obj, changed_name)
                        delattr(obj, changed_name)
                        if property_ref:
                            property_ref.descriptor.set_cached_value(obj, value)
                yield obj
        finally:
            # Always restore the original query object on the queryset.
            self.queryset.query = original_query

    def _setup_queryable_properties(self):
        """
        Perform the required setup to correctly process queryable property
        values.

        Change the internal aliases of the annotations that belong to queryable
        properties in the query of the associated queryset to something unique
        and return a dictionary mapping the queryable properties to the changed
        aliases. This is necessary to allow Django to populate the annotation
        attributes on the resulting model instances, which would otherwise call
        the setter of the queryable properties. This way, Django can populate
        attributes with different names and avoid using the setter methods.

        Also make sure that ordering by queryable properties works in older
        Django versions.

        :return: A dictionary mapping the final aliases for queryable
                 properties to the corresponding references to be able to
                 retrieve the values from the DB and apply them to the correct
                 property. The property reference may be None, indicating that
                 the retrieved value should be discarded.
        :rtype: dict[str, QueryablePropertyReference | None]
        """
        query = self.queryset.query
        final_aliases = {}
        select = dict(query.annotation_select)

        for property_ref in query._queryable_property_annotations:
            annotation_name = six.text_type(property_ref.full_path)

            # Older Django versions don't work with the annotation select dict
            # when it comes to ordering, so queryable property annotations used
            # for ordering need special treatment.
            order_by_occurrences = []
            if ANNOTATION_TO_AGGREGATE_ATTRIBUTES_MAP:  # pragma: no cover
                order_by_occurrences = [index for index, field_name in enumerate(query.order_by)
                                        if field_name in (annotation_name, '-{}'.format(annotation_name))]
                if order_by_occurrences and annotation_name not in select and annotation_name in query.annotations:
                    # Ordering-only annotation: select it so the ordering can
                    # work, but mark its value for disposal (reference None).
                    select[annotation_name] = query.annotations[annotation_name]
                    final_aliases[annotation_name] = None

            if not self.yields_model_instances or annotation_name not in select:
                # The queryable property annotation does not require selection
                # or no renaming needs to occur since the queryset doesn't
                # yield model instances.
                continue

            # Suffix the original annotation name with the lookup separator to
            # create a non-clashing name: both model field an queryable
            # property names are not allowed to contain the separator and a
            # relation path ending with the separator would be invalid as well.
            changed_name = six.text_type(property_ref.full_path + '')
            final_aliases[changed_name] = final_aliases.pop(annotation_name, property_ref)
            select[changed_name] = select.pop(annotation_name)
            for index in order_by_occurrences:  # pragma: no cover
                # Apply the changed names to the ORDER BY clause.
                query.order_by[index] = query.order_by[index].replace(annotation_name, changed_name)

        # Patch the correct select property on the query with the new names,
        # since this property is used by the SQL compiler to build the actual
        # SQL query (which is where the changed names should be used).
        setattr(query, ANNOTATION_SELECT_CACHE_NAME, select)
        return final_aliases
class QueryablePropertiesQuerySetMixin(InjectableMixin):
    """
    A mixin for Django's :class:`django.db.models.QuerySet` objects that allows
    to use queryable properties in filters, annotations and update queries.
    """

    def init_injected_attrs(self):
        """Replace this queryset's query with a queryable-properties-aware clone."""
        # To work correctly, a query using the QueryablePropertiesQueryMixin is
        # required. If the current query is not using the mixin already, it
        # will be dynamically injected into the query. That way, other Django
        # extensions using custom query objects are also supported.
        class_name = 'QueryableProperties' + self.query.__class__.__name__
        self.query = QueryablePropertiesQueryMixin.inject_into_object(chain_query(self.query), class_name)

    @property
    def _iterable_class(self):
        # Override the regular _iterable_class attribute of recent Django
        # versions with a property that also stores the value in the instance
        # dict, but automatically mixes the QueryablePropertiesModelIterable
        # into the base class on getter access if the base class yields model
        # instances. That way, the queryable properties extensions stays
        # compatible to custom iterable classes while querysets can still be
        # pickled due to the base class being in the instance dict.
        cls = self.__dict__['_iterable_class']
        return QueryablePropertiesIterable.mix_with_class(cls, 'QueryableProperties' + cls.__name__)

    @_iterable_class.setter
    def _iterable_class(self, value):
        # Store the plain base class; the getter mixes the iterable in lazily.
        self.__dict__['_iterable_class'] = value

    def _clone(self, klass=None, *args, **kwargs):
        """Clone this queryset while keeping queryable-properties support intact."""
        if klass:  # pragma: no cover
            # In older Django versions, the class of the queryset may be
            # replaced with a dynamically created class based on the current
            # class and the value of klass while cloning (e.g when using
            # .values()). Therefore this needs to be re-injected to be on top
            # of the MRO again to enable queryable properties functionality.
            klass = QueryablePropertiesQuerySetMixin.mix_with_class(klass, 'QueryableProperties' + klass.__name__)
            args = (klass,) + args
        clone = super(QueryablePropertiesQuerySetMixin, self)._clone(*args, **kwargs)
        # Since the _iterable_class property may return a dynamically created
        # class, the value of a clone must be reset to the base class.
        if '_iterable_class' in self.__dict__:
            clone._iterable_class = self.__dict__['_iterable_class']
        return clone

    def _resolve_update_kwargs(self, **kwargs):
        """
        Look for the names of queryable properties in the given keyword
        arguments for an update query and correctly resolve them into their
        actual keyword arguments.

        :param kwargs: Keyword arguments of an update query.
        :return: A dictionary containing the resolved arguments.
        :rtype: dict
        """
        original_names = set(kwargs)
        for original_name in original_names:
            try:
                prop = get_queryable_property(self.model, original_name)
            except QueryablePropertyDoesNotExist:
                # Regular model field: leave the argument untouched.
                continue
            if not prop.get_update_kwargs:
                raise QueryablePropertyError('Queryable property "{}" does not implement queryset updating.'
                                             .format(prop))

            # Call the method recursively since queryable properties can build
            # upon each other.
            additional_kwargs = self._resolve_update_kwargs(
                **prop.get_update_kwargs(self.model, kwargs.pop(original_name)))
            # Make sure that there are no conflicting values after resolving
            # the update keyword arguments of the queryable properties.
            for additional_name, value in six.iteritems(additional_kwargs):
                if additional_name in kwargs and kwargs[additional_name] != value:
                    raise QueryablePropertyError(
                        'Updating queryable property "{prop}" would change field "{field}", but a conflicting value '
                        'was set for this field by another queryable property or explicitly in the update arguments.'
                        .format(prop=prop, field=additional_name)
                    )
                kwargs[additional_name] = value

        return kwargs

    def select_properties(self, *names):
        """
        Add the annotations of the queryable properties with the specified
        names to this query. The annotation values will be cached in the
        properties of resulting model instances, regardless of the regular
        caching behavior of the queried properties.

        :param names: Names of queryable properties.
        :return: A copy of this queryset with the added annotations.
        :rtype: QuerySet
        """
        queryset = chain_queryset(self)
        for name in names:
            property_ref = QueryablePropertyReference(get_queryable_property(self.model, name), self.model, QueryPath())
            # A full GROUP BY is required if the query is not limited to
            # certain fields. Since only certain types of queries had the
            # _fields attribute in old Django versions, fall back to checking
            # for existing selection, on which the GROUP BY would be based.
            full_group_by = not getattr(self, '_fields', self.query.select)
            # The context manager registers the annotation; no body is needed.
            with queryset.query._add_queryable_property_annotation(property_ref, full_group_by, select=True):
                pass
        return queryset

    def iterator(self, *args, **kwargs):
        """Iterate over results, applying queryable-properties processing."""
        # Recent Django versions use the associated iterable class for the
        # iterator() implementation, where the QueryablePropertiesModelIterable
        # will be already mixed in. In older Django versions, use a standalone
        # QueryablePropertiesModelIterable instead to perform the queryable
        # properties processing.
        iterable = super(QueryablePropertiesQuerySetMixin, self).iterator(*args, **kwargs)
        if '_iterable_class' not in self.__dict__:  # pragma: no cover
            return iter(QueryablePropertiesIterable(self, iterable=iterable))
        return iterable

    def update(self, **kwargs):
        """Update rows, resolving queryable property names to real field kwargs."""
        # Resolve any queryable properties into their actual update kwargs
        # before calling the base update method.
        kwargs = self._resolve_update_kwargs(**kwargs)
        return super(QueryablePropertiesQuerySetMixin, self).update(**kwargs)
class QueryablePropertiesQuerySet(QueryablePropertiesQuerySetMixin, QuerySet):
    """
    A special queryset class that allows to use queryable properties in its
    filter conditions, annotations and update queries.
    """
    # NOTE: the docstring is a sufficient class body; the redundant `pass`
    # statement was removed.
if hasattr(Manager, 'from_queryset'):
    # Modern Django: derive the manager from the queryset class so every
    # queryset method (including select_properties) is exposed automatically.
    QueryablePropertiesManager = Manager.from_queryset(QueryablePropertiesQuerySet)
else:  # pragma: no cover
    # Old Django without Manager.from_queryset: define the manager manually
    # and forward the queryable-properties-specific method.
    class QueryablePropertiesManager(Manager):

        def get_queryset(self):
            return QueryablePropertiesQuerySet(self.model, using=self._db)

        # Legacy alias used by very old Django versions.
        get_query_set = get_queryset

        def select_properties(self, *names):
            return self.get_queryset().select_properties(*names)
| 51.401384
| 120
| 0.671289
| 14,114
| 0.950118
| 1,213
| 0.081656
| 823
| 0.055402
| 0
| 0
| 7,582
| 0.510401
|
f7309823f58463b82e823f3fd4ecc77467f835fd
| 11,759
|
py
|
Python
|
pml/engineer_tests.py
|
gatapia/py_ml_utils
|
844d8b62a7c5cc0a80f4f62c0bfda092aac57ade
|
[
"MIT"
] | 183
|
2015-01-11T13:01:01.000Z
|
2022-02-08T04:45:33.000Z
|
pml/engineer_tests.py
|
gatapia/py_ml_utils
|
844d8b62a7c5cc0a80f4f62c0bfda092aac57ade
|
[
"MIT"
] | 13
|
2015-05-12T17:39:42.000Z
|
2018-07-29T18:01:38.000Z
|
pml/engineer_tests.py
|
gatapia/py_ml_utils
|
844d8b62a7c5cc0a80f4f62c0bfda092aac57ade
|
[
"MIT"
] | 166
|
2015-01-28T18:05:55.000Z
|
2022-02-08T04:45:34.000Z
|
from __future__ import print_function, absolute_import
import unittest, math
import pandas as pd
import numpy as np
from . import *
class T(base_pandas_extensions_tester.BasePandasExtensionsTester):
    """Tests for the ``DataFrame.engineer`` feature-engineering extension.

    Column naming convention used throughout: ``c_*`` columns are
    categorical, ``n_*`` columns are numerical; engineered columns are
    prefixed the same way.

    Fix: the Python-2-only builtin ``long`` (removed in Python 3) was used
    as a numpy dtype in two assertions; it is replaced with ``int``, which
    behaves identically for these integer comparisons.
    """

    def test_concat(self):
        df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f']})
        df.engineer('concat(c_1, c_2)')
        self.assertTrue(np.array_equal(df['c_concat(c_1,c_2)'].values,
                                       np.array(['ad', 'be', 'cf'], 'object')))

    def test_concat_3_cols(self):
        df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'], 'c_3': ['h', 'i', 'j']})
        df.engineer('concat(c_3, c_1, c_2)')
        self.assertTrue(np.array_equal(df['c_concat(c_3,c_1,c_2)'].values,
                                       np.array(['had', 'ibe', 'jcf'], 'object')))

    def test_concat_with_numerical_col(self):
        df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3]})
        df.engineer('concat(c_1,n_2)')
        self.assertTrue(np.array_equal(df['c_concat(c_1,n_2)'].values,
                                       np.array(['a1', 'b2', 'c3'], 'object')))

    def test_concat_with_numerical_col_3_cols(self):
        df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6]})
        df.engineer('concat(n_3,c_1,n_2)')
        self.assertTrue(np.array_equal(df['c_concat(n_3,c_1,n_2)'].values,
                                       np.array(['4a1', '5b2', '6c3'], 'object')))

    def test_multiplication(self):
        df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
        df.engineer('mult(n_2, n_3)')
        # `int` replaces the removed Python 2 `long` builtin.
        self.assertTrue(np.array_equal(df['n_mult(n_2,n_3)'].values,
                                       np.array([4, 10, 18], int)))

    def test_multiplication_3_cols(self):
        df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
        df.engineer('mult(n_2, n_3, n_4)')
        # `int` replaces the removed Python 2 `long` builtin.
        self.assertTrue(np.array_equal(df['n_mult(n_2,n_3,n_4)'].values,
                                       np.array([4*7, 80, 18*9], int)))

    def test_square_on_whole_data_frame(self):
        df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
        df.engineer('pow(2)')
        np.testing.assert_array_equal(df.values,
                                      np.array([
                                          ['a', 1, 4, 7, 1*1, 4*4, 7*7],
                                          ['b', 2, 5, 8, 2*2, 5*5, 8*8],
                                          ['c', 3, 6, 9, 3*3, 6*6, 9*9],
                                      ], 'object'))

    def test_square_on_cols(self):
        df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
        df.engineer('pow(n_3, 2)')
        np.testing.assert_array_equal(df.values,
                                      np.array([
                                          ['a', 1, 4, 7, 4*4],
                                          ['b', 2, 5, 8, 5*5],
                                          ['c', 3, 6, 9, 6*6],
                                      ], 'object'))

    def test_log_on_whole_data_frame(self):
        df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
        df.engineer('lg()')
        self.assertTrue(np.array_equal(df.values,
                                       np.array([
                                           ['a', 1, 4, 7, math.log(1), math.log(4), math.log(7)],
                                           ['b', 2, 5, 8, math.log(2), math.log(5), math.log(8)],
                                           ['c', 3, 6, 9, math.log(3), math.log(6), math.log(9)],
                                       ], 'object')))

    def test_log_on_cols(self):
        df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
        df.engineer('lg(n_3)')
        self.assertTrue(np.array_equal(df.values,
                                       np.array([
                                           ['a', 1, 4, 7, math.log(4)],
                                           ['b', 2, 5, 8, math.log(5)],
                                           ['c', 3, 6, 9, math.log(6)],
                                       ], 'object')))

    def test_sqrt_on_whole_data_frame(self):
        df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
        df.engineer('sqrt()')
        self.assertTrue(np.array_equal(df.values,
                                       np.array([
                                           ['a', 1, 4, 7, math.sqrt(1), math.sqrt(4), math.sqrt(7)],
                                           ['b', 2, 5, 8, math.sqrt(2), math.sqrt(5), math.sqrt(8)],
                                           ['c', 3, 6, 9, math.sqrt(3), math.sqrt(6), math.sqrt(9)],
                                       ], 'object')))

    def test_sqrt_on_cols(self):
        df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
        df.engineer('sqrt(n_3)')
        self.assertTrue(np.array_equal(df.values,
                                       np.array([
                                           ['a', 1, 4, 7, math.sqrt(4)],
                                           ['b', 2, 5, 8, math.sqrt(5)],
                                           ['c', 3, 6, 9, math.sqrt(6)],
                                       ], 'object')))

    def test_rolling_sum_on_single_col(self):
        df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
        col = 'rolling_sum(n_1,3)'
        df.engineer(col)
        np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_' + col])

    def test_rolling_mean_on_single_col(self):
        df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
        col = 'rolling_mean(n_1,3)'
        df.engineer(col)
        np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_' + col], rtol=1e-3)

    def test_rolling_median_on_single_col(self):
        df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
        col = 'rolling_median(n_1,3)'
        df.engineer(col)
        np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_' + col])

    def test_rolling_min_on_single_col(self):
        df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
        col = 'rolling_min(n_1,3)'
        df.engineer(col)
        np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_' + col])

    def test_rolling_max_on_single_col(self):
        df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
        col = 'rolling_max(n_1,3)'
        df.engineer(col)
        np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_' + col])

    def test_rolling_std_on_single_col(self):
        df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
        col = 'rolling_std(n_1,3)'
        df.engineer(col)
        np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_' + col], rtol=1e-3)

    def test_rolling_var_on_single_col(self):
        df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34]})
        col = 'rolling_var(n_1,3)'
        df.engineer(col)
        np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_' + col], rtol=1e-3)

    # Multiple Columns

    def test_rolling_sum_on_multi_cols(self):
        df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
        col = 'rolling_sum(3)'
        df.engineer(col)
        np.testing.assert_array_equal([np.nan, np.nan, 35, 40, 30, 29, 48], df['n_rolling_sum(n_1,3)'])
        np.testing.assert_array_equal([np.nan, np.nan, 6, 10, 10, 9, 8], df['n_rolling_sum(n_2,3)'])

    def test_rolling_mean_on_multi_cols(self):
        df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
        col = 'rolling_mean(3)'
        df.engineer(col)
        np.testing.assert_allclose([np.nan, np.nan, 11.66, 13.33, 10, 9.66, 16], df['n_rolling_mean(n_1,3)'], rtol=1e-3)
        np.testing.assert_allclose([np.nan, np.nan, 2, 3.333, 3.333, 3, 2.666], df['n_rolling_mean(n_2,3)'], rtol=1e-3)

    def test_rolling_median_on_multi_cols(self):
        df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
        col = 'rolling_median(3)'
        df.engineer(col)
        np.testing.assert_array_equal([np.nan, np.nan, 12, 13, 13, 12, 12], df['n_rolling_median(n_1,3)'])
        np.testing.assert_array_equal([np.nan, np.nan, 2, 3, 3, 2, 2], df['n_rolling_median(n_2,3)'])

    def test_rolling_min_on_multi_cols(self):
        df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
        col = 'rolling_min(3)'
        df.engineer(col)
        np.testing.assert_array_equal([np.nan, np.nan, 10, 12, 2, 2, 2], df['n_rolling_min(n_1,3)'])
        np.testing.assert_array_equal([np.nan, np.nan, 1, 2, 2, 2, 2], df['n_rolling_min(n_2,3)'])

    def test_rolling_max_on_multi_cols(self):
        df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
        col = 'rolling_max(3)'
        df.engineer(col)
        np.testing.assert_array_equal([np.nan, np.nan, 13, 15, 15, 15, 34], df['n_rolling_max(n_1,3)'])
        np.testing.assert_array_equal([np.nan, np.nan, 3, 5, 5, 5, 4], df['n_rolling_max(n_2,3)'])

    def test_rolling_std_on_multi_cols(self):
        df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
        col = 'rolling_std(3)'
        df.engineer(col)
        np.testing.assert_allclose([np.nan, np.nan, 1.528, 1.528, 7, 6.807, 16.371], df['n_rolling_std(n_1,3)'], rtol=1e-3)
        np.testing.assert_allclose([np.nan, np.nan, 1, 1.528, 1.528, 1.732, 1.1547], df['n_rolling_std(n_2,3)'], rtol=1e-3)

    def test_rolling_var_on_multi_cols(self):
        df = pd.DataFrame({'n_1': [10, 12, 13, 15, 2, 12, 34], 'n_2': [1, 2, 3, 5, 2, 2, 4]})
        col = 'rolling_var(3)'
        df.engineer(col)
        np.testing.assert_allclose([np.nan, np.nan, 2.333, 2.333, 49, 46.333, 268], df['n_rolling_var(n_1,3)'], rtol=1e-3)
        np.testing.assert_allclose([np.nan, np.nan, 1, 2.333, 2.333, 3, 1.333], df['n_rolling_var(n_2,3)'], rtol=1e-3)

    def test_method_chaining(self):
        df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'],
                           'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
        df.\
            engineer('concat(c_1, c_2)').\
            engineer('concat(c_1, n_2)').\
            engineer('mult(n_2, n_3)').\
            engineer('lg(n_2)').\
            engineer('pow(n_3, 2)')
        self.assertTrue(np.array_equal(df.values,
                                       np.array([
                                           ['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
                                           ['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
                                           ['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
                                       ], 'object')))

    def test_chaining_single_call_semi_col_sep(self):
        df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'],
                           'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
        df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)')
        self.assertTrue(np.array_equal(df.values,
                                       np.array([
                                           ['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
                                           ['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
                                           ['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
                                       ], 'object')))

    def test_chaining_single_with_arr_arg(self):
        df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'c_2': ['d', 'e', 'f'],
                           'n_2': [1, 2, 3], 'n_3': [4, 5, 6], 'n_4': [7, 8, 9]})
        df.engineer('concat(c_1, c_2);concat(c_1, n_2);mult(n_2, n_3);lg(n_2);pow(n_3, 2)'.split(';'))
        self.assertTrue(np.array_equal(df.values,
                                       np.array([
                                           ['a', 'd', 1, 4, 7, 'ad', 'a1', 4, math.log(1), 4*4],
                                           ['b', 'e', 2, 5, 8, 'be', 'b2', 10, math.log(2), 5*5],
                                           ['c', 'f', 3, 6, 9, 'cf', 'c3', 18, math.log(3), 6*6]
                                       ], 'object')))

    def test_long_method_chains(self):
        df1 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
        df2 = pd.DataFrame({'n_1': [1, 2, 3], 'n_2': [4, 5, 6]})
        # A single nested expression must be equivalent to applying the same
        # operations step by step.
        df1.engineer('mult(lg(mult(n_1, n_2)), lg(pow(n_1, 3)))')
        df2.engineer('mult(n_1,n_2);pow(n_1,3)')
        df2.engineer('lg(pow(n_1,3));lg(mult(n_1, n_2))')
        df2.engineer('mult(lg(mult(n_1,n_2)),lg(pow(n_1, 3)))')
        np.testing.assert_array_equal(df1.columns.values.sort(), df2.columns.values.sort())
        np.testing.assert_array_equal(df1['n_mult(n_1,n_2)'].values, df2['n_mult(n_1,n_2)'].values)
        np.testing.assert_array_equal(df1['n_pow(n_1,3)'], df2['n_pow(n_1,3)'])
        np.testing.assert_array_equal(df1['n_lg(pow(n_1,3))'], df2['n_lg(pow(n_1,3))'])
        np.testing.assert_array_equal(df1['n_lg(mult(n_1,n_2))'], df2['n_lg(mult(n_1,n_2))'])
        np.testing.assert_array_equal(df1['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))'], df2['n_mult(lg(mult(n_1,n_2)),lg(pow(n_1,3)))'])
| 46.478261
| 133
| 0.541628
| 11,616
| 0.987839
| 0
| 0
| 0
| 0
| 0
| 0
| 2,439
| 0.207416
|
f730db018b5a100d3b9690cd2c3518425836dcfb
| 2,353
|
py
|
Python
|
setup.py
|
wdv4758h/rsglob
|
342f950c240b5d84c629ecf4fec348401975d2ba
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
wdv4758h/rsglob
|
342f950c240b5d84c629ecf4fec348401975d2ba
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
wdv4758h/rsglob
|
342f950c240b5d84c629ecf4fec348401975d2ba
|
[
"BSD-2-Clause"
] | null | null | null |
import os
import sys
from setuptools import find_packages, setup, Extension
# pip moved parse_requirements into pip._internal in pip 10; fall back to
# the pre-10 location for older pip releases.
try:  # for pip >= 10
    from pip._internal.req import parse_requirements
except ImportError:
    from pip.req import parse_requirements

# setuptools-rust is needed at setup time to build the Rust extension.
# If it is missing, try to install it on the fly and abort with pip's
# exit code when that fails.
try:
    from setuptools_rust import RustExtension
except ImportError:
    import subprocess
    errno = subprocess.call(
        [sys.executable, '-m', 'pip', 'install', 'setuptools-rust'])
    if errno:
        print("Please install setuptools-rust package")
        raise SystemExit(errno)
    else:
        from setuptools_rust import RustExtension
def get_requirements(filename):
    """Return the list of requirement strings declared in *filename*.

    The file is resolved relative to ``ROOT_DIR``. Compatible with both
    old pip (where ``str(item)`` renders the requirement, possibly followed
    by extra text that is stripped via ``split()``) and pip >= 20, where
    ``parse_requirements()`` yields ``ParsedRequirement`` objects whose
    ``str()`` is just the default object repr — for those, the raw
    ``.requirement`` string is used instead.
    """
    # parse_requirements() returns a generator of per-line requirement objects
    install_requires = parse_requirements(
        os.path.join(ROOT_DIR, filename),
        session=False,
    )
    requirements = []
    for item in install_requires:
        # pip >= 20 exposes the raw requirement string; fall back to str()
        # for older pip versions.
        requirement = getattr(item, 'requirement', None) or str(item)
        requirements.append(str(requirement).split()[0])
    return requirements
# Directory containing this setup.py; requirement files are resolved
# relative to it so builds work from any working directory.
ROOT_DIR = os.path.dirname(os.path.realpath(__file__))

# The version is defined once, inside the package itself (rsglob.VERSION).
version = __import__('rsglob').VERSION

setup_requires = ['setuptools-rust>=0.6.0']
install_requires = get_requirements('requirements.txt')
test_requires = get_requirements('requirements-test.txt')

# The compiled Rust extension is exposed as the private module rsglob._rsglob.
rust_extensions = [RustExtension('rsglob._rsglob', 'Cargo.toml')]

setup(
    name='rsglob',
    version=version,
    url='https://github.com/wdv4758h/rsglob',
    author='Chiu-Hsiang Hsu',
    author_email='wdv4758h@gmail.com',
    description=('Python glob in Rust'),
    long_description=open("README.rst").read(),
    download_url="https://github.com/wdv4758h/rsglob/archive/v{}.zip".format(
        version
    ),
    license='BSD',
    tests_require=test_requires,
    install_requires=install_requires,
    packages=find_packages(),
    rust_extensions=rust_extensions,
    # Native extensions cannot be imported from a zipped egg.
    zip_safe=False,
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        # 'Programming Language :: Python :: 2',
        # 'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        # 'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
| 30.960526
| 83
| 0.677008
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 897
| 0.381215
|
f73176b9df2d9d3e6551836091e9a8f8bdc64a68
| 9,041
|
py
|
Python
|
src/pyrobot/habitat/base.py
|
cihuang123/pyrobot
|
fe620097e31d11453b5ea7ac15e40f5f5721b29a
|
[
"MIT"
] | 2,150
|
2019-06-12T20:55:41.000Z
|
2022-03-21T07:14:51.000Z
|
src/pyrobot/habitat/base.py
|
cihuang123/pyrobot
|
fe620097e31d11453b5ea7ac15e40f5f5721b29a
|
[
"MIT"
] | 124
|
2019-06-22T17:12:27.000Z
|
2022-02-26T11:43:13.000Z
|
src/pyrobot/habitat/base.py
|
cihuang123/pyrobot
|
fe620097e31d11453b5ea7ac15e40f5f5721b29a
|
[
"MIT"
] | 329
|
2019-06-13T03:03:54.000Z
|
2022-03-30T07:04:55.000Z
|
import numpy as np
import math
import pyrobot.utils.util as prutil
import rospy
import habitat_sim.agent as habAgent
import habitat_sim.utils as habUtils
from habitat_sim.agent.controls import ActuationSpec
import habitat_sim.errors
import quaternion
from tf.transformations import euler_from_quaternion, euler_from_matrix
class LoCoBotBase(object):
    """Mobile-base interface for the LoCoBot simulated in Habitat-Sim.

    Implements position-style control on top of Habitat-Sim's discrete
    agent actions ("move_forward", "turn_left", "turn_right"). Velocity
    control is not available in Habitat-Sim.

    Fixes vs. the previous revision (behavior otherwise unchanged): typos
    in user-facing messages ("Veclocity" -> "Velocity", "accured" ->
    "occurred", "postion" -> "position") and corrected docstrings (the
    ``close_loop`` flag enables *closed*-loop control, not open loop).
    """

    def __init__(self, configs, simulator):
        """
        :param configs: robot configuration object; must provide
                        ``configs.COMMON.SIMULATOR.DEFAULT_AGENT_ID``.
        :param simulator: wrapper exposing the habitat_sim simulator as ``.sim``.
        """
        self.configs = configs
        self.sim = simulator.sim
        self.agent = self.sim.get_agent(self.configs.COMMON.SIMULATOR.DEFAULT_AGENT_ID)
        self.transform = None
        # Pose at construction time; used as the odometry origin by get_state().
        self.init_state = self.get_full_state()

    def execute_action(self, action_name, actuation):
        # actions = "turn_right" or "turn_left" or "move_forward"
        # returns a bool showing if collided or not
        return self._act(action_name, actuation)

    def get_full_state(self):
        # Returns habitat_sim.agent.AgentState
        return self.agent.get_state()

    def _rot_matrix(self, habitat_quat):
        """Convert a habitat (x, y, z, w) quaternion into a 3x3 rotation matrix."""
        quat_list = [habitat_quat.x, habitat_quat.y, habitat_quat.z, habitat_quat.w]
        return prutil.quat_to_rot_mat(quat_list)

    def get_state(self, state_type="odom"):
        """Return the base pose ``(x, y, yaw)`` relative to the initial pose."""
        assert state_type == "odom", "Error: Only Odom state is available"
        cur_state = self.get_full_state()
        init_rotation = self._rot_matrix(self.init_state.rotation)
        # true position here refers to the relative position from
        # where `self.init_state` is treated as origin
        true_position = cur_state.position - self.init_state.position
        true_position = np.matmul(init_rotation.transpose(), true_position, dtype=np.float64)
        cur_rotation = self._rot_matrix(cur_state.rotation)
        cur_rotation = np.matmul(init_rotation.transpose(), cur_rotation, dtype=np.float64)
        (r, pitch, yaw) = euler_from_matrix(cur_rotation, axes="sxzy")
        # Habitat has y perpendicular to map where as ROS has z perpendicular
        # to the map. Where as x is same.
        # Here ROS_X = -1 * habitat_z and ROS_Y = -1 * habitat_x
        return (-1 * true_position[2], -1 * true_position[0], yaw)

    def stop(self):
        raise NotImplementedError("Velocity control is not supported in Habitat-Sim!!")

    def set_vel(self, fwd_speed, turn_speed, exe_time=1):
        raise NotImplementedError("Velocity control is not supported in Habitat-Sim!!")

    def go_to_relative(
        self, xyt_position, use_map=False, close_loop=False, smooth=False
    ):
        """
        Moves the robot to the given goal state relative to its initial pose.

        :param xyt_position: The relative goal state of the form (x,y,t)
        :param use_map: When set to "True", ensures that controller is
                        using only free space on the map to move the robot.
        :param close_loop: When set to "True", ensures that controller is
                           operating in closed loop by
                           taking account of odometry.
        :param smooth: When set to "True", ensures that the motion
                       leading to the goal is a smooth one.

        :type xyt_position: list
        :type use_map: bool
        :type close_loop: bool
        :type smooth: bool

        :return: True if successful; False otherwise (timeout, etc.)
        :rtype: bool
        """
        # None of the optional control modes are available in Habitat-Sim;
        # report the reason and fail gracefully instead of raising.
        try:
            if use_map:
                raise NotImplementedError(
                    "Using map feature is not yet supported for Habitat-Sim"
                )
            if close_loop:
                raise NotImplementedError(
                    "Closed-loop position control is not supported in Habitat-Sim!"
                )
            if smooth:
                raise NotImplementedError(
                    "Smooth position control feature is not yet for Habitat-Sim"
                )
        except Exception as error:
            print(error)
            return False

        (cur_x, cur_y, cur_yaw) = self.get_state()
        # Goal yaw is expressed relative to the current heading.
        abs_yaw = cur_yaw + xyt_position[2]
        return self._go_to_relative_pose(xyt_position[0], xyt_position[1], abs_yaw)

    def go_to_absolute(
        self, xyt_position, use_map=False, close_loop=False, smooth=False
    ):
        """
        Moves the robot to the given goal state in the world frame.

        :param xyt_position: The goal state of the form (x,y,t)
                             in the world (map) frame.
        :param use_map: When set to "True", ensures that controller is using
                        only free space on the map to move the robot.
        :param close_loop: When set to "True", ensures that controller is
                           operating in closed loop by
                           taking account of odometry.
        :param smooth: When set to "True", ensures that the motion
                       leading to the goal is a smooth one.

        :type xyt_position: list
        :type use_map: bool
        :type close_loop: bool
        :type smooth: bool

        :return: True if successful; False otherwise (timeout, etc.)
        :rtype: bool
        """
        try:
            if use_map:
                raise NotImplementedError(
                    "Using map feature is not yet supported for Habitat-Sim"
                )
            if close_loop:
                raise NotImplementedError(
                    "Closed-loop position control is not supported in Habitat-Sim!"
                )
            if smooth:
                raise NotImplementedError(
                    "Smooth position control feature is not yet for Habitat-Sim"
                )
        except Exception as error:
            print(error)
            return False

        (cur_x, cur_y, cur_yaw) = self.get_state()
        rel_X = xyt_position[0] - cur_x
        rel_Y = xyt_position[1] - cur_y
        abs_yaw = xyt_position[2]
        # convert rel_X & rel_Y from global frame to current frame
        R = np.array([[np.cos(cur_yaw), np.sin(cur_yaw)],
                      [-np.sin(cur_yaw), np.cos(cur_yaw)]])
        rel_x, rel_y = np.matmul(R, np.array([rel_X, rel_Y]).reshape(-1, 1))
        return self._go_to_relative_pose(rel_x[0], rel_y[0], abs_yaw)

    def _act(self, action_name, actuation):
        """Execute the named Habitat-Sim action with the given actuation.

        :param action_name: one of "move_forward", "turn_left", "turn_right"
        :param actuation: actuation amount passed to the control
                          (meters for translation, degrees for turns)

        :return: Whether or not the action taken resulted in a collision
        """
        did_collide = False
        act_spec = ActuationSpec(actuation)
        did_collide = self.agent.controls.action(
            self.agent.scene_node, action_name, act_spec, apply_filter=True
        )
        return did_collide

    def _go_to_relative_pose(self, rel_x, rel_y, abs_yaw):
        """Rotate-translate-rotate to reach (rel_x, rel_y) and heading abs_yaw."""
        # clip relative movements beyond 10 micrometer precision
        # this is done to improve determinism, as habitat-sim doesn't
        # seem to precisely move the robot beyond sub milimeter precision anyways
        if abs(rel_x) < 1e-5:
            rel_x = 0
        if abs(rel_y) < 1e-5:
            rel_y = 0

        if math.sqrt(rel_x ** 2 + rel_y ** 2) > 0.0:
            # rotate to point to (x, y) point
            action_name = "turn_left"
            if rel_y < 0.0:
                action_name = "turn_right"

            v1 = np.asarray([1, 0], dtype=np.float64)
            v2 = np.asarray([rel_x, rel_y], dtype=np.float64)
            cosine_angle = np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
            angle = np.arccos(cosine_angle)

            did_collide = self._act(action_name, math.degrees(angle))

            if did_collide:
                print("Error: Collision occurred while 1st rotating!")
                return False

            # move to (x,y) point
            did_collide = self._act("move_forward", math.sqrt(rel_x ** 2 + rel_y ** 2))
            if did_collide:
                print("Error: Collision occurred while moving straight!")
                return False

        # rotate to match the final yaw!
        (cur_x, cur_y, cur_yaw) = self.get_state()
        rel_yaw = abs_yaw - cur_yaw

        # clip to micro-degree precision to preserve determinism
        if abs(rel_yaw) < 1e-4:
            rel_yaw = 0

        action_name = "turn_left"
        if rel_yaw < 0.0:
            action_name = "turn_right"
            rel_yaw *= -1

        did_collide = self._act(action_name, math.degrees(rel_yaw))
        if did_collide:
            print("Error: Collision occurred while rotating!")
            return False

        return True

    def track_trajectory(self, states, controls, close_loop):
        """
        State trajectory that the robot should track.

        :param states: sequence of (x,y,t) states that the robot should track.
        :param controls: optionally specify control sequence as well.
        :param close_loop: whether to close loop on the
                           computed control sequence or not.

        :type states: list
        :type controls: list
        :type close_loop: bool

        :return: True if successful; False otherwise (timeout, etc.)
        :rtype: bool
        """
        raise NotImplementedError
| 36.603239
| 93
| 0.623825
| 8,714
| 0.963831
| 0
| 0
| 0
| 0
| 0
| 0
| 3,772
| 0.41721
|
f731ffc418c409ea5c8ec121e5505721921146e2
| 164
|
py
|
Python
|
natwork/chats/admin.py
|
Potisin/Natwork
|
a42b89f18fdd8f8ac69e56cb7184696d6883a9f7
|
[
"BSD-3-Clause"
] | null | null | null |
natwork/chats/admin.py
|
Potisin/Natwork
|
a42b89f18fdd8f8ac69e56cb7184696d6883a9f7
|
[
"BSD-3-Clause"
] | null | null | null |
natwork/chats/admin.py
|
Potisin/Natwork
|
a42b89f18fdd8f8ac69e56cb7184696d6883a9f7
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import admin
from .models import Chat
class ChatAdmin(admin.ModelAdmin):
    """Admin configuration for the Chat model."""

    # Show only the primary key column in the changelist view.
    list_display = ("pk",)


admin.site.register(Chat, ChatAdmin)
| 12.615385
| 36
| 0.737805
| 61
| 0.371951
| 0
| 0
| 0
| 0
| 0
| 0
| 4
| 0.02439
|
f732fdf1128b31b7b49d386c93aa86199f8cc84f
| 109
|
py
|
Python
|
examples/etcc.py
|
t-pimpisa/pythainlp17
|
cc6bc4991dfffd68953dcdb26fd99c22d60a4c1f
|
[
"Apache-2.0"
] | null | null | null |
examples/etcc.py
|
t-pimpisa/pythainlp17
|
cc6bc4991dfffd68953dcdb26fd99c22d60a4c1f
|
[
"Apache-2.0"
] | null | null | null |
examples/etcc.py
|
t-pimpisa/pythainlp17
|
cc6bc4991dfffd68953dcdb26fd99c22d60a4c1f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from pythainlp.tokenize import etcc

# Example: segment Thai text into Enhanced Thai Character Clusters (ETCC).
# The trailing comment shows the expected segmented output.
print(etcc.etcc("คืนความสุข"))  # /คืน/ความสุข
| 18.166667
| 46
| 0.642202
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 89
| 0.597315
|
f733fdfd6251a8d71a648d7c85c3dac02509dfc4
| 75
|
py
|
Python
|
cumulogenesis.py
|
stelligent/cumulogenesis
|
f5a3587aebd2592642c98cb4ad93d52a927dceeb
|
[
"MIT"
] | 1
|
2021-03-22T21:50:10.000Z
|
2021-03-22T21:50:10.000Z
|
cumulogenesis.py
|
stelligent/cumulogenesis
|
f5a3587aebd2592642c98cb4ad93d52a927dceeb
|
[
"MIT"
] | 1
|
2021-03-25T22:23:04.000Z
|
2021-03-25T22:23:04.000Z
|
cumulogenesis.py
|
stelligent/cumulogenesis
|
f5a3587aebd2592642c98cb4ad93d52a927dceeb
|
[
"MIT"
] | 1
|
2019-04-03T19:09:34.000Z
|
2019-04-03T19:09:34.000Z
|
#!/usr/bin/env python
# Entry-point script: delegates straight to the package's CLI interface.
from cumulogenesis.interfaces import cli

cli.run()
| 12.5
| 40
| 0.76
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 21
| 0.28
|
f7356fdd90f419efa0300e27fdfd55d90e10cc07
| 2,897
|
py
|
Python
|
nanpy/bmp180.py
|
AFTC-1/Arduino-rpi
|
c46079f937d7e07cc0a930cc7ae278036f50a47d
|
[
"MIT"
] | 178
|
2015-01-03T11:56:49.000Z
|
2021-12-23T14:47:55.000Z
|
nanpy/bmp180.py
|
AFTC-1/Arduino-rpi
|
c46079f937d7e07cc0a930cc7ae278036f50a47d
|
[
"MIT"
] | 88
|
2015-01-23T09:06:43.000Z
|
2021-12-26T19:58:51.000Z
|
nanpy/bmp180.py
|
AFTC-1/Arduino-rpi
|
c46079f937d7e07cc0a930cc7ae278036f50a47d
|
[
"MIT"
] | 77
|
2015-02-18T17:26:11.000Z
|
2021-09-28T02:47:25.000Z
|
from __future__ import division
import logging
from nanpy.i2c import I2C_Master
from nanpy.memo import memoized
import time
log = logging.getLogger(__name__)
def to_s16(n):
    """Reinterpret an unsigned 16-bit value as a signed 16-bit integer.

    Values with the high bit set (>= 0x8000 after masking) map to the
    negative range [-32768, -1]; all inputs are first reduced modulo 2**16.
    """
    value = n & 0xFFFF
    if value >= 0x8000:
        value -= 0x10000
    return value
class Bmp180(object):
    """Control of BMP180 Digital pressure sensor (I2C)
    calculation is based on Bosch datasheet."""

    def __init__(self, wire, address=0x77, oss=3):
        # address: 7-bit I2C address of the sensor.
        # oss: oversampling setting (0..3) used for pressure conversions.
        self.i2c = I2C_Master(wire)
        self.address = address
        self.oss = oss

    def read_bytes(self, address, count):
        """Read `count` bytes starting at the given register address."""
        self.i2c.send(self.address, [address])
        x = self.i2c.request(self.address, count)
        return x

    def write_byte(self, address, data):
        """Write one data byte to the given register address."""
        self.i2c.send(self.address, [address, data])

    @property
    @memoized
    def eeprom(self):
        # Factory calibration data: 22 bytes (11 big-endian 16-bit words)
        # starting at register 0xAA. Read once and memoized — presumably the
        # calibration never changes at runtime.
        return self.read_bytes(0xaa, 22)

    def read_temperature_raw(self):
        """Trigger a temperature measurement and return the raw value UT."""
        self.write_byte(0xf4, 0x2e)
        # Allow the conversion to finish before reading the result registers.
        time.sleep(0.005)
        MSB, LSB = self.read_bytes(0xf6, 2)
        UT = (MSB << 8) + LSB
        return UT

    def read_pressure_raw(self):
        """Trigger a pressure measurement and return the raw value UP."""
        # 0x34 + (oss << 6) selects pressure measurement at the configured
        # oversampling setting.
        self.write_byte(0xf4, 0x34 + (self.oss << 6))
        # NOTE(review): 5 ms matches the lowest oversampling setting; higher
        # oss values take longer per the datasheet — confirm timing.
        time.sleep(0.005)
        MSB, LSB, XLSB = self.read_bytes(0xf6, 3)
        UP = ((MSB << 16) + (LSB << 8) + XLSB) >> (8 - self.oss)
        return UP

    @classmethod
    def calculate(cls, pressure_raw, temperature_raw, oss, eeprom):
        '''
        Integer compensation algorithm; variable names follow the Bosch
        datasheet.

        return: Pascal, Celsius
        '''
        UT = temperature_raw
        UP = pressure_raw

        def ushort(i):
            # i-th big-endian unsigned 16-bit word of the calibration EEPROM
            return (eeprom[2 * i] << 8) + eeprom[2 * i + 1]

        def short(i):
            # Same word, reinterpreted as a signed 16-bit value.
            return to_s16(ushort(i))

        # Calibration coefficients.
        AC1 = short(0)
        AC2 = short(1)
        AC3 = short(2)
        AC4 = ushort(3)
        AC5 = ushort(4)
        AC6 = ushort(5)
        B1 = short(6)
        B2 = short(7)
        # MB = short(8)
        MC = short(9)
        MD = short(10)

        # Temperature compensation (result in 0.1 degC units).
        X1 = ((UT - AC6) * AC5) >> 15
        X2 = (MC << 11) // (X1 + MD)
        B5 = X1 + X2
        T = (B5 + 8) >> 4

        # Pressure compensation.
        B6 = B5 - 4000
        X1 = (B2 * ((B6 * B6) >> 12)) >> 11
        X2 = (AC2 * B6) >> 11
        X3 = X1 + X2
        B3 = (((AC1 * 4 + X3) << oss) + 2) // 4
        X1 = (AC3 * B6) >> 13
        X2 = (B1 * ((B6 * B6) >> 12)) >> 16
        X3 = ((X1 + X2) + 2) // 4
        B4 = (AC4 * (X3 + 32768)) >> 15
        B7 = (UP - B3) * (50000 >> oss)
        p = (B7 * 2) // B4 if B7 < 0x80000000 else (B7 // B4) * 2
        X1 = (p >> 8) * (p >> 8)
        X1 = (X1 * 3038) >> 16
        X2 = (-7357 * p) >> 16
        p += (X1 + X2 + 3791) >> 4

        return p, T / 10

    def read(self):
        '''
        Perform one full measurement cycle.

        return: Pascal, Celsius
        '''
        temperature_raw = self.read_temperature_raw()
        pressure_raw = self.read_pressure_raw()
        return self.calculate(
            pressure_raw,
            temperature_raw,
            self.oss,
            self.eeprom,
        )
| 24.550847
| 67
| 0.491543
| 2,671
| 0.921988
| 0
| 0
| 1,376
| 0.474974
| 0
| 0
| 216
| 0.07456
|
f735f8fc14c7fe9404c2a5d90d59491063b15f84
| 1,539
|
py
|
Python
|
pygna/cli.py
|
Gee-3/pygna
|
61f2128e918e423fef73d810e0c3af5761933096
|
[
"MIT"
] | 32
|
2019-07-11T22:58:14.000Z
|
2022-03-04T19:34:55.000Z
|
pygna/cli.py
|
Gee-3/pygna
|
61f2128e918e423fef73d810e0c3af5761933096
|
[
"MIT"
] | 3
|
2021-05-24T14:03:13.000Z
|
2022-01-07T03:47:32.000Z
|
pygna/cli.py
|
Gee-3/pygna
|
61f2128e918e423fef73d810e0c3af5761933096
|
[
"MIT"
] | 5
|
2019-07-24T09:38:07.000Z
|
2021-12-30T09:20:20.000Z
|
import logging
import argh
import pygna.command as cmd
import pygna.painter as paint
import pygna.utils as utils
import pygna.block_model as bm
import pygna.degree_model as dm
"""
autodoc
"""
logging.basicConfig(level=logging.INFO)
def main():
    """Dispatch the PyGNA command-line subcommands via argh.

    Every callable listed below becomes a CLI subcommand named after the
    function.
    """
    argh.dispatch_commands([
        # network summary and graph file
        cmd.network_summary,
        cmd.network_graphml,
        cmd.get_connected_components,

        # geneset network topology analyses
        cmd.test_topology_total_degree,
        cmd.test_topology_internal_degree,
        cmd.test_topology_module,
        cmd.test_topology_sp,
        cmd.test_topology_rwr,
        cmd.test_diffusion_hotnet,

        # comparison analysis
        cmd.test_association_sp,
        cmd.test_association_rwr,

        # building functions
        cmd.build_distance_matrix,
        cmd.build_rwr_diffusion,

        # paint
        paint.paint_datasets_stats,
        paint.paint_comparison_matrix,
        paint.plot_adjacency,
        paint.paint_volcano_plot,
        paint.paint_summary_gnt,

        # utils
        utils.convert_gmt,
        utils.geneset_from_table,
        utils.convert_csv,
        utils.generate_group_gmt,

        # simulations
        bm.generate_gnt_sbm,
        bm.generate_gna_sbm,
        dm.generate_hdn_network,
        bm.generate_sbm_network,
        bm.generate_sbm2_network,
        dm.hdn_add_partial,
        dm.hdn_add_extended,
        dm.hdn_add_branching,
    ], )
if __name__ == "__main__":
    """
    MAIN
    """
    # Script entry point.
    main()
| 23.676923
| 43
| 0.654321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 180
| 0.116959
|
f7367b85ef33529c5c360e68d214cb8e6a80a38f
| 4,752
|
py
|
Python
|
dist/Platform.app/Contents/Resources/lib/python3.7/wx/lib/colourchooser/canvas.py
|
njalloul90/Genomics_Oncology_Platform
|
9bf6d0edca5df783f4e371fa1bc46b7b1576fe70
|
[
"MIT"
] | 6
|
2021-07-26T14:21:25.000Z
|
2021-07-26T14:32:01.000Z
|
dist/Platform.app/Contents/Resources/lib/python3.7/wx/lib/colourchooser/canvas.py
|
njalloul90/Genomics_Oncology_Platform
|
9bf6d0edca5df783f4e371fa1bc46b7b1576fe70
|
[
"MIT"
] | 9
|
2021-03-18T23:10:27.000Z
|
2022-03-11T23:43:55.000Z
|
dist/Platform.app/Contents/Resources/lib/python3.7/wx/lib/colourchooser/canvas.py
|
njalloul90/Genomics_Oncology_Platform
|
9bf6d0edca5df783f4e371fa1bc46b7b1576fe70
|
[
"MIT"
] | 2
|
2019-03-11T05:06:49.000Z
|
2019-03-22T21:48:49.000Z
|
"""
PyColourChooser
Copyright (C) 2002 Michael Gilfix <mgilfix@eecs.tufts.edu>
This file is part of PyColourChooser.
This version of PyColourChooser is open source; you can redistribute it
and/or modify it under the licensed terms.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
"""
# 12/14/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o 2.5 compatibility update.
#
# 12/21/2003 - Jeff Grimmett (grimmtooth@softhome.net)
#
# o wxPyColorChooser -> PyColorChooser
# o wxPyColourChooser -> PyColourChooser
#
# Tags: phoenix-port
import wx
class BitmapBuffer(wx.MemoryDC):
    """A screen buffer class.

    This class implements a screen output buffer. Data is meant to
    be drawn in the buffer class and then blitted directly to the
    output device, or on-screen window.
    """

    def __init__(self, width, height, colour):
        """Initialize the empty buffer object."""
        wx.MemoryDC.__init__(self)

        self.width = width
        self.height = height
        self.colour = colour

        # Back the DC with a bitmap of the requested size.
        self.bitmap = wx.Bitmap(self.width, self.height)
        self.SelectObject(self.bitmap)

        # Initialize the buffer to the background colour
        self.SetBackground(wx.Brush(self.colour, wx.BRUSHSTYLE_SOLID))
        self.Clear()

        # Make each logical unit of the buffer equal to 1 pixel
        self.SetMapMode(wx.MM_TEXT)

    def GetBitmap(self):
        """Returns the internal bitmap for direct drawing."""
        return self.bitmap

    # GetPixel seems to always return (-1, -1, -1, 255)
    # on OS X so this is a workaround for that issue.
    def GetPixelColour(self, x, y):
        """Gets the color value of the pixel at the given
        cords.
        """
        # Round-trip through a wx.Image, whose per-channel accessors work
        # reliably across platforms.
        img = self.GetAsBitmap().ConvertToImage()
        red = img.GetRed(x, y)
        green = img.GetGreen(x, y)
        blue = img.GetBlue(x, y)
        return wx.Colour(red, green, blue)
class Canvas(wx.Window):
    """A canvas class for arbitrary drawing.

    The Canvas class implements a window that allows for drawing
    arbitrary graphics. It implements a double buffer scheme and
    blits the off-screen buffer to the window during paint calls
    by the windowing system for speed.

    Some other methods for determining the canvas colour and size
    are also provided.
    """

    def __init__(self, parent, id,
                 pos=wx.DefaultPosition,
                 style=wx.SIMPLE_BORDER,
                 forceClientSize=None):
        """Creates a canvas instance and initializes the off-screen
        buffer. Also sets the handler for rendering the canvas
        automatically via size and paint calls from the windowing
        system."""
        wx.Window.__init__(self, parent, id, pos, style=style)

        if forceClientSize:
            # Pin the client area to a fixed size by clamping both bounds.
            self.SetMaxClientSize(forceClientSize)
            self.SetMinClientSize(forceClientSize)

        # Perform an intial sizing
        self.ReDraw()

        # Register event handlers
        self.Bind(wx.EVT_SIZE, self.onSize)
        self.Bind(wx.EVT_PAINT, self.onPaint)

    def MakeNewBuffer(self):
        # Recreate the off-screen buffer to match the current client size.
        size = self.GetClientSize()
        self.buffer = BitmapBuffer(size[0], size[1],
                                   self.GetBackgroundColour())

    def onSize(self, event):
        """Perform actual redraw to off-screen buffer only when the
        size of the canvas has changed. This saves a lot of computation
        since the same image can be re-used, provided the canvas size
        hasn't changed."""
        self.MakeNewBuffer()
        self.DrawBuffer()
        self.Refresh()

    def ReDraw(self):
        """Explicitly tells the canvas to redraw it's contents."""
        self.onSize(None)

    def Refresh(self):
        """Re-draws the buffer contents on-screen."""
        # NOTE: intentionally overrides wx.Window.Refresh — blits the buffer
        # directly instead of queueing a paint event.
        dc = wx.ClientDC(self)
        self.Blit(dc)

    def onPaint(self, event):
        """Renders the off-screen buffer on-screen."""
        dc = wx.PaintDC(self)
        self.Blit(dc)

    def Blit(self, dc):
        """Performs the blit of the buffer contents on-screen."""
        width, height = self.buffer.GetSize()
        dc.Blit(0, 0, width, height, self.buffer, 0, 0)

    def GetBoundingRect(self):
        """Returns a tuple that contains the co-ordinates of the
        top-left and bottom-right corners of the canvas."""
        x, y = self.GetPosition()
        w, h = self.GetSize()
        return(x, y + h, x + w, y)

    def DrawBuffer(self):
        """Actual drawing function for drawing into the off-screen
        buffer. To be overrideen in the implementing class. Do nothing
        by default."""
        pass
| 32.547945
| 71
| 0.643729
| 4,063
| 0.855008
| 0
| 0
| 0
| 0
| 0
| 0
| 2,575
| 0.541877
|
f738260086ccd3653bc2367e7b8083819a301d9b
| 1,807
|
py
|
Python
|
preprocessing/metadata.py
|
skincare-deep-learning/Skincare-backend
|
80ed6b7a735291848be9248035231fbd55c93990
|
[
"Apache-2.0"
] | 1
|
2019-11-27T20:56:27.000Z
|
2019-11-27T20:56:27.000Z
|
preprocessing/metadata.py
|
skincare-deep-learning/Skincare-backend
|
80ed6b7a735291848be9248035231fbd55c93990
|
[
"Apache-2.0"
] | 10
|
2021-04-02T19:47:15.000Z
|
2022-01-13T01:52:53.000Z
|
preprocessing/metadata.py
|
skincare-deep-learning/Skincare-backend
|
80ed6b7a735291848be9248035231fbd55c93990
|
[
"Apache-2.0"
] | null | null | null |
import json
import csv
import pandas as pd
from isic_api import ISICApi
from pandas.io.json import json_normalize
# Initialize the API; no login is necessary for public data
api = ISICApi(username="SkinCare", password="unbdeeplearning")
outputFileName = 'imagedata'  # NOTE(review): unused; kept for compatibility

# List every public image (capped at 25000), sorted by name.
imageList = api.getJson('image?limit=25000&offset=0&sort=name')
print('Fetching metadata for %s images' % len(imageList))

# Fetch the full detail record for each image (one API round-trip per image).
imageDetails = []
for image in imageList:
    print(' ', image['name'])
    imageDetail = api.getJson('image/%s' % image['_id'])
    imageDetails.append(imageDetail)

# Determine the union of all clinical-metadata field names seen across the
# images, with the ISIC id as the first CSV column.
metadataFields = set(
    field
    for imageDetail in imageDetails
    for field in imageDetail['meta']['clinical'].keys()
)
metadataFields = ['isic_id'] + sorted(metadataFields)

outputFilePath = './metadata.csv'

# Write one CSV row per image.  newline='' is required by the csv module so
# that it controls line endings itself (without it, blank rows appear in the
# output on Windows).
print('Writing metadata to CSV: %s' % 'metadata.csv')
with open(outputFilePath, 'w', newline='') as outputStream:
    csvWriter = csv.DictWriter(outputStream, fieldnames=metadataFields)
    csvWriter.writeheader()  # Columns Names
    for imageDetail in imageDetails:
        rowDict = imageDetail['meta']['clinical'].copy()
        rowDict['isic_id'] = imageDetail['name']
        csvWriter.writerow(rowDict)
| 30.627119
| 110
| 0.672939
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 934
| 0.516879
|
f7387b7a0fda396aca3fe13d2312bd4427223bec
| 1,317
|
py
|
Python
|
nasbench/scripts/generate-all-graphs.py
|
bkj/nasbench
|
a238cf26d843aaffbe037569528ef96d3e37eb04
|
[
"Apache-2.0"
] | null | null | null |
nasbench/scripts/generate-all-graphs.py
|
bkj/nasbench
|
a238cf26d843aaffbe037569528ef96d3e37eb04
|
[
"Apache-2.0"
] | null | null | null |
nasbench/scripts/generate-all-graphs.py
|
bkj/nasbench
|
a238cf26d843aaffbe037569528ef96d3e37eb04
|
[
"Apache-2.0"
] | 1
|
2021-07-25T16:36:34.000Z
|
2021-07-25T16:36:34.000Z
|
#!/usr/bin/env python
"""
generate-all-graphs.py
python generate-all-graphs.py | gzip -c > all-graphs.gz
"""
import sys
import json
import itertools
import numpy as np
from tqdm import tqdm
from nasbench.lib import graph_util
from joblib import delayed, Parallel
max_vertices = 7
num_ops = 3
max_edges = 9
def make_graphs(vertices, bits):
    """Expand one adjacency matrix into all of its labelled module graphs.

    The upper-triangular adjacency matrix is decoded from the integer
    ``bits``.  Matrices that exceed the module-level ``max_edges`` budget,
    or that are not full DAGs, yield no graphs.  Each surviving matrix is
    combined with every possible operation labeling of its interior
    vertices (input/output vertices are labelled -1 and -2).

    Returns a (possibly empty) list of dicts with keys "hash", "adj",
    and "labeling".
    """
    matrix = np.fromfunction(graph_util.gen_is_edge_fn(bits), (vertices, vertices), dtype=np.int8)

    # Discard matrices over the edge budget or that are not full DAGs.
    if graph_util.num_edges(matrix) > max_edges or not graph_util.is_full_dag(matrix):
        return []

    graphs = []
    op_choices = [range(num_ops) for _ in range(vertices - 2)]
    for labeling in itertools.product(*op_choices):
        full_labeling = [-1] + list(labeling) + [-2]
        graphs.append({
            "hash": graph_util.hash_module(matrix, full_labeling),
            "adj": matrix.tolist(),
            "labeling": full_labeling,
        })
    return graphs
# Enumerate every (vertex count, adjacency bitmask) pair; for v vertices
# there are 2**(v*(v-1)/2) possible upper-triangular matrices.
adjs = [
    (vertices, bits)
    for vertices in range(2, max_vertices + 1)
    for bits in range(2 ** (vertices * (vertices - 1) // 2))
]

# Shuffle so the parallel workers get evenly-sized work on average.
adjs = [adjs[i] for i in np.random.permutation(len(adjs))]

# Expand every pair into its labelled graphs in parallel, then emit one
# JSON object per graph on stdout.
jobs = [delayed(make_graphs)(*adj) for adj in adjs]
res = Parallel(n_jobs=40, backend='multiprocessing', verbose=10)(jobs)

for r in res:
    for rr in r:
        print(json.dumps(rr))
| 24.388889
| 98
| 0.642369
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 152
| 0.115414
|
f738e084271100fae4934591514291316a9bafdd
| 1,500
|
py
|
Python
|
ui/mext.py
|
szymonkaliski/nott
|
fa85e64b570f71733ea199dddbd0bc0f013a613b
|
[
"MIT"
] | 25
|
2019-07-01T14:58:48.000Z
|
2021-11-13T17:00:44.000Z
|
ui/mext.py
|
szymonkaliski/nott
|
fa85e64b570f71733ea199dddbd0bc0f013a613b
|
[
"MIT"
] | 6
|
2019-12-30T02:50:19.000Z
|
2021-05-10T16:41:47.000Z
|
ui/mext.py
|
szymonkaliski/nott
|
fa85e64b570f71733ea199dddbd0bc0f013a613b
|
[
"MIT"
] | 2
|
2020-01-05T13:02:07.000Z
|
2020-05-21T15:54:57.000Z
|
# FIXME: fix all "happy paths coding" issues
import liblo
from threading import Thread
class Mext(object):
    """Thin OSC wrapper around a monome grid attached via serialosc.

    Listens on ``device_port`` for key events from the device and exposes
    helpers for setting LED levels.  LED writes are fired from short-lived
    threads so the caller is never blocked on OSC I/O.
    """

    # OSC address of the attached device; populated asynchronously by
    # on_serialosc_device() once serialosc answers the /serialosc/list
    # query sent from __init__.
    device = None

    # User callback for key events; None until set_grid_key_callback() is
    # called.  Defined at class level so on_grid_key() cannot raise
    # AttributeError if a key event arrives before registration (this was
    # the bug: the attribute previously never existed until the setter ran).
    grid_key_callback = None

    def __init__(self, device_port=5000):
        """Start the OSC receiver and ask serialosc for the attached device."""
        self.device_receiver = liblo.ServerThread(device_port)
        self.device_receiver.add_method("/monome/grid/key", "iii", self.on_grid_key)
        self.device_receiver.add_method(
            "/serialosc/device", "ssi", self.on_serialosc_device
        )
        self.device_receiver.start()

        liblo.send(liblo.Address(12002), "/serialosc/list", "127.0.0.1", device_port)

    def set_grid_key_callback(self, fn):
        """Register fn(x, y, edge) to be invoked on every key event."""
        self.grid_key_callback = fn

    def set_led_level(self, x, y, value):
        """Asynchronously set a single LED to the given brightness level."""
        Thread(
            target=(
                lambda: liblo.send(
                    self.device, "/monome/grid/led/level/set", x, y, value
                )
            )
        ).start()

    def set_led_map(self, offset_x, offset_y, values):
        """Asynchronously set an LED region starting at the given offset."""
        Thread(
            target=(
                lambda: liblo.send(
                    self.device,
                    "/monome/grid/led/level/map",
                    offset_x,
                    offset_y,
                    *values
                )
            )
        ).start()

    def on_grid_key(self, path, args):
        """OSC handler for key events; forwards (x, y, edge) to the user
        callback.  Events arriving before a callback is registered are
        silently dropped."""
        x, y, edge = args
        if self.grid_key_callback:
            self.grid_key_callback(x, y, edge)

    def on_serialosc_device(self, path, args):
        """OSC handler for serialosc's device announcement; records the
        device's OSC address for subsequent LED writes."""
        _, sysId, port = args
        self.device = liblo.Address(port)
| 26.315789
| 85
| 0.544
| 1,409
| 0.939333
| 0
| 0
| 0
| 0
| 0
| 0
| 175
| 0.116667
|
f73a4d22041854c5326afaffc36927b22884b07a
| 5,370
|
py
|
Python
|
workers/test/test_exportactionlogsworker.py
|
kwestpharedhat/quay
|
a0df895005bcd3e53847046f69f6a7add87c88fd
|
[
"Apache-2.0"
] | null | null | null |
workers/test/test_exportactionlogsworker.py
|
kwestpharedhat/quay
|
a0df895005bcd3e53847046f69f6a7add87c88fd
|
[
"Apache-2.0"
] | null | null | null |
workers/test/test_exportactionlogsworker.py
|
kwestpharedhat/quay
|
a0df895005bcd3e53847046f69f6a7add87c88fd
|
[
"Apache-2.0"
] | null | null | null |
import json
import os
import pytest
from datetime import datetime, timedelta
import boto3
from httmock import urlmatch, HTTMock
from moto import mock_s3
from app import storage as test_storage
from data import model, database
from data.logs_model import logs_model
from storage import S3Storage, StorageContext, DistributedStorage
from workers.exportactionlogsworker import ExportActionLogsWorker, POLL_PERIOD_SECONDS
from test.fixtures import *
_TEST_CONTENT = os.urandom(1024)
_TEST_BUCKET = "somebucket"
_TEST_USER = "someuser"
_TEST_PASSWORD = "somepassword"
_TEST_PATH = "some/cool/path"
_TEST_CONTEXT = StorageContext("nyc", None, None, None)
@pytest.fixture(params=["test", "mock_s3"])
def storage_engine(request):
    """Parametrized storage fixture.

    Yields either the application's built-in test storage engine or a
    DistributedStorage backed by a moto-mocked S3 bucket, so every test
    using this fixture runs once against each backend.
    """
    if request.param == "test":
        yield test_storage
    else:
        with mock_s3():
            # Create a test bucket and put some test content.
            boto3.client("s3").create_bucket(Bucket=_TEST_BUCKET)
            engine = DistributedStorage(
                {
                    "foo": S3Storage(
                        _TEST_CONTEXT, "some/path", _TEST_BUCKET, _TEST_USER, _TEST_PASSWORD
                    )
                },
                ["foo"],
            )
            yield engine
def test_export_logs_failure(initialized_db):
    """When uploading the exported archive fails, the worker must raise
    and report status "failed" to the callback URL."""
    # Make all uploads fail.
    test_storage.put_content("local_us", "except_upload", b"true")

    repo = model.repository.get_repository("devtable", "simple")
    user = model.user.get_user("devtable")

    worker = ExportActionLogsWorker(None)

    # Mutable cell capturing the JSON body POSTed to the callback URL.
    called = [{}]

    @urlmatch(netloc=r"testcallback")
    def handle_request(url, request):
        called[0] = json.loads(request.body)
        return {"status_code": 200, "content": "{}"}

    def format_date(datetime):
        # NOTE(review): the parameter shadows the datetime class imported
        # at module scope.
        return datetime.strftime("%m/%d/%Y")

    now = datetime.now()

    with HTTMock(handle_request):
        with pytest.raises(IOError):
            worker._process_queue_item(
                {
                    "export_id": "someid",
                    "repository_id": repo.id,
                    "namespace_id": repo.namespace_user.id,
                    "namespace_name": "devtable",
                    "repository_name": "simple",
                    "start_time": format_date(now + timedelta(days=-10)),
                    "end_time": format_date(now + timedelta(days=10)),
                    "callback_url": "http://testcallback/",
                    "callback_email": None,
                },
                test_storage,
            )

    # Clean up the failure trigger so subsequent tests upload normally.
    test_storage.remove("local_us", "except_upload")

    # The callback must still have been invoked, with a failure status.
    assert called[0]
    assert called[0]["export_id"] == "someid"
    assert called[0]["status"] == "failed"
@pytest.mark.parametrize(
    "has_logs",
    [
        True,
        False,
    ],
)
def test_export_logs(initialized_db, storage_engine, has_logs):
    """Happy-path export: the worker uploads a JSON archive of the action
    logs and POSTs a "success" status (with the download URL) to the
    callback.  Runs against both storage engines, with and without logs.
    """
    # Delete all existing logs.
    database.LogEntry3.delete().execute()

    repo = model.repository.get_repository("devtable", "simple")
    user = model.user.get_user("devtable")
    now = datetime.now()

    if has_logs:
        # Add new logs over a multi-day period.
        for index in range(-10, 10):
            logs_model.log_action(
                "push_repo",
                "devtable",
                user,
                "0.0.0.0",
                {"index": index},
                repo,
                timestamp=now + timedelta(days=index),
            )

    worker = ExportActionLogsWorker(None)

    # Mutable cell capturing the JSON body POSTed to the callback URL.
    called = [{}]

    @urlmatch(netloc=r"testcallback")
    def handle_request(url, request):
        called[0] = json.loads(request.body)
        return {"status_code": 200, "content": "{}"}

    def format_date(datetime):
        # NOTE(review): the parameter shadows the datetime class imported
        # at module scope.
        return datetime.strftime("%m/%d/%Y")

    with HTTMock(handle_request):
        worker._process_queue_item(
            {
                "export_id": "someid",
                "repository_id": repo.id,
                "namespace_id": repo.namespace_user.id,
                "namespace_name": "devtable",
                "repository_name": "simple",
                "start_time": format_date(now + timedelta(days=-10)),
                "end_time": format_date(now + timedelta(days=10)),
                "callback_url": "http://testcallback/",
                "callback_email": None,
            },
            storage_engine,
        )

    assert called[0]
    assert called[0]["export_id"] == "someid"
    assert called[0]["status"] == "success"

    url = called[0]["exported_data_url"]

    # The export may land either in local test storage or in the mocked S3
    # bucket; recover the storage id from whichever URL shape was returned.
    if url.find("http://localhost:5000/exportedlogs/") == 0:
        storage_id = url[len("http://localhost:5000/exportedlogs/") :]
    else:
        assert url.find("https://somebucket.s3.amazonaws.com/some/path/exportedactionlogs/") == 0
        storage_id, _ = url[
            len("https://somebucket.s3.amazonaws.com/some/path/exportedactionlogs/") :
        ].split("?")

    created = storage_engine.get_content(
        storage_engine.preferred_locations, "exportedactionlogs/" + storage_id
    )
    created_json = json.loads(created)

    if has_logs:
        # Every generated log entry must appear exactly once in the export;
        # "terminator" entries are bookkeeping records, not logs.
        found = set()
        for log in created_json["logs"]:
            if log.get("terminator"):
                continue

            found.add(log["metadata"]["index"])

        for index in range(-10, 10):
            assert index in found
    else:
        assert created_json["logs"] == [{"terminator": True}]
| 30.338983
| 97
| 0.58324
| 0
| 0
| 544
| 0.101304
| 3,431
| 0.63892
| 0
| 0
| 1,225
| 0.228119
|
f73c025048313646ffa657c41d4c35ef79bc7325
| 6,699
|
py
|
Python
|
pageplot/plotmodel.py
|
JBorrow/pageplot
|
8abad574fda476d26a59fc8b7d36da2838f2c11e
|
[
"MIT"
] | null | null | null |
pageplot/plotmodel.py
|
JBorrow/pageplot
|
8abad574fda476d26a59fc8b7d36da2838f2c11e
|
[
"MIT"
] | null | null | null |
pageplot/plotmodel.py
|
JBorrow/pageplot
|
8abad574fda476d26a59fc8b7d36da2838f2c11e
|
[
"MIT"
] | null | null | null |
"""
The base top-level plot model class.
From this all data and plotting flow.
"""
from pageplot.exceptions import PagePlotParserError
from pathlib import Path
from typing import Any, Optional, Dict, List, Union
from pageplot.extensionmodel import PlotExtension
from pageplot.extensions import built_in_extensions
from pageplot.io.spec import IOSpecification
from pageplot.config import GlobalConfig
from pageplot.mask import get_mask
import matplotlib.pyplot as plt
import numpy as np
import unyt
import attr
@attr.s(auto_attribs=True)
class PlotModel:
    """
    Model describing an individual plot. De-serializes the input
    json describing an individual figure's extension values.

    To use this, you'll need to initialise it with the configuration
    (for all the extensions!), and then associate the data with
    the appropriate method. The plots can then be created using the
    methods in the following order:

    ``setup_figures`` - creates Figure and Axes objects
    ``run_extensions`` - runs all of the extensions' ``preprocess`` steps
    ``perform_blitting`` - runs the extensions' ``blit`` functions
    ``save`` - writes out the figures to disk
    ``finalize`` - closes the Figure object

    You can also serialize the contents of the whole figure to a dictionary
    with the ``serialize`` method.

    Parameters
    ----------

    name: str
        Plot name. This is the filename of the plot (without file extension).

    config: GlobalConfig
        Global configuration object.

    plot_spec: Dict[str, Any]
        Data controlling the behaviour of each extension. The keys should
        be the same as the used extensions. Mis-matches will raise a
        ``PagePlotParserError``.

    x, y, z: str, optional
        Strings to be passed to the data to load appropriate x, y, and z
        data. Here only x is required.

    x_units, y_units, z_units: Union[str, None, unyt.unyt_quantity]
        Expected output units for the plot, to be parsed.

    mask: str, optional
        Mask text (see :func:`get_mask`).
    """

    name: str
    config: GlobalConfig
    plot_spec: Dict[str, Any]

    x: str
    y: Optional[str] = None
    z: Optional[str] = None

    # Output units for the plot.
    x_units: Union[str, None, unyt.unyt_quantity] = None
    y_units: Union[str, None, unyt.unyt_quantity] = None
    z_units: Union[str, None, unyt.unyt_quantity] = None

    mask: Optional[str] = None

    # Filled in later by associate_data() / setup_figures() /
    # run_extensions(), hence init=False.
    data: IOSpecification = attr.ib(init=False)
    fig: plt.Figure = attr.ib(init=False)
    axes: plt.Axes = attr.ib(init=False)
    extensions: Dict[str, PlotExtension] = attr.ib(init=False)

    def associate_data(self, data: IOSpecification):
        """
        Associates the data file (which conforms to the
        ``IOSpecification``) with the plot.

        data: IOSpecification
            Any data file that conforms to the specification.
        """
        self.data = data

    def setup_figures(self):
        """
        Sets up the internal figure and axes.
        """
        self.fig, self.axes = plt.subplots()

        return

    def run_extensions(
        self, additional_extensions: Optional[Dict[str, PlotExtension]] = None
    ):
        """
        Run the figure extensions (these provide all data for the figures,
        excluding the plotting). Internal extensions are performed
        first, then any additional extensions are executed.

        additional_extensions: Dict[str, PlotExtension]
            Any additional extensions conforming to the specification.
        """
        # First, sort out units and masking
        units = {
            "x_units": self.x_units,
            "y_units": self.y_units,
            "z_units": self.z_units,
        }

        for name, value in units.items():
            if value is None:
                # name[0] is the axis letter ("x", "y", or "z"); look up
                # the matching data-selection string on self.
                if (associated_data := getattr(self, name[0])) is None:
                    units[name] = unyt.unyt_quantity(1.0, None)
                else:
                    # The units are everything after the first space in the
                    # data string, i.e. strings look like "<field> <units>".
                    units[name] = unyt.unyt_quantity(
                        1.0, associated_data.split(" ", 1)[1]
                    )
            else:
                units[name] = unyt.unyt_quantity(1.0, value)

        mask = get_mask(data=self.data, mask_text=self.mask)

        self.extensions = {}

        if additional_extensions is None:
            additional_extensions = {}

        # Additional extensions override built-ins sharing the same name.
        combined_extensions = {**built_in_extensions, **additional_extensions}

        for name in self.plot_spec.keys():
            try:
                Extension = combined_extensions[name]
            except KeyError:
                raise PagePlotParserError(
                    name, "Unable to find matching extension for configuration value."
                )

            extension = Extension(
                name=name,
                config=self.config,
                metadata=self.data.metadata,
                x=self.data.data_from_string(self.x, mask=mask),
                y=self.data.data_from_string(self.y, mask=mask),
                z=self.data.data_from_string(self.z, mask=mask),
                **units,
                **self.plot_spec.get(name, {}),
            )

            extension.preprocess()

            self.extensions[name] = extension

        return

    def perform_blitting(self):
        """
        Performs the blitting (creating the figure).

        Without this, the extensions are just 'created' and pre-processed
        without affecting or creating the figure.
        """
        for extension in self.extensions.values():
            extension.blit(fig=self.fig, axes=self.axes)

    def save(self, filename: Path):
        """
        Saves the figure to file.

        filename: Path
            Filename that you would like to save the figure to. Can have
            any matplotlib-compatible file extension.

        Notes
        -----

        It's suggested that you run finalize() after this function, otherwise
        there will be lots of figures open at one time causing potential slowdowns.
        """
        self.fig.savefig(filename)

        return

    def serialize(self) -> Dict[str, Any]:
        """
        Serializes the contents of the extensions to a dictionary.

        Note that you do not have to have 'created' the figure to run this,
        if you just want the data you should be able to just request
        the serialized data.
        """
        serialized = {name: ext.serialize() for name, ext in self.extensions.items()}

        return serialized

    def finalize(self):
        """
        Closes figures and cleans up.
        """
        plt.close(self.fig)

    # NOTE(review): looks like a leftover pydantic-style inner config class;
    # attrs does not read this attribute — confirm whether it can be removed.
    class Config:
        arbitrary_types_allowed = True
| 29.641593
| 86
| 0.618749
| 6,155
| 0.918794
| 0
| 0
| 6,182
| 0.922824
| 0
| 0
| 3,250
| 0.485147
|
f73d133f1804d0833d771530b775e1da1e558e30
| 853
|
py
|
Python
|
src.py
|
duldiev/Assignment-2-Scrapping
|
a9dbb4bb14b7fe0a1c5ec6eba73491008ff8da52
|
[
"MIT"
] | null | null | null |
src.py
|
duldiev/Assignment-2-Scrapping
|
a9dbb4bb14b7fe0a1c5ec6eba73491008ff8da52
|
[
"MIT"
] | null | null | null |
src.py
|
duldiev/Assignment-2-Scrapping
|
a9dbb4bb14b7fe0a1c5ec6eba73491008ff8da52
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup as soup
from selenium import webdriver
class Scrapper:
    """Scrapes the latest news articles for a cryptocurrency from
    coinmarketcap.com using a Selenium-driven Firefox browser."""

    def getArticles(self, cryptoName):
        """Print the latest news headlines and summaries for ``cryptoName``.

        Parameters
        ----------
        cryptoName:
            Currency slug as used in coinmarketcap URLs, e.g. "bitcoin".
        """
        url = 'https://coinmarketcap.com/currencies/' + cryptoName + '/news/'
        driver = webdriver.Firefox()
        try:
            driver.get(url)
            page = driver.page_source
        finally:
            # BUG FIX: always release the browser process, even when the
            # page load fails (the original leaked a Firefox instance per
            # call).
            driver.quit()

        page_soup = soup(page, 'html.parser')

        # BUG FIX: the original passed duplicate "class" keys in the attrs
        # dicts, so Python silently kept only the last one and a single
        # class was matched.  CSS selectors express "element with all of
        # these classes" correctly.
        headers = page_soup.select("h3.sc-1q9q90x-0.gEZmSc")
        paragraphs = page_soup.select("p.sc-1eb5slv-0.svowul-3.ddtKCV")

        print('Latest news about', cryptoName.capitalize(), end=':')
        print()
        for i in range(min(len(headers), len(paragraphs))):
            print('Article', (i + 1), end=':')
            print()
            print(headers[i].text.strip(), '\n', 'More:', paragraphs[i].text.strip(), '\n')
| 38.772727
| 110
| 0.588511
| 783
| 0.917937
| 0
| 0
| 0
| 0
| 0
| 0
| 205
| 0.240328
|
f73dddf470763349a7f01540ff083d75743566dd
| 19,409
|
py
|
Python
|
qsubm.py
|
mark-caprio/mcscript
|
7a5a69667857f27b8f2d2f9387b90301bc321df2
|
[
"MIT"
] | 1
|
2017-05-30T20:45:24.000Z
|
2017-05-30T20:45:24.000Z
|
qsubm.py
|
mark-caprio/mcscript
|
7a5a69667857f27b8f2d2f9387b90301bc321df2
|
[
"MIT"
] | 3
|
2020-06-15T16:10:23.000Z
|
2020-10-15T02:47:21.000Z
|
qsubm.py
|
mark-caprio/mcscript
|
7a5a69667857f27b8f2d2f9387b90301bc321df2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
"""qsubm -- generic queue submission for task-oriented batch scripts
Environment variables:
MCSCRIPT_DIR should specify the directory in which the mcscript package is
installed, i.e., the directory where the file qsubm.py is found. (Note that
qsubm uses this information to locate certain auxiliary script files used as
part of the job submission process.)
MCSCRIPT_RUN_HOME must specify the directory in which job files are found.
MCSCRIPT_WORK_HOME should specify the parent directory in which run scratch
directories should be made.
MCSCRIPT_INSTALL_HOME must specify the directory in which executables are found.
MCSCRIPT_LAUNCH_HOME (optional) should specify the parent directory in which
run subdirectories for qsub invocation and output logging should be made.
Otherwise, this will default to MCSCRIPT_WORK_HOME.
MCSCRIPT_PYTHON should give the full qualified filename (i.e., including
path) to the Python 3 executable for running run script files. A typical
value will simply be "python3", assuming the Python 3 executable is in the
shell's command search PATH. However, see note on "Availability of Python"
in INSTALL.md.
MCSCRIPT_RUN_PREFIX should specify the prefix for run names, e.g., set to
"run" if your scripts are to be named run<XXXX>.py.
Requires local definitions file config.py to translate options into
arguments for local batch server. See directions in readme.txt. Your local
definitions might not make use of or support all the parallel environment
options.
Language: Python 3
M. A. Caprio
University of Notre Dame
+ 3/6/13 (mac): Based on earlier qsubm csh script.
+ 7/4/13 (mac): Support for multiple cluster flavors via qsubm_local.
+ 1/22/14 (mac): Python 3 update.
+ 10/27/14 (mac): Updates to --archive handling.
+ 5/14/15 (mac):
- Insert "future" statements for Python 2 legacy support.
- Add --noredirect switch.
- Mandatory environment variable QSUBM_PYTHON.
+ 8/4/15 (mac): Make user environment variable definitions into option.
+ 6/13/16 (mac): Rename environment variables to MCSCRIPT_*.
+ 6/22/16 (mac): Update to use config.py for local configuration.
+ 12/14/16 (mac): Add --here option.
+ 12/29/16 (mac):
- Add --spread option.
- Remove --pernode option.
- Make --opt option repeatable.
+ 1/16/17 (mac): Add --serialthreads option.
+ 2/23/17 (mac): Switch from os.mkdir to mcscript.utils.mkdir.
+ 3/16/17 (mac):
- Add --setup option.
- Change environment interface to pass MCSCRIPT_TASK_MODE.
+ 3/18/17 (mac):
- Revise to support updated hybrid run parameters.
- Rename option --setup to --prerun.
+ 5/22/17 (mac): Fix processing of boolean option --redirect.
+ 10/11/17 (pjf): Add --switchwaittime option.
+ 01/05/18 (pjf): Sort arguments into groups.
+ 02/11/18 (pjf):
- Pass through MCSCRIPT_INSTALL_HOME.
- Use job_environ for submission.
+ 07/06/18 (pjf):
- Pass queue via MCSCRIPT_RUN_QUEUE.
- Remove MCSCRIPT_HYBRID_NODESIZE.
+ 06/04/19 (pjf):
- Add hook for individual configurations to add command-line arguments.
- Move --switchwaittime option into config-slurm-nersc.py.
+ 09/11/19 (pjf): Add expert mode argument.
"""
import argparse
import os
import shutil
import subprocess
import sys
import mcscript.config # local configuration (usually symlink)
import mcscript.utils
################################################################
# argument parsing
################################################################

# Build the command-line interface.  The help strings below double as the
# primary user documentation for qsubm's options.
parser = argparse.ArgumentParser(
    description="Queue submission for numbered run.",
    usage=
    "%(prog)s [option] run queue|RUN wall [var1=val1, ...]\n",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    epilog=
    """Simply omit the queue name and leave off the wall time for a
    local interactive run.
    Environment variables for qsubm are described in INSTALL.md.
    Note that qsubm relies upon code in the local `config.py`
    configuration file for the system or cluster you are running on, in
    order to interpret the following arguments and translate them into
    arguments for your local batch system. Your local configuration
    file might not make use of or support all the parallel environment
    options listed below.
    """
    )

# general arguments
parser.add_argument("run", help="Run number (e.g., 0000 for run0000)")
# latter arguments are made optional to simplify bare-bones syntax for --toc, etc., calls
parser.add_argument("queue", nargs='?', help="Submission queue, or RUN for direct interactive run", default="RUN")
parser.add_argument("wall", type=int, nargs='?', help="Wall time (minutes)", default=60)
##parser.add_argument("vars", nargs="?", help="Environment variables to pass to script, with optional values, comma delimited (e.g., METHOD2, PARAM=1.0)")
parser.add_argument("--here", action="store_true", help="Force run in current working directory")
parser.add_argument("--vars", help="Environment variables to pass to script, with optional values, comma delimited (e.g., --vars=METHOD2, PARAM=1.0)")
## parser.add_argument("--stat", action="store_true", help="Display queue status information")
parser.add_argument("--num", type=int, default=1, help="Number of repetitions")
parser.add_argument("--opt", action="append", help="Additional option arguments to be passed to job submission command (e.g., --opt=\"-m ae\" or --opt=\"--mail-type=END,FAIL\"), may be repeated (e.g., --opt=\"-A acct\" --opt=\"-a 1200\"); beware the spaces may be important to the job submission command")
parser.add_argument("--expert", action="store_true", help="Run mcscript in expert mode")

# serial run parallelization parameters
serial_group = parser.add_argument_group("serial run options (single-node, non-MPI)")
serial_group.add_argument("--serialthreads", type=int, default=1, help="OMP threads")

# hybrid run parallelization parameters
#
# Not all local configuration files need necessarily require or
# respect all of the following parameters.
hybrid_group = parser.add_argument_group("hybrid run options")
hybrid_group.add_argument("--nodes", type=int, default=1, help="number of nodes")
hybrid_group.add_argument("--ranks", type=int, default=1, help="number of MPI ranks")
hybrid_group.add_argument("--threads", type=int, default=1, help="OMP threads per rank)")
hybrid_group.add_argument("--nodesize", type=int, default=0, help="logical threads available per node"
                          " (might instead be interpreted physical CPUs depending on local config file)")
##hybrid_group.add_argument("--undersubscription", type=int, default=1, help="undersubscription factor (e.g., spread=2 requests twice the cores needed)")

# multi-task interface: invocation modes
task_mode_group = parser.add_mutually_exclusive_group()
task_mode_group.add_argument("--toc", action="store_true", help="Invoke run script to generate task table of contents")
task_mode_group.add_argument("--unlock", action="store_true", help="Delete any .lock or .fail flags for tasks")
task_mode_group.add_argument("--archive", action="store_true", help="Invoke archive-generation run")
task_mode_group.add_argument("--prerun", action="store_true", help="Invoke prerun mode, for argument validation and file staging only")
task_mode_group.add_argument("--offline", action="store_true", help="Invoke offline mode, to create batch scripts for later submission instead of running compute codes")

# multi-task interface: task selection
task_selection_group = parser.add_argument_group("multi-task run options")
task_selection_group.add_argument("--pool", help="Set task pool (or ALL) for task selection")
task_selection_group.add_argument("--phase", type=int, default=0, help="Set task phase for task selection")
task_selection_group.add_argument("--start", type=int, help="Set starting task number for task selection")
task_selection_group.add_argument("--limit", type=int, help="Set task count limit for task selection")
task_selection_group.add_argument("--redirect", default="True", choices=["True", "False"], help="Allow redirection of standard"
                                  " output/error to file (may want to disable for interactive debugging)")

# some special options (deprecated?)
##parser.add_argument("--epar", type=int, default=None, help="Width for embarassingly parallel job")
##parser.add_argument("--nopar", action="store_true", help="Disable parallel resource requests (for use on special serial queues)")

# site-local options
#
# The local configuration module may optionally define
# qsubm_arguments(parser) to register site-specific options.
try:
    mcscript.config.qsubm_arguments(parser)
except AttributeError:
    # local config doesn't provide arguments, ignore gracefully
    pass

##parser.print_help()
##print
args = parser.parse_args()
##printargs
################################################################
# special mode: status display
################################################################
# TODO
# will have to modify argument processing to allow no arguments, local
# customization for qstat
# @ i = 0
# while (($i == 0) || ($loop))
# @ i++
# clear
# echo "****************************************************************"
# qstat -u $user
# if ($loop) sleep 5
# end
## if (args.stat):
## pass
################################################################
# environment processing
################################################################

def _required_env(name):
    """Return environment variable *name*, exiting with a diagnostic if unset.

    Factors out the lookup-or-die pattern that was previously repeated
    for each required MCSCRIPT_* variable (diagnostic text unchanged).
    """
    try:
        return os.environ[name]
    except KeyError:
        print("{} not found in environment".format(name))
        exit(1)

if (args.here):
    # --here: force all home directories to the current working directory
    run_home = os.environ["PWD"]
    work_home = os.environ["PWD"]
    launch_home = os.environ["PWD"]
else:
    run_home = _required_env("MCSCRIPT_RUN_HOME")
    work_home = _required_env("MCSCRIPT_WORK_HOME")
    # launch home is optional and silently falls back to work home
    launch_home = os.environ.get("MCSCRIPT_LAUNCH_HOME", work_home)

# prefix for run names (e.g., "run" for scripts named run<XXXX>.py)
run_prefix = _required_env("MCSCRIPT_RUN_PREFIX")
# interpreter used to execute .py job scripts
python_executable = _required_env("MCSCRIPT_PYTHON")
# installation directory of mcscript itself (passed on to config.submission)
qsubm_path = _required_env("MCSCRIPT_DIR")
################################################################
# argument processing
################################################################

# set run name
run = run_prefix + args.run
print("Run:", run)

# locate the run script: try each recognized extension in turn and take
# the first candidate file which actually exists
script_extensions = [".py", ".csh"]
job_file = None
for extension in script_extensions:
    filename = os.path.join(run_home, run+extension)
    # BUGFIX: a nonempty path string is always truthy, so the original
    # test `if (filename)` unconditionally matched the ".py" candidate
    # (even when no such file exists) and made the missing-file
    # diagnostic below unreachable; test for actual existence instead
    if (os.path.exists(filename)):
        job_file = filename
        job_extension = extension
        break
print(" Run home:", run_home)  # useful to report now, in case job file missing
if (job_file is None):
    print("No job file %s.* found with an extension in the set %s." % (run, script_extensions))
    exit(1)
print(" Job file:", job_file)
# set queue and flag batch or local mode
# (the maintenance modes --toc and --unlock always force a local
# interactive run)
if ((args.queue == "RUN") or args.toc or args.unlock):
    run_mode = "local"
    run_queue = "local"
    print(" Mode:", run_mode)
else:
    run_mode = "batch"
    run_queue = args.queue
    print(" Mode:", run_mode, "(%s)" % args.queue)

# set wall time
wall_time_min = args.wall
print(" Wall time (min): {:d}".format(wall_time_min))
wall_time_sec = wall_time_min*60

# environment definitions: general run parameters
# (each entry is a "NAME=value" string, later passed both to the batch
# submission command and to the local-run environment)
environment_definitions = [
    "MCSCRIPT_RUN={:s}".format(run),
    "MCSCRIPT_JOB_FILE={:s}".format(job_file),
    "MCSCRIPT_RUN_MODE={:s}".format(run_mode),
    "MCSCRIPT_RUN_QUEUE={:s}".format(run_queue),
    "MCSCRIPT_WALL_SEC={:d}".format(wall_time_sec)
]

# environment definitions: serial run parameters
environment_definitions += [
    "MCSCRIPT_SERIAL_THREADS={:d}".format(args.serialthreads)
]

# environment definitions: hybrid run parameters
environment_definitions += [
    "MCSCRIPT_HYBRID_NODES={:d}".format(args.nodes),
    "MCSCRIPT_HYBRID_RANKS={:d}".format(args.ranks),
    "MCSCRIPT_HYBRID_THREADS={:d}".format(args.threads),
]

# set multi-task run parameters
#
# NOTE(review): this relies on mcscript.task being reachable as an
# attribute of the mcscript package, although only mcscript.config and
# mcscript.utils are imported explicitly above -- confirm that
# mcscript/__init__.py imports the task submodule.
if (args.toc):
    task_mode = mcscript.task.TaskMode.kTOC
elif (args.unlock):
    task_mode = mcscript.task.TaskMode.kUnlock
elif (args.archive):
    task_mode = mcscript.task.TaskMode.kArchive
elif (args.prerun):
    task_mode = mcscript.task.TaskMode.kPrerun
elif (args.offline):
    task_mode = mcscript.task.TaskMode.kOffline
else:
    task_mode = mcscript.task.TaskMode.kRun

# TODO (mac): neaten up so that these arguments are always provided
# (and simplify this code to a simple list += as above)
environment_definitions.append("MCSCRIPT_TASK_MODE={:d}".format(task_mode.value))
if (args.pool is not None):
    environment_definitions.append("MCSCRIPT_TASK_POOL={:s}".format(args.pool))
# NOTE: args.phase defaults to 0 (never None), so this guard is
# currently always true; kept pending the TODO above.
if (args.phase is not None):
    environment_definitions.append("MCSCRIPT_TASK_PHASE={:d}".format(args.phase))
if (args.start is not None):
    environment_definitions.append("MCSCRIPT_TASK_START_INDEX={:d}".format(args.start))
if (args.limit is not None):
    environment_definitions.append("MCSCRIPT_TASK_COUNT_LIMIT={:d}".format(args.limit))
# redirect flag passes through as the literal string "True" or "False"
# (restricted by the argparse choices)
environment_definitions.append("MCSCRIPT_TASK_REDIRECT={:s}".format(args.redirect))

# pass through install directory
if os.environ.get("MCSCRIPT_INSTALL_HOME"):
    environment_definitions += [
        "MCSCRIPT_INSTALL_HOME={:s}".format(os.environ["MCSCRIPT_INSTALL_HOME"])
    ]
elif os.environ.get("MCSCRIPT_INSTALL_DIR"):
    # TODO remove deprecated environment variable
    print("****************************************************************")
    print("MCSCRIPT_INSTALL_DIR is now MCSCRIPT_INSTALL_HOME.")
    print("Please update your environment variables.")
    print("****************************************************************")
    environment_definitions += [
        "MCSCRIPT_INSTALL_HOME={:s}".format(os.environ["MCSCRIPT_INSTALL_DIR"])
    ]
else:
    print("MCSCRIPT_INSTALL_HOME not found in environment")
    exit(1)

# include additional environment setup if defined
if os.environ.get("MCSCRIPT_SOURCE"):
    environment_definitions += [
        "MCSCRIPT_SOURCE={:s}".format(os.environ["MCSCRIPT_SOURCE"])
    ]

# set user-specified variable definitions
# Note conditional is required since "".split(", ") is [""] rather than [].
if (args.vars is None):
    user_environment_definitions = []
else:
    user_environment_definitions = args.vars.split(",")
    print(" User environment definitions:", user_environment_definitions)
environment_definitions += user_environment_definitions
################################################################
# directory setup
################################################################

# set up scratch directory (for batch job work)
#
# Only the name is defined here; creation is left up to the job script,
# in case scratch is local to the compute node.
work_dir = os.path.join(work_home, run)
## if ( not os.path.exists(work_dir)):
##     mcscript.utils.mkdir(work_dir)
environment_definitions.append("MCSCRIPT_WORK_DIR=%s" % work_dir)

# set up run launch directory (for batch job output logging);
# parents are created on demand
launch_dir_parent = os.path.join(launch_home, run)
if ( not os.path.exists(launch_home)):
    mcscript.utils.mkdir(launch_home)
if ( not os.path.exists(launch_dir_parent)):
    mcscript.utils.mkdir(launch_dir_parent)
if (args.archive):
    # archive mode
    # launch in archive directory rather than usual batch job output directory
    # (important since if batch job server directs output to the
    # regular output directory while tar is archiving that directory,
    # tar will return with an error code, torpedoing the archive task)
    launch_dir = os.path.join(launch_home, run, "archive")
else:
    # standard run mode
    launch_dir = os.path.join(launch_home, run, "batch")
if ( not os.path.exists(launch_dir)):
    mcscript.utils.mkdir(launch_dir)
environment_definitions.append("MCSCRIPT_LAUNCH_DIR=%s" % launch_dir)
################################################################
# job environment setup
################################################################

# construct job name: run name, optionally tagged by pool, always tagged
# by phase
job_name = "%s" % run
##job_name += "-w%d" % args.width
if (args.pool is not None):
    job_name += "-%s" % args.pool
job_name += "-%s" % args.phase
print(" Job name:", job_name)

# process environment definitions
#
# Regularize environment definitions: convert all plain variable name
# definitions "VAR" into definition as null string "VAR=".  ("VAR" alone
# would be an environment variable pass-through request to qsub, but it
# causes trouble when building an environment dictionary for local
# execution, so regularizing here ensures uniformity of the environment
# between batch and local runs.)
for i in range(len(environment_definitions)):
    if (not "=" in environment_definitions[i]):
        environment_definitions[i] += "="
print()
print("Vars:", ",".join(environment_definitions))

# environment for the spawned job
#
# NOTE: this deliberately aliases (and therefore augments) os.environ,
# so the job inherits the full parent environment plus the definitions
# assembled above.
job_environ=os.environ
environment_keyvalues = [
    # BUGFIX: split only on the *first* "=", so a definition whose value
    # itself contains "=" (e.g., --vars=OPTS=a=b) yields a (key, value)
    # pair instead of breaking the dict() construction below
    entry.split("=", 1)
    for entry in environment_definitions
    ]
job_environ.update(dict(environment_keyvalues))
################################################################
# run invocation
################################################################

# flush script output before invoking job
print()
sys.stdout.flush()

# handle batch run
if (run_mode == "batch"):

    # set local qsub arguments
    #
    # The site-local configuration translates the generic job parameters
    # into the actual submission command line, an optional string to feed
    # to its stdin, and a repetition count.
    (submission_args, submission_input_string, repetitions) = mcscript.config.submission(job_name, job_file, qsubm_path, environment_definitions, args)

    # notes: options must come before command on some platforms (e.g., Univa)
    print(" ".join(submission_args))
    print(submission_input_string)
    print()
    print("-"*64)
    for i in range(repetitions):
        process = subprocess.Popen(
            submission_args,
            stdin=subprocess.PIPE,     # to take input from communicate
            stdout=subprocess.PIPE,    # to send output to communicate -- default merged stderr
            env=job_environ,
            cwd=launch_dir
            )
        # NOTE(review): the pipe is binary, so communicate() requires
        # bytes -- confirm config.submission returns a bytes input string
        stdout_bytes = process.communicate(input=submission_input_string)[0]
        stdout_string = stdout_bytes.decode("utf-8")
        print(stdout_string)

# handle interactive run
#
# Note: We call interpreter rather than trying to directly execute
# job file since this saves us from bothering with execute permissions.
# But, beware the interpreter enforced by the script's shebang line might
# be different from the version of the interpreter found in the below invocation,
# especially in a "module" environment.
elif (run_mode == "local"):
    # dispatch on the extension recorded when the job file was located
    # (FIX: previously read the leaked for-loop variable "extension";
    # job_extension was stored for exactly this purpose)
    if (job_extension == ".py"):
        popen_args = [python_executable, job_file]
    elif (job_extension == ".csh"):
        popen_args = ["csh", job_file]
    print()
    print("-"*64)
    process = subprocess.Popen(popen_args, cwd=launch_dir, env=job_environ)
    process.wait()
| 40.77521
| 305
| 0.679273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12,482
| 0.643104
|
f73ea882b3c478b64d849ace9aad77a4fd64c642
| 504
|
py
|
Python
|
trees.py
|
dmancevo/trees
|
a76a8d9c8e11c67042e3d947d58a84fee83ad6b5
|
[
"Apache-2.0"
] | null | null | null |
trees.py
|
dmancevo/trees
|
a76a8d9c8e11c67042e3d947d58a84fee83ad6b5
|
[
"Apache-2.0"
] | null | null | null |
trees.py
|
dmancevo/trees
|
a76a8d9c8e11c67042e3d947d58a84fee83ad6b5
|
[
"Apache-2.0"
] | null | null | null |
from ctypes import *
# ctypes mirror of the C "Node" struct exported by trees.so.
# The class is declared empty first so that _fields_ can contain
# self-referential POINTER(Node) entries (the standard ctypes idiom for
# recursive structures).
class Node(Structure): pass
Node._fields_ = [
    ("leaf", c_int),          # NOTE(review): field meanings inferred from names only --
    ("g", c_float),           # confirm against the C definition in trees.so's source
    ("min_samples", c_int),
    ("split_ind", c_int),
    ("split", c_float),
    ("left", POINTER(Node)),  # child pointers, enabling recursive tree layout
    ("right", POINTER(Node))]
# load the compiled library from the current working directory and
# declare the C-level signature of its tree constructor
trees = CDLL("./trees.so")
trees.get_root.argtypes = (c_int, )
trees.get_root.restype = POINTER(Node)
# Thin Python wrapper holding the root node pointer returned by the C
# library.  `min_samples` is forwarded to get_root unchanged.
class Tree(object):
    def __init__(self, min_samples=1):
        self.root = trees.get_root(min_samples)
if __name__ == '__main__':
    tree = Tree()
| 21
| 47
| 0.621032
| 133
| 0.263889
| 0
| 0
| 0
| 0
| 0
| 0
| 75
| 0.14881
|
f73eadae4fdc856f5258f55231ef39bc6666f5e3
| 2,657
|
py
|
Python
|
qf_lib/backtesting/order/order.py
|
webclinic017/qf-lib
|
96463876719bba8a76c8269cef76addf3a2d836d
|
[
"Apache-2.0"
] | 198
|
2019-08-16T15:09:23.000Z
|
2022-03-30T12:44:00.000Z
|
qf_lib/backtesting/order/order.py
|
webclinic017/qf-lib
|
96463876719bba8a76c8269cef76addf3a2d836d
|
[
"Apache-2.0"
] | 13
|
2021-01-07T10:15:19.000Z
|
2022-03-29T13:01:47.000Z
|
qf_lib/backtesting/order/order.py
|
webclinic017/qf-lib
|
96463876719bba8a76c8269cef76addf3a2d836d
|
[
"Apache-2.0"
] | 29
|
2019-08-16T15:21:28.000Z
|
2022-02-23T09:53:49.000Z
|
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from qf_lib.backtesting.contract.contract import Contract
from qf_lib.backtesting.order.execution_style import ExecutionStyle
from qf_lib.backtesting.order.time_in_force import TimeInForce
class Order(object):
    """
    Order generated by a strategy, then processed by PositionSizer.
    Finally executed by ExecutionHandler.
    """

    def __init__(self, contract: Contract, quantity: int, execution_style: ExecutionStyle,
                 time_in_force: TimeInForce, order_state=""):
        """
        This __init__ shouldn't be used anywhere beyond this module. Use OrderFactory for creating Order objects.
        """
        # identifier assigned later in the order's lifecycle, hence None here
        self.id = None  # type:int
        self.contract = contract
        self.quantity = quantity
        self.time_in_force = time_in_force
        self.execution_style = execution_style
        self.order_state = order_state

    def _value_tuple(self):
        # tuple of the defining order parameters, shared by value-based
        # equality and hashing so the two always agree
        return self.contract, self.quantity, self.time_in_force, self.execution_style

    def __str__(self):
        template = ('\nOrder:\n'
                    '\tid: {}\n'
                    '\tcontract: {}\n'
                    '\tquantity: {}\n'
                    '\ttif: {}\n'
                    '\texecution_style: {}\n'
                    '\torder_state: {}')
        return template.format(self.id, self.contract, self.quantity, self.time_in_force,
                               self.execution_style, self.order_state)

    def __eq__(self, other):
        # identity short-circuit
        if other is self:
            return True
        if not isinstance(other, Order):
            return False

        self_has_id = self.id is not None
        other_has_id = other.id is not None

        # one Order has an id and the other hasn't -> never equal
        if self_has_id != other_has_id:
            return False

        # matching assigned ids decide equality immediately; otherwise
        # (ids both absent, or assigned but different) fall through to a
        # comparison of the defining values
        if self_has_id and other.id == self.id:
            return True

        return self._value_tuple() == other._value_tuple()

    def __hash__(self):
        return hash(self._value_tuple())
| 39.073529
| 113
| 0.640572
| 1,803
| 0.678074
| 0
| 0
| 0
| 0
| 0
| 0
| 1,112
| 0.418202
|
f73f526fb320491a4e8c361c6ccf86f4cd4462be
| 8,080
|
py
|
Python
|
purity_fb/purity_fb_1dot12/models/multi_protocol_rule.py
|
tlewis-ps/purity_fb_python_client
|
652835cbd485c95a86da27f8b661679727ec6ea0
|
[
"Apache-2.0"
] | 5
|
2017-09-08T20:47:22.000Z
|
2021-06-29T02:11:05.000Z
|
purity_fb/purity_fb_1dot12/models/multi_protocol_rule.py
|
tlewis-ps/purity_fb_python_client
|
652835cbd485c95a86da27f8b661679727ec6ea0
|
[
"Apache-2.0"
] | 16
|
2017-11-27T20:57:48.000Z
|
2021-11-23T18:46:43.000Z
|
purity_fb/purity_fb_1dot12/models/multi_protocol_rule.py
|
tlewis-ps/purity_fb_python_client
|
652835cbd485c95a86da27f8b661679727ec6ea0
|
[
"Apache-2.0"
] | 22
|
2017-10-13T15:33:05.000Z
|
2021-11-08T19:56:21.000Z
|
# coding: utf-8
"""
Pure Storage FlashBlade REST 1.12 Python SDK
Pure Storage FlashBlade REST 1.12 Python SDK. Compatible with REST API versions 1.0 - 1.12. Developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.12
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class MultiProtocolRule(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    #BEGIN_CUSTOM
    # IR-51527: Prevent Pytest from attempting to collect this class based on name.
    __test__ = False
    #END_CUSTOM

    # swagger_types: attribute name -> declared attribute type
    # attribute_map: attribute name -> key used in the JSON definition
    swagger_types = {
        'access_control_style': 'str',
        'safeguard_acls': 'bool'
    }

    attribute_map = {
        'access_control_style': 'access_control_style',
        'safeguard_acls': 'safeguard_acls'
    }

    def __init__(self, access_control_style=None, safeguard_acls=None):  # noqa: E501
        """MultiProtocolRule - a model defined in Swagger"""  # noqa: E501
        self._access_control_style = None
        self._safeguard_acls = None
        self.discriminator = None
        # route supplied values through the property setters; omitted
        # fields are left as None
        if access_control_style is not None:
            self.access_control_style = access_control_style
        if safeguard_acls is not None:
            self.safeguard_acls = safeguard_acls

    @property
    def access_control_style(self):
        """Gets the access_control_style of this MultiProtocolRule.

        Access control style used for client actions such as setting file
        and directory ACLs.  Possible values include `nfs`, `smb`,
        `shared`, `independent`, and `mode-bits`; see the FlashBlade REST
        API reference for the precise per-protocol semantics of each.

        :return: The access_control_style of this MultiProtocolRule.
        :rtype: str
        """
        return self._access_control_style

    @access_control_style.setter
    def access_control_style(self, access_control_style):
        """Sets the access_control_style of this MultiProtocolRule.

        :param access_control_style: The access_control_style of this MultiProtocolRule.
        :type: str
        """
        self._access_control_style = access_control_style

    @property
    def safeguard_acls(self):
        """Gets the safeguard_acls of this MultiProtocolRule.

        If `true`, prevents NFS clients from erasing a configured ACL when
        setting NFS mode bits.  Must be `false` when
        `access_control_style` is `independent` or `mode-bits`; see the
        FlashBlade REST API reference for full details.

        :return: The safeguard_acls of this MultiProtocolRule.
        :rtype: bool
        """
        return self._safeguard_acls

    @safeguard_acls.setter
    def safeguard_acls(self, safeguard_acls):
        """Sets the safeguard_acls of this MultiProtocolRule.

        :param safeguard_acls: The safeguard_acls of this MultiProtocolRule.
        :type: bool
        """
        self._safeguard_acls = safeguard_acls

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _convert(value):
            # recursively serialize nested swagger models and containers
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        result = {name: _convert(getattr(self, name))
                  for name in self.swagger_types}
        if issubclass(MultiProtocolRule, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return isinstance(other, MultiProtocolRule) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 53.509934
| 1,242
| 0.688738
| 7,563
| 0.936015
| 0
| 0
| 4,774
| 0.590842
| 0
| 0
| 5,722
| 0.708168
|
f73f780a7a7ee00c38db35dff1b7df923b5843be
| 3,789
|
py
|
Python
|
webdriver/tests/actions/mouse_dblclick.py
|
shs96c/web-platform-tests
|
61acad6dd9bb99d32340eb41f5146de64f542359
|
[
"BSD-3-Clause"
] | null | null | null |
webdriver/tests/actions/mouse_dblclick.py
|
shs96c/web-platform-tests
|
61acad6dd9bb99d32340eb41f5146de64f542359
|
[
"BSD-3-Clause"
] | null | null | null |
webdriver/tests/actions/mouse_dblclick.py
|
shs96c/web-platform-tests
|
61acad6dd9bb99d32340eb41f5146de64f542359
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
from tests.actions.support.mouse import assert_move_to_coordinates, get_center
from tests.actions.support.refine import get_events, filter_dict
# Pause length used by these tests to separate two clicks so that the
# browser no longer reports them as a double click (units are those of
# the WebDriver actions "pause" command -- presumably milliseconds).
_DBLCLICK_INTERVAL = 640
# Using local fixtures because we want to start a new session between
# each test, otherwise the clicks in each test interfere with each other.
@pytest.fixture(autouse=True)
def release_actions(dblclick_session, request):
    # release all actions after each test
    # equivalent to a teardown_function, but with access to session fixture
    request.addfinalizer(dblclick_session.actions.release)
@pytest.fixture
def dblclick_session(new_session, url, add_browser_capabilites):
    """Start a fresh WebDriver session navigated to the actions test page.

    A new session per test keeps click state from leaking between tests.
    """
    _, session = new_session({"capabilities": {"alwaysMatch": add_browser_capabilites({})}})
    session.url = url("/webdriver/tests/actions/support/test_actions_wdspec.html")
    return session
@pytest.fixture
def mouse_chain(dblclick_session):
    """Return a pointer action sequence configured as a mouse device."""
    return dblclick_session.actions.sequence(
        "pointer",
        "pointer_id",
        {"pointerType": "mouse"})
# Expected filtered event streams (shared by the tests below, replacing
# three near-identical literal lists): each click is
# mousedown/mouseup/click, and a recognized double click appends a final
# dblclick event.
_CLICK_EVENTS = [
    {"type": "mousedown", "button": 0},
    {"type": "mouseup", "button": 0},
    {"type": "click", "button": 0},
]
_DBLCLICK_EVENTS = _CLICK_EVENTS + _CLICK_EVENTS + [{"type": "dblclick", "button": 0}]


def _assert_events(events, expected):
    """Assert that *events* equal *expected*, ignoring the one leading
    event produced by the initial pointer move and projecting each event
    onto the keys present in the expected records."""
    assert len(events) == len(expected) + 1
    filtered_events = [filter_dict(e, expected[0]) for e in events]
    assert expected == filtered_events[1:]


@pytest.mark.parametrize("click_pause", [0, 200])
def test_dblclick_at_coordinates(dblclick_session, mouse_chain, click_pause):
    """Two clicks separated by at most a short pause must produce a
    dblclick event."""
    div_point = {
        "x": 82,
        "y": 187,
    }
    mouse_chain \
        .pointer_move(div_point["x"], div_point["y"]) \
        .click() \
        .pause(click_pause) \
        .click() \
        .perform()
    events = get_events(dblclick_session)
    assert_move_to_coordinates(div_point, "outer", events)
    _assert_events(events, _DBLCLICK_EVENTS)


def test_dblclick_with_pause_after_second_pointerdown(dblclick_session, mouse_chain):
    """A long pause *between* pointerdown and pointerup of the second
    click must not suppress the dblclick event."""
    outer = dblclick_session.find.css("#outer", all=False)
    center = get_center(outer.rect)
    mouse_chain \
        .pointer_move(int(center["x"]), int(center["y"])) \
        .click() \
        .pointer_down() \
        .pause(_DBLCLICK_INTERVAL + 10) \
        .pointer_up() \
        .perform()
    events = get_events(dblclick_session)
    _assert_events(events, _DBLCLICK_EVENTS)


def test_no_dblclick(dblclick_session, mouse_chain):
    """Two clicks separated by more than the double-click interval must
    NOT produce a dblclick event."""
    outer = dblclick_session.find.css("#outer", all=False)
    center = get_center(outer.rect)
    mouse_chain \
        .pointer_move(int(center["x"]), int(center["y"])) \
        .click() \
        .pause(_DBLCLICK_INTERVAL + 10) \
        .click() \
        .perform()
    events = get_events(dblclick_session)
    _assert_events(events, _CLICK_EVENTS)
| 34.761468
| 92
| 0.594088
| 0
| 0
| 0
| 0
| 1,606
| 0.423859
| 0
| 0
| 899
| 0.237266
|
f740e9188e23989d7d8cb429eceb0134b86a65bd
| 194
|
py
|
Python
|
hallucinate/api.py
|
SySS-Research/hallucinate
|
f6dbeea0599e232707e6cf27c3fe592edba92f6f
|
[
"MIT"
] | 199
|
2021-07-27T13:47:14.000Z
|
2022-03-05T09:18:56.000Z
|
hallucinate/api.py
|
avineshwar/hallucinate
|
f6dbeea0599e232707e6cf27c3fe592edba92f6f
|
[
"MIT"
] | 1
|
2021-12-08T19:32:29.000Z
|
2021-12-08T19:32:29.000Z
|
hallucinate/api.py
|
avineshwar/hallucinate
|
f6dbeea0599e232707e6cf27c3fe592edba92f6f
|
[
"MIT"
] | 13
|
2021-07-27T18:55:03.000Z
|
2021-08-09T06:15:35.000Z
|
class BaseHandler:
    """Base class for handlers.

    Every hook is a no-op returning None, so subclasses override only
    the callbacks they actually need.
    """

    def send(self, data, p):
        """Hook for send events; the default implementation does nothing."""

    def recv(self, data, p):
        """Hook for recv events; the default implementation does nothing."""

    def shutdown(self, p, direction=2):
        """Hook for shutdown events; the default implementation does nothing."""

    def close(self):
        """Hook for close events; the default implementation does nothing."""
| 13.857143
| 39
| 0.525773
| 193
| 0.994845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f741f9a134a29cde4417d89a622f8515e0c9db99
| 2,842
|
py
|
Python
|
actions.py
|
ratnasankeerthanreddy/Chatbot-for-Personal-assisatance
|
6c584601af4c98a3bebedf7073e0ccf2ad8ecf76
|
[
"MIT"
] | 1
|
2020-10-28T15:57:44.000Z
|
2020-10-28T15:57:44.000Z
|
actions.py
|
ratnasankeerthanreddy/Chatbot-for-Personal-assisatance
|
6c584601af4c98a3bebedf7073e0ccf2ad8ecf76
|
[
"MIT"
] | null | null | null |
actions.py
|
ratnasankeerthanreddy/Chatbot-for-Personal-assisatance
|
6c584601af4c98a3bebedf7073e0ccf2ad8ecf76
|
[
"MIT"
] | null | null | null |
from typing import Any, Text, Dict, List
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from utils import convert_timestamp
from rasa_sdk.events import AllSlotsReset
import datetime
from datetime import timedelta, date
import dateutil.parser
import boto3
from boto3.dynamodb.conditions import Key
# class ActionHelloWorld(Action):
#
# def name(self) -> Text:
# return "action_hello_world"
#
# def run(self, dispatcher: CollectingDispatcher,
# tracker: Tracker,
# domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
# dispatcher.utter_message(text="Hello World!")
# return []
def get_doc(date, farm_id='demo_farm_1', table_name='sensor_daily_summary', region_name='us-east-1'):
    """Fetch one daily sensor-summary record from DynamoDB.

    The previously hard-coded farm id, table name and region are now
    defaulted parameters, so existing callers (``get_doc(date_s)``) are
    unaffected while other farms/tables become reachable.

    :param date: date key string (callers pass ``YYYY-MM-DD``)
    :param farm_id: partition key value
    :param table_name: DynamoDB table to read from
    :param region_name: AWS region hosting the table
    :return: the stored item (a dict)
    :raises KeyError: when no item matched the key (no 'Item' in response)
    """
    dynamodb = boto3.resource('dynamodb', region_name=region_name)
    table = dynamodb.Table(table_name)
    doc = table.get_item(Key={'farmId': farm_id, 'date': date})
    return doc['Item']
# class ActionSearchRestaurant(Action):
# def name(self) -> Text:
# return "action_search_restaurant"
# def run(self, dispatcher: CollectingDispatcher,
# tracker: Tracker,
# domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
# entities = tracker.latest_message['entities']
# print(entities)
# for e in entities:
# if e['entity'] == 'type':
# name = e['value']
# if name == 'indian':
# message = "Items: Indian1, Indian2, Indian3, Indian4"
# dispatcher.utter_message(text=message)
# return []
param_arr = ["salinity", "solarRad", "airTemp", "aeration", "potassium", "moisture", "soilTemp", "respiration", "pressure", "phosphorus", "pH", "humidity", "nitrogen", "evapotranspiration(ET)"]
class ActionGetDate(Action):
    """Custom Rasa action that replies with the sensor summary for a date.

    Falls back to today's date when the 'time' slot is missing or cannot
    be parsed, and reports "no data" when the DynamoDB lookup fails.
    Bug fix: the bare ``except:`` clauses (which also swallowed
    SystemExit/KeyboardInterrupt) are narrowed to ``except Exception``.
    """

    def name(self):
        return 'action_date'  # ****This is used in the story!****

    def run(self, dispatcher, tracker, domain):
        try:
            slots = tracker.current_slot_values()
            slot_time = slots['time']
            f_date = convert_timestamp(slot_time)
        except Exception:
            # Missing or unparseable 'time' slot: default to today.
            f_date = date.today()
        date_s = f_date.strftime("%Y-%m-%d")
        str_date = f_date.strftime('%B %d, %Y')
        try:
            doc = get_doc(date_s)
            st = f'Sensor data on {str_date}'
            for key in param_arr:
                st += '\n{:<12}: {:.2f}'.format(key, float(doc[key]))
            dispatcher.utter_message(text=st)
        except Exception:
            # Lookup failed or the record lacks expected keys.
            dispatcher.utter_message(text='No data recorded on '+str_date)
        return [AllSlotsReset()]
| 35.974684
| 363
| 0.678044
| 1,228
| 0.43209
| 0
| 0
| 0
| 0
| 0
| 0
| 1,677
| 0.590077
|
f7421c61dc4a9f2905083616a76cf5c9a110855b
| 4,688
|
py
|
Python
|
main.py
|
Davidswinkels/DownloadWalkingRoutes
|
9ceaee0b96507149086aef7081790a09ab6b3653
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
Davidswinkels/DownloadWalkingRoutes
|
9ceaee0b96507149086aef7081790a09ab6b3653
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
Davidswinkels/DownloadWalkingRoutes
|
9ceaee0b96507149086aef7081790a09ab6b3653
|
[
"Apache-2.0"
] | null | null | null |
from scripts.downloader import *
import fiona
from shapely.geometry import shape
import geopandas as gpd
import matplotlib.pyplot as plt
from pprint import pprint
import requests
import json
import time
import os
# Constant variables
# Bounding box of the search area (lat/lon, WGS84) — presumably the
# Dutch/Belgian Limburg region given the referenced URLs; TODO confirm.
input_min_lat = 50.751797561
input_min_lon = 5.726110232
input_max_lat = 50.938216069
input_max_lon = 6.121604582
# RouteYou JSON-RPC endpoint for map-based route search (API key in path).
route_search_url = "https://api.routeyou.com/2.0/json/Route/k-9aec2fc1705896b901c3ea17d6223f0a/mapSearch"
# Headers mimicking a browser request to the search API.
route_search_headers = {"Accept": "*/*",
                        "Accept-Encoding": "gzip, deflate, br",
                        "Accept-Language": "nl,en-US;q=0.7,en;q=0.3",
                        "Connection": "keep-alive",
                        "Content-Length": "331",
                        "Content-Type": "text/plain;charset=UTF-8",
                        "DNT": "1",
                        "Host": "api.routeyou.com",
                        "Origin": "https://www.routeyou.com",
                        "Referer": "https://www.routeyou.com/route/search/2/walking-route-search",
                        "TE": "Trailers",
                        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0"}
# Headers for the GPX download host, including a session cookie.
default_headers = {"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
                   "Accept-Encoding": "gzip, deflate, br",
                   "Accept-Language": "nl,en-US;q=0.7,en;q=0.3",
                   "Connection": "test",
                   "Cookie": "rtysid=5gf59rik6gf8o7b5an7nalcsh0; "
                             "_ga=GA1.2.1811204879.1553438381; _"
                             "gid=GA1.2.1815573989.1553438381; __"
                             "gads=ID=fab95f7aaf65227e:T=1553438384:S=ALNI_MaIjkdo1dKpYiyQKfWZEymqT7HgUQ",
                   "Host": "download.routeyou.com",
                   "Referer": "https://www.routeyou.com/nl-be/route/view/5653357/wandelroute/"
                              "in-het-spoor-van-napoleon-kasteel-reinhardstein-en-de-stuwdam-van-robertville",
                   "TE": "Trailers",
                   "Upgrade-Insecure-Requests": "1",
                   "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0"}
# # Setup script
# bounding_boxes_list = create_bounding_boxes(input_min_lat, input_min_lon, input_max_lat, input_max_lon,
# nr_of_rows=12, nr_of_columns=12)
# for index, bounding_box in enumerate(bounding_boxes_list):
# route_search_data = '{"jsonrpc":"2.0","id":"3","method":"searchAdvanced","params":' \
# '[{"bounds":{"min":{"lat":%s,"lon":%s},"max":{"lat":%s,"lon":%s}},' \
# '"type.id":2,"score.min":0.5,"bounds.comparator":"geometry"},null,100,0,' \
# '{"clusters":false,"addLanguage":"en","media":false,"description":false}]}' \
# % (bounding_box['min_lat'], bounding_box['min_lon'], bounding_box['max_lat'], bounding_box['max_lon'])
# response = requests.post(url=route_search_url, headers=route_search_headers,
# data=route_search_data)
# with open("D:/Wandelroutes/Text/routes_{}.txt".format(index), "wb") as file:
# file.write(response.content)
# data = json.loads(response.content)
# print("Index / routes count / total routes: ", index, "/", len(data['result']['routes']), "/", data['result']['total'])
#
# for route in data['result']['routes']:
# time.sleep(0.5)
# route_url = "https://download.routeyou.com/k-9aec2fc1705896b901c3ea17d6223f0a/route/{}.gpx?language=nl".format(route['id'])
# filepath = "D:/Wandelroutes/GPX/{}.gpx".format(route['id'])
# download_to_file(route_url, default_headers, filepath)
# Convert every downloaded GPX track into one shapefile of walking routes.
dir_filepath = "D:/Wandelroutes/GPX"
rows_list = []
for gpx_name in os.listdir(dir_filepath):
    # Read the 'tracks' layer and keep its first (only) track feature.
    track_layer = fiona.open(os.path.join(dir_filepath, gpx_name), layer='tracks')
    first_track = track_layer[0]
    gpx_id = os.path.splitext(os.path.basename(gpx_name))[0]
    geometry = shape({'type': 'MultiLineString',
                      'coordinates': first_track['geometry']['coordinates']})
    rows_list.append({'id': str(gpx_id),
                      'name': first_track['properties']['name'],
                      'url': "https://www.routeyou.com/nl-nl/route/view/" + str(gpx_id),
                      'geometry': geometry})
# Assemble all routes into a GeoDataFrame (WGS84) and write the shapefile.
routes_gdf = gpd.GeoDataFrame(rows_list)
routes_gdf.crs = {'init': 'epsg:4326', 'no_defs': True}
routes_gdf.to_file("D:/Wandelroutes/walking_routes.shp")
| 53.272727
| 133
| 0.588097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,988
| 0.637372
|
f7441602c73a268dd40291e6397b91ed0f1027f6
| 3,185
|
py
|
Python
|
seq2seq-chatbot/chat_web.py
|
rohitkujur1997/chatbot
|
76cd460b09f75532a7259d783114d8cf3dda246f
|
[
"MIT"
] | 104
|
2018-03-28T20:30:25.000Z
|
2022-02-18T19:43:21.000Z
|
seq2seq-chatbot/chat_web.py
|
rohitkujur1997/chatbot
|
76cd460b09f75532a7259d783114d8cf3dda246f
|
[
"MIT"
] | 37
|
2018-04-16T15:39:17.000Z
|
2021-05-29T11:28:26.000Z
|
seq2seq-chatbot/chat_web.py
|
rohitkujur1997/chatbot
|
76cd460b09f75532a7259d783114d8cf3dda246f
|
[
"MIT"
] | 63
|
2018-05-18T09:52:20.000Z
|
2021-07-26T08:11:17.000Z
|
"""
Script for serving a trained chatbot model over http
"""
import datetime
import click
from os import path
from flask import Flask, request, send_from_directory
from flask_cors import CORS
from flask_restful import Resource, Api
import general_utils
import chat_command_handler
from chat_settings import ChatSettings
from chatbot_model import ChatbotModel
from vocabulary import Vocabulary
app = Flask(__name__)
CORS(app)
@app.cli.command()
@click.argument("checkpointfile")
@click.option("-p", "--port", type=int)
def serve_chat(checkpointfile, port):
    """Serve a trained chatbot model over HTTP.

    CHECKPOINTFILE: path to the trained model checkpoint.
    -p/--port: TCP port for the Flask development server.

    Exposes two endpoints: /chat/<question> (answer as JSON string)
    and /chat_ui/ (static HTML chat page).
    """
    api = Api(app)
    #Read the hyperparameters and configure paths
    model_dir, hparams, checkpoint = general_utils.initialize_session_server(checkpointfile)
    #Load the vocabulary (shared or separate input/output vocabularies)
    print()
    print ("Loading vocabulary...")
    if hparams.model_hparams.share_embedding:
        shared_vocab_filepath = path.join(model_dir, Vocabulary.SHARED_VOCAB_FILENAME)
        input_vocabulary = Vocabulary.load(shared_vocab_filepath)
        output_vocabulary = input_vocabulary
    else:
        input_vocab_filepath = path.join(model_dir, Vocabulary.INPUT_VOCAB_FILENAME)
        input_vocabulary = Vocabulary.load(input_vocab_filepath)
        output_vocab_filepath = path.join(model_dir, Vocabulary.OUTPUT_VOCAB_FILENAME)
        output_vocabulary = Vocabulary.load(output_vocab_filepath)
    #Create the model
    print ("Initializing model...")
    print()
    with ChatbotModel(mode = "infer",
                      model_hparams = hparams.model_hparams,
                      input_vocabulary = input_vocabulary,
                      output_vocabulary = output_vocabulary,
                      model_dir = model_dir) as model:
        #Load the weights
        print()
        print ("Loading model weights...")
        model.load(checkpoint)
        # Setting up the chat: timestamped log file + per-process settings
        chatlog_filepath = path.join(model_dir, "chat_logs", "web_chatlog_{0}.txt".format(datetime.datetime.now().strftime("%Y%m%d_%H%M%S")))
        chat_settings = ChatSettings(hparams.model_hparams, hparams.inference_hparams)
        chat_command_handler.print_commands()
        class Answer(Resource):
            # GET /chat/<question>: run commands or ask the model.
            def get(self, question):
                is_command, terminate_chat, _ = chat_command_handler.handle_command(question, model, chat_settings)
                if terminate_chat:
                    answer = "[Can't terminate from http request]"
                elif is_command:
                    answer = "[Command processed]"
                else:
                    #If it is not a command (it is a question), pass it on to the chatbot model to get the answer
                    _, answer = model.chat(question, chat_settings)
                    if chat_settings.inference_hparams.log_chat:
                        chat_command_handler.append_to_chatlog(chatlog_filepath, question, answer)
                return answer
        class UI(Resource):
            # GET /chat_ui/: serve the static chat page.
            def get(self):
                return send_from_directory(".", "chat_ui.html")
        api.add_resource(Answer, "/chat/<string:question>")
        api.add_resource(UI, "/chat_ui/")
        # NOTE: blocks here; the model stays loaded while the server runs.
        app.run(debug=False, port=port)
| 37.916667
| 141
| 0.661538
| 892
| 0.280063
| 0
| 0
| 2,757
| 0.86562
| 0
| 0
| 538
| 0.168917
|
f744580853ac0dc47dbf987d1497464099a8f500
| 1,907
|
py
|
Python
|
tests/test-checkbox.py
|
JonathanRRogers/twill
|
e1afc10366dcd29b82eeae2d586e49ca7737039a
|
[
"MIT"
] | null | null | null |
tests/test-checkbox.py
|
JonathanRRogers/twill
|
e1afc10366dcd29b82eeae2d586e49ca7737039a
|
[
"MIT"
] | null | null | null |
tests/test-checkbox.py
|
JonathanRRogers/twill
|
e1afc10366dcd29b82eeae2d586e49ca7737039a
|
[
"MIT"
] | null | null | null |
import twilltestlib
import twill
from twill import namespaces, commands
from twill.errors import TwillAssertionError
from mechanize import BrowserStateError
def setup_module():
    # Resolve the test server URL once for every test in this module.
    global url
    url = twilltestlib.get_url()
def test_select_multiple():
    """Exercise fv() on a multi-value checkbox field: plain names, then
    '-name' / '+name' forms, verifying the submitted form each time."""
    namespaces.new_local_dict()
    twill.commands.reset_browser()
    browser = twill.get_browser()
    # A freshly reset browser has no page loaded, so get_title must raise.
    try:
        browser.get_title()
        assert 0, "should never get here"
    except BrowserStateError:
        pass
    commands.go(url)
    commands.go('/test_checkboxes')
    # Set then explicitly unset all three boxes: nothing should be submitted.
    commands.fv('1', 'checkboxtest', 'one')
    commands.fv('1', 'checkboxtest', 'two')
    commands.fv('1', 'checkboxtest', 'three')
    commands.fv('1', 'checkboxtest', '-one')
    commands.fv('1', 'checkboxtest', '-two')
    commands.fv('1', 'checkboxtest', '-three')
    commands.submit()
    assert not 'CHECKBOXTEST' in browser.get_html()
    # '+name' checks the boxes: all three values should be submitted.
    commands.fv('1', 'checkboxtest', '+one')
    commands.fv('1', 'checkboxtest', '+two')
    commands.fv('1', 'checkboxtest', '+three')
    commands.submit()
    assert 'CHECKBOXTEST: ==one,two,three==' in browser.get_html()
    # '-name' unchecks them again: nothing submitted once more.
    commands.fv('1', 'checkboxtest', '-one')
    commands.fv('1', 'checkboxtest', '-two')
    commands.fv('1', 'checkboxtest', '-three')
    commands.submit()
    assert not 'CHECKBOXTEST' in browser.get_html()
def test_select_single():
    """Bool-style values ('1', '0', 'True', 'False') must be rejected by
    fv() on a multi-checkbox field.

    Bug fix: the original put ``assert False`` INSIDE the ``try`` with a
    bare ``except:`` after it, so the AssertionError itself was swallowed
    and the test could never fail. The assertion now lives in the
    ``else`` clause, which only runs when fv() did NOT raise.
    """
    namespaces.new_local_dict()
    twill.commands.reset_browser()
    browser = twill.get_browser()
    # A freshly reset browser has no page loaded, so get_title must raise.
    try:
        browser.get_title()
        assert 0, "should never get here"
    except BrowserStateError:
        pass
    commands.go(url)
    commands.go('/test_checkboxes')
    for x in ('1', '0', 'True', 'False'):
        try:
            commands.fv('1', 'checkboxtest', x)
        except Exception:
            pass  # expected: bool-style value rejected
        else:
            assert False, ("Should not be able to use a bool style for when "
                           "there are multiple checkboxes")
| 26.859155
| 77
| 0.631358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 541
| 0.283692
|
f74471527dea41ff8d2f932ecbb41c7a4779f9c6
| 894
|
py
|
Python
|
native/release/test.py
|
ncbray/pystream
|
70bba5646d6512adb6803564c22268d3424c66d8
|
[
"Apache-2.0"
] | 6
|
2015-09-19T18:22:33.000Z
|
2020-11-29T15:21:17.000Z
|
native/release/test.py
|
ncbray/pystream
|
70bba5646d6512adb6803564c22268d3424c66d8
|
[
"Apache-2.0"
] | 1
|
2015-08-04T08:03:46.000Z
|
2015-08-04T08:03:46.000Z
|
native/release/test.py
|
ncbray/pystream
|
70bba5646d6512adb6803564c22268d3424c66d8
|
[
"Apache-2.0"
] | 1
|
2019-12-09T08:27:09.000Z
|
2019-12-09T08:27:09.000Z
|
# Copyright 2011 Nicholas Bray
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import pybind
a = pybind.vec3(1.0, 2.0, 3.0)
b = pybind.vec3(2.0, 1.0, 3.0)
def dot(a, b):
    """Return the dot product of two vectors exposing x/y/z attributes."""
    return (a.x * b.x) + (a.y * b.y) + (a.z * b.z)
def dummy(a, b):
    """Baseline no-op with the same signature as dot(); used to measure
    pure Python call overhead."""
    return None
def test(f):
    # Time one million calls of f(a, b) using the module-level vectors.
    # NOTE(review): time.clock is Python 2 only (removed in Python 3.8);
    # this file uses Python 2 print syntax below, so it is kept as-is.
    start = time.clock()
    for i in range(1000000):
        f(a, b)
    return time.clock()-start
# Compare the native dot product against the no-op baseline to estimate the
# relative cost of the bound C function call. print(...) with a single
# argument behaves identically under Python 2 and 3 (the original
# `print t1/t2` statement was Python-2-only syntax).
t1 = test(pybind.dot)
t2 = test(dummy)
print(t1 / t2)
| 22.35
| 74
| 0.710291
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 564
| 0.630872
|
f7451e04b5509a3b3a1ecacc27d8ca29e4bae31f
| 657
|
py
|
Python
|
4.py
|
Andrey543/Prack_10
|
263cae3204ed624b68d5797bd8d8833dd88e4682
|
[
"PSF-2.0"
] | null | null | null |
4.py
|
Andrey543/Prack_10
|
263cae3204ed624b68d5797bd8d8833dd88e4682
|
[
"PSF-2.0"
] | null | null | null |
4.py
|
Andrey543/Prack_10
|
263cae3204ed624b68d5797bd8d8833dd88e4682
|
[
"PSF-2.0"
] | null | null | null |
# Word-for-word translator: builds an English->Russian dictionary from
# 'en-ru.txt' ("word\t-\ttranslation" per line) and rewrites 'input.txt'
# into 'output.txt', replacing every known word with its translation.
#
# Fixes: the three file handles were opened but never closed (output
# flushed only on interpreter exit) — now managed with `with`; the local
# previously named `input` shadowed the builtin — renamed.
prov = {'q', 'w', 'e', 'r', 't', 'y', 'u', 'i', 'o', 'p', 'a', 's', 'd',
        'f', 'g', 'h', 'j', 'k', 'l', 'z', 'x', 'c', 'v', 'b', 'n', 'm'}
slovar = {}
with open('en-ru.txt', 'r') as enru:
    s = enru.read()
s = s.replace('\t-\t', ' ')
while len(s) > 0:
    # Each line is "<english> <translation>"; key is up to the first space.
    slovar[s[:s.index(' ')]] = s[s.index(' '):s.index('\n')]
    s = s[s.index('\n')+1:]
print(slovar)
with open('input.txt', 'r') as source, open('output.txt', 'w') as output:
    s = source.read()
    s = s.lower()
    x = ''
    while len(s) > 0:
        a = s[0]
        if a not in prov:
            # A non-letter terminates the current word: emit its
            # translation when known, otherwise the word unchanged.
            if x in slovar:
                print(slovar[x], a, file=output, sep='', end='')
            else:
                print(x, a, file=output, sep='', end='')
            x = ''
        else:
            x += a
        s = s[1:]
| 22.655172
| 110
| 0.438356
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 157
| 0.238965
|
f74545257d4ee21af8ad7aae2149ad290991f512
| 2,485
|
py
|
Python
|
examples/siamese_mnist.py
|
DmitryUlyanov/deeppy
|
c9644d348e22b78d32ea049fb0ac14bf3b750941
|
[
"MIT"
] | 1
|
2015-09-16T08:01:21.000Z
|
2015-09-16T08:01:21.000Z
|
examples/siamese_mnist.py
|
rajat1994/deeppy
|
79cc7cb552f30bc70eeea9ee7ff4976b0899ea66
|
[
"MIT"
] | null | null | null |
examples/siamese_mnist.py
|
rajat1994/deeppy
|
79cc7cb552f30bc70eeea9ee7ff4976b0899ea66
|
[
"MIT"
] | 2
|
2020-04-05T21:41:14.000Z
|
2021-09-28T18:05:49.000Z
|
#!/usr/bin/env python
"""
Siamese networks
================
"""
import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
import deeppy as dp
# Fetch MNIST data
dataset = dp.dataset.MNIST()
x_train, y_train, x_test, y_test = dataset.data(flat=True, dp_dtypes=True)
# Normalize pixel intensities
scaler = dp.StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
# Generate image pairs: label 1 when both images show the same digit,
# 0 otherwise (pairs of the same index are skipped).
n_pairs = 100000
x1 = np.empty((n_pairs, 28*28), dtype=dp.float_)
x2 = np.empty_like(x1, dtype=dp.float_)
y = np.empty(n_pairs, dtype=dp.int_)
n_imgs = x_train.shape[0]
n = 0
while n < n_pairs:
    i = random.randint(0, n_imgs-1)
    j = random.randint(0, n_imgs-1)
    if i == j:
        continue
    x1[n, ...] = x_train[i]
    x2[n, ...] = x_train[j]
    if y_train[i] == y_train[j]:
        y[n] = 1
    else:
        y[n] = 0
    n += 1
# Prepare network inputs
train_input = dp.SupervisedSiameseInput(x1, x2, y, batch_size=128)
# Setup network: two weight-shared 1024-unit ReLU layers feeding a
# 2-dimensional embedding trained with a contrastive loss.
w_gain = 1.5
w_decay = 1e-4
net = dp.SiameseNetwork(
    siamese_layers=[
        dp.FullyConnected(
            n_out=1024,
            weights=dp.Parameter(dp.AutoFiller(w_gain), weight_decay=w_decay),
        ),
        dp.ReLU(),
        dp.FullyConnected(
            n_out=1024,
            weights=dp.Parameter(dp.AutoFiller(w_gain), weight_decay=w_decay),
        ),
        dp.ReLU(),
        dp.FullyConnected(
            n_out=2,
            weights=dp.Parameter(dp.AutoFiller(w_gain)),
        ),
    ],
    loss=dp.ContrastiveLoss(margin=1.0),
)
# Train network
trainer = dp.StochasticGradientDescent(
    max_epochs=15,
    learn_rule=dp.RMSProp(learn_rate=0.01),
)
trainer.train(net, train_input)
# Plot 2D embedding: min-max scale each embedding dimension to [0, 1]
# and scatter thumbnail images at their embedded positions.
test_input = dp.Input(x_test)
x_test = np.reshape(x_test, (-1,) + dataset.img_shape)
feat = net.features(test_input)
feat -= np.min(feat, 0)
feat /= np.max(feat, 0)
plt.figure()
ax = plt.subplot(111)
shown_images = np.array([[1., 1.]])
for i in range(feat.shape[0]):
    dist = np.sum((feat[i] - shown_images)**2, 1)
    if np.min(dist) < 6e-4:
        # don't show points that are too close
        continue
    shown_images = np.r_[shown_images, [feat[i]]]
    imagebox = offsetbox.AnnotationBbox(
        offsetbox.OffsetImage(x_test[i], zoom=0.6, cmap=plt.cm.gray_r),
        xy=feat[i], frameon=False
    )
    ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
plt.title('Embedding from the last layer of the network')
| 24.60396
| 78
| 0.639437
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 289
| 0.116298
|
f7455caca893431c335911322deda0eadbce921b
| 1,580
|
py
|
Python
|
py/tests/tests_integ_yarn.py
|
My-Technical-Architect/sparkling-water
|
b1381891baefd63fd15f8dc2a73b049828a919bc
|
[
"Apache-2.0"
] | null | null | null |
py/tests/tests_integ_yarn.py
|
My-Technical-Architect/sparkling-water
|
b1381891baefd63fd15f8dc2a73b049828a919bc
|
[
"Apache-2.0"
] | null | null | null |
py/tests/tests_integ_yarn.py
|
My-Technical-Architect/sparkling-water
|
b1381891baefd63fd15f8dc2a73b049828a919bc
|
[
"Apache-2.0"
] | null | null | null |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Integration tests for pySparkling for Spark running in YARN mode
"""
from integ_test_utils import IntegTestSuite
import test_utils
class YarnIntegTestSuite(IntegTestSuite):
    """Integration suite that runs the Chicago crime demo on YARN."""

    def test_chicago_crime(self):
        self.spark_master("yarn-client")
        # Configure YARN environment; any executor failure fails the test.
        yarn_settings = (
            ("spark.yarn.max.executor.failures", 1),
            ("spark.executor.instances", 3),
            ("spark.executor.memory", "2g"),
            ("spark.ext.h2o.port.base", 63331),
            ("spark.driver.memory", "2g"),
        )
        for option, value in yarn_settings:
            self.conf(option, value)
        self.launch("examples/scripts/ChicagoCrimeDemo.py")
if __name__ == '__main__':
    # Entry point: run the YARN suite and write the named test report.
    test_utils.run_tests([YarnIntegTestSuite], file_name="py_integ_yarn_tests_report")
| 39.5
| 97
| 0.725316
| 538
| 0.340506
| 0
| 0
| 0
| 0
| 0
| 0
| 1,130
| 0.71519
|
f747016ef477946806c1d818d38cadb8c27e681e
| 12,988
|
py
|
Python
|
exchanges/virtualExchangeService.py
|
AshWorkshop/Trandash
|
a96b523fbd171ba2d43a6720ef2e2a496a0cf75a
|
[
"MIT"
] | 1
|
2018-07-25T03:43:35.000Z
|
2018-07-25T03:43:35.000Z
|
exchanges/virtualExchangeService.py
|
AshWorkshop/Trandash
|
a96b523fbd171ba2d43a6720ef2e2a496a0cf75a
|
[
"MIT"
] | null | null | null |
exchanges/virtualExchangeService.py
|
AshWorkshop/Trandash
|
a96b523fbd171ba2d43a6720ef2e2a496a0cf75a
|
[
"MIT"
] | null | null | null |
from twisted.internet import defer, task
from twisted.python.failure import Failure
from exchanges.base import ExchangeService
from exchange import calcVirtualOrderBooks
import copy
import time
def defaultErrHandler(fail):
    """Print a brief traceback for the failure and return it unchanged,
    so the error keeps propagating down the errback chain."""
    print(fail.getBriefTraceback())
    return fail
def handleMultipleErr(data):
    """Check a DeferredList-style result list of (success, value) pairs.

    Prints the value of every failed entry and returns False if any entry
    failed; returns True when all succeeded (or the list is empty).

    Bug fix: the loop previously iterated over the undefined name ``res``
    instead of the ``data`` parameter, raising NameError on every call.
    """
    flag = True
    for state, err in data:
        if not state:
            print(err)
            flag = False
    return flag
class OrderData(object):
    """In-memory store of order records keyed by an auto-incrementing id.

    All read accessors hand out deep copies except getOrderRef, which
    deliberately returns the stored (mutable) record.
    """

    def __init__(self, orders=None):
        if orders is None:
            self._orders = {}
            self._newId = 0
        else:
            self._orders = orders
            self._newId = max(self._orders) + 1

    def resetNewId(self):
        """Recompute the next id from the highest existing key."""
        self._newId = max(self._orders) + 1

    def _takeNewId(self):
        # Hand out the current id and advance the counter.
        taken = self._newId
        self._newId += 1
        return taken

    def getNewId(self):
        """Peek at the id the next recorded order will receive."""
        return self._newId

    def recordOrder(self, orderInfo):
        """Store a deep copy of orderInfo and return its newly assigned id."""
        new_key = self._takeNewId()
        self._orders[new_key] = copy.deepcopy(orderInfo)
        return new_key

    def getOrder(self, orderId, defaultValue=None):
        """Return a deep copy of the stored order, or defaultValue."""
        return copy.deepcopy(self._orders.get(orderId, defaultValue))

    def getOrderRef(self, orderId, defaultValue=None):
        """Return the stored order itself (mutable reference), or defaultValue."""
        return self._orders[orderId] if orderId in self._orders else defaultValue

    def getOrders(self):
        """Return a deep copy of the whole id -> order mapping."""
        return copy.deepcopy(self._orders)

    def delOrder(self, orderId):
        """Remove the order when present; unknown ids are ignored."""
        if orderId in self._orders:
            del self._orders[orderId]

    def loadData(self, path):
        pass  # TODO

    def saveData(self, path):
        pass  # TODO
class VirtualExchange(ExchangeService):
    """Trade a virtual pair (A, B) through a medium currency M by combining
    the real order books of (A, M) and (M, B) on the wrapped exchange.

    Fixes over the previous revision:
      * getBalance/getBalances referenced the unbound module name
        ``exchange`` instead of ``self.exchange`` (NameError).
      * getOrderBook's failure path called the non-existent
        ``self.cleanData`` instead of ``self.cleanOrderBookData``.
      * the inlineCallbacks coroutines used the bare name ``returnValue``,
        which is never imported; now ``defer.returnValue``.
      * getOrder with fromRemote=False never assigned ``d`` before the
        trailing ``d.addErrback`` (NameError).
      * cancel built ``handleBody`` but never attached it, so the local
        order status was never updated.
    """

    def __init__(self, exchange, mediums, orders=None):
        """:param exchange: underlying ExchangeService doing the real trades
        :param mediums: non-empty tuple of medium currencies (first is used)
        :param orders: optional pre-populated OrderData store
        """
        self.exchange = exchange
        if not isinstance(mediums, tuple):
            raise TypeError("type of 'mediums' must be 'tuple'")
        self.medium = mediums[0]
        self.orderBookData = None   # cached (virtual order book, medium book)
        self.orderBookPairs = None  # pairs the cache was computed for
        if orders is None:
            self.orders = OrderData()
        else:
            self.orders = orders
        self.retryTimes = 3
        self.retryWaitTime = 1  # second

    def cleanOrderBookData(self):
        """Invalidate the cached virtual order book."""
        self.orderBookData = None

    def setMediums(self, mediums):
        """Switch to a new medium currency and drop the stale book cache."""
        if not isinstance(mediums, tuple):
            raise TypeError("type of 'medium' must be 'tuple'")
        self.medium = mediums[0]
        self.cleanOrderBookData()

    def getBalance(self, coin):
        # Bug fix: was `exchange.getBalance(coin)` (unbound name).
        return self.exchange.getBalance(coin)

    def getBalances(self, coins=None):
        # Bug fix: was `exchange.getBalances(coins)` (unbound name).
        return self.exchange.getBalances(coins)

    def getOrderState(self, statusA, statusB):
        """Combine the states of the two underlying legs into one status."""
        unusual = ('error', 'cancelled')
        if statusA == 'done' and statusB == 'done':
            status = 'done'
        elif statusA == 'cancelled' and statusB == 'cancelled':
            status = 'cancelled'
        elif statusA in unusual or statusB in unusual:  # TODO: could be improved
            status = 'error'
        else:
            status = 'open'
        return status

    def getOrderBook(self, pairs):
        """Fetch both legs' books and cache the derived virtual book.

        Returns a Deferred firing with the virtual order book, or None
        when either underlying request failed.
        """
        self.orderBookPairs = pairs
        dA = self.exchange.getOrderBook((pairs[0], self.medium))
        dB = self.exchange.getOrderBook((self.medium, pairs[1]))
        d = defer.DeferredList([dA, dB], consumeErrors=True)
        def handleBody(datas):
            (stateA, dataA), (stateB, dataB) = datas
            if stateA and stateB:
                virtualOB, medium = calcVirtualOrderBooks(dataA, dataB)
                self.orderBookData = (virtualOB, medium)
                return virtualOB
            else:
                for state, data in datas:
                    if not state:
                        print(data)
                # Bug fix: was `self.cleanData()`, which does not exist.
                self.cleanOrderBookData()
                return None
        d.addCallback(handleBody)
        d.addErrback(defaultErrHandler)
        return d

    def buy(self, pairs, price, amount):
        """Place a virtual buy of `amount` of pairs[0] priced in pairs[1],
        split into an (A, M) buy and an (M, B) buy sized from the cached
        book. Requires a preceding getOrderBook(pairs) call.
        Fires with the local order id, or None when retries are exhausted.
        """
        data = self.orderBookData
        # check if data is available
        if data is None:
            d = defer.fail(Exception('No available order book data'))
        elif pairs != self.orderBookPairs:
            d = defer.fail(Exception("coin pairs 'pairs' does not match the order book data"))
        else:
            PRICE, AMOUNT = range(2)
            (_, sell), (_, mediumSell) = data
            overflow = False
            A = amount
            B = price * amount
            M = 0
            # calculate the amount of medium by walking the ask levels
            sumM = 0
            for l, order in enumerate(sell):
                s = sumM + order[AMOUNT]
                if s == A:
                    M = sum(mediumSell[:l + 1])
                    break
                elif s > A:
                    # partial fill of level l: pro-rated share of its medium
                    M = sum(mediumSell[:l]) + (A - sumM) / order[AMOUNT] * mediumSell[l]
                    break
                sumM = s
            else:
                overflow = True
            if overflow:
                d = defer.fail(Exception("'amount' is too big"))
            else:
                # initiate transaction
                symbol = self.exchange.getSymbol(pairs)
                dA = lambda: self.exchange.buy((pairs[0], self.medium), M / A, A)
                dB = lambda: self.exchange.buy((self.medium, pairs[1]), B / M, M)
                @defer.inlineCallbacks
                def transaction():
                    taskA, taskB = dA, dB
                    for t in range(1 + self.retryTimes):
                        res = yield defer.DeferredList([taskA(), taskB()], consumeErrors=True)
                        (stateA, dataA), (stateB, dataB) = res
                        if stateA and stateB:  # succeeded
                            break
                        # NOTE(review): blocking sleep stalls the reactor —
                        # confirm this is acceptable here.
                        time.sleep(self.retryWaitTime)
                        # by default re-use an already successful leg result
                        taskA, taskB = lambda: defer.succeed(dataA), lambda: defer.succeed(dataB)
                        if not stateA:
                            print(dataA)
                            print(f"start {pairs[0], self.medium} buy order failed")
                            print(f"retry times: {t}")
                            taskA = dA
                        if not stateB:
                            print(dataB)
                            print(f"start {self.medium, pairs[1]} buy order failed")
                            print(f"retry times: {t}")
                            taskB = dB
                    else:
                        print(f"out of retry times, starting buy order failed")
                        # Bug fix: was bare `returnValue` (never imported).
                        defer.returnValue(None)
                    id = self.orders.recordOrder({
                        'orderId': (dataA, dataB),
                        'type': 'buy',
                        'initPrice': price,
                        'initAmount': amount,
                        'coinPair': symbol,
                        'status': 'open',
                    })
                    defer.returnValue(id)
                d = transaction()
        d.addErrback(defaultErrHandler)
        return d

    def sell(self, pairs, price, amount):
        """Place a virtual sell of `amount` of pairs[0] priced in pairs[1],
        split into an (A, M) sell and an (M, B) sell sized from the cached
        book. Requires a preceding getOrderBook(pairs) call.
        Fires with the local order id, or None when retries are exhausted.
        """
        data = self.orderBookData
        # check if data is available
        if data is None:
            d = defer.fail(Exception('No available order book data'))
        elif pairs != self.orderBookPairs:
            d = defer.fail(Exception("coin pairs 'pairs' does not match the order book data"))
        else:
            PRICE, AMOUNT = range(2)
            (buy, _), (mediumBuy, _) = data
            overflow = False
            A = amount
            B = price * amount
            M = 0
            # calculate the amount of medium by walking the bid levels
            sumM = 0
            for l, order in enumerate(buy):
                s = sumM + order[AMOUNT]
                if s == A:
                    M = sum(mediumBuy[:l + 1])
                    break
                elif s > A:
                    # partial fill of level l: pro-rated share of its medium
                    M = sum(mediumBuy[:l]) + (A - sumM) / order[AMOUNT] * mediumBuy[l]
                    break
                sumM = s
            else:
                overflow = True
            if overflow:
                d = defer.fail(Exception("'amount' is too big"))
            else:
                # initiate transaction
                symbol = self.exchange.getSymbol(pairs)
                dA = lambda: self.exchange.sell((pairs[0], self.medium), M / A, A)
                dB = lambda: self.exchange.sell((self.medium, pairs[1]), B / M, M)
                @defer.inlineCallbacks
                def transaction():
                    taskA, taskB = dA, dB
                    for t in range(1 + self.retryTimes):
                        res = yield defer.DeferredList([taskA(), taskB()], consumeErrors=True)
                        (stateA, dataA), (stateB, dataB) = res
                        if stateA and stateB:  # succeeded
                            break
                        # NOTE(review): blocking sleep stalls the reactor —
                        # confirm this is acceptable here.
                        time.sleep(self.retryWaitTime)
                        # by default re-use an already successful leg result
                        taskA, taskB = lambda: defer.succeed(dataA), lambda: defer.succeed(dataB)
                        if not stateA:
                            print(dataA)
                            print(f"start {pairs[0], self.medium} sell order failed")
                            print(f"retry times: {t}")
                            taskA = dA
                        if not stateB:
                            print(dataB)
                            print(f"start {self.medium, pairs[1]} sell order failed")
                            print(f"retry times: {t}")
                            taskB = dB
                    else:
                        print(f"out of retry times, starting sell order failed")
                        # Bug fix: was bare `returnValue` (never imported).
                        defer.returnValue(None)
                    id = self.orders.recordOrder({
                        'orderId': (dataA, dataB),
                        'type': 'sell',
                        'initPrice': price,
                        'initAmount': amount,
                        'coinPair': symbol,
                        'status': 'open',
                    })
                    defer.returnValue(id)
                d = transaction()
        d.addErrback(defaultErrHandler)
        return d

    def getOrder(self, pairs, orderId, fromRemote=True):
        """method to query the order info with order id
        :param fromRemote: flag used to determine order data from obtained local or remote server
        """
        data = self.orders.getOrder(orderId)
        symbol = self.exchange.getSymbol(pairs)
        # check if the orderId exist
        if data is None:
            d = defer.fail(Exception('this orderId does not exist'))
        elif symbol != data['coinPair']:
            d = defer.fail(Exception("'pairs' does not match this order"))
        elif fromRemote:
            idA, idB = data['orderId']
            dA = self.exchange.getOrder((pairs[0], self.medium), idA)
            dB = self.exchange.getOrder((self.medium, pairs[1]), idB)
            d = defer.DeferredList([dA, dB], consumeErrors=True)
            def handleBody(res):
                if not handleMultipleErr(res):
                    return None
                (_, resA), (_, resB) = res
                statusA, statusB = resA['status'], resB['status']
                status = self.getOrderState(statusA, statusB)
                # update local data
                self.orders.getOrderRef(orderId)['status'] = status
                order = {
                    'orderId': orderId,
                    'type': data['type'],
                    'initPrice': data['initPrice'],
                    'initAmount': data['initAmount'],
                    'coinPair': symbol,
                    'status': status,
                }
                return order
            d.addCallback(handleBody)
        else:
            # Bug fix: previously `defer.succeed(data)` was not assigned to
            # d, so the local-data path raised NameError below.
            d = defer.succeed(data)
        d.addErrback(defaultErrHandler)
        return d

    def cancel(self, pairs, orderId):
        """Cancel both legs of a virtual order; marks it 'cancelled'
        locally when both underlying cancels succeed."""
        data = self.orders.getOrderRef(orderId)
        symbol = self.exchange.getSymbol(pairs)
        # check if the orderId exist
        if data is None:
            d = defer.fail(Exception('this orderId does not exist'))
        elif symbol != data['coinPair']:
            d = defer.fail(Exception("'pairs' does not match this order"))
        else:
            idA, idB = data['orderId']
            dA = self.exchange.cancel((pairs[0], self.medium), idA)
            dB = self.exchange.cancel((self.medium, pairs[1]), idB)
            d = defer.DeferredList([dA, dB], consumeErrors=True)
            def handleBody(res):
                if not handleMultipleErr(res):
                    return None
                # NOTE(review): assumes each leg's cancel resolves to a
                # (state, data) pair — confirm against the wrapped exchange.
                (_, (stateA, dataA)), (_, (stateB, dataB)) = res
                if stateA and stateB:
                    data['status'] = 'cancelled'
                    return True
                else:
                    return False
            # Bug fix: handleBody was previously defined but never attached.
            d.addCallback(handleBody)
        d.addErrback(defaultErrHandler)
        return d
if __name__ == '__main__':
    # Smoke check: wrap the Bitfinex service with ETH as the medium currency.
    from exchanges.bitfinex.BitfinexService import bitfinex
    VirtualExchange(bitfinex, ('ETH',) )
| 34.542553
| 97
| 0.495303
| 12,410
| 0.955497
| 6,199
| 0.477287
| 3,210
| 0.247151
| 0
| 0
| 1,642
| 0.126424
|
f7479f1a05ace202d6234fbd90b428551eb021a1
| 7,989
|
py
|
Python
|
testplan/testing/cpp/cppunit.py
|
Morgan-Stanley/Testplan
|
9374d6e0da6ae9aa7a1b5e08b42cd21993485837
|
[
"Apache-2.0"
] | null | null | null |
testplan/testing/cpp/cppunit.py
|
Morgan-Stanley/Testplan
|
9374d6e0da6ae9aa7a1b5e08b42cd21993485837
|
[
"Apache-2.0"
] | null | null | null |
testplan/testing/cpp/cppunit.py
|
Morgan-Stanley/Testplan
|
9374d6e0da6ae9aa7a1b5e08b42cd21993485837
|
[
"Apache-2.0"
] | null | null | null |
import os
from schema import Or
from testplan.common.config import ConfigOption
from ..base import ProcessRunnerTest, ProcessRunnerTestConfig
from ...importers.cppunit import CPPUnitResultImporter, CPPUnitImportedResult
class CppunitConfig(ProcessRunnerTestConfig):
    """
    Configuration object for :py:class:`~testplan.testing.cpp.cppunit.Cppunit`.
    """
    @classmethod
    def get_options(cls):
        # Each *_flag option accepts None (feature disabled) or a string
        # that looks like a command-line flag (must start with "-").
        return {
            ConfigOption("file_output_flag", default="-y"): Or(
                None, lambda x: x.startswith("-")
            ),
            # Where the XML report is written; empty means "derive from runpath".
            ConfigOption("output_path", default=""): str,
            ConfigOption("filtering_flag", default=None): Or(
                None, lambda x: x.startswith("-")
            ),
            # Native Cppunit test filter pattern passed after filtering_flag.
            ConfigOption("cppunit_filter", default=""): str,
            ConfigOption("listing_flag", default=None): Or(
                None, lambda x: x.startswith("-")
            ),
            # Optional callable overriding Cppunit.parse_test_context.
            ConfigOption("parse_test_context", default=None): Or(
                None, lambda x: callable(x)
            ),
        }
class Cppunit(ProcessRunnerTest):
"""
Subprocess test runner for Cppunit: https://sourceforge.net/projects/cppunit
For original docs please see:
http://cppunit.sourceforge.net/doc/1.8.0/
http://cppunit.sourceforge.net/doc/cvs/cppunit_cookbook.html
Please note that the binary (either native binary or script) should output
in XML format so that Testplan is able to parse the result. By default
Testplan reads from stdout, but if `file_output_flag` is set (e.g. "-y"),
the binary should accept a file path and write the result to that file,
which will be loaded and parsed by Testplan. For example:
.. code-block:: bash
./cppunit_bin -y /path/to/test/result
:param name: Test instance name, often used as uid of test entity.
:type name: ``str``
:param binary: Path to the application binary or script.
:type binary: ``str``
:param description: Description of test instance.
:type description: ``str``
:param file_output_flag: Customized command line flag for specifying path
of output file, default to -y
:type file_output_flag: ``NoneType`` or ``str``
:param output_path: Where to save the test report, should work with
`file_output_flag`, if not provided a default path can be generated.
:type output_path: ``str``
:param filtering_flag: Customized command line flag for filtering testcases,
"-t" is suggested, for example: ./cppunit_bin -t *.some_text.*
:type filtering_flag: ``NoneType`` or ``str``
:param cppunit_filter: Native test filter pattern that will be used by
Cppunit internally.
:type cppunit_filter: ``str``
:param listing_flag: Customized command line flag for listing all testcases,
"-l" is suggested, for example: ./cppunit_bin -l
:type listing_flag: ``NoneType`` or ``str``
:param parse_test_context: Function to parse the output which contains
listed test suites and testcases. refer to the default implementation
:py:meth:`~testplan.testing.cpp.cppunit.Cppunit.parse_test_context`.
:type parse_test_context: ``NoneType`` or ``callable``
Also inherits all
:py:class:`~testplan.testing.base.ProcessRunnerTest` options.
"""
CONFIG = CppunitConfig
    def __init__(
        self,
        name,
        binary,
        description=None,
        file_output_flag="-y",
        output_path="",
        filtering_flag=None,
        cppunit_filter="",
        listing_flag=None,
        parse_test_context=None,
        **options
    ):
        # Fold the explicit keyword arguments into ``options`` so the CONFIG
        # schema validates them all in one place.  NOTE: filter_locals()
        # depends on the parameter names above matching the config option
        # names exactly -- do not rename them.
        options.update(self.filter_locals(locals()))
        super(Cppunit, self).__init__(**options)
@property
def report_path(self):
if self.cfg.file_output_flag and self.cfg.output_path:
return self.cfg.output_path
else:
return os.path.join(self._runpath, "report.xml")
def _test_command(self):
cmd = [self.cfg.binary]
if self.cfg.filtering_flag and self.cfg.cppunit_filter:
cmd.extend([self.cfg.filtering_flag, self.cfg.cppunit_filter])
if self.cfg.file_output_flag:
cmd.extend([self.cfg.file_output_flag, self.report_path])
return cmd
def _list_command(self):
if self.cfg.listing_flag:
return [self.cfg.binary, self.cfg.listing_flag]
else:
return super(Cppunit, self)._list_command()
def read_test_data(self):
importer = CPPUnitResultImporter(
self.report_path if self.cfg.file_output_flag else self.stdout
)
return importer.import_result()
def process_test_data(self, test_data: CPPUnitImportedResult):
"""
XML output contains entries for skipped testcases
as well, which are not included in the report.
"""
return test_data.results()
def parse_test_context(self, test_list_output):
"""
Default implementation of parsing Cppunit test listing from stdout.
Assume the format of output is like that of GTest listing. If the
Cppunit test lists the test suites and testcases in other format,
then this function needs to be re-implemented.
"""
# Sample command line output:
#
# Comparison.
# testNotEqual
# testGreater
# testLess
# testMisc
# LogicalOp.
# testOr
# testAnd
# testNot
# testXor
#
#
# Sample Result:
#
# [
# ['Comparison',
# ['testNotEqual', 'testGreater', 'testLess', 'testMisc']],
# ['LogicalOp', ['testOr', 'testAnd', 'testNot', 'testXor']],
# ]
if self.cfg.parse_test_context:
return self.cfg.parse_test_context(test_list_output)
# Default implementation: suppose that the output of
# listing testcases is the same like that of GTest.
result = []
for line in test_list_output.splitlines():
line = line.rstrip()
if line.endswith(".") and len(line.lstrip()) > 1:
result.append([line.lstrip()[:-1], []])
elif result and (line.startswith(" ") or line.startswith("\t")):
result[-1][1].append(line.lstrip())
return result
def update_test_report(self):
"""
Attach XML report contents to the report, which can be
used by XML exporters, but will be discarded by serializers.
"""
super(Cppunit, self).update_test_report()
try:
with open(
self.report_path if self.cfg.file_output_flag else self.stdout
) as report_xml:
self.result.report.xml_string = report_xml.read()
except Exception:
self.result.report.xml_string = ""
def test_command_filter(self, testsuite_pattern, testcase_pattern):
"""
Return the base test command with additional filtering to run a
specific set of testcases.
"""
if testsuite_pattern not in (
"*",
self._DEFAULT_SUITE_NAME,
self._VERIFICATION_SUITE_NAME,
):
raise RuntimeError(
"Cannot run individual test suite {}".format(testsuite_pattern)
)
if testcase_pattern not in ("*", self._VERIFICATION_TESTCASE_NAME):
self.logger.debug(
'Should run testcases in pattern "%s", but cannot run'
" individual testcases thus will run the whole test suite",
testcase_pattern,
)
return self.test_command()
    def list_command_filter(self, testsuite_pattern, testcase_pattern):
        """
        Return the base list command with additional filtering to list a
        specific set of testcases.
        """
        # Cppunit provides no filtered listing, so no command is produced.
        return None  # Cppunit does not support listing by filter
| 35.506667
| 80
| 0.620729
| 7,760
| 0.971336
| 0
| 0
| 904
| 0.113156
| 0
| 0
| 3,884
| 0.486168
|
f747a38fbb26a5157c21b6d60ed98e858e4c0dcd
| 8,191
|
py
|
Python
|
commands/song.py
|
Princ3x/ddmbot
|
088eb6b46447a1ec184b1bc7fea493b66ee35284
|
[
"MIT"
] | 8
|
2016-12-13T17:52:51.000Z
|
2019-06-23T22:11:42.000Z
|
commands/song.py
|
Princ3x/ddmbot
|
088eb6b46447a1ec184b1bc7fea493b66ee35284
|
[
"MIT"
] | 13
|
2016-12-13T17:35:09.000Z
|
2017-07-08T10:53:51.000Z
|
commands/song.py
|
Princ3x/ddmbot
|
088eb6b46447a1ec184b1bc7fea493b66ee35284
|
[
"MIT"
] | 4
|
2016-12-13T17:52:53.000Z
|
2019-01-01T17:43:33.000Z
|
import discord.ext.commands as dec
import database.song
from commands.common import *
class Song:
    """Song insertion, querying and manipulation.

    Discord command group exposing song database operations (blacklisting,
    deduplication, failed-download management, search, rename) backed by
    database.song.SongInterface.
    """

    def __init__(self, bot):
        self._bot = bot
        self._db = database.song.SongInterface(bot.loop)

    # Help texts for the commands; entries prefixed with '*' are privileged.
    _help_messages = {
        'group': 'Song information, querying and manipulation',

        'blacklist': '* Puts the specified song to the blacklist\n\n'
        'Song ID can be located in the square brackets just before the title. It is included in the status message '
        'and all the listings.\nThis does not prevent users from including blacklisted song in their playlist, song '
        'is skipped just before playing.',

        'deduplicate': '* Marks a song as a duplicate of another song\n\n'
        'This is a destructive operation. The duplicate is replaced by the "original" just before playing. All tests '
        '(blacklist, length, overplay) are performed on the "original" song.\nThis function is useful for replacing '
        'songs with a bad quality and is necessary for overplay protection to work correctly.\nSong IDs can be located '
        'in the square brackets just before the title. It is included in the status message and all the listings. You '
        'can also use \'search\' command to obtain the IDs.',

        'failed_clear': '* Removes the songs from the failed list\n\n'
        'Songs marked as duplicates are not affected. Individual songs can be removed by specifying their ID. You can '
        'use the command to fix the automatic playlist after a service outage or bot connection problems.',

        'failed_list': 'Lists all the songs that have failed to download\n\n'
        'Up to 20 songs are returned. Songs marked as a duplicate are considered resolved and are excluded from the '
        'list. Songs are automatically removed from this list after a successful download, or manually by using '
        '\'clear\' subcommand.\n\nSongs that are marked as failed to download are excluded from the automatic '
        'playlist. Bot operators are expected to investigate download issues and provide an alternative source for '
        'the songs if necessary.',

        'info': 'Displays information about the song stored in the database\n\n'
        'Mainly for debugging purposes, as an aid for the bot operators.',

        'permit': '* Removes the specified song from the blacklist\n\n'
        'Song ID can be located in the square brackets just before the title. It is included in the status message '
        'and all the listings.',

        'rename': '* Changes the title of a specified song\n\n'
        'This command can be used to rename the song stored in the database. It does not update the status message; '
        'the new name is used next time the song is played.\nSong ID can be located in the square brackets just before '
        'the title. It is included in the status message and all the listings.',

        'search': 'Queries the database for songs\n\n'
        'Title and UURI are matched against the specified keywords. All the keywords must match either the title or '
        'UURI. Up to 20 results are returned.\nThis command can be used to lookup song IDs.',

        'split': '* Marks a given song as an original\n\n'
        'This command can be used to fix duplication status of the song. After this command is issued, the song '
        'specified won\'t be marked as a duplicate anymore.\nThis is the inverse command to the \'deduplicate\'. '
        'Just like the \'deduplicate\', this command does not manipulate with timestamps nor credit counts.\nSong ID '
        'can be located in the square brackets just before the song title. It is included in the status message and '
        'all the listings.'
    }

    @dec.group(invoke_without_command=True, aliases=['s'], help=_help_messages['group'])
    async def song(self, subcommand: str, *arguments: str):
        # Reached only when an unknown subcommand is used.
        raise dec.UserInputError('Command *song* has no subcommand named {}. Please use `{}help song` to list all '
                                 'the available subcommands.'
                                 .format(subcommand, self._bot.config['ddmbot']['delimiter']))

    @privileged
    @song.command(ignore_extra=False, help=_help_messages['blacklist'])
    async def blacklist(self, song_id: int):
        await self._db.blacklist(song_id)
        await self._bot.message('Song [{}] has been blacklisted'.format(song_id))

    @privileged
    @song.command(ignore_extra=False, help=_help_messages['deduplicate'])
    async def deduplicate(self, which_id: int, target_id: int):
        await self._db.merge(which_id, target_id)
        await self._bot.message('Song [{}] has been marked as a duplicate of the song [{}]'.format(which_id, target_id))

    @song.group(ignore_extra=False, invoke_without_command=True)
    async def failed(self):
        raise dec.UserInputError('You need to provide a subcommand to the *song failed* command')

    @privileged
    @failed.command(name='clear', ignore_extra=False, help=_help_messages['failed_clear'])
    async def failed_clear(self, song_id: int = None):
        # BUGFIX: this previously re-raised the "missing subcommand" error
        # copied from the `failed` group body and never cleared anything,
        # contradicting the 'failed_clear' help text above.
        # NOTE(review): assumes SongInterface exposes clear_failed(), named
        # consistently with list_failed() -- confirm against database.song.
        await self._db.clear_failed(song_id)
        if song_id is not None:
            await self._bot.message('Song [{}] has been removed from the failed list'.format(song_id))
        else:
            await self._bot.message('Failed song list has been cleared')

    @failed.command(name='list', ignore_extra=False, aliases=['l'], help=_help_messages['failed_list'])
    async def failed_list(self):
        items, total = await self._db.list_failed(20)
        if not items:
            await self._bot.whisper('There are no songs flagged because of a download failure')
            return
        reply = '**{} songs (out of {}) flagged because of a download failure:**\n **>** '.format(len(items), total) + \
                '\n **>** '.join(['[{}] {}'.format(*item) for item in items])
        await self._bot.whisper(reply)

    @song.command(ignore_extra=False, aliases=['i'], help=_help_messages['info'])
    async def info(self, song_id: int):
        info = await self._db.get_info(song_id)
        reply = '**Song [{id}] information:**\n' \
                ' **Source URL:** [{url}]\n' \
                ' **Title:** {title}\n' \
                ' **Last played:** {last_played!s}\n' \
                ' **Listener count:** {total_listener_count} ({listener_count})\n' \
                ' **Skip vote count:** {total_skip_vote_count} ({skip_vote_count})\n' \
                ' **Duration:** {duration}s\n' \
                ' **Credits remaining:** {credit_count}\n\n' \
                ' **Blacklisted:** {is_blacklisted}\n' \
                ' **Has failed to download:** {has_failed}\n\n' \
                ' **Marked as a duplicate of:** {duplicates}\n' \
                ' **Is duplicated by:** {duplicated_by}'.format_map(info)
        await self._bot.whisper(reply)

    @privileged
    @song.command(ignore_extra=False, help=_help_messages['permit'])
    async def permit(self, song_id: int):
        await self._db.permit(song_id)
        await self._bot.message('Song [{}] has been removed from blacklist'.format(song_id))

    @privileged
    @song.command(ignore_extra=False, help=_help_messages['rename'])
    async def rename(self, song_id: int, new_title: str):
        await self._db.rename(song_id, new_title)
        await self._bot.message('Song [{}] has been renamed to "{}"'.format(song_id, new_title))

    @song.command(ignore_extra=False, aliases=['s'], help=_help_messages['search'])
    async def search(self, *keywords: str):
        items, total = await self._db.search(keywords, 20)
        if not items:
            await self._bot.whisper('Search for songs with keywords {} has not returned any result'.format(keywords))
            return
        reply = '**{} songs (out of {}) matching the keywords {}:**\n **>** '.format(len(items), total, keywords) + \
                '\n **>** '.join(['[{}] {}'.format(*item) for item in items])
        await self._bot.whisper(reply)

    @privileged
    @song.command(ignore_extra=False, help=_help_messages['split'])
    async def split(self, song_id: int):
        # Merging a song with itself resets its duplicate status.
        await self._db.merge(song_id, song_id)
        await self._bot.message('Song [{}] has been marked as unique'.format(song_id))
| 56.881944
| 120
| 0.652545
| 8,101
| 0.989012
| 0
| 0
| 4,341
| 0.529972
| 3,378
| 0.412404
| 4,570
| 0.557929
|
f747c5f4148789ffa72b834beacbd044a5cb6421
| 2,468
|
py
|
Python
|
linear_error_analysis/src/main.py
|
spacesys-finch/Science
|
623c9d77de6a52e87571debf7970cea7af591f2a
|
[
"MIT"
] | null | null | null |
linear_error_analysis/src/main.py
|
spacesys-finch/Science
|
623c9d77de6a52e87571debf7970cea7af591f2a
|
[
"MIT"
] | null | null | null |
linear_error_analysis/src/main.py
|
spacesys-finch/Science
|
623c9d77de6a52e87571debf7970cea7af591f2a
|
[
"MIT"
] | 1
|
2021-10-09T19:35:26.000Z
|
2021-10-09T19:35:26.000Z
|
"""
main.py
Main driver for the Linear Error Analysis program.
Can be run using `lea.sh`.
Can choose which plots to see by toggling on/off `show_fig` param.
Author(s): Adyn Miles, Shiqi Xu, Rosie Liang
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import config
import libs.gta_xch4 as gta_xch4
import libs.photon_noise as pn
from errors import Errors
from forward import Forward
from isrf import ISRF
from optim import Optim
if __name__ == "__main__":
    # Load the run configuration (instrument/retrieval settings).
    cfg = config.parse_config()

    # --- Forward model: atmosphere, optics, simulated radiances ---
    forward = Forward(cfg)
    surface, molec, atm, sun_lbl = forward.get_atm_params()
    optics = forward.opt_properties()
    # Radiances, per-gas contributions, derivatives and their convolved
    # counterparts over the measurement wavelength grid.
    (
        wave_meas,
        rad_tot,
        rad_ch4,
        rad_co2,
        rad_h2o,
        d_rad_ch4,
        d_rad_co2,
        d_rad_h2o,
        rad_conv_tot,
        rad_conv_ch4,
        rad_conv_co2,
        rad_conv_h2o,
        dev_conv_ch4,
        dev_conv_co2,
        dev_conv_h2o,
    ) = forward.plot_transmittance(show_fig=False)
    state_vector = forward.produce_state_vec()

    # --- Instrument spectral response function ---
    isrf = ISRF(cfg)
    # NOTE(review): isrf_func/isrf_conv (and surface/optics above) are not
    # used later in this script -- presumably kept for side effects or
    # debugging; confirm before removing.
    isrf_func = isrf.define_isrf(show_fig=False)
    isrf_conv = isrf.convolve_isrf(rad_tot, show_fig=False)

    # --- Error budget: systematic and random contributions ---
    lea = Errors(cfg, wave_meas)
    sys_errors = lea.sys_errors()
    rand_errors = lea.rand_errors()

    # sys_nonlinearity = lea.sys_err_vector(1)
    # sys_stray_light = lea.sys_err_vector(2)
    # sys_crosstalk = lea.sys_err_vector(3)
    # sys_flat_field = lea.sys_err_vector(4)
    # sys_bad_px = lea.sys_err_vector(5)
    # sys_key_smile = lea.sys_err_vector(6)
    # sys_striping = lea.sys_err_vector(7)
    # sys_memory = lea.sys_err_vector(8)

    ecm = lea.error_covariance()

    # Persist the error covariance matrix under <repo>/outputs/ecm.csv.
    path_root = os.path.dirname(os.path.dirname(__file__))
    np.savetxt(os.path.join(path_root, "outputs", "ecm.csv"), ecm, delimiter=",")

    # --- Retrieval: gain matrix, modified measurement, state estimate ---
    optim = Optim(cfg, wave_meas)
    jacobian = optim.jacobian(dev_conv_ch4, dev_conv_co2, dev_conv_h2o, show_fig=False)
    gain = optim.gain(ecm)
    modified_meas_vector = optim.modify_meas_vector(state_vector, rad_conv_tot, ecm)
    spectral_res, snr = optim.state_estimate(ecm, modified_meas_vector, sys_errors)

    print("Estimated Solution: " + str(spectral_res))
    print("Uncertainty of Solution: " + str(snr))

    # plot interpolated photon noise
    # plt.plot(lea.wave_meas, lea.photon_noise_interp)
    # plt.title("Interpolated Photon Noise")
    # plt.xlabel("Wavelength (nm)")
    # plt.ylabel("Photon Noise (UNITS?)")  # TODO
    # plt.show()
| 28.697674
| 87
| 0.687601
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 810
| 0.328201
|
f74835f3182443e8b2003e77b093fbfc09c67fcf
| 7,153
|
py
|
Python
|
src/anu/constants/amino_acid.py
|
ankitskvmdam/anu
|
699598fb60dcc23f6cccd5abb30a03b294d21598
|
[
"MIT"
] | null | null | null |
src/anu/constants/amino_acid.py
|
ankitskvmdam/anu
|
699598fb60dcc23f6cccd5abb30a03b294d21598
|
[
"MIT"
] | null | null | null |
src/anu/constants/amino_acid.py
|
ankitskvmdam/anu
|
699598fb60dcc23f6cccd5abb30a03b294d21598
|
[
"MIT"
] | null | null | null |
"""Enum for amino acid."""
from enum import Enum
from typing import Dict, TypedDict
class AcidityBasicity(Enum):
    """Enum for acidity and basicity (U=neutral, A=acid, B=base)."""

    U = 3  # Neutral
    A = 1  # Acid
    B = 2  # Base
class Charge(Enum):
    """Enum for side-chain charge (U=neutral, P=positive, N=negative)."""

    U = 3  # Neutral
    P = 1  # Positive
    N = 2  # Negative
class Hydropathy(Enum):
    """Enum for hydropathy class (HL=hydrophilic, HB=hydrophobic, M=moderate)."""

    HL = 1  # Hydrophilic
    HB = 2  # Hydrophobic
    M = 3   # Moderate
class AminoAcidToInt(Enum):
    """Enum mapping amino acid one-letter codes to integers 1-20 (alphabetical)."""

    A = 1   # Alanine
    C = 2   # Cysteine
    D = 3   # Aspartic Acid
    E = 4   # Glutamic Acid
    F = 5   # Phenylalanine
    G = 6   # Glycine
    H = 7   # Histidine
    I = 8   # Isoleucine
    K = 9   # Lysine
    L = 10  # Leucine
    M = 11  # Methionine
    N = 12  # Asparagine
    P = 13  # Proline
    Q = 14  # Glutamine
    R = 15  # Arginine
    S = 16  # Serine
    T = 17  # Threonine
    V = 18  # Valine
    W = 19  # Tryptophan
    Y = 20  # Tyrosine
class AminoAcidProperty(TypedDict):
    """Dictionary shape for an amino acid's property record.

    BUGFIX: fields of a TypedDict must be declared with ``:`` annotations.
    The previous ``field = type`` assignments created plain class
    attributes, leaving the TypedDict with no declared keys at all.
    """

    code: int                # AminoAcidToInt value (1-20)
    hydropathy: int          # Hydropathy enum value
    hydropathy_index: float  # numeric hydropathy index
    acidity_basicity: int    # AcidityBasicity enum value
    mass: float
    isoelectric_point: float
    charge: int              # Charge enum value
# Per-residue property rows, keyed by one-letter amino-acid code.
# Row layout: (hydropathy class, hydropathy index, acidity/basicity,
#              mass, isoelectric point, charge).
_AA_ROWS = {
    "A": ("HB", 1.8, "U", 89.09, 6.00, "U"),
    "C": ("M", 2.5, "U", 121.16, 5.02, "U"),
    "D": ("HL", -3.5, "A", 133.10, 2.77, "N"),
    "E": ("HL", -3.5, "A", 147.13, 3.22, "N"),
    "F": ("HB", 2.8, "U", 165.19, 5.44, "U"),
    "G": ("HB", -0.4, "U", 75.07, 5.97, "U"),
    "H": ("M", -3.2, "B", 155.16, 7.47, "P"),
    "I": ("HB", 4.5, "U", 131.8, 5.94, "U"),
    "K": ("HL", -3.9, "B", 146.19, 9.59, "P"),
    "L": ("HB", 3.8, "U", 131.18, 5.98, "U"),
    "M": ("M", 1.9, "U", 149.21, 5.74, "U"),
    "N": ("HL", -3.5, "U", 132.12, 5.41, "U"),
    "P": ("HB", -1.6, "U", 115.13, 6.30, "U"),
    "Q": ("HL", -3.5, "U", 146.15, 5.65, "N"),
    "R": ("HL", -4.5, "B", 174.20, 11.15, "P"),
    "S": ("HL", -0.8, "U", 165.09, 5.68, "U"),
    "T": ("HL", -0.7, "U", 119.12, 5.64, "U"),
    "V": ("HB", 4.2, "U", 117.15, 5.96, "U"),
    "W": ("HB", -0.9, "U", 204.23, 5.89, "U"),
    "Y": ("HB", -1.3, "U", 181.19, 5.66, "U"),
}

# Expand the compact rows into full property records.
amino_acid: Dict[str, AminoAcidProperty] = {
    letter: {
        "code": AminoAcidToInt[letter].value,
        "hydropathy": Hydropathy[hydro].value,
        "hydropathy_index": hydro_idx,
        "acidity_basicity": AcidityBasicity[acid].value,
        "mass": mass,
        "isoelectric_point": iso_pt,
        "charge": Charge[chg].value,
    }
    for letter, (hydro, hydro_idx, acid, mass, iso_pt, chg) in _AA_ROWS.items()
}
| 28.612
| 57
| 0.533622
| 1,156
| 0.161611
| 0
| 0
| 0
| 0
| 0
| 0
| 2,560
| 0.357892
|
f74b1a4debef16881b74152eff013915a6f5da94
| 8,550
|
py
|
Python
|
user_chainload.py
|
Phidica/sublime-execline
|
a3c1b76de0c9a420ae73467c28f445b698f7f508
|
[
"MIT"
] | 2
|
2020-08-28T16:04:37.000Z
|
2020-08-28T20:06:21.000Z
|
user_chainload.py
|
Phidica/sublime-execline
|
a3c1b76de0c9a420ae73467c28f445b698f7f508
|
[
"MIT"
] | null | null | null |
user_chainload.py
|
Phidica/sublime-execline
|
a3c1b76de0c9a420ae73467c28f445b698f7f508
|
[
"MIT"
] | null | null | null |
import logging
import os
import re
import sublime
# external dependencies (see dependencies.json)
import jsonschema
import yaml # pyyaml
# This plugin generates a hidden syntax file containing rules for additional
# chainloading commands defined by the user. The syntax is stored in the cache
# directory to avoid the possibility of it falling under user version control in
# the usual packages directory

# File and package-resource names used throughout the plugin.
userSyntaxName = 'execline-user-chainload.sublime-syntax'
pkgName = 'execline'
settingsName = 'execline.sublime-settings'
mainSyntaxPath = 'Packages/{}/execline.sublime-syntax'.format(pkgName)
schemaPath = 'Packages/{}/execline.sublime-settings.schema.json'.format(pkgName)

# Scope namespace applied to a user command, keyed by its declared type.
ruleNamespaces = {
    'keyword': 'keyword.other',
    'function': 'support.function',
}

# Contexts in the main syntax file that user-declared command elements map
# to.  A plain string value is a simple include; a dict value describes a
# match/push/include combination whose 'match' pattern is formatted with a
# character class of option letters (see _make_rule).
ruleContexts = {
    'argument': {
        'generic': 'command-call-common-arg-aside-&pop',
        'variable': 'command-call-common-variable-&pop',
        'pattern': 'command-call-common-glob-&pop',
    },
    'block': {
        'program': 'block-run-prog',
        'arguments': 'block-run-arg',
        'trap': 'block-trap',
        'multidefine': 'block-multidefine',
    },
    'options': {
        'list': 'command-call-common-opt-list-&pop',
        'list-with-args': {
            'match': '(?=-[{}])',
            'push': 'command-call-common-opt-arg-&pop',
            'include': 'command-call-common-opt-list-&pop',
        },
    },
}

logging.basicConfig()
logger = logging.getLogger(__name__)
def _resolve_context(context):
    """Return the fully-resolved 'path#context' reference into the main syntax file."""
    return '{}#{}'.format(mainSyntaxPath, context)
# Create a match rule describing a command of a certain type, made of a list of
# elements
def _make_rule(cmd_name, cmd_elements, cmd_type):
    """Build a sublime-syntax match rule for one user-declared command.

    cmd_name: literal command name (escaped, never treated as a pattern).
    cmd_elements: sequence of (type, subtype[, arg]) tuples resolved against
        the module-level ruleContexts table.
    cmd_type: key into ruleNamespaces ('keyword' or 'function').
    Returns the rule dict, or None when cmd_type is unrecognised.
    """
    try:
        namespace = ruleNamespaces[cmd_type]
    except KeyError:
        logger.warning("Ignoring command of unrecognised type '{}'".format(cmd_type))
        return

    rule = {}
    # Careful to sanitise user input. Only literal command names accepted here
    rule['match'] = r'{{chain_pre}}' + re.escape(cmd_name) + r'{{chain_post}}'
    rule['scope'] = ' '.join([
        'meta.function-call.name.execline',
        '{}.user.{}.execline'.format(namespace, cmd_name),
        'meta.string.unquoted.execline',
    ])

    contextSeq = []
    for elem in cmd_elements:
        context = None
        # Resolve the element into a name and possible argument
        elemType,elemSubtype = elem[0:2]
        try:
            elemArg = elem[2]
        except IndexError:
            elemArg = ''
        # Look up the context named by this element
        try:
            contextData = ruleContexts[elemType][elemSubtype]
            if isinstance(contextData, str):
                # Normalise plain-string entries to the dict form.
                contextData = { 'include': contextData }
        except KeyError:
            logger.warning("Ignoring key '{}' not found in context dictionary".format(elem))
            continue
        # Multi-entry context data (match/push/include) needs an argument
        # (the option letters) to format the match pattern with.
        if len(contextData) > 1 and not elemArg:
            logger.warning("Ignoring element '{}' with missing data".format(elem))
            continue

        if len(contextData) == 1:
            # context = _resolve_context(contextData['include'])
            # Although a basic include could be provided as the target context name
            # directly to the 'push' list, this can break if there are a mix of other
            # types of contexts being pushed to the stack. A context containing a sole
            # include is safe from this
            context = [ {'include': _resolve_context(contextData['include'])} ]
        elif elemType == 'options':
            # Careful to sanitise user input, this must behave as a list of characters
            matchPattern = contextData['match'].format( re.escape(elemArg) )
            context = [
                {'match': matchPattern, 'push': _resolve_context(contextData['push'])},
                {'include': _resolve_context(contextData['include'])},
            ]
        if context:
            contextSeq.append(context)

    # Convert context sequence into context stack; reversed because Sublime
    # pops pushed contexts in LIFO order.
    if contextSeq:
        rule['push'] = contextSeq
        rule['push'].reverse()

    return rule
def _validate_settings():
    """Validate the active user-chainload sets against the JSON schema.

    Returns a list of valid set names, [] when no sets are active, or None
    when nothing valid is available (schema load failure, or every active
    set failed validation).
    """
    # Read the schema using Sublime Text's builtin JSON parser
    try:
        schema = sublime.decode_value( sublime.load_resource(schemaPath) )
    except Exception as ex:
        logger.error("Failed loading schema: {}".format(ex))
        # BUGFIX: previously `return validSets` here referenced the variable
        # before assignment, raising NameError whenever the schema failed to
        # load.  None signals "no valid configuration" to the caller.
        return None

    settings = sublime.load_settings(settingsName)
    activeSets = settings.get('user_chainload_active')
    if not activeSets:
        return []

    validSets = []
    for setName in activeSets:
        if not setName:
            sublime.error_message("Error in {}: Set name cannot be the empty string".format(settingsName))
            continue

        setName = 'user_chainload_set_' + setName
        setDict = settings.get(setName)
        if setDict is None:
            sublime.error_message("Error in {}: Couldn't find expected setting '{}'".format(settingsName, setName))
            continue

        try:
            jsonschema.validate(setDict, schema)
            logger.debug("Validation success for {}".format(setName))
            validSets.append(setName)
        except jsonschema.exceptions.SchemaError as ex:
            # A problem in the schema itself for me as the developer to resolve
            logger.error("Failed validating schema: {}".format(ex))
            break
        except jsonschema.exceptions.ValidationError as ex:
            # A problem in the settings file for the user to resolve
            sublime.error_message("Error in {} in setting '{}': \n{}".format(settingsName, setName, str(ex)))
            continue

    return validSets if validSets else None
def _write_user_chainload():
    """Generate the hidden user-chainload syntax file from the user settings.

    Reads and validates the active rule sets, then (re)writes the cached
    sublime-syntax file.  When no valid settings exist, any pre-existing
    syntax file is left untouched and nothing is generated.
    """
    # Read settings file and validate
    settings = sublime.load_settings(settingsName)
    validSets = _validate_settings()

    # Prepare output syntax file
    cacheDir = os.path.join(sublime.cache_path(), pkgName)
    if not os.path.isdir(cacheDir):
        os.mkdir(cacheDir)
    userSyntaxPath = os.path.join(cacheDir, userSyntaxName)
    userSyntaxExists = os.path.isfile(userSyntaxPath)

    # BUGFIX: with validSets == None and no pre-existing syntax file, the
    # original fell through and crashed iterating over None below.  Bail out
    # in both cases; regeneration happens next time valid settings appear.
    if validSets is None:
        logger.warning("Not (re)generating syntax due to lack of any valid settings")
        return
    if userSyntaxExists:
        logger.info("Regenerating syntax with sets: {}".format(validSets))
    else:
        logger.info("Generating syntax with sets: {}".format(validSets))

    # BUGFIX: use a context manager so the handle is flushed and closed even
    # on error (previously the file was opened and never closed).
    with open(userSyntaxPath, 'w') as userSyntax:
        # Can't seem to get PyYAML to write a header, so do it manually
        header = '\n'.join([
            r'%YAML 1.2',
            r'# THIS IS AN AUTOMATICALLY GENERATED FILE.',
            r'# DO NOT EDIT. CHANGES WILL BE LOST.',
            r'---',
            '',
        ])
        userSyntax.write(header)
        yaml.dump({'hidden': True, 'scope': 'source.shell.execline'}, userSyntax)

        # Repeat all the variables from the main syntax file, for convenience
        mainDB = yaml.load(sublime.load_resource(mainSyntaxPath),
                           Loader=yaml.BaseLoader)
        yaml.dump({'variables': mainDB['variables']}, userSyntax)

        # Create list of rules from the sets of user settings which are
        # currently valid
        rulesList = []
        for rule in [r for s in validSets for r in settings.get(s)]:
            # Schema validation guarantees we can trust all the following inputs
            # Read a name or list of names
            cmdNames = rule['name']
            if isinstance(cmdNames, str):
                cmdNames = [cmdNames]
            # Get type with 'function' being default if not provided
            cmdType = rule.get('type', 'function')

            cmdElements = []
            for elem in rule['elements']:
                # Get the sole kv pair, apparently this is most efficient way
                key, value = next(iter(elem.items()))
                if key in ruleContexts:
                    cmdElements.append((key, value))
                elif 'options_then_' in key:
                    opts = ''.join(value.get('options_taking_arguments', []))
                    if opts:
                        cmdElements.append(('options', 'list-with-args', opts))
                    else:
                        cmdElements.append(('options', 'list'))

                    then = key.split('_')[-1]
                    if then == 'end':
                        # Ignore all further elements
                        break
                    else:
                        # Add the block, etc
                        cmdElements.append((then, value[then]))

            for cmdName in cmdNames:
                rulesList.append(_make_rule(cmdName, cmdElements, cmdType))

        # Only keep non-empty rules. Sublime doesn't mind if the list of
        # rules ends up empty
        content = {'contexts': {'main': [r for r in rulesList if r]}}
        yaml.dump(content, userSyntax)
def plugin_loaded():
    """Sublime entry point: (re)install the settings listener, set log level
    from the debugging option and build the syntax once."""
    settings = sublime.load_settings(settingsName)
    settings.clear_on_change(__name__)
    settings.add_on_change(__name__, _write_user_chainload)

    debug_enabled = settings.get('user_chainload_debugging')
    logger.setLevel(logging.DEBUG if debug_enabled else logging.WARNING)

    _write_user_chainload()
| 31.090909
| 109
| 0.68
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,710
| 0.433918
|
f74b23abf6614d599940d2f82ff8df5980edce4e
| 378
|
py
|
Python
|
nmutant_model/retrain_mu_test0.py
|
asplos2020/DRTest
|
c3de497142d9b226e518a1a0f95f7350d2f7acd6
|
[
"MIT"
] | 1
|
2021-04-01T07:31:17.000Z
|
2021-04-01T07:31:17.000Z
|
nmutant_model/retrain_mu_test0.py
|
Justobe/DRTest
|
85c3c9b2a46cafa7184130f2596c5f9eb3b20bff
|
[
"MIT"
] | null | null | null |
nmutant_model/retrain_mu_test0.py
|
Justobe/DRTest
|
85c3c9b2a46cafa7184130f2596c5f9eb3b20bff
|
[
"MIT"
] | 1
|
2020-12-24T12:12:54.000Z
|
2020-12-24T12:12:54.000Z
|
import os
import numpy as np
import sys
sys.path.append("../")

# Sweep every (model, attack, mutation-variant) combination and launch the
# retraining script for each one on GPU 0.
_CMD_TEMPLATE = ('CUDA_VISIBLE_DEVICES=0 python retrain_mu_mnist.py'
                 ' --datasets=mnist --attack={attack} --model_name={model}'
                 ' --mu_var={mu_var} --epochs=50')

for net in ['lenet1', 'lenet4', 'lenet5']:
    for atk in ['fgsm', 'cw', 'jsma']:
        for mu_op in ['gf', 'nai', 'ns', 'ws']:
            os.system(_CMD_TEMPLATE.format(attack=atk, model=net, mu_var=mu_op))
| 42
| 178
| 0.600529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 182
| 0.481481
|
f74bfae14ea8e361bfd9a147cec9f55e5eecb5a2
| 5,197
|
py
|
Python
|
characterise_inauthentic_tweets.py
|
weberdc/socmed_repeatability
|
85da18cbffa53f18279844117b2aed226104ce11
|
[
"Apache-2.0"
] | 2
|
2021-06-30T07:29:10.000Z
|
2022-01-20T15:17:26.000Z
|
characterise_inauthentic_tweets.py
|
weberdc/socmed_repeatability
|
85da18cbffa53f18279844117b2aed226104ce11
|
[
"Apache-2.0"
] | null | null | null |
characterise_inauthentic_tweets.py
|
weberdc/socmed_repeatability
|
85da18cbffa53f18279844117b2aed226104ce11
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
from __future__ import print_function
import gzip
import json
import re
import sys
# import time
from argparse import ArgumentParser
# from datetime import datetime
class Options:
    """Command-line interface for the tweet characterisation script."""

    def __init__(self):
        self.usage = 'characterise_inauthentic_tweets.py -i <file of tweets> [-v|--verbose]'
        self._init_parser()

    def _init_parser(self):
        parser = ArgumentParser(usage=self.usage, conflict_handler='resolve')
        parser.add_argument(
            '-i', '--tweets-file',
            default='-',
            required=True,
            dest='tweets_file',
            help='Tweets file (default: all)'
        )
        parser.add_argument(
            '-v', '--verbose',
            action='store_true',
            default=False,
            dest='verbose',
            help='Turn on verbose logging (default: False)'
        )
        self.parser = parser

    def parse(self, args=None):
        """Parse args (or sys.argv when None) into a namespace."""
        return self.parser.parse_args(args)
# strptime/strftime pattern for Twitter's classic timestamp format.
TWITTER_TS_FORMAT = '%a %b %d %H:%M:%S +0000 %Y' #Tue Apr 26 08:57:55 +0000 2011
# def parse_ts(ts_str, fmt=TWITTER_TS_FORMAT):
# try:
# time_struct = time.strptime(ts_str, fmt)
# except TypeError:
# return int(ts_str) # epoch millis
# return datetime.fromtimestamp(time.mktime(time_struct))
def extract_text(tweet):
    """Return the fullest text available for a tweet, following retweets."""
    def best_text(t):
        # A tweet retrieved in 'compatible' mode may be flagged truncated
        # without carrying the extended_tweet payload, hence the double check.
        if t['truncated'] and 'extended_tweet' in t:
            return t['extended_tweet']['full_text']
        return t['text'] if 'text' in t else t['full_text']

    if 'retweeted_status' in tweet:
        # Prefer the original tweet's text over the truncated "RT @..." copy.
        return extract_text(tweet['retweeted_status'])
    return best_text(tweet)
def fetch_lines(file=None):
    """Gets the lines from the given file or stdin if it's None or '' or '-'."""
    if not file or file == '-':
        return [line.strip() for line in sys.stdin]
    # A name ending in z/Z (.gz, .Z, ...) is treated as gzip-compressed.
    opener = gzip.open(file, 'rt') if file[-1] in 'zZ' else open(file, 'r', encoding='utf-8')
    with opener as f:
        return [line.strip() for line in f]
def extract_tokens(pattern, str):
    """Return the non-empty, whitespace-stripped matches of pattern in str."""
    stripped = [match.strip() for match in re.findall(pattern, str)]
    return [token for token in stripped if token]
def count_tokens_starting_with(chars, tokens):
    """Count how many tokens have a first character in chars."""
    total = 0
    for token in tokens:
        if token[0] in chars:
            total += 1
    return total
def eprint(*args, **kwargs):
    """Print positional args to stderr, forwarding other print kwargs."""
    print(*args, file=sys.stderr, **kwargs)


# Verbose-mode flag; flipped by the --verbose CLI option in __main__.
DEBUG = False


def log(msg):
    """Emit msg to stderr, but only when DEBUG (verbose mode) is on."""
    if DEBUG:
        eprint(msg)
if __name__=='__main__':
    # Parse CLI flags; --verbose toggles the module-level DEBUG flag
    # consumed by log().
    options = Options()
    opts = options.parse(sys.argv[1:])

    DEBUG=opts.verbose
    tweets_file = opts.tweets_file
    # pretty = opts.pretty

    # Input is JSONL: one JSON tweet object per line, optionally gzipped.
    tweets = [json.loads(l) for l in fetch_lines(tweets_file)]
    log(f'read: {len(tweets)} tweets')

    # Counters for the four "inauthentic-looking" content shapes below.
    hashtags_only = 0
    hashtags_plus_url = 0
    mentions_plus_hashtags = 0
    mentions_hashtags_plus_url = 0

    # Token alphabets: letter runs plus the hashtag/mention sigils.
    ht_splitter_re = '[a-zA-Z#]+'
    me_splitter_re = '[a-zA-Z@]+'
    htme_splitter_re = '[a-zA-Z#@]+'
    X = 0
    for t in tweets:
        text = extract_text(t)

        # hashtag(s) only: every extracted token starts with '#'
        if '#' in text:
            tokens = extract_tokens(ht_splitter_re, text)
            if len(tokens) == count_tokens_starting_with('#', tokens):
                hashtags_only += 1
                log(tokens)

        # hashtag(s) and URL: only hashtags appear before the first URL
        if '#' in text and 'http' in text:
            tokens = extract_tokens(htme_splitter_re, text[:text.index('http')])
            if len(tokens) == count_tokens_starting_with('#', tokens):
                hashtags_plus_url += 1
                # print(tokens)
                log(text)

        # mention(s) and hashtag(s): every token is a mention or hashtag
        if '#' in text and '@' in text:
            tokens = extract_tokens(htme_splitter_re, text)
            if len(tokens) == count_tokens_starting_with('#@', tokens):
                mentions_plus_hashtags += 1
                log(tokens)

        # mention(s), hashtag(s) and URL: only mentions/hashtags before URL
        if '#' in text and '@' in text and 'http' in text:
            tokens = extract_tokens(htme_splitter_re, text[:text.index('http')])
            if len(tokens) == count_tokens_starting_with('#@', tokens):
                mentions_hashtags_plus_url += 1
                # print(tokens)
                log(text)

    # Summary: absolute counts plus proportion of the whole corpus.
    print(f'All: {len(tweets):,}')
    print(f'HT: {hashtags_only:>6} ({float(hashtags_only)/len(tweets):.1%})')
    print(f'HT+URL: {hashtags_plus_url:>6} ({float(hashtags_plus_url)/len(tweets):.1%})')
    print(f'@m+HT: {mentions_plus_hashtags:>6} ({float(mentions_plus_hashtags)/len(tweets):.1%})')
    print(f'@m+HT+URL: {mentions_hashtags_plus_url:>6} ({float(mentions_hashtags_plus_url)/len(tweets):.1%})')
| 31.307229
| 110
| 0.583414
| 780
| 0.150087
| 0
| 0
| 0
| 0
| 0
| 0
| 1,870
| 0.359823
|
f74ca0e4b20b83d509bc3a77fa6331062c311e10
| 3,577
|
py
|
Python
|
localshop/apps/packages/utils.py
|
rcoup/localshop
|
b7d0803afd9335862accfc79dee047a6b0e67ad6
|
[
"BSD-3-Clause"
] | null | null | null |
localshop/apps/packages/utils.py
|
rcoup/localshop
|
b7d0803afd9335862accfc79dee047a6b0e67ad6
|
[
"BSD-3-Clause"
] | null | null | null |
localshop/apps/packages/utils.py
|
rcoup/localshop
|
b7d0803afd9335862accfc79dee047a6b0e67ad6
|
[
"BSD-3-Clause"
] | null | null | null |
import inspect
import hashlib
import logging
import os
from django.core.files.uploadedfile import TemporaryUploadedFile
from django.db.models import FieldDoesNotExist
from django.db.models.fields.files import FileField
from django.http import QueryDict
from django.utils.datastructures import MultiValueDict
logger = logging.getLogger(__name__)
def parse_distutils_request(request):
    """Parse the `request.body` and update the request POST and FILES
    attributes.

    The body is a distutils-style multipart upload; the second line of the
    raw body is used as the part separator.  Each part's header is parsed
    for a ``name`` (and optionally ``filename``): the ``content`` file part
    is stored under ``request.FILES['distribution']`` and every other part
    is appended to ``request.POST``.
    """
    # The multipart boundary is the second line of the raw body.
    sep = request.body.splitlines()[1]
    request.POST = QueryDict('', mutable=True)
    try:
        request._files = MultiValueDict()
    except Exception:
        # Some request implementations forbid assigning _files; FILES then
        # falls back to whatever the request already provides.
        pass
    for part in filter(lambda e: e.strip(), request.body.split(sep)):
        try:
            header, content = part.lstrip().split('\n', 1)
        except Exception:
            # A fragment without a header/content split (e.g. the trailing
            # '--' terminator) -- skip it.
            continue
        # Strip the single newline framing the content block on each side.
        if content.startswith('\n'):
            content = content[1:]
        if content.endswith('\n'):
            content = content[:-1]
        headers = parse_header(header)
        if "name" not in headers:
            continue
        if "filename" in headers and headers['name'] == 'content':
            # The uploaded distribution archive itself.
            dist = TemporaryUploadedFile(name=headers["filename"],
                                         size=len(content),
                                         content_type="application/gzip",
                                         charset='utf-8')
            dist.write(content)
            dist.seek(0)
            request.FILES.appendlist('distribution', dist)
        else:
            # Distutils sends UNKNOWN for empty fields (e.g platform)
            # [russell.sim@gmail.com]
            if content == 'UNKNOWN':
                content = None
            request.POST.appendlist(headers["name"], content)
def parse_header(header):
    """Parse a MIME-style header line into a dict of its key=value parts.

    Segments without an '=' (e.g. a leading 'Content-Disposition:
    form-data') are skipped; values have surrounding double quotes removed.
    """
    result = {}
    for segment in header.split(';'):
        segment = segment.strip()
        if not segment or '=' not in segment:
            continue
        key, value = segment.split('=', 1)
        result[key.strip()] = value.strip('"')
    return result
def delete_files(sender, **kwargs):
    """Signal callback for deleting old files when database item is deleted.

    Scans every FileField on *sender* and deletes the stored file from its
    storage backend, unless another row of the same model still references
    the same file.

    Args:
        sender: model class emitting the delete signal.
        **kwargs: signal kwargs; ``instance`` is the deleted model instance.
    """
    for fieldname in sender._meta.get_all_field_names():
        try:
            field = sender._meta.get_field(fieldname)
        except FieldDoesNotExist:
            # Reverse relations etc. have names but no concrete field.
            continue
        if isinstance(field, FileField):
            instance = kwargs['instance']
            fieldfile = getattr(instance, fieldname)
            # NOTE(review): the `return` statements below abort scanning of
            # any remaining fields instead of skipping just this one --
            # `continue` may have been intended; confirm for models with
            # multiple FileFields.
            if not hasattr(fieldfile, 'path'):
                return
            if not os.path.exists(fieldfile.path):
                return
            # Check if there are other instances which reference this fle
            is_referenced = (
                instance.__class__._default_manager
                .filter(**{'%s__exact' % fieldname: fieldfile})
                .exclude(pk=instance._get_pk_val())
                .exists())
            if is_referenced:
                return
            try:
                field.storage.delete(fieldfile.path)
            except Exception:
                # Best-effort cleanup: log and keep the DB deletion intact.
                logger.exception(
                    'Error when trying to delete file %s of package %s:' % (
                        instance.pk, fieldfile.path))
def md5_hash_file(fh):
    """Return the md5 hex digest of the given file-object's contents.

    Reads in 8 KiB chunks so arbitrarily large files use constant memory.
    """
    digest = hashlib.md5()
    chunk = fh.read(8192)
    while chunk:
        digest.update(chunk)
        chunk = fh.read(8192)
    return digest.hexdigest()
| 29.808333
| 78
| 0.559966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 555
| 0.155158
|
f74d7b65bf1d537a02c073dba7f2c762c4daaaf9
| 4,228
|
py
|
Python
|
bika/lims/browser/worksheet/views/analyses_transposed.py
|
hocinebendou/bika.gsoc
|
85bc0c587de7f52073ae0e89bddbc77bf875f295
|
[
"MIT"
] | null | null | null |
bika/lims/browser/worksheet/views/analyses_transposed.py
|
hocinebendou/bika.gsoc
|
85bc0c587de7f52073ae0e89bddbc77bf875f295
|
[
"MIT"
] | null | null | null |
bika/lims/browser/worksheet/views/analyses_transposed.py
|
hocinebendou/bika.gsoc
|
85bc0c587de7f52073ae0e89bddbc77bf875f295
|
[
"MIT"
] | null | null | null |
# coding=utf-8
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from bika.lims.browser.bika_listing import BikaListingTable
from bika.lims.browser.worksheet.views.analyses import AnalysesView
class AnalysesTransposedView(AnalysesView):
    """ The view for displaying the table of manage_results transposed.

    Analysis Requests are displayed in columns and analyses in rows.
    Uses most of the logic provided by BikaListingView through
    bika.lims.worksheet.views.AnalysesView to generate the items,
    but renders its own template, which is highly specific for
    display analysis results. Because of this, some generic
    BikaListing functionalities, such as sorting, pagination,
    contextual menus for columns, etc. will not work in this view.
    """

    def contents_table(self, table_only = True):
        """ Overrides contents_table method from the parent class
        BikaListingView, using the transposed template instead
        of the classic template.
        """
        # NOTE(review): the table_only argument is ignored -- the table is
        # always constructed with table_only=True; confirm no caller relies
        # on passing False here.
        table = AnalysesTransposedTable(bika_listing = self, table_only = True)
        return table.render(self)
class AnalysesTransposedTable(BikaListingTable):
    """ The BikaListingTable that uses a transposed template for
    displaying the results.

    Rows are field headers plus one row per analysis service; columns are
    worksheet positions. Cell lookups go through self.trans_items, a
    {position: {service_name: item}} mapping built in _transpose_data.
    """
    render = ViewPageTemplateFile("../templates/analyses_transposed.pt")
    render_cell = ViewPageTemplateFile("../templates/analyses_transposed_cell.pt")

    def __init__(self, bika_listing = None, table_only = False):
        BikaListingTable.__init__(self, bika_listing, True)
        # Row headers: field rows first, analysis-service rows spliced in.
        self.rows_headers = []
        # {position: {service: item}} index over self.items.
        self.trans_items = {}
        # Column order: worksheet positions in first-seen order.
        self.positions = []
        self._transpose_data()

    def _transpose_data(self):
        """Populate rows_headers, trans_items and positions from self.items."""
        cached = []
        index = 0
        #ignore = ['Analysis', 'Service', 'Result', 'ResultDM']
        # Only these listing columns become 'field' rows in the transposed view.
        include = ['Attachments', 'DetectionLimit', 'DueDate','Pos', 'ResultDM']
        for col in self.bika_listing.review_state['columns']:
            if col == 'Result':
                # Further interims will be inserted in this position.
                # NOTE(review): resindex is only bound when a 'Result'
                # column exists; the item loop below assumes it does.
                resindex = index
            if col not in include:
                continue
            lcol = self.bika_listing.columns[col]
            self.rows_headers.append({'id': col,
                     'title': lcol['title'],
                     'type': lcol.get('type',''),
                     'row_type': 'field',
                     'hidden': not lcol.get('toggle', True),
                     'input_class': lcol.get('input_class',''),
                     'input_width': lcol.get('input_width','')})
            cached.append(col)
            index += 1

        for item in self.items:
            # First occurrence of a service inserts an 'analysis' row at
            # the slot reserved for the Result column.
            if item['Service'] not in cached:
                self.rows_headers.insert(resindex,
                            {'id': item['Service'],
                             'title': item['title'],
                             'type': item.get('type',''),
                             'row_type': 'analysis',
                             'index': index})
                resindex += 1
                cached.append(item['Service'])

            # Index the item by (position, service) for render_row_cell.
            pos = item['Pos']
            if pos in self.trans_items:
                self.trans_items[pos][item['Service']] = item
            else:
                self.trans_items[pos] = {item['Service']: item}
            if pos not in self.positions:
                self.positions.append(pos)

    def rendered_items(self, cat=None, **kwargs):
        # Rendering is fully template-driven in the transposed view.
        return ''

    def render_row_cell(self, rowheader, position = ''):
        """Render one table cell: *rowheader*'s row at worksheet *position*."""
        self.current_rowhead = rowheader
        self.current_position = position
        if rowheader['row_type'] == 'field':
            # Only the first item for this position contains common
            # data for all the analyses with the same position
            its = [i for i in self.items if i['Pos'] == position]
            self.current_item = its[0] if its else {}
        elif position in self.trans_items \
            and rowheader['id'] in self.trans_items[position]:
            self.current_item = self.trans_items[position][rowheader['id']]
        else:
            # No analysis for this (row, position) pair: empty cell.
            return ''
        return self.render_cell()
| 41.45098
| 82
| 0.585856
| 4,007
| 0.947729
| 0
| 0
| 0
| 0
| 0
| 0
| 1,467
| 0.346973
|
f74df4ac592a375e715e992b4854e0bf766ac654
| 865
|
py
|
Python
|
lab1_rest/project/apps/core/views.py
|
mratkovic/RZNU-Lab
|
2930b249994619c2f17493544db2c0d471ca6cbc
|
[
"MIT"
] | null | null | null |
lab1_rest/project/apps/core/views.py
|
mratkovic/RZNU-Lab
|
2930b249994619c2f17493544db2c0d471ca6cbc
|
[
"MIT"
] | null | null | null |
lab1_rest/project/apps/core/views.py
|
mratkovic/RZNU-Lab
|
2930b249994619c2f17493544db2c0d471ca6cbc
|
[
"MIT"
] | null | null | null |
from rest_framework import viewsets
from .models import User, Photo
from .serializers import UserSerializer, PhotoSerializer
from .mixins import RequestLogViewMixin
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated, IsAuthenticatedOrReadOnly
class PhotoViewSet(RequestLogViewMixin, viewsets.ModelViewSet):
    """CRUD API endpoints for Photo objects, with request logging.

    Anonymous clients get read-only access; writes require session or
    HTTP basic authentication.
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticatedOrReadOnly,)
    queryset = Photo.objects.all()
    serializer_class = PhotoSerializer
class UserViewSet(RequestLogViewMixin, viewsets.ModelViewSet):
    """CRUD API endpoints for User objects, with request logging.

    Anonymous clients get read-only access; writes require session or
    HTTP basic authentication.
    """
    authentication_classes = (SessionAuthentication, BasicAuthentication)
    permission_classes = (IsAuthenticatedOrReadOnly,)
    queryset = User.objects.all()
    serializer_class = UserSerializer
| 41.190476
| 84
| 0.834682
| 529
| 0.611561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
f74f0070abe0b831d8cd12d2943b6e264b00e54d
| 215
|
py
|
Python
|
arc/arc009/arc009b.py
|
c-yan/atcoder
|
940e49d576e6a2d734288fadaf368e486480a948
|
[
"MIT"
] | 1
|
2019-08-21T00:49:34.000Z
|
2019-08-21T00:49:34.000Z
|
arc/arc009/arc009b.py
|
c-yan/atcoder
|
940e49d576e6a2d734288fadaf368e486480a948
|
[
"MIT"
] | null | null | null |
arc/arc009/arc009b.py
|
c-yan/atcoder
|
940e49d576e6a2d734288fadaf368e486480a948
|
[
"MIT"
] | null | null | null |
def conv(x):
    # Decode string x: map each "alien digit" character through table t
    # (bound below, but before conv is first called) and read the result
    # as a base-10 integer.
    return int(''.join(t[c] for c in x))

b = input().split()                    # 10 symbols standing for digits 0..9
N = int(input())                       # number of values to sort
a = [input() for _ in range(N)]        # values written in the alien digits
t = {b[i]: str(i) for i in range(10)}  # alien symbol -> decimal digit string
a.sort(key = lambda x: conv(x))        # sort by decoded numeric value
print(*a, sep='\n')                    # original strings, sorted order
| 19.545455
| 40
| 0.548837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6
| 0.027907
|
f75197db0d5043fe351ad2be154d400c859209b0
| 965
|
py
|
Python
|
setup.py
|
refnode/spartakiade-2021-session-effective-python
|
6b1a25c4ec79261de4ed6385a81b6a31a06d6b58
|
[
"Apache-2.0"
] | 1
|
2021-06-04T14:05:31.000Z
|
2021-06-04T14:05:31.000Z
|
setup.py
|
refnode/spartakiade-2021-session-effective-python
|
6b1a25c4ec79261de4ed6385a81b6a31a06d6b58
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
refnode/spartakiade-2021-session-effective-python
|
6b1a25c4ec79261de4ed6385a81b6a31a06d6b58
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python

"""The setup script."""

from setuptools import setup, find_packages

# Reuse the AsciiDoc README as the long description shown on PyPI.
with open("README.adoc") as fh_readme:
    readme = fh_readme.read()

# No runtime dependencies yet; kept as a list for easy extension.
install_reqs = []

setup(
    author="Sven Wilhelm",
    author_email='refnode@gmail.com',
    python_requires='>=3.8',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.8',
    ],
    description="Spartakiade 2021 Session Effective Python",
    install_requires=install_reqs,
    long_description=readme,
    include_package_data=True,
    keywords='spartakiade-2021-session-effective-python',
    name='spartakiade-2021-session-effective-python',
    # Packages live under src/ (src-layout project).
    packages=find_packages(where="src"),
    url='https://github.com/refnode/spartakiade-2021-session-effective-python',
    version='0.1.0',
    zip_safe=False,
)
| 28.382353
| 79
| 0.675648
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 483
| 0.500518
|
f75630cbc7b1eef703d5e902537d65487d1b7612
| 4,126
|
py
|
Python
|
sim/pid.py
|
jmagine/rf-selection
|
ba9dcb5ca550916873ce68baa71da983f2dd4be5
|
[
"MIT"
] | 1
|
2020-05-06T01:28:06.000Z
|
2020-05-06T01:28:06.000Z
|
sim/pid.py
|
jmagine/multiuav-rf
|
ba9dcb5ca550916873ce68baa71da983f2dd4be5
|
[
"MIT"
] | null | null | null |
sim/pid.py
|
jmagine/multiuav-rf
|
ba9dcb5ca550916873ce68baa71da983f2dd4be5
|
[
"MIT"
] | null | null | null |
'''*-----------------------------------------------------------------------*---
Author: Jason Ma
Date : Oct 18 2018
TODO
File Name : pid.py
Description: TODO
---*-----------------------------------------------------------------------*'''
import time
import matplotlib.animation as anim
import matplotlib.pyplot as plt
import threading
import math
import numpy as np
'''[Global Vars]------------------------------------------------------------'''
# Centre and radius of the circle the reference point travels around.
ORIGIN_X = 0.0
ORIGIN_Y = 0.0
C_R = 10

#plt.autoscale(enable=True, axis="both")
# Top axes: scatter of reference point and drone; bottom axes: error history.
fig = plt.figure()
ax = fig.add_subplot(2,1,1)
ax2 = fig.add_subplot(2,1,2)

scat = ax.scatter([], [])
ax.set_xlim([-1 * C_R - 1, C_R + 1])
ax.set_ylim([-1 * C_R - 1, C_R + 1])
scat.set_facecolors(['g', 'r'])  # green = reference point, red = drone
scat.set_sizes([31, 31])

# Shared simulation state mutated by update().
prev_time = time.time()
vel = np.array([0.0, 0.0])
errors = [0, 1]
error_plot, = ax2.plot([i for i in range(len(errors))], errors, color="g")
class drone():
    """2-D point agent whose velocity is driven by a PID position controller."""

    def __init__(self, p, vel):
        # Kinematic state: position and velocity as float arrays.
        self.pos = np.array(p)
        self.v = np.array(vel)
        # Controller state.
        self.prev_error = np.zeros((2))
        self.integral = np.zeros((2))
        self.dt = 0.01
        # Gains: proportional-only control; I and D terms are disabled.
        self.kp = 0.8 * 2.0
        self.ki = 0
        self.kd = 0
        # Earlier tuning experiments, kept for reference:
        #self.ki = 2.0 * self.kp / 2.0
        #self.kd = self.kp * 2.0 / 8.0
        #self.ki = 2 * self.kp / 1.0
        #self.kd = self.kp * 0.01 / 8

    def callback(self):
        pass

    def run(self, ref_pos, vx=None, vy=None):
        """Advance one step toward ref_pos and return the PID output."""
        # Integrate current velocity, then apply any component overrides.
        self.pos += self.v
        if vx:
            self.v[0] = vx
        if vy:
            self.v[1] = vy

        # PID terms: leaky integral and a rate-limited derivative.
        error = ref_pos - self.pos
        self.integral = self.integral * 0.99 + error * self.dt
        derivative = np.clip((error - self.prev_error) / self.dt, -0.1, 0.1)
        self.prev_error = error

        p_term = self.kp * error
        i_term = self.ki * self.integral
        d_term = self.kd * derivative
        pid_output = p_term + i_term + d_term
        print(self.pos, pid_output, p_term, i_term, d_term)
        return pid_output
d = drone([ORIGIN_X + C_R, ORIGIN_Y], [0.0, 0.0])
def dist(p1, p2):
    """Euclidean distance between two equal-length points.

    Args:
        p1, p2: sequences of coordinates with the same dimensionality.

    Returns:
        float: the L2 distance between p1 and p2.

    NOTE: this module previously also defined a 4-scalar variant
    dist(x1, y1, x2, y2) immediately above this one; it was dead code
    because this definition shadowed it at import time, so it has been
    removed.
    """
    assert len(p1) == len(p2)
    total = 0
    for a, b in zip(p1, p2):
        total += (b - a) * (b - a)
    return (total)**(1/2)
#def pid_angle(x, y, ref_x, ref_y, d):
# return math.atan(-1 * (C_R - dist(x, y, ORIGIN_X, ORIGIN_Y)) / d) + math.atan((y - ORIGIN_Y) / (x - ORIGIN_X)) + math.pi / 2
def ref(t):
    """Reference point at angle t (radians) on the circle of radius C_R."""
    x = ORIGIN_X + C_R * math.cos(t)
    y = ORIGIN_Y + C_R * math.sin(t)
    return np.array([x, y])
def update(i):
    """Animation callback: advance reference point and drone one frame.

    Args:
        i: frame index supplied by FuncAnimation; also sets the reference
           point's angle (i / 25 radians).
    """
    global prev_time, vel
    #update reference point position
    curr_time = time.time()
    ref_point = ref(i / 25.0)
    #ref_x = ref_point[0]
    #ref_y = ref_point[1]

    out = d.run(ref_point)

    # Saturate the commanded velocity at magnitude 10 per axis, scaling the
    # whole vector so its direction is preserved.
    # NOTE(review): this loop rebinds `i`, shadowing the frame index; the
    # frame index is not used again below, but a rename would be clearer.
    for i in range(2):
        if out[i] > 10 or out[i] < -10:
            out = out * 10 / out[i]

    #print(d.pos, out)
    d.v = out

    # Busy-wait so each frame lasts at least one controller period (dt).
    while time.time() - prev_time < d.dt:
        time.sleep(d.dt / 10)
    prev_time = time.time()

    #print the desired angle of drone
    #pid_ang = pid_angle(d.x, d.y, ref_point[0], ref_point[1], 0.05)
    #print(math.cos(pid_ang), math.sin(pid_ang))
    #d.run(math.cos(pid_ang), math.sin(pid_ang))
    scat.set_offsets([[ref_point[0], ref_point[1]], [d.pos[0], d.pos[1]]])

    # Append current tracking error and refresh the error plot.
    errors.append(dist(ref_point, d.pos))
    error_plot.set_xdata([i for i in range(len(errors))])
    error_plot.set_ydata(errors)
    ax2.set_xlim([-1, len(errors) + 1])
    # NOTE(review): ylim is [1, min(errors)] -- upper bound below the lower
    # bound looks inverted; confirm the intended axis limits.
    ax2.set_ylim([1, min(errors)])
def main():
    """Construct a drone on the circle at rest (currently unused; the
    __main__ guard drives the animation directly)."""
    # BUG FIX: drone.__init__ takes (p, vel) -- two sequences -- but this
    # call previously passed three scalars (ORIGIN_X + C_R, ORIGIN_Y, 1),
    # which raised TypeError whenever main() was invoked.
    d = drone([ORIGIN_X + C_R, ORIGIN_Y], [0.0, 0.0])
if __name__ == '__main__':
    #main()
    # Run 1000 frames of the tracking animation; update() does all the work
    # (controller step, velocity saturation, plot refresh).
    a = anim.FuncAnimation(fig, update, range(1000), interval=1, blit=False, repeat=False)
    plt.show()
| 25.469136
| 127
| 0.537082
| 1,387
| 0.336161
| 0
| 0
| 0
| 0
| 0
| 0
| 1,360
| 0.329617
|
f756ddf67db85cf74cd46b5e90ed19ef6fd17367
| 32,091
|
py
|
Python
|
gazoo_device/fire_manager.py
|
isabella232/gazoo-device
|
0e1e276d72333e713b47152815708b9c74c45409
|
[
"Apache-2.0"
] | null | null | null |
gazoo_device/fire_manager.py
|
isabella232/gazoo-device
|
0e1e276d72333e713b47152815708b9c74c45409
|
[
"Apache-2.0"
] | 1
|
2021-06-24T19:20:50.000Z
|
2021-06-24T19:20:50.000Z
|
gazoo_device/fire_manager.py
|
isabella232/gazoo-device
|
0e1e276d72333e713b47152815708b9c74c45409
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fire Manager module.
Used for CLI-specific commands and flags.
Built to work with Python Fire: https://github.com/google/python-fire.
"""
import codecs
import enum
import inspect
import json
import logging
import os.path
import pydoc
import re
import sys
import textwrap
import time
from typing import Any, Collection, Optional, Type
from gazoo_device import config
from gazoo_device import decorators
from gazoo_device import errors
from gazoo_device import gdm_logger
from gazoo_device import manager
from gazoo_device import package_registrar
from gazoo_device import testbed
from gazoo_device.utility import parallel_utils
from gazoo_device.utility import usb_utils
logger = gdm_logger.get_logger()
class AttributeClassification(enum.Enum):
  """Types of class attributes recognized by the "man" method."""
  CAPABILITY = "capability"
  DEPRECATED_METHOD = "deprecated method"
  DEPRECATED_PROPERTY = "deprecated property"
  HEALTH_CHECK = "health check"
  # Fallback bucket for attributes matching none of the other categories.
  OTHER = "other"
  PRIVATE_METHOD = "private method"
  PROPERTY = "property"
  CLASS_PROPERTY = "class property"
  PUBLIC_METHOD = "public method"
# Health dict template reported for a device (or testbed) that passed all
# health checks; see make_devices_ready.
HEALTHY_DEVICE_HEALTH = {
    "is_healthy": True,
    "unhealthy_reason": "",
    "err_type": "",
    "checks_passed": [],
    "properties": {}
}
# Seconds to wait for a device's log file to appear before giving up.
MAX_TIME_TO_WAIT_FOR_INITATION = 5
# Spaces per indentation level for generated "man" documentation.
_DOC_INDENT_SIZE = 4
# Capability attributes visible on device summary man page
# (e.g. "man cambrionix").
_VISIBLE_CAPABILITY_ATTRIBUTES = [
    AttributeClassification.PROPERTY, AttributeClassification.PUBLIC_METHOD
]
def _log_man_warning_for_multiple_flavors(
    capability_classes: Collection[Type[Any]],
    capability_name: str,
    device_type: str,
    capability_class: Type[Any]) -> None:
  """Logs 'gdm man' warning when multiple capability flavors are available.

  Capabilities can have multiple flavors in one device class (somewhat
  rare); the flavor actually used is chosen from device firmware at
  runtime. With no attached device we cannot know which flavor applies,
  so warn and show documentation for a single flavor.

  Args:
    capability_classes: All available capability flavors.
    capability_name: Name of the capability.
    device_type: Type of the device with this capability.
    capability_class: Capability flavor selected to print documentation for.
  """
  flavors = [flavor_class.__name__ for flavor_class in capability_classes]
  message = (
      f"{len(flavors)} flavors ({flavors}) of capability {capability_name!r} "
      f"are available for {device_type}.\n"
      f"Showing documentation for flavor {capability_class}.\n")
  logger.warning(message)
class FireManager(manager.Manager):
  """Manages the setup and communication of smart devices."""

  def __init__(self, debug=False, dev_debug=False, quiet=False):
    """Initializes the CLI-facing Manager.

    Args:
      debug (bool): stream debug-level logs to the console.
      dev_debug (bool): developer variant of debug; also streams debug logs.
      quiet (bool): raise the log level to WARNING to reduce CLI output.
    """
    stream_debug = debug or dev_debug
    debug_level = logging.WARNING if quiet else logging.DEBUG
    super().__init__(debug_level=debug_level, stream_debug=stream_debug)
  def exec(self, identifier):
    """Alias for create_device with health checks disabled.

    Important: ensure the device is in a healthy state before using "exec"
    by running health checks via `gdm health-check device-1234`.

    Usage from the CLI: `gdm exec device-1234 - shell "echo 'foo'"`.

    Args:
      identifier (str): The identifier string (name) which specifies the device.

    Returns:
      Object: The device found and created by the identifier specified.
    """
    # 'exec' intentionally shadows the Python builtin: it is exposed as a
    # CLI verb by Python Fire, not invoked as exec() in code.
    return self.create_device(identifier, make_device_ready="off")
def get_persistent_prop_devices(self, devices):
"""Gets persistent props of one or more devices and returns a json response.
This assumes the provided devices are healthy. If properties are unable to
be fetched, an empty json object will be returned for the device for which
the properties could not be retrieved.
Args:
devices (list): Device identifiers to get properties of.
Returns:
str: json formatted persistent properties, e.g.
{
'device-1234': {
'build_date': '180124',
'ftdi_serial_number': '1f824023'
},
'device-5678': {},
...
}
"""
logger.setLevel(logging.ERROR) # silence logging to reduce CLI output
devices_props = {}
if isinstance(devices, str):
devices = devices.split(",")
for device_name in devices:
persistent_props = {}
try:
device_props = self.get_device_configuration(device_name)
persistent_props = device_props.get("persistent", {})
except errors.DeviceError:
pass # unhealthy devices will have empty props
devices_props[device_name] = persistent_props
return json.dumps(devices_props)
  def get_prop(self, device_name, prop=None):
    """Prints the device properties.

    Args:
      device_name (str): identifier for device.
      prop (str): identifier for property. When None, all properties are
        printed, grouped by property type.
    """
    format_line = " {:22s} {}"
    if prop is not None:  # If requested a single property...
      value = self.get_device_prop(device_name, prop)
      logger.info(format_line.format(prop, str(value)))
    else:
      props_dicts = self.get_device_prop(device_name, prop)
      # Device-level groups are printed first, then manager-level ones.
      device_property_types = ["persistent", "optional", "dynamic"]
      manager_property_types = ["settable"]
      for property_type in device_property_types + manager_property_types:
        if property_type not in props_dicts:
          continue
        title = "{} Properties:".format(property_type.capitalize())
        logger.info("")
        logger.info(title)
        for prop_name in sorted(props_dicts[property_type]):
          prop_value = props_dicts[property_type][prop_name]
          # Multi-entry lists are printed one value per indented line.
          if isinstance(prop_value, list) and len(prop_value) > 1:
            logger.info(format_line.format(prop_name, ""))
            for value in prop_value:
              if callable(value):
                value = value.__name__
              logger.info("\t\t\t {}".format(str(value)))
            logger.info("")
          # Multi-entry dicts are printed as indented key/value pairs.
          elif isinstance(prop_value, dict) and len(prop_value) > 1:
            logger.info(format_line.format(prop_name, ""))
            for key in sorted(prop_value):
              logger.info("\t\t\t {:25s} {!r}".format(key, prop_value[key]))
            logger.info("")
          else:
            logger.info(format_line.format(prop_name, prop_value))
def health_check(self, identifier, recover=False):
"""CLI command for running device health checks.
Usage from the CLI: `gdm health-check device-1234`.
Args:
identifier (str): The identifier string (name) which specifies the
device.
recover (bool): whether to automatically recover from health check
failures.
"""
make_device_ready_setting = "check_only"
if recover:
make_device_ready_setting = "on"
device = self.create_device(identifier, make_device_ready="off")
try:
device.make_device_ready(setting=make_device_ready_setting)
finally:
device.close()
  @classmethod
  def helpfull(cls):
    """Prints a general overview of GDM's features.

    Invoked by 'man' without arguments.
    """
    # The dedented block below is filled with the three supported
    # device-type lists; the [1:-1] slice drops the framing newlines.
    description = textwrap.dedent("""
        Gazoo Device Manager (gdm) command line interface is a single utility for control of a
        variety of Gazoo hardware.

        Commands:
            The CLI is dynamic, meaning commands are generated from API methods (see
            https://github.com/google/python-fire).
            There are several groups of commands.

            "Manager" commands operate on local config files on the PC.
                These commands are generated from API methods in the Manager class.
                The command may talk to devices (detect), but they don't change the state of
                the device (i.e. read-only)
                The command may change the config files stored on the PC, to detect new
                devices, add an alias, etc.
                To see a list of available manager commands, run "gdm".

            "Device" commands talk to or modify a Gazoo device.
                These commands are generated from API methods in device classes.
                This includes upgrade, reboot, etc.
                In general you enter a device command as "gdm issue <device_name> - <command>"
                Examples::

                    gdm issue raspberrypi-zdwm - reboot
                    gdm issue raspberrypi-zdwm - shell "echo 'foo'"

                To see the list of device commands available for a device, run "gdm man <device_type>"
                For example:: gdm man cambrionix

            You can get more details on a particular command at the command line with:
            "gdm <command> -h"
            For example:: gdm detect -h

            You can pass in flags to the CLI with:
            "gdm --<flag> - <command>"
            For example:: gdm --debug - devices
            To see a list of available flags, run "gdm -h"

        Supported device types:
            Primary:
                {}
            Auxiliary:
                {}
            Virtual:
                {}

        To explore available device functionality through the dynamic CLI, you will need a device
        attached to your host.
        Use "gdm man" to access static documentation (no devices necessary, but has limitations):
            "gdm man <device_type>" to see all functionality supported by a device.
            "gdm man <device_type> --deprecated" to see just deprecated functionality for a device.
            "gdm man <device_type> <class_attribute>" for device attribute documentation.
            "gdm man <device_type> <capability> <capability_attribute>" for capability attribute documentation.
        """).format(
            cls._indent_doc_lines(cls.get_supported_primary_device_types()),
            cls._indent_doc_lines(cls.get_supported_auxiliary_device_types()),
            cls._indent_doc_lines(cls.get_supported_virtual_device_types()))[1:-1]
    logger.info(description)
def issue(self, identifier, **kwargs):
"""Alias for create_device with health checks enabled by default.
Usage from the CLI: `gdm issue device-1234 - shell "echo 'foo'"`.
Args:
identifier (str): The identifier string (name) which specifies the
device.
**kwargs (dict): keyword-only arguments passed on to create_device:
"log_file_name" (str) -- a string log file name to use for log
results. "log_directory" (str) -- a directory path to use for storing
log file. "make_device_ready" (str) -- health check setting ("on",
"check_only", "off").
Returns:
Object: The device found and created by the identifier specified.
"""
return self.create_device(
identifier,
log_file_name=kwargs.get("log_file_name"),
log_directory=kwargs.get("log_directory"),
make_device_ready=kwargs.get("make_device_ready", "on"))
  def log(self, device_name, log_file_name=None, duration=2000):
    """Streams device logs to stdout.

    Args:
      device_name (str): device identifier.
      log_file_name (str): log_file_name. Used for testing purposes.
      duration (float): how long to stream logs for, in seconds.

    Raises:
      DeviceError: if unable to initiate log file to stream within
        MAX_TIME_TO_WAIT_FOR_INITATION seconds.
    """
    logger.info("Streaming logs for max {}s".format(duration))
    device = self.create_device(device_name, log_file_name=log_file_name)
    # Disable log rotation feature so the tail loop below can follow a
    # single file for the whole duration.
    if hasattr(type(device), "switchboard"):
      device.switchboard.set_max_log_size(0)
    try:
      # Wait up to MAX_TIME_TO_WAIT_FOR_INITATION seconds for the log
      # file to be created.
      end_time = time.time() + MAX_TIME_TO_WAIT_FOR_INITATION
      while time.time() < end_time:
        if os.path.exists(device.log_file_name):
          break
        time.sleep(0.001)
      else:
        raise errors.DeviceError(
            "Streaming logs for {} failed. "
            "Log file not created within {} seconds".format(
                device_name, MAX_TIME_TO_WAIT_FOR_INITATION))
      start_time = time.time()
      end_time = start_time + duration
      # Open log file and tail it, echoing new lines to stdout until the
      # duration elapses; polls every millisecond when no data is ready.
      with codecs.open(
          device.log_file_name, "r", encoding="utf-8",
          errors="replace") as log_file:
        while time.time() < end_time:
          line = log_file.readline()
          if line:
            sys.stdout.write(line)
            sys.stdout.flush()
          else:
            time.sleep(0.001)
    finally:
      sys.stdout.flush()
      device.close()
  def make_devices_ready(self, devices, testing_props=None, aggressive=False):
    """Makes one or more devices ready and returns a json response.

    Args:
      devices (list): Devices identifiers to make ready. A comma-separated
        string is also accepted.
      testing_props (dict): Properties of the testbed used for testing.
      aggressive (bool): Re-flash the device with a valid build if recovery
        fails.

    Returns:
      str: json formatted device health, e.g.
        {
          'device-1234': {
            'is_healthy': true or false if the device is healthy or not,
            'unhealthy_reason': error message if device is unhealthy,
            'err_type': type of exception raised, if any
          },
          'device-5678': {
            'is_healthy': ...,
            'unhealthy_reason': ...,
            'err_type': ...
          },
          ...
        }
    """
    logger.setLevel(logging.ERROR)  # silence logging to reduce CLI output
    combined_results = {}
    if isinstance(devices, str):
      devices = devices.split(",")
    # create device instances and construct parameter dicts
    device_instances = []
    parameter_dicts = {}
    for device_name in devices:
      try:
        device = self.create_device(device_name, make_device_ready="off")
      except errors.DeviceError as err:
        # Creation failures are reported per device rather than aborting
        # the whole batch.
        combined_results[
            device_name] = self._construct_health_dict_from_exception(err)
      else:
        device_instances.append(device)
        # pass make_device_ready setting to support aggressive recovery
        setting = "flash_build" if aggressive else "on"
        parameter_dicts[device.DEVICE_TYPE] = {"setting": setting}
    # execute manager method with each device instance in parallel
    if device_instances:
      results = parallel_utils.parallel_process(
          action_name="make_device_ready",
          fcn=self._make_devices_ready_single_device,
          devices=device_instances,
          parameter_dicts=parameter_dicts)
      # combine results of parallel calls
      for result in results:
        if isinstance(result, dict):
          combined_results.update(result)
        else:
          # Non-dict results (e.g. status strings) are echoed to the CLI.
          logger.info(result)
    # execute testbed health checks if testing props contain property keys that
    # exist in Testbed.PROP_TO_HEALTH_CHECK
    if testing_props:
      try:
        testbed.Testbed(device_instances, testing_props).make_testbed_ready()
      except errors.DeviceError as err:
        combined_results[
            "testbed"] = self._construct_health_dict_from_exception(err)
      else:
        combined_results["testbed"] = HEALTHY_DEVICE_HEALTH
    # device instances no longer needed
    for device in device_instances:
      device.close()
    return json.dumps(combined_results)
  @classmethod
  def man(cls,
          device_type: Optional[str] = None,
          class_attr_name: Optional[str] = None,
          capability_attr_name: Optional[str] = None,
          deprecated: bool = False) -> None:
    """Prints documentation without reliance on device instance creation.

    Dynamic device documentation ("gdm <command> --help") is more complete, but
    requires a device attached to the host to generate documentation.

    Args:
      device_type: Device type.
      class_attr_name: Name of the class attribute to display documentation for.
      capability_attr_name: Name of the capability attribute to display
        documentation for.
      deprecated: Display only deprecated methods and properties.

    Raises:
      AttributeError: Requested attribute does not exist.
      TypeError: Requested capability attribute documentation for a
        non-capability.
    """
    # Handle both space-separated and dot-separated inputs:
    # gdm man cambrionix switch_power power_on
    # gdm man cambrionix.switch_power.power_on
    args = " ".join(
        arg for arg in (device_type, class_attr_name, capability_attr_name)
        if arg)
    args = args.replace(".", " ")
    # Allow both dashes and underscores for names.
    args = args.replace("-", "_")
    args_list = args.split()
    # Pad with None entries so the 3-way unpacking always succeeds even when
    # fewer than 3 names were provided.
    device_type, class_attr_name, capability_attr_name = (
        args_list + [None] * (3 - len(args_list)))
    if not device_type:
      # No arguments at all: print the general help.
      cls.helpfull()
    else:
      device_class = cls.get_supported_device_class(device_type)
      if not class_attr_name:
        # "gdm man <device_type>": print the device-level summary.
        cls._man_device(device_type, device_class, deprecated)
      else:
        if not hasattr(device_class, class_attr_name):
          raise AttributeError(f"{device_type} ({device_class}) does not have "
                               f"attribute {class_attr_name!r}")
        class_attribute = getattr(device_class, class_attr_name)
        classification = cls._classify_attribute(class_attribute)
        class_attr_description = classification.value
        if classification == AttributeClassification.CAPABILITY:
          # A capability may have several flavors; document the first one and
          # warn about the rest.
          capability_classes = list(class_attribute.capability_classes)
          class_attribute = capability_classes[0]
          class_attr_description += f"; flavor: {class_attribute.__name__}"
          if len(capability_classes) > 1:
            _log_man_warning_for_multiple_flavors(
                capability_classes, class_attr_name, device_type,
                class_attribute)
        if not capability_attr_name:
          # "gdm man <device_type> <attribute>".
          cls._man_class_attribute(
              device_class, class_attr_name, class_attr_description,
              class_attribute)
        else:
          # "gdm man <device_type> <capability> <attribute>" — only valid when
          # the middle name actually resolved to a capability.
          if classification != AttributeClassification.CAPABILITY:
            raise TypeError(
                f"{device_type}.{class_attr_name} ({class_attribute}) is not a "
                "capability.")
          if not hasattr(class_attribute, capability_attr_name):
            raise AttributeError(
                f"{device_type}.{class_attr_name} ({class_attribute}) "
                f"does not have attribute {capability_attr_name!r}")
          capability_attribute = getattr(class_attribute, capability_attr_name)
          capability_attr_description = cls._classify_attribute(
              capability_attribute).value + f" of {class_attribute.__name__}"
          cls._man_capability_attribute(
              device_class, class_attr_name, capability_attr_name,
              capability_attr_description, capability_attribute)
def print_usb_info(self):
"""Prints the usb_info dictionary in a human readable form."""
values = usb_utils.get_address_to_usb_info_dict().values()
logger.info("{} USB connections found.".format(len(values)))
for num, usb_info_dict in enumerate(values):
keys = usb_info_dict.get_properties()
logger.info("Connection {}:".format(num))
for key in keys:
logger.info("\t{:15} {:15}".format(key,
str(getattr(usb_info_dict, key))))
def register(self, package_name: str) -> None:
"""Registers the given package with GDM CLI.
Args:
package_name: Name of the package to register. For example,
"foo_extension_package" or "my_package.bar_devices".
Note that this only registers the package for CLI usage. Tests and Python
interpreter users must use package_registrar.register() instead.
"""
registered_cli_packages = self.config.get("cli_extension_packages", [])
if package_name not in registered_cli_packages:
if package_registrar.import_and_register(package_name,
include_cli_instructions=True):
self._set_config_prop("cli_extension_packages",
registered_cli_packages + [package_name])
logger.info(f"Registered package {package_name!r} with GDM CLI.")
else:
logger.info(
f"Package {package_name!r} is already registered with GDM CLI.")
def unregister(self, package_name: str) -> None:
"""Removes the given package from GDM CLI.
Args:
package_name: Name of the package to unregister. For example,
"foo_extension_package" or "my_package.bar_devices".
Note that this only removes the package from the CLI.
"""
registered_cli_packages = self.config.get("cli_extension_packages", [])
if package_name in registered_cli_packages:
updated_packages = registered_cli_packages.copy()
updated_packages.remove(package_name)
self._set_config_prop("cli_extension_packages", updated_packages)
logger.info(f"Removed package {package_name!r} from GDM CLI.")
else:
logger.info(f"Package {package_name!r} is not registered with GDM CLI.")
def update_gdm(self):
"""Update GDM in this virtual environment.
To update GDM in this virtual environment, call the GDM launcher script
by using the full path as shown above.
If no version is specified then GDM will be updated to the latest
version available otherwise the version specified will be installed instead.
"""
logger.info(textwrap.dedent("""
Unable to update Gazoo Device Manager using this tool.
If you want to update GDM call the GDM launcher script directly like this:
/usr/local/bin/gdm update-gdm [version]
If after doing the above you see this message again, then you probably did a
'sudo pip install gazoo-device' and overwrote the GDM launcher script. Please
reinstall GDM.
"""))
  @classmethod
  def _classify_attribute(cls, class_attr):
    """Classifies the class attribute into an AttributeClassification.

    Args:
      class_attr: Attribute object retrieved from a device/capability class.

    Returns:
      AttributeClassification: category of the attribute.

    Note:
      The order of the checks is significant: deprecation markers are tested
      before the generic property/method checks so that deprecated members are
      not reported as regular ones.
    """
    # Unwrap any decorators so the isinstance checks below see the real thing.
    class_attr = decorators.unwrap(class_attr)
    if isinstance(class_attr, decorators.CapabilityProperty):
      return AttributeClassification.CAPABILITY
    # Deprecated methods carry __deprecated__ directly; deprecated properties
    # carry it on their getter (fget).
    if (hasattr(class_attr, "__deprecated__") or
        hasattr(getattr(class_attr, "fget", None), "__deprecated__")):
      if inspect.isroutine(class_attr):
        return AttributeClassification.DEPRECATED_METHOD
      if isinstance(class_attr, property):
        return AttributeClassification.DEPRECATED_PROPERTY
    if isinstance(class_attr, config.CLASS_PROPERTY_TYPES):
      return AttributeClassification.CLASS_PROPERTY
    if isinstance(class_attr, property):
      return AttributeClassification.PROPERTY
    if inspect.isroutine(class_attr):
      # Health checks are routines tagged with __health_check__.
      if hasattr(class_attr, "__health_check__"):
        return AttributeClassification.HEALTH_CHECK
      if class_attr.__name__.startswith("_"):
        return AttributeClassification.PRIVATE_METHOD
      return AttributeClassification.PUBLIC_METHOD
    return AttributeClassification.OTHER
def _construct_health_dict_from_exception(self, exc):
"""Constructs a dict containing info about an unhealthy device's issues.
Args:
exc (Exception): the exception raised that is causing the device's issues
Returns:
str: json formatted device health, e.g.
{
device-1234: {
'is_healthy': true,
'unhealthy_reason': "",
'err_type': "",
'checks_passed': [],
'properties': {}
}
}
"""
device_health = {}
device_health["is_healthy"] = False
device_health["unhealthy_reason"] = str(exc)
device_health["err_type"] = type(exc).__name__
device_health["checks_passed"] = getattr(exc, "checks_passed", [])
device_health["properties"] = getattr(exc, "properties", {})
return device_health
@classmethod
def _indent_doc_lines(cls, doc_lines, indent=_DOC_INDENT_SIZE):
"""Indents docstring lines."""
indent_str = " " * indent
return "\n".join(indent_str + line for line in doc_lines)
def _make_devices_ready_single_device(self, device, parameter_dict):
"""Execute make_device_ready for a single device.
Args:
device (GazooDeviceBase or AuxiliaryDeviceBase): device to execute
make_device_ready on
parameter_dict (dict): dictionary storing the setting to pass to
make_device_ready.
Returns:
dict: device health, e.g.
'device-1234': {
'is_healthy': true or false if the device is healthy or not,
'unhealthy_reason': error message if device is unhealthy,
'err_type': type of exception raised, if any
}
Note:
Intended to be executed in parallel by parallel_utils, triggered by
the make_devices_ready method.
"""
device_health = HEALTHY_DEVICE_HEALTH
try:
device.make_device_ready(setting=parameter_dict.get("setting"))
except errors.DeviceError as err:
device_health = self._construct_health_dict_from_exception(err)
device_health["logs"] = device.log_file_name
return {device.name: device_health}
@classmethod
def _man_capability_attribute(cls,
device_class: Type[Any],
capability_name: str,
capability_attr_name: str,
description: str,
attribute: Any) -> None:
"""Prints capability attribute documentation.
Invoked by 'man' with 3 args (device, capability, attribute).
Args:
device_class: Device class which contains the capability.
capability_name: Name of the capability containing the attribute.
capability_attr_name: Capability attribute to print documentation for.
description: Description of the capability attribute.
attribute: The capability attribute to display documentation for.
"""
doc_title = (
f"Manual for "
f"{device_class.__name__}.{capability_name}.{capability_attr_name} "
f"({description})\n")
pydoc_lines = pydoc.render_doc(attribute).splitlines()
doc = doc_title + "\n".join(pydoc_lines[1:]) # Replace pydoc's title
logger.info(doc)
@classmethod
def _man_class_attribute(cls,
device_class: Type[Any],
class_attr_name: str,
description: str,
attribute: Any) -> None:
"""Prints class attribute documentation.
Invoked by 'man' with 2 args (device, attribute).
Args:
device_class: Device class which contains the attribute.
class_attr_name: Name of the class attribute to display documentation for.
description: Description of the class attribute.
attribute: The attribute to display documentation for.
"""
doc_title = (f"Manual for {device_class.__name__}.{class_attr_name} "
f"({description})\n")
pydoc_lines = pydoc.render_doc(attribute).splitlines()
doc = doc_title + "\n".join(pydoc_lines[1:]) # Replace pydoc's title
logger.info(doc)
  @classmethod
  def _man_device(cls,
                  device_type: str,
                  device_class: Type[Any],
                  deprecated: bool) -> None:
    """Prints supported device features.

    Invoked by 'man' with 1 argument (device type).

    Args:
      device_type: Type of device.
      device_class: Device class to show documentation for.
      deprecated: Whether to display manuals of deprecated attributes.
    """
    # Group device class attributes into properties, health checks, methods,
    # and capabilities.
    capability_name_to_classes = {}
    property_names = []
    class_properties = []
    public_methods_names = []
    health_check_names = []
    deprecated_methods = []
    deprecated_properties = []
    for name, attribute in inspect.getmembers(device_class):
      classification = cls._classify_attribute(attribute)
      if classification == AttributeClassification.PROPERTY:
        property_names.append(name)
      elif classification == AttributeClassification.CLASS_PROPERTY:
        if not name.startswith("_"):  # Hide private class properties.
          class_properties.append(f"{name}: {attribute}")
      elif classification == AttributeClassification.HEALTH_CHECK:
        health_check_names.append(name)
      elif classification == AttributeClassification.PUBLIC_METHOD:
        public_methods_names.append(name)
      elif classification == AttributeClassification.CAPABILITY:
        capability_name_to_classes[name] = list(attribute.capability_classes)
      elif classification == AttributeClassification.DEPRECATED_METHOD:
        deprecated_methods.append(f"{name} ({attribute.__deprecated__})")
      elif classification == AttributeClassification.DEPRECATED_PROPERTY:
        # Parse alias from deprecated property docstring; entries without a
        # parsable alias are silently skipped.
        match = re.search(r'See "(?P<alias>.*)".', attribute.__doc__)
        if match:
          alias = match.group("alias")
          deprecated_properties.append(f"{name} ({alias})")
    # Generate a summary of supported capability methods and properties for
    # each capability.
    capability_lines = []
    for cap_name, cap_classes in capability_name_to_classes.items():
      # There may be several flavors of a capability for a device class.
      for cap_class in cap_classes:
        capability_lines.append("{} ({})".format(cap_name, cap_class.__name__))
        methods_and_props = [
            name for name, member in inspect.getmembers(cap_class)
            if cls._classify_attribute(member) in _VISIBLE_CAPABILITY_ATTRIBUTES
        ]
        indented_lines = [
            " " * _DOC_INDENT_SIZE + line for line in methods_and_props
        ]
        capability_lines.extend(indented_lines)
    docs_capabilities = cls._indent_doc_lines(capability_lines)
    docs_methods = cls._indent_doc_lines(public_methods_names)
    docs_health_checks = cls._indent_doc_lines(health_check_names)
    docs_properties = cls._indent_doc_lines(property_names)
    docs_class_properties = cls._indent_doc_lines(class_properties)
    docs_deprecated_methods = cls._indent_doc_lines(deprecated_methods)
    docs_deprecated_properties = cls._indent_doc_lines(deprecated_properties)
    # [1:-1] strips the leading newline after '"""' and the trailing newline
    # before the closing quotes.
    if deprecated:
      template = textwrap.dedent("""
          Deprecated manual for device type '{device_type}' (class {device_class})
          Deprecated methods:
          {deprecated_methods}
          Deprecated properties:
          {deprecated_properties}
          Use "gdm man {device_type} <deprecated_attribute>" for device attribute documentation.
          """)[1:-1]
    else:
      template = textwrap.dedent("""
          Manual for device type '{device_type}' (class {device_class})
          Owned (maintained) by {device_class._OWNER_EMAIL}
          Supported capabilities:
          {capabilities}
          Supported methods:
          {methods}
          Supported health check methods:
          {health_checks}
          Supported properties:
          {properties}
          Class properties:
          {class_properties}
          Use:
          "gdm man <device_type> <class_attribute>" for device attribute documentation.
          "gdm man <device_type> <capability> <capability_attribute>" for capability attribute documentation.
          """)[1:-1]
    # Extra keyword args not referenced by the chosen template are ignored by
    # str.format.
    doc = template.format(
        device_type=device_type,
        device_class=device_class,
        capabilities=docs_capabilities,
        methods=docs_methods,
        health_checks=docs_health_checks,
        properties=docs_properties,
        class_properties=docs_class_properties,
        deprecated_methods=docs_deprecated_methods,
        deprecated_properties=docs_deprecated_properties)
    logger.info(doc)
| 38.249106
| 109
| 0.675361
| 29,315
| 0.913496
| 0
| 0
| 14,307
| 0.445826
| 0
| 0
| 15,719
| 0.489826
|
f7583f3c89da3d4e9ea5d5c4fffa4b29559b7e57
| 4,814
|
py
|
Python
|
py/desispec/scripts/humidity_corrected_fiberflat.py
|
echaussidon/desispec
|
8a8bd59653861509dd630ffc8e1cd6c67f6cdd51
|
[
"BSD-3-Clause"
] | null | null | null |
py/desispec/scripts/humidity_corrected_fiberflat.py
|
echaussidon/desispec
|
8a8bd59653861509dd630ffc8e1cd6c67f6cdd51
|
[
"BSD-3-Clause"
] | null | null | null |
py/desispec/scripts/humidity_corrected_fiberflat.py
|
echaussidon/desispec
|
8a8bd59653861509dd630ffc8e1cd6c67f6cdd51
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import, division
import os
import fitsio
import argparse
import numpy as np
from desiutil.log import get_logger
from desispec.io import read_fiberflat,write_fiberflat,findfile,read_frame
from desispec.io.fiberflat_vs_humidity import get_humidity,read_fiberflat_vs_humidity
from desispec.calibfinder import CalibFinder
from desispec.fiberflat_vs_humidity import compute_humidity_corrected_fiberflat
def parse(options=None):
    """Parses command-line options for the humidity-corrected fiberflat script.

    Args:
        options: optional list of argument strings; when None, sys.argv is used.

    Returns:
        argparse.Namespace with attributes infile, fiberflat, use_sky_fibers
        and outfile.
    """
    parser = argparse.ArgumentParser(description="Compute a fiberflat corrected for variations with humidity.")
    parser.add_argument('-i', '--infile', type=str, default=None, required=True,
                        help='path of DESI exposure frame fits file')
    parser.add_argument('--fiberflat', type=str, default=None, required=True,
                        help='path of DESI fiberflat fits file')
    parser.add_argument('--use-sky-fibers', action='store_true',
                        help='use sky fibers to improve the correction')
    # Typo fixed: "fiberflar" -> "fiberflat".
    parser.add_argument('-o', '--outfile', type=str, default=None, required=True,
                        help='path of output fiberflat file')
    # parse_args(None) falls back to sys.argv, so the old if/else is redundant.
    return parser.parse_args(options)
def main(args):
    """Computes a humidity-corrected fiberflat for one exposure frame.

    If the calibration data contains no fiberflat-vs-humidity table for this
    camera, the input fiberflat is simply symlinked to the output path.

    Args:
        args: argparse.Namespace from parse() with attributes infile,
            fiberflat, use_sky_fibers and outfile.

    Returns:
        int: 0 on success.

    Raises:
        RuntimeError: if the best-fit humidity of the dome flats disagrees
            with the telemetry value by more than 20 (humidity units).
    """
    log = get_logger()

    # Just read the frame header first, in case no correction is needed.
    frame_header = fitsio.read_header(args.infile, "FLUX")
    if args.use_sky_fibers:
        # Need the full frame to adjust the correction on the data.
        frame = read_frame(args.infile)
    else:
        frame = None

    cfinder = CalibFinder([frame_header])
    if not cfinder.haskey("FIBERFLATVSHUMIDITY"):
        # Nothing to correct: just link the input fiberflat to the output.
        log.info("No information on fiberflat vs humidity for camera {}, simply link the input fiberflat".format(frame_header["CAMERA"]))
        if not os.path.islink(args.outfile):
            relpath = os.path.relpath(args.fiberflat, os.path.dirname(args.outfile))
            os.symlink(relpath, args.outfile)
        return 0

    # Read the fiberflat to be corrected.
    calib_fiberflat = read_fiberflat(args.fiberflat)

    # Read the mean fiberflat vs humidity table.
    filename = cfinder.findfile("FIBERFLATVSHUMIDITY")
    # Fixed: the original f-string had no placeholder, so the path was never
    # logged.
    log.info(f"reading {filename}")
    mean_fiberflat_vs_humidity, humidity_array, ffh_wave, ffh_header = read_fiberflat_vs_humidity(filename)
    assert np.allclose(calib_fiberflat.wave, ffh_wave)

    # Find the humidity for this frame and for this fiberflat.
    night = frame_header["NIGHT"]
    camera = frame_header["CAMERA"]
    current_frame_humidity = get_humidity(night=night, expid=frame_header["EXPID"], camera=camera)
    log.info("humidity during current exposure={:.2f}".format(current_frame_humidity))

    # We can compute the correction now that we have everything in hand.
    improved_fiberflat = compute_humidity_corrected_fiberflat(calib_fiberflat, mean_fiberflat_vs_humidity, humidity_array, current_frame_humidity, frame=frame)

    # Add telemetry humidity for the dome flats for the record: scan the
    # exposures following the first flat until a non-FLAT exposure is found.
    first_expid = calib_fiberflat.header["EXPID"]
    calib_night = calib_fiberflat.header["NIGHT"]
    calib_humidity = [get_humidity(calib_night, first_expid, camera)]
    fiberflat_expid = [first_expid]
    for expid in range(first_expid + 1, first_expid + 40):
        raw_filename = findfile("raw", calib_night, expid)
        if not os.path.isfile(raw_filename):
            continue
        head = fitsio.read_header(raw_filename, 1)
        if "OBSTYPE" not in head.keys() or head["OBSTYPE"] != "FLAT":
            break
        fiberflat_expid.append(expid)
        calib_humidity.append(get_humidity(calib_night, expid, camera))
    log.debug("calib expids={}".format(fiberflat_expid))
    log.debug("calib humidities={}".format(calib_humidity))
    calib_humidity = np.mean(calib_humidity)
    if np.isnan(calib_humidity):
        log.warning("missing humidity info for fiber flat, use link to input")
        calib_humidity = 0.
    else:
        log.info("mean humidity during calibration exposures={:.2f}".format(calib_humidity))

    # Sanity check: best-fit humidity vs telemetry humidity.
    fit_humidity = improved_fiberflat.header["CALFHUM"]
    if np.abs(fit_humidity - calib_humidity) > 10:
        message = "large difference between best fit humidity during dome flats ({:.1f}) and value from telemetry ({:.1f})".format(fit_humidity, calib_humidity)
        if np.abs(fit_humidity - calib_humidity) > 20:
            log.error(message)
            raise RuntimeError(message)
        log.warning(message)
    improved_fiberflat.header["CALTHUM"] = (calib_humidity, "dome flat humidity from telemetry")

    # Write the corrected fiberflat.
    write_fiberflat(args.outfile, improved_fiberflat)
    log.info("wrote humidity corrected flat {}".format(args.outfile))
    return 0
| 42.60177
| 162
| 0.706689
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,311
| 0.272331
|
f759437ada91a66b6fb489d96b3bf88ad2d186f2
| 1,856
|
py
|
Python
|
config.py
|
jfernan2/PRInspector
|
c09aad3b49263d3c679fd6cfd307de404425f924
|
[
"MIT"
] | null | null | null |
config.py
|
jfernan2/PRInspector
|
c09aad3b49263d3c679fd6cfd307de404425f924
|
[
"MIT"
] | null | null | null |
config.py
|
jfernan2/PRInspector
|
c09aad3b49263d3c679fd6cfd307de404425f924
|
[
"MIT"
] | null | null | null |
# Toggles test mode; the exact effect is defined by consumers of this flag
# elsewhere in the project.
IS_TEST = True
# GitHub repository ("owner/name") the tool operates on.
REPOSITORY = 'cms-sw/cmssw'
def get_repo_url():
    """Returns the HTTPS GitHub URL of the configured repository."""
    return 'https://github.com/{}/'.format(REPOSITORY)
# Client certificate/key and cookie directory used for CERN single sign-on.
CERN_SSO_CERT_FILE = 'private/cert.pem'
CERN_SSO_KEY_FILE = 'private/cert.key'
CERN_SSO_COOKIES_LOCATION = 'private/'
# TWiki pages consulted/edited by the tool.
TWIKI_CONTACTS_URL = 'https://ppdcontacts.web.cern.ch/PPDContacts/ppd_contacts'
TWIKI_TAG_COLLECTOR_URL = 'https://twiki.cern.ch/twiki/bin/edit/CMS/DQMP5TagCollector?nowysiwyg=1'
TWIKI_TAG_COLLECTOR_CANCEL_EDIT_URL = 'https://twiki.cern.ch/twiki/bin/save/CMS/DQMP5TagCollector'
# categories_map.py maintained in the cms-bot repository.
CATEGORIES_MAP_URL = 'https://raw.githubusercontent.com/cms-sw/cms-bot/master/categories_map.py'
# Timeout in seconds for TWiki requests.
TWIKI_TIMEOUT_SECONDS = 10
# Lazily-loaded GitHub OAuth credentials; populated on first accessor call.
__github_client_id = None
__github_client_secret = None
def get_github_client_id():
    """Returns the GitHub OAuth client id, reading it from disk on first use.

    The value is cached in a module-level variable so the credentials file is
    read at most once per process.
    """
    global __github_client_id
    if __github_client_id is None:  # 'is None' instead of '== None'
        # Use a context manager so the file handle is closed (the original
        # leaked it). The second line of the file holds the client id.
        with open('private/github_oauth_data.txt', 'r') as oauth_file:
            __github_client_id = oauth_file.readlines()[1].strip()
    return __github_client_id
def get_github_client_secret():
    """Returns the GitHub OAuth client secret, reading it from disk on first use.

    The value is cached in a module-level variable so the credentials file is
    read at most once per process.
    """
    global __github_client_secret
    if __github_client_secret is None:  # 'is None' instead of '== None'
        # Use a context manager so the file handle is closed (the original
        # leaked it). The third line of the file holds the client secret.
        with open('private/github_oauth_data.txt', 'r') as oauth_file:
            __github_client_secret = oauth_file.readlines()[2].strip()
    return __github_client_secret
def get_subsystems():
    """Returns the hard-coded list of subsystem labels recognized by the tool."""
    names = (
        'l1t', 'hlt', 'tracker', 'sistrip', 'pixel', 'ecal', 'hcal',
        'dt', 'rpc', 'csc', 'ct-pps', 'ctpps', 'bril', 'gem', 'hgcal',
        'tracking', 'btag', 'vertexing', 'e-gamma', 'jetmet', 'lumi',
        'muon', 'tau', 'generators', 'hfnose', 'beamspot', 'jme', 'jet',
        'eventdisplay', 'castor', 'validation',
    )
    return list(names)
| 27.294118
| 98
| 0.581358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 646
| 0.34806
|
f75aab6d4d19a6e9c9dca13b0cb77061bc6e8325
| 5,748
|
py
|
Python
|
trainer/ml/utils.py
|
Telcrome/ai-trainer
|
54bca3252e194c054bdd3af2b94d6dde940a2a86
|
[
"MIT"
] | 1
|
2021-05-05T12:57:42.000Z
|
2021-05-05T12:57:42.000Z
|
trainer/ml/utils.py
|
Telcrome/ai-trainer
|
54bca3252e194c054bdd3af2b94d6dde940a2a86
|
[
"MIT"
] | null | null | null |
trainer/ml/utils.py
|
Telcrome/ai-trainer
|
54bca3252e194c054bdd3af2b94d6dde940a2a86
|
[
"MIT"
] | null | null | null |
from enum import Enum
from typing import Generator, Tuple, Iterable, Dict, List
import cv2
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy.ndimage import label, generate_binary_structure
from scipy.ndimage.morphology import distance_transform_edt as dist_trans
import trainer.lib as lib
class ImageNormalizations(Enum):
    """Supported image normalization schemes (see normalize_im)."""
    # Maps pixel intensities from [0, 255] to [-1, 1].
    UnitRange = 1
def duplicate_columns(data, minoccur=2):
    """Finds groups of identical columns in a 2D array.

    Args:
        data: 2D array whose columns are compared element-wise.
        minoccur: minimum group size for a group to be reported.

    Returns:
        List of index arrays; each array holds the column indices of one
        group of at least ``minoccur`` identical columns.
    """
    order = np.lexsort(data)
    columns = data.T
    # True wherever two adjacent (sorted) columns differ in any element.
    changed = np.any(columns[order[1:]] != columns[order[:-1]], axis=1)
    boundaries = np.where(changed)[0] + 1
    groups = np.split(order, boundaries)
    return [group for group in groups if len(group) >= minoccur]
def pad(small_arr: np.ndarray, size=(30, 30)) -> np.ndarray:
    """Zero-pads a 2D array to at least ``size`` (never crops).

    Args:
        small_arr: 2D input array, copied into the top-left corner.
        size: minimum (rows, cols) of the result.

    Returns:
        int32 array of shape ``max(input, size)`` per axis, zero-filled
        outside the input region.
    """
    rows = max(small_arr.shape[0], size[0])
    cols = max(small_arr.shape[1], size[1])
    padded = np.zeros((rows, cols), dtype=np.int32)
    padded[:small_arr.shape[0], :small_arr.shape[1]] = small_arr
    return padded
def split_into_regions(arr: np.ndarray, mode=0) -> List[np.ndarray]:
    """Splits an array into its coherent regions.

    :param mode: 0 for orthogonal connection, 1 for full connection
    :param arr: Numpy array with shape [W, H]
    :return: A list with length #NumberOfRegions of boolean arrays [W, H]
    """
    if mode == 0:
        labelled, region_count = label(arr)
    elif mode == 1:
        labelled, region_count = label(arr, structure=generate_binary_structure(2, 2))
    else:
        raise Exception("Please specify a valid Neighborhood mode for split_into_regions")
    return [labelled == region_id for region_id in range(1, region_count + 1)]
def normalize_im(im: np.ndarray, norm_type=ImageNormalizations.UnitRange) -> np.ndarray:
    """Normalizes an image with pixel intensities in [0, 255] to [-1, 1].

    :param im: input image
    :param norm_type: normalization scheme; only UnitRange is supported
    :return: the normalized float32 image
    """
    if norm_type != ImageNormalizations.UnitRange:
        raise Exception("Unknown Normalization type")
    return (im.astype(np.float32) / 127.5) - 1
def distance_transformed(mask: np.ndarray) -> np.ndarray:
    """Returns, per pixel, the Euclidean distance to the nearest True pixel.

    True pixels of ``mask`` map to 0; False pixels get their distance to the
    closest True pixel.

    Args:
        mask: boolean (or castable) 2D array.

    Returns:
        float array of distances.
    """
    # The np.bool alias was removed in NumPy 1.24; use the builtin bool dtype.
    if mask.dtype != bool:
        mask = mask.astype(bool)
    return dist_trans(np.invert(mask).astype(np.float32))
def one_hot_to_cont(x: np.ndarray) -> np.ndarray:
    """Convert a one hot encoded image into the same image with integer representations.

    :param x: np.ndarray with (C, W, H)
    :return: np.ndarray with (W, H)
    """
    # The channel axis is the third-from-last dimension.
    channel_axis = x.ndim - 3
    return np.argmax(x, axis=channel_axis)
def cont_to_ont_hot(arr: np.ndarray, n_values=-1) -> np.ndarray:
    """One-hot encodes an integer-valued array along a new leading axis.

    :param arr: integer array of shape (W, H)
    :param n_values: number of channels; -1 means ``max(arr) + 1``
    :return: array of shape (n_values, W, H) with a 1 at the value's channel
    """
    if n_values == -1:
        n_values = np.max(arr) + 1
    encoded = np.zeros((n_values,) + arr.shape)
    for value in np.unique(arr):
        encoded[value, :, :][arr == value] = 1
    return encoded
def reduce_by_attention(arr: np.ndarray, att: np.ndarray):
    """Crops ``arr`` to the bounding box of the nonzero entries of ``att``.

    :param arr: target array; its last two dims must match ``att``'s shape
    :param att: field of attention
    :return: (cropped arr, cropped att, (lmost, rmost, bmost, tmost))
    """
    assert arr.shape[-2] == att.shape[0] and arr.shape[-1] == att.shape[1]
    nonzero = np.argwhere(att)
    row_lo, row_hi = np.min(nonzero[:, 0]), np.max(nonzero[:, 0]) + 1
    col_lo, col_hi = np.min(nonzero[:, 1]), np.max(nonzero[:, 1]) + 1
    # Leading dimensions are kept whole; only the trailing two are cropped.
    index = [slice(None)] * (arr.ndim - 2)
    index += [slice(row_lo, row_hi), slice(col_lo, col_hi)]
    return (arr[tuple(index)],
            att[row_lo:row_hi, col_lo:col_hi],
            (row_lo, row_hi, col_lo, col_hi))
def pair_augmentation(g: Iterable[Tuple[np.ndarray, np.ndarray, int]], aug_ls) -> Iterable[Tuple[np.ndarray, np.ndarray, int]]:
    """Applies the same imgaug augmentations to each (image, mask) pair.

    NOTE(review): the original annotation declared 2-tuples, but the loop
    unpacks three values; the hint was corrected to 3-tuples.

    :param g: generator yielding (im, gt, frame_number) triples
    :param aug_ls: list of imgaug augmenters, combined into one Sequential
    :return: generator of (augmented image, augmented mask, frame_number),
        both arrays cast to float32
    """
    # Local import: presumably keeps imgaug an optional dependency -- confirm.
    import imgaug.augmenters as iaa
    seq = iaa.Sequential(aug_ls)
    for im, gt, frame_number in g:
        # assumes im is 4-D with a trailing channel axis (im.shape[3]) -- TODO confirm
        im_prep = im[frame_number] if im.shape[3] > 1 else im.squeeze()
        gt_prep = np.expand_dims(gt, len(gt.shape))
        images_aug = seq(images=[im_prep], segmentation_maps=[gt_prep])
        yield images_aug[0][0].astype(np.float32), images_aug[1][0][:, :, 0].astype(np.float32), frame_number
def insert_np_at(a1: np.ndarray, a2: np.ndarray, pos: Tuple[int, int], filter_arr=None) -> np.ndarray:
    """Copies ``a2`` into a copy of ``a1`` at ``pos``, cropping any overflow.

    :param a1: 2D destination array (not modified)
    :param a2: 2D source array
    :param pos: (row, col) of the top-left corner where ``a2`` is placed
    :param filter_arr: optional boolean mask over ``a2``; only True cells copy
    :return: a new array with the (possibly cropped) ``a2`` inserted
    """
    assert len(a1.shape) == 2 and len(a2.shape) == 2
    if filter_arr is None:
        # The np.bool alias was removed in NumPy 1.24; use the builtin bool.
        filter_arr = np.ones_like(a2).astype(bool)
    x, y = pos
    res = np.copy(a1)
    # Destination slices, clipped to a1's bounds.
    a1_x = slice(x, min(x + a2.shape[0], a1.shape[0]))
    a1_y = slice(y, min(y + a2.shape[1], a1.shape[1]))
    # Matching source slices; the negative stop crops the overflowing tail.
    if x + a2.shape[0] <= a1.shape[0]:
        a2_x = slice(0, a2.shape[0])
    else:
        a2_x = slice(0, a1.shape[0] - (x + a2.shape[0]))
    if y + a2.shape[1] <= a1.shape[1]:
        a2_y = slice(0, a2.shape[1])
    else:
        a2_y = slice(0, a1.shape[1] - (y + a2.shape[1]))
    item_filter = filter_arr[(a2_x, a2_y)]
    assert res[(a1_x, a1_y)].shape == a2[(a2_x, a2_y)].shape
    res[(a1_x, a1_y)][item_filter] = a2[(a2_x, a2_y)][item_filter]
    return res
if __name__ == '__main__':
    # Smoke-checks for insert_np_at: exact fit, overflow along one axis,
    # and overflow along both axes.
    fit = insert_np_at(np.ones((10, 10)), np.ones((3, 3)) * 2, (2, 3))
    too_big1 = insert_np_at(np.ones((10, 10)), np.ones((3, 10)) * 2, (2, 3))
    too_big = insert_np_at(np.ones((10, 10)), np.ones((10, 10)) * 2, (2, 3))
# def put_array(big_arr: np.ndarray, small_arr: np.ndarray, offset=(0, 0)) -> np.ndarray:
# """
# Puts the small array into the big array. Ignores problems and does its best to fulfill the task
# """
# b, t =
# big_arr[]
# big_arr = np.putmask(big_arr, )
# if __name__ == '__main__':
# # a = np.zeros((10, 10))
# # b = np.random.random((4, 4))
# # c = put_array(a, b)
# # lib.logger.debug_var(c)
| 35.04878
| 117
| 0.636047
| 50
| 0.008699
| 527
| 0.091684
| 0
| 0
| 0
| 0
| 1,507
| 0.262178
|
f75b61928d5ab139ffa7aac0fa8d2448bbd25e2c
| 7,613
|
py
|
Python
|
widgets/ImageDetailArea.py
|
JaySon-Huang/SecertPhotos
|
e741cc26c19a5b249d45cc70959ac6817196cb8a
|
[
"MIT"
] | null | null | null |
widgets/ImageDetailArea.py
|
JaySon-Huang/SecertPhotos
|
e741cc26c19a5b249d45cc70959ac6817196cb8a
|
[
"MIT"
] | 3
|
2015-05-19T08:43:46.000Z
|
2015-06-10T17:55:28.000Z
|
widgets/ImageDetailArea.py
|
JaySon-Huang/SecertPhotos
|
e741cc26c19a5b249d45cc70959ac6817196cb8a
|
[
"MIT"
] | null | null | null |
from PyQt5.QtCore import Qt, pyqtSignal, QSize
from PyQt5.QtWidgets import (
QLabel, QWidget, QTreeWidgetItem, QHeaderView,
QVBoxLayout, QHBoxLayout,
)
from .ImageLabel import ImageLabel
from .AdaptiveTreeWidget import AdaptiveTreeWidget
class ImageDetailArea(QWidget):
# signal
imageLoaded = pyqtSignal()
imageCleared = pyqtSignal()
# static strings
strings = {
'filename': 'filename: %s (%s)',
'size': 'size: %d x %d',
'component': 'Components (%d in total)',
'quantization': 'Quantization tables (%d in total)',
'huffman': 'Huffman tables (%d for DC, %d for AC)',
'showedComponentsInfo': [
'dc_tbl_no',
'ac_tbl_no',
'quant_tbl_no',
'h_samp_factor',
'v_samp_factor',
],
}
def __init__(self, parent=None):
super().__init__(parent)
self.verticalLayout = QVBoxLayout(self)
self.verticalLayout.setObjectName("verticalLayout")
# title
self.lb_title = QLabel(self)
self.lb_title.setAlignment(Qt.AlignCenter)
self.lb_title.setObjectName("lb_title")
self.verticalLayout.addWidget(self.lb_title)
# filename && size
self.horizontalLayout = QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.lb_filename = QLabel(self)
self.lb_filename.setObjectName("lb_filename")
self.horizontalLayout.addWidget(self.lb_filename)
self.lb_size = QLabel(self)
self.lb_size.setObjectName("lb_size")
self.horizontalLayout.addWidget(self.lb_size)
self.verticalLayout.addLayout(self.horizontalLayout)
# image preview
self.lb_image = ImageLabel(self)
self.lb_image.setMinimumSize(QSize(250, 250))
self.lb_image.setAlignment(Qt.AlignCenter)
self.lb_image.setObjectName("lb_image")
self.verticalLayout.addWidget(self.lb_image)
# components
self.lb_components = QLabel(self)
self.lb_components.setObjectName("lb_components")
self.verticalLayout.addWidget(self.lb_components)
self.treeWidget_components = AdaptiveTreeWidget(self)
self.treeWidget_components.setUniformRowHeights(True)
self.treeWidget_components.setObjectName("treeWidget_components")
self.treeWidget_components.setColumnCount(3)
self.treeWidget_components.headerItem().setTextAlignment(
0, Qt.AlignLeft | Qt.AlignVCenter
)
self.treeWidget_components.headerItem().setTextAlignment(
1, Qt.AlignLeft | Qt.AlignVCenter
)
self.treeWidget_components.headerItem().setText(0, "ID")
self.treeWidget_components.headerItem().setText(1, "Property")
self.treeWidget_components.headerItem().setText(2, "Value")
self.treeWidget_components.header().setSectionResizeMode(
QHeaderView.ResizeToContents
)
self.verticalLayout.addWidget(self.treeWidget_components)
# quant tables
self.lb_quantTbls = QLabel(self)
self.lb_quantTbls.setObjectName("lb_quantTbls")
self.verticalLayout.addWidget(self.lb_quantTbls)
self.treeWidget_quantTbls = AdaptiveTreeWidget(self)
self.treeWidget_quantTbls.setObjectName("treeWidget_quantTbls")
self.treeWidget_quantTbls.setColumnCount(3)
self.treeWidget_quantTbls.headerItem().setTextAlignment(
0, Qt.AlignLeft | Qt.AlignVCenter
)
self.treeWidget_quantTbls.headerItem().setTextAlignment(
1, Qt.AlignLeft | Qt.AlignVCenter
)
self.treeWidget_quantTbls.headerItem().setText(0, "ID")
self.treeWidget_quantTbls.headerItem().setText(1, "Property")
self.treeWidget_quantTbls.headerItem().setText(2, "Value")
self.verticalLayout.addWidget(self.treeWidget_quantTbls)
# huffman tables
self.lb_huffTbls = QLabel(self)
self.lb_huffTbls.setObjectName("lb_huffTbls")
self.verticalLayout.addWidget(self.lb_huffTbls)
self.treeWidget_huffTbls = AdaptiveTreeWidget(self)
self.treeWidget_huffTbls.setObjectName("treeWidget_huffTbls")
self.treeWidget_huffTbls.setColumnCount(3)
self.treeWidget_huffTbls.headerItem().setTextAlignment(
0, Qt.AlignLeft | Qt.AlignVCenter
)
self.treeWidget_huffTbls.headerItem().setTextAlignment(
1, Qt.AlignLeft | Qt.AlignVCenter
)
self.treeWidget_huffTbls.headerItem().setText(0, "ID")
self.treeWidget_huffTbls.headerItem().setText(1, "Property")
self.treeWidget_huffTbls.headerItem().setText(2, "Value")
self.verticalLayout.addWidget(self.treeWidget_huffTbls)
self.setTitle('( None )')
self.clear()
def setTitle(self, title):
self.lb_title.setText(title)
def clear(self):
self.image = None
self.lb_filename.setText(
self.strings['filename'] % ('', 'NO image loaded')
)
self.lb_size.setText(
self.strings['size'] % (0, 0)
)
self.lb_image.clear()
self.lb_components.setText(
self.strings['component'] % 0
)
self.treeWidget_components.clear()
self.lb_quantTbls.setText(
self.strings['quantization'] % 0
)
self.treeWidget_quantTbls.clear()
self.lb_huffTbls.setText(
self.strings['huffman'] % (0, 0)
)
self.treeWidget_huffTbls.clear()
self.imageCleared.emit()
def setImage(self, image):
    """Load `image` into the panel and emit `imageLoaded`.

    Resets the panel via `clear()`, then populates the filename/size
    labels, the 300x300 preview, and the component, quantization-table
    and Huffman-table trees.

    NOTE(review): `image` is expected to expose `filename`, `size`,
    `comp_infos`, `quant_tbls`, `dc_huff_tbls` and `ac_huff_tbls`;
    the providing type is not visible in this file -- confirm.
    """
    self.clear()
    self.image = image
    self.lb_filename.setText(
        self.strings['filename'] % (image.filename, 'original')
    )
    self.lb_size.setText(
        self.strings['size'] % image.size
    )
    self.lb_image.setImageMemSrc(image, 300, 300)
    # components: one top-level row per component, one child per property
    for comp in image.comp_infos:
        topItem = QTreeWidgetItem(
            self.treeWidget_components,
            [str(comp['component_id']), '', '']
        )
        for key in self.strings['showedComponentsInfo']:
            QTreeWidgetItem(topItem, ['', key, str(comp[key])])
    self.lb_components.setText(
        self.strings['component'] % len(image.comp_infos)
    )
    # quantization tables
    self.lb_quantTbls.setText(
        self.strings['quantization'] % len(image.quant_tbls)
    )
    for i, quant_tbl in enumerate(image.quant_tbls):
        topItem = QTreeWidgetItem(
            self.treeWidget_quantTbls,
            [str(i), '', '']
        )
        for key in quant_tbl:
            QTreeWidgetItem(topItem, ['', key, str(quant_tbl[key])])
    # huffman tables: DC tables first, then AC, in the same tree widget
    self.lb_huffTbls.setText(
        self.strings['huffman'] % (
            len(image.dc_huff_tbls),
            len(image.ac_huff_tbls)
        )
    )
    for i, hufftbl in enumerate(image.dc_huff_tbls):
        topItem = QTreeWidgetItem(
            self.treeWidget_huffTbls,
            [str(i), 'type', 'DC']
        )
        for key in hufftbl:
            QTreeWidgetItem(topItem, ['', key, str(hufftbl[key])])
    for i, hufftbl in enumerate(image.ac_huff_tbls):
        topItem = QTreeWidgetItem(
            self.treeWidget_huffTbls,
            [str(i), 'type', 'AC']
        )
        for key in hufftbl:
            QTreeWidgetItem(topItem, ['', key, str(hufftbl[key])])
    self.imageLoaded.emit()
| 38.64467
| 73
| 0.620255
| 7,363
| 0.967161
| 0
| 0
| 0
| 0
| 0
| 0
| 885
| 0.116249
|
f75bd46e6e679f347d07fe04e940962382046dd8
| 3,005
|
py
|
Python
|
predict.py
|
zhyq/cws_lstm
|
326980e0971482fc712602d3a79069e69a11c8fc
|
[
"Apache-2.0"
] | 7
|
2018-04-28T02:32:51.000Z
|
2020-02-11T07:14:51.000Z
|
predict.py
|
zhyq/cws_lstm
|
326980e0971482fc712602d3a79069e69a11c8fc
|
[
"Apache-2.0"
] | null | null | null |
predict.py
|
zhyq/cws_lstm
|
326980e0971482fc712602d3a79069e69a11c8fc
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import data_helper
from sklearn.model_selection import train_test_split
import re
import lstm
from lstm import *
import time
from viterbi import Viterbi
xrange = range
def simple_cut(text,dh,lm,viterbi):
    """Predict BMES tags for one text fragment (sentences are split into
    fragments at punctuation) and cut it into a list of words.

    NOTE(review): relies on a module-level TensorFlow session `sess` not
    defined in this module's visible scope -- presumably provided by
    `from lstm import *`; confirm.
    """
    if text:
        #print("text: %s" %text)
        text_len = len(text)
        X_batch = dh.text2ids(text)  # each batch here is a single sample
        fetches = [lm.y_pred]
        feed_dict = {lm.X_inputs:X_batch, lm.lr:1.0, lm.batch_size:1, lm.keep_prob:1.0}
        _y_pred = sess.run(fetches, feed_dict)[0][:text_len]  # drop the padded tail
        # per-character scores for the s/b/m/e tags (column 0 is skipped)
        nodes = [dict(zip(['s','b','m','e'], each[1:])) for each in _y_pred]
        #print(type(dh.labels))
        #print(dh.labels)
        tags = viterbi.viterbi(nodes)
        words = []
        for i in range(len(text)):
            if tags[i] in ['s', 'b']:
                # 's' (single) and 'b' (begin) start a new word
                words.append(text[i])
            else:
                # 'm' (middle) / 'e' (end) extend the current word
                words[-1] += text[i]
        return words
    else:
        return []
def cut_word(sentence,dh,lm,viterbi):
    """Split `sentence` into fragments at punctuation / ASCII runs, segment
    each fragment with `simple_cut`, and return the combined word list.
    """
    # Runs of digits/letters/spaces and single (CJK or ASCII) punctuation
    # marks separate fragments and are kept verbatim in the output.
    not_cuts = re.compile(u'([0-9\da-zA-Z ]+)|[。,、?!.\.\?,!]')
    result = []
    start = 0
    for seg_sign in not_cuts.finditer(sentence):
        result.extend(simple_cut(sentence[start:seg_sign.start()],dh,lm,viterbi))
        result.append(sentence[seg_sign.start():seg_sign.end()])
        start = seg_sign.end()
    # segment whatever follows the last separator
    result.extend(simple_cut(sentence[start:],dh,lm,viterbi))
    return result
def predict(dh,lm,viterbi,sentence):
    """Segment `sentence` and print the words joined by ' / '."""
    # 例一 (example one)
    words = cut_word(sentence, dh, lm, viterbi)
    rendered = ''.join(word + ' / ' for word in words)
    print(rendered)
def main():
    """Parse command-line args, build the data helper and, for the
    "predict" action, run an interactive segmentation loop with a
    restored bi-LSTM model."""
    parser = argparse.ArgumentParser(description = "lstm segment args.")
    parser.add_argument("-a","--action",type=str,default="predict",help="train or predict")
    parser.add_argument("-c","--corpus",type=str,default="data/msr_train.txt",help="train file")
    parser.add_argument("-v","--vocab_model",type=str,default="model/vocab_model.pkl",help="vocab model file")
    parser.add_argument("-m","--lstm_model",type=str,default="model/bi-lstm.ckpt-6",help="lstm model file")
    args = parser.parse_args()
    corpus = args.corpus
    vocab_model = args.vocab_model
    action = args.action
    lstm_model = args.lstm_model
    dh = data_helper.DataHelper(vocab_model)
    dh.datahander(corpus)
    #dh.loadmodel(vocab_model)
    if action == "predict":
        lm = lstm.LSTM(lstm_model)
        viterbi = Viterbi(dh.labels)
        # NOTE(review): `tf` and `sess` are not defined in this module's
        # visible scope -- presumably provided by `from lstm import *`;
        # confirm.
        saver = tf.train.Saver()
        saver.restore(sess, lm.model_path)
        sentence = u'人们思考问题往往不是从零开始的。就好像你现在阅读这篇文章一样,你对每个词的理解都会依赖于你前面看到的一些词,而不是把你前面看的内容全部抛弃了,忘记了,再去理解这个单词。也就是说,人们的思维总是会有延续性的。'
        predict(dh,lm,viterbi,sentence)
        # interactive loop: segment user input until the user types EXIT
        while True:
            sentence = input("input words for cut .EXIT for exit:\n")
            if sentence == "EXIT":
                break
            predict(dh,lm,viterbi,sentence)
| 33.388889
| 125
| 0.628619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,063
| 0.313662
|
f75c066d3ec31ec2f99d70612e1572ff45c4ae07
| 930
|
py
|
Python
|
Regression/multiple_linear_regression.py
|
Rupii/Machine-Learning
|
2b00698815efb04346d5cb980b68af76f27a5ca6
|
[
"MIT"
] | null | null | null |
Regression/multiple_linear_regression.py
|
Rupii/Machine-Learning
|
2b00698815efb04346d5cb980b68af76f27a5ca6
|
[
"MIT"
] | null | null | null |
Regression/multiple_linear_regression.py
|
Rupii/Machine-Learning
|
2b00698815efb04346d5cb980b68af76f27a5ca6
|
[
"MIT"
] | 1
|
2019-09-04T05:43:31.000Z
|
2019-09-04T05:43:31.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 24 23:18:54 2018

@author: Rupesh

Multiple Linear Regression on the 50 Startups dataset:
one-hot encode the categorical State column, split, fit, predict.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

plt.style.use("ggplot")

# loading dependies
df = pd.read_csv("50_Startups.csv")
df.head()

X = df.iloc[:, :-1].values
y = df.iloc[:, 4].values

# Encode the categorical "State" column (index 3) as one-hot dummies.
# Fix: OneHotEncoder(categorical_features=[3]) was removed from sklearn;
# ColumnTransformer is the supported way to encode a single column.
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer

ct = ColumnTransformer(
    [("state", OneHotEncoder(), [3])], remainder="passthrough"
)
X = ct.fit_transform(X)

# avoiding the dummy variable trap: drop the first dummy column
X = X[:, 1:]

# train test split
# Fix: sklearn.cross_validation was removed; use sklearn.model_selection.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

# model
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(X_train, y_train)

# predict
y_pred = reg.predict(X_test)
| 19.787234
| 74
| 0.731183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 224
| 0.24086
|
f75c6aa4c0bc9e6a0583632570a241f0d5700804
| 2,945
|
py
|
Python
|
2021/day09/part01/smoke_basin.py
|
fpmosley/advent-of-code
|
507bd89795ff6a0824284c3c8d2123cf19a932a3
|
[
"MIT"
] | null | null | null |
2021/day09/part01/smoke_basin.py
|
fpmosley/advent-of-code
|
507bd89795ff6a0824284c3c8d2123cf19a932a3
|
[
"MIT"
] | null | null | null |
2021/day09/part01/smoke_basin.py
|
fpmosley/advent-of-code
|
507bd89795ff6a0824284c3c8d2123cf19a932a3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
Advent of Code 2021 - Day 9: Smoke Basin (Part 1)
https://adventofcode.com/2021/day/9
'''
import numpy as np
class HeightMap():
    """A 2-D grid of integer heights with orthogonal low-point detection."""

    def __init__(self) -> None:
        # The grid starts empty; rows are stacked in as they arrive.
        self._grid = np.array([])

    def add_row(self, row):
        """Append one row of heights to the bottom of the grid."""
        new_row = np.array(row)
        if self._grid.size == 0:
            self._grid = new_row
        else:
            self._grid = np.vstack([self._grid, new_row])

    def find_low_points(self, radius=1):
        """Return all heights strictly lower than every orthogonal neighbor."""
        lows = []
        for coords, height in np.ndenumerate(self._grid):
            if height < min(self._neighbors(radius, coordinates=coords)):
                lows.append(height)
        return lows

    def _neighbors(self, radius, coordinates=(0, 0)):
        """Collect the up/left/right/down neighbor values of one cell,
        skipping directions that fall off the grid edge."""
        r = coordinates[0]
        c = coordinates[1]
        values = []
        if r >= 1:  # up
            values.append(self._grid[r - radius, c])
        if c >= 1:  # left
            values.append(self._grid[r, c - radius])
        if c < len(self._grid[0]) - radius:  # right
            values.append(self._grid[r, c + radius])
        if r < len(self._grid) - radius:  # down
            values.append(self._grid[r + radius, c])
        return values

    def __str__(self) -> str:
        # Each height right-aligned in a 3-char field, one line per row.
        rendered = ""
        for row in self._grid:
            rendered += "".join(f"{value:>3}" for value in row) + "\n"
        return rendered
def calculate_risk(heights):
    """Sum the risk levels; a point's risk level is its height plus one."""
    total = 0
    for height in heights:
        total += height + 1
    return total
def main():
    """Read a heightmap file, print the grid, and report the summed risk
    levels of all low points (Advent of Code 2021 day 9, part 1)."""
    filename = input("What is the input file name? ")

    try:
        with open(filename, "r") as file:
            # Create a new board
            area = HeightMap()

            # Read the rows and set up the HeightMap (one digit per cell)
            for line in file:
                line = line.strip()
                input_row = [int(x) for x in str(line)]
                area.add_row(input_row)

            print("The input grid: ")
            print(area)

            low_points = area.find_low_points()
            sum_risk_levels = calculate_risk(
                low_points) if low_points else None

            if sum_risk_levels:
                low_points_str = [str(point) for point in low_points]
                print(f"Number of low points: {len(low_points)}")
                print(f"Low points: {', '.join(low_points_str)}")
                print(
                    f"\nThe sum of the risk levels of all low points is: {sum_risk_levels}\n")
            else:
                print("The sum of the risk levels of all low points not found.\n")
    except FileNotFoundError:
        # Bug fix: the message previously printed a literal placeholder
        # instead of the offending path.
        print(f"No such file or directory: '{filename}'")
if __name__ == "__main__":
main()
| 27.523364
| 94
| 0.55382
| 1,497
| 0.508319
| 0
| 0
| 0
| 0
| 0
| 0
| 637
| 0.216299
|
f75d3544ffa19cc489ce532ee8d14ab4f09c6953
| 3,830
|
py
|
Python
|
datar/base/trig_hb.py
|
stjordanis/datar
|
4e2b5db026ad35918954576badef9951928c0cb1
|
[
"MIT"
] | 110
|
2021-03-09T04:10:40.000Z
|
2022-03-13T10:28:20.000Z
|
datar/base/trig_hb.py
|
sthagen/datar
|
1218a549e2f0547c7b5a824ca6d9adf1bf96ba46
|
[
"MIT"
] | 54
|
2021-06-20T18:53:44.000Z
|
2022-03-29T22:13:07.000Z
|
datar/base/trig_hb.py
|
sthagen/datar
|
1218a549e2f0547c7b5a824ca6d9adf1bf96ba46
|
[
"MIT"
] | 11
|
2021-06-18T03:03:14.000Z
|
2022-02-25T11:48:26.000Z
|
"""Trigonometric and Hyperbolic Functions"""
from typing import Callable
import numpy
from pipda import register_func
from ..core.contexts import Context
from ..core.types import FloatOrIter
from .constants import pi
def _register_trig_hb_func(name: str, np_name: str, doc: str) -> Callable:
    """Register trigonometric and hyperbolic function

    Args:
        name: Public name of the resulting verb (e.g. "asin", "sinpi")
        np_name: The numpy implementation to delegate to (e.g. "arcsin")
        doc: Docstring installed on the returned function

    Returns:
        A pipda-registered callable named `name`.
    """
    np_fun = getattr(numpy, np_name)
    if name.endswith("pi"):
        # The "*pi" variants (sinpi/cospi/tanpi) take x in multiples of pi.
        func = lambda x: np_fun(x * pi)
    else:
        # ufunc cannot set context
        func = lambda x: np_fun(x)

    # Wrap as a pipda verb evaluated with data context EVAL.
    func = register_func(None, context=Context.EVAL, func=func)
    func.__name__ = name
    func.__doc__ = doc
    return func
sin = _register_trig_hb_func(
"sin",
"sin",
doc="""The sine function
Args:
x: a numeric value or iterable
Returns:
The sine value of `x`
""",
)
cos = _register_trig_hb_func(
"cos",
"cos",
doc="""The cosine function
Args:
x: a numeric value or iterable
Returns:
The cosine value of `x`
""",
)
tan = _register_trig_hb_func(
"tan",
"tan",
doc="""The tangent function
Args:
x: a numeric value or iterable
Returns:
The tangent value of `x`
""",
)
acos = _register_trig_hb_func(
"acos",
"arccos",
doc="""The arc-cosine function
Args:
x: a numeric value or iterable
Returns:
The arc-cosine value of `x`
""",
)
# Bug fix: this was registered under the name "acos", clobbering
# `asin.__name__`; the correct public name is "asin".
asin = _register_trig_hb_func(
    "asin",
    "arcsin",
    doc="""The arc-sine function

    Args:
        x: a numeric value or iterable

    Returns:
        The arc-sine value of `x`
    """,
)
# Bug fix: this was registered under the name "acos" with an "arc-sine"
# docstring (copy-paste error); the correct name is "atan".
atan = _register_trig_hb_func(
    "atan",
    "arctan",
    doc="""The arc-tangent function

    Args:
        x: a numeric value or iterable

    Returns:
        The arc-tangent value of `x`
    """,
)
sinpi = _register_trig_hb_func(
"sinpi",
"sin",
doc="""The sine function
Args:
x: a numeric value or iterable, which is the multiple of pi
Returns:
The sine value of `x`
""",
)
cospi = _register_trig_hb_func(
"cospi",
"cos",
doc="""The cosine function
Args:
x: a numeric value or iterable, which is the multiple of pi
Returns:
The cosine value of `x`
""",
)
tanpi = _register_trig_hb_func(
"tanpi",
"tan",
doc="""The tangent function
Args:
x: a numeric value or iterable, which is the multiple of pi
Returns:
The tangent value of `x`
""",
)
cosh = _register_trig_hb_func(
"cosh",
"cosh",
doc="""Hyperbolic cosine
Args:
x: a numeric value or iterable
Returns:
The hyperbolic cosine value of `x`
""",
)
sinh = _register_trig_hb_func(
"sinh",
"sinh",
doc="""Hyperbolic sine
Args:
x: a numeric value or iterable
Returns:
The hyperbolic sine value of `x`
""",
)
tanh = _register_trig_hb_func(
"tanh",
"tanh",
doc="""Hyperbolic tangent
Args:
x: a numeric value or iterable
Returns:
The hyperbolic tangent value of `x`
""",
)
acosh = _register_trig_hb_func(
"acosh",
"arccosh",
doc="""Hyperbolic arc-cosine
Args:
x: a numeric value or iterable
Returns:
The hyperbolic arc-cosine value of `x`
""",
)
asinh = _register_trig_hb_func(
"asinh",
"arcsinh",
doc="""Hyperbolic arc-sine
Args:
x: a numeric value or iterable
Returns:
The hyperbolic arc-sine value of `x`
""",
)
atanh = _register_trig_hb_func(
"atanh",
"arctanh",
doc="""Hyperbolic arc-tangent
Args:
x: a numeric value or iterable
Returns:
The hyperbolic arc-tangent value of `x`
""",
)
@register_func(None, context=Context.EVAL)
def atan2(y: FloatOrIter, x: FloatOrIter) -> FloatOrIter:
    """Calculates the angle between the x-axis and the vector (0,0) -> (x,y)

    Args:
        y: and
        x: The end coordinates of the vector

    Returns:
        The angle between x-axis and vector (0,0) -> (x,y)
    """
    # Delegates directly to numpy's quadrant-aware two-argument arctangent.
    return numpy.arctan2(y, x)
| 16.228814
| 76
| 0.636031
| 0
| 0
| 0
| 0
| 360
| 0.093995
| 0
| 0
| 2,305
| 0.601828
|
f75e1e987d9f182ed96fa5d1a87db15f1d90fda6
| 1,463
|
py
|
Python
|
randomised_selection.py
|
neerajp99/algorithms
|
1d6885d2a895821ac511fa8a46913d34db2511ca
|
[
"MIT"
] | 1
|
2021-06-17T07:59:42.000Z
|
2021-06-17T07:59:42.000Z
|
randomised_selection.py
|
neerajp99/algorithms
|
1d6885d2a895821ac511fa8a46913d34db2511ca
|
[
"MIT"
] | null | null | null |
randomised_selection.py
|
neerajp99/algorithms
|
1d6885d2a895821ac511fa8a46913d34db2511ca
|
[
"MIT"
] | 1
|
2022-01-13T08:42:31.000Z
|
2022-01-13T08:42:31.000Z
|
# Implementation of Randomised Selection
"""
Naive Approach
---------
Parameters
---------
An arry with n distinct numbers
---------
Returns
---------
i(th) order statistic, i.e: i(th) smallest element of the input array
---------
Time Complexity
---------
O(n.logn)
---------
Test Cases
---------
[1, 20, 6, 4, 5]
=> [1, 4, 5, 6, 20]
"""
import random
def randomised_selection(unsorted_array, length_of_array, i_order_statistic):
    """Return the i-th smallest element (1-indexed) via randomized quickselect.

    Args:
        unsorted_array: list of comparable values (duplicates allowed)
        length_of_array: len(unsorted_array); kept for interface compatibility
        i_order_statistic: rank to select, 1 <= i <= length_of_array

    Returns:
        The i-th order statistic of the array.

    Bug fix: the original never partitioned around the pivot (pivot_left /
    pivot_right were built but left empty and unused) and compared the
    random pivot *index* against the requested rank, which is meaningless.
    This version partitions properly; expected running time is O(n).
    """
    if length_of_array == 1:
        # Base case: the single remaining element is the answer.
        return unsorted_array[0]
    pivot = unsorted_array[random.randrange(length_of_array)]
    pivot_left = [value for value in unsorted_array if value < pivot]
    pivot_right = [value for value in unsorted_array if value > pivot]
    rank_lo = len(pivot_left)                     # ranks 1..rank_lo are left of pivot
    rank_hi = length_of_array - len(pivot_right)  # ranks rank_lo+1..rank_hi equal pivot
    if rank_lo < i_order_statistic <= rank_hi:
        return pivot
    if i_order_statistic <= rank_lo:
        return randomised_selection(pivot_left, len(pivot_left), i_order_statistic)
    return randomised_selection(
        pivot_right, len(pivot_right), i_order_statistic - rank_hi)
if __name__ == "__main__":
# user_input = input("Enter the list of numbers: \n").strip()
# unsorted_array = [int(item) for item in user_input.split(",")]
print(randomised_selection([1, 23, 3, 43, 5], 5, 3))
| 26.6
| 141
| 0.601504
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 588
| 0.401914
|
f75e9b9f00f9f84646e14e8f9e1b2be7266630e1
| 365
|
py
|
Python
|
fate/labeling/txt.py
|
Mattias1/fate
|
10266406336bc4c683ff5b23af32ac3447f7f054
|
[
"MIT"
] | null | null | null |
fate/labeling/txt.py
|
Mattias1/fate
|
10266406336bc4c683ff5b23af32ac3447f7f054
|
[
"MIT"
] | null | null | null |
fate/labeling/txt.py
|
Mattias1/fate
|
10266406336bc4c683ff5b23af32ac3447f7f054
|
[
"MIT"
] | null | null | null |
import re
from .common import regex_labels, re_number, re_string
keywords = ['TODO']
re_keyword = re.compile(r'\b({})\b'.format('|'.join(keywords)))
def init(document):
    """Hook this labeler into `document`'s labeling-generation event."""
    document.OnGenerateLabeling.add(main)
def main(document):
    """Label keywords, numbers and strings in `document` via regexes."""
    # Pair each compiled pattern with the label name it produces.
    regex_list = [(re_keyword, 'keyword'), (re_number, 'number'), (re_string, 'string')]
    regex_labels(document, regex_list)
| 22.8125
| 88
| 0.70137
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.123288
|
f7603cc1049048de6e8b2c24b0acac1e2d8f0746
| 490
|
py
|
Python
|
Conteudo das Aulas/146/cgi-bin/cgi4_css_2.py
|
cerberus707/lab-python
|
ebba3c9cde873d70d4bb61084f79ce30b7f9e047
|
[
"Apache-2.0"
] | null | null | null |
Conteudo das Aulas/146/cgi-bin/cgi4_css_2.py
|
cerberus707/lab-python
|
ebba3c9cde873d70d4bb61084f79ce30b7f9e047
|
[
"Apache-2.0"
] | null | null | null |
Conteudo das Aulas/146/cgi-bin/cgi4_css_2.py
|
cerberus707/lab-python
|
ebba3c9cde873d70d4bb61084f79ce30b7f9e047
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# CGI example: serve a static HTML page whose styling lives in an
# external CSS file instead of inline in the script.

import cgitb; cgitb.enable()  # render tracebacks in the browser while debugging

# The blank line after the Content-type header is required by CGI.
print('Content-type: text/html\n')

print(
"""<html>
<head>
<title>CGI 4 - CSS</title>
<link rel="stylesheet" type="text/css" href="../css/estilo1.css">
</head>
<body>
<h1>Colocando CSS em um script a parte</h1>
<hr>
<p>Ola imagens CGI!</p>
<div class="wraptocenter">
<img id="imagem" src="../imagens/evil.jpg" border=1 alt="Piadinha idiota" width=350 height=500>
</div>
<hr>
</body>
</html>
"""
)
| 18.846154
| 101
| 0.604082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 438
| 0.893878
|
f7610420f3cae75b3b9d0169cdf9f686ba220b80
| 4,769
|
py
|
Python
|
neural-navigation-with-lstm/MARCO/plastk/examples/gngsom.py
|
ronaldahmed/SLAM-for-ugv
|
52e3241b8b737a0cfe5682c0aa87ec8c27d6a33d
|
[
"MIT"
] | 14
|
2016-04-03T19:25:13.000Z
|
2022-01-05T07:03:07.000Z
|
neural-navigation-with-lstm/MARCO/plastk/examples/gngsom.py
|
ronaldahmed/SLAM-for-ugv
|
52e3241b8b737a0cfe5682c0aa87ec8c27d6a33d
|
[
"MIT"
] | null | null | null |
neural-navigation-with-lstm/MARCO/plastk/examples/gngsom.py
|
ronaldahmed/SLAM-for-ugv
|
52e3241b8b737a0cfe5682c0aa87ec8c27d6a33d
|
[
"MIT"
] | 5
|
2018-06-21T12:58:58.000Z
|
2020-02-15T05:33:39.000Z
|
"""
GNG vs SOM comparison example for PLASTK.
This script shows how to:
- Train PLASTK vector quantizers (SOM and GNG)
- Set default parameters
- Create a simple agent and environment.
- Run an interaction between the agent and the environment
with a GUI.
$Id: gngsom.py,v 1.3 2006/02/17 19:40:09 jp Exp $
"""
# Import what we need from PLASTK
# All the top-level modules
from plastk import *
# Kohonen SOMs
from plastk.vq.som import SOM,SOM2DDisplay
# Growing Neural Gas
from plastk.vq.gng import EquilibriumGNG, GNG2DDisplay
# the python debugger
import pdb
###################################################################
# Set the PLASTK parameter defaults
# [ help('plastk.params') for more info on parameters ]
#
# SOM Defaults: 10x10 SOM, with 2-D inputs
#
SOM.xdim = 10
SOM.ydim = 10
SOM.dim = 2
#
# GNG defaults: 2-D inputs, maintain average discounted error below
# 0.002, grow at most every 200 steps, max connection age 100.
#
EquilibriumGNG.dim = 2
EquilibriumGNG.rmin = 0
EquilibriumGNG.rmax = 100
EquilibriumGNG.error_threshold = 0.002
EquilibriumGNG.lambda_ = 200
EquilibriumGNG.max_age = 50
EquilibriumGNG.e_b = 0.05
EquilibriumGNG.e_n = 0.001
EquilibriumGNG.print_level = base.VERBOSE
# Overwrite old data files, instead of renaming it.
LoggingRLI.rename_old_data = False
################################################################
# Create the agent and environment
#
class SOMTestEnvironment(Environment):
    """
    A simple environment that generates 2D points sampled from a
    series of randomly generated normal distributions. Reward is
    always 0.
    """
    # How many points to draw from each distribution before switching
    # to a freshly-sampled mean/std pair.
    num_samples_per_distr = 5000

    def __init__(self,**args):
        # If we have an __init__ method on a plastk subclass
        # we must call the superclass constructor
        super(SOMTestEnvironment,self).__init__(**args)
        # create a Python generator object that generates
        # the points.
        self.gen = self.generate_points()

    def __call__(self,action=None):
        # The main step method for Environments.
        # return the a generated point, plus the reward, except
        # when the action is None (at the beginning of an episode).
        # NOTE(review): Python 2 generator protocol (`.next()`); under
        # Python 3 this would need to be `next(self.gen)`.
        if action is None:
            return self.gen.next()
        else:
            return self.gen.next(),0

    def generate_points(self):
        # A python generator that produces an infinite series of 2D
        # points generated from a series of randomly generated normal
        # distributions.
        # NOTE(review): `rand` comes from `from plastk import *` and
        # `xrange` is the Python 2 builtin -- confirm.
        while True:
            mean = rand.uniform(0,100,2)
            std = rand.uniform(1,5,2)
            for i in xrange(self.num_samples_per_distr):
                yield rand.normal(mean,std,2)
class SOMTestAgent(Agent):
    """
    A simple agent that receives 2D points and trains a kohonen SOM
    and a Growing Neural Gas with them. It produces no meanigful
    actions (i.e. it always emits 0).
    """
    def __init__(self,**args):
        # Call the superclass constructor
        super(SOMTestAgent,self).__init__(**args)
        # instantiate a SOM
        self.som = SOM()
        # intialize SOM training: shrink the neighborhood radius from the
        # SOM's larger dimension down over N training steps.
        N = SOMTestEnvironment.num_samples_per_distr * 5
        self.som.init_training(radius_0 = max(self.som.xdim,self.som.ydim),
                               training_length = N)
        # instantiate a Growing Neural Gas
        self.gng = EquilibriumGNG()
    def __call__(self,sensation,reward=None):
        # On receiving input train the SOM and GNG.
        self.som.train(sensation)
        self.gng.train(sensation)
        # Return 0 for the action (actions are meaningless here).
        return 0
################################################
# Run the an interaction between the agent and environment.
#
# Instantiate an agent and an environment
agent = SOMTestAgent()
env = SOMTestEnvironment()
# Instantiate a Reinforcement Learning Interface. An RLI controls the
# interaction between agent and environment, passing sensation and
# reward from the environment to the agent, and actions from the agent
# to the environment. In this experiment, the actions and reward are
# meaningless, and only the sensations, 2D vectors, are important.
#
# The LoggingRLI class includes variable logging and GUI capabilities.
rli = LoggingRLI(name = 'GNGvSOM_experiment')
# Init the RLI
rli.init(agent,env)
# Run the RLI gui with two components, a SOM display and a GNG
# display. The RLI gui takes a list of functions that take two
# parameters, a the rli's GUI frame (root) and the rli object (rli), and return
# instances of Tkinter.Frame that can be packed into the RLI's GUI frame.
#
rli.gui(lambda root,rli:SOM2DDisplay(root,rli.agent.som),
lambda root,rli:GNG2DDisplay(root,gng=rli.agent.gng))
| 29.257669
| 79
| 0.662403
| 2,227
| 0.466974
| 394
| 0.082617
| 0
| 0
| 0
| 0
| 2,759
| 0.578528
|
f761849b5a4f4a9a3e0e3b79a8c5c9b1f726ae8e
| 3,444
|
py
|
Python
|
projects/ide/sublime/src/Bolt/api/inspect/highlighting.py
|
boltjs/bolt
|
c2666c876b34b1a61486a432eef3141ca8d1e411
|
[
"BSD-3-Clause"
] | 11
|
2015-09-29T19:19:34.000Z
|
2020-11-20T09:14:46.000Z
|
projects/ide/sublime/src/Bolt/api/inspect/highlighting.py
|
boltjs/bolt
|
c2666c876b34b1a61486a432eef3141ca8d1e411
|
[
"BSD-3-Clause"
] | null | null | null |
projects/ide/sublime/src/Bolt/api/inspect/highlighting.py
|
boltjs/bolt
|
c2666c876b34b1a61486a432eef3141ca8d1e411
|
[
"BSD-3-Clause"
] | null | null | null |
import sublime
from ui.read import settings as read_settings
from ui.write import write, highlight as write_highlight
from lookup import file_type as lookup_file_type
from ui.read import x as ui_read
from ui.read import spots as read_spots
from ui.read import regions as ui_regions
from core.read import read as core_read
from structs.general_thread import *
from structs.thread_handler import *
from structs.highlight_list import *
from structs.flag_region import *
from core.analyse import analyse
def flags():
    """The four highlight flavours this plugin draws.

    Each FlagRegion bundles a region key, a colour scope, a gutter icon
    and Sublime draw flags, in the order: incorrect, missing, unused,
    wrong-module.
    """
    return [
        FlagRegion('bolt.incorrect', 'comment', 'light_x', 0),
        FlagRegion('bolt.missing', 'string', 'arrow_right', 0),
        FlagRegion('bolt.unused', 'comment', 'dot', sublime.DRAW_OUTLINED),
        FlagRegion('bolt.wrong_module', 'comment', 'light_x', 0)
    ]
def highlight_setting():
    """Settings key that toggles live highlighting."""
    key = 'bolt.live.highlight'
    return key
def rate_setting():
    """Settings key controlling the live-highlight refresh rate."""
    key = 'bolt.live.highlight.rate'
    return key
def is_enabled():
    """Return True when live highlighting is switched on in the settings."""
    settings = read_settings.load_settings()
    return settings.get(highlight_setting(), False)
def get_rate():
    """Return the configured highlight refresh rate (default 1000;
    presumably milliseconds -- confirm against the consumer)."""
    settings = read_settings.load_settings()
    return settings.get(rate_setting(), 1000)
def set_enabled(state):
    """Persist the live-highlighting on/off `state` to the settings file."""
    settings = read_settings.load_settings()
    settings.set(highlight_setting(), state)
    write.save_settings()
def toggle(view):
    """Flip live highlighting on/off for `view` and persist the setting."""
    def noop(v):
        return True
    # Toggling needs no progress reporting, so all handler callbacks
    # are no-ops.
    handler = ThreadHandler(noop, noop, noop)
    prev = is_enabled()
    current = not prev
    if (current):
        run(view, handler)
    else:
        clear(view)
    set_enabled(current)
def run(view, handler):
    """Analyse `view` on a background thread and apply highlight regions.

    Cancels through `handler` when the view is not a Bolt module;
    otherwise snapshots the view on the UI thread, spawns the analysis
    thread, and registers it with `handler`.
    """
    valid = lookup_file_type.is_bolt_module(view)
    if not valid:
        open_file = view.file_name() if view.file_name() != None else '-- no view'
        print 'View is not a bolt module: ' + open_file
        handler.cancel()
    else:
        # Snapshot everything the worker needs while on the UI thread.
        read_view = ui_read.all(view)
        spots = read_spots.spots(view)
        plasmas = core_read.plasmas(read_view.ptext)

        def update_ui(highlights, module_wrong):
            # Called from the worker thread; marshal back onto the UI
            # thread via sublime.set_timeout before touching the view.
            def run():
                regions = write_highlight.regions(view, highlights)
                module_region = [ui_regions.module_name(view)] if module_wrong else []
                # Pair each flag flavour with its computed region list.
                flag_info = zip(flags(), [regions.incorrect, regions.missing, regions.unused, module_region])
                def highlight_flag(x):
                    if len(x[1]) > 0:
                        write_highlight.highlight(view, x[1], x[0]),
                    else:
                        # No regions for this flavour: clear any stale ones.
                        write_highlight.remove_highlight(view, x[0])
                map(highlight_flag, flag_info)
            sublime.set_timeout(run, 0)

        thread = GeneralThread(_highlighter(read_view, spots, plasmas, update_ui), handler.success, handler.failure)
        sublime.set_timeout(thread.start, 0)
        handler.init(thread)
def clear(view):
    """Remove all of this plugin's highlight regions from `view`
    (scheduled onto the UI thread)."""
    def run():
        write_highlight.remove_highlights(view, flags())
    sublime.set_timeout(run, 0)
def _highlighter(read_view, spots, plasmas, callback):
    """Build the thunk the worker thread runs: analyse and report back.

    On failure an empty HighlightLists is reported so the UI still
    clears/updates. NOTE(review): `traceback` is not imported in this
    module's visible scope -- presumably via one of the
    `from ... import *` lines; confirm.
    """
    def r():
        try:
            highlights = analyse.all(read_view.base, read_view.nests, plasmas, spots, read_view.external)
            module_wrong = analyse.module_wrong(read_view)
            callback(highlights, module_wrong)
        except Exception as exc:
            print "Error during identifying highlighted regions: " + str(exc)
            traceback.print_exc(limit=10)
            callback(HighlightLists([], [], []), False)
    return r
| 29.689655
| 116
| 0.654472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 269
| 0.078107
|
f762be92cbd4d0af01d0dd42ecc1fb37b29c7ade
| 3,745
|
py
|
Python
|
gratipay/models/exchange_route.py
|
stefb965/gratipay.com
|
5f3b5922d6b3a7ff64f51574a1087bab2378cbd8
|
[
"CC0-1.0"
] | null | null | null |
gratipay/models/exchange_route.py
|
stefb965/gratipay.com
|
5f3b5922d6b3a7ff64f51574a1087bab2378cbd8
|
[
"CC0-1.0"
] | null | null | null |
gratipay/models/exchange_route.py
|
stefb965/gratipay.com
|
5f3b5922d6b3a7ff64f51574a1087bab2378cbd8
|
[
"CC0-1.0"
] | null | null | null |
from __future__ import absolute_import, division, print_function, unicode_literals
import braintree
from postgres.orm import Model
class ExchangeRoute(Model):
    """ORM model for a participant's route to an external payment network
    (e.g. a Braintree credit card or a PayPal address)."""

    typname = "exchange_routes"

    def __bool__(self):
        # An invalidated route is treated as absent/falsy.
        return self.error != 'invalidated'

    __nonzero__ = __bool__  # Python 2 truthiness alias

    @classmethod
    def from_id(cls, id):
        """Look a route up by primary key; returns None when not found."""
        r = cls.db.one("""
            SELECT r.*::exchange_routes
              FROM exchange_routes r
             WHERE id = %(id)s
        """, locals())
        if r:
            from gratipay.models.participant import Participant  # XXX Red hot hack!
            r.set_attributes(participant=Participant.from_id(r.participant))
        return r

    @classmethod
    def from_network(cls, participant, network):
        """Return `participant`'s current route on `network`, or None."""
        participant_id = participant.id
        r = cls.db.one("""
            SELECT r.*::exchange_routes
              FROM current_exchange_routes r
             WHERE participant = %(participant_id)s
               AND network = %(network)s
        """, locals())
        if r:
            r.set_attributes(participant=participant)
        return r

    @classmethod
    def from_address(cls, participant, network, address):
        """Return the route matching (participant, network, address), or None."""
        participant_id = participant.id
        r = cls.db.one("""
            SELECT r.*::exchange_routes
              FROM exchange_routes r
             WHERE participant = %(participant_id)s
               AND network = %(network)s
               AND address = %(address)s
        """, locals())
        if r:
            r.set_attributes(participant=participant)
        return r

    @classmethod
    def insert(cls, participant, network, address, error='', fee_cap=None, cursor=None):
        """Create a new route row and return it.

        Adding a credit card can change what the participant can give, so
        cached giving amounts are refreshed for braintree-cc routes.
        """
        participant_id = participant.id
        r = (cursor or cls.db).one("""
            INSERT INTO exchange_routes
                        (participant, network, address, error, fee_cap)
                 VALUES (%(participant_id)s, %(network)s, %(address)s, %(error)s, %(fee_cap)s)
              RETURNING exchange_routes.*::exchange_routes
        """, locals())
        if network == 'braintree-cc':
            participant.update_giving_and_teams()
        r.set_attributes(participant=participant)
        return r

    def invalidate(self):
        """Take this route out of service (and delete it upstream where needed)."""
        if self.network == 'braintree-cc':
            braintree.PaymentMethod.delete(self.address)

        # For Paypal, we remove the record entirely to prevent
        # an integrity error if the user tries to add the route again
        if self.network == 'paypal':
            # XXX This doesn't sound right. Doesn't this corrupt history pages?
            self.db.run("DELETE FROM exchange_routes WHERE id=%s", (self.id,))
        else:
            self.update_error('invalidated')

    def update_error(self, new_error):
        """Record `new_error` on this route; no-op once invalidated."""
        id = self.id
        old_error = self.error
        if old_error == 'invalidated':
            return
        self.db.run("""
            UPDATE exchange_routes
               SET error = %(new_error)s
             WHERE id = %(id)s
        """, locals())
        self.set_attributes(error=new_error)

        # Update cached amounts if requested and necessary
        if self.network != 'braintree-cc':
            return
        if self.participant.is_suspicious or bool(new_error) == bool(old_error):
            return

        # XXX *White* hot hack!
        # =====================
        # During payday, participant is a record from a select of
        # payday_participants (or whatever), *not* an actual Participant
        # object. We need the real deal so we can use a method on it ...

        from gratipay.models.participant import Participant
        participant = Participant.from_username(self.participant.username)

        participant.update_giving_and_teams()
| 34.357798
| 94
| 0.594393
| 3,610
| 0.963952
| 0
| 0
| 1,883
| 0.502804
| 0
| 0
| 1,576
| 0.420828
|
f763746331e345f22b7c5a33a4edda7eac385dea
| 805
|
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/video_pipeline/forms.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3
|
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/video_pipeline/forms.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/video_pipeline/forms.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1
|
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
"""
Defines a form to provide validations for course-specific configuration.
"""
from django import forms
from openedx.core.djangoapps.video_config.forms import CourseSpecificFlagAdminBaseForm
from openedx.core.djangoapps.video_pipeline.models import (
CourseVideoUploadsEnabledByDefault,
VEMPipelineIntegration,
)
class CourseVideoUploadsEnabledByDefaultAdminForm(CourseSpecificFlagAdminBaseForm):
    """
    Form for course-specific Video Uploads enabled by default configuration.
    """

    class Meta:
        model = CourseVideoUploadsEnabledByDefault
        # Expose every model field in the admin form.
        fields = '__all__'
class VEMPipelineIntegrationAdminForm(forms.ModelForm):
    """
    Form for VEM Pipeline Integration Admin class.
    """

    class Meta:
        model = VEMPipelineIntegration
        # Expose every model field in the admin form.
        fields = '__all__'
| 26.833333
| 86
| 0.756522
| 475
| 0.590062
| 0
| 0
| 0
| 0
| 0
| 0
| 248
| 0.308075
|
f76393b04c4eca590f51e2e26126536b11d54d6f
| 1,779
|
py
|
Python
|
tests/integration/test_create_from_full_info.py
|
superannotateai/superannotate-python-sdk
|
e2ce848b61efed608265fa64f3781fd5a17c929b
|
[
"MIT"
] | 26
|
2020-09-25T06:25:06.000Z
|
2022-01-30T16:44:07.000Z
|
tests/integration/test_create_from_full_info.py
|
superannotateai/superannotate-python-sdk
|
e2ce848b61efed608265fa64f3781fd5a17c929b
|
[
"MIT"
] | 12
|
2020-12-21T19:59:48.000Z
|
2022-01-21T10:32:07.000Z
|
tests/integration/test_create_from_full_info.py
|
superannotateai/superannotate-python-sdk
|
e2ce848b61efed608265fa64f3781fd5a17c929b
|
[
"MIT"
] | 11
|
2020-09-17T13:39:19.000Z
|
2022-03-02T18:12:29.000Z
|
import os
from os.path import dirname
from unittest import TestCase
import src.superannotate as sa
class TestCloneProject(TestCase):
    """Integration test: cloning a project copies contributors and allows
    overriding the description."""

    PROJECT_NAME_1 = "test create from full info1"
    PROJECT_NAME_2 = "test create from full info2"
    PROJECT_DESCRIPTION = "desc"
    PROJECT_TYPE = "Vector"
    TEST_FOLDER_PATH = "data_set/sample_project_vector"

    @property
    def folder_path(self):
        """Absolute path to the sample project fixture folder."""
        return os.path.join(dirname(dirname(__file__)), self.TEST_FOLDER_PATH)

    @property
    def classes_json(self):
        """Path to the fixture's classes.json file."""
        return f"{self.folder_path}/classes/classes.json"

    def setUp(self, *args, **kwargs):
        # Delete any leftovers first so each test starts from a clean slate.
        self.tearDown()
        self._project_1 = sa.create_project(
            self.PROJECT_NAME_1, self.PROJECT_DESCRIPTION, self.PROJECT_TYPE
        )

    def tearDown(self) -> None:
        sa.delete_project(self.PROJECT_NAME_1)
        sa.delete_project(self.PROJECT_NAME_2)

    def test_clone_contributors_and_description(self):
        """Cloning with copy_contributors=True copies the shared QA user and
        applies the new description."""
        team_users = sa.search_team_contributors()
        sa.share_project(self.PROJECT_NAME_1, team_users[0], "QA")
        first_project_metadata = sa.get_project_metadata(
            self.PROJECT_NAME_1, include_contributors=True
        )
        first_project_contributors = first_project_metadata["contributors"]
        sa.clone_project(self.PROJECT_NAME_2, self.PROJECT_NAME_1, "DESCRIPTION", copy_contributors=True)
        second_project_metadata = sa.get_project_metadata(
            self.PROJECT_NAME_2, include_contributors=True
        )
        second_project_contributors = second_project_metadata["contributors"]

        self.assertEqual(first_project_contributors[0]["user_id"], second_project_contributors[0]["user_id"])
        self.assertEqual("DESCRIPTION", second_project_metadata["description"])
| 37.0625
| 109
| 0.720067
| 1,677
| 0.942664
| 0
| 0
| 210
| 0.118044
| 0
| 0
| 235
| 0.132097
|
f7639fdfd1c81876235b0d816ccef91c2a2888bb
| 903
|
py
|
Python
|
spellingcorrector/utils/count.py
|
NazcaLines/spelling-corrector
|
ae315a3988e94ee46f60ff4ac7d2ee7609ebc24b
|
[
"MIT"
] | null | null | null |
spellingcorrector/utils/count.py
|
NazcaLines/spelling-corrector
|
ae315a3988e94ee46f60ff4ac7d2ee7609ebc24b
|
[
"MIT"
] | null | null | null |
spellingcorrector/utils/count.py
|
NazcaLines/spelling-corrector
|
ae315a3988e94ee46f60ff4ac7d2ee7609ebc24b
|
[
"MIT"
] | null | null | null |
import os
import functools
CORPUS_DIR = str(os.getcwd())[:str(os.getcwd()).index('spellingcorrector/')] \
+ 'data/corpus.txt'
NWORD = {}
def checkCorpus(fn):
@functools.wraps(fn)
def new_func(*args, **kwargs):
t = os.path.isfile(CORPUS_DIR)
if t == False:
raise IOError('cannot find corpus in data/')
return fn(*args, **kwargs)
return new_func
@checkCorpus
def train():
global NWORD
with open(CORPUS_DIR, 'r') as f:
for line in f:
split = line.split()
#tmp = {split[0]:float(split[1])}
NWORD[split[0]] = float(split[1])
def getTrain():
"""
simple singleton implement
"""
global NWORD
if len(NWORD) == 0:
train()
return NWORD
if __name__ == "__main__":
getTrain()
print CORPUS_DIR
print os.path.isfile(CORPUS_DIR)
print len(NWORD)
| 19.630435
| 78
| 0.572536
| 0
| 0
| 0
| 0
| 436
| 0.482835
| 0
| 0
| 154
| 0.170543
|
f764d5863df085c67cf462549442d82ef895d117
| 653
|
py
|
Python
|
rhasspy_weather/parser/nlu_intent.py
|
arniebarni/rhasspy_weather
|
6a9df72adad3e5dafa7962c2be37c824dc04137b
|
[
"MIT"
] | 5
|
2020-03-29T01:00:30.000Z
|
2022-02-06T20:00:00.000Z
|
rhasspy_weather/parser/nlu_intent.py
|
arniebarni/rhasspy_weather
|
6a9df72adad3e5dafa7962c2be37c824dc04137b
|
[
"MIT"
] | 12
|
2020-04-02T15:09:05.000Z
|
2021-10-11T00:44:21.000Z
|
rhasspy_weather/parser/nlu_intent.py
|
arniebarni/rhasspy_weather
|
6a9df72adad3e5dafa7962c2be37c824dc04137b
|
[
"MIT"
] | 5
|
2020-03-25T08:33:02.000Z
|
2021-05-18T08:47:41.000Z
|
import logging
from rhasspy_weather.data_types.request import WeatherRequest
from rhasspy_weather.parser import rhasspy_intent
from rhasspyhermes.nlu import NluIntent
log = logging.getLogger(__name__)
def parse_intent_message(intent_message: NluIntent) -> WeatherRequest:
"""
Parses any of the rhasspy weather intents.
Args:
intent_message: a Hermes NluIntent
Returns: WeatherRequest object
"""
return rhasspy_intent.parse_intent_message(intent_message.to_rhasspy_dict())
def get_template_values(intent_message: NluIntent) -> dict:
return rhasspy_intent.get_template_values(intent_message.to_rhasspy_dict())
| 26.12
| 80
| 0.793262
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 149
| 0.228178
|
f767fc179ce62571eb82287782f1d69c78d494fd
| 1,028
|
py
|
Python
|
415-add-strings/add_strings.py
|
cnluocj/leetcode
|
5b870a63ba1aab3db1e05421c91f404a9aabc489
|
[
"MIT"
] | null | null | null |
415-add-strings/add_strings.py
|
cnluocj/leetcode
|
5b870a63ba1aab3db1e05421c91f404a9aabc489
|
[
"MIT"
] | null | null | null |
415-add-strings/add_strings.py
|
cnluocj/leetcode
|
5b870a63ba1aab3db1e05421c91f404a9aabc489
|
[
"MIT"
] | null | null | null |
"""
59.40%
其实是大数相加
"""
class Solution(object):
def addStrings(self, num1, num2):
"""
:type num1: str
:type num2: str
:rtype: str
"""
num1_index = len(num1) - 1
num2_index = len(num2) - 1
if num1_index < 0:
return num2
if num2_index < 0:
return num1
carry = 0
result = []
while True:
r = ''
if num1_index >= 0 and num2_index >= 0:
r = int(num1[num1_index]) + int(num2[num2_index]) + carry
elif num1_index >= 0:
r = int(num1[num1_index]) + carry
elif num2_index >= 0:
r = int(num2[num2_index]) + carry
else:
if carry > 0:
result.append(str(carry))
break
carry = 1 if r > 9 else 0
r = r % 10
result.append(str(r))
num1_index -= 1
num2_index -= 1
return ''.join(result)[::-1]
| 23.906977
| 73
| 0.430934
| 1,002
| 0.961612
| 0
| 0
| 0
| 0
| 0
| 0
| 124
| 0.119002
|
f769f929849f6994908fa8a9ca653f7ebe8e0e87
| 2,770
|
py
|
Python
|
plugins/qdb.py
|
x89/raziel-irc-bot
|
122a5de858a84e018549e0a7fd0be11bb33f2eb3
|
[
"MIT"
] | null | null | null |
plugins/qdb.py
|
x89/raziel-irc-bot
|
122a5de858a84e018549e0a7fd0be11bb33f2eb3
|
[
"MIT"
] | null | null | null |
plugins/qdb.py
|
x89/raziel-irc-bot
|
122a5de858a84e018549e0a7fd0be11bb33f2eb3
|
[
"MIT"
] | null | null | null |
import logging
log = logging.getLogger(__name__)
import json
import requests
import requests.exceptions
import botologist.plugin
BASE_URL = 'https://qdb.lutro.me'
def _get_quote_url(quote):
return BASE_URL + '/' + str(quote['id'])
def _get_qdb_data(url, query_params):
response = requests.get(url, query_params, headers={'accept': 'application/json'})
response.raise_for_status()
return response.json()
def _search_for_quote(quote):
search = False
query_params = None
if isinstance(quote, int):
url = BASE_URL+'/'+str(quote)
single_quote = True
else:
single_quote = False
if quote == 'random':
url = BASE_URL+'/random'
elif quote == 'latest':
url = BASE_URL
else:
search = str(quote)
url = BASE_URL+'/random'
query_params = {'s': search}
try:
data = _get_qdb_data(url, query_params=query_params)
except requests.exceptions.RequestException:
log.warning('QDB request caused an exception', exc_info=True)
return 'HTTP error!'
if single_quote:
quote = data['quote']
else:
quotes = data['quotes']
if 'items' in quotes:
quotes = quotes['items']
if len(quotes) < 1:
return 'No quotes found!'
quote = quotes[0]
url = BASE_URL+'/'+str(quote['id'])
if len(quote['body']) > 400:
body = quote['body']
if search:
try:
body_len = len(body)
substr_pos = body.lower().index(search.lower())
start = body.rfind('\n', 0, substr_pos) + 1
while body_len - start < 300:
substr_pos = body.rfind('\n', 0, start - 1) + 1
if body_len - substr_pos < 300:
start = substr_pos
else:
break
end = start + 350 - len(search)
except ValueError:
start = 0
end = 300
else:
start = 0
end = 300
body = body.replace('\r', '').replace('\n', ' ').replace('\t', ' ')
excerpt = body[start:end]
if start > 0:
excerpt = '[...] ' + excerpt
if end < len(quote['body']):
excerpt = excerpt + ' [...]'
body = excerpt
else:
body = quote['body'].replace('\r', '').replace('\n', ' ').replace('\t', ' ')
return url + ' - ' + body
class QdbPlugin(botologist.plugin.Plugin):
@botologist.plugin.command('qdb')
def search(self, cmd):
'''Search for a quote, or show a specific quote.
Examples: !qdb search for this - !qdb #220
'''
if len(cmd.args) < 1:
arg = 'random'
elif cmd.args[0][0] == '#':
try:
arg = int(cmd.args[0][1:])
except ValueError:
arg = ' '.join(cmd.args)
else:
arg = ' '.join(cmd.args)
return _search_for_quote(arg)
@botologist.plugin.http_handler(method='POST', path='/qdb-update')
def quote_updated(self, body, headers):
data = json.loads(body)
quote = data['quote']
if quote['approved']:
return 'New quote approved! ' + _get_quote_url(quote)
else:
return 'Quote pending approval!'
| 23.277311
| 83
| 0.636462
| 720
| 0.259928
| 0
| 0
| 673
| 0.24296
| 0
| 0
| 493
| 0.177978
|
f76d62143b8e1fa514207d6381b4adbf58120f1a
| 2,889
|
py
|
Python
|
skos/method.py
|
edmondchuc/voc-view
|
57bd965facacc77f40f218685c88e8b858d4925c
|
[
"MIT"
] | 3
|
2021-07-31T16:23:26.000Z
|
2022-01-24T01:28:17.000Z
|
skos/method.py
|
edmondchuc/voc-view
|
57bd965facacc77f40f218685c88e8b858d4925c
|
[
"MIT"
] | null | null | null |
skos/method.py
|
edmondchuc/voc-view
|
57bd965facacc77f40f218685c88e8b858d4925c
|
[
"MIT"
] | 1
|
2019-08-07T06:02:52.000Z
|
2019-08-07T06:02:52.000Z
|
from pyldapi.renderer import Renderer
from pyldapi.view import View
from flask import render_template, Response
from rdflib import Graph, URIRef, BNode
import skos
from skos.common_properties import CommonPropertiesMixin
from config import Config
class Method(CommonPropertiesMixin):
def __init__(self, uri):
CommonPropertiesMixin.__init__(self, uri)
self.uri = uri
self.purpose = skos.get_method_purpose(uri)
self.scope = skos.get_method_scope(uri)
self.equipment = skos.get_method_equipment(uri)
self.time_required = skos.get_method_time_required(uri)
self.instructions = skos.get_method_instructions(uri)
self.additional_note = skos.get_method_additional_note(uri)
self.parameters = skos.get_parameter_relations(uri)
self.categorical_variables = skos.get_categorical_variables_relations(uri)
class MethodRenderer(Renderer):
def __init__(self, uri, request):
self.uri = uri
views = {
'method': View(
'Method',
'A TERN method.',
['text/html'] + Renderer.RDF_MIMETYPES,
'text/html',
namespace='https://w3id.org/tern/ontologies/tern/'
)
}
super().__init__(request, uri, views, 'method')
# TODO: Make a base class and make this a method of the base class.
def render_rdf(self):
g = Graph()
for subj, pred, obj in Config.g.triples((URIRef(self.uri), None, None)):
g.add((subj, pred, obj))
if type(obj) == BNode:
for s, p, o in Config.g.triples((obj, None, None)):
g.add((s, p, o))
return Response(g.serialize(format=self.format), mimetype=self.format)
def render(self):
if not hasattr(self, 'format'):
self.format = 'text/html'
if self.view == 'method':
if self.format == 'text/html':
cc = Method(self.uri)
return render_template('method.html', title=cc.label, c=cc,
skos_class=('https://w3id.org/tern/ontologies/tern/Method', 'Method'),
formats=[(format, format.split('/')[-1]) for format in self.views.get('method').formats])
elif self.format in Renderer.RDF_MIMETYPES:
return self.render_rdf()
else:
# In theory, this line should never execute because if an invalid format has been entered, the pyldapi
# will default to the default format. In this case, The default format for the default view (skos) is
# text/html.
raise RuntimeError('Invalid format error')
else:
# Let pyldapi handle the rendering of the 'alternates' view.
return super(MethodRenderer, self).render()
| 40.125
| 128
| 0.602631
| 2,635
| 0.91208
| 0
| 0
| 0
| 0
| 0
| 0
| 582
| 0.201454
|
f76e35161b8285ae39943d6522c5085c519cc9cf
| 22,791
|
py
|
Python
|
wwt_api_client/communities.py
|
WorldWideTelescope/wwt_api_client
|
cfc42728eb2428f17e711f7527fd97150e629296
|
[
"BSD-3-Clause"
] | null | null | null |
wwt_api_client/communities.py
|
WorldWideTelescope/wwt_api_client
|
cfc42728eb2428f17e711f7527fd97150e629296
|
[
"BSD-3-Clause"
] | 8
|
2019-04-28T17:27:44.000Z
|
2020-11-05T20:24:21.000Z
|
wwt_api_client/communities.py
|
WorldWideTelescope/wwt_api_client
|
cfc42728eb2428f17e711f7527fd97150e629296
|
[
"BSD-3-Clause"
] | 1
|
2019-04-28T17:25:06.000Z
|
2019-04-28T17:25:06.000Z
|
# Copyright 2019-2020 the .NET Foundation
# Distributed under the terms of the revised (3-clause) BSD license.
"""Interacting with the WWT Communities APIs."""
import json
import os.path
import requests
import sys
from urllib.parse import parse_qs, urlparse
from . import APIRequest, Client, enums
__all__ = '''
CommunitiesAPIRequest
CommunitiesClient
CreateCommunityRequest
DeleteCommunityRequest
GetCommunityInfoRequest
GetLatestCommunityRequest
GetMyProfileRequest
GetProfileEntitiesRequest
IsUserRegisteredRequest
interactive_communities_login
'''.split()
LIVE_OAUTH_AUTH_SERVICE = "https://login.live.com/oauth20_authorize.srf"
LIVE_OAUTH_TOKEN_SERVICE = "https://login.live.com/oauth20_token.srf"
LIVE_OAUTH_DESKTOP_ENDPOINT = "https://login.live.com/oauth20_desktop.srf"
LIVE_AUTH_SCOPES = ['wl.emails', 'wl.signin']
WWT_CLIENT_ID = '000000004015657B'
OAUTH_STATE_BASENAME = 'communities-oauth.json'
CLIENT_SECRET_BASENAME = 'communities-client-secret.txt'
class CommunitiesClient(object):
"""A client for WWT Communities API requests.
Instantiating such a client will make at least one web request, to refresh
the Microsoft Live OAuth login token.
In addition, an interactive user login may be necessary. This must be
explicitly allowed by the caller to prevent random programs from hanging
waiting for user input. If interactive login is successful, the
authentication data from such a login are saved in the current user's
state directory (~/.local/state/wwt_api_client/ on Linux machines) for
subsequent use.
"""
_parent = None
_state_dir = None
_state = None
_access_token = None
_refresh_token = None
def __init__(self, parent_client, oauth_client_secret=None, interactive_login_if_needed=False, state_dir=None):
self._parent = parent_client
if state_dir is None:
import appdirs
state_dir = appdirs.user_state_dir('wwt_api_client', 'AAS_WWT')
self._state_dir = state_dir
# Do we have the client secret? This is saved to disk upon the first
# login, but it can also be passed in.
if oauth_client_secret is None:
try:
with open(os.path.join(self._state_dir, CLIENT_SECRET_BASENAME), 'rt') as f:
oauth_client_secret = f.readline().strip()
except FileNotFoundError:
pass
if oauth_client_secret is None:
raise Exception('cannot create CommunitiesClient: the \"oauth client secret\" '
'is not available to the program')
# Try to get state from a previous OAuth flow and decide what to do
# based on where we're at.
try:
with open(os.path.join(self._state_dir, OAUTH_STATE_BASENAME), 'rt') as f:
self._state = json.load(f)
except FileNotFoundError:
pass
# For the record, `http://worldwidetelescope.org/webclient` and
# `http://www.worldwidetelesope.org/webclient` are valid
# redirect_uri's.
token_service_params = {
'client_id': WWT_CLIENT_ID,
'client_secret': oauth_client_secret,
'redirect_uri': LIVE_OAUTH_DESKTOP_ENDPOINT,
}
# Once set, the structure of oauth_data is : {
# 'token_type': 'bearer',
# 'expires_in': <seconds>,
# 'scope': <scopes>,
# 'access_token': <long hex>,
# 'refresh_token': <long hex>,
# 'authentication_token': <long hex>,
# 'user_id': <...>
# }
oauth_data = None
if self._state is not None:
# We have previous state -- hopefully, we only need a refresh, which
# can proceed non-interactively.
token_service_params['grant_type'] = 'refresh_token'
token_service_params['refresh_token'] = self._state['refresh_token']
oauth_data = requests.post(
LIVE_OAUTH_TOKEN_SERVICE,
data = token_service_params,
).json()
if 'error' in oauth_data:
if oauth_data['error'] == 'invalid_grant':
# This indicates that our grant has expired. We need to
# rerun the auth flow.
self._state = None
else:
# Some other kind of error. Bail.
raise Exception(repr(oauth_data))
if self._state is None:
# We need to do the interactive authentication flow. This has to
# be explicitly allowed by the caller because we don't want random
# programs pausing for user input on the terminal.
if not interactive_login_if_needed:
raise Exception('cannot create CommunitiesClient: an interactive login is '
'required but unavailable right now')
params = {
'client_id': WWT_CLIENT_ID,
'scope': ' '.join(LIVE_AUTH_SCOPES),
'redirect_uri': LIVE_OAUTH_DESKTOP_ENDPOINT,
'response_type': 'code'
}
preq = requests.Request(url=LIVE_OAUTH_AUTH_SERVICE, params=params).prepare()
print()
print('To use the WWT Communities APIs, interactive authentication to Microsoft')
print('Live is required. Open this URL in a browser and log in:')
print()
print(preq.url)
print()
print('When done, copy the URL *that you are redirected to* and paste it here:')
print('>> ', end='')
redir_url = input()
# should look like:
# 'https://login.live.com/oauth20_desktop.srf?code=MHEXHEXHE-XHEX-HEXH-EXHE-XHEXHEXHEXHE&lc=NNNN'
parsed = urlparse(redir_url)
params = parse_qs(parsed.query)
code = params.get('code')
if not code:
raise Exception('didn\'t get "code" parameter from response URL')
token_service_params['grant_type'] = 'authorization_code'
token_service_params['code'] = code
oauth_data = requests.post(
LIVE_OAUTH_TOKEN_SERVICE,
data = token_service_params,
).json()
if 'error' in oauth_data:
raise Exception(repr(oauth_data))
# Looks like it worked! Save the results for next time.
os.makedirs(self._state_dir, exist_ok=True)
# Sigh, Python not making it easy to be secure ...
fd = os.open(os.path.join(self._state_dir, OAUTH_STATE_BASENAME), os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
f = open(fd, 'wt')
with f:
json.dump(oauth_data, f)
fd = os.open(os.path.join(self._state_dir, CLIENT_SECRET_BASENAME), os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
f = open(fd, 'wt')
with f:
print(oauth_client_secret, file=f)
# And for this time:
self._access_token = oauth_data['access_token']
self._refresh_token = oauth_data['refresh_token']
def create_community(self, payload=None):
"""Create a new community owned by the current user.
Parameters
----------
See the definition of the :class:`CreateCommunityRequest` class.
Returns
-------
request : an initialized :class:`CreateCommunityRequest` object
The request.
"""
req = CreateCommunityRequest(self)
req.payload = payload
return req
def delete_community(self, id=None):
"""Delete a community.
Parameters
----------
See the definition of the :class:`DeleteCommunityRequest` class.
Returns
-------
request : an initialized :class:`DeleteCommunityRequest` object
The request.
"""
req = DeleteCommunityRequest(self)
req.id = id
return req
def get_community_info(self, id=None):
"""Get information about the specified community.
Parameters
----------
See the definition of the :class:`GetCommunityInfoRequest` class.
Returns
-------
request : an initialized :class:`GetCommunityInfoRequest` object
The request.
"""
req = GetCommunityInfoRequest(self)
req.id = id
return req
def get_latest_community(self):
"""Get information about the most recently created WWT Communities.
.. testsetup:: [*]
>>> comm_client = getfixture('communities_client_cached')
Examples
--------
There are no arguments::
>>> req = comm_client.get_latest_community()
>>> folder = req.send() # returns wwt_data_formats.folder.Folder
Returns
-------
request : an initialized :class:`GetLatestCommunityRequest` object
The request.
"""
return GetLatestCommunityRequest(self)
def get_my_profile(self):
"""Get the logged-in user's profile information.
.. testsetup:: [*]
>>> comm_client = getfixture('communities_client_cached')
Examples
--------
There are no arguments::
>>> req = comm_client.get_my_profile()
>>> json = req.send() # returns JSON data structure
>>> print(json['ProfileId'])
123456
Returns
-------
request : an initialized :class:`GetMyProfileRequest` object
The request.
"""
return GetMyProfileRequest(self)
def get_profile_entities(
self,
entity_type = enums.EntityType.CONTENT,
current_page = 1,
page_size = 99999,
):
"""Get "entities" associated with the logged-in user's profile.
.. testsetup:: [*]
>>> comm_client = getfixture('communities_client_cached')
Parameters
----------
See the definition of the :class:`GetProfileEntitiesRequest` class.
Examples
--------
>>> from wwt_api_client.enums import EntityType
>>> req = comm_client.get_profile_entities(
... entity_type = EntityType.CONTENT,
... current_page = 1, # one-based
... page_size = 99999,
... )
>>> json = req.send() # returns json
>>> print(json['entities'][0]['Id'])
82077
Returns
-------
request : an initialized :class:`GetProfileEntitiesRequest` object
The request.
"""
req = GetProfileEntitiesRequest(self)
req.entity_type = entity_type
req.current_page = current_page
req.page_size = page_size
return req
def is_user_registered(self):
"""Query whether the logged-in Microsoft Live user is registered with
the WWT Communities system.
.. testsetup:: [*]
>>> comm_client = getfixture('communities_client_cached')
Examples
--------
There are no arguments::
>>> req = comm_client.is_user_registered()
>>> print(req.send())
True
Returns
-------
request : an initialized :class:`IsUserRegisteredRequest` object
The request.
"""
return IsUserRegisteredRequest(self)
class CommunitiesAPIRequest(APIRequest):
"""A base class for WWT Communities API requests.
These require that the user be logged in to a Microsoft Live account.
"""
_comm_client = None
def __init__(self, communities_client):
super(CommunitiesAPIRequest, self).__init__(communities_client._parent)
self._comm_client = communities_client
class CreateCommunityRequest(CommunitiesAPIRequest):
"""Create a new community.
The response gives the ID of the new community.
"""
payload = None
"""The request payload is JSON resembling::
{
"communityJson": {
"CategoryID": 20,
"ParentID": "610131",
"AccessTypeID": 2,
"IsOffensive":false,
"IsLink": false,
"CommunityType": "Community",
"Name": "Community name",
"Description": "Community description",
"Tags": "tag1,tag2"
}
}
(It doesn't feel worthwhile to implement this payload as a fully-fledged
data structure at the moment.)
"""
def invalidity_reason(self):
if self.payload is None:
return '"payload" must be a JSON dictionary'
return None
def make_request(self):
return requests.Request(
method = 'POST',
url = self._client._api_base + '/Community/Create/New',
json = self.payload,
cookies = {
'access_token': self._comm_client._access_token,
'refresh_token': self._comm_client._refresh_token,
},
)
def _process_response(self, resp):
s = json.loads(resp.text)
return s['ID']
class DeleteCommunityRequest(CommunitiesAPIRequest):
"""Delete a community.
Returns True if the community was successfully deleted, False otherwise.
"""
id = None
"The ID number of the community to delete"
def invalidity_reason(self):
if not isinstance(self.id, int):
return '"id" must be an integer'
return None
def make_request(self):
# The API includes a {parentId} after the community ID, but it is
# unused.
return requests.Request(
method = 'POST',
url = f'{self._client._api_base}/Community/Delete/{self.id}/0',
cookies = {
'access_token': self._comm_client._access_token,
'refresh_token': self._comm_client._refresh_token,
},
)
def _process_response(self, resp):
t = resp.text
if t == 'True':
return True
elif t == 'False':
return False
raise Exception(f'unexpected response from IsUserRegistered API: {t!r}')
# TODO: we're not implementing the "isEdit" mode where you can update
# community info.
class GetCommunityInfoRequest(CommunitiesAPIRequest):
"""Get information about the specified community.
The response is JSON, looking like::
{
"community": {
"MemberCount": 0,
"ViewCount": 6,
"ShareUrl": null,
"Description": "Testing community",
"LastUpdated": "44 minutes ago",
"ActionUrl": null,
"IsOffensive": false,
"Id": 610180,
"Name": "PKGW Test",
"Category": 20,
"ParentId": 610131,
"ParentName": "None",
"ParentType": 3,
"Tags": "testtag",
"Rating": 0,
"RatedPeople": 0,
"ThumbnailID": "00000000-0000-0000-0000-000000000000",
"Entity": 1,
"FileName": null,
"ContentAzureID": null,
"UserPermission": 63,
"AccessType": 2,
"Producer": "Peter Williams ",
"ProducerId": 609582,
"ContentType": 0,
"DistributedBy": null
},
"permission": {
"Result": {
"CurrentUserPermission": 63,
"PermissionItemList": [
{
"Comment": null,
"Date": "/Date(1585273889157)/",
"Requested": "44 minutes ago",
"CommunityId": 610180,
"CommunityName": "PKGW Test",
"CurrentUserRole": 5,
"IsInherited": true,
"CanShowEditLink": false,
"CanShowDeleteLink": false,
"Id": 609582,
"Name": "Peter Williams ",
"Role": 5
}
],
"PaginationDetails": {
"ItemsPerPage": 8,
"CurrentPage": 0,
"TotalPages": 1,
"TotalCount": 1
},
"SelectedPermissionsTab": 1
},
"Id": 4,
"Exception": null,
"Status": 5,
"IsCanceled": false,
"IsCompleted": true,
"CreationOptions": 0,
"AsyncState": null,
"IsFaulted": false
}
}
"""
id = None
"The ID number of the community to probe"
def invalidity_reason(self):
if not isinstance(self.id, int):
return '"id" must be an integer'
return None
def make_request(self):
return requests.Request(
method = 'GET',
url = f'{self._client._api_base}/Community/Detail/{self.id}',
cookies = {
'access_token': self._comm_client._access_token,
'refresh_token': self._comm_client._refresh_token,
},
headers = {'LiveUserToken': self._comm_client._access_token},
)
def _process_response(self, resp):
return json.loads(resp.text)
class GetLatestCommunityRequest(CommunitiesAPIRequest):
"""Get information about the most recently created WWT Communities. The
information is returned as a ``wwt_data_formats.folder.Folder`` with
sub-Folders corresponding to the communities.
"""
def invalidity_reason(self):
return None
def make_request(self):
return requests.Request(
method = 'GET',
url = self._client._api_base + '/Resource/Service/Browse/LatestCommunity',
headers = {'LiveUserToken': self._comm_client._access_token},
)
def _process_response(self, resp):
from wwt_data_formats.folder import Folder
from xml.etree import ElementTree as etree
xml = etree.fromstring(resp.text)
return Folder.from_xml(xml)
class GetMyProfileRequest(CommunitiesAPIRequest):
"""Get the currently logged-in user's profile information.
The response is JSON, looking like::
{
'ProfileId': 123456,
'ProfileName': 'Firstname Lastname',
'AboutProfile': '',
'Affiliation': 'Affil Text',
'ProfilePhotoLink': '~/Content/Images/profile.png',
'TotalStorage': '5.00 GB',
'UsedStorage': '0.00 B',
'AvailableStorage': '5.00 GB',
'PercentageUsedStorage': '0%',
'IsCurrentUser': True,
'IsSubscribed': False
}
"""
def invalidity_reason(self):
return None
def make_request(self):
return requests.Request(
method = 'GET',
url = self._client._api_base + '/Profile/MyProfile/Get',
headers = {
'Accept': 'application/json, text/plain, */*',
},
cookies = {
'access_token': self._comm_client._access_token,
'refresh_token': self._comm_client._refresh_token,
},
)
def _process_response(self, resp):
return json.loads(resp.text)
class GetProfileEntitiesRequest(CommunitiesAPIRequest):
"""Get "entities" associated with the logged-in user.
Entities include communities, folders, and content files. The response is JSON.
"""
entity_type = enums.EntityType.CONTENT
"What kind of entity to query. Only COMMUNITY and CONTENT are allowed."
current_page = 1
"What page of search results to return -- starting at 1."
page_size = 99999
"How many items to return per page of search results."
def invalidity_reason(self):
if not isinstance(self.entity_type, enums.EntityType):
return '"entity_type" must be a wwt_api_client.enums.EntityType'
if not isinstance(self.current_page, int):
return '"current_page" must be an int'
if not isinstance(self.page_size, int):
return '"current_page" must be an int'
return None
def make_request(self):
return requests.Request(
method = 'GET',
url = f'{self._client._api_base}/Profile/Entities/{self.entity_type.value}/{self.current_page}/{self.page_size}',
cookies = {
'access_token': self._comm_client._access_token,
'refresh_token': self._comm_client._refresh_token,
},
)
def _process_response(self, resp):
return json.loads(resp.text)
class IsUserRegisteredRequest(CommunitiesAPIRequest):
"""Asks whether the logged-in Microsoft Live user is registered with the WWT
Communities system.
"""
def invalidity_reason(self):
return None
def make_request(self):
return requests.Request(
method = 'GET',
url = self._client._api_base + '/Resource/Service/User',
headers = {'LiveUserToken': self._comm_client._access_token},
)
def _process_response(self, resp):
t = resp.text
if t == 'True':
return True
elif t == 'False':
return False
raise Exception(f'unexpected response from IsUserRegistered API: {t!r}')
# Command-line utility for initializing the OAuth state.
def interactive_communities_login(args):
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--secret-file',
metavar = 'PATH',
help = 'Path to a file from which to read the WWT client secret',
)
parser.add_argument(
'--secret-env',
metavar = 'ENV-VAR-NAME',
help = 'Name of an environment variable containing the WWT client secret',
)
settings = parser.parse_args(args)
# Make sure we actually have a secret to work with.
if settings.secret_file is not None:
with open(settings.secret_file) as f:
client_secret = f.readline().strip()
elif settings.secret_env is not None:
client_secret = os.environ.get(settings.secret_env)
else:
print('error: the WWT \"client secret\" must be provided; '
'use --secret-file or --secret-env', file=sys.stderr)
sys.exit(1)
if not client_secret:
print('error: the WWT \"client secret\" is empty or unset', file=sys.stderr)
sys.exit(1)
# Ready to go ...
CommunitiesClient(
Client(),
oauth_client_secret = client_secret,
interactive_login_if_needed = True,
)
print('OAuth flow successfully completed.')
if __name__ == '__main__':
interactive_communities_login(sys.argv[1:])
| 30.966033
| 125
| 0.5833
| 20,299
| 0.890659
| 0
| 0
| 0
| 0
| 0
| 0
| 12,434
| 0.545566
|
f76f8e181e6635c86576107fa1d30d62af17c114
| 158
|
py
|
Python
|
FictionTools/amitools/test/suite/vprintf.py
|
polluks/Puddle-BuildTools
|
c1762d53a33002b62d8cffe3db129505a387bec3
|
[
"BSD-2-Clause"
] | 38
|
2021-06-18T12:56:15.000Z
|
2022-03-12T20:38:40.000Z
|
FictionTools/amitools/test/suite/vprintf.py
|
polluks/Puddle-BuildTools
|
c1762d53a33002b62d8cffe3db129505a387bec3
|
[
"BSD-2-Clause"
] | 2
|
2021-06-20T16:28:12.000Z
|
2021-11-17T21:33:56.000Z
|
FictionTools/amitools/test/suite/vprintf.py
|
polluks/Puddle-BuildTools
|
c1762d53a33002b62d8cffe3db129505a387bec3
|
[
"BSD-2-Clause"
] | 6
|
2021-06-18T18:18:36.000Z
|
2021-12-22T08:01:32.000Z
|
import pytest
def vprintf_test(vamos):
if vamos.flavor == "agcc":
pytest.skip("vprintf not supported")
vamos.run_prog_check_data("vprintf")
| 19.75
| 44
| 0.689873
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 38
| 0.240506
|
f7704b3dce5cd94981cb7391a19b755c0df22b68
| 304
|
py
|
Python
|
test/socket_client.py
|
suxb201/Socks5_DNS_Test
|
a1cb8b5d8d998c6a029dc0b329418ecbb9a2fc72
|
[
"MIT"
] | 1
|
2020-11-09T02:08:04.000Z
|
2020-11-09T02:08:04.000Z
|
test/socket_client.py
|
suxb201/Socks5_DNS_Test
|
a1cb8b5d8d998c6a029dc0b329418ecbb9a2fc72
|
[
"MIT"
] | null | null | null |
test/socket_client.py
|
suxb201/Socks5_DNS_Test
|
a1cb8b5d8d998c6a029dc0b329418ecbb9a2fc72
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import socket
HOST = '127.0.0.1' # 服务器的主机名或者 IP 地址
PORT = 10009 # 服务器使用的端口
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
print(s)
s.connect((HOST, PORT))
s.sendall(b'Hello, world')
print(s)
data = s.recv(1024)
print('Received', repr(data))
| 19
| 60
| 0.651316
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 123
| 0.359649
|
f770883d6109ebd548cc44852ed0b4db7874c963
| 752
|
py
|
Python
|
util/statuschanger.py
|
MarkThe/Mark-Tools
|
c755d2b2e095b9f83fcbaba3ac74ec927bcddf26
|
[
"MIT"
] | 1
|
2022-01-04T18:09:50.000Z
|
2022-01-04T18:09:50.000Z
|
util/statuschanger.py
|
MarkThe/Mark-Tools
|
c755d2b2e095b9f83fcbaba3ac74ec927bcddf26
|
[
"MIT"
] | null | null | null |
util/statuschanger.py
|
MarkThe/Mark-Tools
|
c755d2b2e095b9f83fcbaba3ac74ec927bcddf26
|
[
"MIT"
] | null | null | null |
import requests
import Mark
from colorama import Fore
from util.plugins.common import print_slow, getheaders, proxy
def StatusChanger(token, Status):
#change status
CustomStatus = {"custom_status": {"text": Status}} #{"text": Status, "emoji_name": "☢"} if you want to add an emoji to the status
try:
r = requests.patch("https://discord.com/api/v9/users/@me/settings", proxies={"ftp": f'{proxy()}'}, headers=getheaders(token), json=CustomStatus)
print_slow(f"\n{Fore.GREEN}Status changed to {Fore.WHITE}{Status}{Fore.GREEN} ")
except Exception as e:
print(f"{Fore.RED}Error:\n{e}\nOccurred while trying to change the status :/")
print("Enter anything to continue. . . ", end="")
input()
Mark.main()
| 44.235294
| 152
| 0.676862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 355
| 0.470822
|
f77168d5a15a1187d94edfc593ed43416e3d8946
| 1,979
|
py
|
Python
|
recipes/recipe_modules/cloudbuildhelper/test_api.py
|
allaparthi/monorail
|
e18645fc1b952a5a6ff5f06e0c740d75f1904473
|
[
"BSD-3-Clause"
] | null | null | null |
recipes/recipe_modules/cloudbuildhelper/test_api.py
|
allaparthi/monorail
|
e18645fc1b952a5a6ff5f06e0c740d75f1904473
|
[
"BSD-3-Clause"
] | null | null | null |
recipes/recipe_modules/cloudbuildhelper/test_api.py
|
allaparthi/monorail
|
e18645fc1b952a5a6ff5f06e0c740d75f1904473
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from hashlib import sha256
from recipe_engine import recipe_test_api
class CloudBuildHelperTestApi(recipe_test_api.RecipeTestApi):
  """Canned JSON outputs mimicking the `cloudbuildhelper` tool in tests."""

  def build_success_output(self, image, target='target', canonical_tag=None):
    """Output of a successful `build` invocation.

    Args:
      image: optional object with .image/.digest/.tag attributes; when
        falsy, a fake image is synthesized from `target`.
      target: name used in synthesized URLs and digests.
      canonical_tag: tag for the synthesized image; the special value
        ':inputs-hash' is replaced by a fake inputs-hash tag.
    """
    if not image:
      img = 'example.com/fake-registry/%s' % target
      # Fix: hashlib requires bytes on Python 3 — sha256(str) raises
      # TypeError there. Encoding is a no-op for ASCII on Python 2.
      digest = 'sha256:'+sha256(target.encode('utf-8')).hexdigest()[:16]+'...'
      tag = canonical_tag
      if tag == ':inputs-hash':
        tag = 'cbh-inputs-deadbead...'
    else:
      img = image.image
      digest = image.digest
      tag = image.tag
    out = {'view_build_url': 'https://example.com/build/%s' % target}
    if img:
      out['image'] = {'image': img, 'digest': digest, 'tag': tag}
      out['view_image_url'] = 'https://example.com/image/%s' % target
    return self.m.json.output(out)

  def build_error_output(self, message, target='target'):
    """Output of a failed `build` invocation carrying `message`."""
    return self.m.json.output({
        'error': message,
        'view_build_url': 'https://example.com/build/%s' % target,
    })

  def upload_success_output(self, tarball, target='target', canonical_tag=None):
    """Output of a successful `upload` invocation.

    Args:
      tarball: optional object with .name/.sha256/.bucket/.path/.version;
        when falsy, fake tarball metadata is synthesized from `target`.
      target: name used in synthesized paths and digests.
      canonical_tag: version tag for the synthesized tarball; defaults to
        '11111-deadbeef'.
    """
    if not tarball:
      name = 'example/%s' % target
      # Fix: encode before hashing (Python 3 compatibility, see above).
      digest = sha256(name.encode('utf-8')).hexdigest()[:16]+'...'
      bucket = 'example'
      path = 'tarballs/example/%s/%s.tar.gz' % (target, digest)
      tag = canonical_tag or '11111-deadbeef'
    else:
      name = tarball.name
      digest = tarball.sha256
      bucket = tarball.bucket
      path = tarball.path
      tag = tarball.version
    return self.m.json.output({
        'name': name,
        'sha256': digest,
        'gs': {
            'bucket': bucket,
            'name': path,
        },
        'canonical_tag': tag,
    })

  def upload_error_output(self, message):
    """Output of a failed `upload` invocation carrying `message`."""
    return self.m.json.output({'error': message})

  def update_pins_output(self, updated):
    """Output of `pins-update`; `updated` lists the refreshed pins."""
    return self.m.json.output({'updated': updated or []})
| 30.921875
| 80
| 0.632137
| 1,743
| 0.880748
| 0
| 0
| 0
| 0
| 0
| 0
| 573
| 0.28954
|
f7716d154c4129f506d04590e1524fcb8b2888bb
| 7,011
|
py
|
Python
|
tabbi/gmm.py
|
Yu-AnChen/tabbi
|
bf4655905d0f3fc5b7dd49a1cd12c69cb83e5bb5
|
[
"MIT"
] | null | null | null |
tabbi/gmm.py
|
Yu-AnChen/tabbi
|
bf4655905d0f3fc5b7dd49a1cd12c69cb83e5bb5
|
[
"MIT"
] | null | null | null |
tabbi/gmm.py
|
Yu-AnChen/tabbi
|
bf4655905d0f3fc5b7dd49a1cd12c69cb83e5bb5
|
[
"MIT"
] | null | null | null |
import sklearn.mixture
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import ticker
import matplotlib.patheffects as mpatheffects
def get_gmm_and_pos_label(
    array, n_components=2, n_steps=5000
):
    """Fit a spherical Gaussian mixture to *array* and find cutoffs.

    Returns a tuple ``(gmm, label, cutoffs)``: the fitted model, the index
    of the component with the largest mean (the "positive" component), and
    the points where the predicted component changes, sampled on a grid of
    *n_steps* points spanning [min_mean - 2*std, max_mean + 2*std].
    """
    model = sklearn.mixture.GaussianMixture(
        n_components=n_components, covariance_type='spherical', random_state=0
    )
    model.fit(array.reshape(-1, 1))
    pos_label = np.argmax(model.means_)
    # Span two standard deviations beyond the extreme component means.
    lo = model.means_.min() - 2*np.sqrt(model.covariances_[np.argmin(model.means_)])
    hi = model.means_.max() + 2*np.sqrt(model.covariances_[np.argmax(model.means_)])
    grid = np.linspace(lo, hi, n_steps)
    grid_labels = model.predict(grid.reshape(-1, 1))
    # A cutoff is any grid point where consecutive predictions differ.
    change_idx = np.where(np.ediff1d(grid_labels) != 0)
    return model, pos_label, grid[change_idx]
def _get_gmm_and_pos_label(array, n_components=2):
    """Legacy variant of get_gmm_and_pos_label.

    Assumes *array* is already log1p-transformed (the grid is built in
    linear space and re-logged) — TODO confirm against callers.

    Returns ``(gmm, label, cutoffs)``: the fitted model, the index of the
    component with the largest mean, and the grid points where the
    predicted component changes.
    """
    gmm = sklearn.mixture.GaussianMixture(
        n_components=n_components, covariance_type='spherical', random_state=0
    )
    gmm.fit(array.reshape(-1, 1))
    label = np.argmax(gmm.means_)
    # Build an integer-spaced grid in linear space, then map back to log1p
    # space so it matches the fitted data.
    low = np.expm1(array.min())
    high = np.expm1(array.max())
    ref_space = np.log1p(np.arange(low, high))
    result = gmm.predict(ref_space.reshape(-1, 1))
    idx = np.where(np.ediff1d(result) != 0)
    _cutoffs = ref_space[idx]
    # Fix: removed two abandoned cutoff-selection strategies — one was
    # unreachable (after the return) and the other computed unused values
    # that could raise ValueError (`cutoffs.max()` on an empty selection).
    return gmm, label, _cutoffs
def plot_gmm_fitting(array, gmm, ax):
    """Overlay the fitted GMM densities on a histogram of *array*.

    Draws a density-normalized histogram, each component's weighted pdf
    as a colored line, and the total mixture pdf as a dashed black line.
    Returns *ax* for chaining.
    """
    plt.sca(ax)
    _ = plt.hist(array.flatten(), color='lightgray', bins=200, density=True)
    # Evaluate the mixture on a 200-point grid spanning the data range.
    x = np.linspace(array.min(), array.max(), 200)
    log_prob = gmm.score_samples(x.reshape(-1, 1))
    responsibilities = gmm.predict_proba(x.reshape(-1, 1))
    pdf = np.exp(log_prob)
    # Per-component density = responsibility * total density.
    pdf_individual = responsibilities * pdf[:, np.newaxis]
    # Rank components by where along x their density peaks, so colors are
    # assigned in a stable left-to-right order — presumably to keep colors
    # consistent across panels; TODO confirm intent.
    mean_index = np.argmax(pdf_individual, axis=0)
    rank_map = mean_index.argsort().argsort()
    ax.set_prop_cycle(
        color=plt.get_cmap('Dark2')(rank_map)
    )
    ax.plot(x, pdf_individual)
    ax.plot(x, pdf, '--k')
    return ax
def auto_gate_func(array, n_components=3, n_stds=3, log_transform=True):
    """Compute a gating threshold from a Gaussian mixture fit.

    Fits *n_components* spherical Gaussians (optionally in log1p space)
    and returns the mean of the highest component minus *n_stds* of its
    standard deviation, mapped back to linear space when log-transformed.
    """
    mixture = sklearn.mixture.GaussianMixture(
        n_components=n_components, covariance_type='spherical', random_state=0
    )
    samples = np.log1p(array) if log_transform else array
    mixture.fit(samples.reshape(-1, 1))
    top = np.argmax(mixture.means_)
    spread = np.sqrt(mixture.covariances_)[top]
    threshold = mixture.means_[top] - n_stds * spread
    return np.expm1(threshold) if log_transform else threshold
def plot_cumulative(array, ax, hist_kwargs=None):
    """Draw a cumulative step histogram of *array* on *ax*.

    Args:
        array: 1-D data to histogram.
        ax: target matplotlib Axes.
        hist_kwargs: optional extra keyword arguments forwarded to
            ``ax.hist``. Fix: previously defaulted to a shared mutable
            dict (``{}``); use a None sentinel instead (backward
            compatible, avoids cross-call mutation).

    Returns *ax* for chaining.
    """
    if hist_kwargs is None:
        hist_kwargs = {}
    # Use scientific notation (mathtext) for the count axis labels.
    formatter = ticker.ScalarFormatter(useMathText=True)
    formatter.set_scientific(True)
    formatter.set_powerlimits((-1, 1))
    ax.yaxis.set_major_formatter(formatter)
    _ = ax.hist(array, histtype='step', bins=300, cumulative=1, **hist_kwargs)
    return ax
def gmm_label_map_by_mean(gmm):
    """Pair each rank position with the component index holding that rank.

    Returns ``{i: j}`` where ``j`` is the index of the component at rank
    ``i`` when components are sorted by ascending mean.
    """
    n = len(gmm.means_)
    by_mean = sorted(range(n), key=lambda i: gmm.means_[i][0])
    return dict(zip(range(n), by_mean))


def sort_predict_label(gmm, labels):
    """Relabel predicted component ids using the by-mean mapping.

    Fix: the original called ``dict.iteritems()``, which does not exist on
    Python 3 dicts (AttributeError); use ``items()``.

    Returns a relabeled copy of *labels*; the input array is not mutated.
    """
    mapping = gmm_label_map_by_mean(gmm)
    sorted_labels = labels.copy()
    for old, new in mapping.items():
        sorted_labels[labels == old] = new
    return sorted_labels
def plot_hist_gmm(
    df,
    markers,
    n_components=2,
    subplot_grid_shape=None,
    transform_log=True,
    xlim_percentiles=(0, 100),
    cum_density=False,
    hide_yaxis_left=True
):
    """Plot per-marker GMM fits with cutoff annotations on a subplot grid.

    For each column name in *markers*, fits an n-component GMM (see
    get_gmm_and_pos_label), draws the fit over a histogram, overlays a
    cumulative histogram on a twin y-axis, and marks each detected cutoff
    with a colored vertical line, wedge arrow, and numeric label.

    Args:
        df: DataFrame with one column per marker.
        markers: column names to plot, one subplot each.
        n_components: number of GMM components per marker.
        subplot_grid_shape: (rows, cols) for the grid; defaults to one row.
        transform_log: fit in log1p space and report cutoffs via expm1.
        xlim_percentiles: (low, high) percentiles bounding the shared x-axis.
        cum_density: normalize the cumulative histogram to a density.
        hide_yaxis_left: hide the left (density) axis of each subplot.
    """
    if transform_log:
        df = df.transform(np.log1p)
        revert_func = np.expm1
    else:
        revert_func = np.array
    if subplot_grid_shape is None:
        subplot_grid_shape = (1, len(markers))
    n_rows, n_cols = subplot_grid_shape
    fig, axes = plt.subplots(n_rows, n_cols, sharex=True)
    # np.array so a single Axes also supports .ravel().
    axes = np.array(axes)
    for m, ax in zip(markers, axes.ravel()):
        gmm, _, cutoffs = get_gmm_and_pos_label(
            df[m].values, n_components=n_components
        )
        plot_gmm_fitting(df[m].values, gmm, ax)
        ax.title.set_text(m)
        if hide_yaxis_left:
            ax.yaxis.set_visible(False)
        # Shared x-limits computed across ALL markers, not just this one.
        p1, p2 = np.array(xlim_percentiles) / 100
        axis_min = df.loc[:, markers].quantile(p1).min()
        axis_max = df.loc[:, markers].quantile(p2).max()
        color_cum = 'gray'
        # Cumulative histogram on a secondary y-axis.
        pax = ax.twinx()
        pax = plot_cumulative(
            df[m].values, pax,
            hist_kwargs=dict(color=color_cum, density=cum_density)
        )
        pax.tick_params(axis='y', labelsize=8, colors=color_cum)
        print(cutoffs)
        # Color cutoffs by their relative position; guard against a zero
        # range (single cutoff) to avoid division by zero.
        cutoff_range = np.ptp(cutoffs)
        if cutoff_range == 0: cutoff_range = 1
        cutoff_colors = plt.get_cmap('plasma')(
            (cutoffs - np.min(cutoffs)) / cutoff_range
        )
        # Vertical line plus a wedge arrow below the axis for each cutoff.
        for co, cc in zip(cutoffs, cutoff_colors):
            ax.axvline(x=co, c=cc, alpha=0.2)
            ax.annotate(
                '',
                xy=(co, 0), xytext=(co, -0.05),
                xycoords=('data', 'axes fraction'),
                arrowprops=dict(arrowstyle='wedge, tail_width=0.7, shrink_factor=0.5', color=cc)
            )
        ax.set_xlim(axis_min, axis_max)
        # cutoff_string = np.round(revert_func(cutoffs)).astype(int)
        # Print the (back-transformed) cutoff values in the top-left
        # corner, highest cutoff first, matching the line colors.
        for i, (co, cc) in enumerate(
            zip(revert_func(cutoffs)[::-1], cutoff_colors[::-1])
        ):
            text = ax.text(
                ax.get_xlim()[0] + 0.02*np.diff(ax.get_xlim()),
                ax.get_ylim()[1] - 0.05*(i+1)*np.diff(ax.get_ylim()),
                f'{np.round(co).astype(int)}',
                fontsize=10, c=cc
            )
            # Black outline keeps the label readable over the histogram.
            text_outline = mpatheffects.Stroke(linewidth=1, foreground='#000')
            text.set_path_effects(
                [text_outline, mpatheffects.Normal()]
            )
    plt.tight_layout()
    # Style every axes (including the twins): gray right spine, and move
    # the scientific-notation exponent into an annotation.
    for aax in fig.axes:
        aax.spines['right'].set_color(color_cum)
        power_label = aax.yaxis.get_offset_text()
        power_label.set_visible(False)
        aax.annotate(
            power_label.get_text(), xy=(1.02, 1.01),
            xycoords='axes fraction', fontsize=10,
            color=color_cum
        )
    plt.sca(ax)
| 31.581081
| 97
| 0.601341
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 384
| 0.054771
|
f7745a348fc7e9affea625ab9bda06298308eebf
| 4,627
|
py
|
Python
|
openGaussBase/testcase/SQL/DML/upsert/Opengauss_Function_DML_Upsert_Case0131.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/SQL/DML/upsert/Opengauss_Function_DML_Upsert_Case0131.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/SQL/DML/upsert/Opengauss_Function_DML_Upsert_Case0131.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : upsert子查询功能
Case Name : upsert子查询for update语法验证
Description :
1、初始创建测试数据
2、session1以线程方式启动,事务内部相关子查询for update后等待20s
3、session1开始后等待5s,session2对session1相关的锁定行进行update
4、验证session1提交后,session2才提交;
5、session1事务执行结果验证
6、验证session2是在session1事务提交后,才进行的update
Expect :
1、初始创建测试数据乘公共
2、session1以线程方式启动,事务内部相关子查询for update后等待20s,session1开始执行
3、session1开始后等待5s,session2对session1相关的锁定行进行update,session2开始执行
4、验证session1提交后,session2才提交;session2事务提交总时长大于10s
5、session1事务正常提交
6、select方式验证,session1的update结果是session2提交之前的数据;
History :
"""
import time
import unittest
from testcase.utils.ComThread import ComThread
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
class UpsertCase131(unittest.TestCase):
    """Upsert subquery `for update` locking test (openGauss).

    Verifies that an `insert ... on duplicate key update` whose update
    value comes from a `select ... for update` subquery holds a row lock:
    a concurrent update of the locked row must block until the upsert
    transaction commits.
    """

    def setUp(self):
        # Test fixtures: logger, primary-node SQL shell, constants, and
        # the two table names used throughout.
        self.log = Logger()
        self.log.info('----Opengauss_Function_DML_Upsert_Case0131:初始化----')
        self.pri_sh = CommonSH('PrimaryDbUser')
        self.constant = Constant()
        self.t1 = 't_dml_upsert_sub0131'   # subquery source table (locked rows)
        self.t2 = 't_dml_upsert0131'       # upsert target table (primary key on a)

    def test_main(self):
        # Step 1: create both tables and seed initial rows.
        self.log.info("--1、初始创建测试数据--")
        sql = f"drop table if exists {self.t1};" \
              f"create table {self.t1} (a int,b text);" \
              f"insert into {self.t1} values(generate_series(1,10)," \
              f"'b-'||generate_series(1,10));" \
              f"drop table if exists {self.t2};" \
              f"create table {self.t2} (a int primary key,b text,c text);" \
              f"insert into {self.t2} values (1,1,1),(2,2,2),(3,3,3);" \
              f"select * from {self.t2};select * from {self.t1};"
        result = self.pri_sh.execut_db_sql(sql)
        self.log.info(result)
        self.assertTrue("INSERT 0 10" in result and 'INSERT 0 3' in result)
        # Step 2: session1 runs in a background thread; its transaction
        # locks the t1 row via `for update` and then sleeps 20s before
        # committing, keeping the lock held.
        self.log.info("--2、session1以线程方式启动,事务内部相关子查询for update后等待20s--")
        sql1 = f"begin;" \
               f"insert into {self.t2} values(2) on duplicate key update " \
               f"b= (select b from {self.t1} where a = excluded.a for update);" \
               f"select pg_sleep(20);" \
               f"end;"
        self.log.info(sql1)
        session1_excute = ComThread(self.pri_sh.execut_db_sql, args=(sql1,))
        session1_excute.setDaemon(True)
        session1_excute.start()
        # Give session1 a head start so its lock is definitely taken.
        time.sleep(5)
        # Step 3: session2 updates the locked row; this call should block
        # until session1 commits (~15s remaining of the sleep).
        self.log.info("--3、session1开始后等待5s,session2对session1相关的锁定行进行update--")
        sql2 = f"begin;" \
               f"update {self.t1} set b ='bb-2' where a =2;" \
               f"end;"
        self.log.info(sql2)
        start_time = time.time()
        session2_result = self.pri_sh.execut_db_sql(sql2)
        self.assertIn(self.constant.COMMIT_SUCCESS_MSG, session2_result)
        # Step 4: session2 must have waited; >10s proves it blocked on the
        # row lock rather than proceeding immediately.
        self.log.info("--4、验证session1提交后,session2才提交;session2事务提交总时长大于10s--")
        self.log.info(session2_result)
        end_time = time.time()
        self.log.info('start_time:' + str(start_time) +
                      ';end_time:' + str(end_time))
        self.log.info('session2执行等待时长' + str(end_time - start_time))
        self.assertTrue(end_time - start_time > 10)
        # Step 5: session1's transaction must have committed successfully.
        self.log.info("--5、session1事务执行结果--")
        session1_excute.join()
        session1_result = session1_excute.get_result()
        self.log.info(session1_result)
        self.assertIn(self.constant.COMMIT_SUCCESS_MSG, session1_result)
        # Step 6: t2 holds the pre-session2 value ('b-2'), proving the
        # upsert read the row before session2's update; t1 holds 'bb-2'.
        self.log.info("--6、验证session2是在session1事务提交后,才进行的update--")
        sql3 = f"select * from {self.t2} where a = 2;"
        result3 = self.pri_sh.execut_db_sql(sql3)
        self.log.info(result3)
        self.assertIn("b-2", result3)
        sql4 = f"select * from {self.t1} where a = 2;"
        result4 = self.pri_sh.execut_db_sql(sql4)
        self.log.info(result4)
        self.assertIn("bb-2", result4)

    def tearDown(self):
        # Drop both test tables regardless of test outcome.
        self.log.info("--清理测试数据--")
        clean_sql = f"drop table if exists {self.t1};" \
                    f"drop table if exists {self.t2};"
        clean_result = self.pri_sh.execut_db_sql(clean_sql)
        self.log.info(clean_result)
        self.log.info('----Opengauss_Function_DML_Upsert_Case0131:用例执行完毕----')
| 39.211864
| 84
| 0.653555
| 3,516
| 0.67112
| 0
| 0
| 0
| 0
| 0
| 0
| 2,951
| 0.563275
|
f7746001d5f89f7418b92bab28a281a421e0564a
| 6,892
|
py
|
Python
|
multi_traductor.py
|
Jalagarto/translator
|
d35cde0934c4ab94204d6dfdf4e7d6c0bcd6291b
|
[
"Apache-2.0"
] | null | null | null |
multi_traductor.py
|
Jalagarto/translator
|
d35cde0934c4ab94204d6dfdf4e7d6c0bcd6291b
|
[
"Apache-2.0"
] | null | null | null |
multi_traductor.py
|
Jalagarto/translator
|
d35cde0934c4ab94204d6dfdf4e7d6c0bcd6291b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import tkinter as tk
from tkinter import messagebox as msg
from tkinter.ttk import Notebook
from tkinter import ttk
import tkinter.font as font
import requests
class LanguageTab(tk.Frame):
    """Notebook page holding the translation for one target language."""

    def __init__(self, master, lang_name, lang_code):
        super().__init__(master)
        # Apply the shared widget font settings for this frame.
        for pattern, font_spec in (
            ("*Font", ("courier", 16, 'bold')),
            ("*Label.Font", ("arial", 12)),
            ("*Button.Font", ("arial", 14, 'bold')),
            ("*Tab.Font", ("arial", 14, 'bold')),
            ("*Frame.Font", ("courier", 14, 'bold')),
        ):
            self.option_add(pattern, font_spec)

        self.lang_name = lang_name
        self.lang_code = lang_code

        # Holds the latest translation text for this language.
        self.translation_var = tk.StringVar(self)
        self.translation_var.set("")

        # Widgets are created but intentionally not packed (original
        # behavior: the pack calls were commented out).
        self.copy_button = tk.Button(self, text="Copy to Clipboard", command=self.copy_to_clipboard)
        self.translated_label = tk.Label(self, textvar=self.translation_var, bg="lightgrey", fg="black")

    def copy_to_clipboard(self):
        """Copy the current translation to the system clipboard."""
        toplevel = self.winfo_toplevel()
        toplevel.clipboard_clear()
        toplevel.clipboard_append(self.translation_var.get())
        msg.showinfo("Copied Successfully", "Text copied to clipboard")
class NewLanguageForm(tk.Toplevel):
    """Popup form for registering an additional target language."""

    def __init__(self, master):
        super().__init__()
        self.master = master
        self.title("Add new Language")
        self.geometry("500x200")

        self.name_label = tk.Label(self, text="Language Name")
        self.name_entry = tk.Entry(self, bg="white", fg="black")
        self.code_label = tk.Label(self, text="Language Code")
        self.code_entry = tk.Entry(self, bg="white", fg="black")
        self.submit_button = tk.Button(self, text="Submit", command=self.submit)

        for widget in (self.name_label, self.name_entry,
                       self.code_label, self.code_entry):
            widget.pack(fill=tk.BOTH, expand=1)
        self.submit_button.pack(fill=tk.X)

    def submit(self):
        """Validate the form and add the new language to the menu."""
        lang_name = self.name_entry.get()
        lang_code = self.code_entry.get()
        if not (lang_name and lang_code):
            msg.showerror("Missing Information", "Please add both a name and code")
            return
        new_tab = LanguageTab(self.master, lang_name, lang_code)
        self.master.languages_menu.add_command(
            label=lang_name,
            command=lambda: self.master.add_new_tab(new_tab),
        )
        msg.showinfo("Language Option Added", "Language option " + lang_name + " added to menu")
        self.destroy()
class TranslateBook(tk.Tk):
    """Main window: translates Spanish source text into multiple languages."""

    def __init__(self):
        super().__init__()
        self.title("Translation Book v3")
        self.geometry("1000x700")
        # Shared widget fonts.
        self.option_add("*Font", ("courier", 16, 'bold'))
        self.option_add("*Label.Font", ("arial", 12))
        self.option_add("*Button.Font", ("arial", 14, 'bold'))
        self.option_add("*Tab.Font", ("arial", 14, 'bold'))
        self.option_add("*Frame.Font", ("courier", 14, 'bold'))
        self.notestyle = ttk.Style()
        self.notestyle.configure("TNotebook.Tab", borderwidth=2, font=("times", 18), padding=(15, 7))
        self.myFont2 = font.Font(size=16)

        # "Languages" menu with a few preset targets plus an "Add New" form.
        self.menu = tk.Menu(self, bg="lightgrey", fg="black")
        self.languages_menu = tk.Menu(self.menu, tearoff=0, bg="lightgrey", fg="black")
        self.languages_menu.add_command(label="Add New", command=self.show_new_language_popup)
        self.languages_menu.add_command(label="Francés", command=lambda: self.add_new_tab(LanguageTab(self, "Francés", "fr")))
        self.languages_menu.add_command(label="Catalan", command=lambda: self.add_new_tab(LanguageTab(self, "Catalan", "ca")))
        self.languages_menu.add_command(label="Portugues", command=lambda: self.add_new_tab(LanguageTab(self, "Portugues", "pt")))
        self.languages_menu.add_command(label="Aleman", command=lambda: self.add_new_tab(LanguageTab(self, "Aleman", "de")))
        self.languages_menu['font'] = self.myFont2
        self.menu.add_cascade(label="Languages", menu=self.languages_menu)
        self.config(menu=self.menu)

        # Notebook: first tab is the Spanish input, later tabs are outputs.
        self.notebook = Notebook(self)
        self.language_tabs = []
        spanish_tab = tk.Frame(self.notebook)
        myFont = font.Font(family='courier', size=18, weight='bold')
        self.translate_button = tk.Button(spanish_tab, text="Translate", command=self.translate, height=2)
        self.translate_button.pack(side=tk.BOTTOM, fill=tk.X)
        self.translate_button['font'] = myFont
        self.spanish_entry = tk.Text(spanish_tab, bg="white", fg="black")
        self.spanish_entry.pack(side=tk.TOP, expand=1)
        self.notebook.add(spanish_tab, text="Español")
        self.notebook.pack(fill=tk.BOTH, expand=1)

    def translate(self, text=None):
        """Translate *text* (default: the Spanish entry) into every open tab.

        Shows an info dialog per translation and an error dialog on failure.
        """
        if len(self.language_tabs) < 1:
            msg.showerror("No Languages", "No languages added. Please add some from the menu")
            return
        if not text:
            text = self.spanish_entry.get(1.0, tk.END).strip()
        base_url = "https://translate.googleapis.com/translate_a/single"
        try:
            for language in self.language_tabs:
                # Fix: build the query with `params` so the text is
                # URL-encoded; interpolating raw text into the URL broke
                # (or silently truncated) on spaces, '&', '#', '?', etc.
                r = requests.get(base_url, params={
                    "client": "gtx",
                    "sl": "es",
                    "tl": language.lang_code,
                    "dt": "t",
                    "q": text,
                })
                r.raise_for_status()
                translation = r.json()[0][0][0]
                language.translation_var.set(translation)
                msg.showinfo("translation", translation)
        except Exception as e:
            msg.showerror("Translation Failed", str(e))

    def add_new_tab(self, tab):
        """Attach *tab* to the notebook and disable its menu entry."""
        self.language_tabs.append(tab)
        self.notebook.add(tab, text=tab.lang_name)
        try:
            self.languages_menu.entryconfig(tab.lang_name, state="disabled")
        except tk.TclError:
            # Fix: narrowed from a bare except. entryconfig raises
            # TclError when the language isn't in the menu (e.g. the
            # default English tab); anything else should propagate.
            pass

    def show_new_language_popup(self):
        """Open the 'add new language' form."""
        NewLanguageForm(self)
if __name__ == "__main__":
    translatebook = TranslateBook()
    # Start with an English output tab added by default.
    english_tab = LanguageTab(translatebook, "Inglés", "en")
    translatebook.add_new_tab(english_tab)
    # german_tab = LanguageTab(translatebook, "Alemán", "de")
    # translatebook.add_new_tab(german_tab)
    translatebook.mainloop()

# language codes --> https://www.labnol.org/code/19899-google-translate-languages
| 43.075
| 130
| 0.642339
| 6,313
| 0.915193
| 0
| 0
| 0
| 0
| 0
| 0
| 1,703
| 0.246883
|