hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bff90483e6a444cb5f10ac00e642caca1f6c71ca | 504 | py | Python | tests/test_basic_shell.py | TheFriendlyCoder/FriendlyShell | 8508c09e787cb8f0fc44c2a9e2587e8d4eec555c | [
"Apache-2.0"
] | null | null | null | tests/test_basic_shell.py | TheFriendlyCoder/FriendlyShell | 8508c09e787cb8f0fc44c2a9e2587e8d4eec555c | [
"Apache-2.0"
] | 93 | 2018-04-21T01:03:06.000Z | 2019-06-23T14:22:37.000Z | tests/test_basic_shell.py | TheFriendlyCoder/FriendlyShell | 8508c09e787cb8f0fc44c2a9e2587e8d4eec555c | [
"Apache-2.0"
] | null | null | null | import platform
from friendlyshell.basic_shell import BasicShell
import pytest
from mock import patch
@pytest.mark.skipif(platform.python_implementation()=="PyPy",
reason="Test not supported on PyPy")
if __name__ == "__main__":
pytest.main([__file__, "-v", "-s"]) | 26.526316 | 62 | 0.668651 | import platform
from friendlyshell.basic_shell import BasicShell
import pytest
from mock import patch
@pytest.mark.skipif(platform.python_implementation()=="PyPy",
reason="Test not supported on PyPy")
def test_init():
obj = BasicShell()
with patch('friendlyshell.base_shell.input') as MockInput:
MockInput.side_effect = ['exit!', 'exit']
obj.run()
assert MockInput.call_count == 2
if __name__ == "__main__":
pytest.main([__file__, "-v", "-s"]) | 191 | 0 | 22 |
e71fe983b798e8dfec73e6f334a585552f9fc45f | 1,796 | py | Python | block/block.py | Mechasparrow/Sparkles | f2bdcc0019da1f8dea2f3d9209c51c28a0b6a1e6 | [
"MIT"
] | null | null | null | block/block.py | Mechasparrow/Sparkles | f2bdcc0019da1f8dea2f3d9209c51c28a0b6a1e6 | [
"MIT"
] | 14 | 2017-11-22T04:34:59.000Z | 2018-02-13T02:51:30.000Z | block/block.py | Mechasparrow/Sparkles | f2bdcc0019da1f8dea2f3d9209c51c28a0b6a1e6 | [
"MIT"
] | null | null | null | import json
import hashlib
import base64
| 29.442623 | 163 | 0.601893 | import json
import hashlib
import base64
class Block:
def __init__(self, index, timestamp, data, prev_hash, hash=None, nonce=0):
self.index = index
self.timestamp = timestamp
self.data = data
self.prev_hash = prev_hash
self.hash = hash
self.nonce = nonce
def header_string(self):
return str(self.index) + str(self.prev_hash) + str(self.data) + str(self.timestamp) + str(self.nonce)
def create_self_hash(self):
sha = hashlib.sha256()
sha.update(self.header_string().encode('utf-8'))
return sha.hexdigest()
def __dict__(self):
block_dict = {
"index": str(self.index),
"timestamp": str(self.timestamp),
"data": str(self.data),
"prev_hash": str(self.prev_hash),
"hash": str(self.hash),
"nonce": str(self.nonce)
}
return block_dict
def __str__(self):
block_dict = self.__dict__()
block_json = json.dumps(block_dict)
return block_json
def load_from_file(block_path):
block_file = open(block_path, 'rb')
block_string = block_file.read().decode('utf-8')
block = Block.from_json(block_string)
return block
def from_dict(block_dict):
block = Block(int(block_dict['index']), block_dict['timestamp'], block_dict['data'], block_dict['prev_hash'], block_dict['hash'], int(block_dict['nonce']))
return block
def from_json(block_json):
block_dict = json.loads(block_json)
block = Block.from_dict(block_dict)
return block
def valid_block(self, NUM_OF_ZEROS = 4):
if (str(self.hash[0:NUM_OF_ZEROS]) == '0' * NUM_OF_ZEROS):
return True
else:
return False
| 1,499 | -9 | 265 |
ed3b5929aff7dcafb1020e00298084831d34d5d6 | 10,294 | py | Python | tests/app/tests/test_webpack.py | codetigerco/django-webpack-loader | 5e14eb3b82a92226b27788a08ac8bf08d6eb131e | [
"MIT"
] | null | null | null | tests/app/tests/test_webpack.py | codetigerco/django-webpack-loader | 5e14eb3b82a92226b27788a08ac8bf08d6eb131e | [
"MIT"
] | null | null | null | tests/app/tests/test_webpack.py | codetigerco/django-webpack-loader | 5e14eb3b82a92226b27788a08ac8bf08d6eb131e | [
"MIT"
] | 3 | 2020-06-19T15:41:19.000Z | 2020-10-05T08:58:10.000Z | import json
import os
import time
from subprocess import call
from threading import Thread
import django
from django.conf import settings
from django.test import RequestFactory, TestCase
from django.views.generic.base import TemplateView
from django_jinja.builtins import DEFAULT_EXTENSIONS
from unittest2 import skipIf
from webpack_loader.exceptions import (
WebpackError,
WebpackLoaderBadStatsError,
WebpackLoaderTimeoutError,
WebpackBundleLookupError
)
from webpack_loader.utils import get_loader
BUNDLE_PATH = os.path.join(settings.BASE_DIR, 'assets/bundles/')
DEFAULT_CONFIG = 'DEFAULT'
| 41.676113 | 146 | 0.61191 | import json
import os
import time
from subprocess import call
from threading import Thread
import django
from django.conf import settings
from django.test import RequestFactory, TestCase
from django.views.generic.base import TemplateView
from django_jinja.builtins import DEFAULT_EXTENSIONS
from unittest2 import skipIf
from webpack_loader.exceptions import (
WebpackError,
WebpackLoaderBadStatsError,
WebpackLoaderTimeoutError,
WebpackBundleLookupError
)
from webpack_loader.utils import get_loader
BUNDLE_PATH = os.path.join(settings.BASE_DIR, 'assets/bundles/')
DEFAULT_CONFIG = 'DEFAULT'
class LoaderTestCase(TestCase):
def setUp(self):
self.factory = RequestFactory()
def compile_bundles(self, config, wait=None):
if wait:
time.sleep(wait)
call(['./node_modules/.bin/webpack', '--config', config])
@skipIf(django.VERSION < (1, 7),
'not supported in this django version')
def test_config_check(self):
from webpack_loader.apps import webpack_cfg_check
from webpack_loader.errors import BAD_CONFIG_ERROR
with self.settings(WEBPACK_LOADER={
'BUNDLE_DIR_NAME': 'bundles/',
'STATS_FILE': 'webpack-stats.json',
}):
errors = webpack_cfg_check(None)
expected_errors = [BAD_CONFIG_ERROR]
self.assertEqual(errors, expected_errors)
with self.settings(WEBPACK_LOADER={
'DEFAULT': {}
}):
errors = webpack_cfg_check(None)
expected_errors = []
self.assertEqual(errors, expected_errors)
def test_simple_and_css_extract(self):
self.compile_bundles('webpack.config.simple.js')
assets = get_loader(DEFAULT_CONFIG).get_assets()
self.assertEqual(assets['status'], 'done')
self.assertIn('chunks', assets)
chunks = assets['chunks']
self.assertIn('main', chunks)
self.assertEqual(len(chunks), 1)
files = assets['assets']
self.assertEqual(files['main.css']['path'], os.path.join(settings.BASE_DIR, 'assets/bundles/main.css'))
self.assertEqual(files['main.js']['path'], os.path.join(settings.BASE_DIR, 'assets/bundles/main.js'))
def test_js_gzip_extract(self):
self.compile_bundles('webpack.config.gzipTest.js')
assets = get_loader(DEFAULT_CONFIG).get_assets()
self.assertEqual(assets['status'], 'done')
self.assertIn('chunks', assets)
chunks = assets['chunks']
self.assertIn('main', chunks)
self.assertEqual(len(chunks), 1)
files = assets['assets']
self.assertEqual(files['main.css']['path'], os.path.join(settings.BASE_DIR, 'assets/bundles/main.css'))
self.assertEqual(files['main.js.gz']['path'], os.path.join(settings.BASE_DIR, 'assets/bundles/main.js.gz'))
def test_static_url(self):
self.compile_bundles('webpack.config.publicPath.js')
assets = get_loader(DEFAULT_CONFIG).get_assets()
self.assertEqual(assets['status'], 'done')
self.assertEqual(assets['publicPath'], 'http://custom-static-host.com/')
def test_code_spliting(self):
self.compile_bundles('webpack.config.split.js')
assets = get_loader(DEFAULT_CONFIG).get_assets()
self.assertEqual(assets['status'], 'done')
self.assertIn('chunks', assets)
chunks = assets['chunks']
self.assertIn('main', chunks)
self.assertEquals(len(chunks), 1)
files = assets['assets']
self.assertEqual(files['main.js']['path'], os.path.join(settings.BASE_DIR, 'assets/bundles/main.js'))
self.assertEqual(files['vendors.js']['path'], os.path.join(settings.BASE_DIR, 'assets/bundles/vendors.js'))
def test_templatetags(self):
self.compile_bundles('webpack.config.simple.js')
self.compile_bundles('webpack.config.app2.js')
view = TemplateView.as_view(template_name='home.html')
request = self.factory.get('/')
result = view(request)
self.assertIn('<link type="text/css" href="/static/bundles/main.css" rel="stylesheet" />', result.rendered_content)
self.assertIn('<script type="text/javascript" src="/static/bundles/main.js" async charset="UTF-8"></script>', result.rendered_content)
self.assertIn('<link type="text/css" href="/static/bundles/app2.css" rel="stylesheet" />', result.rendered_content)
self.assertIn('<script type="text/javascript" src="/static/bundles/app2.js" ></script>', result.rendered_content)
self.assertIn('<img src="/static/my-image.png"/>', result.rendered_content)
view = TemplateView.as_view(template_name='only_files.html')
result = view(request)
self.assertIn("var contentCss = '/static/bundles/main.css'", result.rendered_content)
self.assertIn("var contentJS = '/static/bundles/main.js'", result.rendered_content)
self.compile_bundles('webpack.config.publicPath.js')
view = TemplateView.as_view(template_name='home.html')
request = self.factory.get('/')
result = view(request)
self.assertIn('<img src="http://custom-static-host.com/my-image.png"/>', result.rendered_content)
def test_jinja2(self):
self.compile_bundles('webpack.config.simple.js')
self.compile_bundles('webpack.config.app2.js')
view = TemplateView.as_view(template_name='home.jinja')
if django.VERSION >= (1, 8):
settings = {
'TEMPLATES': [
{
"BACKEND": "django_jinja.backend.Jinja2",
"APP_DIRS": True,
"OPTIONS": {
"match_extension": ".jinja",
"extensions": DEFAULT_EXTENSIONS + [
"webpack_loader.contrib.jinja2ext.WebpackExtension",
]
}
},
]
}
else:
settings = {
'TEMPLATE_LOADERS': (
'django_jinja.loaders.FileSystemLoader',
'django_jinja.loaders.AppLoader',
),
}
with self.settings(**settings):
request = self.factory.get('/')
result = view(request)
self.assertIn('<link type="text/css" href="/static/bundles/main.css" rel="stylesheet" />', result.rendered_content)
self.assertIn('<script type="text/javascript" src="/static/bundles/main.js" async charset="UTF-8"></script>', result.rendered_content)
def test_reporting_errors(self):
self.compile_bundles('webpack.config.error.js')
try:
get_loader(DEFAULT_CONFIG).get_bundle('main')
except WebpackError as e:
self.assertIn("Can't resolve 'the-library-that-did-not-exist'", str(e))
def test_missing_bundle(self):
missing_bundle_name = 'missing_bundle'
self.compile_bundles('webpack.config.simple.js')
try:
get_loader(DEFAULT_CONFIG).get_bundle(missing_bundle_name)
except WebpackBundleLookupError as e:
self.assertIn('Cannot resolve bundle {0}'.format(missing_bundle_name), str(e))
def test_missing_stats_file(self):
stats_file = settings.WEBPACK_LOADER[DEFAULT_CONFIG]['STATS_FILE']
if os.path.exists(stats_file):
os.remove(stats_file)
try:
get_loader(DEFAULT_CONFIG).get_assets()
except IOError as e:
expected = (
'Error reading {0}. Are you sure webpack has generated the '
'file and the path is correct?'
).format(stats_file)
self.assertIn(expected, str(e))
def test_timeouts(self):
with self.settings(DEBUG=True):
with open(
settings.WEBPACK_LOADER[DEFAULT_CONFIG]['STATS_FILE'], 'w'
) as stats_file:
stats_file.write(json.dumps({'status': 'compiling'}))
loader = get_loader(DEFAULT_CONFIG)
loader.config['TIMEOUT'] = 0.1
with self.assertRaises(WebpackLoaderTimeoutError):
loader.get_bundle('main')
def test_bad_status_in_production(self):
with open(
settings.WEBPACK_LOADER[DEFAULT_CONFIG]['STATS_FILE'], 'w'
) as stats_file:
stats_file.write(json.dumps({'status': 'unexpected-status'}))
try:
get_loader(DEFAULT_CONFIG).get_bundle('main')
except WebpackLoaderBadStatsError as e:
self.assertIn((
"The stats file does not contain valid data. Make sure "
"webpack-bundle-tracker plugin is enabled and try to run"
" webpack again."
), str(e))
def test_request_blocking(self):
# FIXME: This will work 99% time but there is no garauntee with the
# 4 second thing. Need a better way to detect if request was blocked on
# not.
wait_for = 4
view = TemplateView.as_view(template_name='home.html')
with self.settings(DEBUG=True):
open(settings.WEBPACK_LOADER[DEFAULT_CONFIG]['STATS_FILE'], 'w').write(json.dumps({'status': 'compiling'}))
then = time.time()
request = self.factory.get('/')
result = view(request)
t = Thread(target=self.compile_bundles, args=('webpack.config.simple.js', wait_for))
t2 = Thread(target=self.compile_bundles, args=('webpack.config.app2.js', wait_for))
t.start()
t2.start()
result.rendered_content
elapsed = time.time() - then
t.join()
t2.join()
self.assertTrue(elapsed >= wait_for)
with self.settings(DEBUG=False):
self.compile_bundles('webpack.config.simple.js')
self.compile_bundles('webpack.config.app2.js')
then = time.time()
request = self.factory.get('/')
result = view(request)
result.rendered_content
elapsed = time.time() - then
self.assertTrue(elapsed < wait_for)
| 9,156 | 503 | 23 |
07d1a77517a90a65b1582298f4f1fabcaea35154 | 871 | py | Python | zerver/views/attachments.py | ricardoteixeiraduarte/zulip | 149132348feda1c6929e94e72abb167cc882fc74 | [
"Apache-2.0"
] | 3 | 2018-12-04T01:44:43.000Z | 2019-05-13T06:16:21.000Z | zerver/views/attachments.py | ricardoteixeiraduarte/zulip | 149132348feda1c6929e94e72abb167cc882fc74 | [
"Apache-2.0"
] | 58 | 2018-11-27T15:18:54.000Z | 2018-12-09T13:43:07.000Z | zerver/views/attachments.py | ricardoteixeiraduarte/zulip | 149132348feda1c6929e94e72abb167cc882fc74 | [
"Apache-2.0"
] | 9 | 2019-11-04T18:59:29.000Z | 2022-03-22T17:46:37.000Z | from django.http import HttpRequest, HttpResponse
from zerver.models import UserProfile
from zerver.lib.actions import notify_attachment_update
from zerver.lib.validator import check_int
from zerver.lib.response import json_success
from zerver.lib.attachments import user_attachments, remove_attachment, \
access_attachment_by_id
| 41.47619 | 96 | 0.777268 | from django.http import HttpRequest, HttpResponse
from zerver.models import UserProfile
from zerver.lib.actions import notify_attachment_update
from zerver.lib.validator import check_int
from zerver.lib.response import json_success
from zerver.lib.attachments import user_attachments, remove_attachment, \
access_attachment_by_id
def list_by_user(request: HttpRequest, user_profile: UserProfile) -> HttpResponse:
return json_success({"attachments": user_attachments(user_profile)})
def remove(request: HttpRequest, user_profile: UserProfile, attachment_id: int) -> HttpResponse:
attachment = access_attachment_by_id(user_profile, attachment_id,
needs_owner=True)
notify_attachment_update(user_profile, "remove", {"id": attachment.id})
remove_attachment(user_profile, attachment)
return json_success()
| 488 | 0 | 46 |
84fd261465f62516dc0af20810d688ccad73a067 | 2,863 | py | Python | coyote_framework/util/apps/parallel.py | vaibhavrastogi1988/python_testing_framework | 583a2286479ed0ccda309c866a403dc92fa1bb3b | [
"MIT"
] | null | null | null | coyote_framework/util/apps/parallel.py | vaibhavrastogi1988/python_testing_framework | 583a2286479ed0ccda309c866a403dc92fa1bb3b | [
"MIT"
] | null | null | null | coyote_framework/util/apps/parallel.py | vaibhavrastogi1988/python_testing_framework | 583a2286479ed0ccda309c866a403dc92fa1bb3b | [
"MIT"
] | null | null | null | from multiprocessing import Process, Queue
import warnings
class ErrorInProcessException(RuntimeError):
"""Exception raised when one or more parallel processes raises an exception"""
def run_parallel(*functions):
"""Runs a series of functions in parallel. Return values are ordered by the order in which their functions
were passed.
>>> val1, val2 = run_parallel(
>>> lambda: 1 + 1
>>> lambda: 0
>>> )
If an exception is raised within one of the processes, that exception will be caught at the process
level and raised by the parent process as an ErrorInProcessException, which will track all errors raised in all
processes.
You can catch the exception raised for more details into the process exceptions:
>>> try:
>>> val1, val2 = run_parallel(fn1, fn2)
>>> except ErrorInProcessException, e:
>>> print.e.errors
@param functions: The functions to run specified as individual arguments
@return: List of results for those functions. Unpacking is recommended if you do not need to iterate over the
results as it enforces the number of functions you pass in.
>>> val1, val2 = run_parallel(fn1, fn2, fn3) # Will raise an error
>>> vals = run_parallel(fn1, fn2, fn3) # Will not raise an error
@raise: ErrorInProcessException
"""
errors = Queue()
queue = Queue()
jobs = list()
for i, function in enumerate(functions):
jobs.append(Process(target=target(function), args=(queue, errors, i)))
[job.start() for job in jobs]
[job.join() for job in jobs]
# Get the results in the queue and put them back in the order in which the function was specified in the args
results = [queue.get() for _ in jobs]
results = sorted(results, key=lambda x: x[0])
if not errors.empty():
error_list = list()
while not errors.empty():
error_list.append(errors.get())
raise ErrorInProcessException('Exceptions raised in parallel threads: {}'.format(error_list), errors=error_list)
return [r[1] for r in results] | 37.671053 | 120 | 0.648271 | from multiprocessing import Process, Queue
import warnings
class ErrorInProcessException(RuntimeError):
"""Exception raised when one or more parallel processes raises an exception"""
def __init__(self, message, errors, *args, **kwargs):
self.message = message
self.errors = errors
super(ErrorInProcessException, self).__init__(message, *args, **kwargs)
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.message, self.errors)
def run_parallel(*functions):
"""Runs a series of functions in parallel. Return values are ordered by the order in which their functions
were passed.
>>> val1, val2 = run_parallel(
>>> lambda: 1 + 1
>>> lambda: 0
>>> )
If an exception is raised within one of the processes, that exception will be caught at the process
level and raised by the parent process as an ErrorInProcessException, which will track all errors raised in all
processes.
You can catch the exception raised for more details into the process exceptions:
>>> try:
>>> val1, val2 = run_parallel(fn1, fn2)
>>> except ErrorInProcessException, e:
>>> print.e.errors
@param functions: The functions to run specified as individual arguments
@return: List of results for those functions. Unpacking is recommended if you do not need to iterate over the
results as it enforces the number of functions you pass in.
>>> val1, val2 = run_parallel(fn1, fn2, fn3) # Will raise an error
>>> vals = run_parallel(fn1, fn2, fn3) # Will not raise an error
@raise: ErrorInProcessException
"""
def target(fn):
def wrapped(results_queue, error_queue, index):
result = None
try:
result = fn()
except Exception as e: # Swallow errors or else the process will hang
error_queue.put(e)
warnings.warn('Exception raised in parallel threads: {}'.format(e))
results_queue.put((index, result))
return wrapped
errors = Queue()
queue = Queue()
jobs = list()
for i, function in enumerate(functions):
jobs.append(Process(target=target(function), args=(queue, errors, i)))
[job.start() for job in jobs]
[job.join() for job in jobs]
# Get the results in the queue and put them back in the order in which the function was specified in the args
results = [queue.get() for _ in jobs]
results = sorted(results, key=lambda x: x[0])
if not errors.empty():
error_list = list()
while not errors.empty():
error_list.append(errors.get())
raise ErrorInProcessException('Exceptions raised in parallel threads: {}'.format(error_list), errors=error_list)
return [r[1] for r in results] | 653 | 0 | 80 |
052059728aad75e7ecd44a97f4cab5c5b0b4bbc9 | 898 | py | Python | ms2ldaviz/update_features.py | RP0001/ms2ldaviz | 35ae516f5d3ec9d1a348e8308a4ea50f3ebcdfd7 | [
"MIT"
] | null | null | null | ms2ldaviz/update_features.py | RP0001/ms2ldaviz | 35ae516f5d3ec9d1a348e8308a4ea50f3ebcdfd7 | [
"MIT"
] | null | null | null | ms2ldaviz/update_features.py | RP0001/ms2ldaviz | 35ae516f5d3ec9d1a348e8308a4ea50f3ebcdfd7 | [
"MIT"
] | null | null | null | # Script to add the min and max mz values to feature objects
# takes as input the path of a dictionary file that must have a 'features' key
import os
import pickle
import numpy as np
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ms2ldaviz.settings")
import django
django.setup()
from basicviz.models import Feature,Experiment
if __name__ == '__main__':
infile = sys.argv[1]
with open(infile,'r') as f:
lda_dict = pickle.load(f)
experiment_name = infile.split('/')[-1].split('.')[0]
experiment = Experiment.objects.get(name = experiment_name)
features = lda_dict['features']
n_features = len(features)
ndone = 0
for feature in features:
f = Feature.objects.get(name = feature,experiment = experiment)
f.min_mz = features[feature][0]
f.max_mz = features[feature][1]
f.save()
ndone += 1
if ndone % 100 == 0:
print "Done {} of {}".format(ndone,n_features)
| 25.657143 | 78 | 0.717149 | # Script to add the min and max mz values to feature objects
# takes as input the path of a dictionary file that must have a 'features' key
import os
import pickle
import numpy as np
import sys
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ms2ldaviz.settings")
import django
django.setup()
from basicviz.models import Feature,Experiment
if __name__ == '__main__':
infile = sys.argv[1]
with open(infile,'r') as f:
lda_dict = pickle.load(f)
experiment_name = infile.split('/')[-1].split('.')[0]
experiment = Experiment.objects.get(name = experiment_name)
features = lda_dict['features']
n_features = len(features)
ndone = 0
for feature in features:
f = Feature.objects.get(name = feature,experiment = experiment)
f.min_mz = features[feature][0]
f.max_mz = features[feature][1]
f.save()
ndone += 1
if ndone % 100 == 0:
print "Done {} of {}".format(ndone,n_features)
| 0 | 0 | 0 |
c7a7e6ed0fe4cda560f6e62492631d7e16acbb55 | 1,184 | py | Python | builder/version.py | MyConbook/datatool | 1c12bb5124b48ae827c4832896fd81bf711ad44e | [
"Apache-2.0"
] | null | null | null | builder/version.py | MyConbook/datatool | 1c12bb5124b48ae827c4832896fd81bf711ad44e | [
"Apache-2.0"
] | null | null | null | builder/version.py | MyConbook/datatool | 1c12bb5124b48ae827c4832896fd81bf711ad44e | [
"Apache-2.0"
] | null | null | null | import os
import json
| 24.163265 | 112 | 0.717061 | import os
import json
class Version(object):
def __init__(self, options):
self.options = options
self.file_path = options.get_output_path("version.json")
def read(self):
self.is_new = not os.path.exists(self.file_path)
if not self.is_new:
# Read version file
ver_file = open(self.file_path, "r")
ver_str = ver_file.read()
ver_file.close()
ver_json = json.loads(ver_str)
self.map_ver = int(ver_json["mapver"])
self.database_ver = int(ver_json["dbver"])
self.calendar_checksum = ver_json["calhash"]
else:
self.map_ver = 0
self.database_ver = 0
self.calendar_checksum = None
def write(self):
new_json = json.dumps({"dbver": self.database_ver, "calhash": self.calendar_checksum, "mapver": self.map_ver})
if self.options.is_preview:
print new_json
return
json_file = open(self.file_path, "w")
json_file.write(new_json)
json_file.close()
def increment_db(self):
self.database_ver = self.database_ver + 1
def increment_map(self):
self.map_ver = self.map_ver + 1
def set_calendar_checksum(self, checksum):
if self.calendar_checksum == checksum:
return False
self.calendar_checksum = checksum
return True
| 995 | 1 | 166 |
942405e40afdd88558f03c151717b3c3dd3b1fe0 | 2,282 | py | Python | octavia-cli/octavia_cli/init/commands.py | faros-ai/airbyte | 2ebafe1817a71b5c0a8f8b6f448dbef9db708668 | [
"MIT"
] | 22 | 2020-08-27T00:47:20.000Z | 2020-09-17T15:39:39.000Z | octavia-cli/octavia_cli/init/commands.py | burmecia/airbyte | b9f79ccdf085ae69cff8743fc3f3191ef2b579d9 | [
"MIT"
] | 116 | 2020-08-27T01:11:27.000Z | 2020-09-19T02:47:52.000Z | octavia-cli/octavia_cli/init/commands.py | burmecia/airbyte | b9f79ccdf085ae69cff8743fc3f3191ef2b579d9 | [
"MIT"
] | 1 | 2020-09-15T06:10:01.000Z | 2020-09-15T06:10:01.000Z | #
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
import importlib.resources as pkg_resources
import os
from pathlib import Path
from typing import Iterable, Tuple
import click
from octavia_cli.base_commands import OctaviaCommand
from . import example_files
DIRECTORIES_TO_CREATE = {"connections", "destinations", "sources"}
DEFAULT_API_HEADERS_FILE_CONTENT = pkg_resources.read_text(example_files, "example_api_http_headers.yaml")
API_HTTP_HEADERS_TARGET_PATH = Path("api_http_headers.yaml")
@click.command(cls=OctaviaCommand, help="Initialize required directories for the project.")
@click.pass_context
| 38.677966 | 106 | 0.738826 | #
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
import importlib.resources as pkg_resources
import os
from pathlib import Path
from typing import Iterable, Tuple
import click
from octavia_cli.base_commands import OctaviaCommand
from . import example_files
DIRECTORIES_TO_CREATE = {"connections", "destinations", "sources"}
DEFAULT_API_HEADERS_FILE_CONTENT = pkg_resources.read_text(example_files, "example_api_http_headers.yaml")
API_HTTP_HEADERS_TARGET_PATH = Path("api_http_headers.yaml")
def create_api_headers_configuration_file() -> bool:
if not API_HTTP_HEADERS_TARGET_PATH.is_file():
with open(API_HTTP_HEADERS_TARGET_PATH, "w") as file:
file.write(DEFAULT_API_HEADERS_FILE_CONTENT)
return True
return False
def create_directories(directories_to_create: Iterable[str]) -> Tuple[Iterable[str], Iterable[str]]:
created_directories = []
not_created_directories = []
for directory in directories_to_create:
try:
os.mkdir(directory)
created_directories.append(directory)
except FileExistsError:
not_created_directories.append(directory)
return created_directories, not_created_directories
@click.command(cls=OctaviaCommand, help="Initialize required directories for the project.")
@click.pass_context
def init(ctx: click.Context):
click.echo("🔨 - Initializing the project.")
created_directories, not_created_directories = create_directories(DIRECTORIES_TO_CREATE)
if created_directories:
message = f"✅ - Created the following directories: {', '.join(created_directories)}."
click.echo(click.style(message, fg="green"))
if not_created_directories:
message = f"❓ - Already existing directories: {', '.join(not_created_directories) }."
click.echo(click.style(message, fg="yellow", bold=True))
created_api_http_headers_file = create_api_headers_configuration_file()
if created_api_http_headers_file:
message = f"✅ - Created API HTTP headers file in {API_HTTP_HEADERS_TARGET_PATH}"
click.echo(click.style(message, fg="green", bold=True))
else:
message = "❓ - API HTTP headers file already exists, skipping."
click.echo(click.style(message, fg="yellow", bold=True))
| 1,601 | 0 | 68 |
8315d2c7afb85dde1c99d1c84afee33fbbff73e6 | 5,734 | py | Python | applets/playgrounds/plots_playground.py | xenomarz/deep-signature | f831f05971727c5d00cf3b5c556b6a8b658048df | [
"MIT"
] | null | null | null | applets/playgrounds/plots_playground.py | xenomarz/deep-signature | f831f05971727c5d00cf3b5c556b6a8b658048df | [
"MIT"
] | null | null | null | applets/playgrounds/plots_playground.py | xenomarz/deep-signature | f831f05971727c5d00cf3b5c556b6a8b658048df | [
"MIT"
] | null | null | null | # python peripherals
import random
import os
import sys
import math
sys.path.insert(1, os.path.join(sys.path[0], '../..'))
# numpy
import numpy
# pandas
import pandas
# ipython
from IPython.display import display, HTML
# matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.lines
# pytorch
import torch
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data.sampler import SequentialSampler
from torch.utils.data import DataLoader
# deep signature
from deep_signature.utils import utils
from deep_signature.data_generation.curve_generation import LevelCurvesGenerator
from deep_signature.data_manipulation import curve_processing
from deep_signature.nn.datasets import DeepSignatureTupletsDataset
from deep_signature.nn.networks import DeepSignatureArcLengthNet
from deep_signature.nn.networks import DeepSignatureCurvatureNet
from deep_signature.nn.losses import ContrastiveLoss
from deep_signature.nn.trainers import ModelTrainer
from deep_signature.data_manipulation import curve_sampling
from deep_signature.data_manipulation import curve_processing
from deep_signature.linalg import euclidean_transform
from deep_signature.linalg import affine_transform
# common
from common import settings
from common import utils as common_utils
# notebooks
from notebooks.utils import utils as notebook_utils
# plt.style.use("dark_background")
# Select which transform group's tuplet/result directories to use.
# NOTE(review): there is no 'else' fallback — any value other than the three
# handled cases leaves the *_dir_path names undefined and causes a NameError
# further down; confirm transform_type is always one of these literals.
transform_type = 'affine'
if transform_type == 'euclidean':
    level_curves_arclength_tuplets_dir_path = settings.level_curves_euclidean_arclength_tuplets_dir_path
    level_curves_arclength_tuplets_results_dir_path = settings.level_curves_euclidean_arclength_tuplets_results_dir_path
elif transform_type == 'equiaffine':
    level_curves_arclength_tuplets_dir_path = settings.level_curves_equiaffine_arclength_tuplets_dir_path
    level_curves_arclength_tuplets_results_dir_path = settings.level_curves_equiaffine_arclength_tuplets_results_dir_path
elif transform_type == 'affine':
    level_curves_arclength_tuplets_dir_path = settings.level_curves_affine_arclength_tuplets_dir_path
    level_curves_arclength_tuplets_results_dir_path = settings.level_curves_affine_arclength_tuplets_results_dir_path
# Same selection, but for the curvature tuplet/result directories.
if transform_type == 'euclidean':
    level_curves_curvature_tuplets_dir_path = settings.level_curves_euclidean_curvature_tuplets_dir_path
    level_curves_curvature_tuplets_results_dir_path = settings.level_curves_euclidean_curvature_tuplets_results_dir_path
elif transform_type == 'equiaffine':
    level_curves_curvature_tuplets_dir_path = settings.level_curves_equiaffine_curvature_tuplets_dir_path
    level_curves_curvature_tuplets_results_dir_path = settings.level_curves_equiaffine_curvature_tuplets_results_dir_path
elif transform_type == 'affine':
    level_curves_curvature_tuplets_dir_path = settings.level_curves_affine_curvature_tuplets_dir_path
    level_curves_curvature_tuplets_results_dir_path = settings.level_curves_affine_curvature_tuplets_results_dir_path
# Silence all warnings for the notebook run (deliberate, notebook-style code).
import warnings
warnings.filterwarnings("ignore")
# constants
true_arclength_colors = ['#FF8C00', '#444444']
predicted_arclength_colors = ['#AA0000', '#00AA00']
sample_colors = ['#AA0000', '#00AA00']
curve_colors = ['#AA0000', '#00AA00']
limit = 5  # number of curves to evaluate
step = 60
comparison_curves_count = 1
section_supporting_points_count = 20
neighborhood_supporting_points_count = 3
# curvature net consumes a symmetric window: center point + neighbors each side
curvature_sample_points = 2*neighborhood_supporting_points_count + 1
arclength_sample_points = section_supporting_points_count
sampling_ratio = 0.2
anchors_ratio = 0.2
device = torch.device('cuda')
# if we're in the equiaffine case, snap 'step' to the closest multiple of 3 (from above)
# if transform_type == "equiaffine":
#     step = int(3 * numpy.ceil(step / 3))
# package settings
torch.set_default_dtype(torch.float64)
numpy.random.seed(60)  # fixed seed so the curve shuffle below is reproducible
# create models
arclength_model = DeepSignatureArcLengthNet(sample_points=arclength_sample_points).cuda()
curvature_model = DeepSignatureCurvatureNet(sample_points=curvature_sample_points).cuda()
# load arclength model state
# Pick the newest training-run directory and restore its best checkpoint;
# results.npy stores a dict whose 'model_file_path' points at the weights.
latest_subdir = common_utils.get_latest_subdirectory(level_curves_arclength_tuplets_results_dir_path)
results = numpy.load(f"{latest_subdir}/results.npy", allow_pickle=True).item()
arclength_model.load_state_dict(torch.load(results['model_file_path'], map_location=device))
arclength_model.eval()
# load curvature model state
latest_subdir = common_utils.get_latest_subdirectory(level_curves_curvature_tuplets_results_dir_path)
results = numpy.load(f"{latest_subdir}/results.npy", allow_pickle=True).item()
curvature_model.load_state_dict(torch.load(results['model_file_path'], map_location=device))
curvature_model.eval()
# load curves (+ shuffle)
curves = LevelCurvesGenerator.load_curves(dir_path=settings.level_curves_dir_path_train)
numpy.random.shuffle(curves)
curves = curves[:limit]
# create color map
color_map = plt.get_cmap('rainbow', limit)
# generate curve records
# Runs both networks over the sampled curves and collects per-curve
# signature/arclength data for plotting below.
curve_records = notebook_utils.generate_curve_records(
    arclength_model=arclength_model,
    curvature_model=curvature_model,
    curves=curves,
    transform_type=transform_type,
    comparison_curves_count=comparison_curves_count,
    sampling_ratio=sampling_ratio,
    anchors_ratio=anchors_ratio,
    step=step,
    neighborhood_supporting_points_count=neighborhood_supporting_points_count,
    section_supporting_points_count=section_supporting_points_count)
notebook_utils.plot_curve_signature_comparisons(
    curve_records=curve_records,
    curve_colors=curve_colors)
notebook_utils.plot_curve_arclength_records(
    curve_records=curve_records,
    true_arclength_colors=true_arclength_colors,
    predicted_arclength_colors=predicted_arclength_colors,
    sample_colors=sample_colors)
| 36.062893 | 121 | 0.844785 | # python peripherals
import random
import os
import sys
import math
sys.path.insert(1, os.path.join(sys.path[0], '../..'))
# numpy
import numpy
# pandas
import pandas
# ipython
from IPython.display import display, HTML
# matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.lines
# pytorch
import torch
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data.sampler import SequentialSampler
from torch.utils.data import DataLoader
# deep signature
from deep_signature.utils import utils
from deep_signature.data_generation.curve_generation import LevelCurvesGenerator
from deep_signature.data_manipulation import curve_processing
from deep_signature.nn.datasets import DeepSignatureTupletsDataset
from deep_signature.nn.networks import DeepSignatureArcLengthNet
from deep_signature.nn.networks import DeepSignatureCurvatureNet
from deep_signature.nn.losses import ContrastiveLoss
from deep_signature.nn.trainers import ModelTrainer
from deep_signature.data_manipulation import curve_sampling
from deep_signature.data_manipulation import curve_processing
from deep_signature.linalg import euclidean_transform
from deep_signature.linalg import affine_transform
# common
from common import settings
from common import utils as common_utils
# notebooks
from notebooks.utils import utils as notebook_utils
# plt.style.use("dark_background")
transform_type = 'affine'
if transform_type == 'euclidean':
level_curves_arclength_tuplets_dir_path = settings.level_curves_euclidean_arclength_tuplets_dir_path
level_curves_arclength_tuplets_results_dir_path = settings.level_curves_euclidean_arclength_tuplets_results_dir_path
elif transform_type == 'equiaffine':
level_curves_arclength_tuplets_dir_path = settings.level_curves_equiaffine_arclength_tuplets_dir_path
level_curves_arclength_tuplets_results_dir_path = settings.level_curves_equiaffine_arclength_tuplets_results_dir_path
elif transform_type == 'affine':
level_curves_arclength_tuplets_dir_path = settings.level_curves_affine_arclength_tuplets_dir_path
level_curves_arclength_tuplets_results_dir_path = settings.level_curves_affine_arclength_tuplets_results_dir_path
if transform_type == 'euclidean':
level_curves_curvature_tuplets_dir_path = settings.level_curves_euclidean_curvature_tuplets_dir_path
level_curves_curvature_tuplets_results_dir_path = settings.level_curves_euclidean_curvature_tuplets_results_dir_path
elif transform_type == 'equiaffine':
level_curves_curvature_tuplets_dir_path = settings.level_curves_equiaffine_curvature_tuplets_dir_path
level_curves_curvature_tuplets_results_dir_path = settings.level_curves_equiaffine_curvature_tuplets_results_dir_path
elif transform_type == 'affine':
level_curves_curvature_tuplets_dir_path = settings.level_curves_affine_curvature_tuplets_dir_path
level_curves_curvature_tuplets_results_dir_path = settings.level_curves_affine_curvature_tuplets_results_dir_path
import warnings
warnings.filterwarnings("ignore")
# constants
true_arclength_colors = ['#FF8C00', '#444444']
predicted_arclength_colors = ['#AA0000', '#00AA00']
sample_colors = ['#AA0000', '#00AA00']
curve_colors = ['#AA0000', '#00AA00']
limit = 5
step = 60
comparison_curves_count = 1
section_supporting_points_count = 20
neighborhood_supporting_points_count = 3
curvature_sample_points = 2*neighborhood_supporting_points_count + 1
arclength_sample_points = section_supporting_points_count
sampling_ratio = 0.2
anchors_ratio = 0.2
device = torch.device('cuda')
# if we're in the equiaffine case, snap 'step' to the closest mutiple of 3 (from above)
# if transform_type == "equiaffine":
# step = int(3 * numpy.ceil(step / 3))
# package settings
torch.set_default_dtype(torch.float64)
numpy.random.seed(60)
# create models
arclength_model = DeepSignatureArcLengthNet(sample_points=arclength_sample_points).cuda()
curvature_model = DeepSignatureCurvatureNet(sample_points=curvature_sample_points).cuda()
# load arclength model state
latest_subdir = common_utils.get_latest_subdirectory(level_curves_arclength_tuplets_results_dir_path)
results = numpy.load(f"{latest_subdir}/results.npy", allow_pickle=True).item()
arclength_model.load_state_dict(torch.load(results['model_file_path'], map_location=device))
arclength_model.eval()
# load curvature model state
latest_subdir = common_utils.get_latest_subdirectory(level_curves_curvature_tuplets_results_dir_path)
results = numpy.load(f"{latest_subdir}/results.npy", allow_pickle=True).item()
curvature_model.load_state_dict(torch.load(results['model_file_path'], map_location=device))
curvature_model.eval()
# load curves (+ shuffle)
curves = LevelCurvesGenerator.load_curves(dir_path=settings.level_curves_dir_path_train)
numpy.random.shuffle(curves)
curves = curves[:limit]
# create color map
color_map = plt.get_cmap('rainbow', limit)
# generate curve records
curve_records = notebook_utils.generate_curve_records(
arclength_model=arclength_model,
curvature_model=curvature_model,
curves=curves,
transform_type=transform_type,
comparison_curves_count=comparison_curves_count,
sampling_ratio=sampling_ratio,
anchors_ratio=anchors_ratio,
step=step,
neighborhood_supporting_points_count=neighborhood_supporting_points_count,
section_supporting_points_count=section_supporting_points_count)
notebook_utils.plot_curve_signature_comparisons(
curve_records=curve_records,
curve_colors=curve_colors)
notebook_utils.plot_curve_arclength_records(
curve_records=curve_records,
true_arclength_colors=true_arclength_colors,
predicted_arclength_colors=predicted_arclength_colors,
sample_colors=sample_colors)
| 0 | 0 | 0 |
5005ed493c0e7ffb6bca0eff9853fdaa2a10ac62 | 1,204 | py | Python | api/views.py | arwhyte/met | f80fbf65366d13413cebf4f1d49f7ba2ec0d5cac | [
"MIT"
] | null | null | null | api/views.py | arwhyte/met | f80fbf65366d13413cebf4f1d49f7ba2ec0d5cac | [
"MIT"
] | 5 | 2018-11-27T01:31:17.000Z | 2021-06-10T21:02:20.000Z | api/views.py | arwhyte/met | f80fbf65366d13413cebf4f1d49f7ba2ec0d5cac | [
"MIT"
] | 1 | 2018-12-17T17:04:15.000Z | 2018-12-17T17:04:15.000Z | from django.shortcuts import render
from met.models import Artist
from api.serializers import ArtistSerializer
from rest_framework import generics, permissions, status, viewsets
from rest_framework.response import Response
class ArtistViewSet(viewsets.ModelViewSet):
    """
    This ViewSet provides both 'list' and 'detail' views.
    """
    # All artists, alphabetized by display name for stable ordering/pagination.
    queryset = Artist.objects.all().order_by('artist_display_name')
    serializer_class = ArtistSerializer
    # Anyone may read; only authenticated users may create/update/delete.
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
| 31.684211 | 73 | 0.817276 | from django.shortcuts import render
from met.models import Artist
from api.serializers import ArtistSerializer
from rest_framework import generics, permissions, status, viewsets
from rest_framework.response import Response
class ArtistViewSet(viewsets.ModelViewSet):
    """
    This ViewSet provides both 'list' and 'detail' views.
    """
    # All artists, alphabetized by display name for stable ordering/pagination.
    queryset = Artist.objects.all().order_by('artist_display_name')
    serializer_class = ArtistSerializer
    # Anyone may read; only authenticated users may create/update/delete.
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)

    def delete(self, request, pk, format=None):
        """Delete the Artist addressed by the URL and return 204 No Content.

        DRF's ``get_object()`` takes no arguments (it resolves ``pk`` from
        ``self.kwargs``), and ``perform_destroy`` is an instance method, so
        it must not be passed ``self`` explicitly — the original code did
        both, raising TypeError on every delete.
        """
        artist = self.get_object()
        self.perform_destroy(artist)
        return Response(status=status.HTTP_204_NO_CONTENT)

    def perform_destroy(self, instance):
        """Actually remove the instance from the database."""
        instance.delete()
class ArtistListAPIView(generics.ListCreateAPIView):
    """List all artists (GET) or create a new one (POST)."""
    queryset = Artist.objects.all().order_by('artist_display_name')
    serializer_class = ArtistSerializer
    # Model-level permissions for writes; anonymous users get read-only access.
    permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,)
class ArtistDetailAPIView(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve (GET), update (PUT/PATCH) or delete (DELETE) a single artist."""
    queryset = Artist.objects.all().order_by('artist_display_name')
    serializer_class = ArtistSerializer
    # Model-level permissions for writes; anonymous users get read-only access.
    permission_classes = (permissions.DjangoModelPermissionsOrAnonReadOnly,)
| 179 | 427 | 94 |
ff094f9844c007f21c1361d067d8f484dff418e6 | 27,270 | py | Python | pokeminer/notification.py | levijoseph/Levi | e8058933e8830fa07c6fbecbb15a0b3c585b402d | [
"MIT"
] | null | null | null | pokeminer/notification.py | levijoseph/Levi | e8058933e8830fa07c6fbecbb15a0b3c585b402d | [
"MIT"
] | null | null | null | pokeminer/notification.py | levijoseph/Levi | e8058933e8830fa07c6fbecbb15a0b3c585b402d | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta, timezone
from collections import deque
from math import sqrt
from time import monotonic
from logging import getLogger
from pkg_resources import resource_stream
from tempfile import NamedTemporaryFile
from .utils import load_pickle, dump_pickle
from .db import Session, get_pokemon_ranking, estimate_remaining_time
from .names import POKEMON_NAMES, MOVES
from . import config
# set unset config options to None
for variable_name in ('PB_API_KEY', 'PB_CHANNEL', 'TWITTER_CONSUMER_KEY',
'TWITTER_CONSUMER_SECRET', 'TWITTER_ACCESS_KEY',
'TWITTER_ACCESS_SECRET', 'LANDMARKS', 'AREA_NAME',
'HASHTAGS', 'TZ_OFFSET', 'ENCOUNTER', 'INITIAL_RANKING',
'NOTIFY', 'NAME_FONT', 'IV_FONT', 'MOVE_FONT',
'TWEET_IMAGES', 'NOTIFY_IDS', 'NEVER_NOTIFY_IDS',
'RARITY_OVERRIDE', 'IGNORE_IVS', 'IGNORE_RARITY',
'WEBHOOKS'):
if not hasattr(config, variable_name):
setattr(config, variable_name, None)
_optional = {
'ALWAYS_NOTIFY': 9,
'FULL_TIME': 1800,
'TIME_REQUIRED': 300,
'NOTIFY_RANKING': 90,
'ALWAYS_NOTIFY_IDS': set(),
'NOTIFICATION_CACHE': 100
}
# set defaults for unset config options
for setting_name, default in _optional.items():
if not hasattr(config, setting_name):
setattr(config, setting_name, default)
del _optional
if config.NOTIFY:
WEBHOOK = False
TWITTER = False
PUSHBULLET = False
if all((config.TWITTER_CONSUMER_KEY, config.TWITTER_CONSUMER_SECRET,
config.TWITTER_ACCESS_KEY, config.TWITTER_ACCESS_SECRET)):
try:
import twitter
from twitter.twitter_utils import calc_expected_status_length
except ImportError as e:
raise ImportError("You specified a TWITTER_ACCESS_KEY but you don't have python-twitter installed.") from e
TWITTER=True
if config.TWEET_IMAGES:
if not config.ENCOUNTER:
raise ValueError('You enabled TWEET_IMAGES but ENCOUNTER is not set.')
try:
import cairo
except ImportError as e:
raise ImportError('You enabled TWEET_IMAGES but Cairo could not be imported.') from e
if config.PB_API_KEY:
try:
from pushbullet import Pushbullet
except ImportError as e:
raise ImportError("You specified a PB_API_KEY but you don't have pushbullet.py installed.") from e
PUSHBULLET=True
if config.WEBHOOKS:
if not isinstance(config.WEBHOOKS, (set, list, tuple)):
raise ValueError('WEBHOOKS must be a set of addresses.')
try:
import requests
except ImportError as e:
raise ImportError("You specified a WEBHOOKS address but you don't have requests installed.") from e
WEBHOOK = True
NATIVE = TWITTER or PUSHBULLET
if not (NATIVE or WEBHOOK):
raise ValueError('NOTIFY is enabled but no keys or webhook address were provided.')
try:
if config.INITIAL_SCORE < config.MINIMUM_SCORE:
raise ValueError('INITIAL_SCORE should be greater than or equal to MINIMUM_SCORE.')
except TypeError:
raise AttributeError('INITIAL_SCORE or MINIMUM_SCORE are not set.')
if config.NOTIFY_RANKING and config.NOTIFY_IDS:
raise ValueError('Only set NOTIFY_RANKING or NOTIFY_IDS, not both.')
elif not any((config.NOTIFY_RANKING, config.NOTIFY_IDS, config.ALWAYS_NOTIFY_IDS)):
raise ValueError('Must set either NOTIFY_RANKING, NOTIFY_IDS, or ALWAYS_NOTIFY_IDS.')
| 38.901569 | 121 | 0.579941 | from datetime import datetime, timedelta, timezone
from collections import deque
from math import sqrt
from time import monotonic
from logging import getLogger
from pkg_resources import resource_stream
from tempfile import NamedTemporaryFile
from .utils import load_pickle, dump_pickle
from .db import Session, get_pokemon_ranking, estimate_remaining_time
from .names import POKEMON_NAMES, MOVES
from . import config
# set unset config options to None
for variable_name in ('PB_API_KEY', 'PB_CHANNEL', 'TWITTER_CONSUMER_KEY',
'TWITTER_CONSUMER_SECRET', 'TWITTER_ACCESS_KEY',
'TWITTER_ACCESS_SECRET', 'LANDMARKS', 'AREA_NAME',
'HASHTAGS', 'TZ_OFFSET', 'ENCOUNTER', 'INITIAL_RANKING',
'NOTIFY', 'NAME_FONT', 'IV_FONT', 'MOVE_FONT',
'TWEET_IMAGES', 'NOTIFY_IDS', 'NEVER_NOTIFY_IDS',
'RARITY_OVERRIDE', 'IGNORE_IVS', 'IGNORE_RARITY',
'WEBHOOKS'):
if not hasattr(config, variable_name):
setattr(config, variable_name, None)
_optional = {
'ALWAYS_NOTIFY': 9,
'FULL_TIME': 1800,
'TIME_REQUIRED': 300,
'NOTIFY_RANKING': 90,
'ALWAYS_NOTIFY_IDS': set(),
'NOTIFICATION_CACHE': 100
}
# set defaults for unset config options
for setting_name, default in _optional.items():
if not hasattr(config, setting_name):
setattr(config, setting_name, default)
del _optional
if config.NOTIFY:
WEBHOOK = False
TWITTER = False
PUSHBULLET = False
if all((config.TWITTER_CONSUMER_KEY, config.TWITTER_CONSUMER_SECRET,
config.TWITTER_ACCESS_KEY, config.TWITTER_ACCESS_SECRET)):
try:
import twitter
from twitter.twitter_utils import calc_expected_status_length
except ImportError as e:
raise ImportError("You specified a TWITTER_ACCESS_KEY but you don't have python-twitter installed.") from e
TWITTER=True
if config.TWEET_IMAGES:
if not config.ENCOUNTER:
raise ValueError('You enabled TWEET_IMAGES but ENCOUNTER is not set.')
try:
import cairo
except ImportError as e:
raise ImportError('You enabled TWEET_IMAGES but Cairo could not be imported.') from e
if config.PB_API_KEY:
try:
from pushbullet import Pushbullet
except ImportError as e:
raise ImportError("You specified a PB_API_KEY but you don't have pushbullet.py installed.") from e
PUSHBULLET=True
if config.WEBHOOKS:
if not isinstance(config.WEBHOOKS, (set, list, tuple)):
raise ValueError('WEBHOOKS must be a set of addresses.')
try:
import requests
except ImportError as e:
raise ImportError("You specified a WEBHOOKS address but you don't have requests installed.") from e
WEBHOOK = True
NATIVE = TWITTER or PUSHBULLET
if not (NATIVE or WEBHOOK):
raise ValueError('NOTIFY is enabled but no keys or webhook address were provided.')
try:
if config.INITIAL_SCORE < config.MINIMUM_SCORE:
raise ValueError('INITIAL_SCORE should be greater than or equal to MINIMUM_SCORE.')
except TypeError:
raise AttributeError('INITIAL_SCORE or MINIMUM_SCORE are not set.')
if config.NOTIFY_RANKING and config.NOTIFY_IDS:
raise ValueError('Only set NOTIFY_RANKING or NOTIFY_IDS, not both.')
elif not any((config.NOTIFY_RANKING, config.NOTIFY_IDS, config.ALWAYS_NOTIFY_IDS)):
raise ValueError('Must set either NOTIFY_RANKING, NOTIFY_IDS, or ALWAYS_NOTIFY_IDS.')
class PokeImage:
    """Renders a notification image (sprite + IVs + moves) with cairo."""

    def __init__(self, pokemon_id, iv, moves, time_of_day):
        """Store the data to draw.

        iv is an (attack, defense, stamina) triple (items may be None);
        moves is a (move1_name, move2_name) pair (items may be None);
        time_of_day selects the day/night background (values > 1 mean night).
        """
        self.pokemon_id = pokemon_id
        self.name = POKEMON_NAMES[pokemon_id]
        self.attack, self.defense, self.stamina = iv
        self.move1, self.move2 = moves
        self.time_of_day = time_of_day

    def create(self):
        """Compose the image and return an open temporary PNG file object.

        The file is created with delete=True, so the caller must use it
        before closing it; .mode is set so downstream libraries that
        inspect file.mode treat it as a binary file.
        """
        if self.time_of_day > 1:
            bg = resource_stream('pokeminer', 'static/assets/notification-bg-night.png')
        else:
            bg = resource_stream('pokeminer', 'static/assets/notification-bg-day.png')
        ims = cairo.ImageSurface.create_from_png(bg)
        self.context = cairo.Context(ims)
        pokepic = resource_stream('pokeminer', 'static/original-icons/{}.png'.format(self.pokemon_id))
        self.draw_stats()
        self.draw_image(pokepic, 204, 224)
        self.draw_name()
        image = NamedTemporaryFile(suffix='.png', delete=True)
        ims.write_to_png(image)
        image.mode = 'rb'
        return image

    def draw_stats(self):
        """Draw the Pokemon's IV's and moves."""
        self.context.set_line_width(1.75)
        text_x = 240
        if self.attack is not None:
            self.context.select_font_face(config.IV_FONT or "monospace")
            self.context.set_font_size(22)
            # black stroke
            self.draw_ivs(text_x)
            self.context.set_source_rgba(0, 0, 0)
            self.context.stroke()
            # white fill
            self.context.move_to(text_x, 90)
            self.draw_ivs(text_x)
            self.context.set_source_rgba(1, 1, 1)
            self.context.fill()
        if self.move1 or self.move2:
            self.context.select_font_face(config.MOVE_FONT or "sans-serif")
            self.context.set_font_size(16)
            # black stroke
            self.draw_moves(text_x)
            self.context.set_source_rgba(0, 0, 0)
            self.context.stroke()
            # white fill
            self.draw_moves(text_x)
            self.context.set_source_rgba(1, 1, 1)
            self.context.fill()

    def draw_ivs(self, text_x):
        """Lay down the three IV text paths (drawn twice: stroke then fill)."""
        self.context.move_to(text_x, 90)
        self.context.text_path("Attack:  {:>2}/15".format(self.attack))
        self.context.move_to(text_x, 116)
        self.context.text_path("Defense: {:>2}/15".format(self.defense))
        self.context.move_to(text_x, 142)
        self.context.text_path("Stamina: {:>2}/15".format(self.stamina))

    def draw_moves(self, text_x):
        """Lay down the move-name text paths for whichever moves are known."""
        if self.move1:
            self.context.move_to(text_x, 170)
            self.context.text_path("Move 1: {}".format(self.move1))
        if self.move2:
            self.context.move_to(text_x, 188)
            self.context.text_path("Move 2: {}".format(self.move2))

    def draw_image(self, pokepic, height, width):
        """Draw a scaled image on a given context."""
        ims = cairo.ImageSurface.create_from_png(pokepic)
        # calculate proportional scaling
        img_height = ims.get_height()
        img_width = ims.get_width()
        width_ratio = float(width) / float(img_width)
        height_ratio = float(height) / float(img_height)
        scale_xy = min(height_ratio, width_ratio)
        # scale image and add it (only downscale; never enlarge small sprites)
        self.context.save()
        if scale_xy < 1:
            self.context.scale(scale_xy, scale_xy)
            if scale_xy == width_ratio:
                new_height = img_height * scale_xy
                top = (height - new_height) / 2
                self.context.translate(8, top + 8)
            else:
                new_width = img_width * scale_xy
                left = (width - new_width) / 2
                self.context.translate(left + 8, 8)
        else:
            left = (width - img_width) / 2
            top = (height - img_height) / 2
            self.context.translate(left + 8, top + 8)
        self.context.set_source_surface(ims)
        self.context.paint()
        self.context.restore()

    def draw_name(self):
        """Draw the Pokemon's name."""
        self.context.set_line_width(2.5)
        text_x = 240
        text_y = 50
        self.context.select_font_face(config.NAME_FONT or "sans-serif")
        self.context.set_font_size(32)
        self.context.move_to(text_x, text_y)
        self.context.set_source_rgba(0, 0, 0)
        self.context.text_path(self.name)
        self.context.stroke()
        self.context.move_to(text_x, text_y)
        self.context.set_source_rgba(1, 1, 1)
        self.context.show_text(self.name)
class Notification:
    """A single notable sighting, responsible for formatting and sending
    the PushBullet and/or Twitter notifications about it.

    Fixes over the previous revision:
    - pbpush: the 'bad' threshold (< .35) was tested *after* 'weak' (< .45)
      and therefore unreachable; the stricter check now comes first.
    - tweet: failing to construct the Twitter API object now aborts with
      False instead of falling through to a NameError on ``api``.
    - tweet: the landmark-based shortening steps are skipped when no
      landmark exists (``self.landmark`` may be None), which previously
      raised AttributeError; the final length fallback now also runs in
      that case and its format call no longer omits the ``d`` argument
      (which raised KeyError).
    """

    def __init__(self, pokemon_id, coordinates, time_till_hidden, iv, moves, score, time_of_day):
        self.pokemon_id = pokemon_id
        self.name = POKEMON_NAMES[pokemon_id]
        self.coordinates = coordinates
        self.moves = moves
        self.score = score
        self.iv = iv
        self.time_of_day = time_of_day
        self.logger = getLogger('notifier')

        # Adjective describing how good the sighting is; 'wild' is the default.
        self.description = 'wild'
        try:
            if self.score == 1:
                self.description = 'perfect'
            elif self.score > .83:
                self.description = 'great'
            elif self.score > .6:
                self.description = 'good'
        except TypeError:
            # score is None (e.g. always-notify species); keep 'wild'
            pass

        if config.TZ_OFFSET:
            now = datetime.now(timezone(timedelta(hours=config.TZ_OFFSET)))
        else:
            now = datetime.now()

        if TWITTER and config.HASHTAGS:
            self.hashtags = config.HASHTAGS.copy()
        else:
            self.hashtags = set()

        # time_till_hidden may be an exact number of seconds, or a
        # (soonest, latest) range when the despawn time is only estimated.
        if isinstance(time_till_hidden, (tuple, list)):
            soonest, latest = time_till_hidden
            self.min_delta = timedelta(seconds=soonest)
            self.max_delta = timedelta(seconds=latest)
            # if both ends of the range land in the same clock minute,
            # collapse the range to its average and report a single time
            if ((now + self.min_delta).strftime('%I:%M') ==
                    (now + self.max_delta).strftime('%I:%M')):
                average = (soonest + latest) / 2
                time_till_hidden = average
                self.delta = timedelta(seconds=average)
                self.expire_time = (
                    now + self.delta).strftime('%I:%M %p').lstrip('0')
            else:
                self.delta = None
                self.expire_time = None
                self.min_expire_time = (
                    now + self.min_delta).strftime('%I:%M').lstrip('0')
                self.max_expire_time = (
                    now + self.max_delta).strftime('%I:%M %p').lstrip('0')
        else:
            self.delta = timedelta(seconds=time_till_hidden)
            self.expire_time = (
                now + self.delta).strftime('%I:%M %p').lstrip('0')
            self.min_delta = None

        self.map_link = 'https://maps.google.com/maps?q={0[0]:.5f},{0[1]:.5f}'.format(
            self.coordinates)
        self.place = None

    def notify(self):
        """Resolve the nearest landmark and fire all enabled notifiers.

        Returns True if at least one notification was sent.
        """
        if config.LANDMARKS:
            self.landmark = config.LANDMARKS.find_landmark(self.coordinates)
        else:
            self.landmark = None

        if self.landmark:
            self.place = self.landmark.generate_string(self.coordinates)
            if TWITTER and self.landmark.hashtags:
                self.hashtags.update(self.landmark.hashtags)
        else:
            self.place = self.generic_place_string()

        tweeted = False
        pushed = False

        if PUSHBULLET:
            pushed = self.pbpush()

        if TWITTER:
            tweeted = self.tweet()

        return tweeted or pushed

    def pbpush(self):
        """ Send a PushBullet notification either privately or to a channel,
        depending on whether or not PB_CHANNEL is set in config.
        """
        try:
            pb = Pushbullet(config.PB_API_KEY)
        except Exception:
            self.logger.exception('Failed to create a PushBullet object.')
            return False

        description = self.description
        try:
            # Check the stricter threshold first: the previous order made
            # 'bad' unreachable because anything < .35 is also < .45.
            if self.score < .35:
                description = 'bad'
            elif self.score < .45:
                description = 'weak'
        except TypeError:
            # score is None; keep the existing description
            pass

        area = config.AREA_NAME
        if self.delta:
            expiry = 'until {}'.format(self.expire_time)

            minutes, seconds = divmod(self.delta.total_seconds(), 60)
            remaining = 'for {m}m{s:.0f}s'.format(m=int(minutes), s=seconds)
        else:
            expiry = 'until between {t1} and {t2}'.format(
                t1=self.min_expire_time, t2=self.max_expire_time)

            minutes, seconds = divmod(self.min_delta.total_seconds(), 60)
            min_remaining = '{m}m{s:.0f}s'.format(m=int(minutes), s=seconds)
            minutes, seconds = divmod(self.max_delta.total_seconds(), 60)
            max_remaining = '{m}m{s:.0f}s'.format(m=int(minutes), s=seconds)
            remaining = 'for between {r1} and {r2}'.format(
                r1=min_remaining, r2=max_remaining)

        title = ('A {d} {n} will be in {a} {e}!'
                 ).format(d=description, n=self.name, a=area, e=expiry)

        body = ('It will be {p} {r}.\n\n'
                'Attack: {iv[0]}\n'
                'Defense: {iv[1]}\n'
                'Stamina: {iv[2]}\n'
                'Move 1: {m[0]}\n'
                'Move 2: {m[1]}\n\n').format(
                p=self.place, r=remaining, iv=self.iv, m=self.moves)

        try:
            try:
                # prefer the configured channel, fall back to a private push
                channel = pb.channels[config.PB_CHANNEL]
                channel.push_link(title, self.map_link, body)
            except (IndexError, KeyError):
                pb.push_link(title, self.map_link, body)
        except Exception:
            self.logger.exception('Failed to send a PushBullet notification about {}.'.format(self.name))
            return False
        else:
            self.logger.info('Sent a PushBullet notification about {}.'.format(self.name))
            return True

    def tweet(self):
        """ Create message, reduce it until it fits in a tweet, and then tweet
        it with a link to Google maps and tweet location included.
        """
        def generate_tag_string(hashtags):
            '''create hashtag string'''
            tag_string = ''
            if hashtags:
                for hashtag in hashtags:
                    tag_string += ' #{}'.format(hashtag)
            return tag_string

        try:
            api = twitter.Api(consumer_key=config.TWITTER_CONSUMER_KEY,
                              consumer_secret=config.TWITTER_CONSUMER_SECRET,
                              access_token_key=config.TWITTER_ACCESS_KEY,
                              access_token_secret=config.TWITTER_ACCESS_SECRET)
        except Exception:
            # without an API object there is nothing to post with
            self.logger.exception('Failed to create a Twitter API object.')
            return False

        tag_string = generate_tag_string(self.hashtags)

        if self.expire_time:
            tweet_text = (
                'A {d} {n} appeared! It will be {p} until {e}. {t} {u}').format(
                d=self.description, n=self.name, p=self.place,
                e=self.expire_time, t=tag_string, u=self.map_link)
        else:
            tweet_text = (
                'A {d} {n} appeared {p}! It will expire sometime between '
                '{e1} and {e2}. {t} {u}').format(
                d=self.description, n=self.name, p=self.place,
                e1=self.min_expire_time, e2=self.max_expire_time,
                t=tag_string, u=self.map_link)

        # progressively shorten the tweet until it fits in 140 characters
        if calc_expected_status_length(tweet_text) > 140:
            tweet_text = tweet_text.replace(' meters ', 'm ')

            # remove hashtags until length is short enough
            while calc_expected_status_length(tweet_text) > 140:
                if self.hashtags:
                    hashtag = self.hashtags.pop()
                    tweet_text = tweet_text.replace(' #' + hashtag, '')
                else:
                    break

            # landmark-based shortening only applies when a landmark exists
            if (calc_expected_status_length(tweet_text) > 140 and
                    self.landmark and self.landmark.shortname):
                tweet_text = tweet_text.replace(self.landmark.name,
                                                self.landmark.shortname)

            if calc_expected_status_length(tweet_text) > 140 and self.landmark:
                place = self.landmark.shortname or self.landmark.name
                phrase = self.landmark.phrase
                if self.place.startswith(phrase):
                    place_string = '{ph} {pl}'.format(ph=phrase, pl=place)
                else:
                    place_string = 'near {}'.format(place)
                tweet_text = tweet_text.replace(self.place, place_string)

                if calc_expected_status_length(tweet_text) > 140:
                    if self.expire_time:
                        tweet_text = 'A {d} {n} will be {p} until {e}. {u}'.format(
                            d=self.description, n=self.name,
                            p=place_string, e=self.expire_time,
                            u=self.map_link)
                    else:
                        tweet_text = (
                            "A {d} {n} appeared {p}! It'll expire between {e1} & {e2}."
                            ' {u}').format(d=self.description, n=self.name,
                                           p=place_string, e1=self.min_expire_time,
                                           e2=self.max_expire_time, u=self.map_link)

            # last resort: drop the place entirely
            if calc_expected_status_length(tweet_text) > 140:
                if self.expire_time:
                    tweet_text = 'A {d} {n} will expire at {e}. {u}'.format(
                        d=self.description, n=self.name,
                        e=self.expire_time, u=self.map_link)
                else:
                    tweet_text = (
                        'A {d} {n} will expire between {e1} & {e2}. {u}').format(
                        d=self.description, n=self.name,
                        e1=self.min_expire_time, e2=self.max_expire_time,
                        u=self.map_link)

        image = None
        if config.TWEET_IMAGES:
            try:
                image = PokeImage(self.pokemon_id, self.iv, self.moves, self.time_of_day).create()
            except Exception:
                # an image is optional; tweet without one on failure
                self.logger.exception('Failed to create a Tweet image.')

        try:
            api.PostUpdate(tweet_text,
                           media=image,
                           latitude=self.coordinates[0],
                           longitude=self.coordinates[1],
                           display_coordinates=True)
        except Exception:
            self.logger.exception('Failed to Tweet about {}.'.format(self.name))
            return False
        else:
            self.logger.info('Sent a tweet about {}.'.format(self.name))
            return True
        finally:
            try:
                image.close()
            except AttributeError:
                # image is None when TWEET_IMAGES is off or creation failed
                pass

    @staticmethod
    def generic_place_string():
        """ Create a place string with area name (if available)"""
        if config.AREA_NAME:
            # no landmarks defined, just use area name
            place = 'in {}'.format(config.AREA_NAME)
            return place
        else:
            # no landmarks or area name defined, just say 'around'
            return 'around'
class Notifier:
def __init__(self, spawns):
self.spawns = spawns
self.recent_notifications = deque(maxlen=config.NOTIFICATION_CACHE)
self.notify_ranking = config.NOTIFY_RANKING
self.session = Session(autoflush=False)
self.initial_score = config.INITIAL_SCORE
self.minimum_score = config.MINIMUM_SCORE
self.last_notification = monotonic() - (config.FULL_TIME / 2)
self.always_notify = []
self.logger = getLogger('notifier')
self.never_notify = config.NEVER_NOTIFY_IDS or tuple()
self.rarity_override = config.RARITY_OVERRIDE or {}
if self.notify_ranking:
self.set_pokemon_ranking(loadpickle=True)
self.set_notify_ids()
self.auto = True
elif config.NOTIFY_IDS or config.ALWAYS_NOTIFY_IDS:
self.notify_ids = config.NOTIFY_IDS or config.ALWAYS_NOTIFY_IDS
self.always_notify = config.ALWAYS_NOTIFY_IDS
self.notify_ranking = len(self.notify_ids)
self.auto = False
if WEBHOOK:
self.wh_session = requests.Session()
def set_notify_ids(self):
self.notify_ids = self.pokemon_ranking[0:self.notify_ranking]
self.always_notify = set(self.pokemon_ranking[0:config.ALWAYS_NOTIFY])
self.always_notify |= set(config.ALWAYS_NOTIFY_IDS)
def set_pokemon_ranking(self, loadpickle=False):
self.ranking_time = monotonic()
if loadpickle:
self.pokemon_ranking = load_pickle('ranking')
if self.pokemon_ranking:
return
try:
self.pokemon_ranking = get_pokemon_ranking(self.session)
except Exception:
self.session.rollback()
self.logger.exception('An exception occurred while trying to update rankings.')
dump_pickle('ranking', self.pokemon_ranking)
def get_rareness_score(self, pokemon_id):
if pokemon_id in self.rarity_override:
return self.rarity_override[pokemon_id]
exclude = len(self.always_notify)
total = self.notify_ranking - exclude
ranking = self.notify_ids.index(pokemon_id) - exclude
percentile = 1 - (ranking / total)
return percentile
def get_iv_score(self, iv):
try:
return sum(iv) / 45
except TypeError:
return None
def get_required_score(self, now=None):
if self.initial_score == self.minimum_score or config.FULL_TIME == 0:
return self.initial_score
now = now or monotonic()
time_passed = now - self.last_notification
subtract = self.initial_score - self.minimum_score
if time_passed < config.FULL_TIME:
subtract *= (time_passed / config.FULL_TIME)
return self.initial_score - subtract
def eligible(self, pokemon):
pokemon_id = pokemon['pokemon_id']
if (pokemon_id in self.never_notify
or pokemon['encounter_id'] in self.recent_notifications):
return False
if pokemon_id in self.always_notify:
return True
if pokemon_id not in self.notify_ids:
return False
if config.IGNORE_RARITY:
return True
rareness = self.get_rareness_score(pokemon_id)
highest_score = (rareness + 1) / 2
score_required = self.get_required_score()
return highest_score > score_required
    def notify(self, pokemon, time_of_day):
        """Send a PushBullet notification and/or a Tweet, depending on if their
        respective API keys have been set in config.

        Returns True when at least one channel (native notifier or webhook)
        accepted the sighting, False otherwise.
        """
        spawn_id = pokemon['spawn_id']
        coordinates = (pokemon['lat'], pokemon['lon'])
        pokemon_id = pokemon['pokemon_id']
        encounter_id = pokemon['encounter_id']
        name = POKEMON_NAMES[pokemon_id]
        if encounter_id in self.recent_notifications:
            # skip duplicate
            return False
        if pokemon['valid']:
            time_till_hidden = pokemon['time_till_hidden_ms'] / 1000
        else:
            # despawn time unknown; estimated below from spawn history
            time_till_hidden = None
        now = monotonic()
        if self.auto:
            # refresh the rarity ranking at most once per hour
            if now - self.ranking_time > 3600:
                self.set_pokemon_ranking()
                self.set_notify_ids()
        if pokemon_id in self.always_notify:
            score_required = 0
        else:
            if time_till_hidden and time_till_hidden < config.TIME_REQUIRED:
                self.logger.info('{n} has only {s} seconds remaining.'.format(
                    n=name, s=time_till_hidden))
                return False
            score_required = self.get_required_score(now)
        iv = (pokemon.get('individual_attack'),
              pokemon.get('individual_defense'),
              pokemon.get('individual_stamina'))
        moves = (MOVES.get(pokemon.get('move_1'), {}).get('name'),
                 MOVES.get(pokemon.get('move_2'), {}).get('name'))
        iv_score = self.get_iv_score(iv)
        if score_required:
            # combined score: rarity and/or IVs depending on config switches
            if config.IGNORE_RARITY:
                score = iv_score
            elif config.IGNORE_IVS or iv_score is None:
                score = self.get_rareness_score(pokemon_id)
            else:
                rareness = self.get_rareness_score(pokemon_id)
                try:
                    score = (iv_score + rareness) / 2
                except TypeError:
                    self.logger.warning('Failed to calculate score for {}.'.format(name))
                    return False
        else:
            score = None
        if score_required and score < score_required:
            self.logger.info("{n}'s score was {s:.3f} (iv: {i:.3f}),"
                             " but {r:.3f} was required.".format(
                                 n=name, s=score, i=iv_score, r=score_required))
            return False
        if not time_till_hidden:
            seen = pokemon['seen'] % 3600
            try:
                time_till_hidden = estimate_remaining_time(self.session, spawn_id, seen)
            except Exception:
                self.session.rollback()
                self.logger.exception('An exception occurred while trying to estimate remaining time.')
            # estimate_remaining_time presumably returns a (low, high) pair;
            # sum/2 is then its midpoint -- TODO confirm against its definition
            mean = sum(time_till_hidden) / 2
            if mean < config.TIME_REQUIRED and pokemon_id not in self.always_notify:
                self.logger.info('{n} has only around {s} seconds remaining.'.format(
                    n=name, s=mean))
                return False
        whpushed = False
        if WEBHOOK:
            whpushed = self.webhook(pokemon, time_till_hidden)
        notified = False
        if NATIVE:
            notified = Notification(pokemon_id, coordinates, time_till_hidden, iv, moves, iv_score, time_of_day).notify()
        if notified or whpushed:
            # remember successful notifications to rate-limit and deduplicate
            self.last_notification = monotonic()
            self.recent_notifications.append(encounter_id)
        return notified or whpushed
def webhook(self, pokemon, time_till_hidden):
""" Send a notification via webhook
"""
if isinstance(time_till_hidden, (tuple, list)):
time_till_hidden = time_till_hidden[0]
data = {
'type': "pokemon",
'message': {
"encounter_id": pokemon['encounter_id'],
"pokemon_id": pokemon['pokemon_id'],
"last_modified_time": pokemon['seen'] * 1000,
"spawnpoint_id": pokemon['spawn_id'],
"latitude": pokemon['lat'],
"longitude": pokemon['lon'],
"disappear_time": pokemon['seen'] + time_till_hidden,
"time_until_hidden_ms": time_till_hidden * 1000
}
}
try:
data['message']['individual_attack'] = pokemon['individual_attack']
data['message']['individual_defense'] = pokemon['individual_defense']
data['message']['individual_stamina'] = pokemon['individual_stamina']
data['message']['move_1'] = pokemon['move_1']
data['message']['move_2'] = pokemon['move_2']
except KeyError:
pass
ret = False
for w in config.WEBHOOKS:
try:
self.wh_session.post(w, json=data, timeout=(1, 1))
ret = True
except requests.exceptions.Timeout:
self.logger.warning('Response timeout on webhook endpoint {}'.format(w))
except requests.exceptions.RequestException as e:
self.logger.warning('Request Error: {}'.format(e))
return ret
| 7,739 | 15,793 | 69 |
f1c1bf77c34041980fcc84957b781098ab32a71f | 3,159 | py | Python | AbletonLiveScripts/v10/ComradeEncoders/skin.py | lzref/ComradeEncoders | 154a10d7d1c83e6b8f28af7f19ee6cb2a45a88e9 | [
"MIT"
] | 1 | 2021-11-26T01:52:30.000Z | 2021-11-26T01:52:30.000Z | AbletonLiveScripts/v10/ComradeEncoders/skin.py | lzref/ComradeEncoders | 154a10d7d1c83e6b8f28af7f19ee6cb2a45a88e9 | [
"MIT"
] | null | null | null | AbletonLiveScripts/v10/ComradeEncoders/skin.py | lzref/ComradeEncoders | 154a10d7d1c83e6b8f28af7f19ee6cb2a45a88e9 | [
"MIT"
] | 1 | 2021-11-26T01:52:31.000Z | 2021-11-26T01:52:31.000Z | # uncompyle6 version 3.4.1
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.16 (v2.7.16:413a49145e, Mar 2 2019, 14:32:10)
# [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)]
# Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/SL_MkIII/skin.py
# Compiled at: 2019-04-23 14:43:03
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.control_surface import Skin
from .colors import Rgb
skin = Skin(Colors) | 25.475806 | 132 | 0.600823 | # uncompyle6 version 3.4.1
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.16 (v2.7.16:413a49145e, Mar 2 2019, 14:32:10)
# [GCC 4.2.1 Compatible Apple LLVM 6.0 (clang-600.0.57)]
# Embedded file name: /Users/versonator/Jenkins/live/output/mac_64_static/Release/python-bundle/MIDI Remote Scripts/SL_MkIII/skin.py
# Compiled at: 2019-04-23 14:43:03
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.control_surface import Skin
from .colors import Rgb
class Colors:
    """Color palette consumed by the ``Skin`` instance below.

    Each nested class groups the colors for one control-surface component;
    the ableton.v2 skin framework resolves entries by dotted path, e.g.
    ``Session.ClipStopped``.  All values come from the project's ``Rgb``
    color table.
    """
    class DefaultButton:
        On = Rgb.GREEN
        Off = Rgb.BLACK
        Disabled = Rgb.BLACK
    # Clip/scene launch grid feedback.
    class Session:
        RecordButton = Rgb.RED
        ClipTriggeredPlay = Rgb.GREEN_BLINK
        ClipTriggeredRecord = Rgb.RED_BLINK
        ClipStarted = Rgb.GREEN_PULSE
        ClipRecording = Rgb.RED_PULSE
        ClipStopped = Rgb.AMBER
        Scene = Rgb.BLACK
        SceneTriggered = Rgb.GREEN_BLINK
        NoScene = Rgb.BLACK
        StopClipTriggered = Rgb.RED_PULSE
        StopClip = Rgb.RED
        StopClipDisabled = Rgb.RED_HALF
        ClipEmpty = Rgb.BLACK
        Navigation = Rgb.WHITE
    # Per-track mixer buttons; On/Off pairs use full vs. half brightness.
    class Mixer:
        ArmOn = Rgb.RED
        ArmOff = Rgb.RED_HALF
        SoloOn = Rgb.BLUE
        SoloOff = Rgb.BLUE_HALF
        MuteOn = Rgb.YELLOW_HALF
        MuteOff = Rgb.YELLOW
        Pan = Rgb.ORANGE
        TrackSelect = Rgb.WHITE
        Send = Rgb.WHITE
    class Monitor:
        In = Rgb.LIGHT_BLUE
        Auto = Rgb.YELLOW
        Off = Rgb.YELLOW
        Disabled = Rgb.YELLOW_HALF
    class Transport:
        PlayOn = Rgb.GREEN
        PlayOff = Rgb.GREEN_HALF
        StopEnabled = Rgb.WHITE
        StopDisabled = Rgb.WHITE_HALF
        SeekOn = Rgb.WHITE
        SeekOff = Rgb.WHITE_HALF
        LoopOn = Rgb.YELLOW
        LoopOff = Rgb.YELLOW_HALF
        MetronomeOn = Rgb.YELLOW
        MetronomeOff = Rgb.YELLOW_HALF
    class Recording:
        On = Rgb.RED
        Off = Rgb.RED_HALF
        Transition = Rgb.BLACK
    # Mode-select buttons; one nested class per selectable mode.
    class Mode:
        class Mute:
            On = Rgb.YELLOW
        class Solo:
            On = Rgb.BLUE
        class Monitor:
            On = Rgb.GREEN
        class Arm:
            On = Rgb.RED
        class Devices:
            On = Rgb.PURPLE
            Off = Rgb.PURPLE
        class Pan:
            On = Rgb.ORANGE
            Off = Rgb.ORANGE
        class Sends:
            On = Rgb.WHITE
            Off = Rgb.WHITE
    class DrumGroup:
        PadEmpty = Rgb.BLACK
        PadFilled = Rgb.YELLOW
        PadSelected = Rgb.LIGHT_BLUE
        PadSelectedNotSoloed = Rgb.LIGHT_BLUE
        PadMuted = Rgb.DARK_ORANGE
        PadMutedSelected = Rgb.LIGHT_BLUE
        PadSoloed = Rgb.DARK_BLUE
        PadSoloedSelected = Rgb.LIGHT_BLUE
        PadInvisible = Rgb.BLACK
        PadAction = Rgb.RED
    class ItemNavigation:
        NoItem = Rgb.BLACK
        ItemSelected = Rgb.PURPLE
        ItemNotSelected = Rgb.PURPLE_HALF
    class Device:
        On = Rgb.PURPLE
    class TrackNavigation:
        On = Rgb.LIGHT_BLUE
    class SceneNavigation:
        On = Rgb.WHITE
    class Action:
        Available = Rgb.WHITE
skin = Skin(Colors) | 0 | 2,614 | 23 |
21379215feb06f7d3c18bd160f2818dca2bcd3d0 | 186 | py | Python | 1117 - Validacao de Nota.py | le16bits/URI---Python | 9d22ae74f008104bc9c3c0e2d5f8cd59303bc1db | [
"Apache-2.0"
] | null | null | null | 1117 - Validacao de Nota.py | le16bits/URI---Python | 9d22ae74f008104bc9c3c0e2d5f8cd59303bc1db | [
"Apache-2.0"
] | null | null | null | 1117 - Validacao de Nota.py | le16bits/URI---Python | 9d22ae74f008104bc9c3c0e2d5f8cd59303bc1db | [
"Apache-2.0"
] | null | null | null | med=0
# URI 1117: read grades until two valid ones (0..10) were entered, then
# print their average.  (The accumulator is `total`; the `med=0` on the
# previous line is left as a harmless no-op.)
total = 0.0
count = 0
while count != 2:
    grade = float(input())
    if 0 <= grade <= 10:
        total += grade
        count += 1
    else:
        print("nota invalida")
print("media = %.2f" % (total / 2))
| 14.307692 | 32 | 0.467742 | med=0
# URI 1117: read grades until two valid ones (0..10) were entered, then
# print their average.  (The accumulator is `total`; the `med=0` on the
# previous line is left as a harmless no-op.)
total = 0.0
count = 0
while count != 2:
    grade = float(input())
    if 0 <= grade <= 10:
        total += grade
        count += 1
    else:
        print("nota invalida")
print("media = %.2f" % (total / 2))
| 0 | 0 | 0 |
9bd8849c7f1a7f3654e2873c3d90677a63dbc9bc | 517 | py | Python | atcoder/agc/agc026_b.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | 1 | 2018-11-12T15:18:55.000Z | 2018-11-12T15:18:55.000Z | atcoder/agc/agc026_b.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | atcoder/agc/agc026_b.py | knuu/competitive-programming | 16bc68fdaedd6f96ae24310d697585ca8836ab6e | [
"MIT"
] | null | null | null | for _ in range(int(input())):
A, B, C, D = map(int, input().split())
if A < B or C + D < B:
print("No")
continue
elif C >= B - 1:
print("Yes")
continue
ret = []
s_set = set()
now = A
while True:
now %= B
if now in s_set:
print("Yes", ret)
break
else:
s_set.add(now)
if now <= C:
now += D
ret.append(now)
else:
print("No", ret)
break
| 20.68 | 42 | 0.382979 | for _ in range(int(input())):
A, B, C, D = map(int, input().split())
if A < B or C + D < B:
print("No")
continue
elif C >= B - 1:
print("Yes")
continue
ret = []
s_set = set()
now = A
while True:
now %= B
if now in s_set:
print("Yes", ret)
break
else:
s_set.add(now)
if now <= C:
now += D
ret.append(now)
else:
print("No", ret)
break
| 0 | 0 | 0 |
988add07bc2ff22289d10f731798d2cfd279ae40 | 3,529 | py | Python | program files/plot_3d.py | FiZ-ix/FSAE-Tyre-Analysis | f41a6f84c221404983e6a8af5341aa2193126c4d | [
"MIT"
] | 3 | 2020-10-23T13:03:35.000Z | 2020-11-01T01:41:55.000Z | program files/plot_3d.py | FiZ-ix/FSAE-Tyre-Analysis | f41a6f84c221404983e6a8af5341aa2193126c4d | [
"MIT"
] | null | null | null | program files/plot_3d.py | FiZ-ix/FSAE-Tyre-Analysis | f41a6f84c221404983e6a8af5341aa2193126c4d | [
"MIT"
] | null | null | null | '''
A.Q. Snyder
TFR Tire Data Analysis
this code is written to analyze the TTC FSAE tire data
the code is written in a linear, easy to read format catered towards an engineering mindset
rather than efficient software
Contact: aaron.snyder@temple.edu for help running or understanding the program
'''
#_______________________________________________________________________________________________________________________
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
#run_input = input("Enter the run number you want to study: ") # example of input B1965run2
run_input = 'B1965run2'
data = pd.read_excel (r'C:\Users\Fizics\Desktop\TTC\data\RunData_cornering_ASCII_SI_10in_round8 excel/'+(run_input)+(".xlsx"),skiprows=2)
df = pd.DataFrame(data)
df = df.drop(df.index[0:5000])
# SI Units are being used
# This varaibles are mostly used in the splash graph. You can add whatever other variables you want to look at
speed=df["V"] # kph
pressure=df["P"] # kPa
inclinationAngle=df["IA"] # deg
slipAngle = df["SA"] # deg
verticalLoad = df["FZ"] * -1 # N
Radius_loaded=df["RL"] # cm
lateralForce = df["FY"] # N
alignTorque = df["MZ"] # Nm
slipAngle = np.array(slipAngle)
verticalLoad = np.array(verticalLoad)
lateralForce = np.array(lateralForce)
Z1 = np.where(np.logical_and(verticalLoad>= 0, verticalLoad<=320))
Z2 = np.where(np.logical_and(verticalLoad>= 320, verticalLoad<=550))
Z3 = np.where(np.logical_and(verticalLoad>= 550, verticalLoad<=750))
Z4 = np.where(np.logical_and(verticalLoad>= 750, verticalLoad<=950))
Z5 = np.where(np.logical_and(verticalLoad>= 980, verticalLoad<=1200))
labelAvgZ1 = str(np.round(np.average(verticalLoad[Z1])))+(' N')
labelAvgZ2 = str(np.round(np.average(verticalLoad[Z2])))+(' N')
labelAvgZ3 = str(np.round(np.average(verticalLoad[Z3])))+(' N')
labelAvgZ4 = str(np.round(np.average(verticalLoad[Z4])))+(' N')
labelAvgZ5 = str(np.round(np.average(verticalLoad[Z5])))+(' N')
d = 10
x1 = np.flip(np.sort(slipAngle[Z1]))
y1 = np.sort(lateralForce[Z1])
curve1 = np.polyfit(x1,y1,d)
poly1 = np.poly1d(curve1)
x2 = np.flip(np.sort(slipAngle[Z2]))
y2 = np.sort(lateralForce[Z2])
curve2 = np.polyfit(x2,y2,d)
poly2 = np.poly1d(curve2)
x3 = np.flip(np.sort(slipAngle[Z3]))
y3 = np.sort(lateralForce[Z3])
curve3 = np.polyfit(x3,y3,d)
poly3 = np.poly1d(curve3)
x4 = np.flip(np.sort(slipAngle[Z4]))
y4 = np.sort(lateralForce[Z4])
curve4 = np.polyfit(x4,y4,d)
poly4 = np.poly1d(curve4)
x5 = np.flip(np.sort(slipAngle[Z5]))
y5 = np.sort(lateralForce[Z5])
curve5 = np.polyfit(x5,y5,d)
poly5 = np.poly1d(curve5)
fig1 = plt.figure(figsize = (10,7))
ax1 = plt.axes(projection="3d")
ax1.scatter3D(slipAngle[Z1],lateralForce[Z1],verticalLoad[Z1],marker = 'x',linewidths=0.08,color = 'midnightblue')
ax1.scatter3D(slipAngle[Z2],lateralForce[Z2],verticalLoad[Z2],marker = 'x',linewidths=0.08,color = 'mediumblue')
ax1.scatter3D(slipAngle[Z3],lateralForce[Z3],verticalLoad[Z3],marker = 'x',linewidths=0.08,color = 'slateblue')
ax1.scatter3D(slipAngle[Z4],lateralForce[Z4],verticalLoad[Z4],marker = 'x',linewidths=0.08,color = 'mediumpurple')
ax1.scatter3D(slipAngle[Z5],lateralForce[Z5],verticalLoad[Z5],marker = 'x',linewidths=0.08,color = 'plum')
ax1.plot(x1, poly1(x1), c='lime', label=labelAvgZ1)
ax1.plot(x2, poly2(x2), c='lime', label=labelAvgZ1)
ax1.plot(x4, poly4(x4), c='lime', label=labelAvgZ1)
ax1.plot(x5, poly5(x5), c='lime', label=labelAvgZ1)
ax1.set_xlabel('Slip Angle')
ax1.set_ylabel('Lateral Force')
ax1.set_zlabel('Vertical Load')
plt.show()
| 34.940594 | 137 | 0.731935 | '''
A.Q. Snyder
TFR Tire Data Analysis
this code is written to analyze the TTC FSAE tire data
the code is written in a linear, easy to read format catered towards an engineering mindset
rather than efficient software
Contact: aaron.snyder@temple.edu for help running or understanding the program
'''
#_______________________________________________________________________________________________________________________
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
#run_input = input("Enter the run number you want to study: ") # example of input B1965run2
run_input = 'B1965run2'
data = pd.read_excel (r'C:\Users\Fizics\Desktop\TTC\data\RunData_cornering_ASCII_SI_10in_round8 excel/'+(run_input)+(".xlsx"),skiprows=2)
df = pd.DataFrame(data)
df = df.drop(df.index[0:5000])
# SI Units are being used
# This varaibles are mostly used in the splash graph. You can add whatever other variables you want to look at
speed=df["V"] # kph
pressure=df["P"] # kPa
inclinationAngle=df["IA"] # deg
slipAngle = df["SA"] # deg
verticalLoad = df["FZ"] * -1 # N
Radius_loaded=df["RL"] # cm
lateralForce = df["FY"] # N
alignTorque = df["MZ"] # Nm
slipAngle = np.array(slipAngle)
verticalLoad = np.array(verticalLoad)
lateralForce = np.array(lateralForce)
Z1 = np.where(np.logical_and(verticalLoad>= 0, verticalLoad<=320))
Z2 = np.where(np.logical_and(verticalLoad>= 320, verticalLoad<=550))
Z3 = np.where(np.logical_and(verticalLoad>= 550, verticalLoad<=750))
Z4 = np.where(np.logical_and(verticalLoad>= 750, verticalLoad<=950))
Z5 = np.where(np.logical_and(verticalLoad>= 980, verticalLoad<=1200))
labelAvgZ1 = str(np.round(np.average(verticalLoad[Z1])))+(' N')
labelAvgZ2 = str(np.round(np.average(verticalLoad[Z2])))+(' N')
labelAvgZ3 = str(np.round(np.average(verticalLoad[Z3])))+(' N')
labelAvgZ4 = str(np.round(np.average(verticalLoad[Z4])))+(' N')
labelAvgZ5 = str(np.round(np.average(verticalLoad[Z5])))+(' N')
d = 10
x1 = np.flip(np.sort(slipAngle[Z1]))
y1 = np.sort(lateralForce[Z1])
curve1 = np.polyfit(x1,y1,d)
poly1 = np.poly1d(curve1)
x2 = np.flip(np.sort(slipAngle[Z2]))
y2 = np.sort(lateralForce[Z2])
curve2 = np.polyfit(x2,y2,d)
poly2 = np.poly1d(curve2)
x3 = np.flip(np.sort(slipAngle[Z3]))
y3 = np.sort(lateralForce[Z3])
curve3 = np.polyfit(x3,y3,d)
poly3 = np.poly1d(curve3)
x4 = np.flip(np.sort(slipAngle[Z4]))
y4 = np.sort(lateralForce[Z4])
curve4 = np.polyfit(x4,y4,d)
poly4 = np.poly1d(curve4)
x5 = np.flip(np.sort(slipAngle[Z5]))
y5 = np.sort(lateralForce[Z5])
curve5 = np.polyfit(x5,y5,d)
poly5 = np.poly1d(curve5)
fig1 = plt.figure(figsize = (10,7))
ax1 = plt.axes(projection="3d")
ax1.scatter3D(slipAngle[Z1],lateralForce[Z1],verticalLoad[Z1],marker = 'x',linewidths=0.08,color = 'midnightblue')
ax1.scatter3D(slipAngle[Z2],lateralForce[Z2],verticalLoad[Z2],marker = 'x',linewidths=0.08,color = 'mediumblue')
ax1.scatter3D(slipAngle[Z3],lateralForce[Z3],verticalLoad[Z3],marker = 'x',linewidths=0.08,color = 'slateblue')
ax1.scatter3D(slipAngle[Z4],lateralForce[Z4],verticalLoad[Z4],marker = 'x',linewidths=0.08,color = 'mediumpurple')
ax1.scatter3D(slipAngle[Z5],lateralForce[Z5],verticalLoad[Z5],marker = 'x',linewidths=0.08,color = 'plum')
ax1.plot(x1, poly1(x1), c='lime', label=labelAvgZ1)
ax1.plot(x2, poly2(x2), c='lime', label=labelAvgZ1)
ax1.plot(x4, poly4(x4), c='lime', label=labelAvgZ1)
ax1.plot(x5, poly5(x5), c='lime', label=labelAvgZ1)
ax1.set_xlabel('Slip Angle')
ax1.set_ylabel('Lateral Force')
ax1.set_zlabel('Vertical Load')
plt.show()
| 0 | 0 | 0 |
e32f3c14c79545ff05c55c5bc297341537edf039 | 4,614 | py | Python | clickhouse.py | RaviTejaKomma/clickhouse-data-backup | f6f001346b94ab198068f424b008374e97302b5b | [
"MIT"
] | 1 | 2021-11-29T10:46:46.000Z | 2021-11-29T10:46:46.000Z | clickhouse.py | RaviTejaKomma/clickhouse-data-backup | f6f001346b94ab198068f424b008374e97302b5b | [
"MIT"
] | null | null | null | clickhouse.py | RaviTejaKomma/clickhouse-data-backup | f6f001346b94ab198068f424b008374e97302b5b | [
"MIT"
] | null | null | null | from clickhouse_driver import Client
from utils import *
from configs.default import *
def connect_db():
    """Open a connection to the local ClickHouse server.

    :return: (client, None) on success, (None, error) on failure.
    """
    try:
        client = Client(host='localhost')
    except Exception as exc:
        print("Exception:", exc)
        return None, exc
    return client, None
def get_data_path(gcs_client, db_name):
    """Derive the ClickHouse data root from any table's metadata path.

    :param gcs_client: connected ClickHouse client
    :param db_name: database whose first table is inspected
    :return: (path, None) on success, (None, error) on failure.
    """
    query = "SELECT metadata_path FROM system.tables WHERE database == '%s' limit 1" % (db_name)
    try:
        rows = gcs_client.execute(query)
        # keep only the first four path components, e.g. /var/lib/clickhouse
        global_path = '/'.join(rows[0][0].split('/')[:4])
    except Exception as exc:
        print("Exception:", exc)
        return None, exc
    return global_path, None
def freeze_partitions(clickhouse_client, db_name, table_name, start_date, end_date):
    """Freeze every partition of *table_name* whose id lies within the
    [start_date, end_date] range.

    :return: (partitions, None) on success, (None, error) on failure.
    """
    query = """select distinct partition_id from system.parts where database='%s' and table='%s' and partition_id>='%s' and partition_id<='%s'""" % \
            (db_name, table_name, start_date, end_date)
    try:
        rows = clickhouse_client.execute(query)
        partitions = [row[0] for row in rows]
        for partition in partitions:
            clickhouse_client.execute(
                "ALTER TABLE `%s`.%s FREEZE PARTITION ID '%s';" % (db_name, table_name, partition))
    except Exception as exc:
        print("Exception:", exc)
        return None, exc
    return partitions, None
def delete_partitions(clickhouse_client, db_name, table_name, partitions):
    """Drop the given partitions from *table_name*.

    :return: None on success, the raised exception on failure.
    """
    try:
        for partition in partitions:
            clickhouse_client.execute(
                "ALTER TABLE `%s`.%s DROP PARTITION ID '%s';" % (db_name, table_name, partition))
    except Exception as exc:
        print("Exception:", exc)
        return exc
def attach_partitions(clickhouse_client, db_name, table_name, partitions):
    """Attach the given backup partitions to *table_name*.

    Partition directory names look like '20190424_122_122_0_113'; only the
    leading component (before the first '_') is the partition id.

    :return: None on success, the raised exception on failure.
    """
    try:
        for partition in partitions:
            partition_id = partition.split('_')[0]
            clickhouse_client.execute(
                "ALTER TABLE `%s`.%s ATTACH PARTITION ID '%s';" % (db_name, table_name, partition_id))
    except Exception as exc:
        print("Exception:", exc)
        return exc
def move_from_shadow(db_name, table_name):
    """Copy the frozen partitions for *table_name* out of the shadow
    directory into BACKUP_PATH.

    :return: None on success, the raised exception on failure.
    """
    try:
        # '-' must be encoded as %2D in ClickHouse on-disk database names
        data_path = "data/%s/%s/" % (db_name.replace("-", "%2D"), table_name)
        for folder in os.listdir(SHADOW_PATH):
            if folder == "increment.txt":
                continue
            partitions_path = os.path.join(SHADOW_PATH, folder, data_path)
            for partition in os.listdir(partitions_path):
                copy_dir(os.path.join(partitions_path, partition), BACKUP_PATH)
    except Exception as exc:
        print("Exception :", exc)
        return exc
def move_to_detached(db_name, table_name):
    """Copy restored backup partitions into the table's 'detached'
    directory so they can be attached afterwards.

    :return: (partitions, None) on success, (None, error) on failure.
    """
    try:
        detached_path = os.path.join(GLOBAL_PATH, "data", db_name.replace("-", "%2D"), table_name, "detached")
        restore_backup_path = os.path.join(RESTORE_PATH, "backup")
        partitions = os.listdir(restore_backup_path)
        for partition in partitions:
            copy_dir(os.path.join(restore_backup_path, partition), detached_path)
    except Exception as exc:
        print("Exception:", exc)
        return None, exc
    return partitions, None
| 34.954545 | 193 | 0.651062 | from clickhouse_driver import Client
from utils import *
from configs.default import *
def connect_db():
    """Open a connection to the local ClickHouse server.

    :return: (client, None) on success, (None, error) on failure.
    """
    try:
        client = Client(host='localhost')
    except Exception as exc:
        print("Exception:", exc)
        return None, exc
    return client, None
def get_data_path(gcs_client, db_name):
    """Derive the ClickHouse data root from any table's metadata path.

    :param gcs_client: connected ClickHouse client
    :param db_name: database whose first table is inspected
    :return: (path, None) on success, (None, error) on failure.
    """
    query = "SELECT metadata_path FROM system.tables WHERE database == '%s' limit 1" % (db_name)
    try:
        rows = gcs_client.execute(query)
        # keep only the first four path components, e.g. /var/lib/clickhouse
        global_path = '/'.join(rows[0][0].split('/')[:4])
    except Exception as exc:
        print("Exception:", exc)
        return None, exc
    return global_path, None
def freeze_partitions(clickhouse_client, db_name, table_name, start_date, end_date):
    """Freeze every partition of *table_name* whose id lies within the
    [start_date, end_date] range.

    :return: (partitions, None) on success, (None, error) on failure.
    """
    query = """select distinct partition_id from system.parts where database='%s' and table='%s' and partition_id>='%s' and partition_id<='%s'""" % \
            (db_name, table_name, start_date, end_date)
    try:
        rows = clickhouse_client.execute(query)
        partitions = [row[0] for row in rows]
        for partition in partitions:
            clickhouse_client.execute(
                "ALTER TABLE `%s`.%s FREEZE PARTITION ID '%s';" % (db_name, table_name, partition))
    except Exception as exc:
        print("Exception:", exc)
        return None, exc
    return partitions, None
def delete_partitions(clickhouse_client, db_name, table_name, partitions):
    """Drop the given partitions from *table_name*.

    :return: None on success, the raised exception on failure.
    """
    try:
        for partition in partitions:
            clickhouse_client.execute(
                "ALTER TABLE `%s`.%s DROP PARTITION ID '%s';" % (db_name, table_name, partition))
    except Exception as exc:
        print("Exception:", exc)
        return exc
def attach_partitions(clickhouse_client, db_name, table_name, partitions):
    """Attach the given backup partitions to *table_name*.

    Partition directory names look like '20190424_122_122_0_113'; only the
    leading component (before the first '_') is the partition id.

    :return: None on success, the raised exception on failure.
    """
    try:
        for partition in partitions:
            partition_id = partition.split('_')[0]
            clickhouse_client.execute(
                "ALTER TABLE `%s`.%s ATTACH PARTITION ID '%s';" % (db_name, table_name, partition_id))
    except Exception as exc:
        print("Exception:", exc)
        return exc
def move_from_shadow(db_name, table_name):
    """Copy the frozen partitions for *table_name* out of the shadow
    directory into BACKUP_PATH.

    :return: None on success, the raised exception on failure.
    """
    try:
        # '-' must be encoded as %2D in ClickHouse on-disk database names
        data_path = "data/%s/%s/" % (db_name.replace("-", "%2D"), table_name)
        for folder in os.listdir(SHADOW_PATH):
            if folder == "increment.txt":
                continue
            partitions_path = os.path.join(SHADOW_PATH, folder, data_path)
            for partition in os.listdir(partitions_path):
                copy_dir(os.path.join(partitions_path, partition), BACKUP_PATH)
    except Exception as exc:
        print("Exception :", exc)
        return exc
def move_to_detached(db_name, table_name):
    """Copy restored backup partitions into the table's 'detached'
    directory so they can be attached afterwards.

    :return: (partitions, None) on success, (None, error) on failure.
    """
    try:
        detached_path = os.path.join(GLOBAL_PATH, "data", db_name.replace("-", "%2D"), table_name, "detached")
        restore_backup_path = os.path.join(RESTORE_PATH, "backup")
        partitions = os.listdir(restore_backup_path)
        for partition in partitions:
            copy_dir(os.path.join(restore_backup_path, partition), detached_path)
    except Exception as exc:
        print("Exception:", exc)
        return None, exc
    return partitions, None
| 0 | 0 | 0 |
60d7d86933d74789e6af8587cdafb6707d35891c | 6,966 | py | Python | anytask/courses/pythontask.py | AnnaSvalova/anytask | f814b43c496f67a2efe2a150873a1ae32ad97449 | [
"MIT"
] | 1 | 2018-12-03T05:48:43.000Z | 2018-12-03T05:48:43.000Z | anytask/courses/pythontask.py | AnnaSvalova/anytask | f814b43c496f67a2efe2a150873a1ae32ad97449 | [
"MIT"
] | null | null | null | anytask/courses/pythontask.py | AnnaSvalova/anytask | f814b43c496f67a2efe2a150873a1ae32ad97449 | [
"MIT"
] | 1 | 2021-09-18T22:38:20.000Z | 2021-09-18T22:38:20.000Z | from tasks.models import Task, TaskTaken
from issues.models import Issue
from django.conf import settings
from django.db.models import Q
from django.db import transaction
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
import datetime
@login_required
@transaction.commit_on_success
@login_required
| 35.005025 | 115 | 0.652168 | from tasks.models import Task, TaskTaken
from issues.models import Issue
from django.conf import settings
from django.db.models import Q
from django.db import transaction
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
import datetime
class PythonTaskStat(object):
    """Accumulates per-group and course-wide score statistics for the
    Python-task workflow.

    Call :meth:`update` once per group, then read the results with
    :meth:`get_group_stat` / :meth:`get_course_stat`.
    (Python 2 codebase: dict iteration uses ``iteritems``.)
    """
    def __init__(self, course_tasks):
        self.tasks = course_tasks
        self.group_stat = {}
        self.course_stat = {
            'total': 0.0,
            'active_students': 0,
            'avg_score': 0.0,
        }
    def update(self, group):
        """Fold *group* into both the per-group and the course-wide totals."""
        self._group_update(group)
        self._course_update(group)
    def get_group_stat(self):
        """Return [(group, [(student, total_score, task_list)])] pairs."""
        return [(group, stat['student_stat']) for (group, stat) in self.group_stat.iteritems()]
    def get_course_stat(self):
        """Return per-group summary rows plus a final course-wide row
        (with group=None)."""
        stat = [
            (group, stat['total'], stat['active_students'], stat['avg_score'])
            for (group, stat) in self.group_stat.iteritems()
        ]
        stat.append(
            (None, self.course_stat['total'], self.course_stat['active_students'], self.course_stat['avg_score'])
        )
        return stat
    def _student_stat(self, tasks):
        """Return (total_score, [(task, score)]) over a TaskTaken queryset."""
        total = 0.0
        tasks_list = []
        for task in tasks:
            total += task.score
            tasks_list.append((task.task, task.score))
        return (total, tasks_list)
    def _group_update(self, group):
        """Compute the stats dict for one group and store it in group_stat."""
        stat = {
            'total': 0.0,
            'active_students': 0,
            'avg_score': 0.0,
            'student_stat': [],
        }
        group_students = []
        for student in group.students.filter(is_active=True).order_by('last_name', 'first_name'):
            # taken-or-scored tasks belonging to this course's task set
            tasks = TaskTaken.objects.filter(user=student).filter(task__in=self.tasks) \
                .filter(Q(Q(status=TaskTaken.STATUS_TAKEN) | Q(status=TaskTaken.STATUS_SCORED)))
            if tasks.count() > 0:
                stat['active_students'] += 1
            scores, student_tasks = self._student_stat(tasks)
            group_students.append((student, scores, student_tasks))
            stat['total'] += scores
        stat['student_stat'] = group_students
        if stat['active_students'] > 0:
            stat['avg_score'] = stat['total'] / stat['active_students']
        self.group_stat[group] = stat
    def _course_update(self, group):
        """Add *group*'s already-computed stats into the course totals."""
        stat = self.group_stat[group]
        self.course_stat['total'] += stat['total']
        self.course_stat['active_students'] += stat['active_students']
        if self.course_stat['active_students'] > 0:
            self.course_stat['avg_score'] = self.course_stat['total'] / self.course_stat['active_students']
        else:
            self.course_stat['avg_score'] = 0.0
def tasks_list(request, course):
    """Build the Python-task board for *course*.

    For every top-level task (and its subtasks, if any) collect the active
    (taken or scored) TaskTaken records; when
    ``PYTHONTASK_MAX_DAYS_WITHOUT_SCORES`` is set, annotate each
    taken-but-unscored record with the date it will be auto-cancelled.
    """
    user = request.user
    course.can_edit = course.user_can_edit_course(user)
    delta = datetime.timedelta(days=settings.PYTHONTASK_MAX_DAYS_WITHOUT_SCORES)
    task_and_task_taken = []
    for task in Task.objects.filter(course=course).filter(parent_task=None).order_by('weight'):
        task.add_user_properties(user)
        if task.task_text is None:
            task.task_text = ''
        task_taken_list = []
        for task_taken in TaskTaken.objects.filter(task=task).exclude(task__is_hidden=True).filter(
                Q(Q(status=TaskTaken.STATUS_TAKEN) | Q(status=TaskTaken.STATUS_SCORED))):
            if settings.PYTHONTASK_MAX_DAYS_WITHOUT_SCORES and task_taken.status == TaskTaken.STATUS_TAKEN:
                # deadline after which an unscored take is cancelled
                task_taken.cancel_date = task_taken.taken_time + delta
            task_taken_list.append(task_taken)
        if task.has_subtasks():
            subtask_and_task_takens = []
            for subtask in Task.objects.filter(parent_task=task).order_by('weight'):
                subtask.add_user_properties(user)
                if subtask.task_text is None:
                    subtask.task_text = ''
                subtask_takens = list(TaskTaken.objects.filter(task=subtask).exclude(task__is_hidden=True).exclude(
                    task__parent_task__is_hidden=True).filter(
                    Q(Q(status=TaskTaken.STATUS_TAKEN) | Q(status=TaskTaken.STATUS_SCORED))))
                if settings.PYTHONTASK_MAX_DAYS_WITHOUT_SCORES:
                    for subtask_taken in filter(lambda x: x.status == TaskTaken.STATUS_TAKEN, subtask_takens):
                        subtask_taken.cancel_date = subtask_taken.taken_time + delta
                subtask_and_task_takens.append((subtask, subtask_takens))
            task_and_task_taken.append((task, subtask_and_task_takens))
        else:
            task_and_task_taken.append((task, task_taken_list))
    context = {
        'course': course,
        'user': user,
        'tasks_taken': task_and_task_taken,
        'user_is_teacher': course.user_is_teacher(user),
        'STATUS_TAKEN': TaskTaken.STATUS_TAKEN,
        'STATUS_SCORED': TaskTaken.STATUS_SCORED,
    }
    return render_to_response('course_tasks_potok.html', context, context_instance=RequestContext(request))
def python_stat(request, course):
    """Render per-group and course-wide Python-task statistics for *course*."""
    course_tasks = Task.objects.filter(course=course)
    stat = PythonTaskStat(course_tasks)
    for group in course.groups.all().order_by('name'):
        stat.update(group)
    return render_to_response(
        'statistics.html',
        {
            'course': course,
            'group_stat': stat.get_group_stat(),
            'course_stat': stat.get_course_stat()
        },
        context_instance=RequestContext(request))
@login_required
@transaction.commit_on_success
def get_task(request, course_id, task_id):
    """Sign the current user up for a task.

    When the user is allowed to take the task, creates/updates the
    TaskTaken record, lazily creates its discussion Issue and leaves a
    sign-up comment; always redirects back to the course page.
    """
    user = request.user
    task = get_object_or_404(Task, id=task_id)
    user_can_take_task, reason = task.user_can_take_task(user)
    if user_can_take_task:
        task_taken, created = TaskTaken.objects.get_or_create(user=user, task=task)
        task_taken.take()
        if not task_taken.issue:
            # lazily create the discussion issue for this (task, student) pair
            issue, created = Issue.objects.get_or_create(task=task, student=user)
            task_taken.issue = issue
            task_taken.save()
        # translated message key; unicode() forces evaluation (Python 2)
        task_taken.issue.add_comment(unicode(_("zapisalsya_na_task")))
    return redirect('courses.views.course_page', course_id=course_id)
@login_required
def cancel_task(request, course_id, task_id):
    """Withdraw the current user from a task.

    When cancellation is allowed, marks the TaskTaken record as cancelled,
    lazily creates its discussion Issue and leaves a withdrawal comment;
    always redirects back to the course page.
    """
    user = request.user
    task = get_object_or_404(Task, id=task_id)
    if task.user_can_cancel_task(user):
        task_taken = get_object_or_404(TaskTaken, user=user, task=task)
        task_taken.cancel()
        if not task_taken.issue:
            # lazily create the discussion issue for this (task, student) pair
            issue, created = Issue.objects.get_or_create(task=task, student=user)
            task_taken.issue = issue
            task_taken.save()
        task_taken.issue.add_comment(u"{} {} {}".format(user.first_name, user.last_name, _("otkazalsya_ot_taska")))
    return redirect('courses.views.course_page', course_id=course_id)
| 6,164 | 8 | 301 |
70f243da8ea27dfd8b48b82b37efe2226ca86dd1 | 3,967 | py | Python | dynaconf/base.py | rochacbruno/dynaconf_ng | 2127ce78c511047f4aa9499a826caf842dcdf036 | [
"MIT"
] | 6 | 2021-07-08T05:53:26.000Z | 2022-03-02T15:11:42.000Z | dynaconf/base.py | rochacbruno/dynaconf_ng | 2127ce78c511047f4aa9499a826caf842dcdf036 | [
"MIT"
] | 1 | 2021-07-13T13:49:43.000Z | 2021-07-13T13:49:43.000Z | dynaconf/base.py | rochacbruno/dynaconf_ng | 2127ce78c511047f4aa9499a826caf842dcdf036 | [
"MIT"
] | null | null | null | from pathlib import Path
from typing import Any, Optional, Union
from pydantic import BaseModel, BaseSettings, Extra, Field
SettingsField = Field # noqa
"""Extendable settings field"""
class BaseDynaconf:
"""Extendable model"""
def _d_get_value(self, key: str, default: Any = None) -> Any:
"""Get a setting value.
:param key: The setting key
:param default: The default value to return if the setting is not found
or an exception to raise if the setting is not found.
:return: The setting value
- This is called only when the attribute is not found in the instance.
- if the key has `__` in it, it is treated as a compound setting.
(__ will be replaced with . to get the key)
- If the key is a dot separated path, e.g. 'a.b.c',
it will be split into a list of keys and passed to
d_get_value for recursive nesting lookup.
- try to get a swaped case A -> b, B -> a, etc.
- try to get a lower case version of the key
- try to get an uppercase version of the key
- if the final value is a callable it will be called with the
key + settings instance as arguments.
"""
if "__" in key or "." in key:
key = key.replace("__", ".")
head, *tail = key.split(".")
return self._d_get_value(
head, default
)._d_get_value(
".".join(tail), default
)
for lookup_key in (
key.swapcase(),
key.lower(),
key.upper(),
):
try:
return self.__getattribute__(lookup_key)
except AttributeError:
continue
if issubclass(default, Exception):
raise default(
f"'{self.__class__.__name__}' object has no attribute '{key}'"
)
return default(key, self) if callable(default) else default
def __getattr__(self, name: str) -> Any:
"""Get a setting value by its attribute name."""
return self._d_get_value(name, default=AttributeError)
def __getitem__(self, key: str) -> Any:
"""Get a setting value by its key name (simulate a dictionary).
try to get the get as an attribute as it is
if an exception is raised then try the get_value lookup.
"""
try:
return getattr(self, key)
except AttributeError:
return self._d_get_value(key, default=KeyError)
def get(self, key: str, default: Any = None) -> Any:
"""Get a setting value by its key name.
Delegate to __getitem__ to simulate a dictionary.
:param key: The setting key
:param default: The default value to return if the setting is not found
:return: The setting value
"""
try:
return self[key]
except KeyError:
return default
class SubModel(BaseDynaconf, BaseModel):
"""A Type for compound settings.
Represents a Dict under a Dynaconf object.
"""
class Dynaconf(BaseDynaconf, BaseSettings):
"""Settings Management."""
class Config(BaseSettings.Config):
"""Settings Configuration."""
env_prefix = "DYNACONF_"
env_file = None
env_file_encoding = 'utf-8'
secrets_dir = None
validate_all = True
extra = Extra.allow
arbitrary_types_allowed = True
case_sensitive = False
__config__: Config # type: ignore
| 31.23622 | 79 | 0.586337 | from pathlib import Path
from typing import Any, Optional, Union
from pydantic import BaseModel, BaseSettings, Extra, Field
SettingsField = Field # noqa
"""Extendable settings field"""
class BaseDynaconf:
    """Extendable model: attribute- and item-style access to settings with
    case-insensitive and dotted/``__``-separated compound-key lookup."""

    def _d_get_value(self, key: str, default: Any = None) -> Any:
        """Get a setting value.

        :param key: The setting key
        :param default: The default value to return if the setting is not
            found, an exception class to raise if it is not found, or a
            callable ``default(key, settings)`` producing the fallback.
        :return: The setting value

        Lookup rules:
        - This is called only when the attribute is not found directly.
        - ``__`` in the key is treated as ``.`` (compound setting).
        - A dotted key, e.g. ``a.b.c``, is resolved recursively one path
          segment at a time.
        - The plain key is tried as swapped case, lower case and upper case.
        """
        if "__" in key or "." in key:
            key = key.replace("__", ".")
            head, *tail = key.split(".")
            # Resolve the first path segment, then recurse into the rest.
            return self._d_get_value(
                head, default
            )._d_get_value(
                ".".join(tail), default
            )
        for lookup_key in (
            key.swapcase(),
            key.lower(),
            key.upper(),
        ):
            try:
                return self.__getattribute__(lookup_key)
            except AttributeError:
                continue
        # `default` may be a plain value, a callable factory, or an exception
        # class to raise.  The isinstance(default, type) guard is required:
        # issubclass() raises TypeError for non-class arguments (e.g. the
        # documented default of None).
        if isinstance(default, type) and issubclass(default, Exception):
            raise default(
                f"'{self.__class__.__name__}' object has no attribute '{key}'"
            )
        return default(key, self) if callable(default) else default

    def __getattr__(self, name: str) -> Any:
        """Get a setting value by its attribute name."""
        return self._d_get_value(name, default=AttributeError)

    def __getitem__(self, key: str) -> Any:
        """Get a setting value by its key name (simulate a dictionary).

        Tries plain attribute access first; on failure falls back to the
        flexible lookup, raising KeyError when nothing matches.
        """
        try:
            return getattr(self, key)
        except AttributeError:
            return self._d_get_value(key, default=KeyError)

    def get(self, key: str, default: Any = None) -> Any:
        """Get a setting value by its key name.

        Delegates to __getitem__ to simulate a dictionary.

        :param key: The setting key
        :param default: The default value to return if the setting is not found
        :return: The setting value
        """
        try:
            return self[key]
        except KeyError:
            return default
class SubModel(BaseDynaconf, BaseModel):
    """A Type for compound settings.

    Represents a Dict under a Dynaconf object: nested values get the same
    attribute/item/``get`` lookup behaviour as the top-level settings.
    """
class Dynaconf(BaseDynaconf, BaseSettings):
    """Settings Management.

    Pydantic ``BaseSettings`` subclass with Dynaconf-style flexible lookup
    mixed in via ``BaseDynaconf``.
    """
    def __init__(
        __pydantic_self__,
        _env_file: Union[Path, str, None] = None,
        _env_file_encoding: Optional[str] = None,
        _secrets_dir: Union[Path, str, None] = None,
        **values: Any
    ) -> None:
        # Thin pass-through to BaseSettings.__init__.
        # NOTE(review): the default of None for _env_file differs from
        # pydantic's internal sentinel default; presumably equivalent here
        # because Config.env_file is also None — confirm against pydantic.
        super().__init__(
            _env_file=_env_file,
            _env_file_encoding=_env_file_encoding,
            _secrets_dir=_secrets_dir,
            **values
        )
    class Config(BaseSettings.Config):
        """Settings Configuration."""
        env_prefix = "DYNACONF_"   # env vars are read as DYNACONF_<NAME>
        env_file = None
        env_file_encoding = 'utf-8'
        secrets_dir = None
        validate_all = True
        extra = Extra.allow        # unknown keys are kept, not rejected
        arbitrary_types_allowed = True
        case_sensitive = False
    __config__: Config # type: ignore
| 389 | 0 | 27 |
6441c7d1b42e0ec4602b09d8c03ae526ed933d76 | 1,728 | py | Python | GUI/printer/Pillow-2.7.0/Tests/test_image_mode.py | y-gupta/rfid-auth-system | 44f3de884d05e1906757b97f0a1a140469a3290f | [
"Apache-2.0"
] | 5 | 2015-01-21T14:13:34.000Z | 2016-05-14T06:53:38.000Z | GUI/printer/Pillow-2.7.0/Tests/test_image_mode.py | 1upon0/rfid-auth-system | 44f3de884d05e1906757b97f0a1a140469a3290f | [
"Apache-2.0"
] | null | null | null | GUI/printer/Pillow-2.7.0/Tests/test_image_mode.py | 1upon0/rfid-auth-system | 44f3de884d05e1906757b97f0a1a140469a3290f | [
"Apache-2.0"
] | 3 | 2015-02-01T17:10:39.000Z | 2019-12-05T05:21:42.000Z | from helper import unittest, PillowTestCase, hopper
from PIL import Image
if __name__ == '__main__':
unittest.main()
# End of file
| 29.793103 | 71 | 0.489583 | from helper import unittest, PillowTestCase, hopper
from PIL import Image
class TestImageMode(PillowTestCase):
    """Tests for PIL.ImageMode descriptors and Image.getmode* helpers."""

    def test_sanity(self):
        """Smoke test: getmode() works and descriptors carry expected fields."""
        image = hopper()
        image.mode  # attribute access must not raise

        from PIL import ImageMode

        # Looking up any standard mode must succeed.
        for name in ("1", "L", "P", "RGB", "I", "F"):
            ImageMode.getmode(name)

        descriptor = ImageMode.getmode("1")
        self.assertEqual(descriptor.mode, "1")
        self.assertEqual(descriptor.bands, ("1",))
        self.assertEqual(descriptor.basemode, "L")
        self.assertEqual(descriptor.basetype, "L")

        descriptor = ImageMode.getmode("RGB")
        self.assertEqual(descriptor.mode, "RGB")
        self.assertEqual(descriptor.bands, ("R", "G", "B"))
        self.assertEqual(descriptor.basemode, "RGB")
        self.assertEqual(descriptor.basetype, "L")

    def test_properties(self):
        """Image.getmode* helpers agree with the expected per-mode data."""
        expected = [
            ("1", "L", "L", 1, ("1",)),
            ("L", "L", "L", 1, ("L",)),
            ("P", "RGB", "L", 1, ("P",)),
            ("I", "L", "I", 1, ("I",)),
            ("F", "L", "F", 1, ("F",)),
            ("RGB", "RGB", "L", 3, ("R", "G", "B")),
            ("RGBA", "RGB", "L", 4, ("R", "G", "B", "A")),
            ("RGBX", "RGB", "L", 4, ("R", "G", "B", "X")),
            ("RGBX", "RGB", "L", 4, ("R", "G", "B", "X")),
            ("CMYK", "RGB", "L", 4, ("C", "M", "Y", "K")),
            ("YCbCr", "RGB", "L", 3, ("Y", "Cb", "Cr")),
        ]
        for mode, base, kind, bands, names in expected:
            self.assertEqual(
                (
                    Image.getmodebase(mode), Image.getmodetype(mode),
                    Image.getmodebands(mode), Image.getmodebandnames(mode),
                ),
                (base, kind, bands, names),
            )
if __name__ == '__main__':
unittest.main()
# End of file
| 1,496 | 15 | 77 |
eab6c257c582df55a8d67f6190eb0e09bd9ee002 | 5,713 | py | Python | pvlibs/data_import/loana.py | bfw930/pvlibs | 94bed6cbca1b349eac29136d6703959aeb61bf29 | [
"MIT"
] | null | null | null | pvlibs/data_import/loana.py | bfw930/pvlibs | 94bed6cbca1b349eac29136d6703959aeb61bf29 | [
"MIT"
] | 5 | 2020-03-24T17:26:11.000Z | 2020-12-15T04:39:42.000Z | pvlibs/data_import/loana.py | bfw930/pvlibs | 94bed6cbca1b349eac29136d6703959aeb61bf29 | [
"MIT"
] | null | null | null |
''' Functions
Summary:
This file contains
Example:
Usage of
Todo:
*
'''
''' Imports '''
# data array handling
import numpy as np
# data table handling
import pandas as pd
from operator import itemgetter
from itertools import groupby
''' Current-Voltage File Format Parse Functions '''
def _read_lines(file_path):
    """Read a LOANA text file (iso-8859-1) and return its raw lines."""
    with open(file_path, 'r', encoding = 'iso-8859-1') as file:
        return file.readlines()


def _parse_sections(lines):
    """Parse ``[Header]`` sections out of LOANA file lines.

    Each section starts at a ``[Header]`` line and ends at the next blank
    line; entry lines are ``key:\\tvalue[\\tvalue...]``.  The trailing
    character of the key (the ``:``) is stripped, as in the original format.
    """
    sections = {}
    head_ids = [ i for i in range(len(lines)) if lines[i].startswith('[') ]
    tail_ids = [ i for i in range(len(lines)) if lines[i].startswith('\n') ]
    for start, end in zip(head_ids, tail_ids):
        header = lines[start].strip('\n[]')
        sections[header] = {}
        for j in range(start + 1, end):
            val = lines[j].strip('\n').split('\t')
            sections[header][val[0][:-1]] = val[1:]
    return sections


def type_loana(file_path):
    ''' Parse LOANA Current-Voltage Format

    Import current-voltage measurement settings and data from a LOANA format
    file and its ``.drk`` / ``.jv`` companion files (same base name).

    Args:
        file_path (str): full filepath of the main (light) file
    Returns:
        dict: extracted data and parameters, limited to the keys
            'Results', 'Data', 'Sample', 'Dark' and 'JV'
    '''
    lines = _read_lines(file_path)
    results = _parse_sections(lines)
    # numeric measurement block follows the '**Data**' marker line
    idx = [ i for i in range(len(lines)) if lines[i].startswith('**Data**') ][0] + 1
    results['Data'] = np.array([ [ float(l) for l in lines[i].strip('\n').split('\t') ]
                                 for i in range(idx, len(lines)) ])
    # companion files share the base name with .drk / .jv extensions
    results['Dark'] = _parse_sections(_read_lines(file_path[:-3] + 'drk'))
    results['JV'] = _parse_sections(_read_lines(file_path[:-3] + 'jv'))
    # only keep desired data
    keep = ['Results', 'Data', 'Sample', 'Dark', 'JV']
    results = { k: v for k, v in results.items() if k in keep }
    # return data dict
    return results
def type_loana_bak(file_path):
    ''' Parse the legacy LOANA result triplet (.lgt/.drk/.jv).

    For each companion file (same base name) the ``[Results]`` section is
    parsed and its numeric entries are flattened into a single dict keyed
    ``<ext>-<name>``; the non-numeric ``Model`` entry is skipped.

    # inputs
        _file_path (str): full filepath
    Returns:
        dict: extracted data and parameters
    '''
    flattened = {}
    for ext in ['lgt', 'drk', 'jv', ]:
        with open(file_path[:-3] + ext, 'r', encoding = 'iso-8859-1') as handle:
            lines = handle.readlines()
        # section boundaries: '[Header]' opens, blank line closes
        starts = [ n for n, line in enumerate(lines) if line.startswith('[') ]
        stops = [ n for n, line in enumerate(lines) if line.startswith('\n') ]
        sections = {}
        for start, stop in zip(starts, stops):
            entries = {}
            for row in lines[start + 1:stop]:
                fields = row.strip('\n').split('\t')
                entries[fields[0][:-1]] = fields[1:]
            sections[lines[start].strip('\n[]')] = entries
        # numeric block after the '**Data**' marker (parsed for parity with
        # the original, then discarded by the keep-filter below)
        marker = [ n for n, line in enumerate(lines) if line.startswith('**Data**') ][0]
        sections['data'] = np.array(
            [ [ float(cell) for cell in lines[n].strip('\n').split('\t') ]
              for n in range(marker + 1, len(lines)) ])
        sections = { key: value for key, value in sections.items() if key in ('Results',) }
        for name, values in sections['Results'].items():
            if name != 'Model':
                flattened['{}-{}'.format(ext, name)] = float(values[0])
    return flattened
def loana(file_type, file_path, file_name):
    ''' Parse File

    Parse source data file of given format, run post parse processing, and
    return imported data.

    Args:
        file_type (str): data file format ('loana' or 'loana-bak')
        file_path (str): directory containing the file
        file_name (str): file name including extension
    Returns:
        dict: parsed, processed data and parameters from file
    Raises:
        ValueError: if ``file_type`` is not a recognised LOANA format
    '''
    full_path = '{}/{}'.format(file_path, file_name)
    if file_type == 'loana-bak':
        # legacy backup format: flat dict of Results values
        data = type_loana_bak(file_path = full_path)
    elif file_type == 'loana':
        data = type_loana(file_path = full_path)
    else:
        # previously fell through to an UnboundLocalError; fail explicitly
        raise ValueError('unknown LOANA file type: {!r}'.format(file_type))
    # return imported data
    return data
| 24.731602 | 117 | 0.561001 |
''' Functions
Summary:
This file contains
Example:
Usage of
Todo:
*
'''
''' Imports '''
# data array handling
import numpy as np
# data table handling
import pandas as pd
from operator import itemgetter
from itertools import groupby
''' Current-Voltage File Format Parse Functions '''
def type_loana(file_path):
''' Parse HALM Current-Voltage Format
Import current-voltage measurement settings and data from a HALM format file
# inputs
_file_path (str): full filepath
Returns:
dict: extracted data and parameters
'''
#res = {}
# open data file and extract lines
with open(file_path, 'r', encoding = 'iso-8859-1') as file:
lines = file.readlines()
results = {}
head_ids = [ i for i in range(len(lines)) if lines[i].startswith('[') ]
tail_ids = [ i for i in range(len(lines)) if lines[i].startswith('\n') ]
seg_ids = list(zip(head_ids, tail_ids))
for seg in seg_ids[:]:
header = lines[seg[0]].strip('\n[]')
results[header] = {}
for j in range(seg[0]+1, seg[1]):
val = lines[j].strip('\n').split('\t')
results[header][val[0][:-1]] = val[1:]
idx = [ i for i in range(len(lines)) if lines[i].startswith('**Data**') ][0] + 1
data = np.array([ [ float(l) for l in lines[i].strip('\n').split('\t') ] for i in range(idx, len(lines)) ])
#print(results.keys())
results['Data'] = data
# open data file and extract lines
with open(file_path[:-3]+'drk', 'r', encoding = 'iso-8859-1') as file:
lines = file.readlines()
dark_results = {}
head_ids = [ i for i in range(len(lines)) if lines[i].startswith('[') ]
tail_ids = [ i for i in range(len(lines)) if lines[i].startswith('\n') ]
seg_ids = list(zip(head_ids, tail_ids))
for seg in seg_ids[:]:
header = lines[seg[0]].strip('\n[]')
dark_results[header] = {}
for j in range(seg[0]+1, seg[1]):
val = lines[j].strip('\n').split('\t')
dark_results[header][val[0][:-1]] = val[1:]
results['Dark'] = dark_results
# open data file and extract lines
with open(file_path[:-3]+'jv', 'r', encoding = 'iso-8859-1') as file:
lines = file.readlines()
jv_results = {}
head_ids = [ i for i in range(len(lines)) if lines[i].startswith('[') ]
tail_ids = [ i for i in range(len(lines)) if lines[i].startswith('\n') ]
seg_ids = list(zip(head_ids, tail_ids))
for seg in seg_ids[:]:
header = lines[seg[0]].strip('\n[]')
jv_results[header] = {}
for j in range(seg[0]+1, seg[1]):
val = lines[j].strip('\n').split('\t')
jv_results[header][val[0][:-1]] = val[1:]
results['JV'] = jv_results
keep = ['Results', 'Data', 'Sample', 'Dark', 'JV']
#keep = ['Results']
# only keep desired data
results = { k:v for k,v in results.items() if k in keep }
#print(results)
#res = { **res, **{ '{}-{}'.format(ff,k):float(v[0]) for k,v in results['Results'].items() if k != 'Model' } }
# return data dict
return results
def type_loana_bak(file_path):
''' Parse HALM Current-Voltage Format
Import current-voltage measurement settings and data from a HALM format file
# inputs
_file_path (str): full filepath
Returns:
dict: extracted data and parameters
'''
res = {}
# iterate over loana data files
for ff in ['lgt', 'drk', 'jv', ]:
# open data file and extract lines
with open(file_path[:-3]+ff, 'r', encoding = 'iso-8859-1') as file:
lines = file.readlines()
results = {}
head_ids = [ i for i in range(len(lines)) if lines[i].startswith('[') ]
tail_ids = [ i for i in range(len(lines)) if lines[i].startswith('\n') ]
seg_ids = list(zip(head_ids, tail_ids))
for seg in seg_ids[:]:
header = lines[seg[0]].strip('\n[]')
results[header] = {}
for j in range(seg[0]+1, seg[1]):
val = lines[j].strip('\n').split('\t')
results[header][val[0][:-1]] = val[1:]
idx = [ i for i in range(len(lines)) if lines[i].startswith('**Data**') ][0] + 1
data = np.array([ [ float(l) for l in lines[i].strip('\n').split('\t') ] for i in range(idx, len(lines)) ])
results['data'] = data
keep = ['Results',]
# only keep desired data
results = { k:v for k,v in results.items() if k in keep }
#print(results)
#if ff == 'lgt':
#print(ff, results['Results']['Intensity'])
#ff = '{}-{}'.format(ff,'{}s{}'.format(
# *str(float(results['Results']['Intensity'][0])).split('.')))
#print(ff)
res = { **res, **{ '{}-{}'.format(ff,k):float(v[0]) for k,v in results['Results'].items() if k != 'Model' } }
# return data dict
return res
def loana(file_type, file_path, file_name):
''' Parse File
Parse source data file of given format, run post parse processing, and return imported data
Args:
file_path (str): full file path including name with extension
file_type (str): data file format information
Returns:
dict: parsed, processed data and parameters from file
'''
# HALM IV
if file_type == 'loana-bak':
#if True:
# import raw data from file
data = type_loana_bak(file_path = '{}/{}'.format(file_path, file_name))
elif file_type == 'loana':
# import raw data from file
data = type_loana(file_path = '{}/{}'.format(file_path, file_name))
# return imported data
return data
| 0 | 0 | 0 |
109fa9280bd1e97603de17491d9fb060284caed0 | 2,121 | py | Python | test/unit/core/hooks/test_HooksRegistry.py | novopl/peltak | 7c8ac44f994d923091a534870960fdae1e15e95e | [
"Apache-2.0"
] | 6 | 2015-09-10T13:20:34.000Z | 2021-02-15T08:10:27.000Z | test/unit/core/hooks/test_HooksRegistry.py | novopl/peltak | 7c8ac44f994d923091a534870960fdae1e15e95e | [
"Apache-2.0"
] | 41 | 2015-09-09T12:44:55.000Z | 2021-06-01T23:25:56.000Z | test/unit/core/hooks/test_HooksRegistry.py | novopl/peltak | 7c8ac44f994d923091a534870960fdae1e15e95e | [
"Apache-2.0"
] | null | null | null | # pylint: disable=missing-docstring
from unittest.mock import Mock
import pytest
from peltak.core.hooks import HooksRegister
| 23.054348 | 65 | 0.7124 | # pylint: disable=missing-docstring
from unittest.mock import Mock
import pytest
from peltak.core.hooks import HooksRegister
def test_is_called_when_subscribed():
    """A subscribed handler is invoked when its hook fires."""
    fake_handler = Mock()
    register = HooksRegister()
    register('fake-hook')(fake_handler)
    register.call('fake-hook')
    fake_handler.assert_called_once_with()
def test_is_not_called_after_unsubscribed():
    """A removed handler is no longer invoked when the hook fires."""
    conf = {}
    fake_handler = Mock()
    register = HooksRegister()
    register('fake-hook')(fake_handler)
    register.remove('fake-hook', fake_handler)
    register.call('fake-hook', conf)
    fake_handler.assert_not_called()
def test_passes_arguments_to_the_hooks():
    """Positional arguments given to call() are forwarded to handlers."""
    param = {'name': 'hello'}
    fake_handler = Mock()
    register = HooksRegister()
    register('fake-hook')(fake_handler)
    register.call('fake-hook', param, 1)
    fake_handler.assert_called_once_with(param, 1)
def test_can_register_hooks_with_decorator():
    """register('<name>') can be used as a decorator factory."""
    fake_fn = Mock()
    register = HooksRegister()
    @register('fake-hook')
    def post_conf_load(): # pylint: disable=unused-variable
        fake_fn()
    register.call('fake-hook')
    fake_fn.assert_called_once_with()
def test_registering_with_empty_name_raises_ValueError():
    """Registering a handler under an empty hook name is rejected."""
    fake_handler = Mock()
    register = HooksRegister()
    with pytest.raises(ValueError):
        register('')(fake_handler)
def test_calling_a_hook_with_empty_name_raises_ValueError():
    """Firing a hook with an empty name is rejected."""
    register = HooksRegister()
    with pytest.raises(ValueError):
        register.call('')
def test_trying_to_remove_unregistered_hook_raises_ValueError():
    """Removing a handler that was never registered is rejected,
    both for an unknown hook and for a known hook with other handlers."""
    fake_handler1 = Mock()
    fake_handler2 = Mock()
    register = HooksRegister()
    with pytest.raises(ValueError):
        register.remove('fake-hook', fake_handler1)
    register('fake-hook')(fake_handler2)
    with pytest.raises(ValueError):
        register.remove('fake-hook', fake_handler1)
def test_registering_hook_twice_raises_ValueError():
    """The same handler cannot be registered twice for one hook."""
    fake_handler = Mock()
    register = HooksRegister()
    register('fake-hook')(fake_handler)
    with pytest.raises(ValueError):
        register('fake-hook')(fake_handler)
| 1,802 | 0 | 184 |
6793f836930f73e131d4395418728751c84330db | 2,155 | py | Python | backend/src/tests/test_datasets.py | VarityPlatform/lablr | f6eee78f5eb352f21c849a5ee72ee23b418167e1 | [
"MIT"
] | 1 | 2021-09-20T18:21:15.000Z | 2021-09-20T18:21:15.000Z | backend/src/tests/test_datasets.py | VarityPlatform/lablr | f6eee78f5eb352f21c849a5ee72ee23b418167e1 | [
"MIT"
] | null | null | null | backend/src/tests/test_datasets.py | VarityPlatform/lablr | f6eee78f5eb352f21c849a5ee72ee23b418167e1 | [
"MIT"
] | null | null | null | """
Test datasets
"""
from fastapi.testclient import TestClient
from main import app, PREFIX
client = TestClient(app)
EXAMPLE_DATASET_BODY = {
"name": "Unit Test Dataset",
"description": "Dataset created during a unit test",
"labels": [
{
"name": "Boolean",
"variant": "boolean",
},
{
"name": "Numerical",
"variant": "numerical",
"minimum": -1,
"maximum": 1,
"interval": 0.5,
},
],
}
def test_get_datasets():
    """Unit test for hitting the /datasets endpoint"""
    # listing should always succeed, even with no datasets present
    response = client.get(f"{PREFIX}/datasets")
    assert response.status_code == 200
def test_get_nonexistent_dataset():
    """Unit test for trying to fetch a nonexistent dataset"""
    # the id below is arbitrary and assumed not to exist in the test backend
    response = client.get(f"{PREFIX}/datasets/daflkadsjflkajdflkadfadadfadsfad")
    assert response.status_code == 404
def test_create_delete_dataset():
    """Unit test for creating and subsequently deleting a dataset"""
    body = EXAMPLE_DATASET_BODY.copy()
    response = client.post(f"{PREFIX}/datasets", json=body)
    # Check the status first so a failed creation produces a clear assertion
    # error instead of a KeyError when indexing the response body.
    assert response.status_code == 200
    response_body = response.json()
    dataset_id = response_body["dataset_id"]
    assert response_body["name"] == "Unit Test Dataset"
    assert response_body["description"] == "Dataset created during a unit test"
    # the new dataset must be retrievable ...
    response = client.get(f"{PREFIX}/datasets/{dataset_id}")
    assert response.status_code == 200
    # ... and deletable
    response = client.delete(f"{PREFIX}/datasets/{dataset_id}")
    assert response.status_code == 200
def test_create_missing_fields_dataset():
    """Unit test for creating a dataset with missing important fields"""
    # each required field, when omitted, must trigger validation failure (422)
    for field in ["name", "description", "labels"]:
        body = EXAMPLE_DATASET_BODY.copy()
        del body[field]
        response = client.post(f"{PREFIX}/datasets", json=body)
        assert response.status_code == 422
def test_delete_nonexistent_dataset():
    """Unit test for trying to delete a nonexistent dataset"""
    # deleting an unknown id must 404 rather than silently succeed
    response = client.delete(f"{PREFIX}/datasets/daflkadsjflkajdflkadfadadfadsfad")
    assert response.status_code == 404
| 25.963855 | 83 | 0.657077 | """
Test datasets
"""
from fastapi.testclient import TestClient
from main import app, PREFIX
client = TestClient(app)
EXAMPLE_DATASET_BODY = {
"name": "Unit Test Dataset",
"description": "Dataset created during a unit test",
"labels": [
{
"name": "Boolean",
"variant": "boolean",
},
{
"name": "Numerical",
"variant": "numerical",
"minimum": -1,
"maximum": 1,
"interval": 0.5,
},
],
}
def test_get_datasets():
"""Unit test for hitting the /datasets endpoint"""
response = client.get(f"{PREFIX}/datasets")
assert response.status_code == 200
def test_get_nonexistent_dataset():
"""Unit test for trying to fetch a nonexistent dataset"""
response = client.get(f"{PREFIX}/datasets/daflkadsjflkajdflkadfadadfadsfad")
assert response.status_code == 404
def test_create_delete_dataset():
"""Unit test for creating and subsequently deleting a dataset"""
body = EXAMPLE_DATASET_BODY.copy()
response = client.post(f"{PREFIX}/datasets", json=body)
response_body = response.json()
dataset_id = response_body["dataset_id"]
assert response.status_code == 200
assert response_body["name"] == "Unit Test Dataset"
assert response_body["description"] == "Dataset created during a unit test"
response = client.get(f"{PREFIX}/datasets/{dataset_id}")
assert response.status_code == 200
response = client.delete(f"{PREFIX}/datasets/{dataset_id}")
assert response.status_code == 200
def test_create_missing_fields_dataset():
"""Unit test for creating a dataset with missing important fields"""
for field in ["name", "description", "labels"]:
body = EXAMPLE_DATASET_BODY.copy()
del body[field]
response = client.post(f"{PREFIX}/datasets", json=body)
assert response.status_code == 422
def test_delete_nonexistent_dataset():
"""Unit test for trying to delete a nonexistent dataset"""
response = client.delete(f"{PREFIX}/datasets/daflkadsjflkajdflkadfadadfadsfad")
assert response.status_code == 404
| 0 | 0 | 0 |
6fa132238cc81afebcb7ad528f8226ca00e5c954 | 1,484 | py | Python | stego/mainPage/pythonScripts/decodification_commas.py | alexauf/steganographyLab | 57d47d81f9e6b076740d817e7334270420e0c489 | [
"Apache-2.0"
] | null | null | null | stego/mainPage/pythonScripts/decodification_commas.py | alexauf/steganographyLab | 57d47d81f9e6b076740d817e7334270420e0c489 | [
"Apache-2.0"
] | null | null | null | stego/mainPage/pythonScripts/decodification_commas.py | alexauf/steganographyLab | 57d47d81f9e6b076740d817e7334270420e0c489 | [
"Apache-2.0"
] | null | null | null | import os
import re
#If len > 0, the HTML line given contains single and/or double quotation marks
#Takes the HTML lines with quotation marks and returns the codification
#depending on if it has single or double quotation marks.
if __name__ == "__main__":
# test_bits_total()
# test_num_attributes_line()
main()
| 20.328767 | 78 | 0.588275 | import os
import re
#If len > 0, the HTML line given contains single and/or double quotation marks
def quotation_marks_lines (input):
    """Count quoted substrings (single- or double-quoted pairs) in *input*.

    A return value > 0 means the given HTML line contains quotation marks.
    """
    patterns = (r'"(.*?)"', r"'(.*?)'")
    return sum(len(re.findall(pattern, input)) for pattern in patterns)
def total_capacity (input):
    """Total embedding capacity in bits: quoted substrings summed over all
    lines of the HTML document."""
    return sum(quotation_marks_lines(line) for line in input.splitlines())
#Takes the HTML lines with quotation marks and returns the codification
#depending on if it has single or double quotation marks.
def retrieve_msg_commas (input):
    """Decode the hidden bits of one HTML line.

    Each double-quoted attribute encodes a "1" and each single-quoted one a
    "0"; bits are ordered by the position of the opening quote.  Returns None
    (implicitly, as in the original) when the line contains no quotation
    marks.
    """
    if quotation_marks_lines(input) > 0:
        encoded = {}
        for match in re.finditer(r'"(.*?)"', input):
            encoded[match.start()] = "1"
        for match in re.finditer(r"'(.*?)'", input):
            encoded[match.start()] = "0"
        return [bit for _, bit in sorted(encoded.items())]
def main():
    """Read the stego HTML response file and print the decoded bit stream."""
    with open(os.getcwd()+"/stego/tempResponseAlejandroCommas.html") as fd:
        content = fd.read()
    hlines = content.splitlines()
    msg = []
    for line in hlines:
        tmp = retrieve_msg_commas(line)
        # None means "no quotes on this line" and contributes no bits
        if(tmp is not None):
            msg += tmp
    print(msg)
if __name__ == "__main__":
# test_bits_total()
# test_num_attributes_line()
main()
| 1,060 | 0 | 90 |
289b8db8ffece4af25a2aca154df3f13fc021a4d | 7,428 | py | Python | pricewars_merchant.py | hpi-epic/pricewars-merchant | 7981dad52034493f1f77e21e2aa297598c6ef769 | [
"MIT"
] | null | null | null | pricewars_merchant.py | hpi-epic/pricewars-merchant | 7981dad52034493f1f77e21e2aa297598c6ef769 | [
"MIT"
] | 14 | 2017-07-05T12:37:52.000Z | 2018-09-05T08:44:47.000Z | pricewars_merchant.py | hpi-epic/pricewars-merchant | 7981dad52034493f1f77e21e2aa297598c6ef769 | [
"MIT"
] | 8 | 2017-06-18T08:48:30.000Z | 2021-09-24T11:20:22.000Z | import json
from abc import ABCMeta, abstractmethod
import time
import threading
import hashlib
import base64
import random
from typing import Optional, List
from pathlib import Path
from api import Marketplace, Producer
from server import MerchantServer
from models import SoldOffer, Offer
| 38.487047 | 139 | 0.630587 | import json
from abc import ABCMeta, abstractmethod
import time
import threading
import hashlib
import base64
import random
from typing import Optional, List
from pathlib import Path
from api import Marketplace, Producer
from server import MerchantServer
from models import SoldOffer, Offer
class PricewarsMerchant(metaclass=ABCMeta):
# Save/Read token file in merchant directory
TOKEN_FILE = Path(__file__).parent / 'auth_tokens.json'
    def __init__(self, port: int, token: Optional[str], marketplace_url: str, producer_url: str, merchant_name: str, color: Optional[str]):
        """Register (or re-authenticate) this merchant with the marketplace.

        Starts the local HTTP API server, resolves/validates the auth token,
        registers a new merchant if no valid token is available, and derives
        the update interval from the marketplace's request limit.
        """
        # Default colors which are assigned to a merchant without its own color.
        # Created with colorbrewer: http://colorbrewer2.org/#type=qualitative&scheme=Paired&n=10
        colors = ['#a6cee3','#1f78b4','#b2df8a','#fb9a99','#fdbf6f','#ff7f00','#cab2d6','#6a3d9a']
        self.settings = {
            'update_interval': 5,
            # it could make sense to choose larger upper bounds to
            # ensure that the merchants to not exceed their quota.
            'interval_lower_bound_relative': 0.7,
            'interval_upper_bound_relative': 1.35,
            'restock_limit': 20,
            'shipping': 5,
            'prime_shipping': 1,
        }
        self.state = 'running'
        self.server_thread = self.start_server(port)
        self.number_offered_items = 0
        self.products_not_offered = []
        if not color:
            color = colors[random.randint(0, len(colors)-1)]
        self.color = color
        # fall back to a previously persisted token for this merchant name
        if not token:
            token = self.load_tokens().get(merchant_name)
        self.marketplace = Marketplace(token, host=marketplace_url)
        self.marketplace.wait_for_host()
        # validate an existing token against the marketplace before reuse
        if token:
            merchant_id = self.calculate_id(token)
            if not self.marketplace.merchant_exists(merchant_id):
                print('Existing token appears to be outdated.')
                token = None
            else:
                print('Running with existing token "%s".' % token)
                self.token = token
                self.merchant_id = merchant_id
        if token is None:
            register_response = self.marketplace.register(port, merchant_name, color)
            self.token = register_response.merchant_token
            self.merchant_id = register_response.merchant_id
            self.save_token(merchant_name)
            print('Registered new merchant with token "%s".' % self.token)
        # request current request limitations from market place.
        req_limit = self.marketplace.get_request_limit()
        # Update rate has to account of (i) getting market situations,
        # (ii) posting updates, (iii) getting products, (iv) posting
        # new products. As restocking should not occur too often,
        # we use a rather conservative factor of 2.5x factor.
        self.settings['update_interval'] = (1 / req_limit) * 2.5
        self.producer = Producer(self.token, host=producer_url)
def load_tokens(self) -> dict:
try:
with open(self.TOKEN_FILE, 'r') as f:
return json.load(f)
except FileNotFoundError:
return {}
    def save_token(self, name: str) -> None:
        """Persist this merchant's token under *name* in the shared token file."""
        # merge into the existing mapping so other merchants' tokens survive
        tokens = self.load_tokens()
        with open(self.TOKEN_FILE, 'w') as f:
            tokens[name] = self.token
            json.dump(tokens, f)
@staticmethod
def calculate_id(token: str) -> str:
return base64.b64encode(hashlib.sha256(token.encode('utf-8')).digest()).decode('utf-8')
    def run(self):
        """Main merchant loop: update offers once per (jittered) interval.

        Runs forever; while ``self.state`` is 'running' each cycle reprices
        the merchant's offers, otherwise the loop just sleeps.
        """
        # initial random sleep to avoid starting merchants in sync
        time.sleep(2 * random.random())
        start_time = time.time()
        update_counter = 1
        self.open_new_offer()
        while True:
            interval = self.settings['update_interval']
            lower_bound = self.settings['interval_lower_bound_relative']
            upper_bound = self.settings['interval_upper_bound_relative']
            if self.state == 'running':
                self.update_offers()
            # determine required sleep length for next interval
            rdm_interval_length = random.uniform(interval * lower_bound, interval * upper_bound)
            # calculate next expected update timestamp (might be in the
            # past in cases where the marketplace blocked for some time)
            next_update_ts = start_time + interval * (update_counter - 1) + rdm_interval_length
            sleep_time = next_update_ts - time.time()
            if sleep_time <= 0:
                # short random sleep to catch up with the intervals,
                # but try not to DDoS the marketplace
                sleep_time = random.uniform(interval * 0.05, interval * 0.2)
            time.sleep(sleep_time)
            update_counter += 1
def update_offers(self) -> None:
"""
Entry point for regular merchant activity.
When the merchant is running, this is called in each update interval.
"""
market_situation = self.marketplace.get_offers()
own_offers = [offer for offer in market_situation if offer.merchant_id == self.merchant_id]
for offer in own_offers:
offer.price = self.calculate_price(offer.offer_id, market_situation)
self.marketplace.update_offer(offer)
def restock(self):
order = self.producer.order(self.settings['restock_limit'])
return order.products
def open_new_offer(self) -> None:
if not self.products_not_offered:
self.products_not_offered = self.restock()
product = self.products_not_offered.pop()
self.number_offered_items += product.quantity
shipping_time = {
'standard': self.settings['shipping'],
'prime': self.settings['prime_shipping']
}
offer = Offer.from_product(product, 0, shipping_time)
offer.merchant_id = self.merchant_id
market_situation = self.marketplace.get_offers()
offer.price = self.calculate_price(offer.offer_id, market_situation + [offer])
self.marketplace.add_offer(offer)
def sold_offer(self, offer: SoldOffer) -> None:
"""
This method is called whenever the merchant sells a product.
"""
print('Product sold')
self.number_offered_items -= offer.quantity_sold
if self.number_offered_items == 0:
self.open_new_offer()
    def start(self):
        """Resume repricing: the run loop acts only while state is 'running'."""
        self.state = 'running'
    def stop(self):
        """Pause repricing; the run loop keeps ticking but stays idle."""
        self.state = 'stopping'
def update_settings(self, new_settings: dict) -> None:
for key, value in new_settings.items():
if key in self.settings:
# Cast value type to the type that is already in the settings dictionary
value = type(self.settings[key])(value)
self.settings[key] = value
def start_server(self, port):
server = MerchantServer(self)
thread = threading.Thread(target=server.app.run, kwargs={'host': '0.0.0.0', 'port': port})
thread.daemon = True
thread.start()
return thread
    @abstractmethod
    def calculate_price(self, offer_id: int, market_situation: List[Offer]) -> float:
        """
        Calculate the price for the offer indicated by 'offer_id' given the current market situation.
        The offer id is guaranteed to be in the market situation.

        Args:
            offer_id: ID of the own offer to (re)price.
            market_situation: All current offers, including the one to price.

        Returns:
            The new price for the offer.
        """
        pass
| 5,532 | 1,580 | 23 |
88d13ee6499a366bc31e75cade118cbe9a24e1e1 | 1,955 | py | Python | copy_timecode.py | flow-dev/CopyTimecodeAtoB | 5e4852dbff6d128b00160bddc20da5b222fe4f33 | [
"MIT"
] | null | null | null | copy_timecode.py | flow-dev/CopyTimecodeAtoB | 5e4852dbff6d128b00160bddc20da5b222fe4f33 | [
"MIT"
] | null | null | null | copy_timecode.py | flow-dev/CopyTimecodeAtoB | 5e4852dbff6d128b00160bddc20da5b222fe4f33 | [
"MIT"
] | null | null | null | import argparse
import glob
import os
from video_ffmpeg_utils import VideoFfmpegUtils
"""
Copy Timecode and enc ProResHQ or h265
Example:
python3 copy_timecode.py \
--video-src "PATH_TO_VIDEO_SRC" \
--video-out "PATH_TO_VIDEO_OUT" \
--codec h265 \
--copy-tc \
"""
# --------------- Arguments ---------------
parser = argparse.ArgumentParser(description='Copy Timecode')
parser.add_argument('--video-src', type=str, required=True)
parser.add_argument('--video-out', type=str, default="output.mov", help="Path to video onto which to encode the output")
parser.add_argument('--codec', type=str, default='h265', choices=['proreshq', 'h265'])
parser.add_argument('--copy-tc', action='store_true', help="Used when copying timecode")
args = parser.parse_args()
# --------------- Main ---------------
if __name__ == '__main__':
video_utils = VideoFfmpegUtils()
# Get Video Info by ffprobe
video_info = video_utils.get_video_info(args.video_src)
print("Check video_src_info:",video_info)
# enc ProResHQ or h265
if args.codec == 'proreshq':
print("run_ProResHQ_enc")
if args.copy_tc:
timecode = video_utils.get_timecode(args.video_src)
print("video_src_timecode:", timecode)
video_utils.run_ProResHQ_enc(args.video_src, args.video_out, timecode)
else:
video_utils.run_ProResHQ_enc(args.video_src, args.video_out)
if args.codec == 'h265':
print("run_hevc_nvenc")
if args.copy_tc:
timecode = video_utils.get_timecode(args.video_src)
print("video_src_timecode:", timecode)
video_utils.run_hevc_nvenc(args.video_src, args.video_out, timecode)
else:
video_utils.run_hevc_nvenc(args.video_src, args.video_out)
# Check video_out_info
video_info = video_utils.get_video_info(args.video_out)
print("Check video_out_info:",video_info)
| 31.031746 | 120 | 0.662916 | import argparse
import glob
import os
from video_ffmpeg_utils import VideoFfmpegUtils
"""
Copy Timecode and enc ProResHQ or h265
Example:
python3 copy_timecode.py \
--video-src "PATH_TO_VIDEO_SRC" \
--video-out "PATH_TO_VIDEO_OUT" \
--codec h265 \
--copy-tc \
"""
# --------------- Arguments ---------------
parser = argparse.ArgumentParser(description='Copy Timecode')
parser.add_argument('--video-src', type=str, required=True)
parser.add_argument('--video-out', type=str, default="output.mov", help="Path to video onto which to encode the output")
parser.add_argument('--codec', type=str, default='h265', choices=['proreshq', 'h265'])
parser.add_argument('--copy-tc', action='store_true', help="Used when copying timecode")
args = parser.parse_args()
# --------------- Main ---------------
if __name__ == '__main__':
video_utils = VideoFfmpegUtils()
# Get Video Info by ffprobe
video_info = video_utils.get_video_info(args.video_src)
print("Check video_src_info:",video_info)
# enc ProResHQ or h265
if args.codec == 'proreshq':
print("run_ProResHQ_enc")
if args.copy_tc:
timecode = video_utils.get_timecode(args.video_src)
print("video_src_timecode:", timecode)
video_utils.run_ProResHQ_enc(args.video_src, args.video_out, timecode)
else:
video_utils.run_ProResHQ_enc(args.video_src, args.video_out)
if args.codec == 'h265':
print("run_hevc_nvenc")
if args.copy_tc:
timecode = video_utils.get_timecode(args.video_src)
print("video_src_timecode:", timecode)
video_utils.run_hevc_nvenc(args.video_src, args.video_out, timecode)
else:
video_utils.run_hevc_nvenc(args.video_src, args.video_out)
# Check video_out_info
video_info = video_utils.get_video_info(args.video_out)
print("Check video_out_info:",video_info)
| 0 | 0 | 0 |
663ab76b35a4777947c738b5b827fe96066f92dc | 53,509 | py | Python | idpmodem/codecs/common.py | Inmarsat/idpmodem | 2b77f7ead25fa71a390fb4eb97ae966a464d6132 | [
"Apache-2.0"
] | 3 | 2021-01-07T16:01:54.000Z | 2021-12-19T04:15:12.000Z | idpmodem/codecs/common.py | Inmarsat/idpmodem | 2b77f7ead25fa71a390fb4eb97ae966a464d6132 | [
"Apache-2.0"
] | 1 | 2021-12-17T15:09:06.000Z | 2021-12-17T15:09:06.000Z | idpmodem/codecs/common.py | Inmarsat/idpmodem | 2b77f7ead25fa71a390fb4eb97ae966a464d6132 | [
"Apache-2.0"
] | 2 | 2019-02-06T09:02:40.000Z | 2020-03-17T02:55:16.000Z | """Codec functions for IDP Common Message Format supported by Inmarsat MGS.
Also supported on ORBCOMM IGWS1.
"""
from binascii import b2a_base64
from math import log2, ceil
from struct import pack, unpack
from typing import Tuple, Union
from warnings import WarningMessage, warn
import xml.etree.ElementTree as ET
from xml.dom.minidom import parseString
from idpmodem.constants import FORMAT_HEX, FORMAT_B64, SATELLITE_GENERAL_TRACE
__version__ = '2.0.0'
DATA_TYPES = {
'bool': 'BooleanField',
'int_8': 'SignedIntField',
'uint_8': 'UnsignedIntField',
'int_16': 'SignedIntField',
'uint_16': 'UnsignedIntField',
'int_32': 'SignedIntField',
'uint_32': 'UnsignedIntField',
'int_64': 'SignedIntField',
'uint_64': 'UnsignedIntField',
'float': 'DataField',
'double': 'DataField',
'string': 'StringField',
'data': 'DataField',
'enum': 'EnumField',
'array': 'ArrayField', # TODO: support for array type
}
XML_NAMESPACE = {
'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xsd': 'http://www.w3.org/2001/XMLSchema'
}
SIN_RANGE = (16, 255)
for ns in XML_NAMESPACE:
ET.register_namespace(ns, XML_NAMESPACE[ns])
def optimal_bits(value_range: tuple) -> int:
    """Returns the number of bits needed to encode any value in the range.

    Args:
        value_range: An inclusive ``(min, max)`` tuple with ``min <= max``.

    Returns:
        The minimum bit count (at least 1) able to represent every offset
        ``value - min`` within the range.

    Raises:
        ValueError: If value_range is not a valid (min, max) tuple.
    """
    if not (isinstance(value_range, tuple) and len(value_range) == 2 and
            value_range[0] <= value_range[1]):
        #: non-compliant
        raise ValueError('value_range must be of form (min, max)')
    # Values are encoded as offsets 0..(max - min); the bit_length of the
    # largest offset is the exact bit requirement. The previous
    # ceil(log2(max - min)) under-allocated when the span was an exact
    # power of two (e.g. (0, 4) returned 2 bits but 3 are required), and
    # left its zero-min correction in a dead, unused variable.
    span = value_range[1] - value_range[0]
    return max(1, span.bit_length())
class BaseField:
    """The base class for a Field.

    Attributes:
        data_type (str): The data type from a supported list.
        name (str): The unique Field name.
        description (str): Optional description.
        optional (bool): Optional indication the field is optional.
    """
    def __init__(self,
                 name: str,
                 data_type: str,
                 description: str = None,
                 optional: bool = False) -> None:
        """Instantiates the base field.

        Args:
            name: The field name must be unique within a Message.
            data_type: The data type represented within the field.
            description: (Optional) Description/purpose of the field.
            optional: (Optional) Indicates if the field is mandatory.

        Raises:
            ValueError: If data_type is unsupported or name is empty.
        """
        # Validate data_type first, then name, preserving which error a
        # doubly-invalid call raises.
        if data_type not in DATA_TYPES:
            raise ValueError(f'Invalid data type {data_type}')
        if name is None or not name.strip():
            raise ValueError('Invalid name must be non-empty')
        self.data_type = data_type
        self.name = name
        self.description = description
        self.optional = optional
class Message:
"""The Payload structure for Message Definition Files uploaded to a Mailbox.
Attributes:
name (str): The message name
sin (int): The Service Identification Number
min (int): The Message Identification Number
fields (list): A list of Fields
description (str): Optional description
is_forward (bool): Indicates if the message is mobile-terminated
"""
def __init__(self,
name: str,
sin: int,
min: int,
description: str = None,
is_forward: bool = False,
fields: "list[BaseField]" = None):
"""Instantiates a Message.
Args:
name: The message name should be unique within the xMessages list.
sin: The Service Identification Number (16..255)
min: The Message Identification Number (0..255)
description: (Optional) Description/purpose of the Message.
is_forward: Indicates if the message is intended to be
Mobile-Terminated.
fields: Optional definition of fields during instantiation.
"""
if not isinstance(sin, int) or sin not in range(16, 256):
raise ValueError('Invalid SIN {} must be in range 16..255'.format(
sin))
if not isinstance(min, int) or min not in range (0, 256):
raise ValueError('Invalid MIN {} must be in range 0..255'.format(
min))
self.name = name
self.description = description
self.is_forward = is_forward
self.sin = sin
self.min = min
self.fields = fields or Fields()
@property
@fields.setter
@property
def decode(self, databytes: bytes) -> None:
"""Parses and stores field values from raw data (received over-the-air).
Args:
databytes: A bytes array (typically from the forward message)
"""
binary_str = ''.join(format(int(b), '08b') for b in databytes)
bit_offset = 16 #: Begin after SIN/MIN bytes
for field in self.fields:
if field.optional:
present = binary_str[bit_offset] == '1'
bit_offset += 1
if not present:
continue
bit_offset += field.decode(binary_str[bit_offset:])
def derive(self, databytes: bytes) -> None:
"""Derives field values from raw data bytes (received over-the-air).
Deprecated/replaced by `decode`.
Args:
databytes: A bytes array (typically from the forward message)
"""
self.decode(databytes)
def encode(self,
data_format: int = FORMAT_B64,
exclude: list = None) -> dict:
"""Encodes using the specified data format (base64 or hex).
Args:
data_format (int): 2=ASCII-Hex, 3=base64
exclude (list[str]): A list of optional field names to exclude
Returns:
Dictionary with sin, min, data_format and data to pass into AT%MGRT
or atcommand function `message_mo_send`
"""
if data_format not in [FORMAT_B64, FORMAT_HEX]:
raise ValueError('data_format {} unsupported'.format(data_format))
#:AT%MGRT uses '{}.{},{},{}'.format(sin, min, data_format, data)
bin_str = ''
for field in self.fields:
if field.optional:
if exclude is not None and field.name in exclude:
present = False
elif hasattr(field, 'value'):
present = field.value is not None
elif hasattr(field, 'elements'):
present = field.elements is not None
else:
raise ValueError('Unknown value of optional')
bin_str += '1' if present else '0'
if not present:
continue
bin_str += field.encode()
for _ in range(0, 8 - len(bin_str) % 8): #:pad to next byte
bin_str += '0'
_format = '0{}X'.format(int(len(bin_str) / 8 * 2)) #:hex bytes 2 chars
hex_str = format(int(bin_str, 2), _format)
if (self.is_forward and len(hex_str) / 2 > 9998 or
not self.is_forward and len(hex_str) / 2 > 6398):
raise ValueError('{} bytes exceeds maximum size for Payload'.format(
len(hex_str) / 2))
if data_format == FORMAT_HEX:
data = hex_str
else:
data = b2a_base64(bytearray.fromhex(hex_str)).strip().decode()
return {
'sin': self.sin,
'min': self.min,
'data_format': data_format,
'data': data
}
def xml(self, indent: bool = False) -> ET.Element:
"""Returns the XML definition for a Message Definition File."""
xmessage = ET.Element('Message')
name = ET.SubElement(xmessage, 'Name')
name.text = self.name
min = ET.SubElement(xmessage, 'MIN')
min.text = str(self.min)
fields = ET.SubElement(xmessage, 'Fields')
for field in self.fields:
fields.append(field.xml())
return xmessage if not indent else _indent_xml(xmessage)
class Service:
"""A data structure holding a set of related Forward and Return Messages.
Attributes:
name (str): The service name
sin (int): Service Identification Number or codec service id (16..255)
description (str): A description of the service (unsupported)
messages_forward (list): A list of mobile-terminated Message definitions
messages_return (list): A list of mobile-originated Message definitions
"""
def __init__(self,
name: str,
sin: int,
description: str = None,
messages_forward: "list[Message]" = None,
messages_return: "list[Message]" = None) -> None:
"""Instantiates a Service made up of Messages.
Args:
name: The service name should be unique within a MessageDefinitions
sin: The Service Identification Number (16..255)
description: (Optional)
"""
if not isinstance(name, str) or name == '':
raise ValueError('Invalid service name {}'.format(name))
if sin not in range(16, 256):
raise ValueError('Invalid SIN must be 16..255')
self.name = name
self.sin = sin
if description is not None:
warn('Service Description not currently supported')
self.description = None
self.messages_forward = messages_forward or Messages(self.sin,
is_forward=True)
self.messages_return = messages_return or Messages(self.sin,
is_forward=False)
def xml(self, indent: bool = False) -> ET.Element:
"""Returns the XML structure of the Service for a MDF."""
if len(self.messages_forward) == 0 and len(self.messages_return) == 0:
raise ValueError('No messages defined for service {}'.format(
self.sin))
xservice = ET.Element('Service')
name = ET.SubElement(xservice, 'Name')
name.text = str(self.name)
sin = ET.SubElement(xservice, 'SIN')
sin.text = str(self.sin)
if self.description:
desc = ET.SubElement(xservice, 'Description')
desc.text = str(self.description)
if len(self.messages_forward) > 0:
forward_messages = ET.SubElement(xservice, 'ForwardMessages')
for m in self.messages_forward:
forward_messages.append(m.xml())
if len(self.messages_return) > 0:
return_messages = ET.SubElement(xservice, 'ReturnMessages')
for m in self.messages_return:
return_messages.append(m.xml())
return xservice if not indent else _indent_xml(xservice)
class ObjectList(list):
"""Base class for a specific object type list.
Used for Fields, Messages, Services.
Attributes:
list_type: The object type the list is comprised of.
"""
SUPPORTED_TYPES = [
BaseField,
# Field,
Message,
Service,
]
def add(self, obj: object) -> bool:
"""Add an object to the end of the list.
Args:
obj (object): A valid object according to the list_type
Raises:
ValueError if there is a duplicate or invalid name,
invalid value_range or unsupported data_type
"""
if not isinstance(obj, self.list_type):
raise ValueError('Invalid {} definition'.format(self.list_type))
for o in self:
if o.name == obj.name:
raise ValueError('Duplicate {} name {} found'.format(
self.list_type, obj.name))
self.append(obj)
return True
def __getitem__(self, n: Union[str, int]) -> object:
"""Retrieves an object by name or index.
Args:
n: The object name or list index
Returns:
object
"""
if isinstance(n, str):
for o in self:
if o.name == n:
return o
raise ValueError('{} name {} not found'.format(self.list_type, n))
return super().__getitem__(n)
def delete(self, name: str) -> bool:
"""Delete an object from the list by name.
Args:
name: The name of the object.
Returns:
boolean: success
"""
for o in self:
if o.name == name:
self.remove(o)
return True
return False
class Fields(ObjectList):
"""The list of Fields defining a Message or ArrayElement."""
class Messages(ObjectList):
    """The list of Messages (Forward or Return) within a Service."""
    def add(self, message: Message) -> bool:
        """Add a message to the list if it matches the parent SIN.
        Overrides the base class add method.
        Args:
            message (object): A valid Message
        Raises:
            ValueError if there is a duplicate or invalid name,
            invalid value_range or unsupported data_type
        """
        # NOTE(review): relies on self.sin being set by a constructor not
        # visible in this chunk — confirm before reuse.
        if not isinstance(message, Message):
            raise ValueError('Invalid message definition')
        if message.sin != self.sin:
            raise ValueError('Message SIN {} does not match service {}'.format(
                message.sin, self.sin))
        # Reject duplicates by name or by MIN within the same service.
        for m in self:
            if m.name == message.name:
                raise ValueError('Duplicate message name {} found'.format(
                    message.name))
            if m.min == message.min:
                raise ValueError('Duplicate message MIN {} found'.format(
                    message.min))
        self.append(message)
        return True
class Services(ObjectList):
    """The list of Service(s) within a MessageDefinitions."""
    def add(self, service: Service) -> None:
        """Adds a Service to the list of Services.

        Raises:
            ValueError: If service is not a Service, or duplicates an
                existing entry by name or SIN.
        """
        if not isinstance(service, Service):
            raise ValueError('{} is not a valid Service'.format(service))
        # NOTE(review): this membership test compares a name string
        # against Service objects; it only catches duplicates if Service
        # defines a matching __eq__ — confirm against the Service class.
        if service.name in self:
            raise ValueError('Duplicate Service {}'.format(service.name))
        if any(existing.sin == service.sin for existing in self):
            raise ValueError('Duplicate SIN {}'.format(service.sin))
        self.append(service)
class BooleanField(BaseField):
"""A Boolean field."""
@property
@default.setter
@property
@value.setter
@property
def encode(self) -> str:
"""Returns the binary string of the field value."""
if self.value is None and not self.optional:
raise ValueError('No value assigned to field')
return '1' if self.value else '0'
def decode(self, binary_str: str) -> int:
"""Populates the field value from binary and returns the next offset.
Args:
binary_str (str): The binary string to decode
Returns:
The bit offset after parsing
"""
self.value = True if binary_str[0] == '1' else False
return 1
class EnumField(BaseField):
"""An enumerated field sends an index over-the-air representing a string."""
def __init__(self,
name: str,
items: "list[str]",
size: int,
description: str = None,
optional: bool = False,
default: int = None,
value: int = None) -> None:
"""Instantiates a EnumField.
Args:
name: The field name must be unique within a Message.
items: A list of strings (indexed from 0).
size: The number of *bits* used to encode the index over-the-air.
description: An optional description/purpose for the field.
optional: Indicates if the field is optional in the Message.
default: A default value for the enum.
value: Optional value to set during initialization.
"""
super().__init__(name=name,
data_type='enum',
description=description,
optional=optional)
if items is None or not all(isinstance(item, str) for item in items):
raise ValueError('Items must all be strings')
if not isinstance(size, int) or size < 1:
raise ValueError('Size must be integer greater than 0 bits')
self.items = items
self.size = size
self.default = default
self.value = value if value is not None else self.default
@property
@items.setter
@property
@default.setter
@property
@value.setter
@property
@size.setter
@property
def encode(self) -> str:
"""Returns the binary string of the field value."""
if self.value is None:
raise ValueError('No value configured in EnumField {}'.format(
self.name))
_format = '0{}b'.format(self.bits)
binstr = format(self.items.index(self.value), _format)
return binstr
def decode(self, binary_str: str) -> int:
"""Populates the field value from binary and returns the next offset.
Args:
binary_str (str): The binary string to decode
Returns:
The bit offset after parsing
"""
self.value = binary_str[:self.bits]
return self.bits
class UnsignedIntField(BaseField):
"""An unsigned integer value using a defined number of bits over-the-air."""
def __init__(self,
name: str,
size: int,
data_type: str = 'uint_16',
description: str = None,
optional: bool = False,
default: int = None,
value: int = None) -> None:
"""Instantiates a UnsignedIntField.
Args:
name: The field name must be unique within a Message.
size: The number of *bits* used to encode the integer over-the-air
(maximum 32).
data_type: The integer type represented (for decoding).
description: An optional description/purpose for the string.
optional: Indicates if the string is optional in the Message.
default: A default value for the string.
value: Optional value to set during initialization.
"""
if data_type not in ['uint_8', 'uint_16', 'uint_32']:
raise ValueError('Invalid unsignedint type {}'.format(data_type))
super().__init__(name=name,
data_type=data_type,
description=description,
optional=optional)
self.size = size
self.default = default
self.value = value if value is not None else default
@property
@size.setter
@property
@value.setter
@property
@default.setter
@property
def encode(self) -> str:
"""Returns the binary string of the field value."""
if self.value is None:
raise ValueError('No value defined in UnsignedIntField {}'.format(
self.name))
_format = '0{}b'.format(self.bits)
return format(self.value, _format)
def decode(self, binary_str: str) -> int:
"""Populates the field value from binary and returns the next offset.
Args:
binary_str (str): The binary string to decode
Returns:
The bit offset after parsing
"""
self.value = int(binary_str[:self.bits], 2)
return self.bits
class SignedIntField(BaseField):
"""A signed integer value using a defined number of bits over-the-air."""
def __init__(self,
name: str,
size: int,
data_type: str = 'int_16',
description: str = None,
optional: bool = False,
default: int = None,
value: int = None) -> None:
"""Instantiates a SignedIntField.
Args:
name: The field name must be unique within a Message.
size: The number of *bits* used to encode the integer over-the-air
(maximum 32).
data_type: The integer type represented (for decoding).
description: An optional description/purpose for the string.
optional: Indicates if the string is optional in the Message.
default: A default value for the string.
value: Optional value to set during initialization.
"""
if data_type not in ['int_8', 'int_16', 'int_32']:
raise ValueError('Invalid unsignedint type {}'.format(data_type))
super().__init__(name=name,
data_type=data_type,
description=description,
optional=optional)
self.size = size
self.default = default
self.value = value if value is not None else default
@property
@size.setter
@property
@value.setter
@property
@default.setter
@property
def encode(self) -> str:
"""Returns the binary string of the field value."""
if self.value is None:
raise ValueError('No value defined in UnsignedIntField {}'.format(
self.name))
_format = '0{}b'.format(self.bits)
if self.value < 0:
invertedbin = format(self.value * -1, _format)
twocomplementbin = ''
i = 0
while len(twocomplementbin) < len(invertedbin):
twocomplementbin += '1' if invertedbin[i] == '0' else '0'
i += 1
binstr = format(int(twocomplementbin, 2) + 1, _format)
else:
binstr = format(self.value, _format)
return binstr
def decode(self, binary_str: str) -> int:
"""Populates the field value from binary and returns the next offset.
Args:
binary_str (str): The binary string to decode
Returns:
The bit offset after parsing
"""
value = int(binary_str[:self.bits], 2)
if (value & (1 << (self.bits - 1))) != 0: #:sign bit set e.g. 8bit: 128-255
value = value - (1 << self.bits) #:compute negative value
self.value = value
return self.bits
class StringField(BaseField):
"""A character string sent over-the-air."""
def __init__(self,
name: str,
size: int,
description: str = None,
optional: bool = False,
fixed: bool = False,
default: str = None,
value: str = None) -> None:
"""Instantiates a StringField.
Args:
name: The field name must be unique within a Message.
size: The maximum number of characters in the string.
description: An optional description/purpose for the string.
optional: Indicates if the string is optional in the Message.
fixed: Indicates if the string is always fixed length `size`.
default: A default value for the string.
value: Optional value to set during initialization.
"""
super().__init__(name=name,
data_type='string',
description=description,
optional=optional)
self.size = size
self.fixed = fixed
self.default = default
self.value = value if value is not None else default
@property
@size.setter
@property
@default.setter
@property
@value.setter
@property
def encode(self) -> str:
"""Returns the binary string of the field value."""
if self.value is None and not self.optional:
raise ValueError('No value defined for StringField {}'.format(
self.name))
binstr = ''.join(format(ord(c), '08b') for c in self.value)
if self.fixed:
binstr += ''.join('0' for bit in range(len(binstr), self.bits))
else:
binstr = _encode_field_length(len(self.value)) + binstr
return binstr
def decode(self, binary_str: str) -> int:
"""Populates the field value from binary and returns the next offset.
Args:
binary_str (str): The binary string to decode
Returns:
The bit offset after parsing
"""
if self.fixed:
length = self.size
bit_index = 0
else:
(length, bit_index) = _decode_field_length(binary_str)
n = int(binary_str[bit_index:bit_index + length * 8], 2)
char_bytes = n.to_bytes((n.bit_length() + 7) // 8, 'big')
for i, byte in enumerate(char_bytes):
if byte == 0:
warn('Truncating after 0 byte in string')
char_bytes = char_bytes[:i]
break
self.value = char_bytes.decode('utf-8', 'surrogatepass') or '\0'
return bit_index + length * 8
def xml(self) -> ET.Element:
"""Returns the message definition XML representation of the field."""
xmlfield = self._base_xml()
size = ET.SubElement(xmlfield, 'Size')
size.text = str(self.size)
if self.fixed:
fixed = ET.SubElement(xmlfield, 'Fixed')
fixed.text = 'true'
if self.default:
default = ET.SubElement(xmlfield, 'Default')
default.text = str(self.default)
return xmlfield
class DataField(BaseField):
"""A data field of raw bytes sent over-the-air.
Can also be used to hold floating point, double-precision or large integers.
"""
supported_data_types = ['data', 'float', 'double']
def __init__(self,
name: str,
size: int,
data_type: str = 'data',
description: str = None,
optional: bool = False,
fixed: bool = False,
default: bytes = None,
value: bytes = None) -> None:
"""Instantiates a EnumField.
Args:
name: The field name must be unique within a Message.
size: The maximum number of bytes to send over-the-air.
data_type: The data type represented within the bytes.
description: An optional description/purpose for the field.
optional: Indicates if the field is optional in the Message.
fixed: Indicates if the data bytes are a fixed `size`.
default: A default value for the enum.
value: Optional value to set during initialization.
"""
if data_type is None or data_type not in self.supported_data_types:
raise ValueError('Invalid data type {}'.format(data_type))
super().__init__(name=name,
data_type=data_type,
description=description,
optional=optional)
self.fixed = fixed
self.size = size
self.default = default
self.value = value if value is not None else default
@property
@size.setter
@property
@value.setter
@property
def encode(self) -> str:
"""Returns the binary string of the field value."""
if self.value is None and not self.optional:
raise ValueError('No value defined for DataField {}'.format(
self.name))
binstr = ''
binstr = ''.join(format(b, '08b') for b in self._value)
if self.fixed: #:pad to fixed length
binstr += ''.join('0' for bit in range(len(binstr), self.bits))
else:
binstr = _encode_field_length(len(self._value)) + binstr
return binstr
def decode(self, binary_str: str) -> int:
"""Populates the field value from binary and returns the next offset.
Args:
binary_str (str): The binary string to decode
Returns:
The bit offset after parsing
"""
if self.fixed:
binary = binary_str[:self.bits]
bits = self.bits
else:
(length, bit_index) = _decode_field_length(binary_str)
binary = binary_str[bit_index:length * 8 + bit_index]
bits = len(binary)
self._value = int(binary, 2).to_bytes(int(bits / 8), 'big')
return self.bits
class ArrayField(BaseField):
"""An Array Field provides a list where each element is a set of Fields.
Attributes:
name (str): The name of the field instance.
size (int): The maximum number of elements allowed.
fields (Fields): A list of Field types comprising each ArrayElement
description (str): An optional description of the array/use.
optional (bool): Indicates if the array is optional in the Message
fixed (bool): Indicates if the array is always the fixed `size`
elements (list): The enumerated list of ArrayElements
"""
def __init__(self,
name: str,
size: int,
fields: Fields,
description: str = None,
optional: bool = False,
fixed: bool = False,
elements: "list[Fields]" = None) -> None:
"""Initializes an ArrayField instance.
Args:
name: The unique field name within the Message.
size: The maximum number of elements allowed.
fields: The list of Field types comprising each element.
description: An optional description/purpose of the array.
optional: Indicates if the array is optional in the Message.
fixed: Indicates if the array is always the fixed `size`.
elements: Option to populate elements of Fields during instantiation.
"""
super().__init__(name=name,
data_type='array',
description=description,
optional=optional)
self.size = size
self.fixed = fixed
self.fields = fields
self.elements = elements or []
@property
@size.setter
@property
@fields.setter
@property
@elements.setter
@property
def append(self, element: Fields):
"""Adds the array element to the list of elements."""
if not isinstance(element, Fields):
raise ValueError('Invalid element definition must be Fields')
if self._valid_element(element):
for i, field in enumerate(element):
if (hasattr(field, 'description') and
field.description != self.fields[i].description):
field.description = self.fields[i].description
if hasattr(field, 'value') and field.value is None:
field.value = self.fields[i].default
self._elements.append(element)
def new_element(self) -> Fields:
"""Returns an empty element at the end of the elements list."""
new_index = len(self._elements)
self._elements.append(Fields(self.fields))
return self.elements[new_index]
def encode(self) -> str:
"""Returns the binary string of the field value."""
if len(self.elements) == 0:
raise ValueError('No elements to encode')
binstr = ''
for element in self.elements:
for field in element:
binstr += field.encode()
if not self.fixed:
binstr = _encode_field_length(len(self.elements)) + binstr
return binstr
def decode(self, binary_str: str) -> int:
"""Populates the field value from binary and returns the next offset.
Args:
binary_str (str): The binary string to decode
Returns:
The bit offset after parsing
"""
if self.fixed:
length = self.size
bit_index = 0
else:
(length, bit_index) = _decode_field_length(binary_str)
for index in range(0, length):
fields = Fields(self.fields)
for field in fields:
if field.optional:
if binary_str[bit_index] == '0':
bit_index += 1
continue
bit_index += 1
bit_index += field.decode(binary_str[bit_index:])
try:
self._elements[index] = fields
except IndexError:
self._elements.append(fields)
return bit_index
class MessageDefinitions:
"""A set of Message Definitions grouped into Services.
Attributes:
services: The list of Services with Messages defined.
"""
| 35.912081 | 85 | 0.556617 | """Codec functions for IDP Common Message Format supported by Inmarsat MGS.
Also supported on ORBCOMM IGWS1.
"""
from binascii import b2a_base64
from math import log2, ceil
from struct import pack, unpack
from typing import Tuple, Union
from warnings import WarningMessage, warn
import xml.etree.ElementTree as ET
from xml.dom.minidom import parseString
from idpmodem.constants import FORMAT_HEX, FORMAT_B64, SATELLITE_GENERAL_TRACE
__version__ = '2.0.0'

#: Maps codec data types to the XML ``xsi:type`` attribute value used in a
#: Message Definition File.
DATA_TYPES = {
    'bool': 'BooleanField',
    'int_8': 'SignedIntField',
    'uint_8': 'UnsignedIntField',
    'int_16': 'SignedIntField',
    'uint_16': 'UnsignedIntField',
    'int_32': 'SignedIntField',
    'uint_32': 'UnsignedIntField',
    'int_64': 'SignedIntField',
    'uint_64': 'UnsignedIntField',
    'float': 'DataField',
    'double': 'DataField',
    'string': 'StringField',
    'data': 'DataField',
    'enum': 'EnumField',
    'array': 'ArrayField',  # TODO: support for array type
}
#: XML namespaces used when generating Message Definition Files.
XML_NAMESPACE = {
    'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
    'xsd': 'http://www.w3.org/2001/XMLSchema'
}
#: Bounds of valid Service Identification Numbers — presumably inclusive;
#: confirm against SIN validation elsewhere (which uses range(16, 256)).
SIN_RANGE = (16, 255)
# Register the namespaces globally so ElementTree serializes MDF prefixes.
for ns in XML_NAMESPACE:
    ET.register_namespace(ns, XML_NAMESPACE[ns])
def optimal_bits(value_range: tuple) -> int:
    """Returns the optimal number of bits for encoding a specified range.

    Callers in this module pass ``(0, count)`` so the result is the number
    of bits required to index ``count`` distinct values.

    Args:
        value_range: A tuple ``(min, max)`` with ``min <= max``.

    Returns:
        ``max(1, ceil(log2(max - min)))`` i.e. at least 1 bit.

    Raises:
        ValueError: If value_range is malformed or spans zero.
    """
    if not (isinstance(value_range, tuple) and len(value_range) == 2 and
            value_range[0] <= value_range[1]):
        raise ValueError('value_range must be of form (min, max)')
    span = value_range[1] - value_range[0]
    if span < 1:
        # log2(0) is undefined; the original raised the same exception type
        # via the math domain error — make it explicit.
        raise ValueError('value_range must span at least 1')
    # Note: the original computed (but never used) a dead `total_range`
    # variable and shadowed the function name with its local result.
    return max(1, ceil(log2(span)))
def _encode_field_length(length) -> str:
    """Encodes a length prefix: 8-bit short form or 16-bit long form."""
    short_form = length < 128
    marker = '0' if short_form else '1'
    width = 7 if short_form else 15
    return marker + format(length, '0{}b'.format(width))
def _decode_field_length(binstr: str) -> Tuple[int, int]:
    """Decodes a length prefix, returning (length, bits consumed)."""
    # Leading '0' marks the 8-bit short form; '1' the 16-bit long form.
    bit_index = 8 if binstr[0] == '0' else 16
    return (int(binstr[1:bit_index], 2), bit_index)
def _attribute_equivalence(reference: object,
                           other: object,
                           exclude: "list[str]" = None) -> bool:
    """Compares instance attributes of reference against other.

    Attributes named in `exclude` are ignored; `other` must have every
    remaining attribute with an equal value.
    """
    skip = exclude or []
    return all(
        hasattr(other, attr) and value == other.__dict__[attr]
        for attr, value in reference.__dict__.items()
        if attr not in skip
    )
def _indent_xml(elem, level=0):
    """Returns a pretty-printed XML string for the element.

    Args:
        elem: The ElementTree element to render.
        level: Unused; retained for backward compatibility with callers of
            the former recursive implementation (removed dead code).

    Returns:
        The indented XML document as a string.
    """
    return parseString(ET.tostring(elem)).toprettyxml(indent="  ")
class BaseField:
    """The base class for a Field.

    Attributes:
        data_type (str): The data type from a supported list.
        name (str): The unique Field name.
        description (str): Optional description.
        optional (bool): Indicates if the field is optional.
    """

    def __init__(self,
                 name: str,
                 data_type: str,
                 description: str = None,
                 optional: bool = False) -> None:
        """Instantiates the base field.

        Args:
            name: The field name, unique within a Message.
            data_type: The data type represented within the field.
            description: (Optional) Description/purpose of the field.
            optional: (Optional) Indicates if the field is optional.

        Raises:
            ValueError: On unsupported data_type or empty name.
        """
        if data_type not in DATA_TYPES:
            raise ValueError('Invalid data type {}'.format(data_type))
        if name is None or name.strip() == '':
            raise ValueError('Invalid name must be non-empty')
        self.data_type = data_type
        self.name = name
        self.description = description
        self.optional = optional

    def __repr__(self) -> str:
        """Returns a pretty-printed view of the instance attributes."""
        from pprint import pformat
        return pformat(self.__dict__, indent=4)

    def _base_xml(self) -> ET.Element:
        """Builds the common XML skeleton shared by all field types."""
        attributes = {
            '{http://www.w3.org/2001/XMLSchema-instance}type':
                DATA_TYPES[self.data_type]
        }
        xmlfield = ET.Element('Field', attrib=attributes)
        ET.SubElement(xmlfield, 'Name').text = self.name
        if self.description:
            ET.SubElement(xmlfield, 'Description').text = str(self.description)
        if self.optional:
            ET.SubElement(xmlfield, 'Optional').text = 'true'
        return xmlfield
class Message:
    """The Payload structure for Message Definition Files uploaded to a Mailbox.

    Attributes:
        name (str): The message name
        sin (int): The Service Identification Number
        min (int): The Message Identification Number
        fields (list): A list of Fields
        description (str): Optional description
        is_forward (bool): Indicates if the message is mobile-terminated
    """

    def __init__(self,
                 name: str,
                 sin: int,
                 min: int,
                 description: str = None,
                 is_forward: bool = False,
                 fields: "list[BaseField]" = None):
        """Instantiates a Message.

        Args:
            name: The message name should be unique within the xMessages list.
            sin: The Service Identification Number (16..255)
            min: The Message Identification Number (0..255)
            description: (Optional) Description/purpose of the Message.
            is_forward: Indicates if the message is intended to be
                Mobile-Terminated.
            fields: Optional definition of fields during instantiation.

        Raises:
            ValueError: If sin or min is out of range.
        """
        if not isinstance(sin, int) or sin not in range(16, 256):
            raise ValueError('Invalid SIN {} must be in range 16..255'.format(
                sin))
        if not isinstance(min, int) or min not in range(0, 256):
            raise ValueError('Invalid MIN {} must be in range 0..255'.format(
                min))
        self.name = name
        self.description = description
        self.is_forward = is_forward
        self.sin = sin
        self.min = min
        self.fields = fields or Fields()

    @property
    def fields(self) -> list:
        """The list of Fields comprising the Message payload."""
        return self._fields

    @fields.setter
    def fields(self, fields: "list[BaseField]"):
        if not all(isinstance(field, BaseField) for field in fields):
            raise ValueError('Invalid field found in list')
        self._fields = fields

    @property
    def ota_size(self) -> int:
        """The maximum over-the-air size in bytes (SIN + MIN + fields)."""
        ota_bits = 2 * 8  # leading SIN and MIN bytes
        for field in self.fields:
            # Each optional field adds one presence bit.
            ota_bits += field.bits + (1 if field.optional else 0)
        return ceil(ota_bits / 8)

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Message):
            return NotImplemented
        return _attribute_equivalence(self, other)

    def decode(self, databytes: bytes) -> None:
        """Parses and stores field values from raw data (received over-the-air).

        Args:
            databytes: A bytes array (typically from the forward message)
        """
        binary_str = ''.join(format(int(b), '08b') for b in databytes)
        bit_offset = 16  #: Begin after SIN/MIN bytes
        for field in self.fields:
            if field.optional:
                # A presence bit precedes each optional field.
                present = binary_str[bit_offset] == '1'
                bit_offset += 1
                if not present:
                    continue
            bit_offset += field.decode(binary_str[bit_offset:])

    def derive(self, databytes: bytes) -> None:
        """Derives field values from raw data bytes (received over-the-air).

        Deprecated/replaced by `decode`.

        Args:
            databytes: A bytes array (typically from the forward message)
        """
        self.decode(databytes)

    def encode(self,
               data_format: int = FORMAT_B64,
               exclude: list = None) -> dict:
        """Encodes using the specified data format (base64 or hex).

        Args:
            data_format (int): 2=ASCII-Hex, 3=base64
            exclude (list[str]): A list of optional field names to exclude

        Returns:
            Dictionary with sin, min, data_format and data to pass into AT%MGRT
            or atcommand function `message_mo_send`

        Raises:
            ValueError: On unsupported data_format, indeterminate optional
                field state, or payload exceeding the maximum size.
        """
        if data_format not in [FORMAT_B64, FORMAT_HEX]:
            raise ValueError('data_format {} unsupported'.format(data_format))
        #: AT%MGRT uses '{}.{},{},{}'.format(sin, min, data_format, data)
        bin_str = ''
        for field in self.fields:
            if field.optional:
                # Determine and encode the presence bit for optional fields.
                if exclude is not None and field.name in exclude:
                    present = False
                elif hasattr(field, 'value'):
                    present = field.value is not None
                elif hasattr(field, 'elements'):
                    present = field.elements is not None
                else:
                    raise ValueError('Unknown value of optional')
                bin_str += '1' if present else '0'
                if not present:
                    continue
            bin_str += field.encode()
        if not bin_str:
            # Preserve legacy behavior: an empty payload encodes one zero byte.
            bin_str = '0' * 8
        elif len(bin_str) % 8:
            # Fix: pad only when not byte-aligned (previously a whole extra
            # zero byte was appended to already-aligned payloads).
            bin_str += '0' * (8 - len(bin_str) % 8)
        _format = '0{}X'.format(int(len(bin_str) / 8 * 2))  #: hex bytes 2 chars
        hex_str = format(int(bin_str, 2), _format)
        if (self.is_forward and len(hex_str) / 2 > 9998 or
                not self.is_forward and len(hex_str) / 2 > 6398):
            raise ValueError('{} bytes exceeds maximum size for Payload'.format(
                len(hex_str) / 2))
        if data_format == FORMAT_HEX:
            data = hex_str
        else:
            data = b2a_base64(bytearray.fromhex(hex_str)).strip().decode()
        return {
            'sin': self.sin,
            'min': self.min,
            'data_format': data_format,
            'data': data
        }

    def xml(self, indent: bool = False) -> ET.Element:
        """Returns the XML definition for a Message Definition File."""
        xmessage = ET.Element('Message')
        name = ET.SubElement(xmessage, 'Name')
        name.text = self.name
        min = ET.SubElement(xmessage, 'MIN')
        min.text = str(self.min)
        fields = ET.SubElement(xmessage, 'Fields')
        for field in self.fields:
            fields.append(field.xml())
        return xmessage if not indent else _indent_xml(xmessage)
class Service:
    """A data structure holding a set of related Forward and Return Messages.

    Attributes:
        name (str): The service name
        sin (int): Service Identification Number or codec service id (16..255)
        description (str): A description of the service (unsupported)
        messages_forward (list): A list of mobile-terminated Message definitions
        messages_return (list): A list of mobile-originated Message definitions
    """

    def __init__(self,
                 name: str,
                 sin: int,
                 description: str = None,
                 messages_forward: "list[Message]" = None,
                 messages_return: "list[Message]" = None) -> None:
        """Instantiates a Service made up of Messages.

        Args:
            name: The service name, unique within a MessageDefinitions.
            sin: The Service Identification Number (16..255)
            description: (Optional) Currently unsupported; warns if supplied.

        Raises:
            ValueError: On invalid name or SIN.
        """
        if not isinstance(name, str) or name == '':
            raise ValueError('Invalid service name {}'.format(name))
        if sin not in range(16, 256):
            raise ValueError('Invalid SIN must be 16..255')
        self.name = name
        self.sin = sin
        if description is not None:
            warn('Service Description not currently supported')
        self.description = None
        self.messages_forward = (messages_forward or
                                 Messages(self.sin, is_forward=True))
        self.messages_return = (messages_return or
                                Messages(self.sin, is_forward=False))

    def xml(self, indent: bool = False) -> ET.Element:
        """Returns the XML structure of the Service for a MDF.

        Raises:
            ValueError: If the service defines no messages at all.
        """
        if not self.messages_forward and not self.messages_return:
            raise ValueError('No messages defined for service {}'.format(
                self.sin))
        xservice = ET.Element('Service')
        ET.SubElement(xservice, 'Name').text = str(self.name)
        ET.SubElement(xservice, 'SIN').text = str(self.sin)
        if self.description:
            ET.SubElement(xservice, 'Description').text = str(self.description)
        if self.messages_forward:
            xforward = ET.SubElement(xservice, 'ForwardMessages')
            for message in self.messages_forward:
                xforward.append(message.xml())
        if self.messages_return:
            xreturn = ET.SubElement(xservice, 'ReturnMessages')
            for message in self.messages_return:
                xreturn.append(message.xml())
        return xservice if not indent else _indent_xml(xservice)
class ObjectList(list):
    """Base class for a specific object type list.

    Used for Fields, Messages, Services.

    Attributes:
        list_type: The object type the list is comprised of.
    """

    SUPPORTED_TYPES = [
        BaseField,
        Message,
        Service,
    ]

    def __init__(self, list_type: object):
        """Creates an empty list restricted to the given type.

        Raises:
            ValueError: If list_type is not a supported type.
        """
        if list_type not in self.SUPPORTED_TYPES:
            raise ValueError('Unsupported object type {}'.format(list_type))
        super().__init__()
        self.list_type = list_type

    def add(self, obj: object) -> bool:
        """Add an object to the end of the list.

        Args:
            obj (object): A valid object according to the list_type

        Raises:
            ValueError: On type mismatch or duplicate name.
        """
        if not isinstance(obj, self.list_type):
            raise ValueError('Invalid {} definition'.format(self.list_type))
        if any(existing.name == obj.name for existing in self):
            raise ValueError('Duplicate {} name {} found'.format(
                self.list_type, obj.name))
        self.append(obj)
        return True

    def __getitem__(self, n: Union[str, int]) -> object:
        """Retrieves an object by name or index.

        Args:
            n: The object name or list index

        Returns:
            object

        Raises:
            ValueError: If a name is given and not found.
        """
        if not isinstance(n, str):
            return super().__getitem__(n)
        for candidate in self:
            if candidate.name == n:
                return candidate
        raise ValueError('{} name {} not found'.format(self.list_type, n))

    def __setitem__(self, n: Union[str, int], value):
        """Sets the named object's value, or replaces the item at an index."""
        if not isinstance(n, str):
            super().__setitem__(n, value)
            return
        for candidate in self:
            if candidate.name == n:
                candidate.value = value
                break

    def delete(self, name: str) -> bool:
        """Delete an object from the list by name.

        Args:
            name: The name of the object.

        Returns:
            boolean: success
        """
        for candidate in self:
            if candidate.name == name:
                self.remove(candidate)
                return True
        return False
class Fields(ObjectList):
    """The list of Fields defining a Message or ArrayElement."""

    def __init__(self, fields: "list[BaseField]" = None):
        """Instantiates the list, optionally copying in existing fields.

        Args:
            fields: Optional list of BaseField to populate the list with.
        """
        super().__init__(list_type=BaseField)
        if fields is not None:
            for field in fields:
                self.add(field)

    def __eq__(self, other: object) -> bool:
        """Compares field lists for equal content; fields matched by name."""
        if not isinstance(other, Fields):
            return NotImplemented
        if len(self) != len(other):
            return False
        for field in self:
            # Fix: a name missing from `other` previously propagated the
            # ValueError from __getitem__; unequal should simply be False.
            try:
                if field != other[field.name]:
                    return False
            except ValueError:
                return False
        return True
class Messages(ObjectList):
    """The list of Messages (Forward or Return) within a Service."""

    def __init__(self, sin: int, is_forward: bool):
        """Creates an empty message list bound to a service SIN."""
        super().__init__(list_type=Message)
        self.sin = sin
        self.is_forward = is_forward

    def add(self, message: Message) -> bool:
        """Add a message to the list if it matches the parent SIN.

        Overrides the base class add method.

        Args:
            message (object): A valid Message

        Raises:
            ValueError: On invalid message, mismatched SIN, or duplicate
                name or MIN.
        """
        if not isinstance(message, Message):
            raise ValueError('Invalid message definition')
        if message.sin != self.sin:
            raise ValueError('Message SIN {} does not match service {}'.format(
                message.sin, self.sin))
        for existing in self:
            if existing.name == message.name:
                raise ValueError('Duplicate message name {} found'.format(
                    message.name))
            if existing.min == message.min:
                raise ValueError('Duplicate message MIN {} found'.format(
                    message.min))
        self.append(message)
        return True
class Services(ObjectList):
    """The list of Service(s) within a MessageDefinitions."""

    def __init__(self, services: "list[Service]" = None):
        """Instantiates the list, optionally copying in existing services.

        Args:
            services: Optional list of Service to populate the list with.
        """
        super().__init__(list_type=Service)
        if services is not None:
            for service in services:
                if not isinstance(service, Service):
                    raise ValueError('Invalid Service {}'.format(service))
                self.add(service)

    def add(self, service: Service) -> None:
        """Adds a Service to the list of Services.

        Raises:
            ValueError: On invalid Service or duplicate name/SIN.
        """
        if not isinstance(service, Service):
            raise ValueError('{} is not a valid Service'.format(service))
        for existing_service in self:
            # Fix: the original tested `service.name in self`, which compared
            # a string against Service objects and so never detected a
            # duplicate name.
            if existing_service.name == service.name:
                raise ValueError('Duplicate Service {}'.format(service.name))
            if existing_service.sin == service.sin:
                raise ValueError('Duplicate SIN {}'.format(service.sin))
        self.append(service)
class BooleanField(BaseField):
    """A Boolean field."""

    def __init__(self,
                 name: str,
                 description: str = None,
                 optional: bool = False,
                 default: bool = False,
                 value: bool = None) -> None:
        """Instantiates a BooleanField.

        Args:
            name: The field name must be unique within a Message.
            description: An optional description/purpose for the field.
            optional: Indicates if the field is optional in the Message.
            default: A default value for the boolean.
            value: Optional value to set during initialization.
        """
        # Fix: this docstring was previously a dead string statement placed
        # after the super().__init__() call.
        super().__init__(name=name,
                         data_type='bool',
                         description=description,
                         optional=optional)
        self.default = default
        self.value = value if value is not None else default

    @property
    def default(self):
        return self._default

    @default.setter
    def default(self, v: bool):
        if v is not None and not isinstance(v, bool):
            raise ValueError('Invalid boolean value {}'.format(v))
        self._default = v

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, v: bool):
        if v is not None and not isinstance(v, bool):
            raise ValueError('Invalid boolean value {}'.format(v))
        self._value = v

    @property
    def bits(self):
        # A boolean always occupies a single bit over-the-air.
        return 1

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, BooleanField):
            return NotImplemented
        return _attribute_equivalence(self, other)

    def encode(self) -> str:
        """Returns the binary string of the field value.

        Raises:
            ValueError: If no value is assigned on a mandatory field.
        """
        if self.value is None and not self.optional:
            raise ValueError('No value assigned to field')
        return '1' if self.value else '0'

    def decode(self, binary_str: str) -> int:
        """Populates the field value from binary and returns the next offset.

        Args:
            binary_str (str): The binary string to decode

        Returns:
            The bit offset after parsing
        """
        self.value = binary_str[0] == '1'
        return 1

    def xml(self) -> ET.Element:
        """Returns the message definition XML representation of the field."""
        xmlfield = self._base_xml()
        if self.default:
            default = ET.SubElement(xmlfield, 'Default')
            default.text = 'true'
        return xmlfield
class EnumField(BaseField):
    """An enumerated field sends an index over-the-air representing a string."""

    def __init__(self,
                 name: str,
                 items: "list[str]",
                 size: int,
                 description: str = None,
                 optional: bool = False,
                 default: int = None,
                 value: int = None) -> None:
        """Instantiates a EnumField.

        Args:
            name: The field name must be unique within a Message.
            items: A list of strings (indexed from 0).
            size: The number of *bits* used to encode the index over-the-air.
            description: An optional description/purpose for the field.
            optional: Indicates if the field is optional in the Message.
            default: A default (index or item string) for the enum.
            value: Optional value (index or item string) to set at init.

        Raises:
            ValueError: On invalid items or size.
        """
        super().__init__(name=name,
                         data_type='enum',
                         description=description,
                         optional=optional)
        if items is None or not all(isinstance(item, str) for item in items):
            raise ValueError('Items must all be strings')
        if not isinstance(size, int) or size < 1:
            raise ValueError('Size must be integer greater than 0 bits')
        # Order matters: items must exist before size (minimum bit check).
        self.items = items
        self.size = size
        self.default = default
        self.value = value if value is not None else self.default

    def _validate_enum(self, v: Union[int, str]) -> Union[int, None]:
        """Normalizes an item string or index to an index (or None).

        Raises:
            ValueError: If v is not a valid item or index.
        """
        if v is not None:
            if isinstance(v, str):
                if v not in self.items:
                    raise ValueError('Invalid value {}'.format(v))
                for index, item in enumerate(self.items):
                    if item == v:
                        return index
            elif isinstance(v, int):
                if v < 0 or v >= len(self.items):
                    raise ValueError('Invalid enum index {}'.format(v))
            else:
                raise ValueError('Invalid value {}'.format(v))
        return v

    @property
    def items(self):
        return self._items

    @items.setter
    def items(self, l: list):
        if not isinstance(l, list) or not all(isinstance(x, str) for x in l):
            raise ValueError('Items must be a list of strings')
        self._items = l

    @property
    def default(self):
        # Stored internally as an index; exposed as the item string.
        if self._default is None:
            return None
        return self.items[self._default]

    @default.setter
    def default(self, v: Union[int, str]):
        self._default = self._validate_enum(v)

    @property
    def value(self):
        # Stored internally as an index; exposed as the item string.
        if self._value is None:
            return None
        return self.items[self._value]

    @value.setter
    def value(self, v: Union[int, str]):
        self._value = self._validate_enum(v)

    @property
    def size(self):
        return self._size

    @size.setter
    def size(self, v: int):
        if not isinstance(v, int) or v < 1:
            raise ValueError('Size must be integer greater than zero')
        minimum_bits = optimal_bits((0, len(self.items)))
        if v < minimum_bits:
            raise ValueError('Size must be at least {} to support item count'
                             .format(minimum_bits))
        self._size = v

    @property
    def bits(self):
        return self.size

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, EnumField):
            return NotImplemented
        return _attribute_equivalence(self, other)

    def encode(self) -> str:
        """Returns the binary string of the field value (the item index).

        Raises:
            ValueError: If no value is configured.
        """
        if self.value is None:
            raise ValueError('No value configured in EnumField {}'.format(
                self.name))
        _format = '0{}b'.format(self.bits)
        binstr = format(self.items.index(self.value), _format)
        return binstr

    def decode(self, binary_str: str) -> int:
        """Populates the field value from binary and returns the next offset.

        Args:
            binary_str (str): The binary string to decode

        Returns:
            The bit offset after parsing

        Raises:
            ValueError: If the decoded index is outside the items list.
        """
        # Fix: the raw binary string was previously assigned to value, which
        # the setter always rejected; convert to the integer index first.
        self.value = int(binary_str[:self.bits], 2)
        return self.bits

    def xml(self) -> ET.Element:
        """Returns the message definition XML representation of the field."""
        xmlfield = self._base_xml()
        size = ET.SubElement(xmlfield, 'Size')
        size.text = str(self.size)
        items = ET.SubElement(xmlfield, 'Items')
        for string in self.items:
            item = ET.SubElement(items, 'string')
            item.text = str(string)
        if self.default:
            default = ET.SubElement(xmlfield, 'Default')
            default.text = str(self.default)
        return xmlfield
class UnsignedIntField(BaseField):
    """An unsigned integer value using a defined number of bits over-the-air."""

    def __init__(self,
                 name: str,
                 size: int,
                 data_type: str = 'uint_16',
                 description: str = None,
                 optional: bool = False,
                 default: int = None,
                 value: int = None) -> None:
        """Instantiates a UnsignedIntField.

        Args:
            name: The field name must be unique within a Message.
            size: The number of *bits* used to encode the integer over-the-air
                (maximum 32).
            data_type: The integer type represented (for decoding).
            description: An optional description/purpose for the field.
            optional: Indicates if the field is optional in the Message.
            default: A default value for the integer.
            value: Optional value to set during initialization.

        Raises:
            ValueError: On unsupported data_type.
        """
        if data_type not in ['uint_8', 'uint_16', 'uint_32']:
            raise ValueError('Invalid unsignedint type {}'.format(data_type))
        super().__init__(name=name,
                         data_type=data_type,
                         description=description,
                         optional=optional)
        # Order matters: size must be set before default/value validation.
        self.size = size
        self.default = default
        self.value = value if value is not None else default

    @property
    def size(self):
        # Number of bits used over-the-air.
        return self._size

    @size.setter
    def size(self, value: int):
        if not isinstance(value, int) or value < 1:
            raise ValueError('Size must be integer greater than 0 bits')
        # Warn (but allow) when more bits are used than the data type needs.
        data_type_size = int(self.data_type.split('_')[1])
        if value > data_type_size:
            warn('Size {} larger than required by {}'.format(
                value, self.data_type))
        self._size = value

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, v: int):
        clip = False
        if v is not None:
            if not isinstance(v, int) or v < 0:
                raise ValueError('Unsignedint must be non-negative integer')
            if v > 2**self.size - 1:
                # Values beyond the encodable range are clipped, not rejected.
                self._value = 2**self.size - 1
                warn('Clipping unsignedint at max value {}'.format(self._value))
                clip = True
        if not clip:
            self._value = v

    @property
    def default(self):
        return self._default

    @default.setter
    def default(self, v: int):
        if v is not None:
            if v > 2**self.size - 1 or v < 0:
                raise ValueError('Invalid unsignedint default {}'.format(v))
        self._default = v

    @property
    def bits(self):
        # Over-the-air size equals the configured bit size.
        return self.size

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, UnsignedIntField):
            return NotImplemented
        return _attribute_equivalence(self, other)

    def encode(self) -> str:
        """Returns the binary string of the field value.

        Raises:
            ValueError: If no value is defined.
        """
        if self.value is None:
            raise ValueError('No value defined in UnsignedIntField {}'.format(
                self.name))
        _format = '0{}b'.format(self.bits)
        return format(self.value, _format)

    def decode(self, binary_str: str) -> int:
        """Populates the field value from binary and returns the next offset.

        Args:
            binary_str (str): The binary string to decode

        Returns:
            The bit offset after parsing
        """
        self.value = int(binary_str[:self.bits], 2)
        return self.bits

    def xml(self) -> ET.Element:
        """Returns the message definition XML representation of the field."""
        xmlfield = self._base_xml()
        size = ET.SubElement(xmlfield, 'Size')
        size.text = str(self.size)
        if self.default:
            default = ET.SubElement(xmlfield, 'Default')
            default.text = str(self.default)
        return xmlfield
class SignedIntField(BaseField):
    """A signed integer value using a defined number of bits over-the-air."""

    def __init__(self,
                 name: str,
                 size: int,
                 data_type: str = 'int_16',
                 description: str = None,
                 optional: bool = False,
                 default: int = None,
                 value: int = None) -> None:
        """Instantiates a SignedIntField.

        Args:
            name: The field name must be unique within a Message.
            size: The number of *bits* used to encode the integer over-the-air
                (maximum 32).
            data_type: The integer type represented (for decoding).
            description: An optional description/purpose for the field.
            optional: Indicates if the field is optional in the Message.
            default: A default value for the integer.
            value: Optional value to set during initialization.

        Raises:
            ValueError: On unsupported data_type.
        """
        if data_type not in ['int_8', 'int_16', 'int_32']:
            # Fix: message previously said "unsignedint".
            raise ValueError('Invalid signedint type {}'.format(data_type))
        super().__init__(name=name,
                         data_type=data_type,
                         description=description,
                         optional=optional)
        # Order matters: size must be set before default/value validation.
        self.size = size
        self.default = default
        self.value = value if value is not None else default

    @property
    def size(self):
        # Number of bits used over-the-air.
        return self._size

    @size.setter
    def size(self, value: int):
        if not isinstance(value, int) or value < 1:
            raise ValueError('Size must be integer greater than 0 bits')
        self._size = value

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, v: int):
        if v is None:
            self._value = v
            return
        if not isinstance(v, int):
            # Fix: message previously said "Unsignedint must be non-negative".
            raise ValueError('Signedint must be integer')
        upper = 2**(self.size - 1) - 1
        lower = -2**(self.size - 1)
        # Out-of-range values are clipped (with a warning), not rejected.
        if v > upper:
            self._value = upper
            warn('Clipping signedint at max value {}'.format(self._value))
        elif v < lower:
            self._value = lower
            warn('Clipping signedint at min value {}'.format(self._value))
        else:
            self._value = v

    @property
    def default(self):
        return self._default

    @default.setter
    def default(self, v: int):
        if v is not None:
            if not isinstance(v, int):
                raise ValueError('Invalid signed integer {}'.format(v))
            if v > 2**(self.size - 1) - 1 or v < -2**(self.size - 1):
                raise ValueError('Invalid default {}'.format(v))
        self._default = v

    @property
    def bits(self):
        # Over-the-air size equals the configured bit size.
        return self.size

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, SignedIntField):
            return NotImplemented
        return _attribute_equivalence(self, other)

    def encode(self) -> str:
        """Returns the binary string of the field value (two's complement).

        Raises:
            ValueError: If no value is defined.
        """
        if self.value is None:
            # Fix: error previously named UnsignedIntField.
            raise ValueError('No value defined in SignedIntField {}'.format(
                self.name))
        _format = '0{}b'.format(self.bits)
        # Masking to the field width yields the two's complement bit pattern
        # for negative values directly (replaces the manual invert-and-add).
        return format(self.value & (2**self.bits - 1), _format)

    def decode(self, binary_str: str) -> int:
        """Populates the field value from binary and returns the next offset.

        Args:
            binary_str (str): The binary string to decode

        Returns:
            The bit offset after parsing
        """
        value = int(binary_str[:self.bits], 2)
        if (value & (1 << (self.bits - 1))) != 0:  #: sign bit set
            value = value - (1 << self.bits)  #: compute negative value
        self.value = value
        return self.bits

    def xml(self) -> ET.Element:
        """Returns the message definition XML representation of the field."""
        xmlfield = self._base_xml()
        size = ET.SubElement(xmlfield, 'Size')
        size.text = str(self.size)
        if self.default:
            default = ET.SubElement(xmlfield, 'Default')
            default.text = str(self.default)
        return xmlfield
class StringField(BaseField):
    """A character string sent over-the-air."""

    def __init__(self,
                 name: str,
                 size: int,
                 description: str = None,
                 optional: bool = False,
                 fixed: bool = False,
                 default: str = None,
                 value: str = None) -> None:
        """Instantiates a StringField.

        Args:
            name: The field name must be unique within a Message.
            size: The maximum number of characters in the string.
            description: An optional description/purpose for the string.
            optional: Indicates if the string is optional in the Message.
            fixed: Indicates if the string is always fixed length `size`.
            default: A default value for the string.
            value: Optional value to set during initialization.
        """
        super().__init__(name=name,
                         data_type='string',
                         description=description,
                         optional=optional)
        # Order matters: size must exist before _validate_string runs.
        self.size = size
        self.fixed = fixed
        self.default = default
        self.value = value if value is not None else default

    def _validate_string(self, s: str):
        # Over-length strings are clipped with a warning, not rejected.
        if s is not None:
            if not isinstance(s, str):
                raise ValueError('Invalid string {}'.format(s))
            if len(s) > self.size:
                warn('Clipping string at max {} characters'.format(self.size))
                return s[:self.size]
        return s

    @property
    def size(self):
        # Maximum number of characters.
        return self._size

    @size.setter
    def size(self, value: int):
        if not isinstance(value, int) or value < 1:
            raise ValueError('Size must be integer greater than 0 characters')
        self._size = value

    @property
    def default(self) -> str:
        return self._default

    @default.setter
    def default(self, v: str):
        self._default = self._validate_string(v)

    @property
    def value(self) -> str:
        return self._value

    @value.setter
    def value(self, v: str):
        self._value = self._validate_string(v)

    @property
    def bits(self):
        # Fixed strings always occupy size*8 bits; variable strings occupy
        # 8 bits per current character (length prefix not counted here).
        if self.fixed or self.value is None:
            return self.size * 8
        return len(self.value) * 8

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, StringField):
            return NotImplemented
        return _attribute_equivalence(self, other)

    def encode(self) -> str:
        """Returns the binary string of the field value.

        Raises:
            ValueError: If no value is defined on a mandatory field.
        """
        if self.value is None and not self.optional:
            raise ValueError('No value defined for StringField {}'.format(
                self.name))
        binstr = ''.join(format(ord(c), '08b') for c in self.value)
        if self.fixed:
            # Pad fixed-size strings to full size with zero bits.
            binstr += ''.join('0' for bit in range(len(binstr), self.bits))
        else:
            # Variable-size strings carry a length prefix.
            binstr = _encode_field_length(len(self.value)) + binstr
        return binstr

    def decode(self, binary_str: str) -> int:
        """Populates the field value from binary and returns the next offset.

        Args:
            binary_str (str): The binary string to decode

        Returns:
            The bit offset after parsing
        """
        if self.fixed:
            length = self.size
            bit_index = 0
        else:
            (length, bit_index) = _decode_field_length(binary_str)
        n = int(binary_str[bit_index:bit_index + length * 8], 2)
        # NOTE(review): bit_length() ignores leading zero bytes, so a string
        # beginning with NUL characters loses them here — presumably fine
        # since a NUL also truncates the string below; confirm.
        char_bytes = n.to_bytes((n.bit_length() + 7) // 8, 'big')
        for i, byte in enumerate(char_bytes):
            if byte == 0:
                warn('Truncating after 0 byte in string')
                char_bytes = char_bytes[:i]
                break
        self.value = char_bytes.decode('utf-8', 'surrogatepass') or '\0'
        return bit_index + length * 8

    def xml(self) -> ET.Element:
        """Returns the message definition XML representation of the field."""
        xmlfield = self._base_xml()
        size = ET.SubElement(xmlfield, 'Size')
        size.text = str(self.size)
        if self.fixed:
            fixed = ET.SubElement(xmlfield, 'Fixed')
            fixed.text = 'true'
        if self.default:
            default = ET.SubElement(xmlfield, 'Default')
            default.text = str(self.default)
        return xmlfield
class DataField(BaseField):
"""A data field of raw bytes sent over-the-air.
Can also be used to hold floating point, double-precision or large integers.
"""
supported_data_types = ['data', 'float', 'double']
def __init__(self,
name: str,
size: int,
data_type: str = 'data',
description: str = None,
optional: bool = False,
fixed: bool = False,
default: bytes = None,
value: bytes = None) -> None:
"""Instantiates a EnumField.
Args:
name: The field name must be unique within a Message.
size: The maximum number of bytes to send over-the-air.
data_type: The data type represented within the bytes.
description: An optional description/purpose for the field.
optional: Indicates if the field is optional in the Message.
fixed: Indicates if the data bytes are a fixed `size`.
default: A default value for the enum.
value: Optional value to set during initialization.
"""
if data_type is None or data_type not in self.supported_data_types:
raise ValueError('Invalid data type {}'.format(data_type))
super().__init__(name=name,
data_type=data_type,
description=description,
optional=optional)
self.fixed = fixed
self.size = size
self.default = default
self.value = value if value is not None else default
@property
def size(self):
return self._size
@size.setter
def size(self, value: int):
if not isinstance(value, int) or value < 1:
raise ValueError('Size must be integer greater than 0 bytes')
if self.data_type == 'float':
if value != 4:
warn('Adjusting float size to 4 bytes fixed')
self._size = 4
self.fixed = True
elif self.data_type == 'double':
if value != 8:
warn('Adjusting float size to 8 bytes fixed')
self._size = 4
self.fixed = True
else:
self._size = value
@property
def value(self):
if self.data_type == 'float':
return unpack('!f', self._value)
if self.data_type == 'double':
return unpack('!d', self._value)
return self._value
@value.setter
def value(self, v: Union[bytes, float]):
if v is not None:
if self.data_type in ['float', 'double']:
if not isinstance(v, float):
raise ValueError('Invalid {} value {}'.format(
self.data_type, v))
_format = '!f' if self.data_type == 'float' else '!d'
v = pack(_format, v)
elif not isinstance(v, bytes):
raise ValueError('Invalid bytes {}'.format(v))
self._value = v
@property
def bits(self):
if self.fixed:
return self.size * 8
elif self.value is None:
return 0
return len(self.value) * 8
def __eq__(self, other: object) -> bool:
if not isinstance(other, DataField):
return NotImplemented
return _attribute_equivalence(self, other)
def encode(self) -> str:
"""Returns the binary string of the field value."""
if self.value is None and not self.optional:
raise ValueError('No value defined for DataField {}'.format(
self.name))
binstr = ''
binstr = ''.join(format(b, '08b') for b in self._value)
if self.fixed: #:pad to fixed length
binstr += ''.join('0' for bit in range(len(binstr), self.bits))
else:
binstr = _encode_field_length(len(self._value)) + binstr
return binstr
    def decode(self, binary_str: str) -> int:
        """Populates the field value from binary and returns the next offset.

        Args:
            binary_str (str): The binary string to decode

        Returns:
            The bit offset after parsing
        """
        if self.fixed:
            binary = binary_str[:self.bits]
            bits = self.bits
        else:
            # Variable-size fields carry an encoded byte-length prefix.
            (length, bit_index) = _decode_field_length(binary_str)
            binary = binary_str[bit_index:length * 8 + bit_index]
            bits = len(binary)
        # Store the raw big-endian bytes; `value` handles any float unpacking.
        self._value = int(binary, 2).to_bytes(int(bits / 8), 'big')
        # NOTE(review): for variable-size fields this returns self.bits (data
        # bits only) and appears to omit the length-prefix bits consumed
        # above -- confirm against the caller's offset bookkeeping.
        return self.bits
def xml(self) -> ET.Element:
xmlfield = self._base_xml()
size = ET.SubElement(xmlfield, 'Size')
size.text = str(self.size)
if self.default:
default = ET.SubElement(xmlfield, 'Default')
default.text = str(self.default)
return xmlfield
class ArrayField(BaseField):
    """An Array Field provides a list where each element is a set of Fields.

    Attributes:
        name (str): The name of the field instance.
        size (int): The maximum number of elements allowed.
        fields (Fields): A list of Field types comprising each ArrayElement
        description (str): An optional description of the array/use.
        optional (bool): Indicates if the array is optional in the Message
        fixed (bool): Indicates if the array is always the fixed `size`
        elements (list): The enumerated list of ArrayElements
    """
    def __init__(self,
                 name: str,
                 size: int,
                 fields: Fields,
                 description: str = None,
                 optional: bool = False,
                 fixed: bool = False,
                 elements: "list[Fields]" = None) -> None:
        """Initializes an ArrayField instance.

        Args:
            name: The unique field name within the Message.
            size: The maximum number of elements allowed.
            fields: The list of Field types comprising each element.
            description: An optional description/purpose of the array.
            optional: Indicates if the array is optional in the Message.
            fixed: Indicates if the array is always the fixed `size`.
            elements: Option to populate elements of Fields during instantiation.
        """
        super().__init__(name=name,
                         data_type='array',
                         description=description,
                         optional=optional)
        self.size = size
        self.fixed = fixed
        self.fields = fields
        self.elements = elements or []

    @property
    def size(self):
        # Maximum number of elements allowed in the array.
        return self._size

    @size.setter
    def size(self, value: int):
        if not isinstance(value, int) or value < 1:
            raise ValueError('Size must be integer greater than 0 fields')
        self._size = value

    @property
    def fields(self):
        # Template Fields definition each element must conform to.
        return self._fields

    @fields.setter
    def fields(self, fields: Fields):
        if not isinstance(fields, Fields):
            raise ValueError('Invalid Fields definition for ArrayField')
        self._fields = fields

    @property
    def elements(self):
        # The populated array elements, each one a Fields group.
        return self._elements

    @elements.setter
    def elements(self, elements: list):
        if elements is None or not hasattr(self, '_elements'):
            self._elements = []
        if not isinstance(elements, list):
            raise ValueError('Elements must be a list of grouped Fields')
        # Validate each supplied element against the template `fields`,
        # then store it at its own position in the elements list.
        for row, fields in enumerate(elements):
            for index, field in enumerate(fields):
                if (field.name != self.fields[index].name):
                    raise ValueError('fields[{}].name expected {} got {}'
                                     .format(index,
                                             self.fields[index].name,
                                             field.name))
                if (field.data_type != self.fields[index].data_type):
                    raise ValueError('fields[{}].data_type expected {} got {}'
                                     .format(index,
                                             self.fields[index].data_type,
                                             field.data_type))
                #TODO: validate non-optional fields have value/elements
                if field.value is None and not field.optional:
                    raise ValueError('fields[{}].value missing'.format(index))
            # BUG FIX: previously indexed with the stale inner-loop `index`
            # (the last field position), so every element overwrote the same
            # slot; index by the element's own position (`row`) instead.
            try:
                self._elements[row] = fields
            except IndexError:
                self._elements.append(fields)

    @property
    def bits(self):
        """The size of a single array element in bits."""
        bits = 0
        for field in self.fields:
            bits += field.bits
        return bits

    def __eq__(self, other: object) -> bool:
        # Attribute-wise equality; defers for non-ArrayField operands.
        if not isinstance(other, ArrayField):
            return NotImplemented
        return _attribute_equivalence(self, other)

    def _valid_element(self, element: Fields) -> bool:
        """Raises ValueError if `element` does not match the template fields."""
        for i, field in enumerate(self.fields):
            e = element[i]
            if e.name != field.name:
                raise ValueError('element field name {} does not match {}'
                                 .format(e.name, field.name))
            if e.data_type != field.data_type:
                raise ValueError('element field data_type {} does not match {}'
                                 .format(e.data_type, field.data_type))
            if e.optional != field.optional:
                raise ValueError('element optional {} does not match {}'
                                 .format(e.optional, field.optional))
            if hasattr(field, 'fixed') and e.fixed != field.fixed:
                raise ValueError('element fixed {} does not match {}'
                                 .format(e.fixed, field.fixed))
            if hasattr(field, 'size') and e.size != field.size:
                raise ValueError('element size {} does not match {}'
                                 .format(e.size, field.size))
        return True

    def append(self, element: Fields):
        """Adds the array element to the list of elements."""
        if not isinstance(element, Fields):
            raise ValueError('Invalid element definition must be Fields')
        if self._valid_element(element):
            for i, field in enumerate(element):
                # Inherit description/default from the template definition.
                if (hasattr(field, 'description') and
                        field.description != self.fields[i].description):
                    field.description = self.fields[i].description
                if hasattr(field, 'value') and field.value is None:
                    field.value = self.fields[i].default
            self._elements.append(element)

    def new_element(self) -> Fields:
        """Returns an empty element at the end of the elements list."""
        new_index = len(self._elements)
        self._elements.append(Fields(self.fields))
        return self.elements[new_index]

    def encode(self) -> str:
        """Returns the binary string of the field value.

        Raises:
            ValueError: If the array has no elements to encode.
        """
        if len(self.elements) == 0:
            raise ValueError('No elements to encode')
        binstr = ''
        for element in self.elements:
            for field in element:
                binstr += field.encode()
        if not self.fixed:
            # Variable-size arrays are prefixed with an encoded element count.
            binstr = _encode_field_length(len(self.elements)) + binstr
        return binstr

    def decode(self, binary_str: str) -> int:
        """Populates the field value from binary and returns the next offset.

        Args:
            binary_str (str): The binary string to decode

        Returns:
            The bit offset after parsing
        """
        if self.fixed:
            length = self.size
            bit_index = 0
        else:
            (length, bit_index) = _decode_field_length(binary_str)
        for index in range(0, length):
            fields = Fields(self.fields)
            for field in fields:
                if field.optional:
                    # A single presence bit precedes each optional field.
                    if binary_str[bit_index] == '0':
                        bit_index += 1
                        continue
                    bit_index += 1
                bit_index += field.decode(binary_str[bit_index:])
            try:
                self._elements[index] = fields
            except IndexError:
                self._elements.append(fields)
        return bit_index

    def xml(self) -> ET.Element:
        """Returns the XML representation of the array field."""
        xmlfield = self._base_xml()
        size = ET.SubElement(xmlfield, 'Size')
        size.text = str(self.size)
        if self.fixed:
            default = ET.SubElement(xmlfield, 'Fixed')
            default.text = 'true'
        fields = ET.SubElement(xmlfield, 'Fields')
        for field in self.fields:
            fields.append(field.xml())
        return xmlfield
class MessageDefinitions:
    """A set of Message Definitions grouped into Services.

    Attributes:
        services: The list of Services with Messages defined.
    """
    def __init__(self, services: Services = None):
        """Create the container; rejects anything that is not a Services."""
        if services is not None and not isinstance(services, Services):
            raise ValueError('Invalid Services')
        self.services = services or Services()

    def xml(self, indent: bool = False) -> ET.Element:
        """Return the MessageDefinition XML tree, optionally indented."""
        xmsgdef = ET.Element('MessageDefinition',
                             attrib={'xmlns:xsd': XML_NAMESPACE['xsd']})
        container = ET.SubElement(xmsgdef, 'Services')
        for service in self.services:
            container.append(service.xml())
        return _indent_xml(xmsgdef) if indent else xmsgdef

    def mdf_export(self, filename: str, pretty: bool = False):
        """Write the message definition XML to `filename`.

        When `pretty` is set the output is re-serialized via minidom for
        human-readable indentation; otherwise ElementTree writes it with
        an XML declaration.
        """
        tree = ET.ElementTree(self.xml())
        root = tree.getroot()
        if not pretty:
            with open(filename, 'wb') as f:
                tree.write(f, encoding='utf-8', xml_declaration=True)
            return
        from xml.dom.minidom import parseString
        xmlstr = parseString(ET.tostring(root)).toprettyxml(indent=" ")
        with open(filename, 'w') as f:
            f.write(xmlstr)
| 17,647 | 0 | 2,226 |
884d81afcabb1bff852b98a3da835296414d403e | 1,109 | py | Python | Python Essential Training/StructuredData.py | RaghuBhogireddy/python | 2f4a0118715dcf6829dc72b32256d5cb1e6df19f | [
"Apache-2.0"
] | null | null | null | Python Essential Training/StructuredData.py | RaghuBhogireddy/python | 2f4a0118715dcf6829dc72b32256d5cb1e6df19f | [
"Apache-2.0"
] | null | null | null | Python Essential Training/StructuredData.py | RaghuBhogireddy/python | 2f4a0118715dcf6829dc72b32256d5cb1e6df19f | [
"Apache-2.0"
] | null | null | null |
if __name__ == "__main__" : main() | 26.404762 | 101 | 0.542831 |
def main():
    """Run each structured-data demo in order."""
    for demo in (lists, tuples, dictionaries):
        demo()
def lists():
    """Demonstrate list mutation: append, insert, remove, pop and del."""
    game = ['Rock', 'Paper', 'Scissors', 'Lizard', 'Spark']
    game.append('Cricket')       # add at the end
    print(game)
    print('=====================')
    game.insert(2, 'Hockey')     # add at a specific position
    print(game)
    print('=====================')
    game.remove('Paper')         # remove by value
    print(game)
    print('=====================')
    game.pop(3)                  # remove by index
    print(game)
    print('=====================')
    del game[3]                  # remove via the del statement
    print(game)
    print('=====================')
def tuples():
    """Demonstrate that tuples are immutable: only inspection is possible."""
    game = ('Rock', 'Paper', 'Scissors', 'Lizard', 'Spark')
def dictionaries():
    """Demonstrate a simple dict literal mapping animals to their sounds."""
    animals = {'Kitty': 'meow', 'puppy': 'ruff!', 'lion': 'grr', 'dragon': 'rawr'}
    print(animals)
if __name__ == "__main__" : main() | 978 | 0 | 92 |
7dbd1e9b99127d118ff1370eda23f186ecbe47ac | 82 | py | Python | tests/__init__.py | omarvaneer/pharynx_redox | ffcd5733fd0823244f50590951e9af0bc9ae2518 | [
"MIT"
] | 2 | 2018-06-08T12:45:03.000Z | 2018-07-13T04:17:01.000Z | tests/__init__.py | omarvaneer/pharynx_redox | ffcd5733fd0823244f50590951e9af0bc9ae2518 | [
"MIT"
] | 17 | 2020-03-18T11:43:39.000Z | 2020-07-21T18:04:25.000Z | tests/__init__.py | half-adder/pharynx_redox | a5b99f6afb4a36a021d0439bb15d2c826de605b1 | [
"MIT"
] | 3 | 2021-07-21T16:14:28.000Z | 2021-07-27T15:38:39.000Z | from pathlib import Path
# NOTE(review): hard-coded, machine-specific absolute path -- tests depending
# on this will only run on that developer's machine; consider deriving it
# from __file__ or an environment variable.
TEST_DATA_DIR = Path("/Users/sean/code/pharedox/data")
| 16.4 | 54 | 0.768293 | from pathlib import Path
TEST_DATA_DIR = Path("/Users/sean/code/pharedox/data")
| 0 | 0 | 0 |
d9df54afb8190932c8344788b1e8fec7824d4429 | 4,582 | py | Python | fastproject/modules/users/service.py | jorge4larcon/fastproject | 0a091940c22e27d813f38819027081314d334c22 | [
"MIT"
] | 12 | 2022-02-11T04:03:17.000Z | 2022-02-15T03:34:50.000Z | fastproject/modules/users/service.py | jorge4larcon/fastproject | 0a091940c22e27d813f38819027081314d334c22 | [
"MIT"
] | null | null | null | fastproject/modules/users/service.py | jorge4larcon/fastproject | 0a091940c22e27d813f38819027081314d334c22 | [
"MIT"
] | 1 | 2022-02-13T02:55:50.000Z | 2022-02-13T02:55:50.000Z | """Service module."""
import datetime
import uuid
import zoneinfo
from typing import Any, Optional
from ... import config
from ...utils import encoding
from . import password_hashing, repository
async def create_user(
    username: str,
    email: str,
    first_name: str,
    last_name: str,
    password: str,
    date_joined: Optional[datetime.datetime] = None,
    is_superuser=False,
    is_staff=False,
    is_active=True,
    last_login: Optional[datetime.datetime] = None,
) -> repository.User:
    """Inserts a user into the database.

    String inputs are unicode-normalized and the plain-text password is
    hashed before persisting. When `date_joined` is omitted, the current
    time in the application's configured timezone is used.

    Args:
        username: The username of the user.
        email: The email of the user.
        first_name: The first name of the user.
        last_name: The last name of the user.
        password: The password (not hashed) of the user.
        date_joined: The datetime the user joined the system.
        is_superuser: A flag that indicates if this user is super user.
        is_staff: A flag that indicated if this user can is staff.
        is_active: A flag that indicates if this user is active.
        last_login: The datetime the user last logged in.

    Returns:
        A repository.User representing the created user.

    Raises:
        UsernameAlreadyExistsError: If the username already exists.
        EmailAlreadyExistsError: If the email already exists.
    """
    if date_joined is None:
        tzinfo = zoneinfo.ZoneInfo(config.settings["APPLICATION"]["timezone"])
        date_joined = datetime.datetime.now(tz=tzinfo)
    return await repository.insert_user(
        username=encoding.normalize_str(username),
        email=encoding.normalize_str(email),
        first_name=encoding.normalize_str(first_name),
        last_name=encoding.normalize_str(last_name),
        password=password_hashing.make_password(password),
        date_joined=date_joined,
        is_superuser=is_superuser,
        is_staff=is_staff,
        is_active=is_active,
        last_login=last_login,
    )
async def get_user_by_id(user_id: uuid.UUID) -> Optional[repository.User]:
    """Fetch a single user by primary key.

    Args:
        user_id: Identifier of the user to look up.

    Returns:
        The matching repository.User, or None when no such user exists.
    """
    return await repository.get_user_by_id(user_id)
async def update_user_by_id(
    user_id: uuid.UUID, **kwargs: Any
) -> Optional[repository.User]:
    """Updates the data of the user with the specified user_id in the database.

    Textual identity fields are unicode-normalized and any supplied
    password is hashed before being forwarded to the repository.

    Args:
        user_id: The user_id of the user that will be updated.
        **username (str): The username of the user.
        **email (str): The email of the user.
        **first_name (str): The first name of the user.
        **last_name (str): The last name of the user.
        **password (str): The password (not hashed) of the user.
        **is_superuser (bool): A flag that indicates if this user is super user.
        **is_staff (bool): A flag that indicated if this user can is staff.
        **is_active (bool): A flag that indicates if this user is active.
        **date_joined (datetime.datetime): The datetime the user joined the
            system.
        **last_login (datetime.datetime): The datetime the user last logged in.

    Returns:
        A repository.User representing the updated user, None if the user was
        not updated.

    Raises:
        UsernameAlreadyExistsError: If the username already exists.
        EmailAlreadyExistsError: If the email already exists.
    """
    for field in ("username", "email", "first_name", "last_name"):
        if field in kwargs:
            kwargs[field] = encoding.normalize_str(kwargs[field])
    if "password" in kwargs:
        kwargs["password"] = password_hashing.make_password(kwargs["password"])
    return await repository.update_user_by_id(user_id, **kwargs)
async def delete_user_by_id(user_id: uuid.UUID) -> Optional[repository.User]:
    """Remove the user identified by `user_id`.

    Args:
        user_id: Identifier of the user to delete.

    Returns:
        The deleted repository.User, or None when nothing was deleted.
    """
    return await repository.delete_user_by_id(user_id)
| 34.712121 | 80 | 0.691619 | """Service module."""
import datetime
import uuid
import zoneinfo
from typing import Any, Optional
from ... import config
from ...utils import encoding
from . import password_hashing, repository
async def create_user(
username: str,
email: str,
first_name: str,
last_name: str,
password: str,
date_joined: Optional[datetime.datetime] = None,
is_superuser=False,
is_staff=False,
is_active=True,
last_login: Optional[datetime.datetime] = None,
) -> repository.User:
"""Inserts a user into the database.
Args:
username: The username of the user.
email: The email of the user.
first_name: The first name of the user.
last_name: The last name of the user.
password: The password (not hashed) of the user.
date_joined: The datetime the user joined the system.
is_superuser: A flag that indicates if this user is super user.
is_staff: A flag that indicated if this user can is staff.
is_active: A flag that indicates if this user is active.
last_login: The datetime the user last logged in.
Returns:
A repository.User representing the created user.
Raises:
UsernameAlreadyExistsError: If the username already exists.
EmailAlreadyExistsError: If the email already exists.
"""
username = encoding.normalize_str(username)
email = encoding.normalize_str(email)
first_name = encoding.normalize_str(first_name)
last_name = encoding.normalize_str(last_name)
if date_joined is None:
tzinfo = zoneinfo.ZoneInfo(config.settings["APPLICATION"]["timezone"])
date_joined = datetime.datetime.now(tz=tzinfo)
password_hash = password_hashing.make_password(password)
return await repository.insert_user(
username=username,
email=email,
first_name=first_name,
last_name=last_name,
password=password_hash,
date_joined=date_joined,
is_superuser=is_superuser,
is_staff=is_staff,
is_active=is_active,
last_login=last_login,
)
async def get_user_by_id(user_id: uuid.UUID) -> Optional[repository.User]:
"""Returns the user with the specified user_id from the database.
Args:
user_id: The user_id of the searched user.
Returns:
A repository.User representing the searched user, None if the user was not
found.
"""
return await repository.get_user_by_id(user_id)
async def update_user_by_id(
user_id: uuid.UUID, **kwargs: Any
) -> Optional[repository.User]:
"""Updates the data of the user with the specified user_id in the database.
Args:
user_id: The user_id of the user that will be updated.
**username (str): The username of the user.
**email (str): The email of the user.
**first_name (str): The first name of the user.
**last_name (str): The last name of the user.
**password (str): The password (not hashed) of the user.
**is_superuser (bool): A flag that indicates if this user is super user.
**is_staff (bool): A flag that indicated if this user can is staff.
**is_active (bool): A flag that indicates if this user is active.
**date_joined (datetime.datetime): The datetime the user joined the
system.
**last_login (datetime.datetime): The datetime the user last logged in.
Returns:
A repository.User representing the updated user, None if the user was not
updated.
Raises:
UsernameAlreadyExistsError: If the username already exists.
EmailAlreadyExistsError: If the email already exists.
"""
if "username" in kwargs:
kwargs["username"] = encoding.normalize_str(kwargs["username"])
if "email" in kwargs:
kwargs["email"] = encoding.normalize_str(kwargs["email"])
if "first_name" in kwargs:
kwargs["first_name"] = encoding.normalize_str(kwargs["first_name"])
if "last_name" in kwargs:
kwargs["last_name"] = encoding.normalize_str(kwargs["last_name"])
if "password" in kwargs:
kwargs["password"] = password_hashing.make_password(kwargs["password"])
return await repository.update_user_by_id(user_id, **kwargs)
async def delete_user_by_id(user_id: uuid.UUID) -> Optional[repository.User]:
"""Deletes the user with the specified user_id from the database.
Args:
user_id: The user_id of the user that will be deleted.
Returns:
A repository.User representing the deleted user, None if the user was not
deleted.
"""
return await repository.delete_user_by_id(user_id)
| 0 | 0 | 0 |
45c50b07d8e2b5ce4b964895748ab5d6910015ff | 1,091 | py | Python | pySnowRadar/algorithms/GSFC.py | kingjml/pySnowRadar | a64721c3a84f255aa3bb9b872682a79969f7b1be | [
"MIT"
] | 4 | 2020-06-04T00:25:46.000Z | 2021-12-17T15:08:35.000Z | pySnowRadar/algorithms/GSFC.py | kingjml/pySnowRadar | a64721c3a84f255aa3bb9b872682a79969f7b1be | [
"MIT"
] | 7 | 2020-02-19T11:34:26.000Z | 2020-10-02T12:52:17.000Z | pySnowRadar/algorithms/GSFC.py | kingjml/pySnowRadar | a64721c3a84f255aa3bb9b872682a79969f7b1be | [
"MIT"
] | null | null | null | import numpy as np
def GSFC_NK(data, **kwargs):
    '''
    Picker algorithm by NASA Goddard

    Arguments must include the snowradar trace data itself (passed as a 1D
    float array) as well as any parameters required by the algorithm for
    layer-picking

    Only 2 picked layers are expected
        1. air-snow layer pick (integer indexing the passed snowradar trace)
        2. snow-ice layer pick (integer indexing the passed snowradar trace)
    '''
    # Placeholder picks: the first two sample indices.
    return np.int64(0), np.int64(1)
def NSIDC(data):
'''
This is a placeholder for the NASA Goddard product hosted at NSIDC.
Arguments must include the snowradar trace data itself (passed as a 1D float array) as well as
any parameters required by the algorithm for layer-picking
Only 2 picked layers are expected
1. air-snow layer pick (integer indexing the passed snowradar trace)
2. snow-ice layer pick (integer indexing the passed snowradar trace)
'''
as_pick = np.int64(0)
si_pick = np.int64(1)
return as_pick, si_pick | 33.060606 | 99 | 0.694775 | import numpy as np
def GSFC_NK(data, **kwargs):
'''
Picker algorithm by NASA Goddard
Arguments must include the snowradar trace data itself (passed as a 1D float array) as well as
any parameters required by the algorithm for layer-picking
Only 2 picked layers are expected
1. air-snow layer pick (integer indexing the passed snowradar trace)
2. snow-ice layer pick (integer indexing the passed snowradar trace)
'''
as_pick = np.int64(0)
si_pick = np.int64(1)
return as_pick, si_pick
def NSIDC(data):
    '''
    This is a placeholder for the NASA Goddard product hosted at NSIDC.

    Arguments must include the snowradar trace data itself (passed as a 1D
    float array) as well as any parameters required by the algorithm for
    layer-picking

    Only 2 picked layers are expected
        1. air-snow layer pick (integer indexing the passed snowradar trace)
        2. snow-ice layer pick (integer indexing the passed snowradar trace)
    '''
    # Placeholder picks: the first two sample indices.
    return np.int64(0), np.int64(1)
bd76b89d924fd22fe938563772ebe5dbb2038dcc | 205 | py | Python | xarray_schema/types.py | carbonplan/xarray-schema | 0af2699ccc06b8028acff138deb452000c7bea86 | [
"MIT"
] | 16 | 2021-11-08T17:38:16.000Z | 2022-03-22T12:06:01.000Z | xarray_schema/types.py | carbonplan/xarray-schema | 0af2699ccc06b8028acff138deb452000c7bea86 | [
"MIT"
] | 25 | 2021-11-18T06:18:43.000Z | 2022-03-31T06:26:42.000Z | xarray_schema/types.py | carbonplan/xarray-schema | 0af2699ccc06b8028acff138deb452000c7bea86 | [
"MIT"
] | 2 | 2021-11-08T20:46:01.000Z | 2022-03-14T16:50:53.000Z | from typing import Dict, Tuple, Union
from numpy.typing import DTypeLike # noqa: F401
# Shared type aliases for schema components.
# NOTE(review): Tuple[Union[str, None]] describes a 1-tuple; if dims/shape
# may have arbitrary length these were likely meant to end with ", ..." --
# confirm against the validators that consume them.
DimsT = Tuple[Union[str, None]]
ShapeT = Tuple[Union[int, None]]
# Chunking spec: True/False, or a per-dimension mapping of chunk sizes.
ChunksT = Union[bool, Dict[str, Union[int, None]]]
| 25.625 | 50 | 0.721951 | from typing import Dict, Tuple, Union
from numpy.typing import DTypeLike # noqa: F401
DimsT = Tuple[Union[str, None]]
ShapeT = Tuple[Union[int, None]]
ChunksT = Union[bool, Dict[str, Union[int, None]]]
| 0 | 0 | 0 |
932caf98ac61173ef299a1e94e97dd848d7eb492 | 195 | py | Python | modules/feedback/models/__init__.py | heolin123/funcrowd | 20167783de208394c09ed0429a5f02ec6dd79c42 | [
"MIT"
] | null | null | null | modules/feedback/models/__init__.py | heolin123/funcrowd | 20167783de208394c09ed0429a5f02ec6dd79c42 | [
"MIT"
] | 11 | 2019-11-12T23:26:45.000Z | 2021-06-10T17:37:23.000Z | modules/feedback/models/__init__.py | heolin123/funcrowd | 20167783de208394c09ed0429a5f02ec6dd79c42 | [
"MIT"
] | null | null | null | from modules.feedback.models.feedback_field import FeedbackField
from modules.feedback.models.feedback_score_field import FeedbackScoreField
from modules.feedback.models.feedback import Feedback
| 48.75 | 75 | 0.892308 | from modules.feedback.models.feedback_field import FeedbackField
from modules.feedback.models.feedback_score_field import FeedbackScoreField
from modules.feedback.models.feedback import Feedback
| 0 | 0 | 0 |
997f062bf1fa44535ddbcb9d6600f67079e41a9f | 1,008 | py | Python | setup.py | Hi-king/redshells | de1bd1c7c95355a0b7f5920dacad7351d065bd4d | [
"MIT"
] | null | null | null | setup.py | Hi-king/redshells | de1bd1c7c95355a0b7f5920dacad7351d065bd4d | [
"MIT"
] | null | null | null | setup.py | Hi-king/redshells | de1bd1c7c95355a0b7f5920dacad7351d065bd4d | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
from codecs import open
from os import path
# Directory containing this setup.py; used to resolve the README path.
here = path.abspath(path.dirname(__file__))
# Use the README as the long description shown on PyPI.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# Runtime dependencies (pinned where newer releases are known to break).
install_requires = [
    'luigi',
    'gokart>=0.1.20',
    'python-dateutil==2.7.5',
    'pandas',
    'scipy',
    'numpy',
    'gensim',
    'scikit-learn',
    'tensorflow>=1.13.1, <2.0',
    'tqdm',
    'optuna==0.6.0',
    'docutils==0.15' # to avoid dependency conflict
]
# Version is derived from git tags via setuptools_scm (use_scm_version).
setup(
    name='redshells',
    use_scm_version=True,
    setup_requires=['setuptools_scm'],
    description='Tasks which are defined using gokart.TaskOnKart. The tasks can be used with data pipeline library "luigi".',
    long_description=long_description,
    long_description_content_type="text/markdown",
    author='M3, inc.',
    url='https://github.com/m3dev/redshells',
    license='MIT License',
    packages=find_packages(),
    install_requires=install_requires,
    test_suite='test')
| 27.243243 | 125 | 0.673611 | from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
install_requires = [
'luigi',
'gokart>=0.1.20',
'python-dateutil==2.7.5',
'pandas',
'scipy',
'numpy',
'gensim',
'scikit-learn',
'tensorflow>=1.13.1, <2.0',
'tqdm',
'optuna==0.6.0',
'docutils==0.15' # to avoid dependency conflict
]
setup(
name='redshells',
use_scm_version=True,
setup_requires=['setuptools_scm'],
description='Tasks which are defined using gokart.TaskOnKart. The tasks can be used with data pipeline library "luigi".',
long_description=long_description,
long_description_content_type="text/markdown",
author='M3, inc.',
url='https://github.com/m3dev/redshells',
license='MIT License',
packages=find_packages(),
install_requires=install_requires,
test_suite='test')
| 0 | 0 | 0 |
4522b1410f732f09ed17b24de0fed7e2b5760f44 | 3,020 | py | Python | rio_viz/scripts/cli.py | bradh/rio-viz | bc73a06c09e49b19541543f1e758109466ca17f8 | [
"MIT"
] | null | null | null | rio_viz/scripts/cli.py | bradh/rio-viz | bc73a06c09e49b19541543f1e758109466ca17f8 | [
"MIT"
] | null | null | null | rio_viz/scripts/cli.py | bradh/rio-viz | bc73a06c09e49b19541543f1e758109466ca17f8 | [
"MIT"
] | null | null | null | """rio_viz.cli."""
import os
import tempfile
from contextlib import contextmanager, ExitStack
import click
from rio_viz import app, raster
from rio_cogeo.cogeo import cog_validate, cog_translate
from rio_cogeo.profiles import cog_profiles
@contextmanager
def TemporaryRasterFile(dst_path, suffix=".tif"):
    """Yield a closed named temporary file beside dst_path; delete it after."""
    tmp = tempfile.NamedTemporaryFile(
        dir=os.path.dirname(dst_path), suffix=suffix, delete=False
    )
    tmp.close()
    try:
        yield tmp
    finally:
        os.remove(tmp.name)
class MbxTokenType(click.ParamType):
    """Mapbox token type."""

    name = "token"

    def convert(self, value, param, ctx):
        """Validate that the token is empty or a public (pk.*) Mapbox token.

        Returns:
            "" for falsy input, otherwise the validated token string.

        Raises:
            click.ClickException: If the token is not a public token.
        """
        if not value:
            return ""
        # BUG FIX: explicit check instead of `assert`, which is silently
        # stripped when Python runs with -O, disabling validation.
        if isinstance(value, str) and value.startswith("pk"):
            return value
        raise click.ClickException(
            "Mapbox access token must be public (pk). "
            "Please sign up at https://www.mapbox.com/signup/ to get a public token. "
            "If you already have an account, you can retrieve your "
            "token at https://www.mapbox.com/account/."
        )
@click.command()
@click.argument("src_paths", type=str, nargs=-1, required=True)
@click.option(
    "--style",
    type=click.Choice(["satellite", "basic"]),
    default="basic",
    help="Mapbox basemap",
)
@click.option("--port", type=int, default=8080, help="Webserver port (default: 8080)")
@click.option(
    "--mapbox-token",
    type=MbxTokenType(),
    metavar="TOKEN",
    default=lambda: os.environ.get("MAPBOX_ACCESS_TOKEN", ""),
    help="Pass Mapbox token",
)
@click.option("--no-check", is_flag=True, help="Ignore COG validation")
def viz(src_paths, style, port, mapbox_token, no_check):
    """Rasterio Viz cli.

    Serves the given rasters in a local web viewer. Unless --no-check is
    given, inputs that are not valid Cloud Optimized GeoTIFFs are first
    converted to temporary COGs, which live for the duration of the
    server (the ExitStack below removes them on exit).
    """
    # Check if cog
    src_paths = list(src_paths)
    with ExitStack() as ctx:
        for ii, src_path in enumerate(src_paths):
            if not no_check and not cog_validate(src_path):
                # create tmp COG
                click.echo("create temporaty COG")
                tmp_path = ctx.enter_context(TemporaryRasterFile(src_path))
                output_profile = cog_profiles.get("deflate")
                output_profile.update(dict(blockxsize="256", blockysize="256"))
                config = dict(
                    GDAL_TIFF_INTERNAL_MASK=os.environ.get(
                        "GDAL_TIFF_INTERNAL_MASK", True
                    ),
                    GDAL_TIFF_OVR_BLOCKSIZE="128",
                )
                cog_translate(src_path, tmp_path.name, output_profile, config=config)
                # Serve the temporary COG in place of the original input.
                src_paths[ii] = tmp_path.name
        # The server runs inside the ExitStack so temp files stay alive
        # until application.start() returns.
        src_dst = raster.RasterTiles(src_paths)
        application = app.viz(src_dst, token=mapbox_token, port=port, style=style)
        url = application.get_template_url()
        click.echo(f"Viewer started at {url}", err=True)
        click.launch(url)
        application.start()
| 31.789474 | 90 | 0.613245 | """rio_viz.cli."""
import os
import tempfile
from contextlib import contextmanager, ExitStack
import click
from rio_viz import app, raster
from rio_cogeo.cogeo import cog_validate, cog_translate
from rio_cogeo.profiles import cog_profiles
@contextmanager
def TemporaryRasterFile(dst_path, suffix=".tif"):
"""Create temporary file."""
fileobj = tempfile.NamedTemporaryFile(
dir=os.path.dirname(dst_path), suffix=suffix, delete=False
)
fileobj.close()
try:
yield fileobj
finally:
os.remove(fileobj.name)
class MbxTokenType(click.ParamType):
"""Mapbox token type."""
name = "token"
def convert(self, value, param, ctx):
"""Validate token."""
try:
if not value:
return ""
assert value.startswith("pk")
return value
except (AttributeError, AssertionError):
raise click.ClickException(
"Mapbox access token must be public (pk). "
"Please sign up at https://www.mapbox.com/signup/ to get a public token. "
"If you already have an account, you can retreive your "
"token at https://www.mapbox.com/account/."
)
@click.command()
@click.argument("src_paths", type=str, nargs=-1, required=True)
@click.option(
"--style",
type=click.Choice(["satellite", "basic"]),
default="basic",
help="Mapbox basemap",
)
@click.option("--port", type=int, default=8080, help="Webserver port (default: 8080)")
@click.option(
"--mapbox-token",
type=MbxTokenType(),
metavar="TOKEN",
default=lambda: os.environ.get("MAPBOX_ACCESS_TOKEN", ""),
help="Pass Mapbox token",
)
@click.option("--no-check", is_flag=True, help="Ignore COG validation")
def viz(src_paths, style, port, mapbox_token, no_check):
"""Rasterio Viz cli."""
# Check if cog
src_paths = list(src_paths)
with ExitStack() as ctx:
for ii, src_path in enumerate(src_paths):
if not no_check and not cog_validate(src_path):
# create tmp COG
click.echo("create temporaty COG")
tmp_path = ctx.enter_context(TemporaryRasterFile(src_path))
output_profile = cog_profiles.get("deflate")
output_profile.update(dict(blockxsize="256", blockysize="256"))
config = dict(
GDAL_TIFF_INTERNAL_MASK=os.environ.get(
"GDAL_TIFF_INTERNAL_MASK", True
),
GDAL_TIFF_OVR_BLOCKSIZE="128",
)
cog_translate(src_path, tmp_path.name, output_profile, config=config)
src_paths[ii] = tmp_path.name
src_dst = raster.RasterTiles(src_paths)
application = app.viz(src_dst, token=mapbox_token, port=port, style=style)
url = application.get_template_url()
click.echo(f"Viewer started at {url}", err=True)
click.launch(url)
application.start()
| 0 | 0 | 0 |
ddf8a80c9ef40019459999285c100aa0e114c9b4 | 1,040 | py | Python | solo/migrations/0001_initial.py | Saket-Komawar/Forex | a470cb59cc0e9d4ed71713975cd8d2f778540100 | [
"Apache-2.0"
] | null | null | null | solo/migrations/0001_initial.py | Saket-Komawar/Forex | a470cb59cc0e9d4ed71713975cd8d2f778540100 | [
"Apache-2.0"
] | 2 | 2020-06-05T17:30:33.000Z | 2021-06-10T18:53:59.000Z | solo/migrations/0001_initial.py | Saket-Komawar/Forex | a470cb59cc0e9d4ed71713975cd8d2f778540100 | [
"Apache-2.0"
] | 2 | 2017-02-01T09:24:27.000Z | 2017-02-05T17:09:02.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-24 09:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
| 29.714286 | 114 | 0.567308 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-24 09:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial auto-generated migration creating the IEC and search tables."""
    # NOTE(review): migration files are normally left untouched once applied.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='IEC',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('iecNo', models.IntegerField()),
                ('name', models.CharField(max_length=512)),
            ],
        ),
        migrations.CreateModel(
            name='search',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('query', models.CharField(max_length=256)),
                ('score', models.FloatField()),
                # default timestamp is evaluated at row creation time
                ('date', models.DateTimeField(default=django.utils.timezone.now)),
            ],
        ),
    ]
| 0 | 832 | 23 |
23fc7876e4e6e46fbb838f9d38c3d65f4947fba6 | 26,623 | py | Python | sequana/scripts/main.py | ddesvillechabrol/sequana | c92d6071e782df78566fffd15ad619c8c3df6fe3 | [
"BSD-3-Clause"
] | null | null | null | sequana/scripts/main.py | ddesvillechabrol/sequana | c92d6071e782df78566fffd15ad619c8c3df6fe3 | [
"BSD-3-Clause"
] | null | null | null | sequana/scripts/main.py | ddesvillechabrol/sequana | c92d6071e782df78566fffd15ad619c8c3df6fe3 | [
"BSD-3-Clause"
] | null | null | null | #-*- coding: utf-8 -*-
import sys
import os
import glob
import click
#import click_completion
#click_completion.init()
from sequana import version
import functools
__all__ = ["main"]
import sequana
import colorlog
logger = colorlog.getLogger(__name__)
# This can be used by all commands as a simple decorator
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])  # click: accept -h as well as --help
import pkg_resources
# Collect installed sequana pipeline distributions (packages named "sequana*")
# and append their versions to the string shown by --version.
pipelines = [item.key for item in pkg_resources.working_set if item.key.startswith("sequana")]
if len(pipelines):
    version +="\nThe following pipelines are installed:\n"
    for item in pkg_resources.working_set:
        # skip the core "sequana" package itself; only list the pipelines
        if item.key.startswith("sequana") and item.key != 'sequana':
            version += "\n - {} version: {}".format(item.key, item.version)
@click.group(context_settings=CONTEXT_SETTINGS)
@click.version_option(version=version)
def main(**kwargs):
    """\bThis is the main entry point for a set of Sequana applications.
    Pipelines such as sequana_rnaseq, sequana_variant_calling have their own
    application and help.
    In addition, more advanced tools such as sequana_taxonomy or
    sequana_coverage have their own standalone.
    """
    # Top-level click group: sub-commands register themselves via @main.command()
    pass
@main.command()
@click.argument('filename', type=click.STRING, nargs=-1)
@click.option("-o", "--output",
    help="filename where to save results. to be used with --head, --tail")
@click.option("--count-reads", is_flag=True)
@click.option("--head", type=click.INT,
    help='number of reads to extract from the head')
@click.option("--merge", is_flag=True)
@click.option("--tail", type=click.INT,
    help="number of reads to extract from the tail")
def fastq(**kwargs):
    """Set of useful utilities for FastQ manipulation.
    Input file can be gzipped or not. The --output-file
    """
    from sequana.fastq import FastQ

    filenames = kwargs['filename']
    # users may provide a wildcards such as "A*gz" or list of files.
    if len(filenames) == 1:
        # if existing files or glob, a glob would give the same answer.
        filenames = glob.glob(filenames[0])

    # Fail early on missing inputs.  The previous code discarded the result of
    # os.path.exists, so missing files only surfaced later inside FastQ.
    for filename in filenames:
        if not os.path.exists(filename):
            logger.error(f"{filename} does not exist")
            sys.exit(1)

    if kwargs['count_reads']:
        for filename in filenames:
            f = FastQ(filename)
            Nreads = f.count_reads()
            # a FastQ record is always 4 lines
            Nlines = Nreads * 4
            # report the actual filename (was a hard-coded "(unknown)" placeholder)
            print(f"Number of reads in {filename}: {Nreads}")
            print(f"Number of lines in {filename}: {Nlines}")
    elif kwargs['head']:
        # the output check does not depend on the file, so do it once up front
        if kwargs['output'] is None:
            logger.error("Please use --output to tell us where to save the results")
            sys.exit(1)
        N = kwargs['head'] * 4
        for filename in filenames:
            f = FastQ(filename)
            f.extract_head(N=N, output_filename=kwargs['output'])
    elif kwargs['tail']: #pragma: no cover
        raise NotImplementedError
    elif kwargs['merge']:
        import subprocess
        # merge all input files (assuming gz extension)
        extensions = [filename.split(".")[-1] for filename in filenames]
        if set(extensions) != set(['gz']):
            raise ValueError("Your input FastQ files must be zipped")
        output_filename = kwargs['output']
        if output_filename is None:
            logger.error("You must use --output filename.gz")
            sys.exit(1)
        if output_filename.endswith(".gz") is False:
            raise ValueError("your output file must end in .gz")
        # equivalent of:  zcat file1.gz file2.gz ... | pigz > output.gz
        # use a context manager so the output handle is always closed, and
        # close our copy of the pipe so zcat gets SIGPIPE if pigz dies.
        p1 = subprocess.Popen(['zcat'] + list(filenames), stdout=subprocess.PIPE)
        with open(output_filename, 'wb') as fout:
            subprocess.run(['pigz'], stdin=p1.stdout, stdout=fout)
        p1.stdout.close()
        p1.wait()
    else: #pragma: no cover
        print("Use one of the commands")
@main.command()
@click.argument('name', type=click.STRING)
@click.option('--check', is_flag=True)
@click.option('--extract-adapters', is_flag=True)
@click.option('--quick-fix', is_flag=True)
@click.option('--output', default=None)
def samplesheet(**kwargs):
    """Utilities to manipulate sample sheet"""
    from sequana.iem import IEM

    name = kwargs['name']
    if kwargs['check']:
        # validate() raises if the sample sheet is malformed
        sheet = IEM(name)
        sheet.validate()
        logger.info("SampleSheet looks correct")
    elif kwargs["extract_adapters"]:
        # dump the adapters of the sample sheet as a FastA file
        IEM(name).to_fasta()
    elif kwargs["quick_fix"]:
        # tryme=True lets IEM load a broken sheet so it can be repaired
        sheet = IEM(name, tryme=True)
        filename = kwargs['output'] if kwargs['output'] else name + ".fixed"
        logger.info("Saving fixed version in {}".format(filename))
        sheet.quick_fix(output_filename=filename)
# This will be a complex command to provide HTML summary page for
# input files (e.g. bam), or results from pipelines. For each module,
# we should have corresponding option that starts with the module's name
# This can also takes as input various types of data (e.g. FastA)
@main.command()
@click.argument("name", type=click.Path(exists=True), nargs=-1)
@click.option("--module",
        required=False,
        type=click.Choice(["bamqc", "bam", "fasta", "fastq", "gff"]))
def summary(**kwargs):
    """Create a HTML report for various type of NGS formats.
    \b
    * bamqc
    * fastq
    This will process all files in the given pattern (in back quotes)
    sequentially and produce one HTML file per input file.
    Other module all work in the same way. For example, for FastQ files::
        sequana summary one_input.fastq
        sequana summary `ls *fastq`
    """
    names = kwargs['name']
    module = kwargs['module']

    # Guess the module from the first file's extension when not given explicitly.
    if module is None:
        first = names[0]
        if first.endswith(('fastq.gz', '.fastq')):
            module = "fastq"
        elif first.endswith('.bam'):
            module = "bam"
        elif first.endswith(('.gff', 'gff3')):
            module = "gff"
        elif first.endswith(('fasta.gz', '.fasta')):
            module = "fasta"
        else:
            # fixed typo in the user-facing message ("fimes" -> "files")
            logger.error("please use --module to tell us about the input files")
            sys.exit(1)

    if module == "bamqc":
        from sequana.modules_report.bamqc import BAMQCModule
        for name in names:
            print(f"Processing {name}")
            # instantiating the module writes bamqc.html as a side effect
            BAMQCModule(name, "bamqc.html")
    elif module == "fasta":  # no dedicated module; FastA.summary() prints the stats
        from sequana.fasta import FastA
        for name in names:
            FastA(name).summary()
    elif module == "fastq":  # no dedicated module; use FastQC directly
        from sequana import FastQC
        for filename in names:
            ff = FastQC(filename, max_sample=1e6, verbose=False)
            print(ff.get_stats())
    elif module == "bam":
        import pandas as pd
        from sequana import BAM
        for filename in names:
            stats = BAM(filename).get_stats()
            # one-row DataFrame prints more readably than a dict
            print(pd.Series(stats).to_frame().T)
    elif module == "gff":
        from sequana import GFF3
        for filename in names:
            ff = GFF3(filename)
            print("#filename: {}".format(filename))
            print("#Number of entries per genetic type:")
            print(ff.df.value_counts('type').to_string())
            print("#Number of duplicated attribute (if any) per attribute:")
            ff.get_duplicated_attributes_per_type()
@main.command()
@click.option("--file1", type=click.Path(),
    default=None, required=True,
    help="""The first input RNA-seq table to compare""")
@click.option("--file2", type=click.Path(),
    default=None, required=True,
    help="""The second input RNA-seq table to compare""")
@common_logger
def rnaseq_compare(**kwargs):
    """Compare 2 tables created by the 'sequana rnadiff' command"""
    from sequana.compare import RNADiffCompare
    from pylab import savefig

    # volcano plot highlighting the differences between the two analyses
    comparator = RNADiffCompare(kwargs['file1'], kwargs['file2'])
    comparator.plot_volcano_differences()
    savefig("sequana_rnaseq_compare_volcano.png", dpi=200)
@main.command()
@click.option("--annotation", type=click.Path(),
    default=None,
    help="""The annotation GFF file used to perform the feature count""")
@click.option("--report-only",
    is_flag=True,
    default=False,
    help="""Generate report assuming results are already present""")
@click.option("--output-directory", type=click.Path(),
    default="rnadiff",
    help="""Output directory where are saved the results""")
@click.option("--features", type=click.Path(),
    default="all_features.out",
    help="""The Counts from feature counts. This should be the output of the
sequana_rnaseq pipeline all_features.out """)
#FIXME I think it would be better to have a single file with multiple columns
#for alternative condition (specified using the "condition" option)
@click.option("--design", type=click.Path(),
    default="design.csv", help="""It should have been generated by sequana_rnaseq. If
not, it must be a comma separated file with two columns. One for the label to be
found in the --features file and one column with the condition to which it
belong. E.g. with 3 replicates and 2 conditions. It should look like:
\b
label,condition
WT1,WT
WT2,WT
WT3,WT
file1,cond1
fileother,cond1
""")
@click.option("--condition", type=str,
    default="condition", help="""The name of the column in design.csv to use as condition
for the differential analysis. Default is 'condition'""")
@click.option("--feature-name",
    default="gene",
    help="""The feature name compatible with your GFF. Default is 'gene'""")
@click.option("--attribute-name",
    default="ID",
    help="""The attribute used as identifier. compatible with your GFF. Default is 'ID'""")
@click.option("--reference", type=click.Path(),
    default=None,
    help="""The reference to test DGE against. If provided, conditions not
involving the reference are ignored. Otherwise all combinations are
tested""")
@click.option("--comparisons", type=click.Path(),
    default=None,
    help="""Not yet implemented. By default, all comparisons are computed""")
@click.option("--cooks-cutoff", type=click.Path(),
    default=None,
    help="""if none, let DESeq2 choose the cutoff""")
@click.option("--independent-filtering/--no-independent-filtering",
    default=False,
    help="""Do not perform independent_filtering by default. low counts may not
have adjusted pvalues otherwise""")
@click.option("--beta-prior/--no-beta-prior",
    default=False,
    help="Use beta priori or not. Default is no beta prior")
@click.option("--fit-type",
    default="parametric",
    help="DESeq2 type of fit. Default is 'parametric'")
@common_logger
def rnadiff(**kwargs):
    """Perform RNA-seq differential analysis.
    This command performs the differential analysis of gene expression. The
    analysis is performed on feature counts generated by a RNA-seq analysis
    (see e.g. https://github.com/sequana/rnaseq pipeline). The analysis is
    performed by DESeq2. A HTML report is created as well as a set of output
    files, including summary table of the analysis.
    To perform this analysis, you will need the GFF file used during the RNA-seq
    analysis, the feature stored altogether in a single file, an experimental
    design file, and the feature and attribute used during the feature count.
    Here is an example:
    \b
        sequana rnadiff --annotation Lepto.gff
            --design design.csv --features all_features.out
            --feature-name gene --attribute-name ID
    """
    import pandas as pd
    from sequana.featurecounts import FeatureCount
    from sequana.rnadiff import RNADiffAnalysis, RNADesign
    from sequana.modules_report.rnadiff import RNAdiffModule
    logger.setLevel(kwargs['logger'])
    outdir = kwargs['output_directory']
    feature = kwargs['feature_name']
    attribute = kwargs['attribute_name']
    design = kwargs['design']
    reference=kwargs['reference']
    # If a GFF is provided, fail fast when the requested feature/attribute is
    # not present, before launching the (long) DESeq2 analysis.
    if kwargs['annotation']:
        gff = kwargs['annotation']
        logger.info(f"Checking annotation file")
        from sequana import GFF3
        g = GFF3(gff) #.save_annotation_to_csv()
        if feature not in g.features:
            logger.critical(f"{feature} not found in the GFF. Most probably a wrong feature name")
        attributes = g.get_attributes(feature)
        if attribute not in attributes:
            logger.critical(f"{attribute} not found in the GFF for the provided feature. Most probably a wrong feature name. Please change --attribute-name option or do not provide any GFF")
            sys.exit(1)
    else:
        gff = None
    # The design maps each sample label to its condition; comparisons default
    # to the design's own pairings (optionally restricted by --reference).
    design_check = RNADesign(design, reference=reference)
    compa_csv = kwargs['comparisons']
    if compa_csv:
        # explicit comparisons file: two columns "alternative" and "reference"
        compa_df = pd.read_csv(compa_csv)
        comparisons = list(zip(compa_df["alternative"], compa_df["reference"]))
    else:
        comparisons = design_check.comparisons
    # Unless --report-only, run the full analysis: build the counts matrix
    # then run DESeq2 through RNADiffAnalysis.
    if kwargs['report_only'] is False:
        logger.info(f"Processing features counts and saving into {outdir}/light_counts.csv")
        fc = FeatureCount(kwargs['features'])
        from easydev import mkdirs
        mkdirs(f"{outdir}")
        fc.rnadiff_df.to_csv(f"{outdir}/light_counts.csv")
        logger.info(f"Differential analysis to be saved into ./{outdir}")
        for k in sorted(["independent_filtering", "beta_prior",
                "cooks_cutoff", "fit_type", "reference"]):
            logger.info(f"  Parameter {k} set to : {kwargs[k]}")
        r = RNADiffAnalysis(f"{outdir}/light_counts.csv", design,
                condition=kwargs["condition"],
                comparisons=comparisons,
                fc_feature=feature,
                fc_attribute=attribute,
                outdir=outdir,
                gff=gff,
                cooks_cutoff=kwargs.get("cooks_cutoff"),
                independent_filtering=kwargs.get("independent_filtering"),
                beta_prior=kwargs.get("beta_prior"),
                fit_type=kwargs.get('fit_type')
                )
        logger.info(f"Saving output files into {outdir}/rnadiff.csv")
        # r.run() delegates to R/DESeq2; on failure the rnadiff.err/out files
        # are kept for debugging (they are removed only on success below)
        try:
            results = r.run()
            results.to_csv(f"{outdir}/rnadiff.csv")
        except Exception as err:
            logger.error(err)
            sys.exit(1)
        else:
            logger.info(f"DGE done.")
            # cleanup if succesful
            os.remove(f"{outdir}/rnadiff.err")
            os.remove(f"{outdir}/rnadiff.out")
            os.remove(f"{outdir}/rnadiff_light.R")
    # Always (re)generate the HTML report from the results found in outdir.
    logger.info(f"Reporting. Saving in rnadiff.html")
    report = RNAdiffModule(outdir, kwargs['design'], gff=gff,
                fc_attribute=attribute,
                fc_feature=feature,
                alpha=0.05,
                log2_fc=0,
                condition=kwargs["condition"],
                annot_cols=None,
                pattern="*vs*_degs_DESeq2.csv")
@main.command()
@click.option("--mart", default="ENSEMBL_MART_ENSEMBL",
    show_default=True,
    help="A valid mart name")
@click.option("--dataset", required=True,
    help="A valid dataset name. e.g. mmusculus_gene_ensembl, hsapiens_gene_ensembl")
@click.option("--attributes", multiple=True,
    default=["ensembl_gene_id","go_id","entrezgene_id","external_gene_name"],
    show_default=True,
    help="A list of valid attributes to look for in the dataset")
@click.option("--output", default=None,
    help="""by default save results into a CSV file named
biomart_<dataset>_<YEAR>_<MONTH>_<DAY>.csv""")
@common_logger
def biomart(**kwargs):
    """Retrieve information from biomart and save into CSV file
    This command uses BioMart from BioServices to introspect a MART service
    (--mart) and a specific dataset (default to mmusculus_gene_ensembl). Then,
    for all ensembl IDs, it will fetch the requested attributes (--attributes).
    Finally, it saves the CSV file into an output file (--output). This takes
    about 5-10 minutes to retrieve the data depending on the connection.
    """
    # removed a leftover debug print(kwargs) that cluttered the output
    logger.setLevel(kwargs["logger"])

    mart = kwargs['mart']
    attributes = kwargs['attributes']
    dataset = kwargs["dataset"]

    from sequana.enrichment import Mart
    # query the MART service (network access; may take several minutes)
    conv = Mart(dataset, mart)
    df = conv.query(attributes)
    # filename=None lets Mart build the dated default name documented above
    conv.save(df, filename=kwargs['output'])
@main.command()
@click.option("-i", "--input", required=True,
    help="The salmon input file.")
@click.option("-o", "--output", required=True,
    help="The feature counts output file")
@click.option("-f", "--gff", required=True,
    help="A GFF file compatible with your salmon file")
@click.option("-a", "--attribute", default="ID",
    help="A valid attribute to be found in the GFF file and salmon input")
@click.option("--feature", default="gene",
    help="A valid feature")
def salmon(**kwargs):
    """Convert output of Salmon into a feature counts file """
    from sequana import salmon

    # NOTE: --feature previously also declared the short flag "-a", which
    # collided with -a/--attribute; the duplicate short flag was removed.
    salmon_input = kwargs['input']
    output = kwargs["output"]
    if os.path.exists(salmon_input) is False:
        logger.critical("Input file does not exists ({})".format(salmon_input))
        # abort now; Salmon() would otherwise fail later with a raw traceback
        sys.exit(1)
    gff = kwargs["gff"]
    attribute = kwargs['attribute']
    feature = kwargs['feature']

    # reads file generated by salmon and generated count file as expected by
    # DGE.
    s = salmon.Salmon(salmon_input, gff)
    s.save_feature_counts(output, feature=feature, attribute=attribute)
@main.command()
@click.option("-i", "--input", required=True)
@click.option("-o", "--output", required=True)
def gtf_fixer(**kwargs):
    """Reads GTF and fix known issues (exon and genes uniqueness)"""
    from sequana.gtf import GTFFixer

    # fix the GTF in place and report what was changed
    fixer = GTFFixer(kwargs['input'])
    print(fixer.fix_exons_uniqueness(kwargs['output']))
# This will be a complex command to provide HTML summary page for
# input files (e.g. bam), or results from pipelines. For each module,
# we should have corresponding option that starts with the module's name
# This can also takes as input various types of data (e.g. FastA)
@main.command()
@click.argument("name", type=click.Path(exists=True),
    nargs=1)
@click.option("--annotation-attribute", type=click.STRING,
    #required=True,
    default="Name",
    help="a valid attribute name found in the annotation")
@click.option("--panther-taxon", type=click.INT,
    #required=True,
    default=0,
    help="a valid taxon identifiers")
@click.option("--kegg-name", type=click.STRING,
    default=None,
    help="a valid KEGG name (automatically filled for 9606 (human) and 10090 (mmusculus)")
@click.option("--log2-foldchange-cutoff", type=click.FLOAT,
    default=1,
    show_default=True,
    help="remove events with absolute log2 fold change below this value")
@click.option("--padj-cutoff", type=click.FLOAT,
    default=0.05,
    show_default=True,
    help="remove events with pvalue above this value default (0.05).")
@click.option("--biomart", type=click.STRING,
    default=None,
    help="""you may need a biomart mapping of your identifier for the kegg
pathways analysis. If you do not have this file, you can use 'sequana biomart'
command""")
@click.option("--go-only", type=click.BOOL,
    default=False,
    is_flag=True,
    help="""to run only panther db enrichment""")
@click.option("--plot-linearx", type=click.BOOL,
    default=False,
    is_flag=True,
    help="""Default is log2 fold enrichment in the plots. use this to use linear scale""")
@click.option("--compute-levels", type=click.BOOL,
    default=False,
    is_flag=True,
    help="""to compute the GO levels (slow) in the plots""")
@click.option("--max-genes", type=click.INT,
    default=2000,
    help="""Maximum number of genes (up or down) to use in PantherDB, which is limited to about 3000""")
@click.option("--kegg-only", type=click.BOOL,
    default=False,
    is_flag=True,
    help="""to run only kegg pathways enrichment""")
@click.option("--kegg-pathways-directory", type=click.Path(),
    default=None,
    help="""a place where to find the pathways for each organism""")
@click.option("--kegg-background", type=click.INT,
    default=None,
    help="""a background for kegg enrichment. If None, set to number of genes found in KEGG""")
@common_logger
def enrichment(**kwargs):
    """Create a HTML report for various sequana out
    \b
    * enrichment: the output of RNADiff pipeline
    Example for the enrichment module:
        sequana enrichment rnadiff.csv --panther-taxon 10090
            --log2-foldchange-cutoff 2 --kegg-only
    The KEGG pathways are loaded and it may take time. Once done, they are saved
    in kegg_pathways/organism and be loaded next time:
        sequana enrichment rnadiff/rnadiff.csv
            --panther-taxon 189518 \
            --log2-foldchange-cutoff 2 --kegg-only \
            --kegg-name lbi\
            --annotation file.gff
    """
    import pandas as pd
    from sequana.modules_report.enrichment import Enrichment
    logger.setLevel(kwargs['logger'])

    taxon = kwargs['panther_taxon']
    if taxon == 0:
        logger.error("You must provide a taxon with --panther-taxon")
        return
    keggname = kwargs['kegg_name']
    # parameters forwarded to the Enrichment report module
    params = {"padj": kwargs['padj_cutoff'],
              "log2_fc": kwargs['log2_foldchange_cutoff'],
              "max_entries": kwargs['max_genes'],
              "mapper": kwargs['biomart'],
              "kegg_background": kwargs['kegg_background'],
              "preload_directory": kwargs['kegg_pathways_directory'],
              "plot_logx": not kwargs['plot_linearx'],
              "plot_compute_levels": kwargs['compute_levels'],
              }

    # validate optional input paths before doing any heavy work
    filename = kwargs['biomart']
    if filename and os.path.exists(filename) is False:
        logger.error("{} does not exists".format(filename))
        sys.exit(1)
    filename = kwargs['kegg_pathways_directory']
    if filename and os.path.exists(filename) is False:
        logger.error("{} does not exists".format(filename))
        sys.exit(1)

    rnadiff_file = kwargs['name']
    logger.info(f"Reading {rnadiff_file}")
    # two header rows: level 0 is the comparison, level 1 the column name
    rnadiff = pd.read_csv(rnadiff_file, index_col=0, header=[0,1])

    # now that we have loaded all results from a rnadiff analysis, let us
    # perform the enrichment for each comparison found in the file
    annot_col = kwargs['annotation_attribute']
    Nmax = kwargs['max_genes']

    from sequana.utils import config
    for compa in rnadiff.columns.levels[0]:
        # skip the shared metadata columns; every other level-0 entry is a
        # pairwise comparison to enrich
        if compa not in ['statistics', 'annotation']:
            # get gene list
            df = rnadiff[compa].copy()
            # we add the annotation
            for x in rnadiff['annotation'].columns:
                df[x] = rnadiff['annotation'][x]

            # keep only significant events (|log2FC| and adjusted p-value)
            padj = params['padj']
            log2fc = params['log2_fc']
            df = df.query("(log2FoldChange >=@log2fc or log2FoldChange<=-@log2fc) and padj <= @padj")
            df.reset_index(inplace=True)

            # strongest events first; PantherDB is limited to ~3000 entries
            dfup = df.sort_values("log2FoldChange", ascending=False)
            up_genes = list(dfup.query("log2FoldChange > 0")[annot_col])[:Nmax]

            dfdown = df.sort_values("log2FoldChange", ascending=True)
            down_genes = list(dfdown.query("log2FoldChange < 0")[annot_col])[:Nmax]

            all_genes = list(
                df.sort_values("log2FoldChange", key=abs, ascending=False)[annot_col]
            )[:Nmax]

            gene_dict = {
                "up": up_genes,
                "down": down_genes,
                "all": all_genes,
            }

            Nup = len(up_genes)
            Ndown = len(down_genes)
            N = Nup + Ndown
            logger.info(f"Computing enrichment for the {compa} case")
            logger.info(f"Found {Nup} genes up-regulated, {Ndown} down regulated ({N} in total).")
            config.output_dir = f"enrichment/{compa}"
            # was "try: os.mkdir(...) except: pass" -- the bare except silently
            # hid any OSError; exist_ok covers the already-exists case only
            os.makedirs("enrichment", exist_ok=True)
            # instantiating the module writes the HTML report (side effect)
            Enrichment(gene_dict, taxon, df,
                       kegg_organism=keggname,
                       enrichment_params=params,
                       go_only=kwargs["go_only"],
                       kegg_only=kwargs["kegg_only"],
                       command=" ".join(['sequana'] + sys.argv[1:]))
@main.command()
@click.option("--search-kegg", type=click.Path(),
    default=None,
    help="""Search a pattern amongst all KEGG organism""")
@click.option("--search-panther", type=click.Path(),
    default=None,
    help="""Search a pattern amongst all KEGG organism""")
@common_logger
def taxonomy(**kwargs):
    """Tool to retrieve taxonomic information.
    sequana taxonomy --search-kegg leptospira
    """
    if kwargs['search_kegg']:
        from sequana.kegg import KEGGHelper
        # KEGG search is case-insensitive on our side: lower-case the pattern
        helper = KEGGHelper()
        print(helper.search(kwargs['search_kegg'].lower()))
    elif kwargs['search_panther']:
        import pandas as pd
        from sequana import sequana_data
        df = pd.read_csv(sequana_data("panther.csv"), index_col=0)
        pattern = kwargs['search_panther']
        # substring match over the three name columns
        hits = set()
        hits.update(df[[pattern in x for x in df['name']]].index)
        hits.update(df[[pattern in x for x in df.short_name]].index)
        hits.update(df[[pattern in x for x in df.long_name]].index)
        if len(hits) == 0:
            # maybe it is a taxon ID ?
            hits.update(df[[pattern in str(x) for x in df.taxon_id]].index)
        print(df.loc[hits])
@main.command()
@click.argument("gff_filename", type=click.Path(exists=True))
@common_logger
def gff2gtf(**kwargs):
    """Convert a GFF file into GTF
    This is experimental conversion. Use with care.
    """
    filename = kwargs["gff_filename"]
    # explicit check instead of an assert (asserts are stripped under "python -O")
    if not filename.endswith((".gff", ".gff3")):
        logger.error("Input file must have a .gff or .gff3 extension")
        sys.exit(1)

    from sequana.gff3 import GFF3
    g = GFF3(filename)
    # output next to the current directory, same basename, .gtf extension
    suffix = ".gff3" if filename.endswith(".gff3") else ".gff"
    g.to_gtf(os.path.basename(filename).replace(suffix, ".gtf"))
| 36.469863 | 190 | 0.645232 | #-*- coding: utf-8 -*-
import sys
import os
import glob
import click
#import click_completion
#click_completion.init()
from sequana import version
import functools
__all__ = ["main"]
import sequana
import colorlog
logger = colorlog.getLogger(__name__)
# This can be used by all commands as a simple decorator
def common_logger(func):
    """Click decorator adding the shared ``--logger`` verbosity option."""
    @click.option("--logger", default="INFO",
        type=click.Choice(["INFO", "DEBUG", "WARNING", "CRITICAL", "ERROR"]))
    @functools.wraps(func)
    def inner(*args, **kwargs):
        # plain pass-through; the option value arrives in kwargs['logger']
        return func(*args, **kwargs)
    return inner
def get_env_vars(ctx, args, incomplete):
    """Shell-completion helper: environment variable names containing *incomplete*."""
    matches = []
    for key in os.environ:
        if incomplete in key:
            matches.append(key)
    return matches
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])  # click: accept -h as well as --help
import pkg_resources
# Collect installed sequana pipeline distributions (packages named "sequana*")
# and append their versions to the string shown by --version.
pipelines = [item.key for item in pkg_resources.working_set if item.key.startswith("sequana")]
if len(pipelines):
    version +="\nThe following pipelines are installed:\n"
    for item in pkg_resources.working_set:
        # skip the core "sequana" package itself; only list the pipelines
        if item.key.startswith("sequana") and item.key != 'sequana':
            version += "\n - {} version: {}".format(item.key, item.version)
@click.group(context_settings=CONTEXT_SETTINGS)
@click.version_option(version=version)
def main(**kwargs):
    """\bThis is the main entry point for a set of Sequana applications.
    Pipelines such as sequana_rnaseq, sequana_variant_calling have their own
    application and help.
    In addition, more advanced tools such as sequana_taxonomy or
    sequana_coverage have their own standalone.
    """
    # Top-level click group: sub-commands register themselves via @main.command()
    pass
@main.command()
@click.argument('filename', type=click.STRING, nargs=-1)
@click.option("-o", "--output",
    help="filename where to save results. to be used with --head, --tail")
@click.option("--count-reads", is_flag=True)
@click.option("--head", type=click.INT,
    help='number of reads to extract from the head')
@click.option("--merge", is_flag=True)
@click.option("--tail", type=click.INT,
    help="number of reads to extract from the tail")
def fastq(**kwargs):
    """Set of useful utilities for FastQ manipulation.
    Input file can be gzipped or not. The --output-file
    """
    from sequana.fastq import FastQ
    filenames = kwargs['filename']
    # users may provide a wildcards such as "A*gz" or list of files.
    if len(filenames) == 1:
        # if existing files or glob, a glob would give the same answer.
        filenames = glob.glob(filenames[0])
    # NOTE(review): the result of os.path.exists is discarded, so missing
    # inputs are not reported here; presumably meant to validate — confirm.
    for filename in filenames:
        os.path.exists(filename)
    # could be simplified calling count_reads only once
    if kwargs['count_reads']:
        for filename in filenames:
            f = FastQ(filename)
            Nreads = f.count_reads()
            # a FastQ record is always 4 lines
            Nlines = Nreads * 4
            # NOTE(review): "(unknown)" looks like a placeholder — probably
            # meant to interpolate {filename}; confirm against upstream.
            print(f"Number of reads in (unknown): {Nreads}")
            print(f"Number of lines in (unknown): {Nlines}")
    elif kwargs['head']:
        for filename in filenames:
            f = FastQ(filename)
            if kwargs['output'] is None:
                logger.error("Please use --output to tell us where to save the results")
                sys.exit(1)
            # 4 lines per FastQ read
            N = kwargs['head'] * 4
            f.extract_head(N=N, output_filename=kwargs['output'])
    elif kwargs['tail']: #pragma: no cover
        raise NotImplementedError
    elif kwargs['merge']:
        import subprocess
        # merge all input files (assuming gz extension)
        extensions = [filename.split(".")[-1] for filename in filenames]
        if set(extensions) != set(['gz']):
            raise ValueError("Your input FastQ files must be zipped")
        output_filename = kwargs['output']
        if output_filename is None:
            logger.error("You must use --output filename.gz")
            sys.exit(1)
        if output_filename.endswith(".gz") is False:
            raise ValueError("your output file must end in .gz")
        # equivalent of:  zcat file1.gz ... | pigz > output.gz
        # NOTE(review): fout is never closed explicitly and the zcat pipe end
        # stays open; consider a context manager here.
        p1 = subprocess.Popen(['zcat'] + list(filenames), stdout=subprocess.PIPE)
        fout = open(output_filename, 'wb')
        p2 = subprocess.run(['pigz'], stdin=p1.stdout, stdout=fout)
    else: #pragma: no cover
        print("Use one of the commands")
@main.command()
@click.argument('name', type=click.STRING)
@click.option('--check', is_flag=True)
@click.option('--extract-adapters', is_flag=True)
@click.option('--quick-fix', is_flag=True)
@click.option('--output', default=None)
def samplesheet(**kwargs):
    """Utilities to manipulate sample sheet"""
    name = kwargs['name']
    from sequana.iem import IEM
    if kwargs['check']:
        # validate() raises if the sample sheet is malformed
        iem = IEM(name)
        iem.validate()
        logger.info("SampleSheet looks correct")
    elif kwargs["extract_adapters"]:
        # dump the adapters of the sample sheet as a FastA file
        iem = IEM(name)
        iem.to_fasta()
    elif kwargs["quick_fix"]:
        # tryme=True lets IEM load a broken sheet so it can be repaired
        iem = IEM(name, tryme=True)
        if kwargs['output']:
            filename = kwargs['output']
        else:
            filename = name + ".fixed"
        logger.info("Saving fixed version in {}".format(filename))
        iem.quick_fix(output_filename=filename)
# This will be a complex command to provide HTML summary page for
# input files (e.g. bam), or results from pipelines. For each module,
# we should have corresponding option that starts with the module's name
# This can also takes as input various types of data (e.g. FastA)
@main.command()
@click.argument("name", type=click.Path(exists=True), nargs=-1)
@click.option("--module",
        required=False,
        type=click.Choice(["bamqc", "bam", "fasta", "fastq", "gff"]))
def summary(**kwargs):
    """Create a HTML report for various type of NGS formats.
    \b
    * bamqc
    * fastq
    This will process all files in the given pattern (in back quotes)
    sequentially and procude one HTML file per input file.
    Other module all work in the same way. For example, for FastQ files::
        sequana summary one_input.fastq
        sequana summary `ls *fastq`
    """
    names = kwargs['name']
    module = kwargs['module']
    # guess the module from the first file's extension when not given explicitly
    if module is None:
        if names[0].endswith('fastq.gz') or names[0].endswith('.fastq'):
            module = "fastq"
        elif names[0].endswith('.bam'):
            module = "bam"
        elif names[0].endswith('.gff') or names[0].endswith('gff3'):
            module = "gff"
        elif names[0].endswith('fasta.gz') or names[0].endswith('.fasta'):
            module = "fasta"
        else:
            # NOTE(review): typo in user-facing message ("fimes" -> "files")
            logger.error("please use --module to tell us about the input fimes")
            sys.exit(1)
    if module == "bamqc":
        for name in names:
            print(f"Processing {name}")
            from sequana.modules_report.bamqc import BAMQCModule
            # instantiating the module writes bamqc.html as a side effect
            report = BAMQCModule(name, "bamqc.html")
    elif module == "fasta": # there is no module per se. HEre we just call FastA.summary()
        from sequana.fasta import FastA
        for name in names:
            f = FastA(name)
            f.summary()
    elif module == "fastq": # there is no module per se. HEre we just call FastA.summary()
        # NOTE(review): FastQ import below is unused; FastQC does the work
        from sequana.fastq import FastQ
        from sequana import FastQC
        for filename in names:
            ff = FastQC(filename, max_sample=1e6, verbose=False)
            stats = ff.get_stats()
            print(stats)
    elif module == "bam":
        import pandas as pd
        from sequana import BAM
        for filename in names:
            ff = BAM(filename)
            stats = ff.get_stats()
            # one-row DataFrame prints more readably than a dict
            df = pd.Series(stats).to_frame().T
            print(df)
    elif module == "gff":
        # NOTE(review): pandas import below is unused in this branch
        import pandas as pd
        from sequana import GFF3
        for filename in names:
            ff = GFF3(filename)
            print("#filename: {}".format(filename))
            print("#Number of entries per genetic type:")
            print(ff.df.value_counts('type').to_string())
            print("#Number of duplicated attribute (if any) per attribute:")
            ff.get_duplicated_attributes_per_type()
@main.command()
@click.option("--file1", type=click.Path(),
    default=None, required=True,
    help="""The first input RNA-seq table to compare""")
@click.option("--file2", type=click.Path(),
    default=None, required=True,
    help="""The second input RNA-seq table to compare""")
@common_logger
def rnaseq_compare(**kwargs):
    """Compare 2 tables created by the 'sequana rnadiff' command"""
    from sequana.compare import RNADiffCompare
    # volcano plot highlighting the differences between the two analyses;
    # the figure is saved in the current working directory
    c = RNADiffCompare(kwargs['file1'], kwargs['file2'])
    c.plot_volcano_differences()
    from pylab import savefig
    savefig("sequana_rnaseq_compare_volcano.png", dpi=200)
@main.command()
@click.option("--annotation", type=click.Path(),
    default=None,
    help="""The annotation GFF file used to perform the feature count""")
@click.option("--report-only",
    is_flag=True,
    default=False,
    help="""Generate report assuming results are already present""")
@click.option("--output-directory", type=click.Path(),
    default="rnadiff",
    help="""Output directory where are saved the results""")
@click.option("--features", type=click.Path(),
    default="all_features.out",
    help="""The Counts from feature counts. This should be the output of the
sequana_rnaseq pipeline all_features.out """)
#FIXME I think it would be better to have a single file with multiple columns
#for alternative condition (specified using the "condition" option)
@click.option("--design", type=click.Path(),
    default="design.csv", help="""It should have been generated by sequana_rnaseq. If
not, it must be a comma separated file with two columns. One for the label to be
found in the --features file and one column with the condition to which it
belong. E.g. with 3 replicates and 2 conditions. It should look like:
\b
label,condition
WT1,WT
WT2,WT
WT3,WT
file1,cond1
fileother,cond1
""")
@click.option("--condition", type=str,
    default="condition", help="""The name of the column in design.csv to use as condition
for the differential analysis. Default is 'condition'""")
@click.option("--feature-name",
    default="gene",
    help="""The feature name compatible with your GFF. Default is 'gene'""")
@click.option("--attribute-name",
    default="ID",
    help="""The attribute used as identifier. compatible with your GFF. Default is 'ID'""")
@click.option("--reference", type=click.Path(),
    default=None,
    help="""The reference to test DGE against. If provided, conditions not
involving the reference are ignored. Otherwise all combinations are
tested""")
@click.option("--comparisons", type=click.Path(),
    default=None,
    help="""Not yet implemented. By default, all comparisons are computed""")
@click.option("--cooks-cutoff", type=click.Path(),
    default=None,
    help="""if none, let DESeq2 choose the cutoff""")
@click.option("--independent-filtering/--no-independent-filtering",
    default=False,
    help="""Do not perform independent_filtering by default. low counts may not
have adjusted pvalues otherwise""")
@click.option("--beta-prior/--no-beta-prior",
    default=False,
    help="Use beta priori or not. Default is no beta prior")
@click.option("--fit-type",
    default="parametric",
    help="DESeq2 type of fit. Default is 'parametric'")
@common_logger
def rnadiff(**kwargs):
    """Perform RNA-seq differential analysis.
    This command performs the differential analysis of gene expression. The
    analysis is performed on feature counts generated by a RNA-seq analysis
    (see e.g. https://github.com/sequana/rnaseq pipeline). The analysis is
    performed by DESeq2. A HTML report is created as well as a set of output
    files, including summary table of the analysis.
    To perform this analysis, you will need the GFF file used during the RNA-seq
    analysis, the feature stored altogether in a single file, an experimental
    design file, and the feature and attribute used during the feature count.
    Here is an example:
    \b
        sequana rnadiff --annotation Lepto.gff
            --design design.csv --features all_features.out
            --feature-name gene --attribute-name ID
    """
    import pandas as pd
    from sequana.featurecounts import FeatureCount
    from sequana.rnadiff import RNADiffAnalysis, RNADesign
    from sequana.modules_report.rnadiff import RNAdiffModule

    logger.setLevel(kwargs['logger'])

    # Collect the main CLI parameters once for readability.
    outdir = kwargs['output_directory']
    feature = kwargs['feature_name']
    attribute = kwargs['attribute_name']
    design = kwargs['design']
    reference=kwargs['reference']

    if kwargs['annotation']:
        # Validate the feature/attribute names against the GFF before the
        # (long) DESeq2 run, so obvious typos fail early.
        gff = kwargs['annotation']
        logger.info(f"Checking annotation file")
        from sequana import GFF3
        g = GFF3(gff) #.save_annotation_to_csv()
        if feature not in g.features:
            # NOTE(review): a wrong feature name only logs critical here and
            # keeps going, while the attribute check below exits. Confirm
            # whether a sys.exit(1) was also intended for this branch.
            logger.critical(f"{feature} not found in the GFF. Most probably a wrong feature name")
        attributes = g.get_attributes(feature)
        if attribute not in attributes:
            logger.critical(f"{attribute} not found in the GFF for the provided feature. Most probably a wrong feature name. Please change --attribute-name option or do not provide any GFF")
            sys.exit(1)
    else:
        gff = None

    # The design is validated even in --report-only mode; it also provides
    # the list of condition comparisons when no --comparisons file is given.
    design_check = RNADesign(design, reference=reference)
    compa_csv = kwargs['comparisons']
    if compa_csv:
        # Expected columns: "alternative" and "reference" (one row per test).
        compa_df = pd.read_csv(compa_csv)
        comparisons = list(zip(compa_df["alternative"], compa_df["reference"]))
    else:
        comparisons = design_check.comparisons

    if kwargs['report_only'] is False:
        # Full run: convert the feature-count table, run DESeq2 through
        # RNADiffAnalysis, and save rnadiff.csv in the output directory.
        logger.info(f"Processing features counts and saving into {outdir}/light_counts.csv")
        fc = FeatureCount(kwargs['features'])
        from easydev import mkdirs
        mkdirs(f"{outdir}")
        fc.rnadiff_df.to_csv(f"{outdir}/light_counts.csv")

        logger.info(f"Differential analysis to be saved into ./{outdir}")
        for k in sorted(["independent_filtering", "beta_prior",
            "cooks_cutoff", "fit_type", "reference"]):
            logger.info(f"  Parameter {k} set to : {kwargs[k]}")
        r = RNADiffAnalysis(f"{outdir}/light_counts.csv", design,
                condition=kwargs["condition"],
                comparisons=comparisons,
                fc_feature=feature,
                fc_attribute=attribute,
                outdir=outdir,
                gff=gff,
                cooks_cutoff=kwargs.get("cooks_cutoff"),
                independent_filtering=kwargs.get("independent_filtering"),
                beta_prior=kwargs.get("beta_prior"),
                fit_type=kwargs.get('fit_type')
                )

        logger.info(f"Saving output files into {outdir}/rnadiff.csv")
        try:
            results = r.run()
            results.to_csv(f"{outdir}/rnadiff.csv")
        except Exception as err:
            logger.error(err)
            sys.exit(1)
        else:
            logger.info(f"DGE done.")
            # cleanup if succesful: the R driver logs are only removed when
            # the analysis succeeded, so they remain available for debugging
            # a failed run.
            os.remove(f"{outdir}/rnadiff.err")
            os.remove(f"{outdir}/rnadiff.out")
            os.remove(f"{outdir}/rnadiff_light.R")

    # Report generation happens in both modes (fresh run or --report-only).
    logger.info(f"Reporting. Saving in rnadiff.html")
    report = RNAdiffModule(outdir, kwargs['design'], gff=gff,
                fc_attribute=attribute,
                fc_feature=feature,
                alpha=0.05,
                log2_fc=0,
                condition=kwargs["condition"],
                annot_cols=None,
                pattern="*vs*_degs_DESeq2.csv")
@main.command()
@click.option("--mart", default="ENSEMBL_MART_ENSEMBL",
    show_default=True,
    help="A valid mart name")
@click.option("--dataset", required=True,
    help="A valid dataset name. e.g. mmusculus_gene_ensembl, hsapiens_gene_ensembl")
@click.option("--attributes", multiple=True,
    default=["ensembl_gene_id","go_id","entrezgene_id","external_gene_name"],
    show_default=True,
    help="A list of valid attributes to look for in the dataset")
@click.option("--output", default=None,
    help="""by default save results into a CSV file named
biomart_<dataset>_<YEAR>_<MONTH>_<DAY>.csv""")
@common_logger
def biomart(**kwargs):
    """Retrieve information from biomart and save into CSV file
    This command uses BioMart from BioServices to introspect a MART service
    (--mart) and a specific dataset (default to mmusculus_gene_ensembl). Then,
    for all ensembl IDs, it will fetch the requested attributes (--attributes).
    Finally, it saves the CSV file into an output file (--output). This takes
    about 5-10 minutes to retrieve the data depending on the connection.
    """
    # Fix: removed a leftover debug `print(kwargs)` that dumped all CLI
    # arguments to stdout on every invocation.
    logger.setLevel(kwargs["logger"])

    mart = kwargs['mart']
    attributes = kwargs['attributes']
    dataset = kwargs["dataset"]

    # Query the BioMart service and save the resulting table; Mart.save
    # derives the default file name when --output is not provided.
    from sequana.enrichment import Mart
    conv = Mart(dataset, mart)
    df = conv.query(attributes)
    conv.save(df, filename=kwargs['output'])
@main.command()
@click.option("-i", "--input", required=True,
    help="The salmon input file.")
@click.option("-o", "--output", required=True,
    help="The feature counts output file")
@click.option("-f", "--gff", required=True,
    help="A GFF file compatible with your salmon file")
@click.option("-a", "--attribute", default="ID",
    help="A valid attribute to be found in the GFF file and salmon input")
# Fix: the feature option used "-a" as well, clashing with --attribute's
# short flag; use "-F" so both options remain reachable from the CLI.
@click.option("-F", "--feature", default="gene",
    help="A valid feature")
def salmon(**kwargs):
    """Convert output of Salmon into a feature counts file """
    from sequana import salmon
    salmon_input = kwargs['input']
    output = kwargs["output"]
    if os.path.exists(salmon_input) is False:
        logger.critical("Input file does not exists ({})".format(salmon_input))
        # Fix: abort immediately instead of failing later inside Salmon()
        sys.exit(1)
    gff = kwargs["gff"]
    attribute = kwargs['attribute']
    feature = kwargs['feature']

    # reads file generated by salmon and generated count file as expected by
    # DGE.
    s = salmon.Salmon(salmon_input, gff)
    s.save_feature_counts(output, feature=feature, attribute=attribute)
@main.command()
@click.option("-i", "--input", required=True)
@click.option("-o", "--output", required=True)
def gtf_fixer(**kwargs):
    """Reads GTF and fix known issues (exon and genes uniqueness)"""
    from sequana.gtf import GTFFixer

    # Rewrite the input GTF with unique exon/gene entries and show the
    # summary returned by the fixer.
    fixer = GTFFixer(kwargs['input'])
    outcome = fixer.fix_exons_uniqueness(kwargs['output'])
    print(outcome)
# This will be a complex command to provide HTML summary page for
# input files (e.g. bam), or results from pipelines. For each module,
# we should have corresponding option that starts with the module's name
# This can also takes as input various types of data (e.g. FastA)
@main.command()
@click.argument("name", type=click.Path(exists=True),
    nargs=1)
@click.option("--annotation-attribute", type=click.STRING,
    #required=True,
    default="Name",
    help="a valid taxon identifiers")
@click.option("--panther-taxon", type=click.INT,
    #required=True,
    default=0,
    help="a valid taxon identifiers")
@click.option("--kegg-name", type=click.STRING,
    default=None,
    help="a valid KEGG name (automatically filled for 9606 (human) and 10090 (mmusculus)")
@click.option("--log2-foldchange-cutoff", type=click.FLOAT,
    default=1,
    show_default=True,
    help="remove events with absolute log2 fold change below this value")
@click.option("--padj-cutoff", type=click.FLOAT,
    default=0.05,
    show_default=True,
    help="remove events with pvalue above this value default (0.05).")
@click.option("--biomart", type=click.STRING,
    default=None,
    help="""you may need a biomart mapping of your identifier for the kegg
pathways analysis. If you do not have this file, you can use 'sequana biomart'
command""")
@click.option("--go-only", type=click.BOOL,
    default=False,
    is_flag=True,
    help="""to run only panther db enrichment""")
@click.option("--plot-linearx", type=click.BOOL,
    default=False,
    is_flag=True,
    help="""Default is log2 fold enrichment in the plots. use this to use linear scale""")
@click.option("--compute-levels", type=click.BOOL,
    default=False,
    is_flag=True,
    help="""to compute the GO levels (slow) in the plots""")
@click.option("--max-genes", type=click.INT,
    default=2000,
    help="""Maximum number of genes (up or down) to use in PantherDB, which is limited to about 3000""")
@click.option("--kegg-only", type=click.BOOL,
    default=False,
    is_flag=True,
    help="""to run only kegg pathways enrichment""")
@click.option("--kegg-pathways-directory", type=click.Path(),
    default=None,
    help="""a place where to find the pathways for each organism""")
@click.option("--kegg-background", type=click.INT,
    default=None,
    help="""a background for kegg enrichment. If None, set to number of genes found in KEGG""")
@common_logger
def enrichment(**kwargs):
    """Create a HTML report for various sequana out
    \b
    * enrichment: the output of RNADiff pipeline
    Example for the enrichment module:
        sequana enrichment rnadiff.csv --panther-taxon 10090
            --log2-foldchange-cutoff 2 --kegg-only
    The KEGG pathways are loaded and it may take time. Once done, they are saved
    in kegg_pathways/organism and be loaded next time:
        sequana enrichment rnadiff/rnadiff.csv
            --panther-taxon 189518 \
            --log2-foldchange-cutoff 2 --kegg-only \
            --kegg-name lbi\
            --annotation file.gff
    """
    # Fixes in this revision:
    # * help typos: "abobe" -> "above", "patways" -> "pathways"
    # * the bare `try: os.mkdir(...) / except: pass` is replaced with
    #   os.makedirs(..., exist_ok=True) so real errors (e.g. permissions)
    #   are no longer silently swallowed.
    import pandas as pd
    from sequana.modules_report.enrichment import Enrichment
    logger.setLevel(kwargs['logger'])

    # A taxon is mandatory for the PantherDB queries.
    taxon = kwargs['panther_taxon']
    if taxon == 0:
        logger.error("You must provide a taxon with --panther-taxon")
        return
    keggname = kwargs['kegg_name']
    params = {"padj": kwargs['padj_cutoff'],
              "log2_fc": kwargs['log2_foldchange_cutoff'],
              "max_entries": kwargs['max_genes'],
              "mapper": kwargs['biomart'],
              "kegg_background": kwargs['kegg_background'],
              "preload_directory": kwargs['kegg_pathways_directory'],
              "plot_logx": not kwargs['plot_linearx'],
              "plot_compute_levels": kwargs['compute_levels'],
              }

    # Validate optional input paths before starting any long computation.
    filename = kwargs['biomart']
    if filename and os.path.exists(filename) is False:
        logger.error("{} does not exists".format(filename))
        sys.exit(1)

    filename = kwargs['kegg_pathways_directory']
    if filename and os.path.exists(filename) is False:
        logger.error("{} does not exists".format(filename))
        sys.exit(1)

    rnadiff_file = kwargs['name']
    logger.info(f"Reading {rnadiff_file}")
    # Two header rows: level 0 is the comparison name (or 'statistics' /
    # 'annotation'), level 1 the actual column names.
    rnadiff = pd.read_csv(rnadiff_file, index_col=0, header=[0,1])

    # now that we have loaded all results from a rnadiff analysis, let us
    # perform the enrichment for each comparison found in the file
    annot_col = kwargs['annotation_attribute']
    Nmax = kwargs['max_genes']

    from sequana.utils import config

    for compa in rnadiff.columns.levels[0]:
        if compa not in ['statistics', 'annotation']:
            # get gene list
            df = rnadiff[compa].copy()
            # we add the annotation
            for x in rnadiff['annotation'].columns:
                df[x] = rnadiff['annotation'][x]

            # now we find the gene lists, keeping only significant events
            # (|log2FC| >= cutoff and padj <= cutoff)
            padj = params['padj']
            log2fc = params['log2_fc']
            df = df.query("(log2FoldChange >=@log2fc or log2FoldChange<=-@log2fc) and padj <= @padj")
            df.reset_index(inplace=True)

            # PantherDB limits the submitted lists, hence the [:Nmax] caps.
            dfup = df.sort_values("log2FoldChange", ascending=False)
            up_genes = list(dfup.query("log2FoldChange > 0")[annot_col])[:Nmax]

            dfdown = df.sort_values("log2FoldChange", ascending=True)
            down_genes = list(dfdown.query("log2FoldChange < 0")[annot_col])[:Nmax]

            all_genes = list(
                df.sort_values("log2FoldChange", key=abs,ascending=False)[annot_col]
            )[:Nmax]

            gene_dict = {
                "up": up_genes,
                "down": down_genes,
                "all": all_genes,
            }

            Nup = len(up_genes)
            Ndown = len(down_genes)
            N = Nup + Ndown
            logger.info(f"Computing enrichment for the {compa} case")
            logger.info(f"Found {Nup} genes up-regulated, {Ndown} down regulated ({N} in total).")

            # One sub-directory per comparison under ./enrichment
            config.output_dir = f"enrichment/{compa}"
            os.makedirs("enrichment", exist_ok=True)

            report = Enrichment(gene_dict, taxon, df,
                        kegg_organism=keggname,
                        enrichment_params=params,
                        go_only=kwargs["go_only"],
                        kegg_only=kwargs["kegg_only"],
                        command=" ".join(['sequana'] + sys.argv[1:]))
@main.command()
@click.option("--search-kegg", type=click.Path(),
    default=None,
    help="""Search a pattern amongst all KEGG organism""")
# Fix: the help text was a copy-paste of the KEGG option's help.
@click.option("--search-panther", type=click.Path(),
    default=None,
    help="""Search a pattern amongst all Panther organisms""")
@common_logger
def taxonomy(**kwargs):
    """Tool to retrieve taxonomic information.
    sequana taxonomy --search-kegg leptospira
    """
    if kwargs['search_kegg']:
        # Case-insensitive search through the KEGG organism list.
        from sequana.kegg import KEGGHelper
        k = KEGGHelper()
        results = k.search(kwargs['search_kegg'].lower())
        print(results)
    elif kwargs['search_panther']:
        import pandas as pd
        from sequana import sequana_data
        df = pd.read_csv(sequana_data("panther.csv"), index_col=0)
        pattern = kwargs['search_panther']
        # Boolean masks: rows whose name / short_name / long_name contain
        # the pattern (substring match, case-sensitive).
        f1 = df[[pattern in x for x in df['name']]]
        f2 = df[[pattern in x for x in df.short_name]]
        f3 = df[[pattern in x for x in df.long_name]]
        indices = list(f1.index) + list(f2.index) + list(f3.index)
        if len(indices) == 0:
            # maybe it is a taxon ID ?
            f4 = df[[pattern in str(x) for x in df.taxon_id]]
            indices = list(f4.index)
        # De-duplicate rows matched by several columns.
        indices = set(indices)
        print(df.loc[indices])
@main.command()
@click.argument("gff_filename", type=click.Path(exists=True))
@common_logger
def gff2gtf(**kwargs):
    """Convert a GFF file into GTF
    This is experimental convertion. Use with care.
    """
    filename = kwargs["gff_filename"]

    # Fix: explicit validation instead of `assert`, which disappears under
    # `python -O` and produced an unhelpful AssertionError otherwise.
    if not (filename.endswith(".gff") or filename.endswith(".gff3")):
        logger.error("Input file must end in .gff or .gff3 ({})".format(filename))
        sys.exit(1)

    from sequana.gff3 import GFF3
    g = GFF3(filename)
    # The GTF is written in the current directory, next to nothing else:
    # only the basename of the input is reused with a .gtf extension.
    if filename.endswith(".gff"):
        g.to_gtf(os.path.basename(filename).replace(".gff", ".gtf"))
    elif filename.endswith(".gff3"):
        g.to_gtf(os.path.basename(filename).replace(".gff3", ".gtf"))
| 324 | 0 | 45 |
5d1e7dc3e866c93d2b4626e52d2811ac3aeffb26 | 430 | py | Python | src/euler_python_package/euler_python/medium/p463.py | wilsonify/euler | 5214b776175e6d76a7c6d8915d0e062d189d9b79 | [
"MIT"
] | null | null | null | src/euler_python_package/euler_python/medium/p463.py | wilsonify/euler | 5214b776175e6d76a7c6d8915d0e062d189d9b79 | [
"MIT"
] | null | null | null | src/euler_python_package/euler_python/medium/p463.py | wilsonify/euler | 5214b776175e6d76a7c6d8915d0e062d189d9b79 | [
"MIT"
def sum_of_bit_reversals(n):
    """Return S(n) = sum of f(i) for i = 1..n, where f(i) is the
    binary bit-reversal of i (OEIS A030101).

    The recurrences f(1)=1, f(3)=3, f(2n)=f(n), f(4n+1)=2f(2n+1)-f(n),
    f(4n+3)=3f(2n+1)-2f(n) define exactly the bit-reversal function, so
    S(n) is computed with bit-counting in O(log(n)^2) instead of
    iterating over all n values.
    """
    if n <= 0:
        return 0
    length = n.bit_length()
    total = 0
    # Complete groups: all numbers of bit-length l < length, i.e. the
    # ranges [2^(l-1), 2^l - 1].
    for l in range(1, length):
        # The leading bit of every member reverses onto bit 0 and
        # contributes 1 for each of the 2^(l-1) members.
        total += 1 << (l - 1)
        # Each of the l-1 free bits (position k) is set in half of the
        # 2^(l-1) members and reverses onto position l-1-k.
        for k in range(l - 1):
            total += (1 << (l - 2)) << (l - 1 - k)
    # Partial top group: numbers 2^(length-1) .. n, i.e. low bit
    # patterns 0 .. r.
    r = n - (1 << (length - 1))
    total += r + 1  # leading bit -> bit 0 for each of the r+1 numbers
    for k in range(length - 1):
        # Count of x in [0, r] with bit k set (standard block-counting).
        count = ((r + 1) >> (k + 1)) << k
        rem = (r + 1) & ((1 << (k + 1)) - 1)
        count += max(0, rem - (1 << k))
        total += count << (length - 1 - k)
    return total


def problem463():
    """
    The function $f$ is defined for all positive integers as follows:
    • $f(1)=1$
    • $f(3)=3$
    • $f(2n)=f(n)$
    • $f(4n + 1)=2f(2n + 1) - f(n)$
    • $f(4n + 3)=3f(2n + 1) - 2f(n)$
    The function $S(n)$ is defined as $\\sum_{i=1}^{n}f(i)$.
    $S(8)=22$ and $S(100)=3604$.
    Find $S(3^{37})$. Give the last 9 digits of your answer.
    """
    # f is the binary bit-reversal function; delegate the summation and
    # return only the last 9 digits, as requested.
    return sum_of_bit_reversals(3 ** 37) % 10 ** 9
| 21.5 | 69 | 0.439535 | def problem463():
"""
The function $f$ is defined for all positive integers as follows:
• $f(1)=1$
• $f(3)=3$
• $f(2n)=f(n)$
• $f(4n + 1)=2f(2n + 1) - f(n)$
• $f(4n + 3)=3f(2n + 1) - 2f(n)$
The function $S(n)$ is defined as $\\sum_{i=1}^{n}f(i)$.
$S(8)=22$ and $S(100)=3604$.
Find $S(3^{37})$. Give the last 9 digits of your answer.
"""
pass
| 0 | 0 | 0 |
f8167a1c1ad122d2aa27edbcc0972069992f92e2 | 792 | py | Python | src/testscrape.py | kelpabc123/UCSDCourseBot | 267900801b00c7f470fc6c2dfb0f0cfe30580b13 | [
"MIT"
] | null | null | null | src/testscrape.py | kelpabc123/UCSDCourseBot | 267900801b00c7f470fc6c2dfb0f0cfe30580b13 | [
"MIT"
] | null | null | null | src/testscrape.py | kelpabc123/UCSDCourseBot | 267900801b00c7f470fc6c2dfb0f0cfe30580b13 | [
"MIT"
] | null | null | null | #this is for testing, an internal testbench to verify implemented features
# Internal, manually-run testbench for the cscraper module: refresh the
# course CSV and exercise the three lookup helpers, printing the results.
# NOTE(review): BeautifulSoup, MyCourse, requests and csv are imported but
# unused in the active code below (only in the commented-out scraping
# experiment at the bottom) — candidates for cleanup.
from bs4 import BeautifulSoup
from course import MyCourse
import requests
import csv
import cscraper

# Refresh the local CSV cache before exercising the lookups.
cscraper.updateCSV()

# Lookup by course ID — mixed case on purpose, presumably to exercise
# case-insensitive matching (TODO confirm in cscraper.findCoursebyID).
courses = cscraper.findCoursebyID("MaTh 170")
for course in courses:
    course.printCourse()

# Lookup by a word in the course description.
courses = cscraper.findCoursebyDesc("experiment")
for course in courses:
    print(course)

# Lookup by a word in the course title.
courses = cscraper.findCoursebyTitle("Materials")
for course in courses:
    print(course)

# Commented-out scraping experiment kept for reference:
#url = "https://ucsd.edu/catalog/front/courses.html"
#req = requests.get(url)
#soup = BeautifulSoup(req.text, "lxml")
#for tag in soup.findAll("a", string="courses"):
#print(tag.get('href'))
#for tag in soup.findAll('p','course-name'):
# print(tag.text)
# print(tag.find_next('p','course-descriptions').text | 30.461538 | 74 | 0.744949 | #this is for testing, an internal testbench to verify implemented features
# Internal, manually-run testbench for the cscraper module: refresh the
# course CSV and exercise the three lookup helpers, printing the results.
from bs4 import BeautifulSoup
from course import MyCourse
import requests
import csv
import cscraper

# Refresh the local CSV cache before exercising the lookups.
cscraper.updateCSV()

# Lookup by course ID (mixed case, presumably testing case handling).
courses = cscraper.findCoursebyID("MaTh 170")
for course in courses:
    course.printCourse()

# Lookup by a word in the course description.
courses = cscraper.findCoursebyDesc("experiment")
for course in courses:
    print(course)

# Lookup by a word in the course title.
courses = cscraper.findCoursebyTitle("Materials")
for course in courses:
    print(course)

# Commented-out scraping experiment kept for reference:
#url = "https://ucsd.edu/catalog/front/courses.html"
#req = requests.get(url)
#soup = BeautifulSoup(req.text, "lxml")
#for tag in soup.findAll("a", string="courses"):
#print(tag.get('href'))
#for tag in soup.findAll('p','course-name'):
# print(tag.text)
# print(tag.find_next('p','course-descriptions').text | 0 | 0 | 0 |
b69bfe145a717d98c5a08a40b92361188a96eb44 | 1,455 | py | Python | pymantic/parsers/lark/base.py | machallboyd/pymantic | 159208f1a45d4bfda56adaa0cfdb555cadd89d39 | [
"BSD-3-Clause"
] | 12 | 2016-05-27T07:19:22.000Z | 2022-01-09T04:42:58.000Z | pymantic/parsers/lark/base.py | machallboyd/pymantic | 159208f1a45d4bfda56adaa0cfdb555cadd89d39 | [
"BSD-3-Clause"
] | 5 | 2015-09-07T16:07:46.000Z | 2021-01-04T21:03:18.000Z | pymantic/parsers/lark/base.py | machallboyd/pymantic | 159208f1a45d4bfda56adaa0cfdb555cadd89d39 | [
"BSD-3-Clause"
] | 2 | 2020-03-26T20:40:58.000Z | 2022-02-15T06:18:55.000Z | from pymantic.compat import (
binary_type,
)
class LarkParser(object):
    """Provide a consistent interface for parsing serialized RDF using one
    of the lark parsers.
    """

    def __init__(self, lark):
        """Wrap *lark*, a configured lark parser whose transformer builds
        graphs (must expose ``options.transformer``)."""
        # Fix: this method was missing, leaving self.lark undefined while
        # parse()/parse_string() rely on it.
        self.lark = lark

    def line_by_line_parser(self, stream):
        """Yield one parsed triple per non-empty line of *stream*."""
        # Fix: this generator was missing although parse() calls it for
        # file-like inputs.
        for line in stream:  # Equivalent to readline
            if line:
                yield next(self.lark.parse(line))

    def parse(self, string_or_stream, graph=None):
        """Parse a string or file-like object into RDF primitives and add
        them to either the provided graph or a new graph.
        """
        tf = self.lark.options.transformer
        try:
            if graph is None:
                graph = tf._make_graph()
            tf._prepare_parse(graph)
            if hasattr(string_or_stream, 'readline'):
                triples = self.line_by_line_parser(string_or_stream)
            else:
                # Presume string.
                triples = self.lark.parse(string_or_stream)
            graph.addAll(triples)
        finally:
            tf._cleanup_parse()
        return graph

    def parse_string(self, string_or_bytes, graph=None):
        """Parse a string, decoding it from bytes to UTF-8 if necessary.
        """
        if isinstance(string_or_bytes, binary_type):
            string = string_or_bytes.decode('utf-8')
        else:
            string = string_or_bytes
        return self.parse(string, graph)
| 28.529412 | 74 | 0.594502 | from pymantic.compat import (
binary_type,
)
class LarkParser(object):
    """Uniform front-end for the lark-based RDF parsers: feeds strings or
    file-like streams to a wrapped lark parser and collects the produced
    triples into a graph.
    """

    def __init__(self, lark):
        # The wrapped parser; its options.transformer builds/owns graphs.
        self.lark = lark

    def line_by_line_parser(self, stream):
        # Hand the parser one line at a time, skipping empty lines.
        for raw_line in stream:  # Equivalent to readline
            if not raw_line:
                continue
            yield next(self.lark.parse(raw_line))

    def parse(self, string_or_stream, graph=None):
        """Parse *string_or_stream* into RDF primitives, adding them to
        *graph* (or to a freshly created graph) and returning it.
        """
        transformer = self.lark.options.transformer
        try:
            if graph is None:
                graph = transformer._make_graph()
            transformer._prepare_parse(graph)
            is_stream = hasattr(string_or_stream, 'readline')
            produced = (self.line_by_line_parser(string_or_stream)
                        if is_stream
                        else self.lark.parse(string_or_stream))
            graph.addAll(produced)
        finally:
            transformer._cleanup_parse()
        return graph

    def parse_string(self, string_or_bytes, graph=None):
        """Like :meth:`parse`, decoding bytes input as UTF-8 first."""
        decoded = string_or_bytes
        if isinstance(decoded, binary_type):
            decoded = decoded.decode('utf-8')
        return self.parse(decoded, graph)
| 171 | 0 | 53 |
d387510f698295a09b2327c0611bdbb9f98c718e | 3,452 | py | Python | utils_ml/tests/unittest/data_processing/test_data_processing_utils.py | RodSernaPerez/utils_ml | 72251447f5c33f3873c746e98f532ce19a5b6003 | [
"MIT"
] | null | null | null | utils_ml/tests/unittest/data_processing/test_data_processing_utils.py | RodSernaPerez/utils_ml | 72251447f5c33f3873c746e98f532ce19a5b6003 | [
"MIT"
] | null | null | null | utils_ml/tests/unittest/data_processing/test_data_processing_utils.py | RodSernaPerez/utils_ml | 72251447f5c33f3873c746e98f532ce19a5b6003 | [
"MIT"
] | null | null | null | import unittest
import numpy as np
from utils_ml.src.data_processing import data_processing_utils
# Run the unittest discovery/runner when executed as a script.
if __name__ == '__main__':
    unittest.main()
| 33.514563 | 119 | 0.532155 | import unittest
import numpy as np
from utils_ml.src.data_processing import data_processing_utils
class TestDataProcessingUtils(unittest.TestCase):
    # Unit tests for data_processing_utils.convert_to_one_hot_vectors.
    # Each test checks (a) the container type of the result mirrors the
    # input (list in -> list out, ndarray in -> ndarray out) and (b) the
    # one-hot encoding itself; three distinct labels give width-3 rows.

    # Prefix shared by every per-test log/assertion message.
    MSG = "Data processing utils:: UNITTEST:: "

    def test_convert_to_one_hot_vectors_list_with_ints(self):
        # list of ints -> list of one-hot rows
        msg = self.MSG + "convert_to_one_hot_vectors_list_with_ints:: converts to one hot vectors a list of ints"
        input_list = [0, 2, 1, 2]
        expected_output = [[1, 0, 0],
                           [0, 0, 1],
                           [0, 1, 0],
                           [0, 0, 1]]
        result = data_processing_utils.convert_to_one_hot_vectors(input_list)
        self.assertTrue(isinstance(result, list),
                        msg + "::Error: type of output is not list")
        self.assertTrue(
            np.array_equal(
                np.asarray(result),
                np.asarray(expected_output)),
            msg + "::Error: result is not right")
        print(msg + "::OK")

    def test_convert_to_one_hot_vectors_list_with_strings(self):
        # list of strings -> same encoding as ints (labels sorted: 1,2,3)
        msg = self.MSG + "convert_to_one_hot_vectors_list_with_strings:: converts to one hot vectors a list of strings"
        input_list = ["1", "3", "2", "3"]
        expected_output = [[1, 0, 0],
                           [0, 0, 1],
                           [0, 1, 0],
                           [0, 0, 1]]
        result = data_processing_utils.convert_to_one_hot_vectors(input_list)
        self.assertTrue(isinstance(result, list),
                        msg + "::Error: type of output is not list")
        self.assertTrue(
            np.array_equal(
                np.asarray(result),
                np.asarray(expected_output)),
            msg + "::Error: result is not right")
        print(msg + "::OK")

    def test_convert_to_one_hot_vectors_numpy_with_ints(self):
        # ndarray of ints -> ndarray result
        msg = self.MSG + "convert_to_one_hot_vectors_numpy_with_ints:: converts to one hot vectors a numpy of ints"
        input_list = np.asarray([0, 2, 1, 2])
        # NOTE(review): expected_output stays a plain list here;
        # np.array_equal accepts array-likes, so this is fine.
        expected_output = [[1, 0, 0],
                           [0, 0, 1],
                           [0, 1, 0],
                           [0, 0, 1]]
        result = data_processing_utils.convert_to_one_hot_vectors(input_list)
        self.assertTrue(isinstance(result, np.ndarray),
                        msg + "::Error: type of output is not numpy")
        self.assertTrue(
            np.array_equal(
                result,
                expected_output),
            msg + "::Error: result is not right")
        print(msg + "::OK")

    def test_convert_to_one_hot_vectors_numpy_with_strings(self):
        # ndarray of strings -> ndarray result
        msg = self.MSG + \
            "convert_to_one_hot_vectors_numpy_with_strings:: converts to one hot vectors a numpy of strings"
        input_list = np.asarray(["1", "3", "2", "3"])
        expected_output = np.asarray([[1, 0, 0],
                                      [0, 0, 1],
                                      [0, 1, 0],
                                      [0, 0, 1]])
        result = data_processing_utils.convert_to_one_hot_vectors(input_list)
        self.assertTrue(isinstance(result, np.ndarray),
                        msg + "::Error: type of output is not numpy")
        self.assertTrue(
            np.array_equal(
                result,
                expected_output),
            msg + "::Error: result is not right")
        print(msg + "::OK")
# Run the unittest discovery/runner when executed as a script.
if __name__ == '__main__':
    unittest.main()
| 3,095 | 184 | 23 |
dc3f2757c0df48ed984cc60afdc04795aceea950 | 8,952 | py | Python | dependencies/FontTools/Mac/TTX.py | charlesmchen/typefacet | 8c6db26d0c599ece16f3704696811275120a4044 | [
"Apache-2.0"
] | 21 | 2015-01-16T05:10:02.000Z | 2021-06-11T20:48:15.000Z | dependencies/FontTools/Mac/TTX.py | charlesmchen/typefacet | 8c6db26d0c599ece16f3704696811275120a4044 | [
"Apache-2.0"
] | 1 | 2019-09-09T12:10:27.000Z | 2020-05-22T10:12:14.000Z | dependencies/FontTools/Mac/TTX.py | charlesmchen/typefacet | 8c6db26d0c599ece16f3704696811275120a4044 | [
"Apache-2.0"
] | 2 | 2015-05-03T04:51:08.000Z | 2018-08-24T08:28:53.000Z | """Main TTX application, Mac-only"""
#make sure we don't lose events to SIOUX
import MacOS
MacOS.EnableAppswitch(-1)
SetWatchCursor()
# a few constants
LOGFILENAME = "TTX errors"
PREFSFILENAME = "TTX preferences"
DEFAULTXMLOUTPUT = ":XML output"
DEFAULTTTOUTPUT = ":TrueType output"
import FrameWork
import MiniAEFrame, AppleEvents
import EasyDialogs
import Res
import macfs
import os
import sys, time
import re, string
import traceback
from fontTools import ttLib, version
from fontTools.ttLib import xmlImport
from fontTools.ttLib.macUtils import ProgressBar
abouttext = """\
TTX - The free TrueType to XML to TrueType converter
(version %s)
Copyright 1999-2001, Just van Rossum (Letterror)
just@letterror.com""" % version
default_prefs = """\
xmloutput: ":XML output"
ttoutput: ":TrueType output"
makesuitcases: 1
"""
sys.stdin = dummy_stdin()
# redirect all output to a log file
sys.stdout = sys.stderr = open(LOGFILENAME, "w", 0) # unbuffered
print "Starting TTX at " + time.ctime(time.time())
# fire it up!
ttx = TTX()
ttx.mainloop()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# clues for BuildApplication/MacFreeze.
#
# These modules somehow get imported, but we don't want/have them:
#
# macfreeze: exclude msvcrt
# macfreeze: exclude W
# macfreeze: exclude SOCKS
# macfreeze: exclude TERMIOS
# macfreeze: exclude termios
# macfreeze: exclude icglue
# macfreeze: exclude ce
#
# these modules are imported dynamically, so MacFreeze won't see them:
#
# macfreeze: include fontTools.ttLib.tables._c_m_a_p
# macfreeze: include fontTools.ttLib.tables._c_v_t
# macfreeze: include fontTools.ttLib.tables._f_p_g_m
# macfreeze: include fontTools.ttLib.tables._g_a_s_p
# macfreeze: include fontTools.ttLib.tables._g_l_y_f
# macfreeze: include fontTools.ttLib.tables._h_d_m_x
# macfreeze: include fontTools.ttLib.tables._h_e_a_d
# macfreeze: include fontTools.ttLib.tables._h_h_e_a
# macfreeze: include fontTools.ttLib.tables._h_m_t_x
# macfreeze: include fontTools.ttLib.tables._k_e_r_n
# macfreeze: include fontTools.ttLib.tables._l_o_c_a
# macfreeze: include fontTools.ttLib.tables._m_a_x_p
# macfreeze: include fontTools.ttLib.tables._n_a_m_e
# macfreeze: include fontTools.ttLib.tables._p_o_s_t
# macfreeze: include fontTools.ttLib.tables._p_r_e_p
# macfreeze: include fontTools.ttLib.tables._v_h_e_a
# macfreeze: include fontTools.ttLib.tables._v_m_t_x
# macfreeze: include fontTools.ttLib.tables.L_T_S_H_
# macfreeze: include fontTools.ttLib.tables.O_S_2f_2
# macfreeze: include fontTools.ttLib.tables.T_S_I__0
# macfreeze: include fontTools.ttLib.tables.T_S_I__1
# macfreeze: include fontTools.ttLib.tables.T_S_I__2
# macfreeze: include fontTools.ttLib.tables.T_S_I__3
# macfreeze: include fontTools.ttLib.tables.T_S_I__5
# macfreeze: include fontTools.ttLib.tables.C_F_F_
| 27.71517 | 93 | 0.705541 | """Main TTX application, Mac-only"""
#make sure we don't lose events to SIOUX
import MacOS
MacOS.EnableAppswitch(-1)
def SetWatchCursor():
    """Switch the Mac mouse pointer to the busy 'watch' cursor."""
    import Qd, QuickDraw
    Qd.SetCursor(Qd.GetCursor(QuickDraw.watchCursor).data)
def SetArrowCursor():
    """Restore the Mac mouse pointer to the standard arrow cursor."""
    import Qd
    Qd.SetCursor(Qd.qd.arrow)
SetWatchCursor()
# a few constants
LOGFILENAME = "TTX errors"
PREFSFILENAME = "TTX preferences"
DEFAULTXMLOUTPUT = ":XML output"
DEFAULTTTOUTPUT = ":TrueType output"
import FrameWork
import MiniAEFrame, AppleEvents
import EasyDialogs
import Res
import macfs
import os
import sys, time
import re, string
import traceback
from fontTools import ttLib, version
from fontTools.ttLib import xmlImport
from fontTools.ttLib.macUtils import ProgressBar
abouttext = """\
TTX - The free TrueType to XML to TrueType converter
(version %s)
Copyright 1999-2001, Just van Rossum (Letterror)
just@letterror.com""" % version
class TTX(FrameWork.Application, MiniAEFrame.AEServer):
def __init__(self):
FrameWork.Application.__init__(self)
MiniAEFrame.AEServer.__init__(self)
self.installaehandler(
AppleEvents.kCoreEventClass, AppleEvents.kAEOpenApplication, self.do_nothing)
self.installaehandler(
AppleEvents.kCoreEventClass, AppleEvents.kAEPrintDocuments, self.do_nothing)
self.installaehandler(
AppleEvents.kCoreEventClass, AppleEvents.kAEOpenDocuments, self.handle_opendocumentsevent)
self.installaehandler(
AppleEvents.kCoreEventClass, AppleEvents.kAEQuitApplication, self.handle_quitevent)
def idle(self, event):
SetArrowCursor()
def makeusermenus(self):
m = FrameWork.Menu(self.menubar, "File")
FrameWork.MenuItem(m, "Open...", "O", self.domenu_open)
FrameWork.Separator(m)
FrameWork.MenuItem(m, "Quit", "Q", self._quit)
def do_about(self, *args):
EasyDialogs.Message(abouttext)
def handle_quitevent(self, *args, **kwargs):
self._quit()
def domenu_open(self, *args):
fss, ok = macfs.StandardGetFile()
if ok:
self.opendocument(fss.as_pathname())
def handle_opendocumentsevent(self, docs, **kwargs):
if type(docs) <> type([]):
docs = [docs]
for doc in docs:
fss, a = doc.Resolve()
path = fss.as_pathname()
self.opendocument(path)
def opendocument(self, path):
filename = os.path.basename(path)
filetype = guessfiletype(path)
handler = getattr(self, "handle_%s_file" % filetype)
handler(path)
def handle_xml_file(self, path):
prefs = getprefs()
makesuitcase = int(prefs.get("makesuitcases", 0))
dstfolder = prefs.get("ttoutput", DEFAULTTTOUTPUT)
if not os.path.exists(dstfolder):
os.mkdir(dstfolder)
srcfilename = dstfilename = os.path.basename(path)
if dstfilename[-4:] in (".ttx", ".xml"):
dstfilename = dstfilename[:-4]
if dstfilename[-4:] not in (".TTF", ".ttf"):
dstfilename = dstfilename + ".TTF"
dst = os.path.join(dstfolder, dstfilename)
if makesuitcase:
try:
# see if the destination file is writable,
# otherwise we'll get an error waaay at the end of
# the parse procedure
testref = Res.FSpOpenResFile(macfs.FSSpec(dst), 3) # read-write
except Res.Error, why:
if why[0] <> -43: # file not found
EasyDialogs.Message("Can't create '%s'; file already open" % dst)
return
else:
Res.CloseResFile(testref)
else:
try:
f = open(dst, "wb")
except IOError, why:
EasyDialogs.Message("Can't create '%s'; file already open" % dst)
return
else:
f.close()
pb = ProgressBar("Reading TTX file '%s'..." % srcfilename)
try:
tt = ttLib.TTFont()
tt.importXML(path, pb)
pb.setlabel("Compiling and saving...")
tt.save(dst, makesuitcase)
finally:
pb.close()
def handle_datafork_file(self, path):
prefs = getprefs()
dstfolder = prefs.get("xmloutput", DEFAULTXMLOUTPUT)
if not os.path.exists(dstfolder):
os.mkdir(dstfolder)
filename = os.path.basename(path)
pb = ProgressBar("Dumping '%s' to XML..." % filename)
if filename[-4:] in (".TTF", ".ttf"):
filename = filename[:-4]
filename = filename + ".ttx"
dst = os.path.join(dstfolder, filename)
try:
tt = ttLib.TTFont(path)
tt.saveXML(dst, pb)
finally:
pb.close()
def handle_resource_file(self, path):
prefs = getprefs()
dstfolder = prefs.get("xmloutput", DEFAULTXMLOUTPUT)
if not os.path.exists(dstfolder):
os.mkdir(dstfolder)
filename = os.path.basename(path)
fss = macfs.FSSpec(path)
try:
resref = Res.FSpOpenResFile(fss, 1) # read-only
except:
return "unknown"
Res.UseResFile(resref)
pb = None
try:
n = Res.Count1Resources("sfnt")
for i in range(1, n+1):
res = Res.Get1IndResource('sfnt', i)
resid, restype, resname = res.GetResInfo()
if not resname:
resname = filename + `i`
pb = ProgressBar("Dumping '%s' to XML..." % resname)
dst = os.path.join(dstfolder, resname + ".ttx")
try:
tt = ttLib.TTFont(path, i)
tt.saveXML(dst, pb)
finally:
pb.close()
finally:
Res.CloseResFile(resref)
    def handle_python_file(self, path):
        """No-op handler for dropped Python files (intentionally does nothing)."""
        pass
        #print "python", path
    def handle_unknown_file(self, path):
        """Tell the user that *path* is not a recognized font or TTX file."""
        EasyDialogs.Message("Cannot open '%s': unknown file kind" % os.path.basename(path))
    def do_nothing(self, *args, **kwargs):
        """Catch-all event handler that deliberately ignores its arguments."""
        pass
    def mainloop(self, mask=FrameWork.everyEvent, wait=0):
        """Run the event loop until self.quitting is set.

        Exceptions raised by event handlers are trapped so one bad event
        cannot kill the application; unexpected errors are summarized in a
        dialog and the full traceback goes to the log file.
        """
        self.quitting = 0
        while not self.quitting:
            try:
                self.do1event(mask, wait)
            except self.__class__:
                # D'OH! FrameWork tries to quit us on cmd-.!
                pass
            except KeyboardInterrupt:
                pass
            except ttLib.xmlImport.xml_parse_error, why:
                # Bad input file: report the parse error to the user.
                EasyDialogs.Message(
                        "An error occurred while parsing the XML file:\n" + why)
            except:
                # Last-ditch handler: show a one-line summary in a dialog,
                # keep the full traceback in the log file.
                exc = traceback.format_exception(sys.exc_type, sys.exc_value, None)[0]
                exc = string.strip(exc)
                EasyDialogs.Message("An error occurred!\n%s\n[see the logfile '%s' for details]" %
                        (exc, LOGFILENAME))
                traceback.print_exc()
    def do_kHighLevelEvent(self, event):
        """Forward high-level events (Apple Events) to the AE manager."""
        import AE
        AE.AEProcessAppleEvent(event)
def guessfiletype(path):
    """Classify *path* as 'xml', 'datafork', 'resource' or 'unknown'.

    Sniffs the first bytes of the data fork; if they are not recognizable,
    falls back to checking the Mac resource fork for 'sfnt' resources.
    """
    #if path[-3:] == ".py":
    #    return "python"
    f = open(path, "rb")
    data = f.read(21)
    f.close()
    if data[:5] == "<?xml":
        return "xml"
    elif data[:4] in ("\000\001\000\000", "OTTO", "true"):
        # sfnt version tags: TrueType (1.0), OpenType/CFF, Apple 'true'.
        return "datafork"
    else:
        # assume res fork font
        fss = macfs.FSSpec(path)
        try:
            resref = Res.FSpOpenResFile(fss, 1)  # read-only
        except:
            # No readable resource fork either: give up.
            return "unknown"
        Res.UseResFile(resref)
        i = Res.Count1Resources("sfnt")
        Res.CloseResFile(resref)
        if i > 0:
            return "resource"
        return "unknown"
default_prefs = """\
xmloutput: ":XML output"
ttoutput: ":TrueType output"
makesuitcases: 1
"""
def getprefs(path=PREFSFILENAME):
    """Read the preferences file at *path* into a dict, creating it with
    factory defaults if it does not exist yet.

    Each line has the form 'name: value' where value is a Python literal;
    malformed lines are silently skipped.
    """
    if not os.path.exists(path):
        # First run: write the factory defaults.
        f = open(path, "w")
        f.write(default_prefs)
        f.close()
    f = open(path)
    lines = f.readlines()
    prefs = {}
    for line in lines:
        # Strip a trailing newline, if any.
        if line[-1:] == "\n":
            line = line[:-1]
        try:
            # Split on the first ':' only; the value may contain colons.
            name, value = re.split(":", line, 1)
            # NOTE(review): eval() trusts whatever is in the prefs file;
            # acceptable for a local per-user file, but worth remembering.
            prefs[string.strip(name)] = eval(value)
        except:
            pass
    return prefs
class dummy_stdin:
    """Stand-in for sys.stdin that always reports end-of-input."""

    def readline(self):
        """Return an empty string, signalling EOF to any reader."""
        return ""
# Replace stdin so stray reads see EOF instead of blocking the GUI app.
sys.stdin = dummy_stdin()
# redirect all output to a log file
sys.stdout = sys.stderr = open(LOGFILENAME, "w", 0) # unbuffered
print "Starting TTX at " + time.ctime(time.time())
# fire it up!
ttx = TTX()
ttx.mainloop()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# clues for BuildApplication/MacFreeze.
#
# These modules somehow get imported, but we don't want/have them:
#
# macfreeze: exclude msvcrt
# macfreeze: exclude W
# macfreeze: exclude SOCKS
# macfreeze: exclude TERMIOS
# macfreeze: exclude termios
# macfreeze: exclude icglue
# macfreeze: exclude ce
#
# these modules are imported dynamically, so MacFreeze won't see them:
#
# macfreeze: include fontTools.ttLib.tables._c_m_a_p
# macfreeze: include fontTools.ttLib.tables._c_v_t
# macfreeze: include fontTools.ttLib.tables._f_p_g_m
# macfreeze: include fontTools.ttLib.tables._g_a_s_p
# macfreeze: include fontTools.ttLib.tables._g_l_y_f
# macfreeze: include fontTools.ttLib.tables._h_d_m_x
# macfreeze: include fontTools.ttLib.tables._h_e_a_d
# macfreeze: include fontTools.ttLib.tables._h_h_e_a
# macfreeze: include fontTools.ttLib.tables._h_m_t_x
# macfreeze: include fontTools.ttLib.tables._k_e_r_n
# macfreeze: include fontTools.ttLib.tables._l_o_c_a
# macfreeze: include fontTools.ttLib.tables._m_a_x_p
# macfreeze: include fontTools.ttLib.tables._n_a_m_e
# macfreeze: include fontTools.ttLib.tables._p_o_s_t
# macfreeze: include fontTools.ttLib.tables._p_r_e_p
# macfreeze: include fontTools.ttLib.tables._v_h_e_a
# macfreeze: include fontTools.ttLib.tables._v_m_t_x
# macfreeze: include fontTools.ttLib.tables.L_T_S_H_
# macfreeze: include fontTools.ttLib.tables.O_S_2f_2
# macfreeze: include fontTools.ttLib.tables.T_S_I__0
# macfreeze: include fontTools.ttLib.tables.T_S_I__1
# macfreeze: include fontTools.ttLib.tables.T_S_I__2
# macfreeze: include fontTools.ttLib.tables.T_S_I__3
# macfreeze: include fontTools.ttLib.tables.T_S_I__5
# macfreeze: include fontTools.ttLib.tables.C_F_F_
| 5,539 | 31 | 561 |
027ccfc7c5e008bba853d139b86860d3618ffd39 | 1,412 | py | Python | main.py | rudradatta/Flames | 3692564ef1c3493eb2e1586be47ca997ede97cb4 | [
"BSD-3-Clause"
] | null | null | null | main.py | rudradatta/Flames | 3692564ef1c3493eb2e1586be47ca997ede97cb4 | [
"BSD-3-Clause"
] | null | null | null | main.py | rudradatta/Flames | 3692564ef1c3493eb2e1586be47ca997ede97cb4 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import logging
import __builtin__
from google.appengine.ext.webapp import util
# Enable info logging by the app (this is separate from appserver's
# logging).
logging.getLogger().setLevel(logging.INFO)
# Force sys.path to have our own directory first, so we can import from it.
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
try:
from django import v1_5 as django
except ImportError:
pass
# Import the part of Django that we use here.
import django.core.handlers.wsgi
if __name__ == '__main__':
main()
| 27.153846 | 75 | 0.751416 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import logging
import __builtin__
from google.appengine.ext.webapp import util
# Enable info logging by the app (this is separate from appserver's
# logging).
logging.getLogger().setLevel(logging.INFO)
# Force sys.path to have our own directory first, so we can import from it.
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
try:
from django import v1_5 as django
except ImportError:
pass
# Import the part of Django that we use here.
import django.core.handlers.wsgi
def main():
    """Build the Django WSGI application and hand it to the App Engine runner."""
    wsgi_app = django.core.handlers.wsgi.WSGIHandler()
    util.run_wsgi_app(wsgi_app)
if __name__ == '__main__':
main()
| 182 | 0 | 23 |
9e2d92bd74afeb262f1f3e8cd2eae3690b0b4724 | 4,076 | py | Python | tests/test_pipelines.py | sethvargo/scrapy-s3pipeline | 1a11de83c51c3863b471d806583fa5f3a5a6464a | [
"MIT"
] | 77 | 2017-12-07T11:31:14.000Z | 2022-03-18T21:08:33.000Z | tests/test_pipelines.py | sethvargo/scrapy-s3pipeline | 1a11de83c51c3863b471d806583fa5f3a5a6464a | [
"MIT"
] | 7 | 2019-04-19T23:55:22.000Z | 2021-03-06T22:22:37.000Z | tests/test_pipelines.py | sethvargo/scrapy-s3pipeline | 1a11de83c51c3863b471d806583fa5f3a5a6464a | [
"MIT"
] | 14 | 2017-12-07T11:31:42.000Z | 2022-03-18T20:54:09.000Z | from unittest import TestCase
from scrapy.exporters import JsonLinesItemExporter, JsonItemExporter
from scrapy.settings import BaseSettings, default_settings
from s3pipeline import S3Pipeline
from s3pipeline.strategies.s3 import S3Strategy
from s3pipeline.strategies.gcs import GCSStrategy
| 38.45283 | 95 | 0.673454 | from unittest import TestCase
from scrapy.exporters import JsonLinesItemExporter, JsonItemExporter
from scrapy.settings import BaseSettings, default_settings
from s3pipeline import S3Pipeline
from s3pipeline.strategies.s3 import S3Strategy
from s3pipeline.strategies.gcs import GCSStrategy
class TestPipelineSettings(TestCase):
    """Checks how S3Pipeline derives its configuration from Scrapy settings."""

    @staticmethod
    def _make_pipeline(url, **extra_settings):
        """Build an S3Pipeline from a pipeline URL plus optional extra settings."""
        values = {
            'S3PIPELINE_URL': url,
            'FEED_EXPORTERS_BASE': default_settings.FEED_EXPORTERS_BASE,
        }
        values.update(extra_settings)
        return S3Pipeline(BaseSettings(values), None)

    def test_s3(self):
        pipeline = self._make_pipeline('s3://my-bucket/{name}/{time}/items.{chunk:07d}.jl.gz')
        self.assertEqual(pipeline.bucket_name, 'my-bucket')
        self.assertEqual(pipeline.object_key_template, '{name}/{time}/items.{chunk:07d}.jl.gz')
        self.assertEqual(pipeline.max_chunk_size, 100)
        self.assertTrue(pipeline.use_gzip)
        self.assertEqual(pipeline.max_wait_upload_time, 30)
        self.assertIsInstance(pipeline.strategy, S3Strategy)
        self.assertEqual(pipeline.exporter_cls, JsonLinesItemExporter)

    def test_gcs(self):
        pipeline = self._make_pipeline('gs://my-bucket/{name}/{time}/items.{chunk:07d}.jl')
        self.assertEqual(pipeline.bucket_name, 'my-bucket')
        self.assertEqual(pipeline.object_key_template, '{name}/{time}/items.{chunk:07d}.jl')
        self.assertEqual(pipeline.max_chunk_size, 100)
        self.assertFalse(pipeline.use_gzip)
        self.assertEqual(pipeline.max_wait_upload_time, 30)
        self.assertIsInstance(pipeline.strategy, GCSStrategy)
        self.assertEqual(pipeline.exporter_cls, JsonLinesItemExporter)

    def test_json(self):
        # A plain .json suffix selects the JSON exporter without compression.
        pipeline = self._make_pipeline('s3://my-bucket/{name}/{time}/items.{chunk:07d}.json')
        self.assertFalse(pipeline.use_gzip)
        self.assertEqual(pipeline.exporter_cls, JsonItemExporter)

    def test_json_gz(self):
        pipeline = self._make_pipeline('s3://my-bucket/{name}/{time}/items.{chunk:07d}.json.gz')
        self.assertTrue(pipeline.use_gzip)
        self.assertEqual(pipeline.exporter_cls, JsonItemExporter)

    def test_force_gzip(self):
        # S3PIPELINE_GZIP overrides the suffix-derived default.
        pipeline = self._make_pipeline('s3://my-bucket/{name}/{time}/items.{chunk:07d}.jl',
                                       S3PIPELINE_GZIP=True)
        self.assertTrue(pipeline.use_gzip)

    def test_force_no_gzip(self):
        pipeline = self._make_pipeline('s3://my-bucket/{name}/{time}/items.{chunk:07d}.jl.gz',
                                       S3PIPELINE_GZIP=False)
        self.assertFalse(pipeline.use_gzip)

    def test_max_chunk_size(self):
        pipeline = self._make_pipeline('s3://my-bucket/{name}/{time}/items.{chunk:07d}.jl.gz',
                                       S3PIPELINE_MAX_CHUNK_SIZE=1000)
        self.assertEqual(pipeline.max_chunk_size, 1000)

    def test_max_wait_upload_time(self):
        pipeline = self._make_pipeline('s3://my-bucket/{name}/{time}/items.{chunk:07d}.jl.gz',
                                       S3PIPELINE_MAX_WAIT_UPLOAD_TIME=300)
        self.assertEqual(pipeline.max_wait_upload_time, 300)
| 3,528 | 16 | 239 |
de7fb23e68e497f342ffadf54e891b1ee5789685 | 315 | py | Python | .ycm_extra_conf.py | AleksanderGondek/py-friends-and-strangers | eda80c13653b6208c8d178feb6283f5279abc6bf | [
"MIT"
] | null | null | null | .ycm_extra_conf.py | AleksanderGondek/py-friends-and-strangers | eda80c13653b6208c8d178feb6283f5279abc6bf | [
"MIT"
] | null | null | null | .ycm_extra_conf.py | AleksanderGondek/py-friends-and-strangers | eda80c13653b6208c8d178feb6283f5279abc6bf | [
"MIT"
] | null | null | null | """ Vim YouCompleteMe"""
import pathlib
| 21 | 46 | 0.619048 | """ Vim YouCompleteMe"""
import pathlib
def Settings(**kwargs):
return {
"interpreter_path": pathlib.Path(__file__)
.parent.absolute()
.joinpath(".venv/bin/python"),
"sys_path": [
pathlib.Path(__file__)
.parent.absolute()
.joinpath("py_friends_and_strangers")
]
}
| 252 | 0 | 23 |
395347f97ebf9ebe9440f03168b3fc2acdb1aae1 | 124 | py | Python | apiproxy/resources/py/base64-encode.py | shahbagdadi/apigee-saml-idp | f881ac7579e6496965b4aeac22ef132ac9857c1a | [
"MIT"
] | 3 | 2016-02-01T16:37:51.000Z | 2022-01-13T03:14:31.000Z | apiproxy/resources/py/base64-encode.py | shahbagdadi/apigee-saml-idp | f881ac7579e6496965b4aeac22ef132ac9857c1a | [
"MIT"
] | 1 | 2016-01-15T02:52:56.000Z | 2016-01-15T02:52:56.000Z | apiproxy/resources/py/base64-encode.py | shahbagdadi/apigee-saml-idp | f881ac7579e6496965b4aeac22ef132ac9857c1a | [
"MIT"
] | 6 | 2015-09-30T05:37:38.000Z | 2022-01-13T03:14:43.000Z | import base64
enc_response = base64.b64encode(flow.getVariable("sf.response"))
flow.setVariable("sf.response", enc_response) | 41.333333 | 64 | 0.814516 | import base64
enc_response = base64.b64encode(flow.getVariable("sf.response"))
flow.setVariable("sf.response", enc_response) | 0 | 0 | 0 |
ab4b9cb10b61ad5a5c3b660008f819e16fe29a51 | 1,022 | py | Python | aiida/storage/sqlite_temp/__init__.py | aiidateam/aiida_core | 46d244e32ac5eca2e22a3d088314591ce064be57 | [
"PSF-2.0",
"MIT"
] | 153 | 2016-12-23T20:59:03.000Z | 2019-07-02T06:47:52.000Z | aiida/storage/sqlite_temp/__init__.py | aiidateam/aiida_core | 46d244e32ac5eca2e22a3d088314591ce064be57 | [
"PSF-2.0",
"MIT"
] | 2,466 | 2016-12-24T01:03:52.000Z | 2019-07-04T13:41:08.000Z | aiida/storage/sqlite_temp/__init__.py | aiidateam/aiida_core | 46d244e32ac5eca2e22a3d088314591ce064be57 | [
"PSF-2.0",
"MIT"
] | 88 | 2016-12-23T16:28:00.000Z | 2019-07-01T15:55:20.000Z | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""A temporary backend, using an in-memory sqlite database.
This backend is intended for testing and demonstration purposes.
Whenever it is instantiated, it creates a fresh storage backend,
and destroys it when it is garbage collected.
"""
# AUTO-GENERATED
# yapf: disable
# pylint: disable=wildcard-import
from .backend import *
__all__ = (
'SqliteTempBackend',
)
# yapf: enable
| 35.241379 | 75 | 0.515656 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""A temporary backend, using an in-memory sqlite database.
This backend is intended for testing and demonstration purposes.
Whenever it is instantiated, it creates a fresh storage backend,
and destroys it when it is garbage collected.
"""
# AUTO-GENERATED
# yapf: disable
# pylint: disable=wildcard-import
from .backend import *
__all__ = (
'SqliteTempBackend',
)
# yapf: enable
| 0 | 0 | 0 |
8421e6f29dc860c958defcb0f8837716e7ec31c1 | 633 | py | Python | migrations/versions/e1855559096_connection_full_name.py | ArthurPBressan/sisgep1 | d11151353b895a1a7b7673f90248ea3a0c209da6 | [
"MIT"
] | null | null | null | migrations/versions/e1855559096_connection_full_name.py | ArthurPBressan/sisgep1 | d11151353b895a1a7b7673f90248ea3a0c209da6 | [
"MIT"
] | null | null | null | migrations/versions/e1855559096_connection_full_name.py | ArthurPBressan/sisgep1 | d11151353b895a1a7b7673f90248ea3a0c209da6 | [
"MIT"
] | null | null | null | """connection full name
Revision ID: e1855559096
Revises: 401bc82cc255
Create Date: 2015-09-26 17:40:20.742180
"""
# revision identifiers, used by Alembic.
revision = 'e1855559096'
down_revision = '401bc82cc255'
from alembic import op
import sqlalchemy as sa
| 23.444444 | 93 | 0.703002 | """connection full name
Revision ID: e1855559096
Revises: 401bc82cc255
Create Date: 2015-09-26 17:40:20.742180
"""
# revision identifiers, used by Alembic.
revision = 'e1855559096'
down_revision = '401bc82cc255'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable 'full_name' string column to the 'connection' table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('connection', sa.Column('full_name', sa.String(length=255), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Drop the 'full_name' column from the 'connection' table (reverses upgrade)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('connection', 'full_name')
    ### end Alembic commands ###
| 322 | 0 | 46 |
8b6c8edca98f0afe1dd1b31d15580fcba225aafd | 2,274 | py | Python | example/my_foo_project/cli_client.py | mattpaletta/optional-grpc | 71d827630c02478cbad79762fa05282590aeacc1 | [
"MIT"
] | 1 | 2019-02-15T02:57:32.000Z | 2019-02-15T02:57:32.000Z | example/my_foo_project/cli_client.py | mattpaletta/optional-grpc | 71d827630c02478cbad79762fa05282590aeacc1 | [
"MIT"
] | null | null | null | example/my_foo_project/cli_client.py | mattpaletta/optional-grpc | 71d827630c02478cbad79762fa05282590aeacc1 | [
"MIT"
] | null | null | null | import logging
from typing import Iterator
from my_foo_project.foo import Foo
from my_foo_project.client import foo_pb2
from optionalgrpc import IS_RUNNING_LOCAL
| 34.984615 | 118 | 0.611258 | import logging
from typing import Iterator
from my_foo_project.foo import Foo
from my_foo_project.client import foo_pb2
from optionalgrpc import IS_RUNNING_LOCAL
def run_sample_client(configs):
    """Interactively exercise the Foo client over its three call styles.

    Prompts the user to pick 'unary', 'stream' or 'bistream', then forwards
    typed messages to the corresponding client call and logs the responses.
    Exits the process with status 1 on an unrecognized choice.
    """
    # Here's an example client.
    # We want a client API, so we set `server = False`
    client: Foo = Foo(configs = configs, server = False, use_rpc = not IS_RUNNING_LOCAL)
    method_options = ["unary", "stream", "bistream"]
    method = input("Please choose function: [{0}] > ".format(",".join(method_options)))
    if method not in method_options:
        logging.error("Invalid method: {0}".format(method))
        exit(1)
    msg_num = 0
    # here I'm reusing the definitions from the list so I don't make typing errors
    if method == method_options[0]:
        # Unary: one request, one response.
        msg_to_send = input("U: > ")
        resp: foo_pb2.MyMessage = client.sendUnary(request = foo_pb2.MyMessage(num = msg_num, contents = msg_to_send))
        logging.info("Sent: {0}".format(msg_to_send))
        logging.info("Received: (#{0}) `{1}`".format(resp.num, resp.contents))
    elif method == method_options[1]:
        # Client-streaming: many requests, one response.
        def helper():
            # Generator yielding user-typed messages until the 'done' sentinel.
            print("Sending stream, type `done` to finish.")
            msg_to_send = input("S: > ")
            msg_num = 0
            while msg_to_send != "done":
                yield foo_pb2.MyMessage(num = msg_num, contents = msg_to_send)
                msg_num += 1
                msg_to_send = input("S: > ")
        msg_generator = helper()
        resp: foo_pb2.MyMessage = client.sendStream(request_iterator = msg_generator)
        logging.info("Received: (#{0}) `{1}`".format(resp.num, resp.contents))
    elif method == method_options[2]:
        # Bidirectional streaming: many requests, many responses.
        def helper():
            # Generator yielding user-typed messages until the 'done' sentinel.
            print("Sending/Receiving stream, type `done` to finish.")
            msg_to_send = input("B: > ")
            msg_num = 0
            while msg_to_send != "done":
                yield foo_pb2.MyMessage(num = msg_num, contents = msg_to_send)
                msg_num += 1
                msg_to_send = input("B: > ")
        msg_generator = helper()
        resps: Iterator[foo_pb2.MyMessage] = client.sendBiStream(request_iterator = msg_generator)
        for resp in resps:
            logging.info("Received: (#{0}) `{1}`".format(resp.num, resp.contents))
| 2,087 | 0 | 23 |
a119345d10c5b52a1ebc1a6ecbd3b0e6b211e871 | 3,156 | py | Python | dnppy/raster/to_numpy.py | NASA-DEVELOP/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | [
"NASA-1.3"
] | 65 | 2015-09-10T12:59:56.000Z | 2022-02-27T22:09:03.000Z | dnppy/raster/to_numpy.py | snowzm/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | [
"NASA-1.3"
] | 40 | 2015-04-08T19:23:30.000Z | 2015-08-04T15:53:11.000Z | dnppy/raster/to_numpy.py | snowzm/dnppy | 8f7ef6f0653f5a4ea730ee557c72a2c89c06ce0b | [
"NASA-1.3"
] | 45 | 2015-08-14T19:09:38.000Z | 2022-02-15T18:53:16.000Z | __author__ = "jwely"
__all__ = ["to_numpy"]
from is_rast import is_rast
from metadata import metadata
import os
import arcpy
import numpy
def to_numpy(raster, numpy_datatype = None):
    """
    Wrapper for arcpy.RasterToNumpyArray with better metadata handling

    This is just a wraper for the RasterToNumPyArray function within arcpy, but it also
    extracts out all the spatial referencing information that will probably be needed
    to save the raster after desired manipulations have been performed.

    also see raster.from_numpy function in this module.

    :param raster:          Any raster supported by the arcpy.RasterToNumPyArray function
    :param numpy_datatype:  must be a string equal to any of the types listed at the following
                            address [http://docs.scipy.org/doc/numpy/user/basics.types.html]
                            for example: 'uint8' or 'int32' or 'float32'

    :return numpy_rast:     the (masked) numpy array version of the input raster
    :return Metadata:       a metadata object for single band rasters, or a list of
                            metadata objects (one per band) for multi band rasters.
                            see ``raster.metadata``
    """

    # perform some checks to convert to supported data format
    if not is_rast(raster):
        try:
            print("Raster '{0}' may not be supported, converting to tif".format(raster))
            tifraster = raster + ".tif"
            if not os.path.exists(raster + ".tif"):
                arcpy.CompositeBands_management(raster, tifraster)

            raster = tifraster
        except:
            raise Exception("Raster type could not be recognized")

    # read in the raster as a numpy array
    numpy_rast = arcpy.RasterToNumPyArray(raster)

    # build metadata for multi band raster (one metadata object per band)
    if len(numpy_rast.shape) == 3:
        zs, ys, xs = numpy_rast.shape
        meta = []

        for i in range(zs):
            bandpath = raster + "\\Band_{0}".format(i+1)
            meta.append(metadata(bandpath, xs, ys))

        if numpy_datatype is None:
            numpy_datatype = meta[0].numpy_datatype

    # build metadata for single band raster
    else:
        ys, xs = numpy_rast.shape
        meta = metadata(raster, xs, ys)

        if numpy_datatype is None:
            numpy_datatype = meta.numpy_datatype

    numpy_rast = numpy_rast.astype(numpy_datatype)

    # BUGFIX: for multi band rasters `meta` is a *list*, so the previous
    # unconditional `meta.NoData_Value` raised AttributeError. Resolve the
    # NoData value once; use the first band's value for masking (assumes all
    # bands share the same NoData value -- TODO confirm for mixed rasters).
    if isinstance(meta, list):
        nodata_value = meta[0].NoData_Value
    else:
        nodata_value = meta.NoData_Value

    # mask NoData values from the array
    if 'float' in numpy_datatype:
        numpy_rast[numpy_rast == nodata_value] = numpy.nan
        numpy_rast = numpy.ma.masked_array(numpy_rast, numpy.isnan(numpy_rast),
                                           dtype = numpy_datatype)

    elif 'int' in numpy_datatype:   # (numpy.nan not supported by ints)
        mask = numpy.zeros(numpy_rast.shape)
        mask[numpy_rast != nodata_value] = False    # do not mask
        mask[numpy_rast == nodata_value] = True     # mask
        numpy_rast = numpy.ma.masked_array(numpy_rast, mask,
                                           dtype = numpy_datatype)

    return numpy_rast, meta
# testing area
if __name__ == "__main__":
path = r"C:\Users\jwely\Desktop\troubleshooting\test\MOD10A1\frac_snow\MYD09GQ.A2015160.h18v02.005.2015162071112_000.tif"
rast, meta = to_numpy(path) | 34.681319 | 125 | 0.647655 | __author__ = "jwely"
__all__ = ["to_numpy"]
from is_rast import is_rast
from metadata import metadata
import os
import arcpy
import numpy
def to_numpy(raster, numpy_datatype = None):
    """
    Wrapper for arcpy.RasterToNumpyArray with better metadata handling

    This is just a wraper for the RasterToNumPyArray function within arcpy, but it also
    extracts out all the spatial referencing information that will probably be needed
    to save the raster after desired manipulations have been performed.

    also see raster.from_numpy function in this module.

    :param raster:          Any raster supported by the arcpy.RasterToNumPyArray function
    :param numpy_datatype:  must be a string equal to any of the types listed at the following
                            address [http://docs.scipy.org/doc/numpy/user/basics.types.html]
                            for example: 'uint8' or 'int32' or 'float32'

    :return numpy_rast:     the numpy array version of the input raster
    :return Metadata:       a metadata object. see ``raster.metadata``
    """

    # perform some checks to convert to supported data format
    if not is_rast(raster):
        try:
            print("Raster '{0}' may not be supported, converting to tif".format(raster))
            tifraster = raster + ".tif"
            if not os.path.exists(raster + ".tif"):
                arcpy.CompositeBands_management(raster, tifraster)

            raster = tifraster
        except:
            raise Exception("Raster type could not be recognized")

    # read in the raster as a numpy array
    numpy_rast = arcpy.RasterToNumPyArray(raster)

    # build metadata for multi band raster
    # NOTE: in this branch `meta` becomes a *list* of metadata objects.
    if len(numpy_rast.shape) == 3:
        zs, ys, xs = numpy_rast.shape
        meta = []

        for i in range(zs):
            bandpath = raster + "\\Band_{0}".format(i+1)
            meta.append(metadata(bandpath, xs, ys))

        if numpy_datatype is None:
            numpy_datatype = meta[0].numpy_datatype

    # build metadata for single band raster
    else:
        ys, xs = numpy_rast.shape
        meta = metadata(raster, xs, ys)

        if numpy_datatype is None:
            numpy_datatype = meta.numpy_datatype

    numpy_rast = numpy_rast.astype(numpy_datatype)

    # mask NoData values from the array
    # NOTE(review): for multi band rasters `meta` is a list (see above), so
    # `meta.NoData_Value` below raises AttributeError -- should presumably
    # use meta[0].NoData_Value in that case; confirm and fix.
    if 'float' in numpy_datatype:
        numpy_rast[numpy_rast == meta.NoData_Value] = numpy.nan
        numpy_rast = numpy.ma.masked_array(numpy_rast, numpy.isnan(numpy_rast),
                                           dtype = numpy_datatype)

    elif 'int' in numpy_datatype:   # (numpy.nan not supported by ints)
        mask = numpy.zeros(numpy_rast.shape)
        mask[numpy_rast != meta.NoData_Value] = False    # do not mask
        mask[numpy_rast == meta.NoData_Value] = True     # mask
        numpy_rast = numpy.ma.masked_array(numpy_rast, mask,
                                           dtype = numpy_datatype)

    return numpy_rast, meta
# testing area
if __name__ == "__main__":
path = r"C:\Users\jwely\Desktop\troubleshooting\test\MOD10A1\frac_snow\MYD09GQ.A2015160.h18v02.005.2015162071112_000.tif"
rast, meta = to_numpy(path) | 0 | 0 | 0 |
76d771bbdc8f3eb0a59c176355568f1a9c11c64f | 244 | py | Python | output/models/nist_data/list_pkg/duration/schema_instance/nistschema_sv_iv_list_duration_length_2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/nist_data/list_pkg/duration/schema_instance/nistschema_sv_iv_list_duration_length_2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/nist_data/list_pkg/duration/schema_instance/nistschema_sv_iv_list_duration_length_2_xsd/__init__.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from output.models.nist_data.list_pkg.duration.schema_instance.nistschema_sv_iv_list_duration_length_2_xsd.nistschema_sv_iv_list_duration_length_2 import NistschemaSvIvListDurationLength2
__all__ = [
"NistschemaSvIvListDurationLength2",
]
| 40.666667 | 187 | 0.889344 | from output.models.nist_data.list_pkg.duration.schema_instance.nistschema_sv_iv_list_duration_length_2_xsd.nistschema_sv_iv_list_duration_length_2 import NistschemaSvIvListDurationLength2
__all__ = [
"NistschemaSvIvListDurationLength2",
]
| 0 | 0 | 0 |
b23d36fa5033cff1b7860caf5d44f22ca9d35ade | 3,422 | py | Python | iwjam_import.py | patrickgh3/iwjam | fd6f58bd5217dc13ed475779fe7f1ff6ca7f13be | [
"MIT"
] | null | null | null | iwjam_import.py | patrickgh3/iwjam | fd6f58bd5217dc13ed475779fe7f1ff6ca7f13be | [
"MIT"
] | null | null | null | iwjam_import.py | patrickgh3/iwjam | fd6f58bd5217dc13ed475779fe7f1ff6ca7f13be | [
"MIT"
] | null | null | null | from lxml import etree
import os
import sys
import shutil
import iwjam_util
# Performs an import of a mod project into a base project given a
# previously computed ProjectDiff between them,
# and a list of folder names to prefix
# ('%modname%' will be replaced with the mod's name)
| 36.404255 | 78 | 0.63647 | from lxml import etree
import os
import sys
import shutil
import iwjam_util
# Performs an import of a mod project into a base project given a
# previously computed ProjectDiff between them,
# and a list of folder names to prefix
# ('%modname%' will be replaced with the mod's name)
def do_import(base_dir, mod_dir, pdiff, folder_prefixes=['%modname%']):
    """Import a mod project's added resources into a base GameMaker project.

    :param base_dir:        directory of the base project (modified in place)
    :param mod_dir:         directory of the mod project
    :param pdiff:           previously computed ProjectDiff between the two
    :param folder_prefixes: folder names under which imported resources are
                            grouped; the placeholder '%modname%' is replaced
                            with the mod's name.
    """
    # BUGFIX: build a fresh prefix list instead of assigning into
    # `folder_prefixes`. The default argument is a shared mutable list, so
    # in-place mutation permanently baked the first mod's name into the
    # default for all later calls (and clobbered caller-supplied lists).
    prefixes = [pdiff.mod_name if p == '%modname%' else p
                for p in folder_prefixes]

    # Set up XML
    base_gmx = iwjam_util.gmx_in_dir(base_dir)
    base_tree = etree.parse(base_gmx)
    base_root = base_tree.getroot()
    # Parse the mod project file too; the tree itself is unused, but the
    # parse validates that the mod .gmx exists and is well-formed XML.
    mod_gmx = iwjam_util.gmx_in_dir(mod_dir)
    etree.parse(mod_gmx)

    # For each added resource
    for addedres in pdiff.added:
        # Create a new resource element
        new_elt = etree.Element(addedres.restype)
        new_elt.text = addedres.elt_text

        # Create list of names of groups to traverse/create
        group_names = prefixes + addedres.group_names

        baseElt = base_root.find(addedres.restype_group_name)
        # Create resource type element if it doesn't exist
        if baseElt is None:
            baseElt = etree.SubElement(base_root, addedres.restype_group_name)

        # Traverse groups, creating nonexistent ones along the way
        for g in group_names:
            # Try to find group element with the current name
            nextBaseElt = next(
                    (c for c in baseElt if c.get('name') == g), None)
            # Create group element if it doesn't exist
            if nextBaseElt is None:
                nextBaseElt = etree.SubElement(baseElt, baseElt.tag)
                nextBaseElt.set('name', g)
            baseElt = nextBaseElt

        # Add the new resource element
        baseElt.append(new_elt)

    # Write project file
    base_tree.write(base_gmx, pretty_print=True)

    # Now, copy the files
    _recurse_files('', base_dir, mod_dir, [r.name for r in pdiff.added])

    # TODO: Modified resources
def _recurse_files(subpath, base_dir, mod_dir, res_names):
    """Recursively copy resource files named in *res_names* from the mod
    project into the base project, mirroring the directory structure.

    :param subpath:   path relative to the project roots currently being walked
    :param base_dir:  root of the destination (base) project
    :param mod_dir:   root of the source (mod) project
    :param res_names: names of the resources whose files should be copied
    """
    # 'Configs' is skipped -- presumably it holds GameMaker build
    # configuration rather than resources (TODO confirm).
    subdirs = [e for e in os.scandir(os.path.join(mod_dir, subpath))
            if e.is_dir() and e.name != 'Configs']
    files = [e for e in os.scandir(os.path.join(mod_dir, subpath))
            if e.is_file()]

    for file in files:
        resname = file.name.split('.')[0]
        extension = file.name.split('.')[-1]
        # Sprite PNGs carry a '_<frame>' suffix; strip it to recover the
        # resource name. Note the backslash split assumes Windows paths.
        if subpath.split('\\')[0] == 'sprites' and extension == 'png':
            resname = '_'.join(resname.split('_')[0:-1])
        if resname in res_names:
            relpath = os.path.relpath(file.path, mod_dir)
            base_file_path = os.path.join(base_dir, relpath)
            shutil.copyfile(file.path, base_file_path)

    for subdir in subdirs:
        relpath = os.path.relpath(subdir.path, mod_dir)
        base_path = os.path.join(base_dir, relpath)
        # Create the mirrored directory in the base project if needed.
        if not os.path.exists(base_path):
            os.mkdir(base_path)
        _recurse_files(relpath, base_dir, mod_dir, res_names)
| 3,086 | 0 | 45 |
da909f52e4e4629a36fb95351150753741737518 | 591 | py | Python | Code/Simple_plots/SDA.py | Basvdbrink1998/Influencing-social-networks | 7b512edc4127680a37115c7e1434b06ebfa67e8a | [
"MIT"
] | null | null | null | Code/Simple_plots/SDA.py | Basvdbrink1998/Influencing-social-networks | 7b512edc4127680a37115c7e1434b06ebfa67e8a | [
"MIT"
] | null | null | null | Code/Simple_plots/SDA.py | Basvdbrink1998/Influencing-social-networks | 7b512edc4127680a37115c7e1434b06ebfa67e8a | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
"""
SDA.py: Plots the probability of the SDA model generating a connection
based on different parameters for Figure 2.4.
"""
n = 3
alphas = [2, 3, 8]
betas = [2, 3, 5]
colors = ['r', 'g', 'b']
d = np.arange(0.0, 10.0, 0.1)
ax = plt.subplot()
for i in range(n):
res = prop(alphas[i], betas[i], d)
label = 'alpha= {}, beta= {}'.format(alphas[i], betas[i])
ax.plot(d, res, color=colors[i], label=label)
plt.xlabel('d(i,j)')
plt.ylabel('p(i,j)')
plt.legend()
plt.show()
| 19.7 | 74 | 0.597293 | import numpy as np
import matplotlib.pyplot as plt
"""
SDA.py: Plots the probability of the SDA model generating a connection
based on different parameters for Figure 2.4.
"""
def prop(a, b, d):
    """Probability of the SDA model creating a connection at distance d.

    Computes 1 / (1 + ((1/b) * d) ** a) for shape parameter a (alpha) and
    characteristic distance b (beta); d may be a scalar or a numpy array.
    """
    inv_beta = 1 / b
    return 1 / (1 + (inv_beta * d) ** a)
# Plot p(i,j) for three (alpha, beta) parameter pairs on one axis.
n = 3
alphas = [2, 3, 8]
betas = [2, 3, 5]
colors = ['r', 'g', 'b']
# Social distances at which to evaluate the connection probability.
d = np.arange(0.0, 10.0, 0.1)
ax = plt.subplot()
for i in range(n):
    res = prop(alphas[i], betas[i], d)
    label = 'alpha= {}, beta= {}'.format(alphas[i], betas[i])
    ax.plot(d, res, color=colors[i], label=label)
plt.xlabel('d(i,j)')
plt.ylabel('p(i,j)')
plt.legend()
plt.show()
| 25 | 0 | 23 |
d7abc7d7c4ba16df06d2efc32c2e5bcb55a94d2b | 3,939 | py | Python | stratified_group_fold/sources/StratifiedGroupKFold.py | erelcan/stratified-group-fold | 0c38f403636b8b493abd145b5e03c09895182e83 | [
"Apache-2.0"
] | 2 | 2021-07-03T15:52:59.000Z | 2021-07-03T17:24:19.000Z | stratified_group_fold/sources/StratifiedGroupKFold.py | erelcan/stratified-group-fold | 0c38f403636b8b493abd145b5e03c09895182e83 | [
"Apache-2.0"
] | null | null | null | stratified_group_fold/sources/StratifiedGroupKFold.py | erelcan/stratified-group-fold | 0c38f403636b8b493abd145b5e03c09895182e83 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
from sklearn.model_selection._split import _BaseKFold
from partition_optimizers import equally_partition_into_bins, mixed_equally_partition_into_bins
# Using get_n_splits from the super class.
| 44.258427 | 138 | 0.637218 | import pandas as pd
from sklearn.model_selection._split import _BaseKFold
from partition_optimizers import equally_partition_into_bins, mixed_equally_partition_into_bins
# Using get_n_splits from the super class.
class StratifiedGroupKFold(_BaseKFold):
    """K-fold cross-validator that stratifies by label without splitting groups.

    Per class, the groups are partitioned into ``n_splits`` bins whose member
    counts are balanced by the partition optimizers.  With
    ``mixed_groups=True`` a group may contain samples of several classes and
    a joint, per-class-weighted optimization is used instead.

    (Fix: the original final line carried dataset-dump residue appended to
    ``return sample_ids``, which made the file unparseable.)
    """

    def __init__(self, n_splits=5, mixed_groups=False, opt_type=0, reset_index=True):
        super().__init__(n_splits=n_splits, shuffle=False, random_state=None)
        self._mixed_groups = mixed_groups
        self._opt_type = opt_type
        self._reset_index = reset_index

    def split(self, X, y=None, groups=None):
        """Yield one (train_ids, test_ids) pair per fold."""
        folds = self._prepare_folds(y, groups)
        for i in range(len(folds)):
            yield self._train_test_from_folds(folds, [i])

    def _train_test_from_folds(self, folds, test_folds):
        # Folds listed in ``test_folds`` form the test set; all others train.
        test_folds_set = set(test_folds)
        train_ids = []
        test_ids = []
        for i in range(len(folds)):
            if i in test_folds_set:
                test_ids.extend(folds[i])
            else:
                train_ids.extend(folds[i])
        return train_ids, test_ids

    def _prepare_folds(self, labels, groups):
        # groups and labels must be series with same indexing!
        # Also, these indices should represent the (corresponding) sample indices
        # (or must be carefully handled outside~).
        # Since sklearn cross validation converts dataframes/series to arrays, it
        # expects 0-based index samples; any sample id returned here is relative
        # to the array positions sklearn produces.  Leftover dataframe indices
        # would raise "positional indexers are out-of-bounds", hence the
        # ``reset_index`` option.
        df = pd.DataFrame({"groups": groups, "labels": labels})
        if self._reset_index:
            df["index"] = list(range(len(labels)))
            df = df.set_index("index", drop=True)
        if self._mixed_groups:
            return self._handle_mixed_groups(df)
        return self._handle_non_mixed_groups(df)

    def _handle_mixed_groups(self, df):
        # A group may hold samples of several classes: weight every group by
        # how many samples of each class it contains, then optimize jointly.
        result_folds = {}
        for i in range(self.n_splits):
            result_folds[i] = []
        # Optimize this in the future..
        weights = {}
        for c in df["labels"].unique():
            weights[c] = {}
        for i in range(len(df)):
            cur_label = df.iloc[i]["labels"].item()
            cur_group = df.iloc[i]["groups"].item()
            if cur_group not in weights[cur_label]:
                weights[cur_label][cur_group] = 0
            weights[cur_label][cur_group] += 1
        cur_folds = mixed_equally_partition_into_bins(df["groups"].unique().tolist(), weights, self.n_splits)
        for i in range(len(cur_folds)):
            result_folds[i] = self._get_sample_ids_for_groups(df, cur_folds[i]["ids"])
        return result_folds

    def _handle_non_mixed_groups(self, df):
        # Handles the case where all members of a group share the same class:
        # partition each class's groups independently, then merge per fold.
        result_folds = {}
        for i in range(self.n_splits):
            result_folds[i] = []
        for c in df["labels"].unique():
            cur_df = df[df["labels"] == c]
            group_counts = cur_df["groups"].value_counts().to_dict()
            cur_folds = equally_partition_into_bins(list(group_counts.keys()), list(group_counts.values()), self.n_splits, self._opt_type)
            for i in range(len(cur_folds)):
                result_folds[i].extend(self._get_sample_ids_for_groups(cur_df, cur_folds[i]["ids"]))
        return result_folds

    def _get_sample_ids_for_groups(self, df, group_ids):
        # Map a list of group ids back to the sample (row) ids they own.
        sample_ids = []
        for group in group_ids:
            sample_ids.extend(df[df["groups"] == group].index.tolist())
        return sample_ids
5bb3d6d12d12e77d5fb9cceeb72b3faad2278553 | 515 | py | Python | the_mechanic_backend/apps/stock/migrations/0002_spare_store.py | muthukumar4999/the-mechanic-backend | 1e31affddf60d2de72445a85dd2055bdeba6f670 | [
"MIT"
] | null | null | null | the_mechanic_backend/apps/stock/migrations/0002_spare_store.py | muthukumar4999/the-mechanic-backend | 1e31affddf60d2de72445a85dd2055bdeba6f670 | [
"MIT"
] | 5 | 2020-06-05T22:30:20.000Z | 2021-09-08T01:12:27.000Z | the_mechanic_backend/apps/stock/migrations/0002_spare_store.py | muthukumar4999/the-mechanic-backend | 1e31affddf60d2de72445a85dd2055bdeba6f670 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.5 on 2019-03-31 08:34
from django.db import migrations, models
import django.db.models.deletion
| 24.52381 | 125 | 0.623301 | # Generated by Django 2.1.5 on 2019-03-31 08:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds the ``Spare.store`` foreign key linking a spare part to the
    # accounts.Store it belongs to; nullable/blank so existing rows survive.

    dependencies = [
        ('accounts', '0001_initial'),
        ('stock', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='spare',
            name='store',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='accounts.Store'),
        ),
    ]
| 0 | 368 | 23 |
9eea5aa106be395c4d8d95d6ed8621ad09adecab | 11,054 | py | Python | src/yativ/resources/testscale.py | Ahuge/yativ | 506511c9f5c6d77c6e488f6f593044b78541cfb5 | [
"MIT"
] | null | null | null | src/yativ/resources/testscale.py | Ahuge/yativ | 506511c9f5c6d77c6e488f6f593044b78541cfb5 | [
"MIT"
] | null | null | null | src/yativ/resources/testscale.py | Ahuge/yativ | 506511c9f5c6d77c6e488f6f593044b78541cfb5 | [
"MIT"
] | null | null | null | import numpy
from numpy import genfromtxt
my_data = genfromtxt('matrix.csv', delimiter=',')
def bin_ndarray(ndarray, new_shape, operation='sum'):
"""
Bins an ndarray in all axes based on the target shape, by summing or
averaging.
Number of output dimensions must match number of input dimensions and
new axes must divide old ones.
Example
-------
>>> m = numpy.arange(0,100,1).reshape((10,10))
>>> n = bin_ndarray(m, new_shape=(5,5), operation='sum')
>>> print(n)
[[ 22 30 38 46 54]
[102 110 118 126 134]
[182 190 198 206 214]
[262 270 278 286 294]
[342 350 358 366 374]]
"""
operation = operation.lower()
if not operation in ['sum', 'mean']:
raise ValueError("Operation not supported.")
if ndarray.ndim != len(new_shape):
raise ValueError("Shape mismatch: {} -> {}".format(ndarray.shape,
new_shape))
compression_pairs = [(d, c//d) for d,c in zip(new_shape,
ndarray.shape)]
flattened = [l for p in compression_pairs for l in p]
ndarray = ndarray.reshape(flattened)
for i in range(len(new_shape)):
op = getattr(ndarray, operation)
ndarray = op(-1*(i+1))
return ndarray
def resize_array(a, new_rows, new_cols):
'''
This function takes an 2D numpy array a and produces a smaller array
of size new_rows, new_cols. new_rows and new_cols must be less than
or equal to the number of rows and columns in a.
'''
rows = len(a)
cols = len(a[0])
yscale = float(rows) / new_rows
xscale = float(cols) / new_cols
# first average across the cols to shorten rows
new_a = numpy.zeros((rows, new_cols))
for j in range(new_cols):
# get the indices of the original array we are going to average across
the_x_range = (j*xscale, (j+1)*xscale)
firstx = int(the_x_range[0])
lastx = int(the_x_range[1])
# figure out the portion of the first and last index that overlap
# with the new index, and thus the portion of those cells that
# we need to include in our average
x0_scale = 1 - (the_x_range[0]-int(the_x_range[0]))
xEnd_scale = (the_x_range[1]-int(the_x_range[1]))
# scale_line is a 1d array that corresponds to the portion of each old
# index in the_x_range that should be included in the new average
scale_line = numpy.ones((lastx-firstx+1))
scale_line[0] = x0_scale
scale_line[-1] = xEnd_scale
# Make sure you don't screw up and include an index that is too large
# for the array. This isn't great, as there could be some floating
# point errors that mess up this comparison.
if scale_line[-1] == 0:
scale_line = scale_line[:-1]
lastx = lastx - 1
# Now it's linear algebra time. Take the dot product of a slice of
# the original array and the scale_line
new_a[:,j] = numpy.dot(a[:firstx:lastx+1], scale_line)/scale_line.sum()
# Then average across the rows to shorten the cols. Same method as above.
# It is probably possible to simplify this code, as this is more or less
# the same procedure as the block of code above, but transposed.
# Here I'm reusing the variable a. Sorry if that's confusing.
a = numpy.zeros((new_rows, new_cols))
for i in range(new_rows):
the_y_range = (i*yscale, (i+1)*yscale)
firsty = int(the_y_range[0])
lasty = int(the_y_range[1])
y0_scale = 1 - (the_y_range[0]-int(the_y_range[0]))
yEnd_scale = (the_y_range[1]-int(the_y_range[1]))
scale_line = numpy.ones((lasty-firsty+1))
scale_line[0] = y0_scale
scale_line[-1] = yEnd_scale
if scale_line[-1] == 0:
scale_line = scale_line[:-1]
lasty = lasty - 1
a[i:,] = numpy.dot(scale_line, new_a[firsty:lasty+1,])/scale_line.sum()
return a
red = [
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
]
green = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
]
blue = [
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
]
rgb = numpy.array([
red,
green,
blue
])
big = [
[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
]
fac2_small = [
[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 1, 0],
[0, 1, 0, 1],
]
fac4_small = [
[0.5, 0.5],
[0.5, 0.5],
]
fac = 2
new_arr = []
# s = [0, 1, 2, 3, 4, 5]
# len(s) = 6
# ci = 7
# if ci < len(s)
print("\n------------------------")
print("Original")
print("========================")
for row in big:
print(row)
print("\n--------------------")
print("50%")
print("====================")
small2 = alex2_meth(big, 2)
for row in small2:
print(row)
print("\n----------")
print("33%")
print("==========")
small4 = alex2_meth(big, 3)
for row in small4:
print(row)
print("\n----------")
print("25%")
print("==========")
small4 = alex2_meth(big, 4)
for row in small4:
print(row)
| 33.907975 | 102 | 0.495024 | import numpy
from numpy import genfromtxt
my_data = genfromtxt('matrix.csv', delimiter=',')
def bin_ndarray(ndarray, new_shape, operation='sum'):
    """
    Bins an ndarray in all axes based on the target shape, by summing or
    averaging.

    Number of output dimensions must match number of input dimensions and
    new axes must divide old ones.

    Example
    -------
    >>> m = numpy.arange(0,100,1).reshape((10,10))
    >>> n = bin_ndarray(m, new_shape=(5,5), operation='sum')
    >>> print(n)
    [[ 22  30  38  46  54]
     [102 110 118 126 134]
     [182 190 198 206 214]
     [262 270 278 286 294]
     [342 350 358 366 374]]
    """
    operation = operation.lower()
    # Idiom fix: ``x not in y`` instead of ``not x in y``.
    if operation not in ('sum', 'mean'):
        raise ValueError("Operation not supported.")
    if ndarray.ndim != len(new_shape):
        raise ValueError("Shape mismatch: {} -> {}".format(ndarray.shape,
                                                           new_shape))
    # Split every axis of length c into (d, c // d): the kept dimension
    # followed by the bin-sized dimension to be reduced.
    compression_pairs = [(d, c//d) for d, c in zip(new_shape,
                                                   ndarray.shape)]
    flattened = [l for p in compression_pairs for l in p]
    ndarray = ndarray.reshape(flattened)
    # Collapse each bin-sized axis from the right with sum/mean.
    for i in range(len(new_shape)):
        op = getattr(ndarray, operation)
        ndarray = op(-1*(i+1))
    return ndarray
def get_row_compressor(old_dimension, new_dimension):
    """Build a (new_dimension, old_dimension) row-averaging matrix.

    Left-multiplying an array by this matrix averages its rows down to
    ``new_dimension`` rows; a source row that straddles a bin boundary
    contributes fractional ("partial credit") weight to both bins.
    """
    dim_compressor = numpy.zeros((new_dimension, old_dimension))
    bin_size = float(old_dimension) / new_dimension
    next_bin_break = bin_size
    which_row = 0
    which_column = 0
    while which_row < dim_compressor.shape[0] and which_column < dim_compressor.shape[1]:
        if round(next_bin_break - which_column, 10) >= 1:
            # The current source column lies entirely inside the current bin.
            dim_compressor[which_row, which_column] = 1
            which_column += 1
        elif next_bin_break == which_column:
            # The bin boundary falls exactly on a column edge: start next bin.
            which_row += 1
            next_bin_break += bin_size
        else:
            # The boundary cuts through this column: split its weight between
            # the current bin and the next one.
            partial_credit = next_bin_break - which_column
            dim_compressor[which_row, which_column] = partial_credit
            which_row += 1
            dim_compressor[which_row, which_column] = 1 - partial_credit
            which_column += 1
            next_bin_break += bin_size
    # Normalize so every output row sums to 1 (a true average).
    dim_compressor /= bin_size
    return dim_compressor
def get_column_compressor(old_dimension, new_dimension):
    """Column-averaging matrix: simply the transpose of the row compressor."""
    row_compressor = get_row_compressor(old_dimension, new_dimension)
    return row_compressor.T
def compress_and_average(array, new_shape):
    """Area-average ``array`` down to ``new_shape`` via two matrix products:
    row-compressor @ array @ column-compressor."""
    # Note: new shape should be smaller in both dimensions than old shape
    return numpy.mat(get_row_compressor(array.shape[0], new_shape[0])) * \
           numpy.mat(array) * \
           numpy.mat(get_column_compressor(array.shape[1], new_shape[1]))
def resize_array(a, new_rows, new_cols):
    '''
    This function takes an 2D numpy array a and produces a smaller array
    of size new_rows, new_cols. new_rows and new_cols must be less than
    or equal to the number of rows and columns in a.
    '''
    rows = len(a)
    cols = len(a[0])
    yscale = float(rows) / new_rows
    xscale = float(cols) / new_cols

    # first average across the cols to shorten rows
    new_a = numpy.zeros((rows, new_cols))
    for j in range(new_cols):
        # get the indices of the original array we are going to average across
        the_x_range = (j*xscale, (j+1)*xscale)
        firstx = int(the_x_range[0])
        lastx = int(the_x_range[1])
        # figure out the portion of the first and last index that overlap
        # with the new index, and thus the portion of those cells that
        # we need to include in our average
        x0_scale = 1 - (the_x_range[0]-int(the_x_range[0]))
        xEnd_scale = (the_x_range[1]-int(the_x_range[1]))
        # scale_line is a 1d array that corresponds to the portion of each old
        # index in the_x_range that should be included in the new average
        scale_line = numpy.ones((lastx-firstx+1))
        scale_line[0] = x0_scale
        scale_line[-1] = xEnd_scale
        # Make sure you don't screw up and include an index that is too large
        # for the array. This isn't great, as there could be some floating
        # point errors that mess up this comparison.
        if scale_line[-1] == 0:
            scale_line = scale_line[:-1]
            lastx = lastx - 1
        # Now it's linear algebra time. Take the dot product of a slice of
        # the original array and the scale_line.
        # BUGFIX: was ``a[:firstx:lastx+1]`` (rows 0..firstx-1 with a step),
        # which selects the wrong slice entirely; we need the *columns*
        # firstx..lastx of every row.
        new_a[:, j] = numpy.dot(a[:, firstx:lastx+1], scale_line)/scale_line.sum()

    # Then average across the rows to shorten the cols. Same method as above.
    # Here the variable ``a`` is deliberately reused for the output.
    a = numpy.zeros((new_rows, new_cols))
    for i in range(new_rows):
        the_y_range = (i*yscale, (i+1)*yscale)
        firsty = int(the_y_range[0])
        lasty = int(the_y_range[1])
        y0_scale = 1 - (the_y_range[0]-int(the_y_range[0]))
        yEnd_scale = (the_y_range[1]-int(the_y_range[1]))
        scale_line = numpy.ones((lasty-firsty+1))
        scale_line[0] = y0_scale
        scale_line[-1] = yEnd_scale
        if scale_line[-1] == 0:
            scale_line = scale_line[:-1]
            lasty = lasty - 1
        # BUGFIX: was ``a[i:,]``, which rewrote every row from i downward on
        # each iteration; only row i should be assigned.
        a[i, :] = numpy.dot(scale_line, new_a[firsty:lasty+1, :])/scale_line.sum()
    return a
red = [
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1],
]
green = [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
]
blue = [
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0],
]
rgb = numpy.array([
red,
green,
blue
])
big = [
[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 1],
[0, 0, 1, 1, 0, 0, 1, 1],
]
fac2_small = [
[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 0, 1, 0],
[0, 1, 0, 1],
]
fac4_small = [
[0.5, 0.5],
[0.5, 0.5],
]
fac = 2
new_arr = []
def bruno_meth(big, fac):
    """Experimental decimation-based downscale of the 2-D list ``big``.

    NOTE(review): the counters keep every ``fac``-th row/column, but the row
    list is appended one iteration *before* ``i`` becomes None again, so the
    cells actually collected come from the following row -- this looks like
    an off-by-one bug kept for comparison with the other *_meth attempts.
    Behavior is preserved as-is.
    """
    new_arr = []
    i = None          # row-phase counter: None means "start a new output row"
    j = None          # column-phase counter (never reset per row)
    for rowindex, row in enumerate(big):
        if i is None:
            new_arr.append([])
            i = 2
        elif i == fac:
            i = None
        else:
            i += 1
        for colindex, col in enumerate(row):
            if j is None:
                # Only copy the cell when the row phase is also at a reset.
                if i is None:
                    new_arr[-1].append(col)
                j = 2
            elif j == fac:
                j = None
            else:
                j += 1
    return new_arr
def alex1_meth(big, fac):
    """Downscale ``big`` by averaging 2x2 neighborhoods sampled every ``fac``.

    NOTE(review): only four cells (a 2x2 block) are summed, yet the sum is
    divided by ``fac*fac`` -- the result is a true average only for
    ``fac == 2``; superseded by ``alex2_meth``.
    """
    new_arr = []
    for row, next_row in zip(big[::fac], big[1::fac]):
        new_arr.append([])
        for r1c1, r1c2, r2c1, r2c2 in zip(row[::fac], row[1::fac], next_row[::fac], next_row[1::fac]):
            new_arr[-1].append((r1c1+r1c2+r2c1+r2c2)/(fac*fac))
    return new_arr
def alex2_meth(big, fac):
    """Average-pool the 2-D list ``big`` with ``fac`` x ``fac`` tiles.

    ``fac == 1`` returns ``big`` unchanged; ``fac > 1`` shrinks the grid,
    averaging each tile (tiles at the right/bottom edge may be clipped);
    ``fac < 1`` (upscaling) is not implemented and yields None.
    """
    if fac == 1:
        return big
    if fac < 1:
        # Upscaling was never implemented in the original experiment.
        return None

    n_rows = len(big)
    shrunk = []
    for tile_row in range(len(big[::fac])):
        out_row = []
        for tile_col in range(len(big[0][::fac])):
            # Gather every cell of this tile that lies inside the grid;
            # the per-row bound keeps ragged rows safe.
            tile = [
                big[r][c]
                for r in range(tile_row * fac, min(tile_row * fac + fac, n_rows))
                for c in range(tile_col * fac, min(tile_col * fac + fac, len(big[r])))
            ]
            if tile:
                out_row.append(sum(tile) / len(tile))
        shrunk.append(out_row)
    return shrunk
# Demo: print the original grid followed by its 50% / 33% / 25%
# downsamples produced by alex2_meth.
print("\n------------------------")
print("Original")
print("========================")
for row in big:
    print(row)
print("\n--------------------")
print("50%")
print("====================")
small2 = alex2_meth(big, 2)
for row in small2:
    print(row)
print("\n----------")
print("33%")
print("==========")
small4 = alex2_meth(big, 3)
for row in small4:
    print(row)
print("\n----------")
print("25%")
print("==========")
small4 = alex2_meth(big, 4)
for row in small4:
    print(row)
| 3,226 | 0 | 138 |
0b841db886c83062d035de64ee8299f98561f3f8 | 14,235 | py | Python | apps/pay/views.py | agamgn/django-Tourism | ee8fae54981d135cbd7ddaf9131eb77ea7b2fb8a | [
"MIT"
] | 9 | 2019-06-30T06:34:22.000Z | 2021-11-09T17:21:16.000Z | apps/pay/views.py | agamgn/django-Tourism | ee8fae54981d135cbd7ddaf9131eb77ea7b2fb8a | [
"MIT"
] | 14 | 2019-12-22T02:04:18.000Z | 2022-03-11T23:44:38.000Z | apps/pay/views.py | agamgn/django-Tourism | ee8fae54981d135cbd7ddaf9131eb77ea7b2fb8a | [
"MIT"
] | 3 | 2019-06-30T06:35:57.000Z | 2019-12-18T03:42:43.000Z | from django.shortcuts import render
from django.views.generic import View
from django.shortcuts import render, redirect, HttpResponseRedirect, reverse, HttpResponse
from alipay import Alipay
import time
import string
import random
import json
from operation.models import ShoppingCart, Shopping
from .models import *
from scenicspots.models import Spots, Active
from utils.mixin_utils import LoginRequiredMixin
from treval import settings
def create_alipay():
"""
创建支付宝对象
:return: 支付宝对象
"""
alipay = Alipay(
appid=settings.ALIPAY_APPID,
# 回调地址
app_notify_url=None,
# 公钥路径
alipay_public_key_path=settings.ALIPAY_PUBLIC_KEY_PATH,
# 私钥路径
app_private_key_path=settings.APP_PRIVATE_KEY_PATH,
# 加密方式
sign_type='RSA2',
debug=True,
)
return alipay
def creat_order_num(user_id):
"""
生成订单号
:param user_id: 用户id
:return: 订单号
"""
time_stamp = int(round(time.time() * 1000))
randomnum = '%04d' % random.randint(0, 100000)
order_num = str(time_stamp) + str(randomnum) + str(user_id)
return order_num
def creat_cdk():
"""
创建cdk
:return: cdk
"""
cdk_area = string.digits + string.ascii_letters
cdk = ''
for i in range(1, 21):
cdk += random.choice(cdk_area) # 获取随机字符或数字
if i % 5 == 0 and i != 20: # 每隔4个字符增加'-'
cdk += '-'
return cdk
def check_cdk():
"""
cdk检测
:return: cdk
"""
# 首先创建一个cdk
cdk = creat_cdk()
try:
# 如果能查到订单
order = ScenicOrdersMainTable.objects.get(cdk=cdk)
# 重新执行检测
check_cdk()
except:
# 没找到就返回这个cdk
return cdk
# Create your views here.
class AliPayTestView(View):
"""
支付宝测试
"""
class SubmitOrderView(LoginRequiredMixin, View):
"""
提交订单
"""
class FinishPayView(View):
"""
支付完成后执行的操作
"""
class ProjectOrderView(View):
"""
商品订单页面
"""
class SubmitTravelsOrderView(View):
"""
旅游订单提交
"""
class ScenicOrderView(View):
"""
旅游订单页面
"""
| 33.104651 | 101 | 0.58595 | from django.shortcuts import render
from django.views.generic import View
from django.shortcuts import render, redirect, HttpResponseRedirect, reverse, HttpResponse
from alipay import Alipay
import time
import string
import random
import json
from operation.models import ShoppingCart, Shopping
from .models import *
from scenicspots.models import Spots, Active
from utils.mixin_utils import LoginRequiredMixin
from treval import settings
def create_alipay():
    """
    Build a configured Alipay SDK client.

    :return: an ``Alipay`` instance wired to this project's app id and keys
    """
    alipay = Alipay(
        appid=settings.ALIPAY_APPID,
        # Asynchronous notify callback URL (unused here).
        app_notify_url=None,
        # Path to Alipay's public key.
        alipay_public_key_path=settings.ALIPAY_PUBLIC_KEY_PATH,
        # Path to this application's private key.
        app_private_key_path=settings.APP_PRIVATE_KEY_PATH,
        # Signature algorithm.
        sign_type='RSA2',
        # True -> sandbox/debug gateway.
        debug=True,
    )
    return alipay
def creat_order_num(user_id):
    """Build an order number: ms timestamp + zero-padded random part + user id.

    The user id suffix keeps numbers unique across users.
    """
    millis = int(round(time.time() * 1000))
    rand_part = '%04d' % random.randint(0, 100000)
    return '{}{}{}'.format(millis, rand_part, user_id)
def creat_cdk():
    """Generate a random cdk of the form XXXXX-XXXXX-XXXXX-XXXXX
    (20 alphanumeric characters in four dash-separated groups)."""
    alphabet = string.digits + string.ascii_letters
    groups = [
        ''.join(random.choice(alphabet) for _ in range(5))
        for _ in range(4)
    ]
    return '-'.join(groups)
def check_cdk():
    """
    Return a cdk that no existing ScenicOrdersMainTable row uses.

    Generates a candidate and retries recursively on collision.
    """
    cdk = creat_cdk()
    try:
        order = ScenicOrdersMainTable.objects.get(cdk=cdk)
        # An order matched: the cdk is taken, so retry and *return* the
        # result. (BUGFIX: the original dropped the recursive result,
        # returning None whenever a collision occurred.)
        return check_cdk()
    except Exception:
        # No matching order (DoesNotExist): the cdk is free.
        return cdk
# Create your views here.
class AliPayTestView(View):
    """
    Alipay payment test page: GET renders the form, POST redirects the
    user to the Alipay gateway for the entered goods/amount.
    """
    def get(self, request):
        return render(request, 'PayTest.html', {})

    def post(self, request):
        money = float(request.POST.get('money', ''))
        goods = request.POST.get('goods', '')
        alipay = create_alipay()
        # Build the payment URL.
        query_params = alipay.api_alipay_trade_page_pay(
            subject=goods,  # goods title
            # Order number is time + random + user id, so it never repeats.
            out_trade_no=creat_order_num(request.user.id),  # merchant order number
            total_amount=money,  # amount in yuan, two decimal places
            timeout_express='60m',  # order closes after 60 minutes
            return_url=settings.DOMAIN_NAME + 'pay/alipayResultTest',
        )
        # Alipay page where the user completes the payment.
        url = settings.ALIPAY_URL + query_params
        return redirect(url)
class AliPayResultTestView(View):
    """Return-URL landing view for the Alipay payment test: queries the
    gateway for the trade's status code."""

    def get(self, request):
        out_trade_no = request.GET.get('trade_no', '')
        alipay = create_alipay()
        response = alipay.api_alipay_trade_query(trade_no=out_trade_no)
        code = response.get("code")  # success/error flag of the Alipay API call
        print(code)
        # BUGFIX: a Django view must return an HttpResponse; the original
        # returned None, which makes Django raise a ValueError.
        return HttpResponse(code)
class SubmitOrderView(LoginRequiredMixin, View):
    """
    Submit a goods order and redirect the user to the Alipay payment page.

    POST creates a brand-new order, either from the "buy now" table
    (``from=detail``) or from the checked shopping-cart rows; GET re-launches
    payment for an already stored, unpaid order.
    """
    def post(self, request):
        # Prices and goods are read from the backend, never from the client.
        user = request.user
        consignee = request.POST.get('consignee', '')
        address = request.POST.get('address', '')
        mobile = request.POST.get('mobile', '')
        zip_code = request.POST.get('zip_code', '')
        frompage = request.GET.get('from', '')
        # Merchant order number.
        out_trade_no = creat_order_num(request.user.id)
        # Coming straight from a product detail page ("buy now").
        if frompage == 'detail':
            # The item is the user's most recent row in the direct-purchase
            # table; the price mirrors the order-confirmation page.
            goods = Shopping.objects.filter(user=user).order_by("-add_time").first()
            totalprice = goods.product.price * goods.num + goods.product.freight
            order_describe = goods.product.name
            order_items = OrderItems()
            order_items.good_name = goods.product.name
            order_items.good_num = goods.num
            order_items.order_num = out_trade_no
            order_items.good_price = goods.product.price
            order_items.good_image = goods.product.mainimg
            order_items.good_id = goods.product.id
            order_items.save()
            # Decrease the product stock.
            goods.product.num -= goods.num
            # Increase the buyer counter.
            goods.product.buyers += 1
            goods.product.save()
        # Otherwise the order comes from the shopping cart.
        else:
            # The goods are the checked rows of the user's cart.
            goodsinfo = ShoppingCart.objects.filter(user=user, is_check=True)
            # Order description.
            if goodsinfo.count() > 1:
                order_describe = goodsinfo.first().product.name + '等多件商品'
            else:
                order_describe = goodsinfo.first().product.name
            totalprice = 0
            for good in goodsinfo:
                # Accumulate the total price.
                totalprice += good.product.price * good.num + good.product.freight
                # Persist one order-item row per cart line.
                order_items = OrderItems()
                order_items.good_name = good.product.name
                order_items.good_num = good.num
                order_items.order_num = out_trade_no
                order_items.good_price = good.product.price
                order_items.good_image = good.product.mainimg
                order_items.good_id = good.product.id
                order_items.save()
                # Decrease the product stock.
                good.product.num -= good.num
                # Increase the buyer counter.
                good.product.buyers += 1
                good.product.save()
                # Remove from the cart (intentionally disabled).
                # goodsinfo.delete()
        # Persist the order main-table row.
        goods_orders_main_table = GoodsOrdersMainTable()
        goods_orders_main_table.user = user
        goods_orders_main_table.order_num = out_trade_no
        goods_orders_main_table.order_describe = order_describe
        goods_orders_main_table.total_amount = totalprice
        goods_orders_main_table.consignee = consignee
        goods_orders_main_table.address = address
        goods_orders_main_table.mobile = mobile
        goods_orders_main_table.zip_code = zip_code
        goods_orders_main_table.save()
        # Redirect to the Alipay payment page.
        alipay = create_alipay()
        # Build the payment URL.
        query_params = alipay.api_alipay_trade_page_pay(
            subject=order_describe,
            out_trade_no=out_trade_no,
            total_amount=totalprice,
            timeout_express=settings.ALIPAY_CLOSE_TIME,
            return_url=settings.DOMAIN_NAME + 'pay/finish_pay?ordertype=goods',
        )
        url = settings.ALIPAY_URL + query_params
        return HttpResponseRedirect(url)

    def get(self, request):
        # Re-launch payment for an existing (unpaid) order.
        frompage = request.GET.get('from', '')
        order_num = request.GET.get('order_num', '')
        if frompage == 'goods_order':
            order = GoodsOrdersMainTable.objects.get(order_num=order_num)
            return_url = settings.DOMAIN_NAME + 'pay/finish_pay?ordertype=goods'
        elif frompage == 'tickets_order':
            order = ScenicOrdersMainTable.objects.get(order_num=order_num)
            return_url = settings.DOMAIN_NAME + 'pay/finish_pay?ordertype=tickets'
        elif frompage == 'actives_order':
            order = ScenicOrdersMainTable.objects.get(order_num=order_num)
            return_url = settings.DOMAIN_NAME + 'pay/finish_pay?ordertype=actives'
        else:
            result = json.dumps({"status": "failed", "msg": "来源错误"}, ensure_ascii=False)
            return HttpResponse(result)
        order_describe = order.order_describe
        total_amount = order.total_amount
        alipay = create_alipay()
        # Build the payment URL.
        query_params = alipay.api_alipay_trade_page_pay(
            subject=order_describe,
            out_trade_no=order_num,
            total_amount=float(total_amount),
            timeout_express=settings.ALIPAY_CLOSE_TIME,
            return_url=return_url,
        )
        url = settings.ALIPAY_URL + query_params
        return HttpResponseRedirect(url)
class FinishPayView(View):
    """
    Landing view after the user returns from Alipay: verify the payment
    with the gateway and mark the matching order as paid.

    NOTE(review): when the gateway reports anything other than success
    ("10000"), or ``ordertype`` is unknown, this view falls through and
    returns None -- Django will raise.  Confirm the intended behavior for
    failed payments.
    """
    def get(self, request):
        out_trade_no = request.GET.get('out_trade_no', '')
        alipay = create_alipay()
        response = alipay.api_alipay_trade_query(out_trade_no=out_trade_no)
        code = response.get("code")  # success/error flag of the Alipay API call
        # Order type ('goods', 'tickets' or 'actives').
        ordertype = request.GET.get('ordertype', '')
        # Goods order.
        if ordertype == 'goods':
            # Payment succeeded!
            if code == '10000':
                # Flip the order into the "paid" state.
                order = GoodsOrdersMainTable.objects.get(order_num=out_trade_no)
                order.order_state = 'yzf'
                order.pay_time = datetime.now()
                order.save()
                # Jump to the goods-order list page.
                return HttpResponseRedirect(reverse('pay:project_order'))
        elif ordertype == 'tickets' or ordertype == 'actives':
            # Payment succeeded!
            if code == '10000':
                order = ScenicOrdersMainTable.objects.get(order_num=out_trade_no)
                order.order_state = 'yzf'
                order.pay_time = datetime.now()
                order.cdk = check_cdk()
                order.save()
                if ordertype == 'actives':
                    # Payment done: add the purchased amount to the
                    # activity's participant counter.
                    scenic_id = order.scenic_id
                    num = order.buy_num
                    active = Active.objects.get(id=scenic_id)
                    active.now_num += num
                    active.save()
                # Jump to the travel-order list page.
                return HttpResponseRedirect(reverse('pay:scenic_order'))
class ProjectOrderView(View):
    """
    Goods-order list page, optionally filtered by order state.
    """
    def get(self, request):
        user = request.user
        # All orders of this user, newest first.
        all_orders = GoodsOrdersMainTable.objects.all().order_by('-create_time').filter(user=user)
        order_state = request.GET.get('order_state', '')
        # Narrow down to the requested order state, when given.
        if order_state:
            all_orders = all_orders.filter(order_state=order_state)
        # Collect the detail rows belonging to each order number.
        all_orders_list = []
        for orders in all_orders:
            orders_dic = {}
            # Order number.
            orders_dic['order_num'] = orders.order_num
            # Creation date.
            orders_dic['create_time'] = orders.create_time
            # Total price.
            orders_dic['totalprice'] = orders.total_amount
            # Order state.
            orders_dic['order_state'] = orders.order_state
            goods_list = []
            goods = OrderItems.objects.filter(order_num=orders_dic['order_num'])
            for good in goods:
                goods_dic = {}
                # Product name.
                goods_dic['good_name'] = good.good_name
                # Quantity.
                goods_dic['good_num'] = good.good_num
                # Unit price.
                goods_dic['good_price'] = good.good_price
                # Product image.
                goods_dic['good_image'] = good.good_image
                # Product id.
                goods_dic['good_id'] = good.good_id
                goods_list.append(goods_dic)
            orders_dic['goods_list'] = goods_list
            all_orders_list.append(orders_dic)
        return render(request, 'project_order.html', {
            'order_state': order_state,
            'all_orders_list': all_orders_list,
        })
class SubmitTravelsOrderView(View):
    """
    Create a scenic-spot ticket or activity order and redirect to Alipay.

    ``list_type`` selects 'spots' (tickets) or 'active' (activities);
    ``amount`` is the quantity, ``conname``/``conphone`` the contact data.
    """
    def get(self, request):
        user = request.user
        list_type = request.GET.get('list_type', '')
        amount = request.GET.get('amount', '')
        conname = request.GET.get('conname', '')
        conphone = request.GET.get('conphone', '')
        out_trade_no = creat_order_num(user.id)
        if list_type == 'spots':
            spots_id = request.GET.get('spots_id', '')
            spot = Spots.objects.get(id=int(spots_id))
            order_describe = spot.name + '门票'
            price = int(amount) * spot.price
            return_url = settings.DOMAIN_NAME + 'pay/finish_pay?ordertype=tickets'
            name = spot.name
            unit_price = spot.price
            image = spot.image
            id = int(spots_id)
            scenic_type = 'mp'
        elif list_type == 'active':
            active_id = request.GET.get('active_id', '')
            active = Active.objects.get(id=int(active_id))
            # An order is only possible while enough seats remain.
            if int(amount) <= active.all_num - active.now_num:
                order_describe = active.title
                price = int(amount) * active.price
                return_url = settings.DOMAIN_NAME + 'pay/finish_pay?ordertype=actives'
                name = active.title
                unit_price = active.price
                image = active.image
                id = int(active_id)
                scenic_type = 'hd'
            else:
                result = json.dumps({"status": "failed", "msg": "购买数量超出剩余最大数量!"}, ensure_ascii=False)
                return HttpResponse(result)
        else:
            # BUGFIX: the original executed a bare ``return`` here, so Django
            # received None and raised; answer with an explicit error payload
            # in the same style as the failure branch above.
            result = json.dumps({"status": "failed", "msg": "类型错误"}, ensure_ascii=False)
            return HttpResponse(result)
        # Persist the order row.
        scenic_order = ScenicOrdersMainTable()
        scenic_order.user = user
        scenic_order.scenic_name = name
        scenic_order.buy_num = int(amount)
        scenic_order.ticket_price = unit_price
        scenic_order.scenic_image = image
        scenic_order.scenic_id = id
        scenic_order.order_num = out_trade_no
        scenic_order.order_describe = order_describe
        scenic_order.total_amount = price
        scenic_order.consignee = conname
        scenic_order.mobile = conphone
        scenic_order.classification = scenic_type
        scenic_order.save()
        # Redirect to the Alipay payment page.
        alipay = create_alipay()
        # Build the payment URL.
        query_params = alipay.api_alipay_trade_page_pay(
            subject=order_describe,
            out_trade_no=out_trade_no,
            total_amount=price,
            timeout_express=settings.ALIPAY_CLOSE_TIME,
            return_url=return_url,
        )
        url = settings.ALIPAY_URL + query_params
        return HttpResponseRedirect(url)
class ScenicOrderView(View):
    """
    Travel (scenic/activity) order listing page for the logged-in user.
    """
    def get(self, request):
        """Render the user's scenic orders, newest first, optionally
        filtered by the ``order_state`` query parameter."""
        state_filter = request.GET.get('order_state', '')
        user_orders = ScenicOrdersMainTable.objects.filter(user=request.user).order_by('-create_time')
        if state_filter:
            user_orders = user_orders.filter(order_state=state_filter)
        context = {
            'orders': user_orders,
            'order_state': state_filter,
        }
        return render(request, 'scenic_order.html', context)
| 12,825 | 12 | 259 |
e7f8c7e0394d0153b2a8616fd41a38bdbade771f | 4,958 | py | Python | authorize/apis/transaction.py | pegler/authorizesauce | e0dc408638b916973db9185911a20aec8fa6143d | [
"MIT"
] | null | null | null | authorize/apis/transaction.py | pegler/authorizesauce | e0dc408638b916973db9185911a20aec8fa6143d | [
"MIT"
] | 1 | 2020-08-07T16:56:59.000Z | 2020-08-07T16:56:59.000Z | authorize/apis/transaction.py | pegler/authorizesauce | e0dc408638b916973db9185911a20aec8fa6143d | [
"MIT"
] | 2 | 2018-09-05T14:47:08.000Z | 2020-08-07T15:52:37.000Z | from decimal import Decimal
import urllib
import requests
from authorize.exceptions import AuthorizeConnectionError, \
AuthorizeResponseError
PROD_URL = 'https://secure.authorize.net/gateway/transact.dll'
TEST_URL = 'https://test.authorize.net/gateway/transact.dll'
RESPONSE_FIELDS = {
0: 'response_code',
2: 'response_reason_code',
3: 'response_reason_text',
4: 'authorization_code',
5: 'avs_response',
6: 'transaction_id',
9: 'amount',
11: 'transaction_type',
38: 'cvv_response',
}
| 38.434109 | 77 | 0.615772 | from decimal import Decimal
import urllib
import requests
from authorize.exceptions import AuthorizeConnectionError, \
AuthorizeResponseError
PROD_URL = 'https://secure.authorize.net/gateway/transact.dll'
TEST_URL = 'https://test.authorize.net/gateway/transact.dll'
RESPONSE_FIELDS = {
0: 'response_code',
2: 'response_reason_code',
3: 'response_reason_text',
4: 'authorization_code',
5: 'avs_response',
6: 'transaction_id',
9: 'amount',
11: 'transaction_type',
38: 'cvv_response',
}
def parse_response(response):
response = response.split(';')
fields = {}
for index, name in RESPONSE_FIELDS.items():
fields[name] = response[index]
return fields
class TransactionAPI(object):
    """Client for the Authorize.Net AIM transaction endpoint.

    Builds the delimited-field POST requests expected by ``transact.dll``
    and parses the delimited responses into dicts.
    """

    def __init__(self, login_id, transaction_key, debug=True, test=False):
        """
        Args:
            login_id: API login id issued by Authorize.Net.
            transaction_key: transaction key issued by Authorize.Net.
            debug: when True, talk to the sandbox (test) endpoint.
            test: when True, flag every request as a test transaction.
        """
        self.url = TEST_URL if debug else PROD_URL
        self.base_params = {
            'x_login': login_id,
            'x_tran_key': transaction_key,
            'x_version': '3.1',
            'x_test_request': 'TRUE' if test else 'FALSE',
            'x_delim_data': 'TRUE',
            'x_delim_char': ';',
        }

    @staticmethod
    def _format_amount(amount):
        """Return *amount* as a string quantized to two decimal places."""
        return str(Decimal(str(amount)).quantize(Decimal('0.01')))

    def _make_call(self, params):
        """POST *params* to the gateway and return the parsed response.

        Raises:
            AuthorizeResponseError: when the gateway response code is not
                '1' (approved); the parsed fields are attached to the
                exception as ``full_response``.
        """
        response = requests.post(self.url, data=params)
        # Use .text (decoded str) rather than .content (bytes) so the
        # ';'-split inside parse_response works on Python 3 as well.
        fields = parse_response(response.text)
        if fields['response_code'] != '1':
            e = AuthorizeResponseError('%s full_response=%r' %
                (fields['response_reason_text'], fields))
            e.full_response = fields
            raise e
        return fields

    def _add_params(self, params, credit_card=None, address=None):
        """Merge credit-card and address fields into *params* in place,
        then drop any keys whose value is None. Returns *params*."""
        if credit_card:
            params.update({
                'x_card_num': credit_card.card_number,
                'x_exp_date': credit_card.expiration.strftime('%m-%Y'),
                'x_card_code': credit_card.cvv,
                'x_first_name': credit_card.first_name,
                'x_last_name': credit_card.last_name,
            })
        if address:
            params.update({
                'x_address': address.street,
                'x_city': address.city,
                'x_state': address.state,
                'x_zip': address.zip_code,
                'x_country': address.country,
            })
        # Collect the keys first: deleting from a dict while iterating it
        # raises RuntimeError on Python 3.
        for key in [k for k, v in params.items() if v is None]:
            del params[key]
        return params

    def auth(self, amount, credit_card, address=None):
        """Authorize (hold) *amount* on *credit_card* without capturing."""
        params = self.base_params.copy()
        params = self._add_params(params, credit_card, address)
        params['x_type'] = 'AUTH_ONLY'
        params['x_amount'] = self._format_amount(amount)
        return self._make_call(params)

    def capture(self, amount, credit_card, address=None):
        """Authorize and immediately capture *amount* on *credit_card*."""
        params = self.base_params.copy()
        params = self._add_params(params, credit_card, address)
        params['x_type'] = 'AUTH_CAPTURE'
        params['x_amount'] = self._format_amount(amount)
        return self._make_call(params)

    def settle(self, transaction_id, amount=None):
        """Capture a previously authorized transaction.

        Amount is not required -- if provided, settles for a lower amount
        than the original auth; if not, settles the full amount authed.
        """
        params = self.base_params.copy()
        params['x_type'] = 'PRIOR_AUTH_CAPTURE'
        params['x_trans_id'] = transaction_id
        if amount:
            params['x_amount'] = self._format_amount(amount)
        return self._make_call(params)

    def credit(self, card_num, transaction_id, amount, duplicate_window=120):
        """Refund *amount* against a previously settled transaction.

        Authorize.net can do unlinked credits (not tied to a previous
        transaction) but we do not (at least for now).
        Provide the last four digits for the card number, as well as the
        transaction id and the amount to credit back.
        The following restrictions apply:
        - The transaction id must reference an existing, settled charge.
          (Note that in production, settlement happens once daily.)
        - The amount of the credit (and the sum of all credits against this
          original transaction) must be less than or equal to the original
          charge amount.
        - The credit must be submitted within 120 days of the original
          transaction being settled.
        """
        params = self.base_params.copy()
        params['x_type'] = 'CREDIT'
        params['x_duplicate_window'] = str(duplicate_window)
        params['x_trans_id'] = transaction_id
        params['x_card_num'] = str(card_num)
        params['x_amount'] = self._format_amount(amount)
        return self._make_call(params)

    def void(self, transaction_id):
        """Void a transaction that has not yet been settled."""
        params = self.base_params.copy()
        params['x_type'] = 'VOID'
        params['x_trans_id'] = transaction_id
        return self._make_call(params)
| 4,153 | 8 | 261 |
031d46e50198f0c6342052897973976a91bd5c45 | 3,897 | py | Python | src/core/src/tortuga/scripts/get_component_list.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 33 | 2018-03-02T17:07:39.000Z | 2021-05-21T18:02:51.000Z | src/core/src/tortuga/scripts/get_component_list.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 201 | 2018-03-05T14:28:24.000Z | 2020-11-23T19:58:27.000Z | src/core/src/tortuga/scripts/get_component_list.py | sutasu/tortuga | 48d7cde4fa652346600b217043b4a734fa2ba455 | [
"Apache-2.0"
] | 23 | 2018-03-02T17:21:59.000Z | 2020-11-18T14:52:38.000Z | # Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-member
import gettext
from tortuga.cli.tortugaCli import TortugaCli
from tortuga.helper.osHelper import getOsInfo
from tortuga.wsapi.kitWsApi import KitWsApi
from tortuga.wsapi.nodeWsApi import NodeWsApi
from tortuga.wsapi.softwareProfileWsApi import SoftwareProfileWsApi
_ = gettext.gettext
| 31.942623 | 80 | 0.627149 | # Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-member
import gettext
from tortuga.cli.tortugaCli import TortugaCli
from tortuga.helper.osHelper import getOsInfo
from tortuga.wsapi.kitWsApi import KitWsApi
from tortuga.wsapi.nodeWsApi import NodeWsApi
from tortuga.wsapi.softwareProfileWsApi import SoftwareProfileWsApi
_ = gettext.gettext
def displayComponent(c, kit):
    """Print one component on a line, prefixed by its kit.

    Output depends on the __repr__/__str__ of the Component and Kit objects.
    """
    print(str(kit) + ' ' + str(c))
class GetComponentList(TortugaCli):
    """CLI command: list components available for software profiles."""

    def parseArgs(self, usage=None):
        """Register the mutually exclusive selection options, then defer
        to the base class parser."""
        optGroup = 'Options'
        group = self.addOptionGroup(optGroup, '')
        # Only one of --software-profile, -p, or --os may be given.
        excl_option_group = group.add_mutually_exclusive_group()
        excl_option_group.add_argument(
            '--software-profile',
            dest='softwareprofile',
            help=_('Display list of components enabled in software profile.')
        )
        excl_option_group.add_argument(
            '-p',
            dest='applyToInstaller',
            action='store_true',
            default=False,
            help=_('Display components enabled on installer only')
        )
        excl_option_group.add_argument(
            '--os',
            dest='os',
            metavar='NAME-VERSION-ARCH',
            help=_('Display components suitable for specified OS only')
        )
        super().parseArgs(usage=usage)

    def __get_software_profile(self):
        """Return the software profile name selected by the command-line
        options, or None when neither -p nor --software-profile was given."""
        # Determine software profile name based on command-line option(s)
        if self.getArgs().applyToInstaller:
            api = self.configureClient(NodeWsApi)
            # Get software profile name from installer node
            node = api.getInstallerNode(
                optionDict={
                    'softwareprofile': True,
                }
            )
            return node.getSoftwareProfile().getName()
        return self.getArgs().softwareprofile

    def runCommand(self):
        """Entry point: list enabled components for a profile, or all
        components (optionally restricted to one operating system)."""
        self.parseArgs(_("""
Display list of components available for software profiles in the system.
"""))
        softwareProfileName = self.__get_software_profile()
        if softwareProfileName:
            # Display all components enabled for software profile
            swp_api = self.configureClient(SoftwareProfileWsApi)
            for c in swp_api.getEnabledComponentList(softwareProfileName):
                displayComponent(c, c.getKit())
            return
        if self.getArgs().os:
            try:
                # NAME-VERSION-ARCH, e.g. "centos-7.4-x86_64".
                name, version, arch = self.getArgs().os.split('-', 3)
            except ValueError:
                # argparse's error() exits, so execution stops here.
                self.getParser().error(
                    'Malformed argument to --os. Must be in form of'
                    ' NAME-VERSION-ARCH')
            osinfo = getOsInfo(name, version, arch)
        else:
            osinfo = None
        # Display all components
        kit_api = self.configureClient(KitWsApi)
        for kit in kit_api.getKitList():
            for c in kit.getComponentList():
                if osinfo and osinfo not in c.getOsInfoList() and \
                        osinfo.getOsFamilyInfo() not in c.getOsFamilyInfoList():
                    # Exclude those components that cannot be enabled on the
                    # specified operating system.
                    continue
                displayComponent(c, kit)
def main():
    """Console-script entry point for the get-component-list command."""
    command = GetComponentList()
    command.run()
| 2,830 | 14 | 149 |
c764640dcde065c4a996dc262f48bc4fce5645f6 | 6,311 | py | Python | darling_ansible/python_venv/lib/python3.7/site-packages/oci/os_management/models/software_package_file.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | null | null | null | darling_ansible/python_venv/lib/python3.7/site-packages/oci/os_management/models/software_package_file.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | null | null | null | darling_ansible/python_venv/lib/python3.7/site-packages/oci/os_management/models/software_package_file.py | revnav/sandbox | f9c8422233d093b76821686b6c249417502cf61d | [
"Apache-2.0"
] | 1 | 2020-06-25T03:12:58.000Z | 2020-06-25T03:12:58.000Z | # coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class SoftwarePackageFile(object):
"""
A file associated with a package
"""
def __init__(self, **kwargs):
"""
Initializes a new SoftwarePackageFile object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param path:
The value to assign to the path property of this SoftwarePackageFile.
:type path: str
:param type:
The value to assign to the type property of this SoftwarePackageFile.
:type type: str
:param time_modified:
The value to assign to the time_modified property of this SoftwarePackageFile.
:type time_modified: datetime
:param checksum:
The value to assign to the checksum property of this SoftwarePackageFile.
:type checksum: str
:param checksum_type:
The value to assign to the checksum_type property of this SoftwarePackageFile.
:type checksum_type: str
:param size_in_bytes:
The value to assign to the size_in_bytes property of this SoftwarePackageFile.
:type size_in_bytes: int
"""
self.swagger_types = {
'path': 'str',
'type': 'str',
'time_modified': 'datetime',
'checksum': 'str',
'checksum_type': 'str',
'size_in_bytes': 'int'
}
self.attribute_map = {
'path': 'path',
'type': 'type',
'time_modified': 'timeModified',
'checksum': 'checksum',
'checksum_type': 'checksumType',
'size_in_bytes': 'sizeInBytes'
}
self._path = None
self._type = None
self._time_modified = None
self._checksum = None
self._checksum_type = None
self._size_in_bytes = None
@property
def path(self):
"""
Gets the path of this SoftwarePackageFile.
file path
:return: The path of this SoftwarePackageFile.
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""
Sets the path of this SoftwarePackageFile.
file path
:param path: The path of this SoftwarePackageFile.
:type: str
"""
self._path = path
@property
def type(self):
"""
Gets the type of this SoftwarePackageFile.
type of the file
:return: The type of this SoftwarePackageFile.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this SoftwarePackageFile.
type of the file
:param type: The type of this SoftwarePackageFile.
:type: str
"""
self._type = type
@property
def time_modified(self):
"""
Gets the time_modified of this SoftwarePackageFile.
The date and time of the last modification to this file, as described
in `RFC 3339`__, section 14.29.
__ https://tools.ietf.org/rfc/rfc3339
:return: The time_modified of this SoftwarePackageFile.
:rtype: datetime
"""
return self._time_modified
@time_modified.setter
def time_modified(self, time_modified):
"""
Sets the time_modified of this SoftwarePackageFile.
The date and time of the last modification to this file, as described
in `RFC 3339`__, section 14.29.
__ https://tools.ietf.org/rfc/rfc3339
:param time_modified: The time_modified of this SoftwarePackageFile.
:type: datetime
"""
self._time_modified = time_modified
@property
def checksum(self):
"""
Gets the checksum of this SoftwarePackageFile.
checksum of the file
:return: The checksum of this SoftwarePackageFile.
:rtype: str
"""
return self._checksum
@checksum.setter
def checksum(self, checksum):
"""
Sets the checksum of this SoftwarePackageFile.
checksum of the file
:param checksum: The checksum of this SoftwarePackageFile.
:type: str
"""
self._checksum = checksum
@property
def checksum_type(self):
"""
Gets the checksum_type of this SoftwarePackageFile.
type of the checksum
:return: The checksum_type of this SoftwarePackageFile.
:rtype: str
"""
return self._checksum_type
@checksum_type.setter
def checksum_type(self, checksum_type):
"""
Sets the checksum_type of this SoftwarePackageFile.
type of the checksum
:param checksum_type: The checksum_type of this SoftwarePackageFile.
:type: str
"""
self._checksum_type = checksum_type
@property
def size_in_bytes(self):
"""
Gets the size_in_bytes of this SoftwarePackageFile.
size of the file in bytes
:return: The size_in_bytes of this SoftwarePackageFile.
:rtype: int
"""
return self._size_in_bytes
@size_in_bytes.setter
def size_in_bytes(self, size_in_bytes):
"""
Sets the size_in_bytes of this SoftwarePackageFile.
size of the file in bytes
:param size_in_bytes: The size_in_bytes of this SoftwarePackageFile.
:type: int
"""
self._size_in_bytes = size_in_bytes
| 27.202586 | 245 | 0.615116 | # coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class SoftwarePackageFile(object):
    """
    A file associated with a package.

    OCI SDK model class following the generated-model pattern: simple
    property accessors plus swagger_types/attribute_map metadata used for
    (de)serialization of API payloads.
    """

    def __init__(self, **kwargs):
        """
        Initializes a new SoftwarePackageFile object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param path:
            The value to assign to the path property of this SoftwarePackageFile.
        :type path: str

        :param type:
            The value to assign to the type property of this SoftwarePackageFile.
        :type type: str

        :param time_modified:
            The value to assign to the time_modified property of this SoftwarePackageFile.
        :type time_modified: datetime

        :param checksum:
            The value to assign to the checksum property of this SoftwarePackageFile.
        :type checksum: str

        :param checksum_type:
            The value to assign to the checksum_type property of this SoftwarePackageFile.
        :type checksum_type: str

        :param size_in_bytes:
            The value to assign to the size_in_bytes property of this SoftwarePackageFile.
        :type size_in_bytes: int

        """
        # Declared type of each model attribute (used during deserialization).
        self.swagger_types = {
            'path': 'str',
            'type': 'str',
            'time_modified': 'datetime',
            'checksum': 'str',
            'checksum_type': 'str',
            'size_in_bytes': 'int'
        }
        # JSON field name in the REST API for each model attribute.
        self.attribute_map = {
            'path': 'path',
            'type': 'type',
            'time_modified': 'timeModified',
            'checksum': 'checksum',
            'checksum_type': 'checksumType',
            'size_in_bytes': 'sizeInBytes'
        }
        self._path = None
        self._type = None
        self._time_modified = None
        self._checksum = None
        self._checksum_type = None
        self._size_in_bytes = None

    @property
    def path(self):
        """
        Gets the path of this SoftwarePackageFile.
        file path

        :return: The path of this SoftwarePackageFile.
        :rtype: str
        """
        return self._path

    @path.setter
    def path(self, path):
        """
        Sets the path of this SoftwarePackageFile.
        file path

        :param path: The path of this SoftwarePackageFile.
        :type: str
        """
        self._path = path

    @property
    def type(self):
        """
        Gets the type of this SoftwarePackageFile.
        type of the file

        :return: The type of this SoftwarePackageFile.
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """
        Sets the type of this SoftwarePackageFile.
        type of the file

        :param type: The type of this SoftwarePackageFile.
        :type: str
        """
        self._type = type

    @property
    def time_modified(self):
        """
        Gets the time_modified of this SoftwarePackageFile.
        The date and time of the last modification to this file, as described
        in `RFC 3339`__, section 14.29.

        __ https://tools.ietf.org/rfc/rfc3339

        :return: The time_modified of this SoftwarePackageFile.
        :rtype: datetime
        """
        return self._time_modified

    @time_modified.setter
    def time_modified(self, time_modified):
        """
        Sets the time_modified of this SoftwarePackageFile.
        The date and time of the last modification to this file, as described
        in `RFC 3339`__, section 14.29.

        __ https://tools.ietf.org/rfc/rfc3339

        :param time_modified: The time_modified of this SoftwarePackageFile.
        :type: datetime
        """
        self._time_modified = time_modified

    @property
    def checksum(self):
        """
        Gets the checksum of this SoftwarePackageFile.
        checksum of the file

        :return: The checksum of this SoftwarePackageFile.
        :rtype: str
        """
        return self._checksum

    @checksum.setter
    def checksum(self, checksum):
        """
        Sets the checksum of this SoftwarePackageFile.
        checksum of the file

        :param checksum: The checksum of this SoftwarePackageFile.
        :type: str
        """
        self._checksum = checksum

    @property
    def checksum_type(self):
        """
        Gets the checksum_type of this SoftwarePackageFile.
        type of the checksum

        :return: The checksum_type of this SoftwarePackageFile.
        :rtype: str
        """
        return self._checksum_type

    @checksum_type.setter
    def checksum_type(self, checksum_type):
        """
        Sets the checksum_type of this SoftwarePackageFile.
        type of the checksum

        :param checksum_type: The checksum_type of this SoftwarePackageFile.
        :type: str
        """
        self._checksum_type = checksum_type

    @property
    def size_in_bytes(self):
        """
        Gets the size_in_bytes of this SoftwarePackageFile.
        size of the file in bytes

        :return: The size_in_bytes of this SoftwarePackageFile.
        :rtype: int
        """
        return self._size_in_bytes

    @size_in_bytes.setter
    def size_in_bytes(self, size_in_bytes):
        """
        Sets the size_in_bytes of this SoftwarePackageFile.
        size of the file in bytes

        :param size_in_bytes: The size_in_bytes of this SoftwarePackageFile.
        :type: int
        """
        self._size_in_bytes = size_in_bytes

    def __repr__(self):
        """Return a readable representation built from all attributes."""
        return formatted_flat_dict(self)

    def __eq__(self, other):
        """Two model instances are equal when all attributes are equal."""
        if other is None:
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__ (defined explicitly for Python 2 support)."""
        return not self == other
| 177 | 0 | 81 |
4a054595b354fc56a93e4664562ef67d04d9e103 | 1,765 | py | Python | esp32_ulp/definesdb.py | wnienhaus/micropython-esp32-ulp | 19d4b98d41fae74062d855760d7aaef1988804f6 | [
"MIT"
] | 63 | 2018-03-11T20:28:18.000Z | 2022-02-03T15:03:24.000Z | esp32_ulp/definesdb.py | wnienhaus/micropython-esp32-ulp | 19d4b98d41fae74062d855760d7aaef1988804f6 | [
"MIT"
] | 56 | 2018-03-11T18:48:18.000Z | 2022-03-01T00:16:26.000Z | esp32_ulp/definesdb.py | wnienhaus/micropython-esp32-ulp | 19d4b98d41fae74062d855760d7aaef1988804f6 | [
"MIT"
] | 14 | 2018-03-13T07:33:39.000Z | 2022-02-03T15:03:27.000Z | import os
import btree
from .util import file_exists
DBNAME = 'defines.db'
| 22.341772 | 52 | 0.538244 | import os
import btree
from .util import file_exists
DBNAME = 'defines.db'
class DefinesDB:
def __init__(self):
self._file = None
self._db = None
self._db_exists = None
def clear(self):
self.close()
try:
os.remove(DBNAME)
self._db_exists = False
except OSError:
pass
def is_open(self):
return self._db is not None
def open(self):
if self.is_open():
return
try:
self._file = open(DBNAME, 'r+b')
except OSError:
self._file = open(DBNAME, 'w+b')
self._db = btree.open(self._file)
self._db_exists = True
def close(self):
if not self.is_open():
return
self._db.close()
self._db = None
self._file.close()
self._file = None
def db_exists(self):
if self._db_exists is None:
self._db_exists = file_exists(DBNAME)
return self._db_exists
def update(self, dictionary):
for k, v in dictionary.items():
self.__setitem__(k, v)
def get(self, key, default):
try:
result = self.__getitem__(key)
except KeyError:
result = default
return result
def keys(self):
if not self.db_exists():
return []
self.open()
return [k.decode() for k in self._db.keys()]
def __getitem__(self, key):
if not self.db_exists():
raise KeyError
self.open()
return self._db[key.encode()].decode()
def __setitem__(self, key, value):
self.open()
self._db[key.encode()] = str(value).encode()
def __iter__(self):
return iter(self.keys())
| 1,347 | -5 | 346 |
5fba5e1f1bfcf9ffc2e970adf233be126a9270aa | 630 | py | Python | app/bootstrap/settings/components/db.py | enix403/django-compose-starter-template | 5195ee0a1e59f567fcb314973a91a531324e50b2 | [
"Apache-2.0"
] | null | null | null | app/bootstrap/settings/components/db.py | enix403/django-compose-starter-template | 5195ee0a1e59f567fcb314973a91a531324e50b2 | [
"Apache-2.0"
] | null | null | null | app/bootstrap/settings/components/db.py | enix403/django-compose-starter-template | 5195ee0a1e59f567fcb314973a91a531324e50b2 | [
"Apache-2.0"
] | null | null | null | from app.bootstrap.configmanager import ConfigManager
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'USER': ConfigManager.get('db.user'),
'NAME': ConfigManager.get('db.name'),
'PASSWORD': ConfigManager.get('db.pass'),
'HOST': ConfigManager.get('db.host'),
'PORT': ConfigManager.get('db.port'),
}
}
migration_subfolder = ConfigManager.get('main.migration_branch_name')
if not migration_subfolder:
migration_subfolder = 'unnamed'
MIGRATION_MODULES = {'app': f'migrations.{migration_subfolder}'}
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField' | 31.5 | 69 | 0.684127 | from app.bootstrap.configmanager import ConfigManager
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'USER': ConfigManager.get('db.user'),
'NAME': ConfigManager.get('db.name'),
'PASSWORD': ConfigManager.get('db.pass'),
'HOST': ConfigManager.get('db.host'),
'PORT': ConfigManager.get('db.port'),
}
}
migration_subfolder = ConfigManager.get('main.migration_branch_name')
if not migration_subfolder:
migration_subfolder = 'unnamed'
MIGRATION_MODULES = {'app': f'migrations.{migration_subfolder}'}
DEFAULT_AUTO_FIELD = 'django.db.models.AutoField' | 0 | 0 | 0 |
010a4ac03204c1908e45dac9fcc0777e17bb6512 | 1,485 | py | Python | polygones_rose_v4.py | mercadder/python | 85a005fee24a613ac5bb33847cccc8e32f1ceebd | [
"MIT"
] | null | null | null | polygones_rose_v4.py | mercadder/python | 85a005fee24a613ac5bb33847cccc8e32f1ceebd | [
"MIT"
] | null | null | null | polygones_rose_v4.py | mercadder/python | 85a005fee24a613ac5bb33847cccc8e32f1ceebd | [
"MIT"
] | null | null | null | import turtle #module tha drawn things
draw_square()
| 29.117647 | 63 | 0.531987 | import turtle #module tha drawn things
def draw_square():
turtle.setworldcoordinates(-300, -300, 600, 600)
background = turtle.Screen() #create the background
background.bgcolor ("#084B8A") #background color
titi = turtle.Turtle() #titi is an intance of Turtle class
titi.shape("square")
titi.color("white")
titi.speed(100)
puppy = turtle.Turtle() #another instance of Turtle class
puppy.shape("circle")
puppy.color("orange")
puppy.speed(100)
reddy = turtle.Turtle() #another instance of Turtle class
reddy.shape("circle")
reddy.color("red")
reddy.speed(200)
s = 10 # number of sides
x = s # counter overall
z = s*2 # counter big
w = s/2 # counter small
y = 360/s #angle
while x > 0:
while z > 0:
puppy_pos = puppy.pos()
titi_pos = titi.pos()
reddy_pos = reddy.pos()
puppy.forward(100)
puppy.left(y)
titi.forward(s*20) #move the cursor and draw
titi.circle(z*5)
titi.left(y)
w = s
while w > 0:
reddy.goto(puppy_pos)
reddy.circle(w*10)
reddy.left(y)
reddy.forward(100)
reddy.circle(s-2)
titi.left(y) #rotate
w = w - 1
z = z - 1
x = x - 1
background.exitonclick() #cierra el background
draw_square()
| 1,408 | 0 | 22 |
b55102e6c9f6acb641d3ac9a4e1d55faf0f27f26 | 692 | py | Python | tests/test_dialects.py | jdp/jarg | 61ddf8ab6ec4ec33df21943295c0dac3444105f3 | [
"MIT"
] | 45 | 2015-01-01T06:01:33.000Z | 2021-10-01T14:39:41.000Z | tests/test_dialects.py | btoztas/jarg | 61ddf8ab6ec4ec33df21943295c0dac3444105f3 | [
"MIT"
] | 1 | 2018-12-04T17:56:04.000Z | 2018-12-04T17:56:04.000Z | tests/test_dialects.py | btoztas/jarg | 61ddf8ab6ec4ec33df21943295c0dac3444105f3 | [
"MIT"
] | 1 | 2018-12-04T18:33:55.000Z | 2018-12-04T18:33:55.000Z | import pytest
from jarg.dialects import FormDialect, JSONDialect
| 25.62963 | 71 | 0.632948 | import pytest
from jarg.dialects import FormDialect, JSONDialect
def test_JSONDialect():
    """JSONDialect coerces values per JSON rules and parses literals."""
    dialect = JSONDialect()
    # Bare words stay strings; numeric strings become numbers; a quoted
    # value is kept verbatim (quotes included).
    assert dialect.to_python("bar") == "bar"
    assert dialect.to_python("42") == 42
    assert dialect.to_python("4.20") == 4.2
    assert dialect.to_python('"69"') == '"69"'
    # from_literal parses JSON literals into the matching Python objects.
    assert dialect.from_literal("true") == True
    assert dialect.from_literal("false") == False
    assert dialect.from_literal("[1, 2, 3]") == [1, 2, 3]
    assert dialect.from_literal("{\"bar\": \"baz\"}") == {'bar': 'baz'}
def test_FormDialect():
    """FormDialect keeps values as strings and parses key=value pairs
    into a dict of lists (form-encoding semantics)."""
    dialect = FormDialect()
    assert dialect.to_python("foo") == "foo"
    assert dialect.from_literal("foo=bar") == {'foo': ['bar']}
| 577 | 0 | 46 |
1cb040e596b26589cbb11a733ba1ebdffd781115 | 2,400 | py | Python | cohesity_management_sdk/models/nfs_connection.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | null | null | null | cohesity_management_sdk/models/nfs_connection.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | null | null | null | cohesity_management_sdk/models/nfs_connection.py | nick6655/management-sdk-python | 88e792cb83e5c24a22af495b220c145d0c45841d | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class NfsConnection(object):
"""Implementation of the 'NfsConnection' model.
:TODO Type description here.
Attributes:
client_ip (string): Information of a Universal Data
Adapter cluster, only valid for an entity of view_name kCluster.
node_ip (string): Specifies a Node IP address where the connection
request is received.
server_ip (string): Specifies the Server IP address of the connection.
This could be a VIP, VLAN IP, or node IP on the Cluster.
view_id (long|int): Specifies the id of the view.
view_name (string): Specifies the name of the view.
"""
# Create a mapping from Model property names to API property names
_names = {
"client_ip":'clientIp',
"node_ip":'nodeIp',
"server_ip":'serverIp',
"view_id":'viewId',
"view_name":'viewName'
}
def __init__(self,
client_ip=None,
node_ip=None,
server_ip=None,
view_id=None,
view_name=None):
"""Constructor for the NfsConnection class"""
# Initialize members of the class
self.client_ip = client_ip
self.node_ip = node_ip
self.server_ip = server_ip
self.view_id = view_id
self.view_name = view_name
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
client_ip = dictionary.get('clientIp')
node_ip = dictionary.get('nodeIp')
server_ip = dictionary.get('serverIp')
view_id = dictionary.get('viewId')
view_name = dictionary.get('viewName')
# Return an object of this model
return cls(client_ip,
node_ip,
server_ip,
view_id,
view_name)
| 30.379747 | 81 | 0.588333 | # -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
class NfsConnection(object):
    """Implementation of the 'NfsConnection' model.

    Maps the camelCase API payload of a single NFS connection onto
    snake_case attributes.

    Attributes:
        client_ip (string): Information of a Universal Data
            Adapter cluster, only valid for an entity of view_name kCluster.
        node_ip (string): Specifies a Node IP address where the connection
            request is received.
        server_ip (string): Specifies the Server IP address of the connection.
            This could be a VIP, VLAN IP, or node IP on the Cluster.
        view_id (long|int): Specifies the id of the view.
        view_name (string): Specifies the name of the view.
    """

    # Mapping from model property names to API (JSON) property names.
    _names = {
        "client_ip":'clientIp',
        "node_ip":'nodeIp',
        "server_ip":'serverIp',
        "view_id":'viewId',
        "view_name":'viewName'
    }

    def __init__(self, client_ip=None, node_ip=None, server_ip=None,
                 view_id=None, view_name=None):
        """Constructor for the NfsConnection class"""
        self.client_ip = client_ip
        self.node_ip = node_ip
        self.server_ip = server_ip
        self.view_id = view_id
        self.view_name = view_name

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary

        Args:
            dictionary (dictionary): A dictionary representation of the
                object as obtained from the deserialization of the server's
                response. The keys MUST match property names in the API
                description.

        Returns:
            object: An instance of this structure class, or None when the
                input dictionary is None.
        """
        if dictionary is None:
            return None
        # Absent keys simply fall back to the attribute default of None.
        return cls(client_ip=dictionary.get('clientIp'),
                   node_ip=dictionary.get('nodeIp'),
                   server_ip=dictionary.get('serverIp'),
                   view_id=dictionary.get('viewId'),
                   view_name=dictionary.get('viewName'))
| 0 | 0 | 0 |
9599f3ef46405278aa67da39176eb1314ae4583e | 12,783 | py | Python | modules/windows_jumplist/JumpListParser.py | naaya17/carpe | fa2e3cfebe20f8839c985e5b9b78b538800172a1 | [
"Apache-2.0"
] | 56 | 2019-02-07T06:21:45.000Z | 2022-03-21T08:19:24.000Z | modules/windows_jumplist/JumpListParser.py | naaya17/carpe | fa2e3cfebe20f8839c985e5b9b78b538800172a1 | [
"Apache-2.0"
] | 5 | 2020-05-25T17:29:00.000Z | 2021-12-13T20:49:08.000Z | modules/windows_jumplist/JumpListParser.py | naaya17/carpe | fa2e3cfebe20f8839c985e5b9b78b538800172a1 | [
"Apache-2.0"
] | 31 | 2019-03-13T10:23:49.000Z | 2021-11-04T12:14:58.000Z | import os.path, sys
from modules.windows_jumplist.consts import *
from ctypes import *
from modules.windows_jumplist.lib.delphi import *
from modules.windows_jumplist.lib.yjSysUtils import *
from modules.windows_jumplist.lib.yjDateTime import *
from modules.windows_jumplist.LNKFileParser import TLNKFileParser
from modules.windows_jumplist.lib import olefile # https://pypi.org/project/olefile/
from modules.windows_jumplist.lib.yjSQLite3 import TSQLite3
def split_filename(fn):
    """Split a path string into ['directory', 'base name', 'extension']."""
    stem, fileext = os.path.splitext(fn)
    dirname, basename = os.path.split(stem)
    # splitext() reports a dotfile such as '.profile' as having no
    # extension; in that case treat the whole name as the extension,
    # leaving the base name empty (mirrors the original behaviour).
    if fileext == '' and basename.startswith('.'):
        fileext = basename
        basename = ''
    return [dirname, basename, fileext]
def get_files(path, w='*'):
    """Return the files (not sub-directories) in *path* matching pattern *w*."""
    if not os.path.isdir(path):
        return []
    import glob
    try:
        pattern = IncludeTrailingBackslash(path) + w
        return [name for name in glob.glob(pattern) if os.path.isfile(name)]
    finally:
        # Drop the locally imported module name again after use.
        del glob
# Jump List file extensions handled by this parser.
fileext_customdestination = '.customdestinations-ms'
fileext_automaticdestinations = '.automaticdestinations-ms'
# Win32 file-attribute bit flags (values match FILE_ATTRIBUTE_* in winnt.h).
FILE_ATTRIBUTE_READONLY = 0x00000001
FILE_ATTRIBUTE_HIDDEN = 0x00000002
FILE_ATTRIBUTE_SYSTEM = 0x00000004
FILE_ATTRIBUTE_DIRECTORY = 0x00000010
FILE_ATTRIBUTE_ARCHIVE = 0x00000020
# DestListEntry : https://bonggang.tistory.com/120
"""
if sys.version_info < (3, 8):
print('\'%s\' \r\nError: \'%s\' works on Python v3.8 and above.' % (sys.version, ExtractFileName(__file__)))
exit(1)
"""
"""
if __name__ == "__main__":
app_path = IncludeTrailingBackslash(os.path.dirname(os.path.abspath( __file__ )))
main(sys.argv, len(sys.argv))
""" | 42.327815 | 132 | 0.509192 | import os.path, sys
from modules.windows_jumplist.consts import *
from ctypes import *
from modules.windows_jumplist.lib.delphi import *
from modules.windows_jumplist.lib.yjSysUtils import *
from modules.windows_jumplist.lib.yjDateTime import *
from modules.windows_jumplist.LNKFileParser import TLNKFileParser
from modules.windows_jumplist.lib import olefile # https://pypi.org/project/olefile/
from modules.windows_jumplist.lib.yjSQLite3 import TSQLite3
def exit(exit_code, msg=None):
    """Print *msg* (when given) and terminate with *exit_code*.

    In debug mode the exit code is forced to 0. NOTE: this deliberately
    shadows the builtin exit().
    """
    code = 0 if debug_mode else exit_code
    if msg:
        print(msg)
    sys.exit(code)
def split_filename(fn):
    """Split a path string into ['directory', 'base name', 'extension']."""
    stem, fileext = os.path.splitext(fn)
    dirname, basename = os.path.split(stem)
    # splitext() reports a dotfile such as '.profile' as having no
    # extension; in that case treat the whole name as the extension,
    # leaving the base name empty (mirrors the original behaviour).
    if fileext == '' and basename.startswith('.'):
        fileext = basename
        basename = ''
    return [dirname, basename, fileext]
def get_files(path, w='*'):
    """Return the files (not sub-directories) in *path* matching pattern *w*."""
    if not os.path.isdir(path):
        return []
    import glob
    try:
        pattern = IncludeTrailingBackslash(path) + w
        return [name for name in glob.glob(pattern) if os.path.isfile(name)]
    finally:
        # Drop the locally imported module name again after use.
        del glob
# Jump List file extensions handled by this parser.
fileext_customdestination = '.customdestinations-ms'
fileext_automaticdestinations = '.automaticdestinations-ms'
# Win32 file-attribute bit flags (values match FILE_ATTRIBUTE_* in winnt.h);
# used below to render the attribute string of parsed link targets.
FILE_ATTRIBUTE_READONLY = 0x00000001
FILE_ATTRIBUTE_HIDDEN = 0x00000002
FILE_ATTRIBUTE_SYSTEM = 0x00000004
FILE_ATTRIBUTE_DIRECTORY = 0x00000010
FILE_ATTRIBUTE_ARCHIVE = 0x00000020
# DestListEntry : https://bonggang.tistory.com/120
class TDestListEntry(LittleEndianStructure):
    """Binary layout of one DestList record inside an
    .automaticDestinations-ms stream (packed, little-endian).
    Field layout reference: https://bonggang.tistory.com/120
    The record is followed by a UTF-16 path of length_of_unicode units."""
    _pack_ = 1
    _fields_ = [
        ('Checksum', c_uint64),
        ('NewVolumeID', c_ubyte * 16),
        ('NewObjectID', c_ubyte * 16),
        ('BirthVolumeID', c_ubyte * 16),
        ('BirthObjectID', c_ubyte * 16),
        ('NetBIOSName', c_char * 16),
        ('EntryID', c_uint32),
        ('_f08', c_ubyte * 8),
        ('last_recorded_aceess_time', c_uint64), # FILETIME
        ('Enty_pin_status', c_uint32), # FF FF FF FF
        ('_f11', c_uint32), # FF FF FF FF
        ('access_count', c_uint32),
        ('_f13', c_ubyte * 8), # 00 00 00 00 00 00 00 00
        ('length_of_unicode', c_uint16)
    ]
class TJumpListParser:
    """Parses one Windows Jump List file (.customDestinations-ms or
    .automaticDestinations-ms) into tabular rows for the output database."""
    # Shared handle to the optional AppIDs.dat lookup table
    # (AppID -> program name), opened once for all instances.
    appids_file = None
    def __init__(self, srcfile, src_id):
        """Prepare a parser for *srcfile* (a path or an open file object);
        *src_id* is the source index stored in every emitted row."""
        def getProgramName():
            # Resolve the jump-list file name (an AppID) to a program name
            # via the tab-separated AppIDs.dat table; '' when unknown.
            if not hasattr(TJumpListParser.appids_file, 'read'): return ''
            app_dict = {}
            f = TJumpListParser.appids_file
            for line in f.readlines():
                line = line.rstrip().split('\t')
                if len(line) > 1: app_dict[line[0]] = line[1]
            v = app_dict.get(split_filename(self.fileName)[1]) # split_filename : ['path', 'file name', 'file extension']
            return '' if v == None else v
        def filesize(f):
            # Size of *f* in bytes; the current stream position is restored.
            p = f.tell()
            try:
                f.seek(0, os.SEEK_END)
                return f.tell()
            finally:
                f.seek(p, os.SEEK_SET)
        if not hasattr(TJumpListParser.appids_file, 'read'):
            fn = ExtractFilePath(sys.argv[0]) + 'AppIDs.dat'
            if FileExists(fn): TJumpListParser.appids_file = open(fn, 'rt')
        self.src_id = src_id
        if hasattr(srcfile, 'read'):
            # Already-open file object: no creation time available.
            self.fileName = srcfile.name
            self.fileObject = srcfile
            self.fileCTime = ''
        else:
            self.fileName = srcfile
            self.fileObject = open(srcfile, 'rb')
            t = os.path.getctime(srcfile)
            if debug_mode: assert type(t) is float # TDateTime
            self.fileCTime = datetime.datetime.fromtimestamp(t)
        self.fileExt = ExtractFileExt(self.fileName).lower()
        self.fileSize = filesize(self.fileObject)
        self.programName = getProgramName()
    def parse(self):
        """Parse the jump-list file.

        Returns a dict with 'RecentFileInfo', 'LnkData' and 'DestList'
        tables (the first row of each is the header), or False when a
        .customdestinations-ms file is too small to be valid.
        """
        f = self.fileObject
        fn = self.fileName
        fext = self.fileExt
        result = {'RecentFileInfo': [['sid', 'Name', 'Path', 'ProgramName', 'CreationTime', 'ModifiedTime', 'AccessTime', 'Size']],
                  'LnkData' : [['sid', 'EntryId', 'ParentIdx', 'Item', 'ItemInfo']],
                  'DestList': [['sid', 'RecordedTime', 'AccessCount', 'EntryId', 'ComputerName', 'FileName', 'FilePath', 'FileExt']]
                  }
        sid = self.src_id
        finfo = result['RecentFileInfo']
        finfo.append([sid, ExtractFileName(fn), ExtractFilePath(fn), self.programName, self.fileCTime, '', '', self.fileSize])
        if debug_mode: assert len(finfo[0]) == len(finfo[1])
        lnkData = result['LnkData']
        destList = result['DestList']
        if fext == fileext_customdestination:
            # Handle a .customdestinations-ms file: scan for embedded LNK
            # blobs delimited by the LNK header signature and the trailing
            # AB FB BF BA marker, parsing each blob with TLNKFileParser.
            data = TDataAccess(f.read())
            if data.size < 30: return False
            sign_end = b'\xAB\xFB\xBF\xBA'
            sign_lnk = b'\x4C\x00\x00\x00\x01\x14\x02\x00\x00\x00\x00\x00'
            pos = 0
            bg_offset = -1
            while data.size > pos:
                data.position = pos
                sign_data = data.read(12)
                if debug_mode and not pos:
                    assert len(sign_data[8:]) == len(sign_end)
                    assert len(sign_data) == len(sign_lnk)
                if sign_data[8:] == sign_end:
                    # End marker reached: flush the last pending LNK blob.
                    if bg_offset > 0:
                        l = data.position - 4 - bg_offset
                        LNKFileParser = TLNKFileParser(data.read(l, offset = bg_offset), 0)
                        r = LNKFileParser.parse_data()['LinkHeaderInfo']
                        del r[0]
                        lnkData.extend(r)
                    break
                elif sign_data == sign_lnk:
                    if bg_offset >= 0:
                        # A new LNK header terminates the previous blob.
                        data.position -= 12
                        l = data.position - bg_offset
                        LNKFileParser = TLNKFileParser(data.read(l, offset = bg_offset), 0)
                        r = LNKFileParser.parse_data()['LinkHeaderInfo']
                        del r[0]
                        lnkData.extend(r)
                        bg_offset = -1
                        pos = data.position
                    else:
                        bg_offset = data.position - 12
                        pos += 1
                        pass
                    pass
                else: pos += 1
        else:
            # Handle an .automaticdestinations-ms file (an OLE compound
            # document): the 'DestList' stream holds usage records, every
            # other stream is one LNK file named by its hex entry id.
            _tmp = {}
            if debug_mode:
                assert self.fileExt == fileext_automaticdestinations
                assert olefile.isOleFile(f)
            ole = olefile.OleFileIO(f)
            for item in ole.listdir():
                if item == ['DestList']:
                    with ole.openstream(item) as f:
                        data = TDataAccess(f.read())
                        data.position = 32
                        while True:
                            entry = data.read_recdata(TDestListEntry)
                            if not entry: break
                            try:
                                fileName = data.read(entry.length_of_unicode * 2).decode('utf-16')
                                filePath = ExtractFilePath(fileName) if fileName.find('://') == -1 else fileName
                                computerName = entry.NetBIOSName.decode('utf-8')
                                destList.append([sid, filetime_to_datetime(entry.last_recorded_aceess_time, 0),
                                    entry.access_count, entry.EntryID, computerName, ExtractFileName(fileName),
                                    filePath, ExtractFileExt(fileName).lower()])
                            except Exception:
                                pass
                            data.position += 4
                else:
                    entryid = int(item[0], 16) # entryid is entry.EntryID
                    f = ole.openstream(item)
                    LNKFileParser = TLNKFileParser(f.read(), entryid)
                    r = LNKFileParser.parse_data()['LinkHeaderInfo']
                    del r[0]
                    lnkData.extend(r)
                    # Pull the target's timestamps/size/attributes out of
                    # the parsed LNK rows for later merging into DestList.
                    fname = ''
                    ctime = ''
                    atime = ''
                    mtime = ''
                    fattr = ''
                    fsize = ''
                    for v in r:
                        if debug_mode: assert v[0] == entryid
                        name = v[2]
                        val = v[3]
                        if (ctime == '') and (name == RS_TargetFileCreateDT): ctime = val
                        if (atime == '') and (name == RS_TargetFileAccessDT): atime = val
                        if (mtime == '') and (name == RS_TargetFileModifyDT): mtime = val
                        if (fsize == '') and (name == RS_TargetFileSize): fsize = val
                        if (fattr == '') and (name == RS_TargetFileProp): fattr = val
                        if (fname == '') and (name == 'Base Path'): fname = val
                    del r
                    # Render the attribute bits as letters, e.g. 'AR (21)'.
                    fattr_str = ''
                    fattr = StrToIntDef(fattr, 0)
                    if fattr:
                        if (fattr & FILE_ATTRIBUTE_ARCHIVE): fattr_str += 'A'
                        if (fattr & FILE_ATTRIBUTE_READONLY): fattr_str += 'R'
                        if (fattr & FILE_ATTRIBUTE_DIRECTORY): fattr_str += 'D'
                        if (fattr & FILE_ATTRIBUTE_SYSTEM): fattr_str += 'S'
                        if (fattr & FILE_ATTRIBUTE_HIDDEN): fattr_str += 'H'
                        fattr_str = '%s (%x)' % (fattr_str, fattr)
                    del fattr
                    _tmp[entryid] = {'CreatedTime': ctime,
                        'ModifiedTime': mtime,
                        'AccessedTime': atime,
                        'FileAttr': fattr_str,
                        'FileSize': fsize,
                        }
            if len(destList) > 1:
                # Append the per-entry LNK details to each DestList row.
                if debug_mode: assert self.fileExt == fileext_automaticdestinations
                for i, r in enumerate(destList):
                    if i == 0:
                        if debug_mode: assert r[3] == 'EntryId'
                        r.extend(list(_tmp[entryid].keys())) # extend the header fields
                        continue
                    entryid = r[3]
                    try:
                        v = list(_tmp[entryid].values())
                    except Exception as e:
                        v = ['', '', '', '', '']
                    r.extend(v)
            del _tmp
        if len(lnkData) > 1:
            for i, rec in enumerate(lnkData):
                if i == 0: continue
                rec.insert(0, sid) # sid
        if debug_mode:
            if len(destList) > 1: assert len(destList[0]) == 8 + 5
            if len(lnkData) > 1: assert (len(lnkData[0]) == 5) and (len(lnkData[1]) == 5)
        return result
def printHelp():
    """Print command-line usage and examples for this script."""
    print(
    r"""
 Usage:
   JumpListParser.py <.automaticDestinations-ms file> <Output .db Filename>
   JumpListParser.py <.customDestination-ms file> <Output .db Filename>
   JumpListParser.py <Path> <Output .db Filename>
   >python JumpListParser.py
   >python JumpListParser.py 9d1f905ce5044aee.automaticDestinations-ms re.db
   >python JumpListParser.py 28c8b86deab549a1.customDestinations-ms re.db
   >python JumpListParser.py c:\jumplist_samples re.db
 """)
def main(file, app_path):
    """Parse jump-list file(s) and return the last parse result.

    Args:
        file: path of one *destinations-ms file, or a directory to scan.
        app_path: application directory used to resolve bare file names.

    Returns:
        The result dict of TJumpListParser.parse() for the last processed
        file, or None when nothing was processed (previously an empty
        directory raised UnboundLocalError on 'result').
    """
    fn = file
    # Build the list of source files to process.
    src_files = []
    if os.path.isfile(fn):
        if ExtractFilePath(fn) == '': fn = app_path + fn
        if FileExists(fn): src_files.append(fn)
        else: exit(1, 'Error: File not found - "%s"' % fn)
    else:
        if not DirectoryExists(fn): exit(1, 'Error: Directory not found - "%s"' % fn)
        # Bug fix: the old literal '*.customdestination-ms' was missing an
        # 's' and never matched; derive both patterns from the extension
        # constants used by the parser itself.
        src_files = get_files(fn, '*' + fileext_customdestination)
        src_files.extend(get_files(fn, '*' + fileext_automaticdestinations))
    result = None
    for i, fn in enumerate(src_files):
        parser = TJumpListParser(fn, i)
        result = parser.parse()
        parser.fileObject.close()
    return result
"""
if sys.version_info < (3, 8):
print('\'%s\' \r\nError: \'%s\' works on Python v3.8 and above.' % (sys.version, ExtractFileName(__file__)))
exit(1)
"""
"""
if __name__ == "__main__":
app_path = IncludeTrailingBackslash(os.path.dirname(os.path.abspath( __file__ )))
main(sys.argv, len(sys.argv))
""" | 10,217 | 775 | 114 |
bb59e61d09ed7c1f58341c38fc5c90691b4f1d4e | 774 | py | Python | mimodata.py | theignorantzen/VLC-Pi | 66f50d9393429c55716c4c036240c44a4e601a98 | [
"MIT"
] | null | null | null | mimodata.py | theignorantzen/VLC-Pi | 66f50d9393429c55716c4c036240c44a4e601a98 | [
"MIT"
] | null | null | null | mimodata.py | theignorantzen/VLC-Pi | 66f50d9393429c55716c4c036240c44a4e601a98 | [
"MIT"
] | null | null | null | import RPi.GPIO as GPIO
import RPi.GPIO as GPIO
import binascii
import time

# Drive six GPIO pins from a bit string: each group of 3 bits selects
# which of the 6 output columns are raised for one row of the pattern.
GPIO.setmode(GPIO.BCM)
GPIO.setup(12, GPIO.OUT)
GPIO.setup(16, GPIO.OUT)
GPIO.setup(5, GPIO.OUT)
GPIO.setup(6, GPIO.OUT)
GPIO.setup(13, GPIO.OUT)
GPIO.setup(19, GPIO.OUT)
pin = [12, 16, 5, 6, 13, 19]
k = 0
st = "110101011010111"
cols = 6
# Bug fix: '/' yields a float on Python 3 and range(rows) then raises
# TypeError; use integer division.
rows = len(st) // 3
# Bug fix: '[[0]*cols]*rows' repeated ONE shared row list, so writing
# arr[i][j] changed every row; build independent rows instead.
arr = [[0] * cols for _ in range(rows)]
for i in range(rows):
    # The first bit of the triple picks column 0 or 1 ...
    if st[k] == '0':
        arr[i][0] = 1
    else:
        arr[i][1] = 1
    # ... and the remaining two bits pick exactly one of columns 2-5.
    if st[k+1] == '0' and st[k+2] == '0':
        arr[i][2] = 1
    elif st[k+1] == '0' and st[k+2] == '1':
        arr[i][3] = 1
    elif st[k+1] == '1' and st[k+2] == '0':
        arr[i][4] = 1
    elif st[k+1] == '1' and st[k+2] == '1':
        arr[i][5] = 1
    k = k + 3
try:
    # Replay the pattern forever, 10 ms per row.
    while True:
        for i in range(rows):
            for j in range(cols):
                GPIO.output(pin[j], arr[i][j])
            time.sleep(0.01)
finally:
    # Bug fix: GPIO.cleanup() was unreachable after the infinite loop;
    # run it on Ctrl-C / any error so the GPIO lines are released.
    GPIO.cleanup()
import RPi.GPIO as GPIO
import binascii
import time

# Drive six GPIO pins from a bit string: each group of 3 bits selects
# which of the 6 output columns are raised for one row of the pattern.
GPIO.setmode(GPIO.BCM)
GPIO.setup(12, GPIO.OUT)
GPIO.setup(16, GPIO.OUT)
GPIO.setup(5, GPIO.OUT)
GPIO.setup(6, GPIO.OUT)
GPIO.setup(13, GPIO.OUT)
GPIO.setup(19, GPIO.OUT)
pin = [12, 16, 5, 6, 13, 19]
k = 0
st = "110101011010111"
cols = 6
# Bug fix: '/' yields a float on Python 3 and range(rows) then raises
# TypeError; use integer division.
rows = len(st) // 3
# Bug fix: '[[0]*cols]*rows' repeated ONE shared row list, so writing
# arr[i][j] changed every row; build independent rows instead.
arr = [[0] * cols for _ in range(rows)]
for i in range(rows):
    # The first bit of the triple picks column 0 or 1 ...
    if st[k] == '0':
        arr[i][0] = 1
    else:
        arr[i][1] = 1
    # ... and the remaining two bits pick exactly one of columns 2-5.
    if st[k+1] == '0' and st[k+2] == '0':
        arr[i][2] = 1
    elif st[k+1] == '0' and st[k+2] == '1':
        arr[i][3] = 1
    elif st[k+1] == '1' and st[k+2] == '0':
        arr[i][4] = 1
    elif st[k+1] == '1' and st[k+2] == '1':
        arr[i][5] = 1
    k = k + 3
try:
    # Replay the pattern forever, 10 ms per row.
    while True:
        for i in range(rows):
            for j in range(cols):
                GPIO.output(pin[j], arr[i][j])
            time.sleep(0.01)
finally:
    # Bug fix: GPIO.cleanup() was unreachable after the infinite loop;
    # run it on Ctrl-C / any error so the GPIO lines are released.
    GPIO.cleanup()
42ae96f4a947574eed72fb622a55bbd38b1b9cc0 | 3,589 | py | Python | code_fragment_generator/tcp_api_generator.py | Longxr/python-data-process | f5f8c2c89b449b76dde31d13c6cabb6b97b0023f | [
"MIT"
] | null | null | null | code_fragment_generator/tcp_api_generator.py | Longxr/python-data-process | f5f8c2c89b449b76dde31d13c6cabb6b97b0023f | [
"MIT"
] | null | null | null | code_fragment_generator/tcp_api_generator.py | Longxr/python-data-process | f5f8c2c89b449b76dde31d13c6cabb6b97b0023f | [
"MIT"
] | null | null | null | # coding=utf-8
import os
import re
import string
from string import Template
if __name__ == '__main__':
    # Code generator: reads attribute descriptions from input.cpp and fills
    # the tcp_api.template to produce C++ request/response boilerplate.
    root_dir = os.path.dirname(os.path.abspath(__file__))
    input_file = open(os.path.join(root_dir, 'input.cpp'), 'r', encoding='gb18030', errors='ignore')
    out_file = open(os.path.join(root_dir, 'output.cpp'), 'w', encoding='gb18030')
    template_http_api_file = open(
        os.path.join(root_dir, os.path.join('template', 'tcp_api.template')), 'r')
    attri_tmpl = Template('JY_PROPERTY_READWRITE($type_name, $var_up_name) \t\t')
    request_param_tmpl = Template('params.insert("$var_name", _$var_up_name);\n')
    response_replace_tmpl = Template('map.insert("$var_up_name","$var_name");\n')
    constructor_tmpl = Template('this->_$var_up_name\t\t\t= obj._$var_up_name;\n')
    http_api_tmpl = Template(template_http_api_file.read())
    # First input line: '<ClassName> <RequestType> <ModuleId>'.
    line_first = input_file.readline().split()
    class_name = line_first[0]
    request_type = line_first[1]
    module_id = line_first[2]
    response_name = class_name
    if 'Notify' not in response_name:
        response_name += 'Response'
    print(class_name, request_type, module_id, response_name)
    input_file.readline() # blank line
    request_attri = ''
    request_param = ''
    response_constructor = ''
    response_attri = ''
    response_replace = ''
    table = str.maketrans({key: None for key in string.punctuation})
    # new_s = s.translate(table)
    # Request attributes: one per line, up to the first blank line.
    while 1:
        line = input_file.readline()
        line = line.strip()
        if not line:
            break
        attri_name, attri_up_name, attri_type, comment = fun_get_attri(line)
        attri_item = attri_tmpl.safe_substitute(type_name=attri_type, var_up_name=attri_up_name)
        request_attri += '    ' + attri_item + comment + '\n'
        param_item = request_param_tmpl.safe_substitute(var_name=attri_name, var_up_name=attri_up_name)
        request_param += '    ' + param_item
    # Response attributes: the following lines, up to the next blank line.
    while 1:
        line = input_file.readline()
        line = line.strip()
        if not line:
            break
        attri_name, attri_up_name, attri_type, comment = fun_get_attri(line)
        attri_item = attri_tmpl.safe_substitute(type_name=attri_type, var_up_name=attri_up_name)
        response_attri += '    ' + attri_item + comment + '\n'
        replace_item = response_replace_tmpl.safe_substitute(var_name=attri_name, var_up_name=attri_up_name)
        response_replace += '    ' + replace_item
    str_out = http_api_tmpl.safe_substitute(str_api_class=class_name, str_api_class_response=response_name, str_request_type=request_type, str_module_id=module_id,
                                            str_request_atti=request_attri, str_request_params=request_param, str_response_atti=response_attri, str_response_replace=response_replace)
    out_file.write(str_out)
    print(request_attri)
    print(request_param)
    print(response_attri)
    print(response_replace)
    input_file.close()
    out_file.close()
| 32.333333 | 164 | 0.676512 | # coding=utf-8
import os
import re
import string
from string import Template
def fun_replace(text):
    """Remove all punctuation from *text* and strip surrounding whitespace.

    The parameter was renamed from 'str', which shadowed the builtin;
    every call site in this file passes it positionally (via map()).
    """
    cleaned = re.sub(r'[^\w\s]', '', text)
    return cleaned.strip()
def fun_get_attri(line):
    """Parse one 'name : type // comment' attribute line.

    Returns (name, CapitalizedName, qt_type, comment). The type defaults
    to 'QString', and a plain 'string' is mapped to 'QString' as well.
    """
    segments = line.split('//')
    comment = '//' + segments[1] if len(segments) > 1 else ''
    fields = [fun_replace(part) for part in segments[0].split(':')]
    attri_name = fields[0]
    attri_up_name = attri_name[0].upper() + attri_name[1:]
    attri_type = 'QString'
    if len(fields) > 1:
        attri_type = fields[1]
        if attri_type == 'string':
            attri_type = 'QString'
    return attri_name, attri_up_name, attri_type, comment
if __name__ == '__main__':
    # Code generator: reads attribute descriptions from input.cpp and fills
    # the tcp_api.template to produce C++ request/response boilerplate.
    root_dir = os.path.dirname(os.path.abspath(__file__))
    input_file = open(os.path.join(root_dir, 'input.cpp'), 'r', encoding='gb18030', errors='ignore')
    out_file = open(os.path.join(root_dir, 'output.cpp'), 'w', encoding='gb18030')
    template_http_api_file = open(
        os.path.join(root_dir, os.path.join('template', 'tcp_api.template')), 'r')
    attri_tmpl = Template('JY_PROPERTY_READWRITE($type_name, $var_up_name) \t\t')
    request_param_tmpl = Template('params.insert("$var_name", _$var_up_name);\n')
    response_replace_tmpl = Template('map.insert("$var_up_name","$var_name");\n')
    constructor_tmpl = Template('this->_$var_up_name\t\t\t= obj._$var_up_name;\n')
    http_api_tmpl = Template(template_http_api_file.read())
    # First input line: '<ClassName> <RequestType> <ModuleId>'.
    line_first = input_file.readline().split()
    class_name = line_first[0]
    request_type = line_first[1]
    module_id = line_first[2]
    response_name = class_name
    if 'Notify' not in response_name:
        response_name += 'Response'
    print(class_name, request_type, module_id, response_name)
    input_file.readline() # blank line
    request_attri = ''
    request_param = ''
    response_constructor = ''
    response_attri = ''
    response_replace = ''
    table = str.maketrans({key: None for key in string.punctuation})
    # new_s = s.translate(table)
    # Request attributes: one per line, up to the first blank line.
    while 1:
        line = input_file.readline()
        line = line.strip()
        if not line:
            break
        attri_name, attri_up_name, attri_type, comment = fun_get_attri(line)
        attri_item = attri_tmpl.safe_substitute(type_name=attri_type, var_up_name=attri_up_name)
        request_attri += '    ' + attri_item + comment + '\n'
        param_item = request_param_tmpl.safe_substitute(var_name=attri_name, var_up_name=attri_up_name)
        request_param += '    ' + param_item
    # Response attributes: the following lines, up to the next blank line.
    while 1:
        line = input_file.readline()
        line = line.strip()
        if not line:
            break
        attri_name, attri_up_name, attri_type, comment = fun_get_attri(line)
        attri_item = attri_tmpl.safe_substitute(type_name=attri_type, var_up_name=attri_up_name)
        response_attri += '    ' + attri_item + comment + '\n'
        replace_item = response_replace_tmpl.safe_substitute(var_name=attri_name, var_up_name=attri_up_name)
        response_replace += '    ' + replace_item
    str_out = http_api_tmpl.safe_substitute(str_api_class=class_name, str_api_class_response=response_name, str_request_type=request_type, str_module_id=module_id,
                                            str_request_atti=request_attri, str_request_params=request_param, str_response_atti=response_attri, str_response_replace=response_replace)
    out_file.write(str_out)
    print(request_attri)
    print(request_param)
    print(response_attri)
    print(response_replace)
    input_file.close()
    out_file.close()
| 593 | 0 | 46 |
35b5028184a8de2f02ce4af30620f4dde17fa65e | 2,512 | py | Python | DeeProtein/scripts/extract_targets.py | juzb/DeeProtein | 487694a24abdb4656499111c8a8904dfcb1d98ab | [
"MIT"
] | 12 | 2019-02-21T14:09:13.000Z | 2021-03-05T02:02:21.000Z | DeeProtein/scripts/extract_targets.py | juzb/DeeProtein | 487694a24abdb4656499111c8a8904dfcb1d98ab | [
"MIT"
] | null | null | null | DeeProtein/scripts/extract_targets.py | juzb/DeeProtein | 487694a24abdb4656499111c8a8904dfcb1d98ab | [
"MIT"
] | 5 | 2019-05-15T05:37:41.000Z | 2021-09-29T12:20:00.000Z | import pandas as pd
import sys
import os
# NOTE(review): this copy lost its first line ('import pandas as pd');
# pd.read_pickle below requires it.
# Usage: extract_targets.py <deepgo_pickle> <dataset_dir> <out_csv> <go_freq_out>
deep_go_data = sys.argv[1]
dataset_path_in = sys.argv[2]
dataset_path_out = sys.argv[3]
go_file_out = sys.argv[4]
# The pickle holds parallel 'targets' and 'gos' columns.
df = pd.read_pickle(deep_go_data)
targets = list(df.targets)
gos = list(df.gos)
go_freqs = {}
target2go = dict(zip(targets, gos))
target_path = os.path.join(dataset_path_in, 'Target files')
map_path = os.path.join(dataset_path_in, 'Mapping files')
# Collect the numeric file ids from the 'sp_species' mapping file names.
file_numbers = []
map_files = os.listdir(map_path)
sp_map_files = [f for f in map_files if 'sp_species' in f]
for file in sp_map_files:
    file_numbers.append(file.split('.')[1])
with open(dataset_path_out, 'w') as ofile:
    for file_number in file_numbers:
        print('Working with file {}'.format(file_number))
        target_file = os.path.join(target_path, 'target.{}.fasta'.format(file_number))
        # retrieve gos from uniprot
        seq = ""
        saving = False
        with open(target_file, 'r') as ifile:
            for line in ifile:
                if line.startswith('>'):
                    # New FASTA header: first flush the previous record.
                    # NOTE(review): the LAST record of each file is never
                    # flushed -- nothing runs at EOF; confirm intent.
                    if saving:
                        saving = False
                        gos = target2go[target_id]
                        for go in gos:
                            if not go in go_freqs:
                                go_freqs[go] = 1
                            else:
                                go_freqs[go] += 1
                        if not len(seq) > 1000:
                            if not len(gos) == 0:
                                ofile.write('{};{};{}\n'.format(target_id, seq, ','.join(gos)))
                            else:
                                print('No gos found for target {}'.format(target_id))
                        else:
                            print('Sequence too long for target {}'.format(target_id))
                        seq = ""
                    target_id = line[1:].split()[0].strip()
                    if target_id in target2go:
                        print('OK for tareget {}'.format(target_id))
                        targets.remove(target_id)
                        saving = True
                    else:
                        saving = False
                else:
                    if saving:
                        seq += line.strip()
print('Did not find entries for:\n{}'.format('\n'.join(targets)))
# GO terms sorted by descending frequency.
go_freq_keys = sorted(list(go_freqs.keys()), key=lambda x: -go_freqs[x])
with open(go_file_out, 'w') as ofile:
    for go in go_freq_keys:
        ofile.write('    {} {}.csv\n'.format(go_freqs[go], go))
| 35.380282 | 95 | 0.501194 | import pandas as pd
import sys
import os
# NOTE(review): this copy lost its first line ('import pandas as pd');
# pd.read_pickle below requires it.
# Usage: extract_targets.py <deepgo_pickle> <dataset_dir> <out_csv> <go_freq_out>
deep_go_data = sys.argv[1]
dataset_path_in = sys.argv[2]
dataset_path_out = sys.argv[3]
go_file_out = sys.argv[4]
# The pickle holds parallel 'targets' and 'gos' columns.
df = pd.read_pickle(deep_go_data)
targets = list(df.targets)
gos = list(df.gos)
go_freqs = {}
target2go = dict(zip(targets, gos))
target_path = os.path.join(dataset_path_in, 'Target files')
map_path = os.path.join(dataset_path_in, 'Mapping files')
# Collect the numeric file ids from the 'sp_species' mapping file names.
file_numbers = []
map_files = os.listdir(map_path)
sp_map_files = [f for f in map_files if 'sp_species' in f]
for file in sp_map_files:
    file_numbers.append(file.split('.')[1])
with open(dataset_path_out, 'w') as ofile:
    for file_number in file_numbers:
        print('Working with file {}'.format(file_number))
        target_file = os.path.join(target_path, 'target.{}.fasta'.format(file_number))
        # retrieve gos from uniprot
        seq = ""
        saving = False
        with open(target_file, 'r') as ifile:
            for line in ifile:
                if line.startswith('>'):
                    # New FASTA header: first flush the previous record.
                    # NOTE(review): the LAST record of each file is never
                    # flushed -- nothing runs at EOF; confirm intent.
                    if saving:
                        saving = False
                        gos = target2go[target_id]
                        for go in gos:
                            if not go in go_freqs:
                                go_freqs[go] = 1
                            else:
                                go_freqs[go] += 1
                        if not len(seq) > 1000:
                            if not len(gos) == 0:
                                ofile.write('{};{};{}\n'.format(target_id, seq, ','.join(gos)))
                            else:
                                print('No gos found for target {}'.format(target_id))
                        else:
                            print('Sequence too long for target {}'.format(target_id))
                        seq = ""
                    target_id = line[1:].split()[0].strip()
                    if target_id in target2go:
                        print('OK for tareget {}'.format(target_id))
                        targets.remove(target_id)
                        saving = True
                    else:
                        saving = False
                else:
                    if saving:
                        seq += line.strip()
print('Did not find entries for:\n{}'.format('\n'.join(targets)))
# GO terms sorted by descending frequency.
go_freq_keys = sorted(list(go_freqs.keys()), key=lambda x: -go_freqs[x])
with open(go_file_out, 'w') as ofile:
    for go in go_freq_keys:
        ofile.write('    {} {}.csv\n'.format(go_freqs[go], go))
| 0 | 0 | 0 |
a445050476ee2d8a028886e92dc18e1d0a2333df | 717 | py | Python | getImage.py | tzuhan1106/Crawler_Of_Cosmetic | 2094388b248f269e787eeb90c01679735cc1e58c | [
"MIT"
] | 1 | 2016-04-04T09:42:47.000Z | 2016-04-04T09:42:47.000Z | getImage.py | tzuhan1106/Crawler_Of_Cosmetic | 2094388b248f269e787eeb90c01679735cc1e58c | [
"MIT"
] | null | null | null | getImage.py | tzuhan1106/Crawler_Of_Cosmetic | 2094388b248f269e787eeb90c01679735cc1e58c | [
"MIT"
] | null | null | null | import requests, json
from bs4 import BeautifulSoup
import shutil
import os
# NOTE(review): this copy lost its first line ('import requests, json');
# requests is required below.
# All downloaded images are written into the 'img' sub-directory.
try:
    os.chdir('img')
    #means cd path
except:
    # NOTE(review): bare except hides real errors; narrowing to OSError
    # would be safer.
    print('path error')
imageUrlPattern = 'http://www.watsons.com.tw'
res =requests.get('http://www.watsons.com.tw/%E7%86%B1%E9%8A%B7%E5%95%86%E5%93%81/c/bestSeller?q=:igcBestSeller:category:1041&page=5&resultsForPage=30&text=&sort=')
# NOTE(review): BeautifulSoup is called without an explicit parser, so the
# parser used depends on what is installed locally.
soup = BeautifulSoup(res.text)
for i in soup.select('img'):
    try:
        fname = i['alt']
        # Image URLs in the page are site-relative; prefix the host.
        imageUrl = imageUrlPattern + i['src']
        ires = requests.get(imageUrl,stream=True)
        f = open(fname,'wb')
        shutil.copyfileobj(ires.raw,f)
        f.close()
        del ires
    except Exception as e:
        # Skip any <img> tag lacking alt/src or failing to download.
        print(i)
        print(e)
from bs4 import BeautifulSoup
import shutil
import os
# NOTE(review): this copy lost its first line ('import requests, json');
# requests is required below.
# All downloaded images are written into the 'img' sub-directory.
try:
    os.chdir('img')
    #means cd path
except:
    # NOTE(review): bare except hides real errors; narrowing to OSError
    # would be safer.
    print('path error')
imageUrlPattern = 'http://www.watsons.com.tw'
res =requests.get('http://www.watsons.com.tw/%E7%86%B1%E9%8A%B7%E5%95%86%E5%93%81/c/bestSeller?q=:igcBestSeller:category:1041&page=5&resultsForPage=30&text=&sort=')
# NOTE(review): BeautifulSoup is called without an explicit parser, so the
# parser used depends on what is installed locally.
soup = BeautifulSoup(res.text)
for i in soup.select('img'):
    try:
        fname = i['alt']
        # Image URLs in the page are site-relative; prefix the host.
        imageUrl = imageUrlPattern + i['src']
        ires = requests.get(imageUrl,stream=True)
        f = open(fname,'wb')
        shutil.copyfileobj(ires.raw,f)
        f.close()
        del ires
    except Exception as e:
        # Skip any <img> tag lacking alt/src or failing to download.
        print(i)
        print(e)
321a802ba08a82beb1f2f48347ea60bfd848d06e | 11,155 | py | Python | pyatv/mrp/__init__.py | SylvainCecchetto/pyatv | b5386440ce6c170219975a17d6b6561026bfd790 | [
"MIT"
] | null | null | null | pyatv/mrp/__init__.py | SylvainCecchetto/pyatv | b5386440ce6c170219975a17d6b6561026bfd790 | [
"MIT"
] | null | null | null | pyatv/mrp/__init__.py | SylvainCecchetto/pyatv | b5386440ce6c170219975a17d6b6561026bfd790 | [
"MIT"
] | null | null | null | """Implementation of the MediaRemoteTV Protocol used by ATV4 and later."""
import logging
import asyncio
from datetime import datetime
from pyatv import (const, exceptions)
from pyatv.mrp import (messages, protobuf)
from pyatv.mrp.srp import SRPAuthHandler
from pyatv.mrp.connection import MrpConnection
from pyatv.mrp.protocol import MrpProtocol
from pyatv.mrp.protobuf import CommandInfo_pb2, SetStateMessage_pb2
from pyatv.mrp.player_state import PlayerStateManager
from pyatv.interface import (AppleTV, RemoteControl, Metadata,
Playing, PushUpdater)
_LOGGER = logging.getLogger(__name__)
# Source: https://github.com/Daij-Djan/DDHidLib/blob/master/usb_hid_usages.txt
# HID usage page/usage pair for each remote key; the third element is the
# button hold time in seconds before release.
_KEY_LOOKUP = {
    # name: [usage_page, usage, button hold time (seconds)]
    'up': [1, 0x8C, 0],
    'down': [1, 0x8D, 0],
    'left': [1, 0x8B, 0],
    'right': [1, 0x8A, 0],
    'stop': [12, 0xB7, 0],
    'next': [12, 0xB5, 0],
    'previous': [12, 0xB6, 0],
    'select': [1, 0x89, 0],
    'menu': [1, 0x86, 0],
    'top_menu': [12, 0x60, 0],
    'home': [12, 0x40, 0],
    'home_hold': [12, 0x40, 1],
    'suspend': [1, 0x82, 0],
    'volume_up': [12, 0xE9, 0],
    'volume_down': [12, 0xEA, 0],
    # 'mic': [12, 0x04, 0],  # Siri
}
class MrpRemoteControl(RemoteControl):
    """Implementation of API for controlling an Apple TV."""

    def __init__(self, loop, protocol):
        """Initialize a new MrpRemoteControl."""
        self.loop = loop
        self.protocol = protocol

    async def _press_key(self, key):
        """Send a HID press (button down, optional hold, button up) for a
        key named in _KEY_LOOKUP.

        Restored helper: every key method below calls it but it was
        missing from this copy of the file. NOTE(review): assumes
        messages.send_hid_event(usage_page, usage, down) exists in the
        messages module -- confirm the signature there.
        """
        lookup = _KEY_LOOKUP.get(key)
        if not lookup:
            raise Exception('unsupported key: ' + key)
        await self.protocol.send(
            messages.send_hid_event(lookup[0], lookup[1], True))
        if lookup[2]:
            # Keep the button down, e.g. for 'home_hold'.
            await asyncio.sleep(lookup[2])
        await self.protocol.send(
            messages.send_hid_event(lookup[0], lookup[1], False))

    def up(self):
        """Press key up."""
        return self._press_key('up')

    def down(self):
        """Press key down."""
        return self._press_key('down')

    def left(self):
        """Press key left."""
        return self._press_key('left')

    def right(self):
        """Press key right."""
        return self._press_key('right')

    def play(self):
        """Press key play."""
        return self.protocol.send(messages.command(CommandInfo_pb2.Play))

    def pause(self):
        """Press key pause."""
        return self.protocol.send(messages.command(CommandInfo_pb2.Pause))

    def stop(self):
        """Press key stop."""
        return self.protocol.send(messages.command(CommandInfo_pb2.Stop))

    def next(self):
        """Press key next."""
        return self.protocol.send(messages.command(CommandInfo_pb2.NextTrack))

    def previous(self):
        """Press key previous."""
        return self.protocol.send(
            messages.command(CommandInfo_pb2.PreviousTrack))

    def select(self):
        """Press key select."""
        return self._press_key('select')

    def menu(self):
        """Press key menu."""
        return self._press_key('menu')

    def volume_up(self):
        """Press key volume up."""
        return self._press_key('volume_up')

    def volume_down(self):
        """Press key volume down."""
        return self._press_key('volume_down')

    def home(self):
        """Press key home."""
        return self._press_key('home')

    def home_hold(self):
        """Hold key home."""
        return self._press_key('home_hold')

    def top_menu(self):
        """Go to main menu (long press menu)."""
        return self._press_key('top_menu')

    def suspend(self):
        """Suspend the device."""
        return self._press_key('suspend')

    def set_position(self, pos):
        """Seek in the current playing media."""
        return self.protocol.send(messages.seek_to_position(pos))

    def set_shuffle(self, is_on):
        """Change shuffle mode to on or off."""
        return self.protocol.send(messages.shuffle(is_on))

    def set_repeat(self, repeat_mode):
        """Change repeat mode.

        Raises ValueError for anything but the three REPEAT_STATE_*
        constants.
        """
        # TODO: extract to convert module
        if int(repeat_mode) == const.REPEAT_STATE_OFF:
            state = 1
        elif int(repeat_mode) == const.REPEAT_STATE_ALL:
            state = 2
        elif int(repeat_mode) == const.REPEAT_STATE_TRACK:
            state = 3
        else:
            raise ValueError('Invalid repeat mode: ' + str(repeat_mode))
        return self.protocol.send(messages.repeat(state))
class MrpPlaying(Playing):
    """Implementation of API for retrieving what is playing."""

    def __init__(self, state):
        """Initialize a new MrpPlaying."""
        self._state = state

    def _get_command_info(self, command):
        """Return command info for *command* from the player state, or None.

        Restored helper: the shuffle/repeat properties below call it but
        it was missing from this copy of the file. NOTE(review): assumes
        the player state exposes an iterable 'supported_commands' of
        CommandInfo messages -- confirm against pyatv.mrp.player_state.
        """
        for cmd in self._state.supported_commands:
            if cmd.command == command:
                return cmd
        return None

    @property
    def media_type(self):
        """Type of media is currently playing, e.g. video, music."""
        if self._state.metadata:
            media_type = self._state.metadata.mediaType
            cim = protobuf.ContentItemMetadata_pb2.ContentItemMetadata
            if media_type == cim.Audio:
                return const.MEDIA_TYPE_MUSIC
            if media_type == cim.Video:
                return const.MEDIA_TYPE_VIDEO
        return const.MEDIA_TYPE_UNKNOWN

    @property
    def play_state(self):
        """Play state, e.g. playing or paused."""
        if self._state is None:
            return const.PLAY_STATE_IDLE
        state = self._state.playback_state
        ssm = SetStateMessage_pb2.SetStateMessage
        if state == ssm.Playing:
            return const.PLAY_STATE_PLAYING
        if state == ssm.Paused:
            return const.PLAY_STATE_PAUSED
        if state == ssm.Stopped:
            return const.PLAY_STATE_STOPPED
        if state == ssm.Interrupted:
            return const.PLAY_STATE_LOADING
        # if state == SetStateMessage_pb2.Seeking
        #    return XXX
        return const.PLAY_STATE_PAUSED

    @property
    def title(self):
        """Title of the current media, e.g. movie or song name."""
        return self._state.metadata_field('title')

    @property
    def artist(self):
        """Artist of the currently playing song."""
        return self._state.metadata_field('trackArtistName')

    @property
    def album(self):
        """Album of the currently playing song."""
        return self._state.metadata_field('albumName')

    @property
    def genre(self):
        """Genre of the currently playing song."""
        return self._state.metadata_field('genre')

    @property
    def total_time(self):
        """Total play time in seconds."""
        duration = self._state.metadata_field('duration')
        return None if duration is None else int(duration)

    @property
    def position(self):
        """Position in the playing media (seconds)."""
        elapsed_time = self._state.metadata_field('elapsedTime')
        if elapsed_time:
            # Extrapolate from the time the state snapshot was taken.
            diff = (datetime.now() - self._state.timestamp).total_seconds()
            if self.play_state == const.PLAY_STATE_PLAYING:
                return int(elapsed_time + diff)
            return int(elapsed_time)
        return None

    @property
    def shuffle(self):
        """If shuffle is enabled or not."""
        info = self._get_command_info(CommandInfo_pb2.ChangeShuffleMode)
        return None if info is None else info.shuffleMode

    @property
    def repeat(self):
        """Repeat mode."""
        info = self._get_command_info(CommandInfo_pb2.ChangeRepeatMode)
        return None if info is None else info.repeatMode
class MrpMetadata(Metadata):
    """Implementation of API for retrieving metadata."""
    def __init__(self, psm, identifier):
        """Initialize a new MrpMetadata."""
        super().__init__(identifier)
        self.psm = psm
    async def artwork(self):
        """Return artwork for what is currently playing (or None)."""
        # Artwork retrieval is not implemented for the MRP protocol.
        raise exceptions.NotSupportedError
    async def playing(self):
        """Return what is currently playing."""
        # Wraps the player state manager's current state in the Playing API.
        return MrpPlaying(self.psm.playing)
class MrpPushUpdater(PushUpdater):
    """Implementation of API for handling push update from an Apple TV."""
    def __init__(self, loop, metadata, psm):
        """Initialize a new MrpPushUpdater instance."""
        super().__init__()
        self.loop = loop
        self.metadata = metadata
        self.psm = psm
        self.listener = None
    def start(self, initial_delay=0):
        """Wait for push updates from device.

        Will throw NoAsyncListenerError if no listener has been set.
        """
        # NOTE(review): initial_delay is not used by this implementation.
        if self.listener is None:
            raise exceptions.NoAsyncListenerError
        self.psm.listener = self
    def stop(self):
        """No longer wait for push updates."""
        self.psm.listener = None
    async def state_updated(self):
        """State was updated for active player."""
        playstatus = await self.metadata.playing()
        # Schedule the listener callback on the event loop rather than
        # calling it directly.
        self.loop.call_soon(
            self.listener.playstatus_update, self, playstatus)
class MrpAppleTV(AppleTV):
    """Implementation of API support for Apple TV."""
    # This is a container class so it's OK with many attributes
    # pylint: disable=too-many-instance-attributes
    def __init__(self, loop, session, config, airplay):
        """Initialize a new Apple TV.

        :param loop: asyncio event loop
        :param session: client session closed by close() -- presumably
            aiohttp, TODO confirm
        :param config: device configuration exposing address, identifier
            and get_service()
        :param airplay: AirPlay API implementation exposed via the
            airplay property
        """
        super().__init__()
        self._session = session
        self._mrp_service = config.get_service(const.PROTOCOL_MRP)
        # Wiring: connection -> SRP auth -> protocol -> player state manager.
        self._connection = MrpConnection(
            config.address, self._mrp_service.port, loop)
        self._srp = SRPAuthHandler()
        self._protocol = MrpProtocol(
            loop, self._connection, self._srp, self._mrp_service)
        self._psm = PlayerStateManager(self._protocol, loop)
        self._mrp_remote = MrpRemoteControl(loop, self._protocol)
        self._mrp_metadata = MrpMetadata(self._psm, config.identifier)
        self._mrp_push_updater = MrpPushUpdater(
            loop, self._mrp_metadata, self._psm)
        self._airplay = airplay
    async def connect(self):
        """Initiate connection to device.

        Not needed as it is performed automatically.
        """
        await self._protocol.start()
    async def close(self):
        """Close connection and release allocated resources."""
        await self._session.close()
        self._protocol.stop()
    @property
    def service(self):
        """Return service used to connect to the Apple TV."""
        return self._mrp_service
    @property
    def remote_control(self):
        """Return API for controlling the Apple TV."""
        return self._mrp_remote
    @property
    def metadata(self):
        """Return API for retrieving metadata from Apple TV."""
        return self._mrp_metadata
    @property
    def push_updater(self):
        """Return API for handling push update from the Apple TV."""
        return self._mrp_push_updater
    @property
    def airplay(self):
        """Return API for working with AirPlay."""
        return self._airplay
| 30.900277 | 78 | 0.622143 | """Implementation of the MediaRemoteTV Protocol used by ATV4 and later."""
import logging
import asyncio
from datetime import datetime
from pyatv import (const, exceptions)
from pyatv.mrp import (messages, protobuf)
from pyatv.mrp.srp import SRPAuthHandler
from pyatv.mrp.connection import MrpConnection
from pyatv.mrp.protocol import MrpProtocol
from pyatv.mrp.protobuf import CommandInfo_pb2, SetStateMessage_pb2
from pyatv.mrp.player_state import PlayerStateManager
from pyatv.interface import (AppleTV, RemoteControl, Metadata,
Playing, PushUpdater)
_LOGGER = logging.getLogger(__name__)
# Source: https://github.com/Daij-Djan/DDHidLib/blob/master/usb_hid_usages.txt
# Usage page 1 is "Generic Desktop" and 12 is "Consumer" per the
# USB HID usage tables.
_KEY_LOOKUP = {
    # name: [usage_page, usage, button hold time (seconds)]
    'up': [1, 0x8C, 0],
    'down': [1, 0x8D, 0],
    'left': [1, 0x8B, 0],
    'right': [1, 0x8A, 0],
    'stop': [12, 0xB7, 0],
    'next': [12, 0xB5, 0],
    'previous': [12, 0xB6, 0],
    'select': [1, 0x89, 0],
    'menu': [1, 0x86, 0],
    'top_menu': [12, 0x60, 0],
    'home': [12, 0x40, 0],
    'home_hold': [12, 0x40, 1],  # same key as 'home' but held for 1s
    'suspend': [1, 0x82, 0],
    'volume_up': [12, 0xE9, 0],
    'volume_down': [12, 0xEA, 0],
    # 'mic': [12, 0x04, 0], # Siri
}
class MrpRemoteControl(RemoteControl):
    """Implementation of API for controlling an Apple TV."""

    def __init__(self, loop, protocol):
        """Initialize a new MrpRemoteControl.

        :param loop: asyncio event loop
        :param protocol: MrpProtocol used to talk to the device
        """
        self.loop = loop
        self.protocol = protocol

    async def _press_key(self, key):
        """Send press and release HID events for a key in _KEY_LOOKUP.

        Raises ValueError for unknown key names.
        """
        lookup = _KEY_LOOKUP.get(key, None)
        if not lookup:
            # ValueError (a subclass of Exception) is more precise than the
            # generic Exception raised previously; existing handlers that
            # catch Exception still work.
            raise ValueError('unknown key: ' + key)
        await self.protocol.send(
            messages.send_hid_event(lookup[0], lookup[1], True))
        await asyncio.sleep(lookup[2])  # hold time in seconds
        await self.protocol.send(
            messages.send_hid_event(lookup[0], lookup[1], False))

    def up(self):
        """Press key up."""
        return self._press_key('up')

    def down(self):
        """Press key down."""
        return self._press_key('down')

    def left(self):
        """Press key left."""
        return self._press_key('left')

    def right(self):
        """Press key right."""
        return self._press_key('right')

    def play(self):
        """Press key play."""
        return self.protocol.send(messages.command(CommandInfo_pb2.Play))

    def pause(self):
        """Press key pause."""
        # Docstring previously said "Press key play" (copy/paste error).
        return self.protocol.send(messages.command(CommandInfo_pb2.Pause))

    def stop(self):
        """Press key stop."""
        return self.protocol.send(messages.command(CommandInfo_pb2.Stop))

    def next(self):
        """Press key next."""
        return self.protocol.send(messages.command(CommandInfo_pb2.NextTrack))

    def previous(self):
        """Press key previous."""
        return self.protocol.send(
            messages.command(CommandInfo_pb2.PreviousTrack))

    def select(self):
        """Press key select."""
        return self._press_key('select')

    def menu(self):
        """Press key menu."""
        return self._press_key('menu')

    def volume_up(self):
        """Press key volume up."""
        return self._press_key('volume_up')

    def volume_down(self):
        """Press key volume down."""
        return self._press_key('volume_down')

    def home(self):
        """Press key home."""
        return self._press_key('home')

    def home_hold(self):
        """Hold key home."""
        return self._press_key('home_hold')

    def top_menu(self):
        """Go to main menu (long press menu)."""
        return self._press_key('top_menu')

    def suspend(self):
        """Suspend the device."""
        return self._press_key('suspend')

    def set_position(self, pos):
        """Seek in the current playing media."""
        return self.protocol.send(messages.seek_to_position(pos))

    def set_shuffle(self, is_on):
        """Change shuffle mode to on or off."""
        return self.protocol.send(messages.shuffle(is_on))

    def set_repeat(self, repeat_mode):
        """Change repeat mode.

        Raises ValueError for unsupported repeat modes.
        """
        # TODO: extract to convert module
        state_map = {
            const.REPEAT_STATE_OFF: 1,
            const.REPEAT_STATE_ALL: 2,
            const.REPEAT_STATE_TRACK: 3,
        }
        mode = int(repeat_mode)
        if mode not in state_map:
            raise ValueError('Invalid repeat mode: ' + str(repeat_mode))
        return self.protocol.send(messages.repeat(state_map[mode]))
class MrpPlaying(Playing):
"""Implementation of API for retrieving what is playing."""
def __init__(self, state):
"""Initialize a new MrpPlaying."""
self._state = state
@property
def media_type(self):
"""Type of media is currently playing, e.g. video, music."""
if self._state.metadata:
media_type = self._state.metadata.mediaType
cim = protobuf.ContentItemMetadata_pb2.ContentItemMetadata
if media_type == cim.Audio:
return const.MEDIA_TYPE_MUSIC
if media_type == cim.Video:
return const.MEDIA_TYPE_VIDEO
return const.MEDIA_TYPE_UNKNOWN
@property
def play_state(self):
"""Play state, e.g. playing or paused."""
if self._state is None:
return const.PLAY_STATE_IDLE
state = self._state.playback_state
ssm = SetStateMessage_pb2.SetStateMessage
if state == ssm.Playing:
return const.PLAY_STATE_PLAYING
if state == ssm.Paused:
return const.PLAY_STATE_PAUSED
if state == ssm.Stopped:
return const.PLAY_STATE_STOPPED
if state == ssm.Interrupted:
return const.PLAY_STATE_LOADING
# if state == SetStateMessage_pb2.Seeking
# return XXX
return const.PLAY_STATE_PAUSED
@property
def title(self):
"""Title of the current media, e.g. movie or song name."""
return self._state.metadata_field('title')
@property
def artist(self):
"""Artist of the currently playing song."""
return self._state.metadata_field('trackArtistName')
@property
def album(self):
"""Album of the currently playing song."""
return self._state.metadata_field('albumName')
@property
def genre(self):
"""Genre of the currently playing song."""
return self._state.metadata_field('genre')
@property
def total_time(self):
"""Total play time in seconds."""
duration = self._state.metadata_field('duration')
return None if duration is None else int(duration)
@property
def position(self):
"""Position in the playing media (seconds)."""
elapsed_time = self._state.metadata_field('elapsedTime')
if elapsed_time:
diff = (datetime.now() - self._state.timestamp).total_seconds()
if self.play_state == const.PLAY_STATE_PLAYING:
return int(elapsed_time + diff)
return int(elapsed_time)
return None
def _get_command_info(self, command):
for cmd in self._state.supported_commands:
if cmd.command == command:
return cmd
return None
@property
def shuffle(self):
"""If shuffle is enabled or not."""
info = self._get_command_info(CommandInfo_pb2.ChangeShuffleMode)
return None if info is None else info.shuffleMode
@property
def repeat(self):
"""Repeat mode."""
info = self._get_command_info(CommandInfo_pb2.ChangeRepeatMode)
return None if info is None else info.repeatMode
class MrpMetadata(Metadata):
    """Implementation of API for retrieving metadata."""
    def __init__(self, psm, identifier):
        """Initialize a new MrpMetadata."""
        super().__init__(identifier)
        self.psm = psm
    async def artwork(self):
        """Return artwork for what is currently playing (or None)."""
        # Artwork retrieval is not implemented for the MRP protocol.
        raise exceptions.NotSupportedError
    async def playing(self):
        """Return what is currently playing."""
        # Wraps the player state manager's current state in the Playing API.
        return MrpPlaying(self.psm.playing)
class MrpPushUpdater(PushUpdater):
    """Implementation of API for handling push update from an Apple TV."""
    def __init__(self, loop, metadata, psm):
        """Initialize a new MrpPushUpdater instance."""
        super().__init__()
        self.loop = loop
        self.metadata = metadata
        self.psm = psm
        self.listener = None
    def start(self, initial_delay=0):
        """Wait for push updates from device.

        Will throw NoAsyncListenerError if no listener has been set.
        """
        # NOTE(review): initial_delay is not used by this implementation.
        if self.listener is None:
            raise exceptions.NoAsyncListenerError
        self.psm.listener = self
    def stop(self):
        """No longer wait for push updates."""
        self.psm.listener = None
    async def state_updated(self):
        """State was updated for active player."""
        playstatus = await self.metadata.playing()
        # Schedule the listener callback on the event loop rather than
        # calling it directly.
        self.loop.call_soon(
            self.listener.playstatus_update, self, playstatus)
class MrpAppleTV(AppleTV):
"""Implementation of API support for Apple TV."""
# This is a container class so it's OK with many attributes
# pylint: disable=too-many-instance-attributes
def __init__(self, loop, session, config, airplay):
"""Initialize a new Apple TV."""
super().__init__()
self._session = session
self._mrp_service = config.get_service(const.PROTOCOL_MRP)
self._connection = MrpConnection(
config.address, self._mrp_service.port, loop)
self._srp = SRPAuthHandler()
self._protocol = MrpProtocol(
loop, self._connection, self._srp, self._mrp_service)
self._psm = PlayerStateManager(self._protocol, loop)
self._mrp_remote = MrpRemoteControl(loop, self._protocol)
self._mrp_metadata = MrpMetadata(self._psm, config.identifier)
self._mrp_push_updater = MrpPushUpdater(
loop, self._mrp_metadata, self._psm)
self._airplay = airplay
async def connect(self):
"""Initiate connection to device.
Not needed as it is performed automatically.
"""
await self._protocol.start()
async def close(self):
"""Close connection and release allocated resources."""
await self._session.close()
self._protocol.stop()
@property
def service(self):
"""Return service used to connect to the Apple TV."""
return self._mrp_service
@property
def remote_control(self):
"""Return API for controlling the Apple TV."""
return self._mrp_remote
@property
def metadata(self):
"""Return API for retrieving metadata from Apple TV."""
return self._mrp_metadata
@property
def push_updater(self):
"""Return API for handling push update from the Apple TV."""
return self._mrp_push_updater
@property
def airplay(self):
"""Return API for working with AirPlay."""
return self._airplay
| 550 | 0 | 54 |
09f3c8ba4bc1ac8de8b0e61c1b08a475d36e80c0 | 14,319 | py | Python | quantipy/core/cluster.py | encount/quantipy3 | 01fe350b79594ba162cd48ce91f6e547e74265fe | [
"MIT"
] | null | null | null | quantipy/core/cluster.py | encount/quantipy3 | 01fe350b79594ba162cd48ce91f6e547e74265fe | [
"MIT"
] | null | null | null | quantipy/core/cluster.py | encount/quantipy3 | 01fe350b79594ba162cd48ce91f6e547e74265fe | [
"MIT"
] | null | null | null | from .chain import Chain
import pickle
from collections import OrderedDict
import pandas as pd
import copy
from quantipy.core.tools.view.query import get_dataframe
from quantipy.core.helpers.functions import get_text
import os
class Cluster(OrderedDict):
"""
Container class in form of an OrderedDict of Chains.
It is possible to interact with individual Chains through the Cluster
object. Clusters are mainly used to prepare aggregations for an export/
build, e.g. MS Excel Workbooks.
"""
def _verify_banked_chain_spec(self, spec):
"""
Verify chain conforms to the expected banked chain structure.
"""
if not type(spec) is dict:
return False
try:
ctype = spec['type']
cname = spec['name']
ctext = spec['text']
citems = spec['items']
cbases = spec['bases']
for c in citems:
cichain = c['chain']
citext = c['text']
except:
return False
if not ctype=='banked-chain':
return False
if not isinstance(cname, str):
return False
if not isinstance(ctext, dict):
return False
for key, value in list(ctext.items()):
if not isinstance(key, str):
return False
if not isinstance(value, str):
return False
if not isinstance(citems, list):
return False
if not isinstance(cbases, bool):
return False
if not all([isinstance(item['chain'], Chain) for item in citems]):
return False
if not all([isinstance(item['text'], dict) for item in citems]):
return False
if not all([len(item['text'])>0 for item in citems]):
return False
for item in citems:
for key, value in list(item['text'].items()):
if not isinstance(key, str):
return False
if not isinstance(value, str):
return False
cview = spec.get('view', None)
if cview is None:
for c in citems:
if 'view' in c:
if not isinstance(c['view'], str):
return False
else:
return False
else:
if not isinstance(cview, str):
return False
return True
def add_chain(self, chains=None):
""" Adds chains to a cluster """
# If a single item was supplied, change it to a list of items
is_banked_spec = False
if not isinstance(chains, (list, Chain, pd.DataFrame, dict)):
raise TypeError(
"You must pass either a Chain, a list of Chains or a"
" banked chain definition (as a dict) into"
" Cluster.add_chain().")
elif isinstance(chains, dict):
if chains.get('type', None)=='banked-chain':
is_banked_spec = True
if not self._verify_banked_chain_spec(chains):
raise TypeError(
"Your banked-chain definition is not correctly"
" formed. Please check it again.")
if isinstance(chains, Chain):
self[chains.name] = chains
elif is_banked_spec:
self[chains.get('name')] = chains
elif isinstance(chains, list) and all([
isinstance(chain, Chain) or \
self._verify_banked_chain_spec(chain)
for chain in chains]):
# Ensure that all items in chains is of the type Chain.
for chain in chains:
if chain.get('type', None)=='banked-chain':
self[chain.get('name')] = chain
else:
self[chain.name] = chain
elif isinstance(chains, pd.DataFrame):
if any([
isinstance(idx, pd.MultiIndex)
for idx in [chains.index, chains.columns]]):
if isinstance(chains.index, pd.MultiIndex):
idxs = '_'.join(chains.index.levels[0].tolist())
else:
idxs = chains.index
if isinstance(chains.columns, pd.MultiIndex):
cols = '_'.join(chains.columns.levels[0].tolist())
else:
idxs = chains.columns
self['_|_'.join([idxs, cols])] = chains
else:
self['_'.join(chains.columns.tolist())] = chains
else:
# One or more of the items in chains is not a chain.
raise TypeError("One or more of the supplied chains has an inappropriate type.")
def bank_chains(self, spec, text_key):
"""
Return a banked chain as defined by spec.
This method returns a banked or compound chain where the spec
describes how the view results from multiple chains should be
banked together into the same set of dataframes in a single
chain.
Parameters
----------
spec : dict
The banked chain specification object.
text_key : str, default='values'
Paint the x-axis of the banked chain using the spec provided
and this text_key.
Returns
-------
bchain : quantipy.Chain
The banked chain.
"""
if isinstance(text_key, str):
text_key = {'x': [text_key]}
chains = [c['chain'] for c in spec['items']]
bchain = chains[0].copy()
dk = bchain.data_key
fk = bchain.filter
xk = bchain.source_name
yks = bchain.content_of_axis
vk = spec.get('view', None)
if vk is None:
vk = spec['items'][0]['view']
else:
get_vk = False
for i, item in enumerate(spec['items']):
if not 'view' in item:
spec['items'][i].update({'view': vk})
vks = list(set([item['view'] for item in spec['items']]))
if len(vks)==1:
notation = vks[0].split('|')
notation[-1] = 'banked-{}'.format(spec['name'])
bvk = '|'.join(notation)
else:
base_method = vks[0].split('|')[1]
same_method = all([
vk.split('|')[1]==base_method
for vk in vks[1:]])
if same_method:
bvk = 'x|{}||||banked-{}'.format(base_method, spec['name'])
else:
bvk = 'x|||||banked-{}'.format(spec['name'])
for yk in list(bchain[dk][fk][xk].keys()):
bchain[dk][fk][xk][yk][bvk] = bchain[dk][fk][xk][yk].pop(vks[0])
bchain[dk][fk][xk][yk][bvk].name = bvk
bchain.views = [
vk_test
for vk_test in bchain.views
if 'cbase' in vk_test
]
bchain.views.append(bvk)
# Auto-painting approach
idx_cbase = pd.MultiIndex.from_tuples([
(get_text(spec['text'], text_key, 'x'), 'cbase')],
names=['Question', 'Values'])
# Non-auto-painting approach
# idx_cbase = pd.MultiIndex.from_tuples([
# (spec['name'], 'cbase')],
# names=['Question', 'Values'])
idx_banked = []
banked = {}
for yk in yks:
banked[yk] = []
for c, chain in enumerate(chains):
xk = chain.source_name
vk_temp = spec['items'][c]['view']
# print xk, yk, vk_temp
df = get_dataframe(chain, keys=[dk, fk, xk, yk, vk_temp])
if isinstance(idx_banked, list):
idx_banked.extend([
(spec['name'], '{}:{}'.format(xk, value[1]))
for value in df.index.values
])
banked[yk].append(df)
banked[yk] = pd.concat(banked[yk], axis=0)
if banked[yk].columns.levels[1][0]=='@':
banked[yk] = pd.DataFrame(
banked[yk].max(axis=1),
index=banked[yk].index,
columns=pd.MultiIndex.from_tuples(
[(spec['name'], '@')],
names=['Question', 'Values'])
)
xk = bchain.source_name
if isinstance(idx_banked, list):
banked_values_meta = [
{'value': idx[1], 'text': spec['items'][i]['text']}
for i, idx in enumerate(idx_banked)]
bchain.banked_meta = {
'name': spec['name'],
'type': spec['type'],
'text': spec['text'],
'values': banked_values_meta
}
# When switching to non-auto-painting, use this
# idx_banked = pd.MultiIndex.from_tuples(
# idx_banked,
# names=['Question', 'Values'])
# Auto-painting
question_text = get_text(spec['text'], text_key, 'x')
idx_banked = pd.MultiIndex.from_tuples([
(question_text, get_text(value['text'], text_key, 'x'))
for i, value in enumerate(bchain.banked_meta['values'])],
names=['Question', 'Values'])
banked[yk].index = idx_banked
bchain[dk][fk][xk][yk][bvk].dataframe = banked[yk]
bchain[dk][fk][xk][yk][bvk]._notation = bvk
# bchain[dk][fk][xk][yk][bvk].meta()['shape'] = banked[yk].shape
bchain[dk][fk][xk][yk][bvk]._x['name'] = spec['name']
bchain[dk][fk][xk][yk][bvk]._x['size'] = banked[yk].shape[0]
bchain.name = 'banked-{}'.format(bchain.name)
for yk in yks:
for vk in list(bchain[dk][fk][xk][yk].keys()):
if vk in bchain.views:
if 'cbase' in vk:
bchain[dk][fk][xk][yk][vk].dataframe.index = idx_cbase
bchain[dk][fk][xk][yk][vk]._x['name'] = spec['name']
else:
del bchain[dk][fk][xk][yk][vk]
bchain[dk][fk][spec['name']] = bchain[dk][fk].pop(xk)
bchain.props_tests = list()
bchain.props_tests_levels = list()
bchain.means_tests = list()
bchain.means_tests_levels = list()
bchain.has_props_tests = False
bchain.has_means_tests = False
bchain.annotations = None
bchain.is_banked = True
bchain.source_name = spec['name']
bchain.banked_view_key = bvk
bchain.banked_spec = spec
for i, item in enumerate(spec['items']):
bchain.banked_spec['items'][i]['chain'] = item['chain'].name
return bchain
def _build(self, type):
""" The Build exports the chains using methods supplied with 'type'. """
pass
def merge(self):
"""
Merges all Chains found in the Cluster into a new pandas.DataFrame.
"""
orient = self[list(self.keys())[0]].orientation
chainnames = list(self.keys())
if orient == 'y':
return pd.concat([self[chainname].concat()
for chainname in chainnames], axis=1)
else:
return pd.concat([self[chainname].concat()
for chainname in chainnames], axis=0)
def save(self, path_cluster):
"""
Load Stack instance from .stack file.
Parameters
----------
path_cluster : str
The full path to the .cluster file that should be created, including
the extension.
Returns
-------
None
"""
if not path_cluster.endswith('.cluster'):
raise ValueError(
"To avoid ambiguity, when using Cluster.save() you must provide the full path to "
"the cluster file you want to create, including the file extension. For example: "
"cluster.save(path_cluster='./output/MyCluster.cluster'). Your call looks like this: "
"cluster.save(path_cluster='%s', ...)" % (path_cluster)
)
f = open(path_cluster, 'wb')
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
f.close()
# STATIC METHODS
@staticmethod
def load(path_cluster):
"""
Load Stack instance from .stack file.
Parameters
----------
path_cluster : str
The full path to the .cluster file that should be created, including
the extension.
Returns
-------
None
"""
if not path_cluster.endswith('.cluster'):
raise ValueError(
"To avoid ambiguity, when using Cluster.load() you must provide the full path to "
"the cluster file you want to create, including the file extension. For example: "
"cluster.load(path_cluster='./output/MyCluster.cluster'). Your call looks like this: "
"cluster.load(path_cluster='%s', ...)" % (path_cluster)
)
f = open(path_cluster, 'rb')
obj = pickle.load(f)
f.close()
return obj | 37.681579 | 103 | 0.488163 | from .chain import Chain
import pickle
from collections import OrderedDict
import pandas as pd
import copy
from quantipy.core.tools.view.query import get_dataframe
from quantipy.core.helpers.functions import get_text
import os
class Cluster(OrderedDict):
"""
Container class in form of an OrderedDict of Chains.
It is possible to interact with individual Chains through the Cluster
object. Clusters are mainly used to prepare aggregations for an export/
build, e.g. MS Excel Workbooks.
"""
def __init__(self, name=""):
super(Cluster, self).__init__()
self.name = name
def __setstate__(self, attr_dict):
self.__dict__.update(attr_dict)
def __reduce__(self):
return self.__class__, (self.name, ), self.__dict__, None, iter(list(self.items()))
def _verify_banked_chain_spec(self, spec):
"""
Verify chain conforms to the expected banked chain structure.
"""
if not type(spec) is dict:
return False
try:
ctype = spec['type']
cname = spec['name']
ctext = spec['text']
citems = spec['items']
cbases = spec['bases']
for c in citems:
cichain = c['chain']
citext = c['text']
except:
return False
if not ctype=='banked-chain':
return False
if not isinstance(cname, str):
return False
if not isinstance(ctext, dict):
return False
for key, value in list(ctext.items()):
if not isinstance(key, str):
return False
if not isinstance(value, str):
return False
if not isinstance(citems, list):
return False
if not isinstance(cbases, bool):
return False
if not all([isinstance(item['chain'], Chain) for item in citems]):
return False
if not all([isinstance(item['text'], dict) for item in citems]):
return False
if not all([len(item['text'])>0 for item in citems]):
return False
for item in citems:
for key, value in list(item['text'].items()):
if not isinstance(key, str):
return False
if not isinstance(value, str):
return False
cview = spec.get('view', None)
if cview is None:
for c in citems:
if 'view' in c:
if not isinstance(c['view'], str):
return False
else:
return False
else:
if not isinstance(cview, str):
return False
return True
def add_chain(self, chains=None):
""" Adds chains to a cluster """
# If a single item was supplied, change it to a list of items
is_banked_spec = False
if not isinstance(chains, (list, Chain, pd.DataFrame, dict)):
raise TypeError(
"You must pass either a Chain, a list of Chains or a"
" banked chain definition (as a dict) into"
" Cluster.add_chain().")
elif isinstance(chains, dict):
if chains.get('type', None)=='banked-chain':
is_banked_spec = True
if not self._verify_banked_chain_spec(chains):
raise TypeError(
"Your banked-chain definition is not correctly"
" formed. Please check it again.")
if isinstance(chains, Chain):
self[chains.name] = chains
elif is_banked_spec:
self[chains.get('name')] = chains
elif isinstance(chains, list) and all([
isinstance(chain, Chain) or \
self._verify_banked_chain_spec(chain)
for chain in chains]):
# Ensure that all items in chains is of the type Chain.
for chain in chains:
if chain.get('type', None)=='banked-chain':
self[chain.get('name')] = chain
else:
self[chain.name] = chain
elif isinstance(chains, pd.DataFrame):
if any([
isinstance(idx, pd.MultiIndex)
for idx in [chains.index, chains.columns]]):
if isinstance(chains.index, pd.MultiIndex):
idxs = '_'.join(chains.index.levels[0].tolist())
else:
idxs = chains.index
if isinstance(chains.columns, pd.MultiIndex):
cols = '_'.join(chains.columns.levels[0].tolist())
else:
idxs = chains.columns
self['_|_'.join([idxs, cols])] = chains
else:
self['_'.join(chains.columns.tolist())] = chains
else:
# One or more of the items in chains is not a chain.
raise TypeError("One or more of the supplied chains has an inappropriate type.")
def bank_chains(self, spec, text_key):
"""
Return a banked chain as defined by spec.
This method returns a banked or compound chain where the spec
describes how the view results from multiple chains should be
banked together into the same set of dataframes in a single
chain.
Parameters
----------
spec : dict
The banked chain specification object.
text_key : str, default='values'
Paint the x-axis of the banked chain using the spec provided
and this text_key.
Returns
-------
bchain : quantipy.Chain
The banked chain.
"""
if isinstance(text_key, str):
text_key = {'x': [text_key]}
chains = [c['chain'] for c in spec['items']]
bchain = chains[0].copy()
dk = bchain.data_key
fk = bchain.filter
xk = bchain.source_name
yks = bchain.content_of_axis
vk = spec.get('view', None)
if vk is None:
vk = spec['items'][0]['view']
else:
get_vk = False
for i, item in enumerate(spec['items']):
if not 'view' in item:
spec['items'][i].update({'view': vk})
vks = list(set([item['view'] for item in spec['items']]))
if len(vks)==1:
notation = vks[0].split('|')
notation[-1] = 'banked-{}'.format(spec['name'])
bvk = '|'.join(notation)
else:
base_method = vks[0].split('|')[1]
same_method = all([
vk.split('|')[1]==base_method
for vk in vks[1:]])
if same_method:
bvk = 'x|{}||||banked-{}'.format(base_method, spec['name'])
else:
bvk = 'x|||||banked-{}'.format(spec['name'])
for yk in list(bchain[dk][fk][xk].keys()):
bchain[dk][fk][xk][yk][bvk] = bchain[dk][fk][xk][yk].pop(vks[0])
bchain[dk][fk][xk][yk][bvk].name = bvk
bchain.views = [
vk_test
for vk_test in bchain.views
if 'cbase' in vk_test
]
bchain.views.append(bvk)
# Auto-painting approach
idx_cbase = pd.MultiIndex.from_tuples([
(get_text(spec['text'], text_key, 'x'), 'cbase')],
names=['Question', 'Values'])
# Non-auto-painting approach
# idx_cbase = pd.MultiIndex.from_tuples([
# (spec['name'], 'cbase')],
# names=['Question', 'Values'])
idx_banked = []
banked = {}
for yk in yks:
banked[yk] = []
for c, chain in enumerate(chains):
xk = chain.source_name
vk_temp = spec['items'][c]['view']
# print xk, yk, vk_temp
df = get_dataframe(chain, keys=[dk, fk, xk, yk, vk_temp])
if isinstance(idx_banked, list):
idx_banked.extend([
(spec['name'], '{}:{}'.format(xk, value[1]))
for value in df.index.values
])
banked[yk].append(df)
banked[yk] = pd.concat(banked[yk], axis=0)
if banked[yk].columns.levels[1][0]=='@':
banked[yk] = pd.DataFrame(
banked[yk].max(axis=1),
index=banked[yk].index,
columns=pd.MultiIndex.from_tuples(
[(spec['name'], '@')],
names=['Question', 'Values'])
)
xk = bchain.source_name
if isinstance(idx_banked, list):
banked_values_meta = [
{'value': idx[1], 'text': spec['items'][i]['text']}
for i, idx in enumerate(idx_banked)]
bchain.banked_meta = {
'name': spec['name'],
'type': spec['type'],
'text': spec['text'],
'values': banked_values_meta
}
# When switching to non-auto-painting, use this
# idx_banked = pd.MultiIndex.from_tuples(
# idx_banked,
# names=['Question', 'Values'])
# Auto-painting
question_text = get_text(spec['text'], text_key, 'x')
idx_banked = pd.MultiIndex.from_tuples([
(question_text, get_text(value['text'], text_key, 'x'))
for i, value in enumerate(bchain.banked_meta['values'])],
names=['Question', 'Values'])
banked[yk].index = idx_banked
bchain[dk][fk][xk][yk][bvk].dataframe = banked[yk]
bchain[dk][fk][xk][yk][bvk]._notation = bvk
# bchain[dk][fk][xk][yk][bvk].meta()['shape'] = banked[yk].shape
bchain[dk][fk][xk][yk][bvk]._x['name'] = spec['name']
bchain[dk][fk][xk][yk][bvk]._x['size'] = banked[yk].shape[0]
bchain.name = 'banked-{}'.format(bchain.name)
for yk in yks:
for vk in list(bchain[dk][fk][xk][yk].keys()):
if vk in bchain.views:
if 'cbase' in vk:
bchain[dk][fk][xk][yk][vk].dataframe.index = idx_cbase
bchain[dk][fk][xk][yk][vk]._x['name'] = spec['name']
else:
del bchain[dk][fk][xk][yk][vk]
bchain[dk][fk][spec['name']] = bchain[dk][fk].pop(xk)
bchain.props_tests = list()
bchain.props_tests_levels = list()
bchain.means_tests = list()
bchain.means_tests_levels = list()
bchain.has_props_tests = False
bchain.has_means_tests = False
bchain.annotations = None
bchain.is_banked = True
bchain.source_name = spec['name']
bchain.banked_view_key = bvk
bchain.banked_spec = spec
for i, item in enumerate(spec['items']):
bchain.banked_spec['items'][i]['chain'] = item['chain'].name
return bchain
def _build(self, type):
""" The Build exports the chains using methods supplied with 'type'. """
pass
def merge(self):
"""
Merges all Chains found in the Cluster into a new pandas.DataFrame.
"""
orient = self[list(self.keys())[0]].orientation
chainnames = list(self.keys())
if orient == 'y':
return pd.concat([self[chainname].concat()
for chainname in chainnames], axis=1)
else:
return pd.concat([self[chainname].concat()
for chainname in chainnames], axis=0)
def save(self, path_cluster):
"""
Load Stack instance from .stack file.
Parameters
----------
path_cluster : str
The full path to the .cluster file that should be created, including
the extension.
Returns
-------
None
"""
if not path_cluster.endswith('.cluster'):
raise ValueError(
"To avoid ambiguity, when using Cluster.save() you must provide the full path to "
"the cluster file you want to create, including the file extension. For example: "
"cluster.save(path_cluster='./output/MyCluster.cluster'). Your call looks like this: "
"cluster.save(path_cluster='%s', ...)" % (path_cluster)
)
f = open(path_cluster, 'wb')
pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)
f.close()
# STATIC METHODS
@staticmethod
def load(path_cluster):
"""
Load Stack instance from .stack file.
Parameters
----------
path_cluster : str
The full path to the .cluster file that should be created, including
the extension.
Returns
-------
None
"""
if not path_cluster.endswith('.cluster'):
raise ValueError(
"To avoid ambiguity, when using Cluster.load() you must provide the full path to "
"the cluster file you want to create, including the file extension. For example: "
"cluster.load(path_cluster='./output/MyCluster.cluster'). Your call looks like this: "
"cluster.load(path_cluster='%s', ...)" % (path_cluster)
)
f = open(path_cluster, 'rb')
obj = pickle.load(f)
f.close()
return obj | 221 | 0 | 87 |
428811f65bf037cde24824501010272b928b75b5 | 1,419 | py | Python | web/models/mysql.py | zhengbigbig/python_demo | 29b62bea9e5abaa02e51744a926f722d1b99ec8d | [
"MIT"
] | null | null | null | web/models/mysql.py | zhengbigbig/python_demo | 29b62bea9e5abaa02e51744a926f722d1b99ec8d | [
"MIT"
] | null | null | null | web/models/mysql.py | zhengbigbig/python_demo | 29b62bea9e5abaa02e51744a926f722d1b99ec8d | [
"MIT"
] | null | null | null | import pymysql
import logging
# Transcode/escape data for MySQL (translated from the original Chinese
# comment; in the full module this comment sits above escape_string).
# Module-level singleton: a MySQL connection is opened at import time
# (side effect -- importing fails if the server is unreachable).
db = MySQLDB(user='root', password='root', db='test')
| 23.65 | 90 | 0.677237 | import pymysql
import logging
class MySQLDB(object):
    """Small convenience wrapper around a pymysql connection.

    Every statement runs on its own cursor and rows come back as dicts
    because the connection is created with DictCursor. The connection is
    opened once in the constructor and kept for the object's lifetime.
    """

    def __init__(self, host='localhost', port=3306, user='root', password='root', db='test'):
        # cursorclass=pymysql.cursors.DictCursor: query results are dicts
        # instead of the default tuples.
        self.conn = pymysql.connect(
            host=host, port=port, user=user, password=password, db=db,
            charset='utf8', cursorclass=pymysql.cursors.DictCursor)
        self.log = logging.getLogger(__name__)

    def execute(self, sql, kwargs):
        """Execute *sql* with bound parameters *kwargs*, commit, and return
        the still-open cursor. The caller must close the cursor.

        On failure the cursor is closed before the exception is logged and
        re-raised (the original implementation leaked it).
        """
        cursor = None
        try:
            cursor = self.conn.cursor()
            cursor.execute(sql, kwargs)
            # Commit is required for INSERT/UPDATE/DELETE and harmless for
            # SELECT statements.
            self.conn.commit()
            return cursor
        except Exception as e:
            if cursor is not None:
                cursor.close()  # fixed: avoid leaking the cursor on error
            self.log.error(f'mysqldb execute error :{e}', exc_info=True)
            raise e

    def query(self, sql, kwargs=None):
        """Run a SELECT and return all rows as a list of dicts."""
        cursor = None
        try:
            cursor = self.execute(sql, kwargs)
            if cursor:
                return cursor.fetchall()
            else:
                raise Exception(f'sql error: {sql}')
        except Exception as e:
            self.log.error(e)
            raise e
        finally:
            if cursor:
                cursor.close()

    def insert(self, sql, kwargs=None):
        """Run an INSERT and return the auto-increment id of the new row."""
        cursor = None
        try:
            cursor = self.execute(sql, kwargs)
            if cursor:
                return cursor.lastrowid
            else:
                raise Exception(f'sql error: {sql}')
        except Exception as e:
            self.log.error(e)
            raise e
        finally:
            if cursor:
                cursor.close()

    # Escape a value for literal embedding in SQL (translated from the
    # original Chinese comment "transcode the data").
    # NOTE(review): prefer parameterized queries; pymysql.escape_string is
    # deprecated in recent PyMySQL releases.
    def escape_string(self, _):
        return pymysql.escape_string(_)
db = MySQLDB(user='root', password='root', db='test')
| 1,228 | 1 | 141 |
d8a7faef64fcbcfb31c2692058def68f00243918 | 9,244 | py | Python | ark/utils/plot_utils.py | angelolab/ark-analysis | 5b248e5413c383bf03fac53d8ae9180fd2310222 | [
"Apache-2.0"
] | 17 | 2020-10-15T20:50:12.000Z | 2022-01-27T19:24:40.000Z | ark/utils/plot_utils.py | angelolab/ark-analysis | 5b248e5413c383bf03fac53d8ae9180fd2310222 | [
"Apache-2.0"
] | 309 | 2020-08-14T16:21:36.000Z | 2022-03-24T22:22:53.000Z | ark/utils/plot_utils.py | angelolab/ark-analysis | 5b248e5413c383bf03fac53d8ae9180fd2310222 | [
"Apache-2.0"
] | 4 | 2020-10-12T21:04:55.000Z | 2021-07-06T09:42:49.000Z | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import xarray as xr
from skimage.segmentation import find_boundaries
from skimage.exposure import rescale_intensity
from ark.utils import load_utils
from ark.utils import misc_utils
# plotting functions
from ark.utils.misc_utils import verify_in_list
def plot_clustering_result(img_xr, fovs, save_dir=None, cmap='tab20',
                           fov_col='fovs', figsize=(10, 10)):
    """Display (and optionally save) one labeled image per requested fov.

    Args:
        img_xr (xr.DataArray):
            xarray containing labeled cell objects.
        fovs (list):
            list of fovs to display.
        save_dir (str):
            If provided, each figure is additionally written to this
            directory as '<fov>.png'.
        cmap (str):
            Colormap used for the displayed image.
        fov_col (str):
            Name of the coordinate holding the fov names in img_xr.
        figsize (tuple):
            Size of each created figure.
    """
    # Fail fast if any requested fov is missing from the xarray.
    verify_in_list(fov_names=fovs, unique_fovs=img_xr.fovs)
    for fov_name in fovs:
        # One figure per fov, titled with the fov name.
        plt.figure(figsize=figsize)
        axis = plt.gca()
        plt.title(fov_name)
        labeled_img = img_xr[img_xr[fov_col] == fov_name].values.squeeze()
        plt.imshow(labeled_img, cmap=cmap)
        # Append a slim colorbar axis to the right of the image.
        cbar_ax = make_axes_locatable(axis).append_axes("right", size="5%", pad=0.05)
        plt.colorbar(cax=cbar_ax)
        if save_dir:
            misc_utils.save_figure(save_dir, f'{fov_name}.png')
def tif_overlay_preprocess(segmentation_labels, plotting_tif):
    """Validate *plotting_tif* and expand it to a 3-channel image.

    A 2D image (which must match the segmentation shape) is placed in the
    blue channel of a zeroed RGB array. A 3D image with up to 3 channels
    is copied into the first channels and then flipped along the channel
    axis, so the first input channel ends up in blue.

    Args:
        segmentation_labels (numpy.ndarray):
            2D numpy array of labeled cell objects
        plotting_tif (numpy.ndarray):
            2D or 3D numpy array of imaging signal

    Returns:
        numpy.ndarray:
            The preprocessed image, always of shape (rows, cols, 3).

    Raises:
        ValueError:
            On a 2D/segmentation shape mismatch, more than 3 channels,
            or an array that is neither 2D nor 3D.
    """
    ndim = len(plotting_tif.shape)
    if ndim == 2:
        if plotting_tif.shape != segmentation_labels.shape:
            raise ValueError("plotting_tif and segmentation_labels array dimensions not equal.")
        # Single-channel signal goes into the blue channel.
        formatted_tif = np.zeros((*plotting_tif.shape[:2], 3),
                                 dtype=plotting_tif.dtype)
        formatted_tif[..., 2] = plotting_tif
        return formatted_tif
    if ndim == 3:
        num_channels = plotting_tif.shape[2]
        if num_channels > 3:
            raise ValueError("max 3 channels of overlay supported, got {}".
                             format(plotting_tif.shape))
        # Fill the leading channels, then reverse channel order.
        formatted_tif = np.zeros((*plotting_tif.shape[:2], 3),
                                 dtype=plotting_tif.dtype)
        formatted_tif[..., :num_channels] = plotting_tif
        return np.flip(formatted_tif, axis=2)
    raise ValueError("plotting tif must be 2D or 3D array, got {}".
                     format(plotting_tif.shape))
def create_overlay(fov, segmentation_dir, data_dir,
                   img_overlay_chans, seg_overlay_comp, alternate_segmentation=None,
                   dtype='int16'):
    """Take in labeled contour data, along with optional mibi tif and second contour,
    and overlay them for comparison.
    Generates the outline(s) of the mask(s) as well as intensity from plotting tif. Predicted
    contours are colored white, while alternate contours are colored red.
    (Docstring fixed: the code below paints the predicted contour white on
    all three RGB channels and the alternate contour red.)
    Args:
        fov (str):
            The name of the fov to overlay
        segmentation_dir (str):
            The path to the directory containing the segmentation data
        data_dir (str):
            The path to the directory containing the nuclear and whole cell image data
        img_overlay_chans (list):
            List of channels the user will overlay
        seg_overlay_comp (str):
            The segmented compartment the user will overlay
        alternate_segmentation (numpy.ndarray):
            2D numpy array of labeled cell objects
        dtype (str/type):
            optional specifier of image type. Overwritten with warning for float images
    Returns:
        numpy.ndarray:
            The image with the channel overlay (uint8, rows x cols x 3)
    """
    # load the specified fov data in
    # NOTE(review): the image is loaded with exactly two named channels
    # (nuclear + membrane); confirm the on-disk tif matches this layout.
    plotting_tif = load_utils.load_imgs_from_dir(
        data_dir=data_dir,
        files=[fov + '.tif'],
        xr_dim_name='channels',
        xr_channel_names=['nuclear_channel', 'membrane_channel'],
        dtype=dtype
    )
    # verify that the provided image channels exist in plotting_tif
    misc_utils.verify_in_list(
        provided_channels=img_overlay_chans,
        img_channels=plotting_tif.channels.values
    )
    # subset the plotting tif with the provided image overlay channels
    plotting_tif = plotting_tif.loc[fov, :, :, img_overlay_chans].values
    # read the segmentation data in
    # (feature_0 = whole-cell labels, feature_1 = nuclear labels)
    segmentation_labels_cell = load_utils.load_imgs_from_dir(data_dir=segmentation_dir,
                                                             files=[fov + '_feature_0.tif'],
                                                             xr_dim_name='compartments',
                                                             xr_channel_names=['whole_cell'],
                                                             trim_suffix='_feature_0',
                                                             match_substring='_feature_0',
                                                             force_ints=True)
    segmentation_labels_nuc = load_utils.load_imgs_from_dir(data_dir=segmentation_dir,
                                                            files=[fov + '_feature_1.tif'],
                                                            xr_dim_name='compartments',
                                                            xr_channel_names=['nuclear'],
                                                            trim_suffix='_feature_1',
                                                            match_substring='_feature_1',
                                                            force_ints=True)
    # stack cell and nuclear labels along the compartment axis
    segmentation_labels = xr.DataArray(np.concatenate((segmentation_labels_cell.values,
                                                       segmentation_labels_nuc.values),
                                                      axis=-1),
                                       coords=[segmentation_labels_cell.fovs,
                                               segmentation_labels_cell.rows,
                                               segmentation_labels_cell.cols,
                                               ['whole_cell', 'nuclear']],
                                       dims=segmentation_labels_cell.dims)
    # verify that the provided segmentation channels exist in segmentation_labels
    misc_utils.verify_in_list(
        provided_compartments=seg_overlay_comp,
        seg_compartments=segmentation_labels.compartments.values
    )
    # subset segmentation labels with the provided segmentation overlay channels
    segmentation_labels = segmentation_labels.loc[fov, :, :, seg_overlay_comp].values
    # overlay the segmentation labels over the image
    plotting_tif = tif_overlay_preprocess(segmentation_labels, plotting_tif)
    # define borders of cells in mask
    predicted_contour_mask = find_boundaries(segmentation_labels,
                                             connectivity=1, mode='inner').astype(np.uint8)
    predicted_contour_mask[predicted_contour_mask > 0] = 255
    # rescale each channel to go from 0 to 255
    # (contrast-stretch between the 5th and 95th percentile of the
    # non-zero pixels of that channel)
    rescaled = np.zeros(plotting_tif.shape, dtype='uint8')
    for idx in range(plotting_tif.shape[2]):
        if np.max(plotting_tif[:, :, idx]) == 0:
            # don't need to rescale this channel
            pass
        else:
            percentiles = np.percentile(plotting_tif[:, :, idx][plotting_tif[:, :, idx] > 0],
                                        [5, 95])
            rescaled_intensity = rescale_intensity(plotting_tif[:, :, idx],
                                                   in_range=(percentiles[0], percentiles[1]),
                                                   out_range='uint8')
            rescaled[:, :, idx] = rescaled_intensity
    # overlay first contour on all three RGB, to have it show up as white border
    rescaled[predicted_contour_mask > 0, :] = 255
    # overlay second contour as red outline if present
    if alternate_segmentation is not None:
        if segmentation_labels.shape != alternate_segmentation.shape:
            raise ValueError("segmentation_labels and alternate_"
                             "segmentation array dimensions not equal.")
        # define borders of cell in mask
        alternate_contour_mask = find_boundaries(alternate_segmentation, connectivity=1,
                                                 mode='inner').astype(np.uint8)
        rescaled[alternate_contour_mask > 0, 0] = 255
        rescaled[alternate_contour_mask > 0, 1:] = 0
    return rescaled
| 44.442308 | 96 | 0.583514 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import xarray as xr
from skimage.segmentation import find_boundaries
from skimage.exposure import rescale_intensity
from ark.utils import load_utils
from ark.utils import misc_utils
# plotting functions
from ark.utils.misc_utils import verify_in_list
def plot_clustering_result(img_xr, fovs, save_dir=None, cmap='tab20',
                           fov_col='fovs', figsize=(10, 10)):
    """Takes an xarray containing labeled images and displays them.
    Args:
        img_xr (xr.DataArray):
            xarray containing labeled cell objects.
        fovs (list):
            list of fovs to display.
        save_dir (str):
            If provided, the image will be saved to this location.
        cmap (str):
            Cmap to use for the image that will be displayed.
        fov_col (str):
            column with the fovs names in img_xr.
        figsize (tuple):
            Size of the image that will be displayed.
    """
    # fail fast if any requested fov is absent from the xarray
    verify_in_list(fov_names=fovs, unique_fovs=img_xr.fovs)
    for fov in fovs:
        # one new figure per fov
        # NOTE(review): figures are never closed, so plotting many fovs
        # accumulates open figures -- confirm this is intended (notebooks)
        plt.figure(figsize=figsize)
        ax = plt.gca()
        plt.title(fov)
        plt.imshow(img_xr[img_xr[fov_col] == fov].values.squeeze(), cmap=cmap)
        # append a slim colorbar axis to the right of the image
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        plt.colorbar(cax=cax)
        if save_dir:
            misc_utils.save_figure(save_dir, f'{fov}.png')
def tif_overlay_preprocess(segmentation_labels, plotting_tif):
    """Validates plotting_tif and preprocesses it accordingly
    Args:
        segmentation_labels (numpy.ndarray):
            2D numpy array of labeled cell objects
        plotting_tif (numpy.ndarray):
            2D or 3D numpy array of imaging signal
    Returns:
        numpy.ndarray:
            The preprocessed image, always of shape (rows, cols, 3)
    Raises:
        ValueError:
            On a 2D/segmentation shape mismatch, more than 3 channels,
            or an array that is neither 2D nor 3D.
    """
    if len(plotting_tif.shape) == 2:
        if plotting_tif.shape != segmentation_labels.shape:
            raise ValueError("plotting_tif and segmentation_labels array dimensions not equal.")
        else:
            # convert RGB image with the blue channel containing the plotting tif data
            formatted_tif = np.zeros((plotting_tif.shape[0], plotting_tif.shape[1], 3),
                                     dtype=plotting_tif.dtype)
            formatted_tif[..., 2] = plotting_tif
    elif len(plotting_tif.shape) == 3:
        # can only support up to 3 channels
        if plotting_tif.shape[2] > 3:
            raise ValueError("max 3 channels of overlay supported, got {}".
                             format(plotting_tif.shape))
        # set first n channels (in reverse order) of formatted_tif to plotting_tif
        # (n = num channels in plotting_tif)
        formatted_tif = np.zeros((plotting_tif.shape[0], plotting_tif.shape[1], 3),
                                 dtype=plotting_tif.dtype)
        formatted_tif[..., :plotting_tif.shape[2]] = plotting_tif
        # flip channel order so the first input channel ends up in blue
        formatted_tif = np.flip(formatted_tif, axis=2)
    else:
        raise ValueError("plotting tif must be 2D or 3D array, got {}".
                         format(plotting_tif.shape))
    return formatted_tif
def create_overlay(fov, segmentation_dir, data_dir,
                   img_overlay_chans, seg_overlay_comp, alternate_segmentation=None,
                   dtype='int16'):
    """Take in labeled contour data, along with optional mibi tif and second contour,
    and overlay them for comparison.
    Generates the outline(s) of the mask(s) as well as intensity from plotting tif. Predicted
    contours are colored white, while alternate contours are colored red.
    (Docstring fixed: the code below paints the predicted contour white on
    all three RGB channels and the alternate contour red.)
    Args:
        fov (str):
            The name of the fov to overlay
        segmentation_dir (str):
            The path to the directory containing the segmentation data
        data_dir (str):
            The path to the directory containing the nuclear and whole cell image data
        img_overlay_chans (list):
            List of channels the user will overlay
        seg_overlay_comp (str):
            The segmented compartment the user will overlay
        alternate_segmentation (numpy.ndarray):
            2D numpy array of labeled cell objects
        dtype (str/type):
            optional specifier of image type. Overwritten with warning for float images
    Returns:
        numpy.ndarray:
            The image with the channel overlay (uint8, rows x cols x 3)
    """
    # load the specified fov data in
    # NOTE(review): the image is loaded with exactly two named channels
    # (nuclear + membrane); confirm the on-disk tif matches this layout.
    plotting_tif = load_utils.load_imgs_from_dir(
        data_dir=data_dir,
        files=[fov + '.tif'],
        xr_dim_name='channels',
        xr_channel_names=['nuclear_channel', 'membrane_channel'],
        dtype=dtype
    )
    # verify that the provided image channels exist in plotting_tif
    misc_utils.verify_in_list(
        provided_channels=img_overlay_chans,
        img_channels=plotting_tif.channels.values
    )
    # subset the plotting tif with the provided image overlay channels
    plotting_tif = plotting_tif.loc[fov, :, :, img_overlay_chans].values
    # read the segmentation data in
    # (feature_0 = whole-cell labels, feature_1 = nuclear labels)
    segmentation_labels_cell = load_utils.load_imgs_from_dir(data_dir=segmentation_dir,
                                                             files=[fov + '_feature_0.tif'],
                                                             xr_dim_name='compartments',
                                                             xr_channel_names=['whole_cell'],
                                                             trim_suffix='_feature_0',
                                                             match_substring='_feature_0',
                                                             force_ints=True)
    segmentation_labels_nuc = load_utils.load_imgs_from_dir(data_dir=segmentation_dir,
                                                            files=[fov + '_feature_1.tif'],
                                                            xr_dim_name='compartments',
                                                            xr_channel_names=['nuclear'],
                                                            trim_suffix='_feature_1',
                                                            match_substring='_feature_1',
                                                            force_ints=True)
    # stack cell and nuclear labels along the compartment axis
    segmentation_labels = xr.DataArray(np.concatenate((segmentation_labels_cell.values,
                                                       segmentation_labels_nuc.values),
                                                      axis=-1),
                                       coords=[segmentation_labels_cell.fovs,
                                               segmentation_labels_cell.rows,
                                               segmentation_labels_cell.cols,
                                               ['whole_cell', 'nuclear']],
                                       dims=segmentation_labels_cell.dims)
    # verify that the provided segmentation channels exist in segmentation_labels
    misc_utils.verify_in_list(
        provided_compartments=seg_overlay_comp,
        seg_compartments=segmentation_labels.compartments.values
    )
    # subset segmentation labels with the provided segmentation overlay channels
    segmentation_labels = segmentation_labels.loc[fov, :, :, seg_overlay_comp].values
    # overlay the segmentation labels over the image
    plotting_tif = tif_overlay_preprocess(segmentation_labels, plotting_tif)
    # define borders of cells in mask
    predicted_contour_mask = find_boundaries(segmentation_labels,
                                             connectivity=1, mode='inner').astype(np.uint8)
    predicted_contour_mask[predicted_contour_mask > 0] = 255
    # rescale each channel to go from 0 to 255
    # (contrast-stretch between the 5th and 95th percentile of the
    # non-zero pixels of that channel)
    rescaled = np.zeros(plotting_tif.shape, dtype='uint8')
    for idx in range(plotting_tif.shape[2]):
        if np.max(plotting_tif[:, :, idx]) == 0:
            # don't need to rescale this channel
            pass
        else:
            percentiles = np.percentile(plotting_tif[:, :, idx][plotting_tif[:, :, idx] > 0],
                                        [5, 95])
            rescaled_intensity = rescale_intensity(plotting_tif[:, :, idx],
                                                   in_range=(percentiles[0], percentiles[1]),
                                                   out_range='uint8')
            rescaled[:, :, idx] = rescaled_intensity
    # overlay first contour on all three RGB, to have it show up as white border
    rescaled[predicted_contour_mask > 0, :] = 255
    # overlay second contour as red outline if present
    if alternate_segmentation is not None:
        if segmentation_labels.shape != alternate_segmentation.shape:
            raise ValueError("segmentation_labels and alternate_"
                             "segmentation array dimensions not equal.")
        # define borders of cell in mask
        alternate_contour_mask = find_boundaries(alternate_segmentation, connectivity=1,
                                                 mode='inner').astype(np.uint8)
        rescaled[alternate_contour_mask > 0, 0] = 255
        rescaled[alternate_contour_mask > 0, 1:] = 0
    return rescaled
| 0 | 0 | 0 |
e923430b6a6eb327b695a54846e7a3076c5d0662 | 1,542 | py | Python | src/trainer/sr/base.py | sanghyun-son/srwarp | d7cc08db5ba5ec9103f1813f76d1da825afe1a5b | [
"MIT"
] | 82 | 2021-04-22T09:22:46.000Z | 2022-03-30T03:06:47.000Z | src/trainer/sr/base.py | sanghyun-son/srwarp | d7cc08db5ba5ec9103f1813f76d1da825afe1a5b | [
"MIT"
] | 3 | 2021-08-04T15:40:52.000Z | 2022-02-21T10:10:52.000Z | src/trainer/sr/base.py | sanghyun-son/srwarp | d7cc08db5ba5ec9103f1813f76d1da825afe1a5b | [
"MIT"
] | 11 | 2021-05-16T14:54:33.000Z | 2022-02-18T08:25:37.000Z | from trainer.gan import dcgan
from misc.gpu_utils import parallel_forward as pforward
from model.utils import forward_utils as futils
_parent_class = dcgan.GANTrainer
| 27.052632 | 63 | 0.479248 | from trainer.gan import dcgan
from misc.gpu_utils import parallel_forward as pforward
from model.utils import forward_utils as futils
_parent_class = dcgan.GANTrainer
class SRTrainer(_parent_class):
    """GAN trainer specialized for super-resolution.

    Adds two evaluation-time forwarding options on top of the parent
    trainer: ``quads`` (futils.quad_forward) and ``x8``
    (futils.x8_forward). Neither affects the training branch.
    """

    def __init__(self, *args, x8=False, quads=False, **kwargs):
        super().__init__(*args, **kwargs)
        # Evaluation-only forwarding switches (see forward()).
        self.x8 = x8
        self.quads = quads

    @staticmethod
    def get_kwargs(cfg):
        """Extend the parent trainer's constructor kwargs with the SR flags."""
        parent_kwargs = _parent_class.get_kwargs(cfg)
        parent_kwargs['x8'] = cfg.x8
        parent_kwargs['quads'] = cfg.quads
        return parent_kwargs

    def forward(self, **samples):
        """Run one forward pass and return ``(loss, sr)``.

        In training mode the batch is split into discriminator ('d') and
        generator ('g') halves before the generator forward; in
        evaluation mode the optional quad/x8 forwarding helpers are used.
        """
        if self.training:
            split = self.split_batch(**samples)
            lr_gen = split['g']['lr']
            sr = pforward(self.model, lr_gen)
            loss = self.loss(
                lr=lr_gen,
                g=self.model,
                lr_d=split['d']['lr'],
                hr_d=split['d']['hr'],
                sr=sr,
                hr=split['g']['hr'],
            )
            return loss, sr

        lr = samples['lr']
        if self.quads:
            sr = futils.quad_forward(self.model, lr)
        elif self.x8:
            sr = futils.x8_forward(self.model, lr)
        else:
            sr = pforward(self.model, lr)
        loss = self.loss(
            lr=lr,
            g=None,
            lr_d=None,
            hr_d=None,
            sr=sr,
            hr=samples['hr'],
        )
        return loss, sr
| 1,242 | 109 | 23 |
132727ab48ce64a42309c69292ca835404984f0d | 9,500 | py | Python | soft/compiler.ctuning-cc/customize.py | G4V/ck-env | b882480b00b9dbd88f15eef58440772e09414f64 | [
"BSD-3-Clause"
] | 80 | 2015-03-03T14:27:39.000Z | 2022-01-04T15:37:01.000Z | soft/compiler.ctuning-cc/customize.py | G4V/ck-env | b882480b00b9dbd88f15eef58440772e09414f64 | [
"BSD-3-Clause"
] | 78 | 2016-02-20T07:47:05.000Z | 2021-05-01T13:33:31.000Z | soft/compiler.ctuning-cc/customize.py | G4V/ck-env | b882480b00b9dbd88f15eef58440772e09414f64 | [
"BSD-3-Clause"
] | 22 | 2016-07-29T07:25:11.000Z | 2021-02-08T16:18:26.000Z | #
# Collective Knowledge (individual environment - setup)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, Grigori.Fursin@cTuning.org, http://fursin.net
#
##############################################################################
# setup environment setup
def setup(i):
    """Prepare the environment script for the ctuning-cc compiler wrapper.

    Builds a shell (or Windows batch) snippet that exports all the
    cTuning/CCC variables the wrapper needs, and records the installation
    root in the environment dict.

    Input:  {
              cfg              - meta of this soft entry
              self_cfg         - meta of module soft
              ck_kernel        - import CK kernel module (to reuse functions)
              host_os_uoa      - host OS UOA
              host_os_uid      - host OS UID
              host_os_dict     - host OS meta
              target_os_uoa    - target OS UOA
              target_os_uid    - target OS UID
              target_os_dict   - target OS meta
              target_device_id - target device ID (if via ADB)
              tags             - list of tags used to search this entry
              env              - updated environment vars from meta
              customize        - updated customize vars from meta
              deps             - resolved dependencies for this soft
              interactive      - if 'yes', can ask questions, otherwise quiet
            }

    Output: {
              return       - return code =  0, if successful
                                         >  0, if error
              (error)      - error text if return > 0
              bat          - prepared string for bat file
              env          - updated environment dict
              tags         - tags (passed through)
            }
    """

    import os

    # Get variables
    ck=i['ck_kernel']
    s=''

    iv=i.get('interactive','')

    env=i.get('env',{})

    cfg=i.get('cfg',{})
    deps=i.get('deps',{})
    tags=i.get('tags',[])

    cus=i.get('customize',{})

    host_d=i.get('host_os_dict',{})
    target_d=i.get('target_os_dict',{})
    winh=host_d.get('windows_base','')
    win=target_d.get('windows_base','')
    remote=target_d.get('remote','')
    mingw=target_d.get('mingw','')
    tbits=target_d.get('bits','')

    sdirs=host_d.get('dir_sep','')

    envp=cus.get('env_prefix','')
    pi=cus.get('path_install','')

    fp=cus.get('full_path','')
    # Installation root is presumably two levels above the detected file
    # (full_path points inside <root>/bin) -- TODO confirm.
    p1=os.path.dirname(fp)
    pi=os.path.dirname(p1)

    pb=pi+sdirs+'bin'
    cus['path_bin']=pb

    ep=cus.get('env_prefix','')
    if pi!='' and ep!='':
       env[ep]=pi
       env[ep+'_BIN']=pb

    ############################################################
    # Prepare environment
    # Two mirrored branches: Windows batch syntax ('set'/'rem') and Unix
    # shell syntax ('export'/'#'); the emitted variables are equivalent.
    if winh=='yes':
       s='\n'
       s+='set CCC_ROOT='+pi+'\n'
       s+='set CCC_PLUGINS=%CCC_ROOT%\\src-plat-indep\n'
       s+='set PATH=%CCC_ROOT%\\src-plat-indep\\plugins;%PATH%\n'
       s+='set CTUNING_ANALYSIS_CC=%CK_ENV_COMPILER_GCC%\\bin\\gcc\n'
       s+='set CTUNING_ANALYSIS_CPP=%CK_ENV_COMPILER_GCC%\\bin\\g++\n'
       s+='set CTUNING_ANALYSIS_FORTRAN=%CK_ENV_COMPILER_GCC%\\bin\\gfortran\n'
       s+='\n'
       s+='set CTUNING_COMPILER_CC=%CK_CC%\n'
       s+='set CTUNING_COMPILER_CPP=%CK_CXX%\n'
       s+='set CTUNING_COMPILER_FORTRAN=%CK_FC%\n'
       s+='\n'
       s+='if "%CK_CC%" == "ctuning-cc" (\n'
       s+='   set CTUNING_COMPILER_CC=gcc\n'
       s+='   set CTUNING_COMPILER_CPP=g++\n'
       s+='   set CTUNING_COMPILER_FORTRAN=gfortran\n'
       s+=')\n'
       s+='\n'
       s+='set CK_MAKE=make\n'
       s+='set CK_OBJDUMP="objdump -d"\n'
       s+='\n'
       s+='rem PRESET SOME DEFAULT VARIABLES\n'
       s+='set ICI_PROG_FEAT_PASS=fre\n'
       s+='\n'
       s+='rem set cTuning web-service parameters\n'
       s+='set CCC_CTS_URL=cTuning.org/wiki/index.php/Special:CDatabase?request=\n'
       s+='rem set CCC_CTS_URL=localhost/cTuning/wiki/index.php/Special:CDatabase?request=\n'
       s+='set CCC_CTS_DB=fursinne_coptcases\n'
       s+='rem set cTuning username (self-register at http://cTuning.org/wiki/index.php/Special:UserLogin)\n'
       s+='set CCC_CTS_USER=gfursin\n'
       s+='\n'
       s+='rem compiler which was used to extract features for all programs to keep at cTuning.org\n'
       s+='rem do not change it unless you understand what you do ;) ...\n'
       s+='set CCC_COMPILER_FEATURES_ID=129504539516446542\n'
       s+='\n'
       s+='rem use architecture flags from cTuning\n'
       s+='set CCC_OPT_ARCH_USE=0\n'
       s+='\n'
       s+='rem retrieve opt cases only when execution time > TIME_THRESHOLD\n'
       s+='set TIME_THRESHOLD=0.3\n'
       s+='\n'
       s+='rem retrieve opt cases only with specific notes\n'
       s+='rem set NOTES=\n'
       s+='\n'
       s+='rem retrieve opt cases only when profile info is !=""\n'
       s+='rem set PG_USE=1\n'
       s+='\n'
       s+='rem retrieve opt cases only when execution output is correct (or not if =0)\n'
       s+='set OUTPUT_CORRECT=1\n'
       s+='\n'
       s+='rem check user or total execution time\n'
       s+='rem set RUN_TIME=RUN_TIME_USER\n'
       s+='set RUN_TIME=RUN_TIME\n'
       s+='\n'
       s+='rem Sort optimization case by speedup (0 - ex. time, 1 - code size, 2 - comp time)\n'
       s+='set SORT=012\n'
       s+='\n'
       s+='rem produce additional optimization report including optimization space froniters\n'
       s+='set CT_OPT_REPORT=1\n'
       s+='\n'
       s+='rem Produce optimization space frontier\n'
       s+='rem set DIM=01 (2D frontier)\n'
       s+='rem set DIM=02 (2D frontier)\n'
       s+='rem set DIM=12 (2D frontier)\n'
       s+='rem set DIM=012 (3D frontier)\n'
       s+='rem set DIM=012\n'
       s+='\n'
       s+='rem Cut cases when producing frontier (select cases when speedup 0,1 or 2 is more than some threshold)\n'
       s+='rem set CUT=0,0,1.2\n'
       s+='rem set CUT=1,0.80,1\n'
       s+='rem set CUT=0,0,1\n'
       s+='\n'
       s+='rem find similar cases from the following platform\n'
       s+='set CCC_PLATFORM_ID=2111574609159278179\n'
       s+='set CCC_ENVIRONMENT_ID=2781195477254972989\n'
       s+='set CCC_COMPILER_ID=331350613878705696\n'
    else:
       s='\n'
       s+='export CCC_ROOT='+pi+'\n'
       s+='export CCC_PLUGINS=$CCC_ROOT/src-plat-indep\n'
       s+='export PATH=$CCC_ROOT/src-plat-indep/plugins:$PATH\n'
       s+='export CTUNING_ANALYSIS_CC=$CK_ENV_COMPILER_GCC/bin/gcc\n'
       s+='export CTUNING_ANALYSIS_CPP=$CK_ENV_COMPILER_GCC/bin/g++\n'
       s+='export CTUNING_ANALYSIS_FORTRAN=$CK_ENV_COMPILER_GCC/bin/gfortran\n'
       s+='\n'
       s+='export CTUNING_COMPILER_CC=$CK_CC\n'
       s+='export CTUNING_COMPILER_CPP=$CK_CXX\n'
       s+='export CTUNING_COMPILER_FORTRAN=$CK_FC\n'
       s+='\n'
       s+='if [ "${CK_CC}" == "ctuning-cc" ] ; then\n'
       s+='   export CTUNING_COMPILER_CC=gcc\n'
       s+='   export CTUNING_COMPILER_CPP=g++\n'
       s+='   export CTUNING_COMPILER_FORTRAN=gfortran\n'
       s+='fi\n'
       s+='\n'
       s+='export CK_MAKE=make\n'
       s+='export CK_OBJDUMP="objdump -d"\n'
       s+='\n'
       s+='# PRESET SOME DEFAULT VARIABLES\n'
       s+='export ICI_PROG_FEAT_PASS=fre\n'
       s+='\n'
       s+='#set cTuning web-service parameters\n'
       s+='export CCC_CTS_URL=cTuning.org/wiki/index.php/Special:CDatabase?request=\n'
       s+='#export CCC_CTS_URL=localhost/cTuning/wiki/index.php/Special:CDatabase?request=\n'
       s+='export CCC_CTS_DB=fursinne_coptcases\n'
       s+='#set cTuning username (self-register at http://cTuning.org/wiki/index.php/Special:UserLogin)\n'
       s+='export CCC_CTS_USER=gfursin\n'
       s+='\n'
       s+='#compiler which was used to extract features for all programs to keep at cTuning.org\n'
       s+='#do not change it unless you understand what you do ;) ...\n'
       s+='export CCC_COMPILER_FEATURES_ID=129504539516446542\n'
       s+='\n'
       s+='#use architecture flags from cTuning\n'
       s+='export CCC_OPT_ARCH_USE=0\n'
       s+='\n'
       s+='#retrieve opt cases only when execution time > TIME_THRESHOLD\n'
       s+='export TIME_THRESHOLD=0.3\n'
       s+='\n'
       s+='#retrieve opt cases only with specific notes\n'
       s+='#export NOTES=\n'
       s+='\n'
       s+='#retrieve opt cases only when profile info is !=""\n'
       s+='#export PG_USE=1\n'
       s+='\n'
       s+='#retrieve opt cases only when execution output is correct (or not if =0)\n'
       s+='export OUTPUT_CORRECT=1\n'
       s+='\n'
       s+='#check user or total execution time\n'
       s+='#export RUN_TIME=RUN_TIME_USER\n'
       s+='export RUN_TIME=RUN_TIME\n'
       s+='\n'
       s+='#Sort optimization case by speedup (0 - ex. time, 1 - code size, 2 - comp time)\n'
       s+='export SORT=012\n'
       s+='\n'
       s+='#produce additional optimization report including optimization space froniters\n'
       s+='export CT_OPT_REPORT=1\n'
       s+='\n'
       s+='#Produce optimization space frontier\n'
       s+='#export DIM=01 (2D frontier)\n'
       s+='#export DIM=02 (2D frontier)\n'
       s+='#export DIM=12 (2D frontier)\n'
       s+='#export DIM=012 (3D frontier)\n'
       s+='#export DIM=012\n'
       s+='\n'
       s+='#Cut cases when producing frontier (select cases when speedup 0,1 or 2 is more than some threshold)\n'
       s+='#export CUT=0,0,1.2\n'
       s+='#export CUT=1,0.80,1\n'
       s+='#export CUT=0,0,1\n'
       s+='\n'
       s+='#find similar cases from the following platform\n'
       s+='export CCC_PLATFORM_ID=2111574609159278179\n'
       s+='export CCC_ENVIRONMENT_ID=2781195477254972989\n'
       s+='export CCC_COMPILER_ID=331350613878705696\n'

    return {'return':0, 'bat':s, 'env':env, 'tags':tags}
| 33.333333 | 116 | 0.572842 | #
# Collective Knowledge (individual environment - setup)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, Grigori.Fursin@cTuning.org, http://fursin.net
#
##############################################################################
# setup environment setup
def setup(i):
    """Prepare the environment script for the ctuning-cc compiler wrapper.

    Builds a shell (or Windows batch) snippet that exports all the
    cTuning/CCC variables the wrapper needs, and records the installation
    root in the environment dict.

    Input:  {
              cfg              - meta of this soft entry
              self_cfg         - meta of module soft
              ck_kernel        - import CK kernel module (to reuse functions)
              host_os_uoa      - host OS UOA
              host_os_uid      - host OS UID
              host_os_dict     - host OS meta
              target_os_uoa    - target OS UOA
              target_os_uid    - target OS UID
              target_os_dict   - target OS meta
              target_device_id - target device ID (if via ADB)
              tags             - list of tags used to search this entry
              env              - updated environment vars from meta
              customize        - updated customize vars from meta
              deps             - resolved dependencies for this soft
              interactive      - if 'yes', can ask questions, otherwise quiet
            }

    Output: {
              return       - return code =  0, if successful
                                         >  0, if error
              (error)      - error text if return > 0
              bat          - prepared string for bat file
              env          - updated environment dict
              tags         - tags (passed through)
            }
    """

    import os

    # Get variables
    ck=i['ck_kernel']
    s=''

    iv=i.get('interactive','')

    env=i.get('env',{})

    cfg=i.get('cfg',{})
    deps=i.get('deps',{})
    tags=i.get('tags',[])

    cus=i.get('customize',{})

    host_d=i.get('host_os_dict',{})
    target_d=i.get('target_os_dict',{})
    winh=host_d.get('windows_base','')
    win=target_d.get('windows_base','')
    remote=target_d.get('remote','')
    mingw=target_d.get('mingw','')
    tbits=target_d.get('bits','')

    sdirs=host_d.get('dir_sep','')

    envp=cus.get('env_prefix','')
    pi=cus.get('path_install','')

    fp=cus.get('full_path','')
    # Installation root is presumably two levels above the detected file
    # (full_path points inside <root>/bin) -- TODO confirm.
    p1=os.path.dirname(fp)
    pi=os.path.dirname(p1)

    pb=pi+sdirs+'bin'
    cus['path_bin']=pb

    ep=cus.get('env_prefix','')
    if pi!='' and ep!='':
       env[ep]=pi
       env[ep+'_BIN']=pb

    ############################################################
    # Prepare environment
    # Two mirrored branches: Windows batch syntax ('set'/'rem') and Unix
    # shell syntax ('export'/'#'); the emitted variables are equivalent.
    if winh=='yes':
       s='\n'
       s+='set CCC_ROOT='+pi+'\n'
       s+='set CCC_PLUGINS=%CCC_ROOT%\\src-plat-indep\n'
       s+='set PATH=%CCC_ROOT%\\src-plat-indep\\plugins;%PATH%\n'
       s+='set CTUNING_ANALYSIS_CC=%CK_ENV_COMPILER_GCC%\\bin\\gcc\n'
       s+='set CTUNING_ANALYSIS_CPP=%CK_ENV_COMPILER_GCC%\\bin\\g++\n'
       s+='set CTUNING_ANALYSIS_FORTRAN=%CK_ENV_COMPILER_GCC%\\bin\\gfortran\n'
       s+='\n'
       s+='set CTUNING_COMPILER_CC=%CK_CC%\n'
       s+='set CTUNING_COMPILER_CPP=%CK_CXX%\n'
       s+='set CTUNING_COMPILER_FORTRAN=%CK_FC%\n'
       s+='\n'
       s+='if "%CK_CC%" == "ctuning-cc" (\n'
       s+='   set CTUNING_COMPILER_CC=gcc\n'
       s+='   set CTUNING_COMPILER_CPP=g++\n'
       s+='   set CTUNING_COMPILER_FORTRAN=gfortran\n'
       s+=')\n'
       s+='\n'
       s+='set CK_MAKE=make\n'
       s+='set CK_OBJDUMP="objdump -d"\n'
       s+='\n'
       s+='rem PRESET SOME DEFAULT VARIABLES\n'
       s+='set ICI_PROG_FEAT_PASS=fre\n'
       s+='\n'
       s+='rem set cTuning web-service parameters\n'
       s+='set CCC_CTS_URL=cTuning.org/wiki/index.php/Special:CDatabase?request=\n'
       s+='rem set CCC_CTS_URL=localhost/cTuning/wiki/index.php/Special:CDatabase?request=\n'
       s+='set CCC_CTS_DB=fursinne_coptcases\n'
       s+='rem set cTuning username (self-register at http://cTuning.org/wiki/index.php/Special:UserLogin)\n'
       s+='set CCC_CTS_USER=gfursin\n'
       s+='\n'
       s+='rem compiler which was used to extract features for all programs to keep at cTuning.org\n'
       s+='rem do not change it unless you understand what you do ;) ...\n'
       s+='set CCC_COMPILER_FEATURES_ID=129504539516446542\n'
       s+='\n'
       s+='rem use architecture flags from cTuning\n'
       s+='set CCC_OPT_ARCH_USE=0\n'
       s+='\n'
       s+='rem retrieve opt cases only when execution time > TIME_THRESHOLD\n'
       s+='set TIME_THRESHOLD=0.3\n'
       s+='\n'
       s+='rem retrieve opt cases only with specific notes\n'
       s+='rem set NOTES=\n'
       s+='\n'
       s+='rem retrieve opt cases only when profile info is !=""\n'
       s+='rem set PG_USE=1\n'
       s+='\n'
       s+='rem retrieve opt cases only when execution output is correct (or not if =0)\n'
       s+='set OUTPUT_CORRECT=1\n'
       s+='\n'
       s+='rem check user or total execution time\n'
       s+='rem set RUN_TIME=RUN_TIME_USER\n'
       s+='set RUN_TIME=RUN_TIME\n'
       s+='\n'
       s+='rem Sort optimization case by speedup (0 - ex. time, 1 - code size, 2 - comp time)\n'
       s+='set SORT=012\n'
       s+='\n'
       s+='rem produce additional optimization report including optimization space froniters\n'
       s+='set CT_OPT_REPORT=1\n'
       s+='\n'
       s+='rem Produce optimization space frontier\n'
       s+='rem set DIM=01 (2D frontier)\n'
       s+='rem set DIM=02 (2D frontier)\n'
       s+='rem set DIM=12 (2D frontier)\n'
       s+='rem set DIM=012 (3D frontier)\n'
       s+='rem set DIM=012\n'
       s+='\n'
       s+='rem Cut cases when producing frontier (select cases when speedup 0,1 or 2 is more than some threshold)\n'
       s+='rem set CUT=0,0,1.2\n'
       s+='rem set CUT=1,0.80,1\n'
       s+='rem set CUT=0,0,1\n'
       s+='\n'
       s+='rem find similar cases from the following platform\n'
       s+='set CCC_PLATFORM_ID=2111574609159278179\n'
       s+='set CCC_ENVIRONMENT_ID=2781195477254972989\n'
       s+='set CCC_COMPILER_ID=331350613878705696\n'
    else:
       s='\n'
       s+='export CCC_ROOT='+pi+'\n'
       s+='export CCC_PLUGINS=$CCC_ROOT/src-plat-indep\n'
       s+='export PATH=$CCC_ROOT/src-plat-indep/plugins:$PATH\n'
       s+='export CTUNING_ANALYSIS_CC=$CK_ENV_COMPILER_GCC/bin/gcc\n'
       s+='export CTUNING_ANALYSIS_CPP=$CK_ENV_COMPILER_GCC/bin/g++\n'
       s+='export CTUNING_ANALYSIS_FORTRAN=$CK_ENV_COMPILER_GCC/bin/gfortran\n'
       s+='\n'
       s+='export CTUNING_COMPILER_CC=$CK_CC\n'
       s+='export CTUNING_COMPILER_CPP=$CK_CXX\n'
       s+='export CTUNING_COMPILER_FORTRAN=$CK_FC\n'
       s+='\n'
       s+='if [ "${CK_CC}" == "ctuning-cc" ] ; then\n'
       s+='   export CTUNING_COMPILER_CC=gcc\n'
       s+='   export CTUNING_COMPILER_CPP=g++\n'
       s+='   export CTUNING_COMPILER_FORTRAN=gfortran\n'
       s+='fi\n'
       s+='\n'
       s+='export CK_MAKE=make\n'
       s+='export CK_OBJDUMP="objdump -d"\n'
       s+='\n'
       s+='# PRESET SOME DEFAULT VARIABLES\n'
       s+='export ICI_PROG_FEAT_PASS=fre\n'
       s+='\n'
       s+='#set cTuning web-service parameters\n'
       s+='export CCC_CTS_URL=cTuning.org/wiki/index.php/Special:CDatabase?request=\n'
       s+='#export CCC_CTS_URL=localhost/cTuning/wiki/index.php/Special:CDatabase?request=\n'
       s+='export CCC_CTS_DB=fursinne_coptcases\n'
       s+='#set cTuning username (self-register at http://cTuning.org/wiki/index.php/Special:UserLogin)\n'
       s+='export CCC_CTS_USER=gfursin\n'
       s+='\n'
       s+='#compiler which was used to extract features for all programs to keep at cTuning.org\n'
       s+='#do not change it unless you understand what you do ;) ...\n'
       s+='export CCC_COMPILER_FEATURES_ID=129504539516446542\n'
       s+='\n'
       s+='#use architecture flags from cTuning\n'
       s+='export CCC_OPT_ARCH_USE=0\n'
       s+='\n'
       s+='#retrieve opt cases only when execution time > TIME_THRESHOLD\n'
       s+='export TIME_THRESHOLD=0.3\n'
       s+='\n'
       s+='#retrieve opt cases only with specific notes\n'
       s+='#export NOTES=\n'
       s+='\n'
       s+='#retrieve opt cases only when profile info is !=""\n'
       s+='#export PG_USE=1\n'
       s+='\n'
       s+='#retrieve opt cases only when execution output is correct (or not if =0)\n'
       s+='export OUTPUT_CORRECT=1\n'
       s+='\n'
       s+='#check user or total execution time\n'
       s+='#export RUN_TIME=RUN_TIME_USER\n'
       s+='export RUN_TIME=RUN_TIME\n'
       s+='\n'
       s+='#Sort optimization case by speedup (0 - ex. time, 1 - code size, 2 - comp time)\n'
       s+='export SORT=012\n'
       s+='\n'
       s+='#produce additional optimization report including optimization space froniters\n'
       s+='export CT_OPT_REPORT=1\n'
       s+='\n'
       s+='#Produce optimization space frontier\n'
       s+='#export DIM=01 (2D frontier)\n'
       s+='#export DIM=02 (2D frontier)\n'
       s+='#export DIM=12 (2D frontier)\n'
       s+='#export DIM=012 (3D frontier)\n'
       s+='#export DIM=012\n'
       s+='\n'
       s+='#Cut cases when producing frontier (select cases when speedup 0,1 or 2 is more than some threshold)\n'
       s+='#export CUT=0,0,1.2\n'
       s+='#export CUT=1,0.80,1\n'
       s+='#export CUT=0,0,1\n'
       s+='\n'
       s+='#find similar cases from the following platform\n'
       s+='export CCC_PLATFORM_ID=2111574609159278179\n'
       s+='export CCC_ENVIRONMENT_ID=2781195477254972989\n'
       s+='export CCC_COMPILER_ID=331350613878705696\n'

    return {'return':0, 'bat':s, 'env':env, 'tags':tags}
| 0 | 0 | 0 |
292b05d1269b68175aa7424e7be5c81a7dc02868 | 34,115 | py | Python | scripts/external.py | tth030/SM_ESR_isostasy | fbd2ac586e8e31dd18a0988181514bc2fff7f08a | [
"MIT"
] | null | null | null | scripts/external.py | tth030/SM_ESR_isostasy | fbd2ac586e8e31dd18a0988181514bc2fff7f08a | [
"MIT"
] | null | null | null | scripts/external.py | tth030/SM_ESR_isostasy | fbd2ac586e8e31dd18a0988181514bc2fff7f08a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import numpy as np
import colorsys
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator,ScalarFormatter)
import netCDF4
import os
import pyproj
import pickle
from statsmodels.stats.weightstats import DescrStatsW
from matplotlib import rcParams, rcParamsDefault
import xarray as xr
geod = pyproj.Geod(ellps='WGS84')
def gmtColormap(fileName, GMTPath=None):
    '''Load a GMT ``.cpt`` colour-palette file into a matplotlib colour dict.

    https://scipy-cookbook.readthedocs.io/items/Matplotlib_Loading_a_colormap_dynamically.html
    modified from James Boyle and Andrew Straw - Thomas Theunissen 2021

    Parameters
    ----------
    fileName : str
        Palette name without the ``.cpt`` extension.
    GMTPath : str, optional
        Directory containing the file; defaults to the current directory.

    Returns
    -------
    dict with 'red', 'green', 'blue' segment lists suitable for
    matplotlib.colors.LinearSegmentedColormap, or None if the file
    cannot be read.
    '''
    if GMTPath is None:
        filePath = "./" + fileName + ".cpt"
    else:
        filePath = GMTPath + "/" + fileName + ".cpt"
    try:
        with open(filePath) as f:
            lines = f.readlines()
    except OSError:
        # narrowed from a bare except; message kept identical to the original
        print("file " + filePath + "not found")
        return None
    x = []
    r = []
    g = []
    b = []
    colorModel = "RGB"
    xtemp = rtemp = gtemp = btemp = None
    for l in lines:
        ls = l.split()
        if len(l) <= 1:
            continue
        if l[0] == "#":
            # a trailing HSV token in a comment switches the colour model
            if ls[-1] == "HSV":
                colorModel = "HSV"
            continue
        if ls[0] in ("B", "F", "N"):
            # background / foreground / NaN entries carry no segment data
            continue
        # left edge of the colour segment
        x.append(float(ls[0]))
        r.append(float(ls[1]))
        g.append(float(ls[2]))
        b.append(float(ls[3]))
        # right edge; only the last one is appended, after the loop
        xtemp = float(ls[4])
        rtemp = float(ls[5])
        gtemp = float(ls[6])
        btemp = float(ls[7])
    x.append(xtemp)
    r.append(rtemp)
    g.append(gtemp)
    b.append(btemp)
    x = np.array(x, np.float32)
    r = np.array(r, np.float32)
    g = np.array(g, np.float32)
    b = np.array(b, np.float32)
    if colorModel == "HSV":
        # convert once; the original ran this loop twice, applying
        # hsv_to_rgb to already-converted values and corrupting HSV palettes
        for i in range(r.shape[0]):
            rr, gg, bb = colorsys.hsv_to_rgb(r[i] / 360., g[i], b[i])
            r[i] = rr
            g[i] = gg
            b[i] = bb
    elif colorModel == "RGB":
        r = r / 255.
        g = g / 255.
        b = b / 255.
    xNorm = (x - x[0]) / (x[-1] - x[0])
    red = []
    blue = []
    green = []
    for i in range(len(x)):
        red.append([xNorm[i], r[i], r[i]])
        green.append([xNorm[i], g[i], g[i]])
        blue.append([xNorm[i], b[i], b[i]])
    colorDict = {"red": red, "green": green, "blue": blue}
    return (colorDict)
def shoot(lon, lat, azimuth, maxdist=None):
    """Shooter Function: geodesic forward problem on the WGS84 ellipsoid.

    From (lon, lat) in degrees, travel *maxdist* kilometres along the
    initial bearing *azimuth* (degrees clockwise from north).

    Returns
    -------
    (glon2, glat2, baz) : destination longitude/latitude and back azimuth
    from the destination, all in degrees.

    Raises
    ------
    ValueError
        When starting at a pole with a non N-S course (azimuth undefined).

    Original javascript on http://williams.best.vwh.net/gccalc.htm
    Translated to python by Thomas Lecocq
    https://www.geophysique.be/2011/02/20/matplotlib-basemap-tutorial-09-drawing-circles/
    """
    glat1 = lat * np.pi / 180.
    glon1 = lon * np.pi / 180.
    s = maxdist / 1.852          # km -> nautical miles (semi-axis below is in nm)
    faz = azimuth * np.pi / 180.
    EPS = 0.00000000005
    if ((np.abs(np.cos(glat1)) < EPS) and not (np.abs(np.sin(faz)) < EPS)):
        # the original called JavaScript's alert(), undefined in Python,
        # which crashed with a NameError; raise a real exception instead
        raise ValueError("Only N-S courses are meaningful, starting at a pole!")
    a = 6378.13 / 1.852          # WGS84 equatorial radius in nautical miles
    f = 1 / 298.257223563        # WGS84 flattening
    r = 1 - f
    tu = r * np.tan(glat1)
    sf = np.sin(faz)
    cf = np.cos(faz)
    if (cf == 0):
        b = 0.
    else:
        b = 2. * np.arctan2(tu, cf)
    cu = 1. / np.sqrt(1 + tu * tu)
    su = tu * cu
    sa = cu * sf
    c2a = 1 - sa * sa
    x = 1. + np.sqrt(1. + c2a * (1. / (r * r) - 1.))
    x = (x - 2.) / x
    c = 1. - x
    c = (x * x / 4. + 1.) / c
    d = (0.375 * x * x - 1.) * x
    tu = s / (r * a * c)
    y = tu
    c = y + 1
    # iterate the Vincenty direct formula until the arc length converges
    while (np.abs(y - c) > EPS):
        sy = np.sin(y)
        cy = np.cos(y)
        cz = np.cos(b + y)
        e = 2. * cz * cz - 1.
        c = y
        x = e * cy
        y = e + e - 1.
        y = (((sy * sy * 4. - 3.) * y * cz * d / 6. + x) *
             d / 4. - cz) * sy * d + tu
    b = cu * cy * cf - su * sy
    c = r * np.sqrt(sa * sa + b * b)
    d = su * cy + cu * sy * cf
    glat2 = (np.arctan2(d, c) + np.pi) % (2 * np.pi) - np.pi
    c = cu * cy - su * sy * cf
    x = np.arctan2(sy * sf, c)
    c = ((-3. * c2a + 4.) * f + 4.) * c2a * f / 16.
    d = ((e * cy * c + cz) * sy * c + y) * sa
    glon2 = ((glon1 + x - (1. - c) * d * f + np.pi) % (2 * np.pi)) - np.pi
    baz = (np.arctan2(sa, b) + np.pi) % (2 * np.pi)
    glon2 *= 180. / np.pi
    glat2 *= 180. / np.pi
    baz *= 180. / np.pi
    return (glon2, glat2, baz)
def equi(m, centerlon, centerlat, radius, *args, **kwargs):
    ''' Plotting circles on a map

    Draw a geodesic circle of *radius* km around (centerlon, centerlat)
    on Basemap instance ``m``, sampling one point per degree of azimuth
    with shoot(). When the circle crosses the map's longitude seam, the
    polyline is split into east/west halves so no line is drawn across
    the whole map. Extra keyword arguments are forwarded to plt.plot.
    '''
    glon1 = centerlon
    glat1 = centerlat
    X = [] ;
    Y = [] ;
    for azimuth in range(0, 360):
        glon2, glat2, baz = shoot(glon1, glat1, azimuth, radius)
        X.append(glon2)
        Y.append(glat2)
    # close the polygon
    X.append(X[0])
    Y.append(Y[0])
    X=np.asarray(X)
    Y=np.asarray(Y)
    # 'diff' estimates the longitudinal span of the circle; a large value
    # means the circle wraps across the +/-180 degree seam
    diff = 999999999999999
    if (np.min(X)>0):
        diff = np.max(X)-np.min(X)
    elif (np.min(X)<0 and np.max(X)>0):
        diff = np.max(X)-np.min(X)
    elif (np.min(X)<0 and np.max(X)<0):
        diff = np.abs(np.min(X))-np.abs(np.max(X))
    # Wrapping the circle correctly across map bounds
    # simple fix enough here
    if (diff>300):
        # split into the positive-longitude and negative-longitude halves
        X2 = X[X>0]
        Y2 = Y[X>0]
        Y = Y[X<=0]
        X = X[X<=0]
        X2,Y2 = m(X2,Y2)
        p = plt.plot(X2,Y2,**kwargs)
        X,Y = m(X,Y)
        # reuse the first half's colour unless the caller fixed one
        if 'color' in kwargs:
            plt.plot(X,Y,**kwargs)
        else:
            plt.plot(X,Y,color=p[0].get_color(),**kwargs)
    else:
        #~ m.plot(X,Y,**kwargs) #Should work, but doesn't...
        X,Y = m(X,Y)
        plt.plot(X,Y,**kwargs)
def plot_histo(data,title,
               filename='histo.pdf',
               xlabel='Elevation (m)',unit='m',
               GaussianModel=False,sigmamodel=270,meanmodel=-2950,
               legends="upper right",text="left",
               approximation_display="int",
               xlim=None,
               savefig=False,
               fig_x=9.5,
               fig_y=9,
               weights=None,nbins=40):
    '''
    Plot histogram with some statistics

    Plots a histogram of *data*, overlays a Gaussian fitted to the data's
    (optionally weighted) mean/std, and writes mean / median / sigma on the
    axes. If GaussianModel is True a second Gaussian with meanmodel /
    sigmamodel is overlaid for comparison.

    Parameters
    ----------
    data : array-like values to histogram; masked to xlim when given
    title, xlabel, unit : labelling; *unit* is appended to the statistics
    legends : "upper right" / "upper left" (any other value: no legend)
    text : "right" puts the statistics text on the right, else on the left
    approximation_display : "int" ceils displayed stats to integers,
        otherwise they are rounded to one decimal
    savefig : save to *filename* at 300 dpi when True
    fig_x, fig_y : figure size in centimetres
    weights : optional per-sample weights (weighted mean/std shown as _W)
    nbins : number of histogram bins

    Returns
    -------
    matplotlib Axes holding the histogram
    '''
    define_rcParams()
    plt.figure(figsize=(fig_x/2.54,fig_y/2.54))
    if xlim:
        data = np.ma.MaskedArray(data, mask=( (data<xlim[0]) | (data>xlim[1]) ))
    n, bins, patches = plt.hist(x=data,bins=nbins, color='#0504aa',alpha=0.7, rwidth=0.85,weights=weights)
    plt.grid(axis='y', alpha=0.75)
    plt.xlabel(xlabel)
    plt.ylabel('Frequency')
    plt.title(title)
    mean = np.nanmean(data)
    median = np.ma.median(data)
    sigma = np.nanstd(data)
    if weights is not None:
        # replace the plain statistics by their weighted counterparts
        ma = np.ma.MaskedArray(data, mask=np.isnan(data))
        meanW = np.ma.average(ma, weights=weights)
        dsw = DescrStatsW(ma, weights=weights)
        stdW = dsw.std # weighted std
        mean = meanW
        sigma = stdW
    # Gaussian from the data statistics, rescaled to the histogram peak
    xval = np.linspace(np.nanmin(data),np.nanmax(data),1000)
    yval = np.exp(-(xval-mean)**2/(2*sigma**2)) / np.sqrt(2*np.pi*sigma**2)
    yval = yval*n.max()/np.nanmax(yval)
    plt.plot(xval,yval,label='Data Gaussian model')
    if GaussianModel:
        yval2 = np.exp(-(xval-meanmodel)**2/(2*sigmamodel**2)) / np.sqrt(2*np.pi*sigmamodel**2)
        yval2 = yval2*n.max()/np.nanmax(yval2)
        p = plt.plot(xval,yval2,label='Estimated Gaussian model')
    if approximation_display=="int":
        mean = int(np.ceil(mean))
        med = int(np.ceil(median))
        sigma = int(np.ceil(sigma))
    else:
        mean = np.around(mean,1)
        med = np.around(median,1)
        sigma = np.around(sigma,1)
    if legends=="upper right":
        plt.legend(loc=legends, bbox_to_anchor=(1,1),framealpha=0.4)
    elif legends=="upper left":
        plt.legend(loc=legends, bbox_to_anchor=(0,1),framealpha=0.4)
    ax = plt.gca()
    if text=="right":
        xtext = 0.7
    else:
        xtext = 0.05
    # axes-fraction position of the statistics text and its line spacing
    ytext = 0.7 ; voff = 0.035 ; hoff = 0.1
    if weights is not None:
        plt.text(xtext,ytext ,r'$\overline{elev}_W$',transform=ax.transAxes) ; plt.text(xtext+hoff,ytext ,r'$=$'+str(mean) +' '+unit,transform=ax.transAxes)
    else:
        plt.text(xtext,ytext ,r'$\overline{elev}$',transform=ax.transAxes) ; plt.text(xtext+hoff,ytext ,r'$=$'+str(mean) +' '+unit,transform=ax.transAxes)
    plt.text(xtext,ytext-voff ,r'$median$',transform=ax.transAxes) ; plt.text(xtext+hoff,ytext-voff ,r'$=$'+str(med) +' '+unit,transform=ax.transAxes)
    if weights is not None:
        plt.text(xtext,ytext-2*voff,r'$\sigma_{W}$',transform=ax.transAxes) ; plt.text(xtext+hoff,ytext-2*voff,r'$=$'+str(sigma) +' '+unit,transform=ax.transAxes)
    else:
        plt.text(xtext,ytext-2*voff,r'$\sigma$',transform=ax.transAxes) ; plt.text(xtext+hoff,ytext-2*voff,r'$=$'+str(sigma) +' '+unit,transform=ax.transAxes)
    if GaussianModel:
        # model
        if weights is None:
            vshift = 0.15
        else:
            vshift = 0.18
        plt.text(xtext,ytext-vshift ,r'$\overline{elev}$',transform=ax.transAxes,color=p[0].get_color())
        plt.text(xtext+hoff,ytext-vshift,r'$=$'+str(meanmodel) +' '+unit,transform=ax.transAxes,color=p[0].get_color())
        plt.text(xtext,ytext-vshift-voff ,r'$\sigma$',transform=ax.transAxes,color=p[0].get_color())
        plt.text(xtext+hoff,ytext-vshift-voff,r'$=$'+str(sigmamodel) +' '+unit,transform=ax.transAxes,color=p[0].get_color())
    maxfreq = n.max()
    # NOTE(review): plt.ylim(ymax=...) was deprecated in matplotlib 3.0 and
    # later removed (newer versions expect top=...) — confirm the pinned
    # matplotlib version still accepts it
    plt.ylim(ymax=np.ceil(maxfreq / 10) * 10 if maxfreq % 10 else maxfreq + 10)
    if xlim:
        plt.xlim(xlim)
    ax.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
    ax.yaxis.get_major_formatter().set_powerlimits((0, 1))
    if (savefig):
        plt.savefig(filename,dpi=300)
    return ax
def plot_correlation(x,y,title,filename='correlation.pdf',
                     nbins = 20,
                     xlabel='x',ylabel='y',unit='m',text="right",plottext=True,
                     xlim=None,ylim=None,ticks=None,
                     savefig=False,
                     fig_x=9.5,
                     fig_y=9,
                     weights=None):
    ''' ticks = [tick_dx1,tick_dx2,tick_dy1,tick_dy2]
    unit = from "y" variable

    Scatter-plots y against x, then bins x into *nbins* overlapping windows
    and overlays the per-bin median (red), mean (blue) and median +/- one
    (optionally weighted) standard deviation (green). When *plottext* is
    True the mean / median / sigma of the binned medians are written on
    the axes together with a horizontal line at the overall median.

    Returns the matplotlib Axes of the plot.
    '''
    define_rcParams()
    plt.figure(figsize=(fig_x/2.54,fig_y/2.54))
    ax = plt.gca()
    plt.plot(x,y,'ko',markersize=0.5,zorder=0,rasterized=True)
    # bin centres and half bin-width over the x range (or xlim when given)
    if (xlim):
        xstat = np.linspace(xlim[0],xlim[1],nbins)
        nb12 = (xlim[1]-xlim[0])/(2*nbins)
    else:
        xstat = np.linspace(np.nanmin(x),np.nanmax(x),nbins)
        nb12 = (np.nanmax(x)-np.nanmin(x))/(2*nbins)
    mean = [] ; median = [] ; sigma = [] ; xused = []
    for rate in xstat:
        data = y[( (x>=rate-nb12) & (x<=rate+nb12))]
        if weights is not None:
            selweights = weights[( (x>=rate-nb12) & (x<=rate+nb12))]
        # NOTE(review): 'data' is a numpy array; 'data!=[]' relies on
        # numpy's deprecated array-vs-list comparison (returns True) —
        # 'data.size > 0' would be the explicit test; verify on the
        # numpy version in use
        if data!=[]:
            xused.append(rate)
            med = np.ma.median(data)
            if weights is None:
                avg = np.nanmean(data)
                std = np.nanstd(data)
            else:
                # weighted mean / std over the non-NaN values of the bin
                ma = np.ma.MaskedArray(data, mask=np.isnan(data))
                avgW = np.ma.average(ma, weights=selweights)
                dsw = DescrStatsW(ma, weights=selweights)
                stdW = dsw.std # weighted std
                avg = avgW
                std = stdW
            mean.append(avg)
            median.append(med)
            sigma.append(std)
    mean = np.asarray(mean) ; sigma = np.asarray(sigma) ; median = np.asarray(median) ; xused = np.asarray(xused)
    plt.plot(xused,median,color='r',zorder=3,linewidth=2,label='median')
    plt.plot(xused,np.add(median,sigma),color='g',linewidth=2,zorder=4)
    plt.plot(xused,np.subtract(median,sigma),color='g',linewidth=2,zorder=5)
    plt.plot(xused,mean,color='b',zorder=3,linewidth=2,label='mean')
    if plottext:
        # statistics of the binned medians, plus a horizontal median line
        if (xlim):
            xstat = np.linspace(xlim[0],xlim[1],1000)
        else:
            xstat = np.linspace(np.nanmin(x),np.nanmax(x),1000)
        themedian = np.zeros_like(xstat)
        themedian = themedian + np.ma.median(median)
        mean = np.around(np.nanmean(median),1)
        med = np.around(np.ma.median(median),1)
        sigma = np.around(np.nanstd(median),1)
        plt.plot(xstat,themedian)
        if text=="left":
            xtext = 0.25
        elif text=="right":
            xtext = 0.7
        else:
            xtext = 0.35
        ytext = 0.85 ; voff = 0.03 ; hoff = 0.1
        plt.text(xtext,ytext ,r'$\overline{elev}$',transform=ax.transAxes) ; plt.text(xtext+hoff,ytext ,r'$=$'+str(mean) +' '+unit,transform=ax.transAxes)
        plt.text(xtext,ytext-voff ,r'$median$',transform=ax.transAxes) ; plt.text(xtext+hoff,ytext-voff ,r'$=$'+str(med) +' '+unit,transform=ax.transAxes)
        plt.text(xtext,ytext-2*voff,r'$\sigma$',transform=ax.transAxes) ; plt.text(xtext+hoff,ytext-2*voff,r'$=$'+str(sigma) +' '+unit,transform=ax.transAxes)
    ax.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
    ax.yaxis.get_major_formatter().set_powerlimits((0, 1))
    # derive tick spacings from the plotted ranges unless supplied
    if not ticks:
        if (xlim):
            tick_dx1 = int(np.ceil((xlim[1]-xlim[0])/6))
            tick_dx2 = int(np.ceil((xlim[1]-xlim[0])/24))
        else:
            tick_dx1 = int(np.ceil((np.nanmax(x)-np.nanmin(x))/6))
            tick_dx2 = int(np.ceil((np.nanmax(x)-np.nanmin(x))/24))
        if (ylim):
            tick_dy1 = int(np.ceil((ylim[1]-ylim[0])/5))
            tick_dy2 = int(np.ceil((ylim[1]-ylim[0])/20))
        else:
            tick_dy1 = int(np.ceil((np.nanmax(y)-np.nanmin(y))/5))
            tick_dy2 = int(np.ceil((np.nanmax(y)-np.nanmin(y))/20))
    else:
        tick_dx1 = ticks[0]
        tick_dx2 = ticks[1]
        tick_dy1 = ticks[2]
        tick_dy2 = ticks[3]
    ax.yaxis.set_major_locator(MultipleLocator(tick_dy1))
    ax.yaxis.set_minor_locator(MultipleLocator(tick_dy2))
    ax.xaxis.set_major_locator(MultipleLocator(tick_dx1))
    ax.xaxis.set_minor_locator(MultipleLocator(tick_dx2))
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    if (xlim):
        plt.xlim(xlim)
    if (ylim):
        plt.ylim(ylim)
    plt.legend()
    if (savefig):
        plt.savefig(filename,dpi=300)
    return ax
#--------------------------- READING DATA ---------------------
def define_MOR_pts(path='./data/topography/',selection_name='MOR_pts_all',distance_between_pts_along_ridges = 25):
    '''
    Define points with elevation and spreading rates along MOR
    selection_name = 'MOR_pts_all' or 'MOR_pts_far_from_hs' or 'MOR_pts_close_to_hs'

    distance_between_pts_along_ridges is given in km (converted to metres
    below). Results are cached as a pickle
    (<path>/<selection_name>_<spacing>km.dat); when the cache file exists
    it is loaded instead of recomputing.
    '''
    if os.path.exists('./data_figshare/topography/'):
        path='./data_figshare/topography/'
    filename = './'+selection_name+'_'+str(distance_between_pts_along_ridges)+'km.dat'
    distance_between_pts_along_ridges = distance_between_pts_along_ridges*1e3
    if not (os.path.isfile(path+filename)):
        print("#################### Loading datasets #########################")
        x,y,elev = load_etopo(path=path)
        x,y,spreading_rate = load_spreading_rate(path=path)
        x,y,age = load_seafloor_ages(path=path)
        x,y,strain_rate = load_strain_rate(path=path)
        x,y,dist_closest_hs = load_hotspots(path=path)
        print("#################### Applying mask on datasets #########################")
        # data selection
        min_dist_hs = 1000000 # m
        max_seafloor_age = 10 # Myrs the width depends on spreading rate - 10 Myrs is a good compromise for computational reason
        # It gives 50 km from ridge axis for ultra-slow 1 cm/yr full spreading rate MOR
        max_seafloor_age_for_ridge_axis = 0.5
        threshold_strain_rate = 1e-16 # s-1
        xx, yy = np.meshgrid(x, y)
        # masks: submarine points young enough to belong to the ridge,
        # optionally filtered by distance to the closest hotspot
        if selection_name=='MOR_pts_all':
            mask = ( (elev<0) & (age<=max_seafloor_age) )
            mask_axis = ( (elev<0) & (age<=max_seafloor_age_for_ridge_axis) )
        elif selection_name=='MOR_pts_far_from_hs':
            mask = ( (elev<0) & (age<=max_seafloor_age) & (dist_closest_hs > min_dist_hs) )
            mask_axis = ( (elev<0) & (age<=max_seafloor_age_for_ridge_axis) & (dist_closest_hs > min_dist_hs) )
        elif selection_name=='MOR_pts_close_to_hs':
            mask = ( (elev<0) & (age<=max_seafloor_age) & (dist_closest_hs <= min_dist_hs) )
            mask_axis = ( (elev<0) & (age<=max_seafloor_age_for_ridge_axis) & (dist_closest_hs <= min_dist_hs) )
        else:
            print("ERROR incorrect selection_name ")
            quit()
        # this array is used to define the localisation of the MOR
        active_MOR_elev = elev[mask]
        active_MOR_x = xx[mask]
        active_MOR_y = yy[mask]
        active_MOR_x_axis = xx[mask_axis]
        active_MOR_y_axis = yy[mask_axis]
        active_MOR_spreading_rate = spreading_rate[mask]
        active_MOR_strain_rate = strain_rate[mask]
        dd = 1.5 # Distance to look for points in the grid that belong to the same MOR segment
        # given in degrees for computational reason, could be function of the spreading rate
        # Here, we define a constant that cover all cases (Fig. S4b) (~150-200 km)
        # W ~ 100-150 km ~ distance between rift flanks at ultra-slow spreading rates (Fig. 3)
        #
        new_active_MOR_y = [] ; new_active_MOR_x = [] ; new_active_MOR_elev = [] ; new_active_MOR_spreading_rate = []
        ipt = 0
        print('Total #pts on the grid for age<={} Myrs = {} '.format(max_seafloor_age_for_ridge_axis,len(active_MOR_x_axis)))
        print("#################### Browsing all MOR points #########################")
        for xpt,ypt in zip(active_MOR_x_axis,active_MOR_y_axis):
            # collapse the cloud of axis points within a dd x dd window to
            # its median location
            xsel = active_MOR_x_axis[ ( (np.abs(active_MOR_x_axis-xpt)<=dd/2) & (np.abs(active_MOR_y_axis-ypt)<=dd/2) ) ]
            ysel = active_MOR_y_axis[ ( (np.abs(active_MOR_x_axis-xpt)<=dd/2) & (np.abs(active_MOR_y_axis-ypt)<=dd/2) ) ]
            newx = np.median(xsel)
            newy = np.median(ysel)
            if (ipt==0):
                new_active_MOR_x.append(newx)
                new_active_MOR_y.append(newy)
                esel = active_MOR_elev[ ( (np.abs(active_MOR_x-newx)<=dd/2) & (np.abs(active_MOR_y-newy)<=dd/2) ) ]
                new_active_MOR_elev.append(np.max(esel))
                srsel = active_MOR_spreading_rate[ ( (np.abs(active_MOR_x-newx)<=dd/2) & (np.abs(active_MOR_y-newy)<=dd/2) ) ]
                new_active_MOR_spreading_rate.append(np.median(srsel))
            else:
                # keep the candidate only when it is actively deforming and
                # far enough (geodesic distance) from the previous kept point
                stsel = active_MOR_strain_rate[ ( (np.abs(active_MOR_x-newx)<=dd/2) & (np.abs(active_MOR_y-newy)<=dd/2) ) ]
                if (np.any(stsel>=threshold_strain_rate)):
                    azimuth1, azimuth2, dist = geod.inv(new_active_MOR_x[-1], new_active_MOR_y[-1], newx, newy)
                    if ( dist >= distance_between_pts_along_ridges ):
                        esel = active_MOR_elev[ ( (np.abs(active_MOR_x-newx)<=dd/2) & (np.abs(active_MOR_y-newy)<=dd/2) ) ]
                        srsel = active_MOR_spreading_rate[ ( (np.abs(active_MOR_x-newx)<=dd/2) & (np.abs(active_MOR_y-newy)<=dd/2) ) ]
                        new_active_MOR_x.append(newx)
                        new_active_MOR_y.append(newy)
                        new_active_MOR_elev.append(np.max(esel))
                        new_active_MOR_spreading_rate.append(np.median(srsel))
            ipt = ipt + 1
            if ipt%5000 == 0:
                print("{}/{}".format(ipt,len(active_MOR_x_axis)))
        new_active_MOR_x = np.asarray(new_active_MOR_x)
        new_active_MOR_y = np.asarray(new_active_MOR_y)
        new_active_MOR_elev = np.asarray(new_active_MOR_elev)
        new_active_MOR_spreading_rate = np.asarray(new_active_MOR_spreading_rate)
        with open(path+filename, 'wb') as filehandle:
            pickle.dump([new_active_MOR_x,new_active_MOR_y,new_active_MOR_elev,new_active_MOR_spreading_rate],filehandle)
        print('Total defined pts along MOR = {} '.format(len(new_active_MOR_x)))
    else:
        print("This selection already exists ({})".format(path+filename))
        with open(path+filename, 'rb') as filehandle:
            # read the data as binary data stream
            [new_active_MOR_x,new_active_MOR_y,new_active_MOR_elev,new_active_MOR_spreading_rate] = pickle.load(filehandle)
        print('Total defined pts along MOR = {} '.format(len(new_active_MOR_x)))
| 42.378882 | 179 | 0.563535 | #!/usr/bin/env python
import numpy as np
import colorsys
import matplotlib.pyplot as plt
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator,ScalarFormatter)
import netCDF4
import os
import pyproj
import pickle
from statsmodels.stats.weightstats import DescrStatsW
from matplotlib import rcParams, rcParamsDefault
import xarray as xr
geod = pyproj.Geod(ellps='WGS84')
def define_rcParams():
    """Apply the project's default matplotlib settings and return rcParams.

    Sets small fonts/markers and a ~10x8 cm default figure size suited to
    the paper's two-column figures.
    """
    settings = {
        "text.usetex": False,
        "font.size": 6,
        "axes.titlesize": 8,
        "axes.labelsize": 6,
        "lines.linewidth": 1,
        "lines.markersize": 4,
        "xtick.labelsize": 6,
        "ytick.labelsize": 6,
        # centimetres -> inches
        "figure.figsize": [10/2.54, 8/2.54],
    }
    rcParams.update(settings)
    return rcParams
def gmtColormap(fileName, GMTPath=None):
    '''Load a GMT ``.cpt`` colour-palette file into a matplotlib colour dict.

    https://scipy-cookbook.readthedocs.io/items/Matplotlib_Loading_a_colormap_dynamically.html
    modified from James Boyle and Andrew Straw - Thomas Theunissen 2021

    Parameters
    ----------
    fileName : str
        Palette name without the ``.cpt`` extension.
    GMTPath : str, optional
        Directory containing the file; defaults to the current directory.

    Returns
    -------
    dict with 'red', 'green', 'blue' segment lists suitable for
    matplotlib.colors.LinearSegmentedColormap, or None if the file
    cannot be read.
    '''
    if GMTPath is None:
        filePath = "./" + fileName + ".cpt"
    else:
        filePath = GMTPath + "/" + fileName + ".cpt"
    try:
        with open(filePath) as f:
            lines = f.readlines()
    except OSError:
        # narrowed from a bare except; message kept identical to the original
        print("file " + filePath + "not found")
        return None
    x = []
    r = []
    g = []
    b = []
    colorModel = "RGB"
    xtemp = rtemp = gtemp = btemp = None
    for l in lines:
        ls = l.split()
        if len(l) <= 1:
            continue
        if l[0] == "#":
            # a trailing HSV token in a comment switches the colour model
            if ls[-1] == "HSV":
                colorModel = "HSV"
            continue
        if ls[0] in ("B", "F", "N"):
            # background / foreground / NaN entries carry no segment data
            continue
        # left edge of the colour segment
        x.append(float(ls[0]))
        r.append(float(ls[1]))
        g.append(float(ls[2]))
        b.append(float(ls[3]))
        # right edge; only the last one is appended, after the loop
        xtemp = float(ls[4])
        rtemp = float(ls[5])
        gtemp = float(ls[6])
        btemp = float(ls[7])
    x.append(xtemp)
    r.append(rtemp)
    g.append(gtemp)
    b.append(btemp)
    x = np.array(x, np.float32)
    r = np.array(r, np.float32)
    g = np.array(g, np.float32)
    b = np.array(b, np.float32)
    if colorModel == "HSV":
        # convert once; the original ran this loop twice, applying
        # hsv_to_rgb to already-converted values and corrupting HSV palettes
        for i in range(r.shape[0]):
            rr, gg, bb = colorsys.hsv_to_rgb(r[i] / 360., g[i], b[i])
            r[i] = rr
            g[i] = gg
            b[i] = bb
    elif colorModel == "RGB":
        r = r / 255.
        g = g / 255.
        b = b / 255.
    xNorm = (x - x[0]) / (x[-1] - x[0])
    red = []
    blue = []
    green = []
    for i in range(len(x)):
        red.append([xNorm[i], r[i], r[i]])
        green.append([xNorm[i], g[i], g[i]])
        blue.append([xNorm[i], b[i], b[i]])
    colorDict = {"red": red, "green": green, "blue": blue}
    return (colorDict)
def shoot(lon, lat, azimuth, maxdist=None):
    """Shooter Function: geodesic forward problem on the WGS84 ellipsoid.

    From (lon, lat) in degrees, travel *maxdist* kilometres along the
    initial bearing *azimuth* (degrees clockwise from north).

    Returns
    -------
    (glon2, glat2, baz) : destination longitude/latitude and back azimuth
    from the destination, all in degrees.

    Raises
    ------
    ValueError
        When starting at a pole with a non N-S course (azimuth undefined).

    Original javascript on http://williams.best.vwh.net/gccalc.htm
    Translated to python by Thomas Lecocq
    https://www.geophysique.be/2011/02/20/matplotlib-basemap-tutorial-09-drawing-circles/
    """
    glat1 = lat * np.pi / 180.
    glon1 = lon * np.pi / 180.
    s = maxdist / 1.852          # km -> nautical miles (semi-axis below is in nm)
    faz = azimuth * np.pi / 180.
    EPS = 0.00000000005
    if ((np.abs(np.cos(glat1)) < EPS) and not (np.abs(np.sin(faz)) < EPS)):
        # the original called JavaScript's alert(), undefined in Python,
        # which crashed with a NameError; raise a real exception instead
        raise ValueError("Only N-S courses are meaningful, starting at a pole!")
    a = 6378.13 / 1.852          # WGS84 equatorial radius in nautical miles
    f = 1 / 298.257223563        # WGS84 flattening
    r = 1 - f
    tu = r * np.tan(glat1)
    sf = np.sin(faz)
    cf = np.cos(faz)
    if (cf == 0):
        b = 0.
    else:
        b = 2. * np.arctan2(tu, cf)
    cu = 1. / np.sqrt(1 + tu * tu)
    su = tu * cu
    sa = cu * sf
    c2a = 1 - sa * sa
    x = 1. + np.sqrt(1. + c2a * (1. / (r * r) - 1.))
    x = (x - 2.) / x
    c = 1. - x
    c = (x * x / 4. + 1.) / c
    d = (0.375 * x * x - 1.) * x
    tu = s / (r * a * c)
    y = tu
    c = y + 1
    # iterate the Vincenty direct formula until the arc length converges
    while (np.abs(y - c) > EPS):
        sy = np.sin(y)
        cy = np.cos(y)
        cz = np.cos(b + y)
        e = 2. * cz * cz - 1.
        c = y
        x = e * cy
        y = e + e - 1.
        y = (((sy * sy * 4. - 3.) * y * cz * d / 6. + x) *
             d / 4. - cz) * sy * d + tu
    b = cu * cy * cf - su * sy
    c = r * np.sqrt(sa * sa + b * b)
    d = su * cy + cu * sy * cf
    glat2 = (np.arctan2(d, c) + np.pi) % (2 * np.pi) - np.pi
    c = cu * cy - su * sy * cf
    x = np.arctan2(sy * sf, c)
    c = ((-3. * c2a + 4.) * f + 4.) * c2a * f / 16.
    d = ((e * cy * c + cz) * sy * c + y) * sa
    glon2 = ((glon1 + x - (1. - c) * d * f + np.pi) % (2 * np.pi)) - np.pi
    baz = (np.arctan2(sa, b) + np.pi) % (2 * np.pi)
    glon2 *= 180. / np.pi
    glat2 *= 180. / np.pi
    baz *= 180. / np.pi
    return (glon2, glat2, baz)
def equi(m, centerlon, centerlat, radius, *args, **kwargs):
    ''' Plotting circles on a map

    Draw a geodesic circle of *radius* km around (centerlon, centerlat)
    on Basemap instance ``m``, sampling one point per degree of azimuth
    with shoot(). When the circle crosses the map's longitude seam, the
    polyline is split into east/west halves so no line is drawn across
    the whole map. Extra keyword arguments are forwarded to plt.plot.
    '''
    glon1 = centerlon
    glat1 = centerlat
    X = [] ;
    Y = [] ;
    for azimuth in range(0, 360):
        glon2, glat2, baz = shoot(glon1, glat1, azimuth, radius)
        X.append(glon2)
        Y.append(glat2)
    # close the polygon
    X.append(X[0])
    Y.append(Y[0])
    X=np.asarray(X)
    Y=np.asarray(Y)
    # 'diff' estimates the longitudinal span of the circle; a large value
    # means the circle wraps across the +/-180 degree seam
    diff = 999999999999999
    if (np.min(X)>0):
        diff = np.max(X)-np.min(X)
    elif (np.min(X)<0 and np.max(X)>0):
        diff = np.max(X)-np.min(X)
    elif (np.min(X)<0 and np.max(X)<0):
        diff = np.abs(np.min(X))-np.abs(np.max(X))
    # Wrapping the circle correctly across map bounds
    # simple fix enough here
    if (diff>300):
        # split into the positive-longitude and negative-longitude halves
        X2 = X[X>0]
        Y2 = Y[X>0]
        Y = Y[X<=0]
        X = X[X<=0]
        X2,Y2 = m(X2,Y2)
        p = plt.plot(X2,Y2,**kwargs)
        X,Y = m(X,Y)
        # reuse the first half's colour unless the caller fixed one
        if 'color' in kwargs:
            plt.plot(X,Y,**kwargs)
        else:
            plt.plot(X,Y,color=p[0].get_color(),**kwargs)
    else:
        #~ m.plot(X,Y,**kwargs) #Should work, but doesn't...
        X,Y = m(X,Y)
        plt.plot(X,Y,**kwargs)
def plot_histo(data,title,
               filename='histo.pdf',
               xlabel='Elevation (m)',unit='m',
               GaussianModel=False,sigmamodel=270,meanmodel=-2950,
               legends="upper right",text="left",
               approximation_display="int",
               xlim=None,
               savefig=False,
               fig_x=9.5,
               fig_y=9,
               weights=None,nbins=40):
    '''
    Plot histogram with some statistics

    Plots a histogram of *data*, overlays a Gaussian fitted to the data's
    (optionally weighted) mean/std, and writes mean / median / sigma on the
    axes. If GaussianModel is True a second Gaussian with meanmodel /
    sigmamodel is overlaid for comparison.

    Parameters
    ----------
    data : array-like values to histogram; masked to xlim when given
    title, xlabel, unit : labelling; *unit* is appended to the statistics
    legends : "upper right" / "upper left" (any other value: no legend)
    text : "right" puts the statistics text on the right, else on the left
    approximation_display : "int" ceils displayed stats to integers,
        otherwise they are rounded to one decimal
    savefig : save to *filename* at 300 dpi when True
    fig_x, fig_y : figure size in centimetres
    weights : optional per-sample weights (weighted mean/std shown as _W)
    nbins : number of histogram bins

    Returns
    -------
    matplotlib Axes holding the histogram
    '''
    define_rcParams()
    plt.figure(figsize=(fig_x/2.54,fig_y/2.54))
    if xlim:
        data = np.ma.MaskedArray(data, mask=( (data<xlim[0]) | (data>xlim[1]) ))
    n, bins, patches = plt.hist(x=data,bins=nbins, color='#0504aa',alpha=0.7, rwidth=0.85,weights=weights)
    plt.grid(axis='y', alpha=0.75)
    plt.xlabel(xlabel)
    plt.ylabel('Frequency')
    plt.title(title)
    mean = np.nanmean(data)
    median = np.ma.median(data)
    sigma = np.nanstd(data)
    if weights is not None:
        # replace the plain statistics by their weighted counterparts
        ma = np.ma.MaskedArray(data, mask=np.isnan(data))
        meanW = np.ma.average(ma, weights=weights)
        dsw = DescrStatsW(ma, weights=weights)
        stdW = dsw.std # weighted std
        mean = meanW
        sigma = stdW
    # Gaussian from the data statistics, rescaled to the histogram peak
    xval = np.linspace(np.nanmin(data),np.nanmax(data),1000)
    yval = np.exp(-(xval-mean)**2/(2*sigma**2)) / np.sqrt(2*np.pi*sigma**2)
    yval = yval*n.max()/np.nanmax(yval)
    plt.plot(xval,yval,label='Data Gaussian model')
    if GaussianModel:
        yval2 = np.exp(-(xval-meanmodel)**2/(2*sigmamodel**2)) / np.sqrt(2*np.pi*sigmamodel**2)
        yval2 = yval2*n.max()/np.nanmax(yval2)
        p = plt.plot(xval,yval2,label='Estimated Gaussian model')
    if approximation_display=="int":
        mean = int(np.ceil(mean))
        med = int(np.ceil(median))
        sigma = int(np.ceil(sigma))
    else:
        mean = np.around(mean,1)
        med = np.around(median,1)
        sigma = np.around(sigma,1)
    if legends=="upper right":
        plt.legend(loc=legends, bbox_to_anchor=(1,1),framealpha=0.4)
    elif legends=="upper left":
        plt.legend(loc=legends, bbox_to_anchor=(0,1),framealpha=0.4)
    ax = plt.gca()
    if text=="right":
        xtext = 0.7
    else:
        xtext = 0.05
    # axes-fraction position of the statistics text and its line spacing
    ytext = 0.7 ; voff = 0.035 ; hoff = 0.1
    if weights is not None:
        plt.text(xtext,ytext ,r'$\overline{elev}_W$',transform=ax.transAxes) ; plt.text(xtext+hoff,ytext ,r'$=$'+str(mean) +' '+unit,transform=ax.transAxes)
    else:
        plt.text(xtext,ytext ,r'$\overline{elev}$',transform=ax.transAxes) ; plt.text(xtext+hoff,ytext ,r'$=$'+str(mean) +' '+unit,transform=ax.transAxes)
    plt.text(xtext,ytext-voff ,r'$median$',transform=ax.transAxes) ; plt.text(xtext+hoff,ytext-voff ,r'$=$'+str(med) +' '+unit,transform=ax.transAxes)
    if weights is not None:
        plt.text(xtext,ytext-2*voff,r'$\sigma_{W}$',transform=ax.transAxes) ; plt.text(xtext+hoff,ytext-2*voff,r'$=$'+str(sigma) +' '+unit,transform=ax.transAxes)
    else:
        plt.text(xtext,ytext-2*voff,r'$\sigma$',transform=ax.transAxes) ; plt.text(xtext+hoff,ytext-2*voff,r'$=$'+str(sigma) +' '+unit,transform=ax.transAxes)
    if GaussianModel:
        # model
        if weights is None:
            vshift = 0.15
        else:
            vshift = 0.18
        plt.text(xtext,ytext-vshift ,r'$\overline{elev}$',transform=ax.transAxes,color=p[0].get_color())
        plt.text(xtext+hoff,ytext-vshift,r'$=$'+str(meanmodel) +' '+unit,transform=ax.transAxes,color=p[0].get_color())
        plt.text(xtext,ytext-vshift-voff ,r'$\sigma$',transform=ax.transAxes,color=p[0].get_color())
        plt.text(xtext+hoff,ytext-vshift-voff,r'$=$'+str(sigmamodel) +' '+unit,transform=ax.transAxes,color=p[0].get_color())
    maxfreq = n.max()
    # NOTE(review): plt.ylim(ymax=...) was deprecated in matplotlib 3.0 and
    # later removed (newer versions expect top=...) — confirm the pinned
    # matplotlib version still accepts it
    plt.ylim(ymax=np.ceil(maxfreq / 10) * 10 if maxfreq % 10 else maxfreq + 10)
    if xlim:
        plt.xlim(xlim)
    ax.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
    ax.yaxis.get_major_formatter().set_powerlimits((0, 1))
    if (savefig):
        plt.savefig(filename,dpi=300)
    return ax
def plot_correlation(x,y,title,filename='correlation.pdf',
                     nbins = 20,
                     xlabel='x',ylabel='y',unit='m',text="right",plottext=True,
                     xlim=None,ylim=None,ticks=None,
                     savefig=False,
                     fig_x=9.5,
                     fig_y=9,
                     weights=None):
    ''' ticks = [tick_dx1,tick_dx2,tick_dy1,tick_dy2]
    unit = from "y" variable

    Scatter-plots y against x, then bins x into *nbins* overlapping windows
    and overlays the per-bin median (red), mean (blue) and median +/- one
    (optionally weighted) standard deviation (green). When *plottext* is
    True the mean / median / sigma of the binned medians are written on
    the axes together with a horizontal line at the overall median.

    Returns the matplotlib Axes of the plot.
    '''
    define_rcParams()
    plt.figure(figsize=(fig_x/2.54,fig_y/2.54))
    ax = plt.gca()
    plt.plot(x,y,'ko',markersize=0.5,zorder=0,rasterized=True)
    # bin centres and half bin-width over the x range (or xlim when given)
    if (xlim):
        xstat = np.linspace(xlim[0],xlim[1],nbins)
        nb12 = (xlim[1]-xlim[0])/(2*nbins)
    else:
        xstat = np.linspace(np.nanmin(x),np.nanmax(x),nbins)
        nb12 = (np.nanmax(x)-np.nanmin(x))/(2*nbins)
    mean = [] ; median = [] ; sigma = [] ; xused = []
    for rate in xstat:
        data = y[( (x>=rate-nb12) & (x<=rate+nb12))]
        if weights is not None:
            selweights = weights[( (x>=rate-nb12) & (x<=rate+nb12))]
        # NOTE(review): 'data' is a numpy array; 'data!=[]' relies on
        # numpy's deprecated array-vs-list comparison (returns True) —
        # 'data.size > 0' would be the explicit test; verify on the
        # numpy version in use
        if data!=[]:
            xused.append(rate)
            med = np.ma.median(data)
            if weights is None:
                avg = np.nanmean(data)
                std = np.nanstd(data)
            else:
                # weighted mean / std over the non-NaN values of the bin
                ma = np.ma.MaskedArray(data, mask=np.isnan(data))
                avgW = np.ma.average(ma, weights=selweights)
                dsw = DescrStatsW(ma, weights=selweights)
                stdW = dsw.std # weighted std
                avg = avgW
                std = stdW
            mean.append(avg)
            median.append(med)
            sigma.append(std)
    mean = np.asarray(mean) ; sigma = np.asarray(sigma) ; median = np.asarray(median) ; xused = np.asarray(xused)
    plt.plot(xused,median,color='r',zorder=3,linewidth=2,label='median')
    plt.plot(xused,np.add(median,sigma),color='g',linewidth=2,zorder=4)
    plt.plot(xused,np.subtract(median,sigma),color='g',linewidth=2,zorder=5)
    plt.plot(xused,mean,color='b',zorder=3,linewidth=2,label='mean')
    if plottext:
        # statistics of the binned medians, plus a horizontal median line
        if (xlim):
            xstat = np.linspace(xlim[0],xlim[1],1000)
        else:
            xstat = np.linspace(np.nanmin(x),np.nanmax(x),1000)
        themedian = np.zeros_like(xstat)
        themedian = themedian + np.ma.median(median)
        mean = np.around(np.nanmean(median),1)
        med = np.around(np.ma.median(median),1)
        sigma = np.around(np.nanstd(median),1)
        plt.plot(xstat,themedian)
        if text=="left":
            xtext = 0.25
        elif text=="right":
            xtext = 0.7
        else:
            xtext = 0.35
        ytext = 0.85 ; voff = 0.03 ; hoff = 0.1
        plt.text(xtext,ytext ,r'$\overline{elev}$',transform=ax.transAxes) ; plt.text(xtext+hoff,ytext ,r'$=$'+str(mean) +' '+unit,transform=ax.transAxes)
        plt.text(xtext,ytext-voff ,r'$median$',transform=ax.transAxes) ; plt.text(xtext+hoff,ytext-voff ,r'$=$'+str(med) +' '+unit,transform=ax.transAxes)
        plt.text(xtext,ytext-2*voff,r'$\sigma$',transform=ax.transAxes) ; plt.text(xtext+hoff,ytext-2*voff,r'$=$'+str(sigma) +' '+unit,transform=ax.transAxes)
    ax.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
    ax.yaxis.get_major_formatter().set_powerlimits((0, 1))
    # derive tick spacings from the plotted ranges unless supplied
    if not ticks:
        if (xlim):
            tick_dx1 = int(np.ceil((xlim[1]-xlim[0])/6))
            tick_dx2 = int(np.ceil((xlim[1]-xlim[0])/24))
        else:
            tick_dx1 = int(np.ceil((np.nanmax(x)-np.nanmin(x))/6))
            tick_dx2 = int(np.ceil((np.nanmax(x)-np.nanmin(x))/24))
        if (ylim):
            tick_dy1 = int(np.ceil((ylim[1]-ylim[0])/5))
            tick_dy2 = int(np.ceil((ylim[1]-ylim[0])/20))
        else:
            tick_dy1 = int(np.ceil((np.nanmax(y)-np.nanmin(y))/5))
            tick_dy2 = int(np.ceil((np.nanmax(y)-np.nanmin(y))/20))
    else:
        tick_dx1 = ticks[0]
        tick_dx2 = ticks[1]
        tick_dy1 = ticks[2]
        tick_dy2 = ticks[3]
    ax.yaxis.set_major_locator(MultipleLocator(tick_dy1))
    ax.yaxis.set_minor_locator(MultipleLocator(tick_dy2))
    ax.xaxis.set_major_locator(MultipleLocator(tick_dx1))
    ax.xaxis.set_minor_locator(MultipleLocator(tick_dx2))
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    if (xlim):
        plt.xlim(xlim)
    if (ylim):
        plt.ylim(ylim)
    plt.legend()
    if (savefig):
        plt.savefig(filename,dpi=300)
    return ax
#--------------------------- READING DATA ---------------------
def load_spreading_rate(path='./data/topography/'):
    """Load the Muller et al. (2008) seafloor spreading-rate grid.

    Prefers the ``./data_figshare/topography/`` copy when present.

    Returns
    -------
    x, y : longitude and latitude coordinate arrays
    spreading_rate : full spreading rate (the stored half rate doubled and
        divided by 1000; the log line reports it as cm/yr)
    """
    if os.path.exists('./data_figshare/topography/'):
        path = './data_figshare/topography/'
    # Half Spreading rate; close the file once the arrays are read
    # (the original left the Dataset handle open)
    with netCDF4.Dataset(path + 'rate.3.2.nc') as nc_spreading_rate:
        x = nc_spreading_rate.variables['lon'][:]
        y = nc_spreading_rate.variables['lat'][:]
        spreading_rate = nc_spreading_rate.variables['z'][:]
    spreading_rate = 2 * spreading_rate / 1000
    print("Full seafloor spreading rate (Muller et al, 2008) cm/yr min/max {} {}".format(np.nanmin(spreading_rate), np.nanmax(spreading_rate)))
    return x, y, spreading_rate
def load_seafloor_ages(path='./data/topography/'):
    """Load the Muller et al. (2008) seafloor-age grid.

    Prefers the ``./data_figshare/topography/`` copy when present.

    Returns
    -------
    x, y : longitude and latitude coordinate arrays
    age : seafloor age (stored values divided by 100; the log line
        reports Myrs)
    """
    if os.path.exists('./data_figshare/topography/'):
        path = './data_figshare/topography/'
    # Seafloor age; close the file once the arrays are read
    # (the original left the Dataset handle open)
    with netCDF4.Dataset(path + 'age.3.2.nc') as nc_age:
        x = nc_age.variables['lon'][:]
        y = nc_age.variables['lat'][:]
        age = nc_age.variables['z'][:]
    age = age / 100
    print("Seafloor age (Muller et al, 2008) Myrs min/max {} {}".format(np.nanmin(age), np.nanmax(age)))
    return x, y, age
def load_strain_rate(path='./data/topography/'):
    """Load the Kreemer et al. (2014) GSRM strain-rate grid.

    Prefers the ``./data_figshare/topography/`` copy when present.

    Returns
    -------
    x, y : longitude and latitude coordinate arrays
    strain_rate : strain rate in s-1 (stored 1e-9/yr values converted;
        NaNs replaced by the 1e-21 floor)
    """
    if os.path.exists('./data_figshare/topography/'):
        path = './data_figshare/topography/'
    # Strain rate; close the file once the arrays are read
    # (the original left the Dataset handle open)
    with netCDF4.Dataset(path + 'GSRM_strain_2m.grd') as nc_strain_rate:
        x = nc_strain_rate.variables['lon'][:]
        y = nc_strain_rate.variables['lat'][:]
        strain_rate = nc_strain_rate.variables['z'][:]
    strain_rate = strain_rate * 1e-9 / (365 * 24 * 3600)
    strain_rate = np.where(np.isnan(strain_rate), 1e-21, strain_rate)
    print("Strain rate (Kreemer et al, 2014) s-1 min/max {} {}".format(np.nanmin(strain_rate), np.nanmax(strain_rate)))
    return x, y, strain_rate
def load_etopo(path='./data/topography/',filtered=True,resolution=2,corrected_from_ice=False):
    '''Load the ETOPO1 elevation grid (x, y, elev).

    Chooses between the filtered grid (optionally ice-corrected) and the
    raw 1 or 2 arc-min grid, reading either a single netCDF file or a
    6-tile FigShare split concatenated along longitude with xarray.
    Returns empty lists when the raw 1 arc-min grid is unavailable.
    '''
    if os.path.exists('./data_figshare/topography/'):
        path='./data_figshare/topography/'
    # Etopo 1
    # collection == True means the 6-tile xarray path was used, in which
    # case the variables are DataArrays that must be converted below
    collection = False
    if (filtered==True):
        if corrected_from_ice:
            filename = 'ETOPO1_BedIceCorrected_g_gmt4_filtered.grd'
        else:
            filename = 'ETOPO1_Bed_g_gmt4_filtered.grd'
        if (os.path.isfile(path+filename)):
            nc_etopo1 = netCDF4.Dataset(path+filename)
        else:
            # fall back to the 6-tile split of the same grid
            name = filename.split('.grd')[0]
            nc_etopo1 = xr.open_mfdataset([path+name+'_1.grd',path+name+'_2.grd',path+name+'_3.grd',path+name+'_4.grd',path+name+'_5.grd',path+name+'_6.grd'],
                                          concat_dim=['lon'], combine='nested',engine='netcdf4')
            collection = True
    else:
        if resolution==1:
            filename = 'ETOPO1_Bed_g_gmt4.grd'
        else:
            filename = 'ETOPO1_Bed_g_gmt4_2m.grd'
        if ( not os.path.isfile(path+filename)):
            if filename == 'ETOPO1_Bed_g_gmt4.grd':
                print("Unfiltered raw 1 arc-min ETOPO1 dataset not available (check if you have downloaded FigShare Dataset) - Not available on Binder ({})".format(path+filename))
                x=[] ; y=[] ; elev=[]
                return x,y,elev
            name = filename.split('.grd')[0]
            nc_etopo1 = xr.open_mfdataset([path+name+'_1.grd',path+name+'_2.grd',path+name+'_3.grd',path+name+'_4.grd',path+name+'_5.grd',path+name+'_6.grd'],
                                          concat_dim=['lon'], combine='nested',engine='netcdf4')
            collection = True
        else:
            nc_etopo1 = netCDF4.Dataset(path+filename)
    #print(nc_etopo1.variables.keys())
    x=nc_etopo1.variables['lon'][:]
    y=nc_etopo1.variables['lat'][:]
    elev=nc_etopo1.variables['z'][:]
    if collection:
        # xarray DataArrays -> plain numpy, matching the netCDF4 branch
        x=x.to_numpy()
        y=y.to_numpy()
        elev=elev.to_numpy()
    print("ETOPO 1 m ({}) min/max {} {}".format(filename,np.nanmin(elev),np.nanmax(elev)))
    return x,y,elev
def get_shape_etopo(path='./data/topography/'):
    """Open the filtered ETOPO1 grid and return the raw dataset handle.

    Used by callers that only need the lon/lat dimensions of the grid.
    Prefers a single netCDF file; otherwise assembles the six FigShare
    tiles along the longitude axis with xarray.
    """
    # The FigShare download location takes precedence when it exists.
    if os.path.exists('./data_figshare/topography/'):
        path = './data_figshare/topography/'
    # Etopo 1
    filename = 'ETOPO1_Bed_g_gmt4_filtered.grd'
    full_path = path + filename
    if os.path.isfile(full_path):
        return netCDF4.Dataset(full_path)
    stem = path + filename.split('.grd')[0]
    tiles = ['{}_{}.grd'.format(stem, i) for i in range(1, 7)]
    return xr.open_mfdataset(tiles,
                             concat_dim=['lon'], combine='nested', engine='netcdf4')
def load_hotspots(path='./data/topography/',write_id=False,write_grid=False):
    """Load (or, optionally, compute and cache) the distance-to-closest-hotspot grid.

    Parameters:
        path: directory of the grids; overridden by ./data_figshare/topography/
            when that directory exists.
        write_id: when recomputing, also write an id_closest_hs.grd file with
            the ID of the nearest hotspot per cell.
        write_grid: recompute the distance grid from the Morgan & Morgan (2007)
            hotspot list if it is not already on disk (very slow).

    Returns:
        (x, y, dist_closest_hs): lon, lat, and great-circle distance in metres
        from each grid cell to the nearest hotspot.

    NOTE(review): relies on a module-level `geod` object (presumably a
    pyproj.Geod) -- confirm it is defined elsewhere in this module.
    """
    if os.path.exists('./data_figshare/topography/'):
        path='./data_figshare/topography/'
    # Hot spots
    filename = 'dist_closest_hs.grd'
    if write_grid:
        if ( not os.path.isfile(path+filename)):
            # Very slow to compute this grid
            # Brute force: for every ETOPO cell, test every hotspot
            # (O(n_lon * n_lat * n_hotspots) geodesic inversions).
            filename2='Morgan-Morgan_2007_hs.txt'
            data = np.loadtxt(path+filename2,dtype='float',delimiter=' ')
            lat_read = data[:,1]
            lon_read = data[:,0]
            nc_etopo1 = get_shape_etopo()
            x=nc_etopo1.variables['lon'][:]
            y=nc_etopo1.variables['lat'][:]
            dist_closest_hs = np.zeros([nc_etopo1.variables['lat'].shape[0], nc_etopo1.variables['lon'].shape[0]])
            id_closest_hs = np.zeros([nc_etopo1.variables['lat'].shape[0], nc_etopo1.variables['lon'].shape[0]])
            ipt = 0
            npt = len(x)*len(y)
            for ilon in np.arange(0,len(x)):
                lon1 = x[ilon]
                for ilat in np.arange(0,len(y)):
                    min_dist = 9e9
                    min_id = 0
                    lat1 = y[ilat]
                    ipt = ipt + 1
                    for ihs in np.arange(0,len(lon_read)):
                        lon2 = lon_read[ihs]
                        lat2 = lat_read[ihs]
                        #if (np.abs(lon1-lon2)<25 and np.abs(lat1-lat2)<25):
                        # Geodesic (great-circle) distance in metres.
                        azimuth1, azimuth2, dist = geod.inv(lon1, lat1, lon2, lat2)
                        if (dist<min_dist):
                            min_dist = dist
                            min_id = data[ihs,3]
                            #print('Hot spot {}'.format(data[ihs,3]))
                    dist_closest_hs[ilat,ilon] = min_dist
                    id_closest_hs[ilat,ilon] = min_id
                    if ipt%10000 == 0:
                        print("{}/{}".format(ipt,npt))
            # Create a new netcdf file with distances to closest hotspots
            if write_id:
                with netCDF4.Dataset(path+'id_closest_hs.grd', "w", format="NETCDF4_CLASSIC") as f:
                    f.description = 'ID of the closest hotspot'
                    # dimensions
                    f.createDimension('x', nc_etopo1.variables['lon'].shape[0])
                    f.createDimension('y', nc_etopo1.variables['lat'].shape[0])
                    # variables
                    xnew = f.createVariable('x', 'f4', ('x',))
                    ynew = f.createVariable('y', 'f4', ('y',))
                    znew = f.createVariable('z', 'f4', ('y', 'x',))
                    xnew.units = "degrees east"
                    ynew.units = "degrees north"
                    znew.units = "-"
                    # data
                    xnew[:] = x
                    ynew[:] = y
                    znew[:,:] = id_closest_hs
            with netCDF4.Dataset(path+filename, "w", format="NETCDF4_CLASSIC") as f:
                f.description = 'Distance to the closest hotspot'
                # dimensions
                f.createDimension('x', nc_etopo1.variables['lon'].shape[0])
                f.createDimension('y', nc_etopo1.variables['lat'].shape[0])
                # variables
                xnew = f.createVariable('x', 'f4', ('x',))
                ynew = f.createVariable('y', 'f4', ('y',))
                znew = f.createVariable('z', 'f4', ('y', 'x',))
                xnew.units = "degrees east"
                ynew.units = "degrees north"
                znew.units = "m"
                # data
                xnew[:] = x
                ynew[:] = y
                znew[:,:] = dist_closest_hs
    # Read the (possibly freshly written) grid back, falling back to the
    # six FigShare tiles when the single file is absent.
    if (os.path.isfile(path+filename)):
        nc_dist_closest_hs = netCDF4.Dataset(path+filename)
    else:
        name = filename.split('.grd')[0]
        nc_dist_closest_hs = xr.open_mfdataset([path+name+'_1.grd',path+name+'_2.grd',path+name+'_3.grd',path+name+'_4.grd',path+name+'_5.grd',path+name+'_6.grd'],
                        concat_dim=['x'], combine='nested',engine='netcdf4')
    #print(nc_dist_closest_hs.variables.keys())
    x=nc_dist_closest_hs.variables['x'][:]
    y=nc_dist_closest_hs.variables['y'][:]
    dist_closest_hs=nc_dist_closest_hs.variables['z'][:]
    if not os.path.isfile(path+filename):
        # Tiled path: convert lazy xarray DataArrays to plain numpy arrays.
        x=x.to_numpy()
        y=y.to_numpy()
        dist_closest_hs=dist_closest_hs.to_numpy()
    print("Distances closest hot spot (Morgan and Morgan, 2007) m min/max {} {}".format(np.nanmin(dist_closest_hs),np.nanmax(dist_closest_hs)))
    return x,y,dist_closest_hs
def load_crustal_thickness(path='./data/topography/'):
    """Load the GEMMA 2 arc-min crustal thickness grid.

    Returns (lon, lat, thickness_km) read either from the single netCDF
    file or, when absent, assembled from the six FigShare tiles.
    """
    # The FigShare download location takes precedence when present.
    if os.path.exists('./data_figshare/topography/'):
        path = './data_figshare/topography/'
    # Crustal thickness
    filename = 'crust_thickness_GEMMA_2m.grd'
    full_path = path + filename
    single_file = os.path.isfile(full_path)
    if single_file:
        dataset = netCDF4.Dataset(full_path)
    else:
        stem = path + filename.split('.grd')[0]
        tiles = ['{}_{}.grd'.format(stem, i) for i in range(1, 7)]
        dataset = xr.open_mfdataset(tiles,
                                    concat_dim=['lon'], combine='nested', engine='netcdf4')
    x = dataset.variables['lon'][:]
    y = dataset.variables['lat'][:]
    crustal_thickness = dataset.variables['z'][:]
    if not single_file:
        # The tiled path yields lazy xarray DataArrays; materialize them.
        x = x.to_numpy()
        y = y.to_numpy()
        crustal_thickness = crustal_thickness.to_numpy()
    print("Crustal thickness (GEMMA 2 arc-min) km min/max {} {}".format(np.nanmin(crustal_thickness),np.nanmax(crustal_thickness)))
    return x, y, crustal_thickness
def load_lithospheric_thickness(path='./data/topography/'):
    """Load the Steinberger (2016) lithospheric thickness grid.

    Returns (lon, lat, thickness_km); NaN cells are replaced by -999
    after the min/max report is printed.
    """
    # The FigShare download location takes precedence when present.
    if os.path.exists('./data_figshare/topography/'):
        path = './data_figshare/topography/'
    # lithospheric thickness
    filename = 'lith_ave_no_slabs_-180_180_2m.grd'
    full_path = path + filename
    single_file = os.path.isfile(full_path)
    if single_file:
        dataset = netCDF4.Dataset(full_path)
    else:
        stem = path + filename.split('.grd')[0]
        tiles = ['{}_{}.grd'.format(stem, i) for i in range(1, 7)]
        dataset = xr.open_mfdataset(tiles,
                                    concat_dim=['lon'], combine='nested', engine='netcdf4')
    x = dataset.variables['lon'][:]
    y = dataset.variables['lat'][:]
    lithospheric_thickness = dataset.variables['z'][:]
    if not single_file:
        # The tiled path yields lazy xarray DataArrays; materialize them.
        x = x.to_numpy()
        y = y.to_numpy()
        lithospheric_thickness = lithospheric_thickness.to_numpy()
    print("Lithospheric thickness (SteinBerger 2016) km min/max {} {}".format(np.nanmin(lithospheric_thickness),np.nanmax(lithospheric_thickness)))
    # Sentinel value for undefined cells (after reporting the true min/max).
    lithospheric_thickness = np.where(np.isnan(lithospheric_thickness), -999, lithospheric_thickness)
    return x, y, lithospheric_thickness
def load_age_lithos(path='./data/topography/'):
    """Load the Poupinet & Shapiro (2008) lithospheric age grid.

    Returns (lon, lat, age) with age scaled to Ma (the file stores
    values /1000); NaN cells become -999 after the min/max report.
    """
    # The FigShare download location takes precedence when present.
    if os.path.exists('./data_figshare/topography/'):
        path = './data_figshare/topography/'
    # thermal age continent
    filename = 'mant_age_map_-180_180_qgis_2m.grd'
    full_path = path + filename
    single_file = os.path.isfile(full_path)
    if single_file:
        dataset = netCDF4.Dataset(full_path)
    else:
        stem = path + filename.split('.grd')[0]
        tiles = ['{}_{}.grd'.format(stem, i) for i in range(1, 7)]
        dataset = xr.open_mfdataset(tiles,
                                    concat_dim=['lon'], combine='nested', engine='netcdf4')
    x = dataset.variables['lon'][:]
    y = dataset.variables['lat'][:]
    age_lithos = dataset.variables['z'][:] * 1e3  # file stores kyr-scaled values
    if not single_file:
        # The tiled path yields lazy xarray DataArrays; materialize them.
        x = x.to_numpy()
        y = y.to_numpy()
        age_lithos = age_lithos.to_numpy()
    print("Age lithosphere (Poupinet_Shapiro_2008) Ma min/max {} {}".format(np.nanmin(age_lithos),np.nanmax(age_lithos)))
    # Sentinel value for undefined cells (after reporting the true min/max).
    age_lithos = np.where(np.isnan(age_lithos), -999, age_lithos)
    return x, y, age_lithos
def define_MOR_pts(path='./data/topography/',selection_name='MOR_pts_all',distance_between_pts_along_ridges = 25):
    '''
    Define points with elevation and spreading rates along MOR
    selection_name = 'MOR_pts_all' or 'MOR_pts_far_from_hs' or 'MOR_pts_close_to_hs'

    distance_between_pts_along_ridges is given in km and controls the minimum
    spacing between consecutive sampled ridge points.

    Returns four numpy arrays: lon, lat, elevation (max within the window)
    and spreading rate (median within the window) of the sampled MOR points.
    The result is cached as a pickle under path+filename; a cached file is
    loaded instead of recomputing.
    NOTE(review): filename starts with './' and is concatenated to path
    ('./data/topography/./...'), which works but looks unintended -- confirm.
    '''
    if os.path.exists('./data_figshare/topography/'):
        path='./data_figshare/topography/'
    filename = './'+selection_name+'_'+str(distance_between_pts_along_ridges)+'km.dat'
    # Convert the spacing from km to metres for comparison with geod.inv().
    distance_between_pts_along_ridges = distance_between_pts_along_ridges*1e3
    if not (os.path.isfile(path+filename)):
        print("#################### Loading datasets #########################")
        x,y,elev = load_etopo(path=path)
        x,y,spreading_rate = load_spreading_rate(path=path)
        x,y,age = load_seafloor_ages(path=path)
        x,y,strain_rate = load_strain_rate(path=path)
        x,y,dist_closest_hs = load_hotspots(path=path)
        print("#################### Applying mask on datasets #########################")
        # data selection
        min_dist_hs = 1000000 # m
        max_seafloor_age = 10 # Myrs the width depends on spreading rate - 10 Myrs is a good compromise for computational reason
        # It gives 50 km from ridge axis for ultra-slow 1 cm/yr full spreading rate MOR
        max_seafloor_age_for_ridge_axis = 0.5
        threshold_strain_rate = 1e-16 # s-1
        xx, yy = np.meshgrid(x, y)
        # Two masks: a wide band around the ridge (for sampling elevation and
        # spreading rate) and a narrow band defining the ridge axis itself.
        if selection_name=='MOR_pts_all':
            mask = ( (elev<0) & (age<=max_seafloor_age) )
            mask_axis = ( (elev<0) & (age<=max_seafloor_age_for_ridge_axis) )
        elif selection_name=='MOR_pts_far_from_hs':
            mask = ( (elev<0) & (age<=max_seafloor_age) & (dist_closest_hs > min_dist_hs) )
            mask_axis = ( (elev<0) & (age<=max_seafloor_age_for_ridge_axis) & (dist_closest_hs > min_dist_hs) )
        elif selection_name=='MOR_pts_close_to_hs':
            mask = ( (elev<0) & (age<=max_seafloor_age) & (dist_closest_hs <= min_dist_hs) )
            mask_axis = ( (elev<0) & (age<=max_seafloor_age_for_ridge_axis) & (dist_closest_hs <= min_dist_hs) )
        else:
            # NOTE(review): quit() terminates the interpreter from library
            # code; raising ValueError would be friendlier to callers.
            print("ERROR incorrect selection_name ")
            quit()
        # this array is used to define the localisation of the MOR
        active_MOR_elev = elev[mask]
        active_MOR_x = xx[mask]
        active_MOR_y = yy[mask]
        active_MOR_x_axis = xx[mask_axis]
        active_MOR_y_axis = yy[mask_axis]
        active_MOR_spreading_rate = spreading_rate[mask]
        active_MOR_strain_rate = strain_rate[mask]
        dd = 1.5 # Distance to look for points in the grid that belong to the same MOR segment
        # given in degrees for computational reason, could be function of the spreading rate
        # Here, we define a constant that cover all cases (Fig. S4b) (~150-200 km)
        # W ~ 100-150 km ~ distance between rift flanks at ultra-slow spreading rates (Fig. 3)
        #
        new_active_MOR_y = [] ; new_active_MOR_x = [] ; new_active_MOR_elev = [] ; new_active_MOR_spreading_rate = []
        ipt = 0
        print('Total #pts on the grid for age<={} Myrs = {} '.format(max_seafloor_age_for_ridge_axis,len(active_MOR_x_axis)))
        print("#################### Browsing all MOR points #########################")
        # For each axis cell: take the median location of nearby axis cells
        # (segment centre), then sample elevation/spreading rate in a dd-wide
        # window and keep the point only if it is actively straining and at
        # least distance_between_pts_along_ridges from the previous kept point.
        for xpt,ypt in zip(active_MOR_x_axis,active_MOR_y_axis):
            xsel = active_MOR_x_axis[ ( (np.abs(active_MOR_x_axis-xpt)<=dd/2) & (np.abs(active_MOR_y_axis-ypt)<=dd/2) ) ]
            ysel = active_MOR_y_axis[ ( (np.abs(active_MOR_x_axis-xpt)<=dd/2) & (np.abs(active_MOR_y_axis-ypt)<=dd/2) ) ]
            newx = np.median(xsel)
            newy = np.median(ysel)
            if (ipt==0):
                # First point is always kept.
                new_active_MOR_x.append(newx)
                new_active_MOR_y.append(newy)
                esel = active_MOR_elev[ ( (np.abs(active_MOR_x-newx)<=dd/2) & (np.abs(active_MOR_y-newy)<=dd/2) ) ]
                new_active_MOR_elev.append(np.max(esel))
                srsel = active_MOR_spreading_rate[ ( (np.abs(active_MOR_x-newx)<=dd/2) & (np.abs(active_MOR_y-newy)<=dd/2) ) ]
                new_active_MOR_spreading_rate.append(np.median(srsel))
            else:
                stsel = active_MOR_strain_rate[ ( (np.abs(active_MOR_x-newx)<=dd/2) & (np.abs(active_MOR_y-newy)<=dd/2) ) ]
                if (np.any(stsel>=threshold_strain_rate)):
                    # Geodesic distance (m) to the previously kept point.
                    azimuth1, azimuth2, dist = geod.inv(new_active_MOR_x[-1], new_active_MOR_y[-1], newx, newy)
                    if ( dist >= distance_between_pts_along_ridges ):
                        esel = active_MOR_elev[ ( (np.abs(active_MOR_x-newx)<=dd/2) & (np.abs(active_MOR_y-newy)<=dd/2) ) ]
                        srsel = active_MOR_spreading_rate[ ( (np.abs(active_MOR_x-newx)<=dd/2) & (np.abs(active_MOR_y-newy)<=dd/2) ) ]
                        new_active_MOR_x.append(newx)
                        new_active_MOR_y.append(newy)
                        new_active_MOR_elev.append(np.max(esel))
                        new_active_MOR_spreading_rate.append(np.median(srsel))
            ipt = ipt + 1
            if ipt%5000 == 0:
                print("{}/{}".format(ipt,len(active_MOR_x_axis)))
        new_active_MOR_x = np.asarray(new_active_MOR_x)
        new_active_MOR_y = np.asarray(new_active_MOR_y)
        new_active_MOR_elev = np.asarray(new_active_MOR_elev)
        new_active_MOR_spreading_rate = np.asarray(new_active_MOR_spreading_rate)
        # Cache the selection so the expensive scan runs only once.
        with open(path+filename, 'wb') as filehandle:
            pickle.dump([new_active_MOR_x,new_active_MOR_y,new_active_MOR_elev,new_active_MOR_spreading_rate],filehandle)
        print('Total defined pts along MOR = {} '.format(len(new_active_MOR_x)))
    else:
        print("This selection already exists ({})".format(path+filename))
        with open(path+filename, 'rb') as filehandle:
            # read the data as binary data stream
            [new_active_MOR_x,new_active_MOR_y,new_active_MOR_elev,new_active_MOR_spreading_rate] = pickle.load(filehandle)
        print('Total defined pts along MOR = {} '.format(len(new_active_MOR_x)))
| 12,773 | 0 | 230 |
7efc8c4fae98330e9b5cd2edb68a5c99c316055a | 1,601 | py | Python | connection_ss_simple.py | Liuzhe30/pygcn | ac13e8873b9eb7360ac88a88afb160db652e4699 | [
"MIT"
] | null | null | null | connection_ss_simple.py | Liuzhe30/pygcn | ac13e8873b9eb7360ac88a88afb160db652e4699 | [
"MIT"
] | null | null | null | connection_ss_simple.py | Liuzhe30/pygcn | ac13e8873b9eb7360ac88a88afb160db652e4699 | [
"MIT"
] | null | null | null | import numpy as np
content = np.loadtxt('/data/rasa.content')
with open('/data/join.fasta') as fasta:
with open('/data/rasa.cites', 'a+') as w:
line = fasta.readline()
num = 0
while line:
if(line[0] == '>'):
line = fasta.readline()
continue
length = len(line.strip())
i = 0
while i < length:
#if(i>0 and i<length-1 and content[num+i][-2] != content[num+i-1][-2] and content[num+i][-2] != content[num+i+1][-2]):
#continue
if(i == length-1 and content[num+i][-2] != content[num+i-1][-2]):
break
flag1 = i + num
flag2 = i + num
for j in range(i,length-1):
if(content[num+j][-2] != content[num+j+1][-2]):
flag2 = j + num
break
if(j == length-2):
flag2 = length + num
#print('i=' + str(i))
#print('flag1=' + str(flag1))
#print('flag2=' + str(flag2))
w.write(str(int(content[flag1][0])) + ' ' + str(int(content[flag2][0])) + '\n')
#print(str(int(content[t][0])) + ' ' + str(int(content[m][0])))
if((i + flag2 - flag1 + 1) > content[-1][0]-1):
break
i += flag2 - flag1 + 1
num += length
line = fasta.readline() | 39.04878 | 134 | 0.381012 | import numpy as np
content = np.loadtxt('/data/rasa.content')
with open('/data/join.fasta') as fasta:
with open('/data/rasa.cites', 'a+') as w:
line = fasta.readline()
num = 0
while line:
if(line[0] == '>'):
line = fasta.readline()
continue
length = len(line.strip())
i = 0
while i < length:
#if(i>0 and i<length-1 and content[num+i][-2] != content[num+i-1][-2] and content[num+i][-2] != content[num+i+1][-2]):
#continue
if(i == length-1 and content[num+i][-2] != content[num+i-1][-2]):
break
flag1 = i + num
flag2 = i + num
for j in range(i,length-1):
if(content[num+j][-2] != content[num+j+1][-2]):
flag2 = j + num
break
if(j == length-2):
flag2 = length + num
#print('i=' + str(i))
#print('flag1=' + str(flag1))
#print('flag2=' + str(flag2))
w.write(str(int(content[flag1][0])) + ' ' + str(int(content[flag2][0])) + '\n')
#print(str(int(content[t][0])) + ' ' + str(int(content[m][0])))
if((i + flag2 - flag1 + 1) > content[-1][0]-1):
break
i += flag2 - flag1 + 1
num += length
line = fasta.readline() | 0 | 0 | 0 |
81cccc9193a552e5305de4736e9b47ff1daf7cbb | 431 | py | Python | cpp/apps/MolecularEditor/py/gen_linkers.py | ProkopHapala/SimpleSimulationEngine | 240f9b7e85b3a6eda7a27dc15fe3f7b8c08774c5 | [
"MIT"
] | 26 | 2016-12-04T04:45:12.000Z | 2022-03-24T09:39:28.000Z | cpp/apps/MolecularEditor/py/gen_linkers.py | Aki78/FlightAI | 9c5480f2392c9c89b9fee4902db0c4cde5323a6c | [
"MIT"
] | null | null | null | cpp/apps/MolecularEditor/py/gen_linkers.py | Aki78/FlightAI | 9c5480f2392c9c89b9fee4902db0c4cde5323a6c | [
"MIT"
] | 2 | 2019-02-09T12:31:06.000Z | 2019-04-28T02:24:50.000Z | #!/usr/bin/python
import numpy as np
#fin = open("H56.bas",r)
bas=np.genfromtxt("H56.bas", skip_header=1)
#bas=bas[:5]
#print( bas )
k0=30.0
l0=0.0
mol0=0
pos0 = np.array([0.0,0.0,0.0])
fout = open("linkers.ini",'w')
fout.write("%i\n" %(len(bas)) )
for i,l in enumerate( bas ):
pos = l[1:]
fout.write("%i %i %s %s %f %f\n" %( mol0,i+1, ' '.join(map(str,pos)),' '.join(map(str,pos0)), k0, l0 ) )
fout.close()
| 15.962963 | 115 | 0.561485 | #!/usr/bin/python
import numpy as np
#fin = open("H56.bas",r)
bas=np.genfromtxt("H56.bas", skip_header=1)
#bas=bas[:5]
#print( bas )
k0=30.0
l0=0.0
mol0=0
pos0 = np.array([0.0,0.0,0.0])
fout = open("linkers.ini",'w')
fout.write("%i\n" %(len(bas)) )
for i,l in enumerate( bas ):
pos = l[1:]
fout.write("%i %i %s %s %f %f\n" %( mol0,i+1, ' '.join(map(str,pos)),' '.join(map(str,pos0)), k0, l0 ) )
fout.close()
| 0 | 0 | 0 |
083025e9c10a03d7077ec680c49de682ca96c2ac | 975 | py | Python | reconstruct_itinerary.py | pranavdave893/Leetcode | 1f30ea37af7b60585d168b15d9397143f53c92a1 | [
"MIT"
] | null | null | null | reconstruct_itinerary.py | pranavdave893/Leetcode | 1f30ea37af7b60585d168b15d9397143f53c92a1 | [
"MIT"
] | null | null | null | reconstruct_itinerary.py | pranavdave893/Leetcode | 1f30ea37af7b60585d168b15d9397143f53c92a1 | [
"MIT"
] | null | null | null | from collections import defaultdict
# Smoke-test calls; expected outputs noted inline.
# NOTE(review): `Solution` is not defined in this excerpt -- it must be
# defined or imported above for these calls to run.
abc = Solution()
print (abc.findItinerary([["JFK","KUL"],["JFK","NRT"],["NRT","JFK"]])) # ["JFK","NRT","JFK","KUL"]
print (abc.findItinerary([["MUC", "LHR"], ["JFK", "MUC"], ["SFO", "SJC"], ["LHR", "SFO"]]))
print (abc.findItinerary([["JFK","SFO"],["JFK","ATL"],["SFO","ATL"],["ATL","JFK"],["ATL","SFO"]]))
class Solution(object):
    """Reconstruct an itinerary from airline tickets (LeetCode 332)."""

    def findItinerary(self, tickets):
        """Return the lexicographically smallest itinerary using every ticket once.

        Hierholzer's algorithm over the directed multigraph of tickets,
        starting at "JFK". Destinations are stacked in reverse-sorted order
        so the lexicographically smallest one is taken with an O(1) pop()
        (the original popped from the list head, O(n) per edge).

        :type tickets: List[List[str]]
        :rtype: List[str]
        """
        if not tickets:
            return []
        graph = defaultdict(list)
        # Reverse-sort so list.pop() yields destinations in ascending order.
        for src, dst in sorted(tickets, reverse=True):
            graph[src].append(dst)
        route = []
        def visit(airport):
            # Follow unused edges until stuck, then record post-order.
            dests = graph[airport]
            while dests:
                visit(dests.pop())
            route.append(airport)
        visit("JFK")
        # Post-order reversed gives the Eulerian path.
        return route[::-1]
# Smoke-test calls against the sample itineraries; expected output noted inline.
abc = Solution()
print (abc.findItinerary([["JFK","KUL"],["JFK","NRT"],["NRT","JFK"]])) # ["JFK","NRT","JFK","KUL"]
print (abc.findItinerary([["MUC", "LHR"], ["JFK", "MUC"], ["SFO", "SJC"], ["LHR", "SFO"]]))
print (abc.findItinerary([["JFK","SFO"],["JFK","ATL"],["SFO","ATL"],["ATL","JFK"],["ATL","SFO"]]))
afd755dfee7d3d7bac1e03b8b5df69a6f5aad4ee | 2,906 | py | Python | RoadCrackSegmentation/train.py | ForrestPi/DefectDetection | 7e999335ffbd50519cdfaba7de0d6bfa306a579a | [
"Unlicense"
] | 4 | 2020-06-03T08:10:13.000Z | 2021-06-11T09:46:48.000Z | RoadCrackSegmentation/train.py | ForrestPi/DefectDetection | 7e999335ffbd50519cdfaba7de0d6bfa306a579a | [
"Unlicense"
] | null | null | null | RoadCrackSegmentation/train.py | ForrestPi/DefectDetection | 7e999335ffbd50519cdfaba7de0d6bfa306a579a | [
"Unlicense"
] | 1 | 2020-04-14T08:28:04.000Z | 2020-04-14T08:28:04.000Z | import argparse, glob,os
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau
from keras.optimizers import Adam
import data_loader
from metrics import *
from net.Unet import Net
# from net.GCUnet import Net
# Training script for the road-crack segmentation U-Net.
# Parses CLI args, builds train/val generators, compiles the model and runs
# fit_generator with checkpointing and LR reduction on plateau.
os.environ["CUDA_VISIBLE_DEVICES"] = "3"  # pin to GPU 3
# --- command-line configuration -------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--train_images", type=str, default='dataset/CRACK500/traincrop/')
parser.add_argument("--train_annotations", type=str,default='dataset/CRACK500/traincrop/')
parser.add_argument("--img_height", type=int, default=224)
parser.add_argument("--img_width", type=int, default=224)
parser.add_argument("--augment", type=bool, default=True)
parser.add_argument("--val_images", type=str, default='dataset/CRACK500/valcrop/')
parser.add_argument("--val_annotations", type=str, default='dataset/CRACK500/valcrop/')
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--load_weights", type=str, default=None)
parser.add_argument("--model", type=str, default='checkpoint/Unet',help="path to output model")
args = parser.parse_args()
# Checkpoints are written under args.model; create it if missing.
if not os.path.exists(args.model):
    os.makedirs(args.model)
train_images_path = args.train_images
train_segs_path = args.train_annotations
batch_size = args.batch_size
img_height = args.img_height
img_width = args.img_width
epochs = args.epochs
load_weights = args.load_weights
val_images_path = args.val_images
val_segs_path = args.val_annotations
# Dataset sizes drive the steps-per-epoch computation below.
num_train_images = len(glob.glob(train_images_path + '*.jpg'))
num_valid_images = len(glob.glob(val_images_path + '*.jpg'))
# --- model -----------------------------------------------------------------
m = Net()
m.compile(loss='binary_crossentropy',optimizer= Adam(lr=1e-4),metrics=['accuracy', f1_score])
if load_weights:
    # Resume from a previous checkpoint.
    m.load_weights(load_weights)
print("Model output shape: {}".format(m.output_shape))
# --- data generators (augmentation only on the training split) -------------
train_gen = data_loader.imageSegmentationGenerator(train_images_path,
            train_segs_path, batch_size, img_height, img_width, args.augment, phase='train')
val_gen = data_loader.imageSegmentationGenerator(val_images_path,
            val_segs_path, batch_size, img_height, img_width, False, phase='test')
# --- callbacks: save every epoch, reduce LR when val_loss plateaus ---------
filepath = "weights-{epoch:03d}-{val_loss:.4f}-{val_acc:.4f}.h5"
model_weights = os.path.join(args.model, filepath)
checkpoint = ModelCheckpoint(model_weights, monitor='val_loss', verbose=1,save_best_only=False, mode='min', save_weights_only=True)
reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5,
                                   verbose=1, mode='auto', epsilon=0.0001)
# --- training --------------------------------------------------------------
m.fit_generator(train_gen,
                steps_per_epoch = num_train_images//batch_size,
                validation_data = val_gen,
                validation_steps = num_valid_images//batch_size,
                epochs = epochs,
                verbose = 1,
                callbacks = [checkpoint, reduceLROnPlat])
| 38.236842 | 131 | 0.73916 | import argparse, glob,os
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau
from keras.optimizers import Adam
import data_loader
from metrics import *
from net.Unet import Net
# from net.GCUnet import Net
# Training script for the road-crack segmentation U-Net.
# Parses CLI args, builds train/val generators, compiles the model and runs
# fit_generator with checkpointing and LR reduction on plateau.
os.environ["CUDA_VISIBLE_DEVICES"] = "3"  # pin to GPU 3
# --- command-line configuration -------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--train_images", type=str, default='dataset/CRACK500/traincrop/')
parser.add_argument("--train_annotations", type=str,default='dataset/CRACK500/traincrop/')
parser.add_argument("--img_height", type=int, default=224)
parser.add_argument("--img_width", type=int, default=224)
parser.add_argument("--augment", type=bool, default=True)
parser.add_argument("--val_images", type=str, default='dataset/CRACK500/valcrop/')
parser.add_argument("--val_annotations", type=str, default='dataset/CRACK500/valcrop/')
parser.add_argument("--epochs", type=int, default=100)
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--load_weights", type=str, default=None)
parser.add_argument("--model", type=str, default='checkpoint/Unet',help="path to output model")
args = parser.parse_args()
# Checkpoints are written under args.model; create it if missing.
if not os.path.exists(args.model):
    os.makedirs(args.model)
train_images_path = args.train_images
train_segs_path = args.train_annotations
batch_size = args.batch_size
img_height = args.img_height
img_width = args.img_width
epochs = args.epochs
load_weights = args.load_weights
val_images_path = args.val_images
val_segs_path = args.val_annotations
# Dataset sizes drive the steps-per-epoch computation below.
num_train_images = len(glob.glob(train_images_path + '*.jpg'))
num_valid_images = len(glob.glob(val_images_path + '*.jpg'))
# --- model -----------------------------------------------------------------
m = Net()
m.compile(loss='binary_crossentropy',optimizer= Adam(lr=1e-4),metrics=['accuracy', f1_score])
if load_weights:
    # Resume from a previous checkpoint.
    m.load_weights(load_weights)
print("Model output shape: {}".format(m.output_shape))
# --- data generators (augmentation only on the training split) -------------
train_gen = data_loader.imageSegmentationGenerator(train_images_path,
            train_segs_path, batch_size, img_height, img_width, args.augment, phase='train')
val_gen = data_loader.imageSegmentationGenerator(val_images_path,
            val_segs_path, batch_size, img_height, img_width, False, phase='test')
# --- callbacks: save every epoch, reduce LR when val_loss plateaus ---------
filepath = "weights-{epoch:03d}-{val_loss:.4f}-{val_acc:.4f}.h5"
model_weights = os.path.join(args.model, filepath)
checkpoint = ModelCheckpoint(model_weights, monitor='val_loss', verbose=1,save_best_only=False, mode='min', save_weights_only=True)
reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5,
                                   verbose=1, mode='auto', epsilon=0.0001)
# --- training --------------------------------------------------------------
m.fit_generator(train_gen,
                steps_per_epoch = num_train_images//batch_size,
                validation_data = val_gen,
                validation_steps = num_valid_images//batch_size,
                epochs = epochs,
                verbose = 1,
                callbacks = [checkpoint, reduceLROnPlat])
| 0 | 0 | 0 |
caa97c1dcca054cf4bc1742ac91c84392b651a86 | 476 | py | Python | 5th/date_time.py | byplacebo/introduction-to-programming-with-python_kor | 73e2b023c30f0d70006540371b547e1ba5d020ff | [
"MIT"
] | null | null | null | 5th/date_time.py | byplacebo/introduction-to-programming-with-python_kor | 73e2b023c30f0d70006540371b547e1ba5d020ff | [
"MIT"
] | null | null | null | 5th/date_time.py | byplacebo/introduction-to-programming-with-python_kor | 73e2b023c30f0d70006540371b547e1ba5d020ff | [
"MIT"
] | null | null | null | import datetime
print(datetime.date.today())
today = datetime.date.today()
print(today)
print(today.year)
print(today.month)
print(today.day)
print(today.strftime("%d %b %Y"))
print(today.strftime("%A %B %y"))
print(today.strftime("Please attend out event %A, %B %d in the year %Y"))
userInput = input("Please enter your birthday (mm/dd/yyyy) ")
birthday = datetime.datetime.strptime(userInput, "%m/%d/%Y").date()
print(birthday)
days = birthday - today
print(days.days)
# Demo script: print today's date in various formats, then parse the user's
# birthday and print the day difference.
import datetime
print(datetime.date.today())
today = datetime.date.today()
print(today)
print(today.year)
print(today.month)
print(today.day)
# strftime formatting examples: %d day, %b/%B month name, %y/%Y year, %A weekday.
print(today.strftime("%d %b %Y"))
print(today.strftime("%A %B %y"))
print(today.strftime("Please attend out event %A, %B %d in the year %Y"))
userInput = input("Please enter your birthday (mm/dd/yyyy) ")
# Parse to a datetime, then keep only the date part for the subtraction.
birthday = datetime.datetime.strptime(userInput, "%m/%d/%Y").date()
print(birthday)
# NOTE(review): for birthdays in the past this difference is negative --
# confirm whether the intent is "days until" (would need the next occurrence).
days = birthday - today
print(days.days)
4c4d5fd83018f166f6ed697aab80a4fea008d871 | 7,005 | py | Python | zobs/ross/hf_precip_all.py | NMTHydro/Recharge | bbc1a05add92064acffeffb19f04e370b99a7918 | [
"Apache-2.0"
] | 7 | 2016-08-30T15:18:11.000Z | 2021-08-22T00:28:10.000Z | zobs/ross/hf_precip_all.py | dgketchum/etrm | f74f5771fbc6ba5750a790e384eac422b598325a | [
"Apache-2.0"
] | 2 | 2016-06-08T06:41:45.000Z | 2016-06-23T20:47:26.000Z | zobs/ross/hf_precip_all.py | dgketchum/etrm | f74f5771fbc6ba5750a790e384eac422b598325a | [
"Apache-2.0"
] | 1 | 2018-09-18T10:38:08.000Z | 2018-09-18T10:38:08.000Z | # ===============================================================================
# Copyright 2016 ross
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= standard library imports ========================
import datetime
import os
import logging
from dateutil import rrule
from arcpy import env
from numpy import array, multiply, column_stack, savetxt
# ============= local library imports ==========================
if __name__ == '__main__':
    # Fix: os.path.join('C:', 'Users') yields the drive-relative path
    # 'C:Users' on Windows (resolved against drive C's current directory);
    # 'C:\\' is required for an absolute path.
    p = os.path.join('C:\\', 'Users', 'David', 'Documents', 'Recharge', 'Gauges', 'Gauge_Data_HF_csv')
    op = os.path.join('C:\\', 'Users', 'David', 'Documents', 'Recharge', 'Gauges', 'Gauge_ppt_csv')
    sp = os.path.join('C:\\', 'Recharge_GIS', 'Watersheds', 'nm_wtrs_11DEC15.shp')
    dr = os.path.join('C:\\', 'Recharge_GIS', 'Precip', '800m', 'Daily')
    precip(sp, p, op, dr)
# ============= EOF =============================================
| 42.97546 | 116 | 0.47666 | # ===============================================================================
# Copyright 2016 ross
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= standard library imports ========================
import datetime
import logging
import os

from dateutil import rrule

import arcpy
from arcpy import env
from numpy import array, multiply, column_stack, savetxt
# ============= local library imports ==========================
def precip(watershed, path, output, daily, field='USGS_Code'):
    """Accumulate daily PRISM precipitation volume over each gauged watershed.

    For every polygon in *watershed*, reads the matching gauge discharge CSV
    from *path*, clips the daily PRISM rasters under *daily* to the polygon,
    sums the precipitation volume per day, and writes one CSV per gauge
    (columns: date, discharge, precip) into *output*.

    NOTE(review): assumes a layer named 'wtrshds_lyr' has already been made
    from *watershed* (arcpy.MakeFeatureLayer_management) -- confirm caller.
    """
    # Gauge CSVs encode the USGS code in characters 1:8 of the file name.
    files = os.listdir(path)
    files_names = [str(name[1:8]) for name in files]
    env.overwriteOutput = True  # Ensure overwrite capability
    for row in arcpy.SearchCursor(watershed):
        gPoly = row.getValue(field)
        gstr = arcpy.AddFieldDelimiters(watershed, field)
        sql = '{} = {}'.format(gstr, gPoly)
        geo = arcpy.SelectLayerByAttribute_management('wtrshds_lyr', 'NEW_SELECTION', sql)
        logging.info('USGS code: {}'.format(gPoly))
        # Skip polygons without a matching gauge CSV.
        if str(gPoly) not in files_names:
            continue
        # Fix: open the CSV inside *path* (the bare name only worked when the
        # cwd happened to be *path*), and index with str(gPoly) to match the
        # membership test above.
        gauge_file = os.path.join(path, files[files_names.index(str(gPoly))])
        recs = []
        with open(gauge_file, 'r') as rfile:
            for line in rfile:
                fields = line.split(',')
                # Fix: `import datetime` exposes the class as
                # datetime.datetime, not datetime.strptime.
                dt = datetime.datetime.strptime(fields[2], '%m/%d/%Y')
                try:
                    recs.append((dt,                # date
                                 float(fields[3])))  # discharge
                except ValueError:
                    # Non-numeric discharge entry -> record zero.
                    recs.append((dt, 0.0))
        logging.info('Data points: {}'.format(len(recs)))
        qRecs = array(recs)
        # Make start and end dates correspond with available PRISM data
        # (i.e., 1984-01-01 to 2013-12-31).
        start = qRecs[0, 0]
        beginPrecip = datetime.datetime(1984, 1, 1)
        if start < beginPrecip:
            start = beginPrecip
        logging.info('Data start: {}'.format(start))
        end = qRecs[len(qRecs) - 1, 0]
        endPrecip = datetime.datetime(2013, 12, 31)
        if end > endPrecip:
            end = endPrecip
        logging.info('Data end: {}'.format(end))
        # ppt [mm -> m] times cell area [m*m] -> water volume per cell (m^3).
        rasSq = 1013.02 ** 2 / 1000
        precip_sums = []
        date = []
        q = []
        for day in rrule.rrule(rrule.DAILY, dtstart=start, until=end):
            yr = day.year
            d = day.strftime('%y%m%d')
            # Pre-1992 rasters live in an extra 'a' subfolder and use a
            # different file-name prefix; otherwise the daily clip-and-sum
            # is identical, so the two original branches are merged here.
            if yr <= 1991:
                ws = os.path.join(daily, str(yr), 'a')
                ras = os.path.join(ws, 'PRISM_NM_{}.tif'.format(d))
            else:
                ws = os.path.join(daily, str(yr))
                ras = os.path.join(ws, 'PRISM_NMHW2Buff_{}.tif'.format(d))
            if not arcpy.Exists(ras):
                continue
            try:
                arcpy.CheckOutExtension('Spatial')
                ras_part = arcpy.sa.ExtractByMask(ras, geo)
                if day == beginPrecip:
                    # Save one clipped raster for visual verification.
                    op = os.path.join(ws, '{}_rasterClipTest.tif'.format(gPoly))
                    ras_part.save(op)
                arr = arcpy.RasterToNumPyArray(ras_part, nodata_to_value=0)
                arrSum = multiply(arr, rasSq).sum()
                logging.info('Sum of precip on {}: {}'.format(day, arrSum))
                precip_sums.append(arrSum)
                date.append(day)
                for rec in qRecs:
                    if rec[0] == day:
                        q.append(rec[1])
            # Fix: 'except X as e' replaces the Python-2-only 'except X, e'.
            except BaseException as e:
                logging.info('Exception {} {}'.format(
                    'pre1991' if yr <= 1991 else 'post1991', e))
        # Create numpy arrays, convert time objects to strings, stack
        # columns, save as CSV: date, Q, ppt.
        q = array(q)
        ppt = array(precip_sums)
        date = array([rec.strftime('%Y/%m/%d') for rec in date])
        data = column_stack((date, q, ppt))
        savetxt(os.path.join(output, '{}.csv'.format(gPoly)), data, fmt=['%s', '%1.1f', '%1.3f'], delimiter=',')
        logging.info('You have been saved!')
if __name__ == '__main__':
    # Fix: os.path.join('C:', 'Users') yields the drive-relative path
    # 'C:Users' on Windows (resolved against drive C's current directory);
    # 'C:\\' is required for an absolute path.
    p = os.path.join('C:\\', 'Users', 'David', 'Documents', 'Recharge', 'Gauges', 'Gauge_Data_HF_csv')
    op = os.path.join('C:\\', 'Users', 'David', 'Documents', 'Recharge', 'Gauges', 'Gauge_ppt_csv')
    sp = os.path.join('C:\\', 'Recharge_GIS', 'Watersheds', 'nm_wtrs_11DEC15.shp')
    dr = os.path.join('C:\\', 'Recharge_GIS', 'Precip', '800m', 'Daily')
    precip(sp, p, op, dr)
# ============= EOF =============================================
| 5,499 | 0 | 23 |
9dad5462e0f34fe2372c1417e1e6437ba154dbda | 115 | py | Python | python/setup.py | brkyvz/test-python | 73ca100915542cdde9aab2bdb97d1f37dc107249 | [
"Apache-2.0"
] | 892 | 2016-02-13T19:33:07.000Z | 2022-03-29T09:49:49.000Z | graphframes-dist/graphframes-0.4.0-spark2.1-s_2.11/python/setup.py | thuongdinh-agilityio/docker-spark-anaconda | 79834649704e94678752aef6e34b47b649c26349 | [
"MIT"
] | 380 | 2016-02-03T23:10:21.000Z | 2022-02-24T05:52:31.000Z | graphframes-dist/graphframes-0.4.0-spark2.1-s_2.11/python/setup.py | thuongdinh-agilityio/docker-spark-anaconda | 79834649704e94678752aef6e34b47b649c26349 | [
"MIT"
] | 250 | 2016-02-03T00:02:52.000Z | 2022-03-28T08:46:07.000Z | # Your python setup file. An example can be found at:
# https://github.com/pypa/sampleproject/blob/master/setup.py
| 38.333333 | 60 | 0.765217 | # Your python setup file. An example can be found at:
# https://github.com/pypa/sampleproject/blob/master/setup.py
| 0 | 0 | 0 |
a06fa2fed28030104a105135e887bc003bb4ac6c | 9,122 | py | Python | NLPCode/sentiment_analysis/LSTM/main.py | trusthlt/dp-across-nlp-tasks | ec3e03511420044cdb0bb1a3574925d354ff03f4 | [
"Apache-2.0"
] | 1 | 2021-12-21T14:05:34.000Z | 2021-12-21T14:05:34.000Z | NLPCode/sentiment_analysis/LSTM/main.py | trusthlt/dp-across-nlp-tasks | ec3e03511420044cdb0bb1a3574925d354ff03f4 | [
"Apache-2.0"
] | null | null | null | NLPCode/sentiment_analysis/LSTM/main.py | trusthlt/dp-across-nlp-tasks | ec3e03511420044cdb0bb1a3574925d354ff03f4 | [
"Apache-2.0"
] | null | null | null | import sys
import os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import torch
from torchtext.legacy import data
from torchtext.legacy import datasets
import random
import torch.optim as optim
import torch.nn as nn
import time
from train_eval_models import train, evaluate
from lstm_model import LSTM
from utils import epoch_time, EarlyStopping
from opacus import PrivacyEngine
from opacus.utils import module_modification
from tuning_structs import Privacy
import numpy as np
# download the dataset takes time so only do it once since it is not effected by the SEED
if __name__ == "__main__":
    # NOTE(review): intentionally-empty entry point; this module appears to be
    # driven externally - confirm nothing should run on direct execution.
    pass
import os
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import torch
from torchtext.legacy import data
from torchtext.legacy import datasets
import random
import torch.optim as optim
import torch.nn as nn
import time
from train_eval_models import train, evaluate
from lstm_model import LSTM
from utils import epoch_time, EarlyStopping
from opacus import PrivacyEngine
from opacus.utils import module_modification
from tuning_structs import Privacy
import numpy as np
def process_data(EP, train_data, test_data, TEXT, LABEL, SEED):
    """Seed the RNGs, carve out a validation split, build vocabularies and
    return batch iterators.

    Returns (device, train_iterator, valid_iterator, test_iterator,
    sample_rate), where sample_rate = BATCH_SIZE / len(train_data) is the
    value later handed to the differential-privacy engine.
    """
    # Seeding must happen before the split so runs are reproducible per SEED.
    torch.manual_seed(SEED)
    torch.backends.cudnn.deterministic = True
    print("splitting training set into train and validation")
    # NOTE(review): random.seed() returns None, so split() effectively gets
    # random_state=None after Python's global RNG is seeded - deterministic in
    # practice, but confirm this is the intended usage.
    train_data, valid_data = train_data.split(random_state=random.seed(SEED))
    print(f'so finally the length of the data: train={len(train_data)}, val={len(valid_data)}, test={len(test_data)}')
    print("building vocab")
    TEXT.build_vocab(train_data,
                     max_size=EP.MAX_VOCAB_SIZE,
                     vectors="glove.6B.100d", # pretraining (for no pretraining comment out)
                     unk_init=torch.Tensor.normal_) # init vectors not with 0 but randomly guassian distributed
    LABEL.build_vocab(train_data)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print("getting iterators")
    # BucketIterator groups examples of similar length to minimise padding.
    train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
        (train_data, valid_data, test_data),
        batch_size=EP.BATCH_SIZE,
        sort_within_batch=True,
        device=device)
    return device, train_iterator, valid_iterator, test_iterator, EP.BATCH_SIZE / len(train_data)
def sentiment_analysis(EP, train_data, test_data, TEXT, LABEL, SEED):
    """Train, validate and test one LSTM sentiment model for a single seed.

    Builds the model with pretrained GloVe embeddings, optionally attaches an
    Opacus differential-privacy engine, trains with early stopping while
    checkpointing the best validation accuracy, then evaluates the best
    checkpoint on the test set.

    Returns:
        (test_loss, test_acc, test_precission, test_recall, test_f1)
    """
    print("---Starting Data Preprocessing---")
    device, train_iterator, valid_iterator, test_iterator, sample_rate = process_data(EP, train_data, test_data, TEXT, LABEL, SEED)
    print("---Create Model---")
    # create model
    PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]
    INPUT_DIM = len(TEXT.vocab)
    model = LSTM(INPUT_DIM, EP.EMBEDDING_DIM, EP.HIDDEN_DIM, EP.OUTPUT_DIM, EP.N_LAYERS, EP.BIDIRECTIONAL, EP.DROPOUT, PAD_IDX, EP.privacy)
    # replace the initial weights of the embedding layer with the pretrained one
    pretrained_embeddings = TEXT.vocab.vectors
    model.embedding.weight.data.copy_(pretrained_embeddings)
    # Zero the <unk> and <pad> rows (they were initialised with N(0,1)) so the
    # model treats both tags as irrelevant to sentiment.
    UNK_IDX = TEXT.vocab.stoi[TEXT.unk_token]  # get the index of <unk>
    model.embedding.weight.data[UNK_IDX] = torch.zeros(EP.EMBEDDING_DIM)
    model.embedding.weight.data[PAD_IDX] = torch.zeros(EP.EMBEDDING_DIM)
    model = model.to(device)
    optimizer = optim.Adam(model.parameters())
    criterion = nn.BCEWithLogitsLoss()
    print("THE SAMPLE RATE IS: " + str(sample_rate))
    # Attach the differential-privacy engine to the optimizer if requested.
    if EP.privacy:
        if EP.alpha is None:
            privacy_engine = PrivacyEngine(model,
                                           sample_rate=sample_rate,
                                           target_delta=EP.delta,
                                           target_epsilon=EP.epsilon,
                                           noise_multiplier=EP.noise_multiplier,
                                           epochs=EP.N_EPOCHS,
                                           max_grad_norm=EP.max_grad_norm)
        else:
            print(f'Setting the alpha to {EP.alpha}')
            privacy_engine = PrivacyEngine(model,
                                           sample_rate=sample_rate,
                                           target_delta=EP.delta,
                                           target_epsilon=EP.epsilon,
                                           noise_multiplier=EP.noise_multiplier,
                                           alphas=[EP.alpha],
                                           epochs=EP.N_EPOCHS,
                                           max_grad_norm=EP.max_grad_norm)
        privacy_engine = privacy_engine.to(device)
        privacy_engine.attach(optimizer)
    criterion = criterion.to(device)
    best_valid_acc = -1
    early_stopping = EarlyStopping(EP.PATIENCE)
    print("---Start Training---")
    for epoch in range(EP.N_EPOCHS):
        start_time = time.time()
        train_loss, train_acc, train_precission, train_recall, train_f1 = train(model, train_iterator, optimizer, criterion, device)
        valid_loss, valid_acc, valid_precission, valid_recall, valid_f1 = evaluate(model, valid_iterator, criterion, device)
        end_time = time.time()
        epoch_mins, epoch_secs = epoch_time(start_time, end_time)
        # Checkpoint the model whenever validation accuracy improves.
        if valid_acc > best_valid_acc:
            best_valid_acc = valid_acc
            torch.save(model.state_dict(), f'{EP.output_dir_model}/{EP.output_file}{SEED}.pt')
        print(f'Epoch: {epoch + 1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s\n')
        print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc * 100:.2f}% | Train Prec.: {train_precission:.3f} | Train Rec.:{train_recall:.3f} | Train F1:{train_f1:.3f} \n')
        # BUG FIX: this console line previously interpolated the *training*
        # metrics under "Valid" labels; it now matches the file log below.
        print(f'\tValid Loss: {valid_loss:.3f} | Valid Acc: {valid_acc * 100:.2f}% | Valid Prec.: {valid_precission:.3f} | Valid Rec.:{valid_recall:.3f} | Valid F1:{valid_f1:.3f} \n')
        # Append the epoch metrics to the run log (with-block guarantees close).
        with open(f'{EP.output_dir_txt}/{EP.output_file}{SEED}.txt', 'a') as f:
            f.write(f'Epoch: {epoch + 1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s\n')
            f.write(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc * 100:.2f}% | Train Prec.: {train_precission:.3f} | Train Rec.:{train_recall:.3f} | Train F1:{train_f1:.3f} \n')
            f.write(f'\tValid Loss: {valid_loss:.3f} | Valid Acc: {valid_acc * 100:.2f}% | Valid Prec.: {valid_precission:.3f} | Valid Rec.:{valid_recall:.3f} | Valid F1:{valid_f1:.3f} \n')
        # log privacy budget
        if EP.privacy:
            epsilon, best_alpha = optimizer.privacy_engine.get_privacy_spent(EP.delta)
            nm = optimizer.privacy_engine.noise_multiplier
            print(f"(ε = {epsilon:.2f}, δ = {EP.delta}) for α = {best_alpha} and noise multiplier = {nm}")
            with open(f'{EP.output_dir_txt}/{EP.priv_output}{SEED}.txt', 'a') as f:
                f.write(f"Epoch: {epoch}\n")
                f.write(f"(ε = {epsilon:.2f}, δ = {EP.delta}) for α = {best_alpha} and noise multiplier = {nm}\n")
        # check for early stopping
        if EP.early_stopping_active and early_stopping.should_stop(valid_acc):
            print(f'Did early stoppin in epoch {epoch}')
            with open(f'{EP.output_dir_txt}/{EP.output_file}{SEED}.txt', 'a') as f:
                f.write(f'Did early stoppin in epoch {epoch}\n')
            break
    # Reload the best checkpoint before evaluating on the held-out test set.
    model.load_state_dict(torch.load(f'{EP.output_dir_model}/{EP.output_file}{SEED}.pt'))
    # test the model
    test_loss, test_acc, test_precission, test_recall, test_f1 = evaluate(model, test_iterator, criterion, device)
    print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc * 100:.2f}% | Test Prec.: {test_precission:.3f} | Test Rec.:{test_recall:.3f} | Test F1:{test_f1:.3f} \n')
    with open(f'{EP.output_dir_txt}/{EP.output_file}{SEED}.txt', 'a') as f:
        f.write(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc * 100:.2f}% | Test Prec.: {test_precission:.3f} | Test Rec.:{test_recall:.3f} | Test F1:{test_f1:.3f} \n')
    return test_loss, test_acc, test_precission, test_recall, test_f1
# download the dataset takes time so only do it once since it is not effected by the SEED
def get_dataset(EP):
    """Build the torchtext fields and load the IMDB train/test splits.

    Done once per run: the download is slow and is unaffected by the seed.
    Returns (train_data, test_data, TEXT_field, LABEL_field).
    """
    print("creating data object for preprocessing")
    # include_lengths=True lets downstream code ignore padding positions.
    review_field = data.Field(tokenize='spacy',
                              tokenizer_language=EP.spacy_english_model,
                              include_lengths=True)
    print("getting Labels")
    sentiment_field = data.LabelField(dtype=torch.float)
    print("splitting datasets into train and test")
    train_split, test_split = datasets.IMDB.splits(review_field, sentiment_field, root=EP.dataset_dir)
    return train_split, test_split, review_field, sentiment_field
def main(EP):
    """Run one full train/eval cycle per seed and report averaged test metrics."""
    train_data, test_data, TEXT, LABEL = get_dataset(EP)
    metric_names = ('loss', 'acc', 'prec', 'rec', 'f1')
    collected = {key: [] for key in metric_names}
    for SEED in EP.seeds:
        results = sentiment_analysis(EP, train_data, test_data, TEXT, LABEL, SEED)
        for key, value in zip(metric_names, results):
            collected[key].append(value)
    averages = {key: np.mean(np.array(values)) for key, values in collected.items()}
    print(f'ave. loss: {averages["loss"]:.3f} | ave. acc: {averages["acc"]:.3f} | ave. prec.:{averages["prec"]:.3f} | ave. rec.:{averages["rec"]:.3f} | ave. f1:{averages["f1"]:.3f} \n')
if __name__ == "__main__":
    # NOTE(review): intentionally-empty entry point; main(EP) above appears to
    # be invoked by an external driver - confirm nothing should run here.
    pass
006efc88627e312fb8810bcd78fa7b9cddb78c3f | 659 | py | Python | BOJ/exaustive_search_boj/start_link.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | BOJ/exaustive_search_boj/start_link.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | BOJ/exaustive_search_boj/start_link.py | mrbartrns/swacademy_structure | 778f0546030385237c383d81ec37d5bd9ed1272d | [
"MIT"
] | null | null | null | # BOJ 14889
import sys
from itertools import combinations
si = sys.stdin.readline
n = int(si())
graph = [list(map(int, si().split())) for _ in range(n)]
people = [i for i in range(n)]
comb = list(combinations(people, n // 2))
size = len(comb)
start = comb[: size // 2]
link = list(reversed(comb[size // 2 :]))
sub = 10000000
for i in range(size // 2):
s_couple = start[i]
l_couple = link[i]
s_s = 0
l_s = 0
for j in s_couple:
for k in s_couple:
s_s += graph[j][k]
for j in l_couple:
for k in l_couple:
l_s += graph[j][k]
if sub > abs(s_s - l_s):
sub = abs(s_s - l_s)
print(sub)
| 20.59375 | 56 | 0.569044 | # BOJ 14889
# BOJ 14889 ("Start and Link"): split 2k players into two equal teams so the
# difference between the teams' pairwise-ability sums is minimal.
import sys
from itertools import combinations
si = sys.stdin.readline
n = int(si())
# graph[i][j]: ability contributed when players i and j play on the same team.
graph = [list(map(int, si().split())) for _ in range(n)]
people = [i for i in range(n)]
# Every way of choosing the first team (n // 2 players).
comb = list(combinations(people, n // 2))
size = len(comb)
# combinations() yields lexicographically, so the complement of comb[i] is
# comb[size - 1 - i]; pairing the first half with the reversed second half
# enumerates each partition into two teams exactly once.
start = comb[: size // 2]
link = list(reversed(comb[size // 2 :]))
sub = 10000000  # best (minimum) difference seen so far
for i in range(size // 2):
    s_couple = start[i]
    l_couple = link[i]
    s_s = 0
    l_s = 0
    # Sum graph[j][k] over all ordered pairs inside each team; the diagonal
    # graph[j][j] is presumably 0 in the problem input - TODO confirm.
    for j in s_couple:
        for k in s_couple:
            s_s += graph[j][k]
    for j in l_couple:
        for k in l_couple:
            l_s += graph[j][k]
    if sub > abs(s_s - l_s):
        sub = abs(s_s - l_s)
print(sub)
| 0 | 0 | 0 |
f18108c4b4b18796e84bfc2bec317d23dbceab63 | 1,117 | py | Python | leetcode/math/largest-time-for-given-digits.py | jaimeulloa61/data-structure-and-algorithms | 76140bb36b62ebc7c60914c48a323aae4956fb0a | [
"MIT"
] | 81 | 2020-05-22T14:22:04.000Z | 2021-12-18T10:11:23.000Z | leetcode/math/largest-time-for-given-digits.py | jaimeulloa61/data-structure-and-algorithms | 76140bb36b62ebc7c60914c48a323aae4956fb0a | [
"MIT"
] | 4 | 2020-08-06T21:08:00.000Z | 2021-03-31T16:07:50.000Z | leetcode/math/largest-time-for-given-digits.py | jaimeulloa61/data-structure-and-algorithms | 76140bb36b62ebc7c60914c48a323aae4956fb0a | [
"MIT"
] | 37 | 2020-05-22T14:25:21.000Z | 2021-12-30T03:13:13.000Z | """
## Questions: EASY
### 949. [Largest Time for Given Digits](https://leetcode.com/problems/largest-time-for-given-digits)
Given an array of 4 digits, return the largest 24 hour time that can be made.
The smallest 24 hour time is 00:00, and the largest is 23:59. Starting from 00:00, a time is larger if more time has
elapsed since midnight.
Return the answer as a string of length 5. If no valid time can be made, return an empty string.
Example 1:
Input: [1,2,3,4]
Output: "23:41"
Example 2:
Input: [5,5,5,5]
Output: ""
Note:
A.length == 4
0 <= A[i] <= 9
"""
# Solutions
from itertools import permutations
# Runtime : 36 ms, faster than 63.05% of Python3 online submissions
# Memory Usage : 13.8 MB, less than 80.23% of Python3 online submissions
| 24.822222 | 117 | 0.622202 | """
## Questions: EASY
### 949. [Largest Time for Given Digits](https://leetcode.com/problems/largest-time-for-given-digits)
Given an array of 4 digits, return the largest 24 hour time that can be made.
The smallest 24 hour time is 00:00, and the largest is 23:59. Starting from 00:00, a time is larger if more time has
elapsed since midnight.
Return the answer as a string of length 5. If no valid time can be made, return an empty string.
Example 1:
Input: [1,2,3,4]
Output: "23:41"
Example 2:
Input: [5,5,5,5]
Output: ""
Note:
A.length == 4
0 <= A[i] <= 9
"""
# Solutions
from itertools import permutations
class Solution:
    def largestTimeFromDigits(self, A: List[int]) -> str:
        """Return the largest valid 24-hour time "HH:MM" formed from the four
        digits of *A*, or an empty string when no valid time exists."""
        best = ""
        for h1, h2, m1, m2 in permutations(A):
            # Valid clock time: hours at most 23 and minute tens digit at most 5.
            if 10 * h1 + h2 <= 23 and m1 <= 5:
                candidate = "{}{}:{}{}".format(h1, h2, m1, m2)
                if candidate > best:
                    best = candidate
        return best
# Runtime : 36 ms, faster than 63.05% of Python3 online submissions
# Memory Usage : 13.8 MB, less than 80.23% of Python3 online submissions
| 302 | -6 | 49 |
2d9606afaaacd45328bcef0d50dd3a9ce97bc2cc | 3,244 | py | Python | docs/conf.py | Substancia/fdtd | 9444deba9a31f9ef4b4618c1b6c753cf46cc1ae7 | [
"MIT"
] | null | null | null | docs/conf.py | Substancia/fdtd | 9444deba9a31f9ef4b4618c1b6c753cf46cc1ae7 | [
"MIT"
] | null | null | null | docs/conf.py | Substancia/fdtd | 9444deba9a31f9ef4b4618c1b6c753cf46cc1ae7 | [
"MIT"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import shutil
sys.path.insert(0, os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
import fdtd
# -- Project information -----------------------------------------------------
project = fdtd.__name__
copyright = "2021, {fdtd.__author__}"
author = fdtd.__author__
# The full version, including alpha/beta/rc tags
release = fdtd.__version__
# -- General configuration ---------------------------------------------------
master_doc = "index"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"nbsphinx",
"sphinx_rtd_theme",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Change how type hints are displayed (requires sphinx.ext.autodoc.typehints)
autodoc_typehints = "signature" # signature, description, none
autodoc_type_aliases = {}
# -- Autodoc ----------------------------------------------------------------
autodoc_mock_imports = ["tqdm", "numpy", "matplotlib", "torch"]
# -- Examples Folder ---------------------------------------------------------
sourcedir = os.path.dirname(__file__)
staticdir = os.path.join(sourcedir, "_static")
fdtd_src = os.path.abspath(os.path.join(sourcedir, "..", "fdtd"))
examples_src = os.path.abspath(os.path.join(sourcedir, "..", "examples"))
examples_dst = os.path.abspath(os.path.join(sourcedir, "examples"))
os.makedirs(staticdir, exist_ok=True)
shutil.rmtree(examples_dst, ignore_errors=True)
shutil.copytree(examples_src, examples_dst)
shutil.copytree(fdtd_src, os.path.join(examples_dst, "fdtd"))
| 34.147368 | 79 | 0.664612 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import shutil
sys.path.insert(0, os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
import fdtd
# -- Project information -----------------------------------------------------
project = fdtd.__name__
copyright = "2021, {fdtd.__author__}"
author = fdtd.__author__
# The full version, including alpha/beta/rc tags
release = fdtd.__version__
# -- General configuration ---------------------------------------------------
master_doc = "index"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"nbsphinx",
"sphinx_rtd_theme",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Change how type hints are displayed (requires sphinx.ext.autodoc.typehints)
autodoc_typehints = "signature" # signature, description, none
autodoc_type_aliases = {}
# -- Autodoc ----------------------------------------------------------------
def skip(app, what, name, obj, would_skip, options):
if name == "__init__":
return False
return would_skip
def setup(app):
app.connect("autodoc-skip-member", skip)
autodoc_mock_imports = ["tqdm", "numpy", "matplotlib", "torch"]
# -- Examples Folder ---------------------------------------------------------
sourcedir = os.path.dirname(__file__)
staticdir = os.path.join(sourcedir, "_static")
fdtd_src = os.path.abspath(os.path.join(sourcedir, "..", "fdtd"))
examples_src = os.path.abspath(os.path.join(sourcedir, "..", "examples"))
examples_dst = os.path.abspath(os.path.join(sourcedir, "examples"))
os.makedirs(staticdir, exist_ok=True)
shutil.rmtree(examples_dst, ignore_errors=True)
shutil.copytree(examples_src, examples_dst)
shutil.copytree(fdtd_src, os.path.join(examples_dst, "fdtd"))
| 140 | 0 | 45 |
5d5cfccfe104c17e1292b078cb47b07be3d866c4 | 1,065 | py | Python | app/deleteuser.py | ST4NSB/music-recommendation-system | eb8fef34b96a097b78c58e908307032f043bfcd2 | [
"MIT"
] | null | null | null | app/deleteuser.py | ST4NSB/music-recommendation-system | eb8fef34b96a097b78c58e908307032f043bfcd2 | [
"MIT"
] | null | null | null | app/deleteuser.py | ST4NSB/music-recommendation-system | eb8fef34b96a097b78c58e908307032f043bfcd2 | [
"MIT"
] | 1 | 2021-09-04T21:17:37.000Z | 2021-09-04T21:17:37.000Z | from typing import Any, Dict
from flask import jsonify, make_response, request, abort
from flask.helpers import make_response
from flask_restful import Resource
from werkzeug.exceptions import HTTPException
from werkzeug.wrappers import Response
| 35.5 | 74 | 0.613146 | from typing import Any, Dict
from flask import jsonify, make_response, request, abort
from flask.helpers import make_response
from flask_restful import Resource
from werkzeug.exceptions import HTTPException
from werkzeug.wrappers import Response
class DeleteUser(Resource):
def __init__(self, rs: Any, api_key: str):
self.rs = rs
self.api_key = api_key
def delete(self, userId: str) -> Response:
try:
key = request.headers.get('API-Key')
if key != self.api_key:
abort(401, "Wrong API-KEY for your request!")
self.rs.delete_user(userId)
response = make_response('', 204)
response.headers["Content-Type"] = "application/json"
response.headers.add("Access-Control-Allow-Origin", "*")
return response
except Exception as e:
if isinstance(e, HTTPException):
abort(e.code, e.description)
else:
abort(500, f"Something went wrong with the system! ({e})")
| 718 | 6 | 76 |
0c032bdb1b147e4aa1adad5967d0417d16a3da49 | 390 | py | Python | app/models/domain/articles.py | StanislavRud/api-realword-app-test | 9a49f299b02cec26d237f3bc4b363c8b93520b7b | [
"MIT"
] | 1,875 | 2019-03-27T14:26:20.000Z | 2022-03-31T14:52:50.000Z | app/models/domain/articles.py | StanislavRud/api-realword-app-test | 9a49f299b02cec26d237f3bc4b363c8b93520b7b | [
"MIT"
] | 232 | 2019-04-11T11:05:48.000Z | 2022-03-05T10:23:50.000Z | app/models/domain/articles.py | StanislavRud/api-realword-app-test | 9a49f299b02cec26d237f3bc4b363c8b93520b7b | [
"MIT"
] | 433 | 2019-04-11T01:48:59.000Z | 2022-03-31T10:33:42.000Z | from typing import List
from app.models.common import DateTimeModelMixin, IDModelMixin
from app.models.domain.profiles import Profile
from app.models.domain.rwmodel import RWModel
| 22.941176 | 62 | 0.753846 | from typing import List
from app.models.common import DateTimeModelMixin, IDModelMixin
from app.models.domain.profiles import Profile
from app.models.domain.rwmodel import RWModel
class Article(IDModelMixin, DateTimeModelMixin, RWModel):
slug: str
title: str
description: str
body: str
tags: List[str]
author: Profile
favorited: bool
favorites_count: int
| 0 | 185 | 23 |
4c2412ee0bfe7391af1f0ace035b2d7d655dc6b5 | 872 | py | Python | visual_mpc/policy/handcrafted/playback_policy.py | thomasweng15/visual_foresight | 0f00ffcfc05e9ce47d76d440d91c298a35d8f7b1 | [
"MIT"
] | 108 | 2018-12-04T04:57:07.000Z | 2022-03-15T21:13:36.000Z | visual_mpc/policy/handcrafted/playback_policy.py | thomasweng15/visual_foresight | 0f00ffcfc05e9ce47d76d440d91c298a35d8f7b1 | [
"MIT"
] | 16 | 2019-03-12T13:31:45.000Z | 2022-03-11T23:36:12.000Z | visual_mpc/policy/handcrafted/playback_policy.py | thomasweng15/visual_foresight | 0f00ffcfc05e9ce47d76d440d91c298a35d8f7b1 | [
"MIT"
] | 35 | 2018-12-05T08:41:54.000Z | 2022-03-15T21:13:37.000Z | import numpy as np
from visual_mpc.policy.policy import Policy
import sys
if sys.version_info[0] < 3:
import cPickle as pkl
else:
import pickle as pkl
| 30.068966 | 70 | 0.649083 | import numpy as np
from visual_mpc.policy.policy import Policy
import sys
if sys.version_info[0] < 3:
import cPickle as pkl
else:
import pickle as pkl
class PlaybackPolicy(Policy):
def __init__(self, agentparams, policyparams, gpu_id, npgu):
self._hp = self._default_hparams()
self._override_defaults(policyparams)
self.agentparams = agentparams
self._adim = agentparams['adim']
self._pkl = None
def _default_hparams(self):
parent_params = super(PlaybackPolicy, self)._default_hparams()
parent_params.add_hparam('file', './act.pkl')
return parent_params
def act(self, state, t):
if t == 0 or self._pkl is None:
self._pkl = pkl.load(open(self._hp.file, 'rb'))
assert 0 <= t < len(self._pkl), "too long!"
return {'actions': self._pkl[t]['actions']}
| 601 | 8 | 103 |
705c2e7f64edcd9f9ffde7a0277d8c6b633c79e3 | 2,338 | py | Python | portfolio/Python/scrapy/netthandelen/coverbrands_spider.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | null | null | null | portfolio/Python/scrapy/netthandelen/coverbrands_spider.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | null | null | null | portfolio/Python/scrapy/netthandelen/coverbrands_spider.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | 5 | 2016-03-22T07:40:46.000Z | 2021-05-30T16:12:21.000Z | import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.url import urljoin_rfc
from scrapy.utils.response import get_base_url
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from product_spiders.fuzzywuzzy import process
from product_spiders.fuzzywuzzy import fuzz
HERE = os.path.abspath(os.path.dirname(__file__))
| 50.826087 | 151 | 0.639008 | import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.url import urljoin_rfc
from scrapy.utils.response import get_base_url
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
from product_spiders.fuzzywuzzy import process
from product_spiders.fuzzywuzzy import fuzz
HERE = os.path.abspath(os.path.dirname(__file__))
class CoverBrandsSpider(BaseSpider):
    """Scrapy spider for coverbrands.no: follows the left-hand category menu
    and scrapes brand/name/price from each product listing page."""
    name = 'coverbrands.no'
    allowed_domains = ['coverbrands.no']
    start_urls = ['http://www.coverbrands.no/shop/']
    def parse(self, response):
        # Follow every category link in the left-hand menu.
        hxs = HtmlXPathSelector(response)
        categories = hxs.select('//ul[@class="leftMenu"]/li/a/@href').extract()
        for category in categories:
            url = urljoin_rfc(get_base_url(response), category)
            yield Request(url, callback=self.parse_products)
    def parse_products(self, response):
        """Yield one product item per listing entry, then follow pagination."""
        hxs = HtmlXPathSelector(response)
        products = hxs.select('//li[@class="product" or @class="product end"]')
        for product in products:
            name = ''.join(product.select('div/div[@class="heading"]/h2/text()').extract())
            if name:
                loader = ProductLoader(item=Product(), selector=product)
                brand = ''.join(product.select('div/div[@class="heading"]/h3/text()').extract())
                loader.add_value('name', ' '.join((brand, name)))
                relative_url = product.select('div/a[@class="productOverlay"]/@href').extract()
                loader.add_value('url', urljoin_rfc(get_base_url(response), relative_url[0]))
                # Normalise the price string: drop thousands separators, use
                # '.' as decimal separator, strip non-breaking spaces.
                price = ''.join(product.select('div/p[@class="price color"]/text()').extract()).replace('.', '').replace(',', '.').replace(u'\xa0', '')
                if not price:
                    # Fall back to the alternative price node when the
                    # highlighted ("color") variant is absent.
                    price = ''.join(product.select('div/p[@class="price "]/text()').extract()).replace('.', '').replace(',', '.').replace(u'\xa0', '')
                loader.add_value('price', price)
                yield loader.load_item()
        # Pagination: the '//' XPath is document-rooted, so selecting via the
        # last `product` still searches the whole page.
        # NOTE(review): `next` shadows the builtin, and this line raises
        # NameError when `products` is empty - confirm listing pages are never empty.
        next = product.select('//div[@class="pageNavigation"]/ul/li[@class="next"]/a/@href').extract()
        if next:
            url = urljoin_rfc(get_base_url(response), next[-1])
            yield Request(url, callback=self.parse_products)
| 1,667 | 191 | 23 |
70ec56ff780f5dd797f1ef1f0a062e333cc08162 | 1,829 | py | Python | tests/integration/agent/test_agent_manager.py | datacraft-dsc/starfish-py | 95ff24410f056e8e2d313c3af97439fe003e294a | [
"Apache-2.0"
] | 4 | 2019-02-08T03:47:36.000Z | 2019-10-17T21:45:23.000Z | tests/integration/agent/test_agent_manager.py | datacraft-dsc/starfish-py | 95ff24410f056e8e2d313c3af97439fe003e294a | [
"Apache-2.0"
] | 81 | 2019-02-09T01:01:51.000Z | 2020-07-01T08:35:07.000Z | tests/integration/agent/test_agent_manager.py | oceanprotocol/ocean-py | 318ad0de2519e61d0a301c040a48d1839cd82425 | [
"Apache-2.0"
] | 1 | 2021-01-28T12:14:03.000Z | 2021-01-28T12:14:03.000Z | """
Unit test AgentManager
"""
import pytest
import secrets
from starfish.agent_manager import AgentManager
from starfish.asset import DataAsset
from starfish.network.ddo import DDO
| 25.054795 | 83 | 0.726627 | """
Unit test AgentManager
"""
import pytest
import secrets
from starfish.agent_manager import AgentManager
from starfish.asset import DataAsset
from starfish.network.ddo import DDO
def test_agent_manager_register(config):
manager = AgentManager()
manager.register_agents(config['agents'])
assert(manager.items)
def test_agent_manager_load_agent(config):
manager = AgentManager()
agent_items = config['agents']
manager.register_agents(agent_items)
name = 'surfer'
assert(name == 'surfer')
ddo = manager.load_ddo(name)
assert(ddo)
# load a named item
remote_agent = manager.load_agent(name)
assert(remote_agent)
assert(remote_agent.did == ddo.did)
# load from a agent did
remote_agent = manager.load_agent(ddo.did)
assert(remote_agent)
assert(remote_agent.did == ddo.did)
test_data = secrets.token_hex(1024)
asset_data = DataAsset.create('TestAsset', test_data)
asset = remote_agent.register_asset(asset_data)
assert(asset)
# load from a asset_did
remote_agent = manager.load_agent(asset.did)
assert(remote_agent)
assert(remote_agent.did == ddo.did)
def test_agent_manager_convex_network_resolve_did(convex_network, convex_accounts):
manager = AgentManager()
account = convex_accounts[0]
ddo = DDO.create('http://localhost')
convex_network.register_did(account, ddo.did, ddo.as_text)
manager.network = convex_network
resolve_ddo = manager.resolve_agent_did(ddo.did)
assert(resolve_ddo)
assert(resolve_ddo.did == ddo.did)
manager.network = None
with pytest.raises(ValueError):
resolve_ddo = manager.resolve_agent_did(ddo.did)
resolve_ddo = manager.resolve_agent_did(ddo.did, convex_network)
assert(resolve_ddo)
assert(resolve_ddo.did == ddo.did)
| 1,572 | 0 | 69 |
032adfb13952d189310f2016bb5a3fedf65a3dad | 2,356 | py | Python | ahvl/options/lookup/sshhostkey.py | gardar/ahvl | 045b5882d94fc2d4ba7b194bf65ebfbf9d2e1d6d | [
"MIT"
] | 4 | 2019-10-12T12:11:23.000Z | 2021-12-20T13:53:28.000Z | ahvl/options/lookup/sshhostkey.py | gardar/ahvl | 045b5882d94fc2d4ba7b194bf65ebfbf9d2e1d6d | [
"MIT"
] | 2 | 2021-02-05T12:52:55.000Z | 2022-02-11T10:58:52.000Z | ahvl/options/lookup/sshhostkey.py | gardar/ahvl | 045b5882d94fc2d4ba7b194bf65ebfbf9d2e1d6d | [
"MIT"
] | 1 | 2020-08-13T07:52:27.000Z | 2020-08-13T07:52:27.000Z | #
# import modules
#
from ahvl.options.base import OptionsBase
from ahvl.helper import AhvlMsg, AhvlHelper
#
# helper/message
#
msg = AhvlMsg()
hlp = AhvlHelper()
#
# OptionsLookupSSHHostKey
#
# set option prefix
# set path
# useable variables:
# - {find}
# - {hostname}
# set default options
# calculate any remaining options
# set required options
| 24.040816 | 129 | 0.532683 | #
# import modules
#
from ahvl.options.base import OptionsBase
from ahvl.helper import AhvlMsg, AhvlHelper
#
# helper/message
#
msg = AhvlMsg()
hlp = AhvlHelper()
#
# OptionsLookupSSHHostKey
#
class OptionsLookupSSHHostKey(OptionsBase):
    """Options for the ahvl SSH host-key lookup: prefix, Vault path layout,
    defaults and input validation for the [in]/[find]/[path] parameters."""
    # set option prefix
    def get_prefix(self):
        """Return the option prefix used for this lookup type."""
        return "ahvl_sshhostkey"
    # set path
    # useable variables:
    # - {find}
    # - {hostname}
    def get_path(self):
        """Return the secret base path template (placeholders filled later)."""
        return "hosts/{hostname}/sshhostkeys/{find}"
    # set default options
    def get_defaults(self):
        """Return default option values (none for this lookup)."""
        return {}
    # calculate any remaining options
    def get_appended(self):
        """Return options derived from other options (none for this lookup)."""
        # set shorthand
        # NOTE(review): `o` is currently unused here - confirm whether the
        # self.options access has a purpose or can be dropped.
        o = self.options
        # return list of overide options or calculated options
        return {}
    # set required options
    def get_required(self):
        """Return the list of required option names (none for this lookup)."""
        return []
    def validate(self):
        """Validate [in], [find] and [path]; each failure is reported via
        msg.fail() (assumed to abort/raise - TODO confirm its semantics)."""
        # set shorthand
        o = self.options
        #
        # set accepted values
        #
        # Every key attribute/fingerprint representation that may be requested
        # with the [in] parameter.
        allowed_in = ["private", # key type is set by sshhostkey_type (default: ed25519)
                      "private_keybits",
                      "private_keytype",
                      "fingerprint_sha256",
                      "fingerprint_sha256_clean",
                      "fingerprint_sha256_art",
                      "fingerprint_md5",
                      "fingerprint_md5_clean",
                      "fingerprint_md5_art",
                      "fingerprint_bubblebabble",
                      "fingerprint_bubblebabble_clean",
                      "dns_sha1",
                      "dns_sha1_clean",
                      "dns_sha256",
                      "dns_sha256_clean",
                      "public",
                     ]
        # Supported host-key algorithms for the [find] parameter.
        allowed_find = ["rsa", "ed25519"]
        #
        # sanity checks
        #
        if o['in'] not in allowed_in:
            msg.fail("value for [in] parameter is invalid; [{}] given, but expected one of {}".format(o['in'], allowed_in))
        if o['find'] not in allowed_find:
            msg.fail("value for [find] parameter is invalid; [{}] given, but expected one of {}".format(o['find'], allowed_find))
        if hlp.isempty(o['path']):
            msg.fail("path is missing");
| 1,761 | 22 | 179 |
3a1594135a5ff0e3497e305d1ed292ee397596f7 | 1,276 | py | Python | tests/integration/__init__.py | codeanonorg/Simple | f96d241f6a39e9f9dc949a3598cd46f4b6d044f4 | [
"MIT"
] | 2 | 2021-01-23T23:33:08.000Z | 2021-01-24T14:01:53.000Z | tests/integration/__init__.py | codeanonorg/Simple | f96d241f6a39e9f9dc949a3598cd46f4b6d044f4 | [
"MIT"
] | null | null | null | tests/integration/__init__.py | codeanonorg/Simple | f96d241f6a39e9f9dc949a3598cd46f4b6d044f4 | [
"MIT"
] | null | null | null | """
Copyright (c) 2021 SolarLiner, jdrprod, Arxaqapi
This software is released under the MIT License.
https://opensource.org/licenses/MIT
"""
import os
import json
import unittest
from pathlib import Path
from Simple.document import Document
| 27.73913 | 83 | 0.583072 | """
Copyright (c) 2021 SolarLiner, jdrprod, Arxaqapi
This software is released under the MIT License.
https://opensource.org/licenses/MIT
"""
import os
import json
import unittest
from pathlib import Path
from Simple.document import Document
class FolderExpansionTest(unittest.TestCase):
    """Snapshot test for one template folder.

    Renders ``<cwd>/index.html`` (optionally with a ``data.json`` context)
    and compares the result against ``<cwd>/expected.html``.
    """

    def __init__(self, cwd: Path) -> None:
        super().__init__(methodName="test_matches_snapshot")
        self.path = cwd

    def test_matches_snapshot(self):
        """Rendered output must match the expected snapshot, prettified."""
        source = Document(self.path / "index.html")
        snapshot = Document(self.path / "expected.html")

        data_file = self.path / "data.json"
        data = json.loads(data_file.read_text()) if data_file.exists() else {}

        self.assertEqual(
            source.render(data).prettify(), snapshot.html.prettify(), str(self.path)
        )
class IntegrationTestSuite(unittest.TestSuite):
    """Suite with one FolderExpansionTest per test-case folder in *cwd*.

    Directories whose name starts with ``__`` (e.g. ``__pycache__``) are
    skipped. The original nested ``map``/``filter`` over ``os.listdir`` is
    replaced with a pathlib comprehension; entries are sorted so the test
    order is deterministic across filesystems.
    """

    def __init__(self, cwd: Path) -> None:
        case_dirs = sorted(
            entry
            for entry in cwd.iterdir()
            if entry.is_dir() and not entry.name.startswith("__")
        )
        super().__init__(tests=[FolderExpansionTest(d) for d in case_dirs])
| 850 | 50 | 125 |
8c4caf932f48cc51a3aeb48a3097d04e3375be45 | 12,246 | py | Python | trading/views/offers.py | DarkoR12/dafi-system | f923ea4273b04f7acc7016b2f7d03e51eb00b85b | [
"MIT"
] | 7 | 2019-08-03T12:25:18.000Z | 2021-11-02T12:51:33.000Z | trading/views/offers.py | DarkoR12/dafi-system | f923ea4273b04f7acc7016b2f7d03e51eb00b85b | [
"MIT"
] | 11 | 2019-08-20T17:07:37.000Z | 2021-11-23T14:26:07.000Z | trading/views/offers.py | DarkoR12/dafi-system | f923ea4273b04f7acc7016b2f7d03e51eb00b85b | [
"MIT"
] | 4 | 2020-04-06T11:33:02.000Z | 2021-10-31T09:10:53.000Z | import json
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.shortcuts import redirect
from django.urls import reverse
from django.views.generic import DetailView, ListView, TemplateView
from meta.views import MetadataMixin
from bot.notifications import telegram_notify
from ..models import TradeOffer, TradeOfferAnswer, TradeOfferLine, YEARS
from .common import TradingPeriodMixin
| 31.643411 | 274 | 0.609832 | import json
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.core.exceptions import ValidationError
from django.db.models import Q
from django.shortcuts import redirect
from django.urls import reverse
from django.views.generic import DetailView, ListView, TemplateView
from meta.views import MetadataMixin
from bot.notifications import telegram_notify
from ..models import TradeOffer, TradeOfferAnswer, TradeOfferLine, YEARS
from .common import TradingPeriodMixin
class IndexView(MetadataMixin, TradingPeriodMixin, ListView):
    """Paginated list of trade offers for the current trading period."""

    paginate_by = 10

    title = 'Permutas - DAFI'
    description = 'Sistema de Permutas de la Delegación de Alumnos de la Facultad de Informática'
    image = 'images/favicon.png'

    def get_context_data(self, **kwargs):
        """Add the current user's own offer and accepted answer, if any."""
        context = super().get_context_data(**kwargs)

        user = self.request.user
        if user.is_authenticated:
            period = self.get_current_period()

            context['my_answer'] = (
                TradeOffer.objects
                .filter(period=period, answer__user=user)
                .exclude(answer=None)
                .first()
            )
            context['my_offer'] = TradeOffer.objects.filter(
                user=user, period=period
            ).first()

        return context

    def get_queryset(self):
        """Open visible offers, plus (for logged-in users) their own offers
        and offers whose accepted answer is theirs."""
        visibility = Q(is_visible=True, answer=None)

        user = self.request.user
        if user.is_authenticated:
            visibility = visibility | Q(user=user) | (~Q(answer=None) & Q(answer__user=user))

        return TradeOffer.objects.prefetch_related('lines').filter(
            Q(period=self.get_current_period()) & visibility
        )
class TradeOfferDetailView(MetadataMixin, TradingPeriodMixin, UserPassesTestMixin, DetailView):
    """Detail page for a single trade offer."""

    model = TradeOffer

    title = 'Oferta de Permuta - DAFI'
    description = 'Sistema de Permutas de la Delegación de Alumnos de la Facultad de Informática'
    image = 'images/favicon.png'

    def test_func(self):
        """Grant access to open visible offers, trading managers, and the
        two involved parties (offer creator, accepted answer's author).

        Bug fix: the original evaluated ``offer.answer.user`` even when
        ``offer.answer`` was None (hidden, unanswered offers), raising
        AttributeError instead of denying access. The answer is now
        guarded before dereferencing.
        """
        offer = self.get_object()
        user = self.request.user

        return (
            (not offer.answer and offer.is_visible)
            or user.has_perm('trading.is_manager')
            or offer.user == user
            or (offer.answer is not None and offer.answer.user == user)
        )

    def get_context_data(self, **kwargs):
        """Expose the current user's answer and the visible answers list."""
        context = super().get_context_data(**kwargs)

        if self.request.user.is_authenticated:
            context['my_answer'] = self.get_object().answers.filter(user=self.request.user).first()

        if 'answers' not in context:
            context['answers'] = self.get_object().answers.filter(is_visible=True)

        return context

    def get_queryset(self):
        # Prefetch lines: the template iterates them for every offer.
        return super().get_queryset().prefetch_related('lines')
class TradeOfferEditMixin(MetadataMixin, TradingPeriodMixin):
    """Shared form logic for creating/editing a TradeOffer.

    Subclasses must provide ``get_offer()``, ``get_lines()``,
    ``get_success_url()`` and the ``is_creation`` flag.
    """

    template_name = 'trading/tradeoffer_form.html'

    description = 'Sistema de Permutas de la Delegación de Alumnos de la Facultad de Informática'
    image = 'images/favicon.png'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Validation errors accumulated during POST, re-rendered in the form.
        self._errors = []

    def get_context_data(self, **kwargs):
        """Expose the offer, its lines, accumulated errors and the
        year->groups map (as JSON for the client-side form)."""
        context = super().get_context_data(**kwargs)
        context['offer'] = self.get_offer()
        context['lines'] = self.get_lines()
        context['errors'] = self._errors
        # Keys shifted by -1: the client uses 0-based year indices.
        context['years'] = json.dumps({y - 1: YEARS[y].groups for y in YEARS})
        return context

    def post(self, request, **kwargs):
        """Validate and persist the submitted offer lines.

        Lines that fail validation with no subjects selected are treated
        as removals; lines with subjects but invalid data surface their
        errors back to the form.
        """
        valid = []
        deleted = 0

        for line in self.get_lines():
            # Fields are namespaced in the form by the line index (line.i).
            line.curr_group = request.POST.get('{}-curr_group'.format(line.i), 1)
            line.curr_subgroup = request.POST.get('{}-curr_subgroup'.format(line.i), 1)
            line.subjects = ','.join(request.POST.getlist('{}-subjects'.format(line.i)))
            line.wanted_groups = ','.join(request.POST.getlist('{}-wanted_groups'.format(line.i)))

            try:
                line.full_clean(exclude=['offer'])
                valid.append(line)
            except ValidationError as e:
                if line.get_subjects_list():
                    # Real input errors: show them to the user.
                    self._errors.append(e)
                elif line.id:
                    # Empty line that existed before: user removed it.
                    line.delete()
                    deleted += 1

        offer = self.get_offer()

        if 'description' in request.POST:
            offer.description = request.POST['description']

            if offer.id:
                offer.save()

        if valid:
            if self.is_creation:
                offer.save()
            else:
                # Editing invalidates existing answers: notify and drop them.
                answers = TradeOfferAnswer.objects.filter(offer=offer)

                for answer in answers:
                    telegram_notify(answer.user, 'Se ha eliminado tu respuesta a la oferta #{} porque ha sido modificada, deberías revisar la oferta por si todavía te interesa.'.format(offer.id), url=reverse('trading:offer_detail', args=[offer.id]), url_button='Ver oferta')
                    answer.delete()

            for line in valid:
                line.offer = offer
                line.save()

            return redirect(self.get_success_url(**kwargs))
        elif deleted:
            # Every line was removed: the offer itself is gone.
            offer.delete()
            return redirect('trading:list')

        # Nothing valid submitted: re-render the form with the errors.
        return super().get(request, **kwargs)
class TradeOfferAddView(LoginRequiredMixin, TradeOfferEditMixin, TemplateView):
    """Form view that creates a new trade offer for the logged-in user."""

    title = 'Crear una Oferta de Permuta'
    submit_btn = 'Crear Oferta'
    is_creation = True

    def __init__(self, *args, **kwargs):
        # Lazily-built offer and lines, cached for the request lifetime.
        self._offer = None
        self._lines = None
        return super().__init__(*args, **kwargs)

    def get_offer(self):
        """Return (building on first access) an unsaved offer for the user."""
        if not self._offer:
            self._offer = TradeOffer(user=self.request.user, period=self.get_current_period())
        return self._offer

    def get_lines(self):
        """Return one fresh, unsaved line per academic year."""
        if not self._lines:
            offer = self.get_offer()
            self._lines = [TradeOfferLine(offer=offer, year=year) for year in YEARS]
        return self._lines

    def get_success_url(self, **kwargs):
        return reverse('trading:offer_detail', args=[self.get_offer().id])
class TradeOfferEditView(UserPassesTestMixin, TradeOfferEditMixin, DetailView):
    """Form view that edits an existing, not-yet-answered trade offer."""

    model = TradeOffer

    title = 'Editar una Oferta de Permuta'
    submit_btn = 'Guardar'
    is_creation = False

    def __init__(self, *args, **kwargs):
        # Cached lines for the request lifetime (built on first access).
        self._lines = None
        return super().__init__(*args, **kwargs)

    def test_func(self):
        # Only the owner may edit, and only while the offer is unanswered.
        offer = self.get_object()
        return not offer.answer and self.request.user == offer.user

    def get_queryset(self):
        return super().get_queryset().prefetch_related('lines')

    def get_offer(self):
        return self.get_object()

    def get_lines(self):
        """Return one line per academic year: the stored line when it
        exists, otherwise a fresh unsaved placeholder."""
        if not self._lines:
            self._lines = []

            lines = {x.year: x for x in self.get_offer().lines.all()}

            for year in YEARS:
                if year in lines:
                    self._lines.append(lines[year])
                else:
                    self._lines.append(TradeOfferLine(offer=self.get_offer(), year=year))

        return self._lines

    def get_success_url(self, **kwargs):
        return reverse('trading:offer_edit', args=[self.get_offer().id])
class TradeOfferDeleteView(MetadataMixin, UserPassesTestMixin, TradingPeriodMixin, DetailView):
    """Confirmation page that deletes an unanswered trade offer."""

    template_name = 'trading/tradeoffer_delete.html'
    model = TradeOffer

    title = 'Eliminar Oferta de Permuta - DAFI'
    description = 'Sistema de Permutas de la Delegación de Alumnos de la Facultad de Informática'
    image = 'images/favicon.png'

    def test_func(self):
        # Only the owner may delete, and only while the offer is unanswered.
        offer = self.get_object()
        return not offer.answer and self.request.user == offer.user

    def post(self, request, **kwargs):
        """Delete the offer, its lines, and any pending answers (with a
        Telegram notification to each answer's author)."""
        offer = self.get_object()

        for answer in offer.answers.all():
            telegram_notify(answer.user, 'Se ha eliminado tu respuesta a la oferta #{} porque ha sido eliminada.'.format(offer.id))
            answer.delete()

        for line in offer.lines.all():
            line.delete()

        offer.delete()

        return redirect('trading:list')

    def get(self, request, *args, **kwargs):
        # Accepted offers can no longer be deleted; bounce to the detail page.
        if self.get_object().answer:
            return redirect(self.get_object())

        return super().get(request, *args, **kwargs)
class ChangeAccessMixin(UserPassesTestMixin):
    """Restrict access to the two parties of an offer with an accepted answer."""

    def test_func(self):
        offer = self.get_object()
        requester = self.request.user

        if not offer.answer:
            return False
        return requester == offer.user or requester == offer.answer.user
class ChangeProcessView(ChangeAccessMixin, DetailView):
    """Two-party workflow page for carrying out an accepted trade.

    The offer's creator marks subjects as *started*; the answering party
    then marks them as *completed*. When every line is completed, the
    offer and answer are flagged completed and the flow redirects to the
    success page.
    """

    model = TradeOffer
    template_name = 'trading/change_process.html'

    title = 'Proceso de intercambio - Permutas - DAFI'
    description = 'Sistema de Permutas de la Delegación de Alumnos de la Facultad de Informática'
    image = 'images/favicon.png'

    def dispatch(self, request, *args, **kwargs):
        # Already-finished trades skip straight to the completion page.
        if self.get_object().is_completed:
            return self.redirect_success()

        return super().dispatch(request, *args, **kwargs)

    def get_queryset(self):
        return super().get_queryset().select_related('answer').prefetch_related('lines')

    def get_lines_data(self):
        """Return (line, pending subjects, already-marked subjects) triples.

        The offer's creator works against the full subject list and the
        *started* marks; the other party works against the started list
        and the *completed* marks.
        """
        data = []

        offer = self.get_object()

        for line in offer.lines.all():
            if offer.user == self.request.user:
                marked = line.get_started_list()
                subjects = line.get_subjects()
            else:
                marked = line.get_completed_list()
                subjects = line.get_started()

            if subjects:
                data.append((line, subjects, marked))

        return data

    def get_context_data(self, **kwargs):
        """Expose the lines data and whether the current user's half is done."""
        lines_data = self.get_lines_data()

        completed = True

        if self.request.user == self.get_object().user:
            # Creator's half is done when every line is completed or fully started.
            for line, subjects, _ in self.get_lines_data():
                if line.is_completed:
                    continue
                elif len(line.get_started_list()) != len(subjects):
                    completed = False
                    break
        else:
            completed = False

        context = super().get_context_data(**kwargs)
        context['lines'] = lines_data
        context['completed'] = completed
        return context

    def redirect_success(self):
        return redirect(reverse('trading:change_completed', args=[self.get_object().id]))

    def post(self, request, **kwargs):
        """Record newly-marked subjects for each line.

        Submitted values are validated against the line's subject list and
        de-duplicated against the existing marks before being persisted.
        """
        total_completed = 0

        lines_data = self.get_lines_data()

        for line, _, marked in lines_data:
            if line.is_completed:
                total_completed += 1
                continue

            subjects = line.get_subjects_list()

            add = []

            for subject in request.POST.getlist('{}-subjects'.format(line.i)):
                try:
                    subject = int(subject)
                except ValueError:
                    # Ignore non-numeric form tampering.
                    continue

                if subject not in subjects:
                    continue
                elif subject not in marked:
                    add.append(subject)

            if add:
                marked += add
                # Marks are stored as a comma-separated string.
                marked = ','.join(str(x) for x in marked)

                if self.get_object().user == self.request.user:
                    line.started = marked
                else:
                    line.completed = marked

                    # The answering party completing all subjects closes the line.
                    if len(subjects) == len(line.get_completed_list()):
                        line.is_completed = True
                        total_completed += 1

                line.save()

        if total_completed > 0 and total_completed == len(lines_data):
            # Every line closed: the whole trade is complete.
            offer = self.get_object()
            offer.is_completed = True
            offer.save()

            offer.answer.is_completed = True
            offer.answer.save()

            return self.redirect_success()

        return super().get(request, **kwargs)
class ChangeCompletedView(ChangeAccessMixin, DetailView):
    """Success page shown once a trade has been fully completed."""

    model = TradeOffer
    template_name = 'trading/change_completed.html'

    title = 'Proceso finalizado - Permutas - DAFI'
    description = 'Sistema de Permutas de la Delegación de Alumnos de la Facultad de Informática'
    image = 'images/favicon.png'

    def test_func(self):
        # Parties only (mixin check), and only for finished trades.
        if not super().test_func():
            return False
        return self.get_object().is_completed
4d22960ef5f9a340afb99d07dec3bd573b96f594 | 2,096 | py | Python | lesson 1/parseXLS.py | olive-everest/wrangling_mongodb | f4af9c0e5d6ac9611b2a1b6dc2b2ed46df07bc15 | [
"MIT"
] | 2 | 2020-03-03T23:57:09.000Z | 2020-10-20T03:30:40.000Z | lesson 1/parseXLS.py | olive-everest/wrangling_mongodb | f4af9c0e5d6ac9611b2a1b6dc2b2ed46df07bc15 | [
"MIT"
] | null | null | null | lesson 1/parseXLS.py | olive-everest/wrangling_mongodb | f4af9c0e5d6ac9611b2a1b6dc2b2ed46df07bc15 | [
"MIT"
] | 2 | 2018-09-03T21:48:51.000Z | 2019-05-27T07:45:42.000Z | #!~/envs/python3/udacity_python_mongodb
import xlrd
import pprint
datafile = "datasets/2013_ERCOT_Hourly_Load_Data.xls"

# Smoke test: the known COAST peak for the 2013 dataset.
data = parse_file(datafile)
pprint.pprint(data)
assert data['maxtime'] == (2013, 8, 13, 17, 0, 0)
assert round(data['maxvalue'], 10) == round(18779.02551, 10)
| 27.578947 | 113 | 0.614504 | #!~/envs/python3/udacity_python_mongodb
import xlrd
import pprint
datafile = "datasets/2013_ERCOT_Hourly_Load_Data.xls"
def parse_file(datafile):
    """Summarise the COAST column of the ERCOT hourly-load workbook.

    Returns a dict with the maximum/minimum load values found in column 1
    (COAST), the timestamps at which they occur (as xlrd date tuples), and
    the column average.

    The original also built the entire sheet into a list-of-lists and then
    immediately rebound ``data`` to the result dict, discarding that work;
    the dead computation has been removed.
    """
    workbook = xlrd.open_workbook(datafile)
    sheet = workbook.sheet_by_index(0)

    # Column 1 (COAST), skipping the header row.
    cv = sheet.col_values(1, start_rowx=1, end_rowx=None)

    maxval = max(cv)
    minval = min(cv)

    # +1 converts the list index back into a sheet row (header offset).
    maxpos = cv.index(maxval) + 1
    minpos = cv.index(minval) + 1

    # Column 0 holds Excel serial dates; convert to Python date tuples.
    realtime = xlrd.xldate_as_tuple(sheet.cell_value(maxpos, 0), 0)
    realmintime = xlrd.xldate_as_tuple(sheet.cell_value(minpos, 0), 0)

    return {
        'maxtime': realtime,
        'maxvalue': maxval,
        'mintime': realmintime,
        'minvalue': minval,
        'avgcoast': sum(cv) / float(len(cv)),
    }
# print("\nList Comprehension")
# print("data[3][2]:", data[3][2])
# print("\nCells in a nested loop:")
# for row in range(sheet.nrows):
# for col in range(sheet.ncols):
# if row == 50:
# print(sheet.cell_value(row, col))
# other useful methods:
# print("\nROWS, COLUMNS and CELLS:")
# print("Number of rows in the sheet:", sheet.nrows)
# print("Type of data in cell (row 3, col 2):", sheet.cell_type(3, 2))
# print("Value in cell (row 3, col 2):", sheet.cell_value(3, 2))
# print("Get a slice of values in column 3, from rows 1-3:")
# print(sheet.col_values(3, start_rowx=1, end_rowx=4))
# print("\nDATES:")
# print("Type of data in cell (row 1, col 0):", sheet.cell_type(1, 0))
# exceltime = sheet.cell_value(1, 0)
# print("Time in Excel format:", exceltime)
# print("Convert time to a Python datetime tuple, from the Excel float:", xlrd.xldate_as_tuple(exceltime, 0))
# return data
# Smoke test: the known COAST peak for the 2013 dataset.
data = parse_file(datafile)
pprint.pprint(data)
assert data['maxtime'] == (2013, 8, 13, 17, 0, 0)
assert round(data['maxvalue'], 10) == round(18779.02551, 10)
| 1,787 | 0 | 23 |
f2cc08c2712855bb70c1b2df5f5eceb724bfcb74 | 2,611 | py | Python | LinkedList/NthNode.py | pritsheth/Algorithms-Python | b8af30cbf95a772c9b9b997a30ff2c8b56a040e0 | [
"MIT"
] | null | null | null | LinkedList/NthNode.py | pritsheth/Algorithms-Python | b8af30cbf95a772c9b9b997a30ff2c8b56a040e0 | [
"MIT"
] | null | null | null | LinkedList/NthNode.py | pritsheth/Algorithms-Python | b8af30cbf95a772c9b9b997a30ff2c8b56a040e0 | [
"MIT"
] | null | null | null |
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
l1 = ListNode(1)
l2 = ListNode(6)
l3 = ListNode(2)
l4 = ListNode(5)
l5 = ListNode(4)
l1.next = l2
l2.next = l3
l3.next = l4
l4.next = l5
list = [1,2,3,4]
print("cond",list)
s = Solution()
s.partition(l1, 3)
# s.removeNthFromEnd(l1, 2)
| 20.24031 | 57 | 0.475297 |
class Solution(object):
    """LeetCode 23 solver: merge k sorted linked lists into one."""

    def mergeKLists(self, lists):
        """
        :type lists: List[ListNode]
        :rtype: ListNode
        """
        # Bug fix: the module has no top-level imports, so PriorityQueue
        # raised NameError; import it locally.
        from queue import PriorityQueue

        head = point = ListNode(0)
        q = PriorityQueue()

        # Bug fix: a monotonic tie-breaker is inserted between the value
        # and the node. Without it, equal values made PriorityQueue compare
        # the ListNode objects themselves, raising TypeError (ListNode
        # defines no ordering).
        order = 0
        for l in lists:
            if l:
                q.put((l.val, order, l))
                order += 1

        while not q.empty():
            val, _, node = q.get()
            point.next = ListNode(val)
            point = point.next
            node = node.next
            if node:
                q.put((node.val, order, node))
                order += 1

        return head.next
class ListNode:
    """Singly-linked list node: a payload value plus a next pointer."""

    def __init__(self, x):
        self.val = x
        # Nodes start detached; callers link them via .next assignment.
        self.next = None
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    """Assorted singly-linked-list exercises (LeetCode style)."""

    def removeNthFromEnd(self, head, n):
        """Remove the n-th node from the end using a two-pointer gap.

        NOTE(review): the single-node case returns ``[]`` instead of
        ``None`` — falsy either way, but inconsistent with the ListNode
        return type; confirm callers before changing.
        """
        temp = head
        fast = head
        if head.next == None and n >= 1:
            return []
        # Advance `fast` so it leads `temp` by n-1 nodes.
        while fast.next != None and n > 1:
            fast = fast.next
            n -= 1
        pre = head
        # Walk both pointers; when `fast` hits the tail, `temp` is the target.
        while fast.next != None:
            pre = temp
            temp = temp.next
            fast = fast.next
        if pre != temp:
            pre.next = pre.next.next
        else:
            # Target is the head itself.
            head = head.next
        return head
        # NOTE(review): unreachable string literal below (after return);
        # it was presumably meant to be the method docstring.
        """
        :type head: ListNode
        :type n: int
        :rtype: ListNode
        """

    def printList(self, head):
        """Print each node's value, one per line (debug helper)."""
        while head != None:
            print(head.val)
            head = head.next

    def getMiddleNode(self, head):
        """Return the middle node via slow/fast pointers (prints it too)."""
        slow = head
        fast = head
        while (fast != None and fast.next != None):
            slow = slow.next
            fast = fast.next.next
        print("middle is ", slow.val)
        return slow

    # def sortList(self, head):

    def partition(self, head, x):
        """Move nodes with val > x to the tail, keeping the rest in order.

        NOTE(review): when the head itself satisfies val > x, the moved
        node's old ``next`` pointer can re-enter the kept chain and create
        a cycle (e.g. list 4->1 with x=3) — verify against expected inputs.
        """
        temp = head
        pre = head
        result = ListNode(0)
        answer = result
        if head is None:
            return None
        while (temp != None):
            if temp.val > x:
                # print(temp.val)
                pre.next = temp.next # Deleting the node
                result.next = temp
                result = result.next
                temp = temp.next
                continue
            pre = temp
            temp = temp.next
        # self.printList(pre)
        # Append the >x chain after the last kept node.
        pre.next = answer.next
        return head
# Ad-hoc driver: build the list 1 -> 6 -> 2 -> 5 -> 4 and exercise partition.
l1 = ListNode(1)
l2 = ListNode(6)
l3 = ListNode(2)
l4 = ListNode(5)
l5 = ListNode(4)
l1.next = l2
l2.next = l3
l3.next = l4
l4.next = l5
# NOTE(review): `list` shadows the builtin; only used for the print below.
list = [1,2,3,4]
print("cond",list)
s = Solution()
s.partition(l1, 3)
# s.removeNthFromEnd(l1, 2)
| 1,538 | 494 | 202 |
af764e5dafc2947fe07066ff6030906511929558 | 4,562 | py | Python | language-model/optimizers/.ipynb_checkpoints/SRRAdam-checkpoint.py | minhtannguyen/RAdam | 44f403288df375bae0785cc82dd8c888eaaaa441 | [
"Apache-2.0"
] | null | null | null | language-model/optimizers/.ipynb_checkpoints/SRRAdam-checkpoint.py | minhtannguyen/RAdam | 44f403288df375bae0785cc82dd8c888eaaaa441 | [
"Apache-2.0"
] | null | null | null | language-model/optimizers/.ipynb_checkpoints/SRRAdam-checkpoint.py | minhtannguyen/RAdam | 44f403288df375bae0785cc82dd8c888eaaaa441 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Scheduled restarting RAdam
"""
import math
import torch
from optimizer import Optimizer | 43.447619 | 176 | 0.47523 | # -*- coding: utf-8 -*-
"""
Scheduled restarting RAdam
"""
import math
import torch
from optimizer import Optimizer
class SRRAdam(Optimizer):
    """Scheduled-restarting RAdam optimizer.

    The restart schedule is driven per parameter group by ``iter_count``,
    which cycles in [1, restarting_iter); the effective momentum grows as
    (iter_count - 1) / (iter_count + 2), so each restart resets momentum
    toward zero. Callers advance the schedule via :meth:`update_iter`.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=5e-4, iter_count=1, restarting_iter=50):
        """Set per-group defaults and the shared step-size cache.

        ``buffer`` caches (step, N_sma, step_size) for 10 consecutive steps
        so the rectification term is not recomputed per parameter.
        """
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, iter_count=iter_count, restarting_iter=restarting_iter)
        self.buffer = [[None, None, None] for ind in range(10)]
        super(SRRAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(SRRAdam, self).__setstate__(state)

    def update_iter(self):
        """Advance the restart counter of the first param group by one,
        wrapping back to 1 at ``restarting_iter``.

        Returns the (iter_count, restarting_iter) of the last group.
        NOTE(review): only the first group is incremented (idx == 1 guard);
        presumably all groups share the schedule — confirm.
        """
        idx = 1
        for group in self.param_groups:
            if idx == 1:
                group['iter_count'] += 1
                if group['iter_count'] >= group['restarting_iter']:
                    group['iter_count'] = 1
            idx += 1
        return group['iter_count'], group['restarting_iter']

    def step(self, closure=None):
        """Perform a single optimization step.

        Arguments:
            closure (callable, optional): re-evaluates the model and
                returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            # Restart-scheduled momentum: grows from 0 toward 1 within a cycle.
            momentum = (group['iter_count'] - 1.)/(group['iter_count'] + 2.)
            #momentum = 0.9 # Test this

            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('SRRAdam does not support sparse gradients')

                # Work in fp32 even for fp16 params, copying back at the end.
                p_data_fp32 = p.data.float()

                state = self.state[p]

                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                # Second-moment EMA; first-moment EMA uses the scheduled
                # momentum instead of beta1.
                # NOTE(review): exp_avg (and beta1) are not used by the
                # update below — the step uses grad + momentum_buffer.
                exp_avg_sq.mul_(beta2).addcmul_(1-beta2, grad, grad)
                exp_avg.mul_(momentum).add_(1-momentum, grad)

                state['step'] += 1
                # Cached rectification terms (RAdam), keyed by step mod 10.
                buffered = self.buffer[int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    N_sma, step_size = buffered[1], buffered[2]
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    # Length of the approximated SMA (RAdam rectification).
                    N_sma_max = 2 / (1-beta2) - 1
                    N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
                    buffered[1] = N_sma

                    # More conservative since it's an approximated value
                    if N_sma >= 5:
                        step_size = group['lr'] * math.sqrt((1-beta2_t)*(N_sma-4) / (N_sma_max-4)*(N_sma-2) / N_sma * N_sma_max / (N_sma_max-2)) / (1 - momentum**state['step'])
                    else:
                        step_size = group['lr'] / (1 - momentum ** state['step'])
                    '''
                    if N_sma >= 5:
                        step_size = group['lr'] * math.sqrt((1-beta2_t)*(N_sma-4) / (N_sma_max-4)*(N_sma-2) / N_sma * N_sma_max / (N_sma_max-2)) / (1 - beta1**state['step'])
                    else:
                        step_size = group['lr'] / (1 - beta1 ** state['step'])
                    '''
                    buffered[2] = step_size

                # Heavy-ball momentum buffer on the raw gradient.
                if 'momentum_buffer' not in state:
                    buf = state['momentum_buffer'] = torch.clone(grad).detach()
                else:
                    buf = state['momentum_buffer']
                    buf.mul_(momentum).add_(1., grad)
                grad = grad.add(momentum, buf)

                if weight_decay != 0:
                    # Decoupled weight decay (applied directly to the weights).
                    p_data_fp32.add_(-weight_decay*group['lr'], p_data_fp32)

                if N_sma >= 5:
                    # Rectified adaptive update.
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(-step_size, grad, denom) # TODO: grad or exp_avg?
                else:
                    # SGD-with-momentum fallback while variance is untrustworthy.
                    p_data_fp32.add_(-step_size, grad)

                p.data.copy_(p_data_fp32)

        return loss
69cc881c3132317dc99b291b997c426537752b21 | 591 | py | Python | _Dist/NeuralNetworks/_Tests/_UnitTests/UnitTestUtil.py | leoatchina/MachineLearning | 071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3 | [
"MIT"
] | 1,107 | 2016-09-21T02:18:36.000Z | 2022-03-29T02:52:12.000Z | _Dist/NeuralNetworks/_Tests/_UnitTests/UnitTestUtil.py | leoatchina/MachineLearning | 071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3 | [
"MIT"
] | 18 | 2016-12-22T10:24:47.000Z | 2022-03-11T23:18:43.000Z | _Dist/NeuralNetworks/_Tests/_UnitTests/UnitTestUtil.py | leoatchina/MachineLearning | 071f2c0fc6f5af3d9550cfbeafe8d537c35a76d3 | [
"MIT"
] | 776 | 2016-12-21T12:08:08.000Z | 2022-03-21T06:12:08.000Z | import os
import shutil
root_cwd = os.path.abspath("../")
| 31.105263 | 69 | 0.700508 | import os
import shutil
root_cwd = os.path.abspath("../")
def clear_cache():
    """Remove folders generated by the unit tests.

    Deletes ``_Data`` and ``_Models`` relative to the current working
    directory, plus ``_Data/_Cache`` and ``_Data/_DataInfo`` under the
    repository root (``root_cwd``). Missing folders are skipped.
    """
    cwd = os.getcwd()

    local_data_folder = os.path.join(cwd, "_Data")
    if os.path.isdir(local_data_folder):
        shutil.rmtree(local_data_folder)

    # Bug fix: the original removed _Models unconditionally and crashed
    # with FileNotFoundError when a test never created it; guard it like
    # every other folder here.
    local_models_folder = os.path.join(cwd, "_Models")
    if os.path.isdir(local_models_folder):
        shutil.rmtree(local_models_folder)

    remote_cache_folder = os.path.join(root_cwd, "_Data", "_Cache")
    remote_info_folder = os.path.join(root_cwd, "_Data", "_DataInfo")
    if os.path.isdir(remote_cache_folder):
        shutil.rmtree(remote_cache_folder)
    if os.path.isdir(remote_info_folder):
        shutil.rmtree(remote_info_folder)
| 508 | 0 | 23 |
6f5491579cffa286e720c032978958a0958d8bfb | 7,293 | py | Python | cryptonote/classes/wallet/address.py | kayabaNerve/cryptonote-library | 00bef43527172cffa75c2445430f36ceb80fbcb6 | [
"MIT"
] | 8 | 2020-11-08T07:13:43.000Z | 2021-09-20T11:14:23.000Z | cryptonote/classes/wallet/address.py | kayabaNerve/cryptonote-library | 00bef43527172cffa75c2445430f36ceb80fbcb6 | [
"MIT"
] | 7 | 2020-11-08T04:23:32.000Z | 2022-03-24T04:26:13.000Z | cryptonote/classes/wallet/address.py | kayabaNerve/cryptonote-library | 00bef43527172cffa75c2445430f36ceb80fbcb6 | [
"MIT"
] | 5 | 2020-11-08T06:09:38.000Z | 2021-05-10T17:52:24.000Z | """Address file. Handles address encoding and decoding."""
# Types.
from typing import Tuple, Optional, Any
# Keccak hash function.
from Cryptodome.Hash import keccak
# Crypto class.
from cryptonote.crypto.crypto import Crypto
# Base58 Character Set.
BASE58: str = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
# AddressError.
class AddressError(Exception):
    """AddressError Exception. Used when an invalid address is parsed.

    Raised by ``Address.parse`` on malformed input, a failed checksum,
    or an unrecognised network byte.
    """
# Address class.
class Address:
    """Contains address info and the serialized address.

    NOTE(review): defining ``__eq__`` without ``__hash__`` makes Address
    unhashable — confirm instances are never used as dict keys/set members.
    """

    def __init__(
        self,
        crypto: Crypto,
        key_pair: Tuple[bytes, bytes],
        payment_id: Optional[bytes] = None,
        network_byte: Optional[bytes] = None,
        address: Optional[str] = None,
    ) -> None:
        """Converts a ViewKey and a SpendKey into an address.

        key_pair is (view_key, spend_key), each 32 bytes. When *address*
        is supplied (by :meth:`parse`), it is validated and stored as-is;
        otherwise the Base58 form is encoded here.
        """

        # Verify the data lengths
        if len(crypto.network_bytes) not in {2, 3}:
            raise Exception("Invalid network bytes.")
        if (len(key_pair[0]) != 32) or (len(key_pair[1]) != 32):
            raise Exception("Invalid key pair length.")
        if (payment_id is not None) and (
            len(payment_id) not in crypto.payment_id_lengths
        ):
            raise Exception("Invalid payment ID.")

        self.network: bytes
        self.view_key: bytes = key_pair[0]
        self.spend_key: bytes = key_pair[1]
        self.payment_id: Optional[bytes] = payment_id

        # If we were passed in an address, verify it against the regex.
        if address is not None:
            # Require a network byte was also specified.
            if network_byte is None:
                raise Exception("Address parsed without a specified network byte.")
            if (not crypto.address_regex.match(address)) and (
                not crypto.integrated_address_regex.match(address)
            ):
                raise Exception("Invalid address used in constructor override.")

            # Set the network byte, address type, and address. Then return.
            self.network = network_byte
            self.address: str = address
            return

        # If there's a payment ID, set the network byte to integrated address.
        # Else, set it to subaddress if there is a subaddress byte.
        # Else, set it to regular address.
        if self.payment_id is not None:
            self.network = crypto.network_bytes[1]
        else:
            if len(crypto.network_bytes) == 3:
                self.network = crypto.network_bytes[2]
            else:
                self.network = crypto.network_bytes[0]

        # If a network byte was specified, despite an address not being specified, use that.
        if network_byte is not None:
            self.network = network_byte
        if self.network not in crypto.network_bytes:
            raise Exception("Address doesn't have a valid network byte.")

        # Get the data to be encoded.
        # Layout: network byte | [payment ID] | spend key | view key |
        # [payment ID], depending on whether the coin puts the payment ID
        # before or after the keys.
        data: bytes = self.network
        if (self.payment_id is not None) and crypto.payment_id_leading:
            data += self.payment_id
        data += self.spend_key + self.view_key
        if (self.payment_id is not None) and (not crypto.payment_id_leading):
            data += self.payment_id

        # Add the checksum (first 4 bytes of the Keccak-256 of the payload).
        checksum_hash: Any = keccak.new(digest_bits=256)
        checksum_hash.update(data)
        data += checksum_hash.digest()[0:4]

        # Convert the bytes to Base58 (CryptoNote block-wise variant:
        # 8-byte blocks encode to 11 chars, the 5-byte tail to 7).
        result: str = ""
        for i in range(0, len(data), 8):
            block: bytes = data[i : i + 8]
            blockInt: int = int.from_bytes(block, byteorder="big")
            blockStr: str = ""

            remainder: int
            while blockInt > 0:
                remainder = blockInt % 58
                blockInt = blockInt // 58
                blockStr += BASE58[remainder]

            # Pad the block as needed.
            if len(block) == 8:
                while len(blockStr) < 11:
                    blockStr += BASE58[0]
            elif len(block) == 5:
                while len(blockStr) < 7:
                    blockStr += BASE58[0]

            result += blockStr[::-1]

        # Set the address.
        self.address: str = result

    @staticmethod
    def parse(crypto: Crypto, address: str) -> Any:
        """
        Parse an address and extract the contained info.
        Raises AddressError if it fails to parse the address.
        """

        # Check the address against the regex.
        if (not crypto.address_regex.match(address)) and (
            not crypto.integrated_address_regex.match(address)
        ):
            raise AddressError("Invalid address.")

        # Convert the Base58 to bytes (inverse of the block-wise encoding
        # in __init__: 11-char blocks decode to 8 bytes, 7-char to 5).
        data: bytes = bytes()
        for i in range(0, len(address), 11):
            blockStr: str = address[i : i + 11]

            blockInt: int = 0
            multi = 1
            for char in blockStr[::-1]:
                blockInt += multi * BASE58.index(char)
                multi = multi * 58

            if len(blockStr) == 11:
                data += blockInt.to_bytes(8, byteorder="big")
            elif len(blockStr) == 7:
                data += blockInt.to_bytes(5, byteorder="big")

        # Extract the payment ID and checksum.
        payment_id: Optional[bytes]
        if crypto.payment_id_leading:
            payment_id = data[crypto.network_byte_length : -68]
        else:
            payment_id = data[(crypto.network_byte_length + 64) : -4]
        if not payment_id:
            payment_id = None
        checksum: bytes = data[-4:]

        # Check the checksum.
        checksum_hash: Any = keccak.new(digest_bits=256)
        checksum_hash.update(data[0:-4])
        if checksum_hash.digest()[0:4] != checksum:
            raise AddressError("Invalid address checksum.")

        # Verify the network byte is valid.
        # A payment ID is only legal with the integrated-address byte.
        network_byte: bytes = data[0 : crypto.network_byte_length]
        if (network_byte not in crypto.network_bytes) or (
            (payment_id is not None) and (network_byte != crypto.network_bytes[1])
        ):
            raise AddressError("Address doesn't have a valid network byte.")

        # Return the Address.
        view_key: bytes
        spend_key: bytes
        if crypto.payment_id_leading:
            view_key = data[-36:-4]
            spend_key = data[-68:-36]
        else:
            view_key = data[
                (crypto.network_byte_length + 32) : (crypto.network_byte_length + 64)
            ]
            spend_key = data[
                crypto.network_byte_length : (crypto.network_byte_length + 32)
            ]
        return Address(
            crypto,
            (view_key, spend_key),
            payment_id,
            network_byte,
            address,
        )

    def __eq__(self, other: Any) -> bool:
        """Equality operator. Used by the tests."""

        if (
            (not isinstance(other, Address))
            or (self.network != other.network)
            or (self.view_key != other.view_key)
            or (self.spend_key != other.spend_key)
            or (self.payment_id != other.payment_id)
            or (self.address != other.address)
        ):
            return False
        return True
| 34.728571 | 92 | 0.574386 | """Address file. Handles address encoding and decoding."""
# Types.
from typing import Tuple, Optional, Any
# Keccak hash function.
from Cryptodome.Hash import keccak
# Crypto class.
from cryptonote.crypto.crypto import Crypto
# Base58 Character Set.
BASE58: str = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz"
# AddressError.
class AddressError(Exception):
"""AddressError Exception. Used when an invalid address is parsed."""
# Address class.
class Address:
"""Contains address info and the serialized address."""
def __init__(
self,
crypto: Crypto,
key_pair: Tuple[bytes, bytes],
payment_id: Optional[bytes] = None,
network_byte: Optional[bytes] = None,
address: Optional[str] = None,
) -> None:
"""Converts a ViewKey and a SpendKey into an address."""
# Verify the data lengths
if len(crypto.network_bytes) not in {2, 3}:
raise Exception("Invalid network bytes.")
if (len(key_pair[0]) != 32) or (len(key_pair[1]) != 32):
raise Exception("Invalid key pair length.")
if (payment_id is not None) and (
len(payment_id) not in crypto.payment_id_lengths
):
raise Exception("Invalid payment ID.")
self.network: bytes
self.view_key: bytes = key_pair[0]
self.spend_key: bytes = key_pair[1]
self.payment_id: Optional[bytes] = payment_id
# If we were passed in an address, verify it against the regex.
if address is not None:
# Require a network byte was also specified.
if network_byte is None:
raise Exception("Address parsed without a specified network byte.")
if (not crypto.address_regex.match(address)) and (
not crypto.integrated_address_regex.match(address)
):
raise Exception("Invalid address used in constructor override.")
# Set the network byte, address type, and address. Then return.
self.network = network_byte
self.address: str = address
return
# If there's a payment ID, set the network byte to integrated address.
# Else, set it to subaddress if there is a subaddress byte.
# Else, set it to regular address.
if self.payment_id is not None:
self.network = crypto.network_bytes[1]
else:
if len(crypto.network_bytes) == 3:
self.network = crypto.network_bytes[2]
else:
self.network = crypto.network_bytes[0]
# If a network byte was specified, despite an address not being specified, use that.
if network_byte is not None:
self.network = network_byte
if self.network not in crypto.network_bytes:
raise Exception("Address doesn't have a valid network byte.")
# Get the data to be encoded.
data: bytes = self.network
if (self.payment_id is not None) and crypto.payment_id_leading:
data += self.payment_id
data += self.spend_key + self.view_key
if (self.payment_id is not None) and (not crypto.payment_id_leading):
data += self.payment_id
# Add the checksum.
checksum_hash: Any = keccak.new(digest_bits=256)
checksum_hash.update(data)
data += checksum_hash.digest()[0:4]
# Convert the bytes to Base58.
result: str = ""
for i in range(0, len(data), 8):
block: bytes = data[i : i + 8]
blockInt: int = int.from_bytes(block, byteorder="big")
blockStr: str = ""
remainder: int
while blockInt > 0:
remainder = blockInt % 58
blockInt = blockInt // 58
blockStr += BASE58[remainder]
# Pad the block as needed.
if len(block) == 8:
while len(blockStr) < 11:
blockStr += BASE58[0]
elif len(block) == 5:
while len(blockStr) < 7:
blockStr += BASE58[0]
result += blockStr[::-1]
# Set the address.
self.address: str = result
@staticmethod
def parse(crypto: Crypto, address: str) -> Any:
"""
Parse an address and extract the contained info.
Raises AddressError if it fails to parse the address.
"""
# Check the address against the regex.
if (not crypto.address_regex.match(address)) and (
not crypto.integrated_address_regex.match(address)
):
raise AddressError("Invalid address.")
# Convert the Base58 to bytes.
data: bytes = bytes()
for i in range(0, len(address), 11):
blockStr: str = address[i : i + 11]
blockInt: int = 0
multi = 1
for char in blockStr[::-1]:
blockInt += multi * BASE58.index(char)
multi = multi * 58
if len(blockStr) == 11:
data += blockInt.to_bytes(8, byteorder="big")
elif len(blockStr) == 7:
data += blockInt.to_bytes(5, byteorder="big")
# Extract the payment ID and checksum.
payment_id: Optional[bytes]
if crypto.payment_id_leading:
payment_id = data[crypto.network_byte_length : -68]
else:
payment_id = data[(crypto.network_byte_length + 64) : -4]
if not payment_id:
payment_id = None
checksum: bytes = data[-4:]
# Check the checksum.
checksum_hash: Any = keccak.new(digest_bits=256)
checksum_hash.update(data[0:-4])
if checksum_hash.digest()[0:4] != checksum:
raise AddressError("Invalid address checksum.")
# Verify the network byte is valid.
network_byte: bytes = data[0 : crypto.network_byte_length]
if (network_byte not in crypto.network_bytes) or (
(payment_id is not None) and (network_byte != crypto.network_bytes[1])
):
raise AddressError("Address doesn't have a valid network byte.")
# Return the Address.
view_key: bytes
spend_key: bytes
if crypto.payment_id_leading:
view_key = data[-36:-4]
spend_key = data[-68:-36]
else:
view_key = data[
(crypto.network_byte_length + 32) : (crypto.network_byte_length + 64)
]
spend_key = data[
crypto.network_byte_length : (crypto.network_byte_length + 32)
]
return Address(
crypto,
(view_key, spend_key),
payment_id,
network_byte,
address,
)
def __eq__(self, other: Any) -> bool:
"""Equality operator. Used by the tests."""
if (
(not isinstance(other, Address))
or (self.network != other.network)
or (self.view_key != other.view_key)
or (self.spend_key != other.spend_key)
or (self.payment_id != other.payment_id)
or (self.address != other.address)
):
return False
return True
def __str__(self):
return self.address
| 25 | 0 | 27 |