hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aa063c8fe24a2414d08f79391cfa01c347db328a | 655 | py | Python | sitewebapp/migrations/0014_auto_20210130_0425.py | deucaleon18/debsoc-nitdgp-website | 41bd6ade7f4af143ef34aff01848f830cc533add | [
"MIT"
] | 2 | 2020-12-05T05:34:56.000Z | 2020-12-09T10:27:43.000Z | sitewebapp/migrations/0014_auto_20210130_0425.py | deucaleon18/debsoc-nitdgp-website | 41bd6ade7f4af143ef34aff01848f830cc533add | [
"MIT"
] | 3 | 2021-06-28T16:47:23.000Z | 2021-06-28T16:48:51.000Z | sitewebapp/migrations/0014_auto_20210130_0425.py | deucaleon18/debsoc-nitdgp-website | 41bd6ade7f4af143ef34aff01848f830cc533add | [
"MIT"
] | 9 | 2021-01-29T17:06:30.000Z | 2021-08-21T18:23:26.000Z | # Generated by Django 2.2.15 on 2021-01-29 22:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Drop and re-create ``auditionrounds.candidate`` as a nullable FK.

    The field is removed and re-added (rather than altered) so it can be
    recreated with the new options below.
    """
    dependencies = [
        ('sitewebapp', '0013_auto_20210130_0409'),
    ]
    operations = [
        # Remove the existing column first...
        migrations.RemoveField(
            model_name='auditionrounds',
            name='candidate',
        ),
        # ...then re-add it as an optional FK; deleting the referenced
        # Candidates row cascades to the audition round.
        migrations.AddField(
            model_name='auditionrounds',
            name='candidate',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='candidates', to='sitewebapp.Candidates'),
        ),
    ]
| 27.291667 | 159 | 0.638168 | 528 | 0.806107 | 0 | 0 | 0 | 0 | 0 | 0 | 174 | 0.265649 |
aa071d641c21c8126205d398a23afa6631e2eb94 | 1,144 | py | Python | tests/micropython/heapalloc_exc_compressed.py | andihoff98/micropython | b0ef4364db9a75f761adf07c0f300cb0704fdf19 | [
"MIT"
] | 2 | 2022-03-10T03:17:48.000Z | 2022-03-10T03:17:58.000Z | tests/micropython/heapalloc_exc_compressed.py | andihoff98/micropython | b0ef4364db9a75f761adf07c0f300cb0704fdf19 | [
"MIT"
] | 19 | 2022-01-31T20:43:43.000Z | 2022-03-30T18:26:37.000Z | tests/micropython/heapalloc_exc_compressed.py | andihoff98/micropython | b0ef4364db9a75f761adf07c0f300cb0704fdf19 | [
"MIT"
] | 1 | 2022-03-30T18:38:08.000Z | 2022-03-30T18:38:08.000Z | import micropython
# Tests both code paths for built-in exception raising.
# mp_obj_new_exception_msg_varg (exception requires decompression at raise-time to format)
# mp_obj_new_exception_msg (decompression can be deferred)
# NameError uses mp_obj_new_exception_msg_varg for NameError("name '%q' isn't defined")
# `raise 0` uses mp_obj_new_exception_msg for TypeError("exceptions must derive from BaseException")
# Tests that deferred decompression works both via print(e) and accessing the message directly via e.args.
# First test the regular case (can use heap for allocating the decompression buffer).
try:
    name()  # undefined name -> NameError, the varg (format-at-raise) path
except NameError as e:
    print(type(e).__name__, e)
try:
    raise 0  # raising a non-exception -> TypeError, the deferred-message path
except TypeError as e:
    print(type(e).__name__, e)
# Same two cases again, but trigger decompression via e.args instead of print.
try:
    name()
except NameError as e:
    print(e.args[0])
try:
    raise 0
except TypeError as e:
    print(e.args[0])
# Then test that it still works when the heap is locked (i.e. in ISR context).
micropython.heap_lock()
try:
    name()
except NameError as e:
    # only the exception type is printed here, not the message text
    print(type(e).__name__)
try:
    raise 0
except TypeError as e:
    print(type(e).__name__)
micropython.heap_unlock()
| 24.340426 | 106 | 0.740385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 659 | 0.576049 |
aa07eb294a968be138419270910fe3769b70aebc | 2,464 | py | Python | scripts/plot/klt_track_length.py | raphaelchang/omni_slam_eval | 7df7d76c520c1325ac4f1a85f87b7af07d9628c3 | [
"MIT"
] | 7 | 2020-06-15T01:04:10.000Z | 2021-12-15T03:49:05.000Z | scripts/plot/klt_track_length.py | raphaelchang/omni_slam_eval | 7df7d76c520c1325ac4f1a85f87b7af07d9628c3 | [
"MIT"
] | null | null | null | scripts/plot/klt_track_length.py | raphaelchang/omni_slam_eval | 7df7d76c520c1325ac4f1a85f87b7af07d9628c3 | [
"MIT"
] | 4 | 2020-06-15T16:02:12.000Z | 2021-10-12T07:18:47.000Z | import h5py
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas
import os
from parse import parse
import argparse
parser = argparse.ArgumentParser(description='Plot tracking evaluation results')
parser.add_argument('results_path', help='tracking results file or working directory')
args = parser.parse_args()
sns.set()
fovs = []
for yaml in os.listdir(args.results_path):
if not os.path.isdir(os.path.join(args.results_path, yaml)) and yaml.endswith('.yaml'):
fov = os.path.splitext(os.path.basename(yaml))[0]
fovs.append(fov)
fovs.sort(key=int)
motion_map = {'yaw': 'Yaw/pitch', 'roll': 'Roll', 'strafe_side': 'Sideways translate', 'strafe_forward': 'Forward translate', 'strafe_back': 'Backward translate', 'composite': 'Composite'}
df = pandas.DataFrame()
for motion in os.listdir(args.results_path):
if os.path.isdir(os.path.join(args.results_path, motion)):
bag_dir = os.path.join(args.results_path, motion)
for fovstr in fovs:
track_lengths = np.empty(shape=(1,0))
for filename in os.listdir(bag_dir):
if filename.split('.')[1] == fovstr and filename.endswith('.tracking.hdf5'):
results_file = os.path.join(bag_dir, filename)
with h5py.File(results_file, 'r') as f:
attrs = dict(f['attributes'].attrs.items())
rate = int(attrs['rate'])
if rate > 1:
continue
tl = f['track_lengths'][:]
track_lengths = np.hstack((track_lengths, tl[:, tl[0, :] > 2]))
file_exists = True
if file_exists:
if motion in motion_map:
motion = motion_map[motion]
df = df.append(pandas.DataFrame({'Motion': motion, 'FOV': [fovstr for i in range(len(track_lengths[0]))], 'Track lifetime (frames)': track_lengths[0, :]}))
latex = ''
for _, motion in motion_map.iteritems():
latex += motion
for fov in fovs:
latex += ' & '
rows = df.loc[(df['Motion'] == motion) & (df['FOV'] == fov)]
latex += '{} & {:.2f} & {:.2f} & {:.2f} & {:.2f} \\\\'.format(fov, rows['Track lifetime (frames)'].mean(), rows['Track lifetime (frames)'].median(), rows['Track lifetime (frames)'].quantile(0.75), rows['Track lifetime (frames)'].std())
print latex
latex = ''
| 44 | 243 | 0.590909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 510 | 0.206981 |
aa081249580e88b0253e91416aa676f87663a26d | 4,166 | py | Python | jtlib/test_client.py | bminard/jtlib | 0d3d83308bc4ed67bdf1732e5b0602eb8c48f53f | [
"BSD-2-Clause"
] | null | null | null | jtlib/test_client.py | bminard/jtlib | 0d3d83308bc4ed67bdf1732e5b0602eb8c48f53f | [
"BSD-2-Clause"
] | null | null | null | jtlib/test_client.py | bminard/jtlib | 0d3d83308bc4ed67bdf1732e5b0602eb8c48f53f | [
"BSD-2-Clause"
] | null | null | null | # -*-coding:Utf-8 -*
#--------------------------------------------------------------------------------
# jtlib: test_client.py
#
# jtlib module client test code.
#--------------------------------------------------------------------------------
# BSD 2-Clause License
#
# Copyright (c) 2018, Brian Minard
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#--------------------------------------------------------------------------------
import jtlib.client as client
import pytest
import re
@pytest.fixture(scope='session')
def timeout():
    """Connection timeout (in seconds) for HTTP/HTTPS requests.

    See http://docs.python-requests.org/en/master/user/quickstart/#timeouts
    """
    return 5.0
# NOTE(review): this definition is shadowed by an identical redefinition of
# test_client_without_url_argument further down in this module, so this copy
# is never collected by pytest; consider removing the duplicate.
def test_client_without_url_argument(timeout):
    """Check client when no server URL argument is provided."""
    with pytest.raises(client.InvalidUrl) as excinfo:
        client.Jira(str(), timeout=timeout)
# NOTE(review): this definition is shadowed by an identical redefinition of
# test_client_with_invalid_url_argument further down in this module, so this
# copy is never collected by pytest; consider removing the duplicate.
def test_client_with_invalid_url_argument(timeout):
    """Check client when an invalid server URL argument is provided."""
    with pytest.raises(client.InvalidUrl) as excinfo:
        client.Jira('https://www.example.com', timeout=timeout)
def test_client_without_url_argument(timeout):
    """An empty server URL must be rejected with InvalidUrl.

    In most cases, this is going to look for a Jira server on the local host.
    """
    with pytest.raises(client.InvalidUrl):
        client.Jira(str(), timeout=timeout)
def test_client_with_invalid_url_argument(timeout):
    """A URL that does not host a Jira server must raise InvalidUrl."""
    with pytest.raises(client.InvalidUrl):
        client.Jira('https://www.example.com', timeout=timeout)
def test_client_with_valid_url_argument():
    """A reachable Jira server URL must be accepted without raising."""
    client.Jira('https://jira.atlassian.com')
@pytest.fixture(scope='module')
def the_client():
    """Jira client pointed at the public Atlassian instance."""
    return client.Jira('https://jira.atlassian.com')
@pytest.fixture(scope='session')
def project_key_regex():
    """Pattern matching a default project key for JIRA Server 7.1."""
    return re.compile(r"^[A-Z][A-Z]+$")
def test_client_projects_method(the_client, project_key_regex):
    """Every returned project must expose a key in the expected format."""
    projects = the_client.projects()
    assert isinstance(projects, list)
    for proj in projects:
        assert project_key_regex.match(proj.key)
def test_client_issue_method(the_client):
    """A known issue can be fetched by its key."""
    fetched = the_client.issue('CLOUD-10000')
    assert 'CLOUD-10000' in fetched.key
def test_client_search_method(the_client):
    """Walk the paginated search results (roughly two pages' worth)."""
    seen = 0
    for _ in the_client.search('PROJECT = CLOUD'):
        if seen > (client.Jira.maximum_search_results + 1):  # two pages retrieved
            break
        seen += 1
    assert seen > 0, "JIRA project has too few issues to test search algorithm."
aa081c36750a5e0ea22144ff5ba5a1f43d5725af | 3,785 | py | Python | corsair/ibm/qradar/__init__.py | forkd/corsair | c854aed4b8b7f5d4b29610fc2b65881db6a0fb8f | [
"MIT"
] | 7 | 2019-05-01T00:04:02.000Z | 2019-10-04T18:22:59.000Z | corsair/ibm/qradar/__init__.py | lopes/corsair | c854aed4b8b7f5d4b29610fc2b65881db6a0fb8f | [
"MIT"
] | null | null | null | corsair/ibm/qradar/__init__.py | lopes/corsair | c854aed4b8b7f5d4b29610fc2b65881db6a0fb8f | [
"MIT"
] | 3 | 2019-05-04T04:05:15.000Z | 2020-01-20T17:29:30.000Z | import urllib.request
from urllib.parse import urlencode
from json import loads
from socket import timeout
from ssl import _create_unverified_context
from corsair import *
class Api(object):
    """Top-level handle on a QRadar REST API instance.

    One attribute is created per API area (siem, ariel, config, ...),
    each backed by an Endpoint sharing the same credentials.
    """

    _ENDPOINT_NAMES = (
        'analytics', 'ariel', 'asset_model', 'auth', 'config',
        'data_classification', 'forensics', 'gui_app_framework', 'help',
        'qrm', 'reference_data', 'scanner', 'services', 'siem',
        'staged_config', 'system',
    )

    def __init__(self, base_url, auth, tls_verify=True):
        # strip a single trailing slash, if present
        self.base_url = base_url[:-1] if base_url[-1] == '/' else base_url
        self.auth = auth
        self.tls_verify = tls_verify
        self.credentials = (self.base_url, self.auth, self.tls_verify)
        # NOTE: 'auth' is also an endpoint name, so self.auth (the token) is
        # replaced by an Endpoint object below; the token itself remains
        # available through self.credentials.
        for name in self._ENDPOINT_NAMES:
            setattr(self, name, Endpoint(self.credentials, name))
class Endpoint(object):
    """One top-level QRadar API endpoint (e.g. 'siem', 'ariel')."""

    def __init__(self, credentials, endpoint):
        base_url, auth, tls_verify = credentials
        self.base_url = base_url
        self.endpoint = endpoint
        self.resource = ''
        self.auth = auth
        self.tls_verify = tls_verify

    def _request(self, resource):
        # Remember the last resource and build the request object for it.
        self.resource = resource
        return Request(make_url(self.base_url, self.endpoint, self.resource),
                       self.auth, self.tls_verify)

    def create(self, _resource, **filters):
        """POST to *_resource*; return the decoded JSON body on HTTP 201."""
        res = self._request(_resource).post(**filters)
        if res.status != 201:
            raise CorsairError('Could not create requisition')
        return loads(res.read())

    def read(self, _resource, **filters):
        """GET *_resource*; return {'results': ..., 'range': ...} on HTTP 200."""
        req = self._request(_resource)
        try:
            res = req.get(**filters)
        except timeout:
            raise CorsairError('Operation timedout')
        if res.status != 200:
            raise CorsairError('Not found')
        # Content-Range header (when present) carries the pagination window.
        crange = res.headers['Content-Range'].split(' ')[1] \
            if 'Content-Range' in res.headers else None
        return {'results': loads(res.read()), 'range': crange}
class Request(object):
    """Single authenticated HTTP request against a QRadar API URL."""

    def __init__(self, url, auth, tls_verify):
        self.url = url
        self.timeout = TIMEOUT
        # tls_verify=False disables certificate checking entirely
        self.context = _create_unverified_context() if not tls_verify else None
        self.headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'Version': '8.0',
            'SEC': auth
        }

    def _open(self, method, filters):
        target = f'{self.url}?{urlencode(filters)}' if filters else self.url
        req = urllib.request.Request(target, headers=self.headers, method=method)
        return urllib.request.urlopen(req, timeout=self.timeout, context=self.context)

    def get(self, **filters):
        # A 'Range' filter is promoted to a header instead of a query
        # parameter. NOTE(review): it persists in self.headers for any later
        # calls on this Request instance.
        if 'Range' in filters:
            self.headers['Range'] = filters.pop('Range')
        return self._open('GET', filters)

    def post(self, **filters):
        return self._open('POST', filters)
| 40.265957 | 86 | 0.632497 | 3,602 | 0.951651 | 0 | 0 | 0 | 0 | 0 | 0 | 470 | 0.124174 |
aa0908d204cc1bd792f22cb281757498ea7ff6d8 | 129 | py | Python | coop/guide/context_processors.py | jalibras/coop | cb94560eb4a25eca3e241551e01eea6e3d4e3b6b | [
"Apache-2.0"
] | 1 | 2017-01-16T10:51:15.000Z | 2017-01-16T10:51:15.000Z | coop/guide/context_processors.py | jalibras/coop | cb94560eb4a25eca3e241551e01eea6e3d4e3b6b | [
"Apache-2.0"
] | null | null | null | coop/guide/context_processors.py | jalibras/coop | cb94560eb4a25eca3e241551e01eea6e3d4e3b6b | [
"Apache-2.0"
] | null | null | null |
from guide.models import Area
def nav(request):
    """Context processor exposing every Area to all templates as 'areas'."""
    return {'areas': Area.objects.all()}
| 11.727273 | 30 | 0.565891 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.054264 |
aa0bad3348ebb20b51fc55cdea73057560ae7c2d | 1,009 | py | Python | python/ccxt/async_support/bitmax.py | mariuszskon/ccxt | 13253de7346e33cd384f79abf7dfb64dcbfdc35f | [
"MIT"
] | 4 | 2021-09-24T09:18:36.000Z | 2022-03-15T16:47:09.000Z | python/ccxt/async_support/bitmax.py | mariuszskon/ccxt | 13253de7346e33cd384f79abf7dfb64dcbfdc35f | [
"MIT"
] | 1 | 2017-10-28T14:35:08.000Z | 2017-10-28T14:35:08.000Z | python/ccxt/async_support/bitmax.py | mariuszskon/ccxt | 13253de7346e33cd384f79abf7dfb64dcbfdc35f | [
"MIT"
] | 3 | 2018-10-17T09:29:29.000Z | 2019-03-12T09:18:42.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.ascendex import ascendex
class bitmax(ascendex):
def describe(self):
return self.deep_extend(super(bitmax, self).describe(), {
'id': 'bitmax',
'name': 'BitMax',
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/66820319-19710880-ef49-11e9-8fbe-16be62a11992.jpg',
'api': 'https://bitmax.io',
'test': 'https://bitmax-test.io',
'www': 'https://bitmax.io',
'doc': [
'https://bitmax-exchange.github.io/bitmax-pro-api/#bitmax-pro-api-documentation',
],
'fees': 'https://bitmax.io/#/feeRate/tradeRate',
'referral': 'https://bitmax.io/#/register?inviteCode=EL6BXBQM',
},
})
| 37.37037 | 126 | 0.557978 | 776 | 0.769078 | 0 | 0 | 0 | 0 | 0 | 0 | 583 | 0.5778 |
aa0beaaea8f9d9ab2689ecc5856703bdd0686fc3 | 1,625 | py | Python | path_to_root.py | Wsoukkachang/Python | 931b89d5ce5d4e07654403c3baa63ef0f2dae8b4 | [
"MIT"
] | null | null | null | path_to_root.py | Wsoukkachang/Python | 931b89d5ce5d4e07654403c3baa63ef0f2dae8b4 | [
"MIT"
] | null | null | null | path_to_root.py | Wsoukkachang/Python | 931b89d5ce5d4e07654403c3baa63ef0f2dae8b4 | [
"MIT"
] | null | null | null | from queue import Queue
def convert_arr_to_binary_tree(arr):
    """
    Build a binary tree from *arr*, the level-order traversal of the tree.

    ``None`` entries mark missing children. An empty array or a first
    element of ``-1`` yields an empty tree.

    :param arr: list of node values in level order with None placeholders
    :return: root BinaryTreeNode, or None for an empty tree
    """
    length = len(arr)
    if length <= 0 or arr[0] == -1:
        return None
    root = BinaryTreeNode(arr[0])
    index = 1
    queue = Queue()
    queue.put(root)
    while not queue.empty():
        current_node = queue.get()
        # BUG FIX: guard the index — arrays that omit trailing None
        # placeholders used to raise IndexError here; a missing entry now
        # simply means "no child".
        left_child = arr[index] if index < length else None
        index += 1
        if left_child is not None:
            left_node = BinaryTreeNode(left_child)
            current_node.left = left_node
            queue.put(left_node)
        right_child = arr[index] if index < length else None
        index += 1
        if right_child is not None:
            right_node = BinaryTreeNode(right_child)
            current_node.right = right_node
            queue.put(right_node)
    return root
def path_from_root_to_node(root, data):
    """
    Return the root-to-node path for the node holding *data*.

    Assuming data as input to find the node.
    The solution can be easily changed to find a node instead of data.

    :param root: root node of the tree
    :param data: value to locate
    :return: list of values from the root down to the node, or None when
             *data* is not present in the tree
    """
    output = path_from_node_to_root(root, data)
    if output is None:
        # BUG FIX: previously fell through to reversed(None) and raised
        # TypeError when the value was absent; report "not found" instead.
        return None
    return list(reversed(output))
def path_from_node_to_root(root, data):
    """Return the path (node values) from the node holding *data* up to
    *root*, or None when *data* does not occur in the tree.

    The left subtree is searched before the right one.
    """
    if root is None:
        return None
    if root.data == data:
        return [data]
    for subtree in (root.left, root.right):
        partial = path_from_node_to_root(subtree, data)
        if partial is not None:
            partial.append(root.data)
            return partial
    return None
aa0d8ce33a090f902ad8f14cf64f48b5d4df3194 | 7,232 | py | Python | users/forms.py | henryyang42/NTHUOJ_web | b197ef8555aaf90cba176eba61da5c919dab7af6 | [
"MIT"
] | null | null | null | users/forms.py | henryyang42/NTHUOJ_web | b197ef8555aaf90cba176eba61da5c919dab7af6 | [
"MIT"
] | null | null | null | users/forms.py | henryyang42/NTHUOJ_web | b197ef8555aaf90cba176eba61da5c919dab7af6 | [
"MIT"
] | null | null | null | """
The MIT License (MIT)
Copyright (c) 2014 NTHUOJ team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from django import forms
from threading import Thread
from users.models import User
from problem.models import Problem, Submission, SubmissionDetail, Testcase
from utils import log_info, user_info, config_info, file_info
logger = log_info.get_logger()
class CodeSubmitForm(forms.Form):
    """Form used to submit source code for judging a problem."""
    SUBMIT_PATH = config_info.get_config('path', 'submission_code_path')
    LANGUAGE_CHOICE = tuple(config_info.get_config_items('compiler_option'))
    BACKEND_VERSION = config_info.get_config('system_version', 'backend')
    GCC_VERSION = config_info.get_config('system_version', 'gcc')
    GPP_VERSION = config_info.get_config('system_version', 'gpp')
    pid = forms.CharField(label='Problem ID')
    language = forms.ChoiceField(choices=LANGUAGE_CHOICE, initial=Submission.CPP,
                                 help_text="Backend: %s<br>gcc: %s<br>g++: %s"
                                           % (BACKEND_VERSION, GCC_VERSION, GPP_VERSION))
    code = forms.CharField(max_length=40 * 1024,
                           widget=forms.Textarea(attrs={'id': 'code_editor'}))

    def clean_pid(self):
        """Validate that pid is numeric, exists, and is visible to the user.

        :return: the validated problem id
        :raises forms.ValidationError: on any failed check
        """
        pid = self.cleaned_data['pid']
        if not unicode(pid).isnumeric():
            raise forms.ValidationError("Problem ID must be a number")
        try:
            problem = Problem.objects.get(id=pid)
            if not user_info.has_problem_auth(self.user, problem):
                raise forms.ValidationError(
                    "You don't have permission to submit that problem")
        except Problem.DoesNotExist:
            logger.warning('Pid %s does not exist' % pid)
            raise forms.ValidationError('Problem of this pid does not exist')
        return pid

    def submit(self):
        """Record the submission and write the code file to SUBMIT_PATH."""
        pid = self.cleaned_data['pid']
        code = self.cleaned_data['code']
        language = self.cleaned_data['language']
        problem = Problem.objects.get(id=pid)
        problem.total_submission += 1
        problem.save()
        submission = Submission.objects.create(
            user=self.user,
            problem=problem,
            language=language)
        try:
            filename = '%s.%s' % (
                submission.id, file_info.get_extension(submission.language))
            # BUG FIX: use a context manager so the file handle is closed even
            # when write() raises (the old open()/write()/close() leaked it).
            with open('%s%s' % (self.SUBMIT_PATH, filename), 'w') as f:
                f.write(code.encode('utf-8'))
        except IOError:
            logger.warning('Sid %s fail to save code' % submission.id)
        if problem.judge_source == Problem.OTHER:
            # Send to other judge
            pass

    def __init__(self, *args, **kwargs):
        # The view passes the current user so clean_pid() can check
        # per-problem permissions; defaults to an anonymous User().
        self.user = kwargs.pop('user', User())
        super(CodeSubmitForm, self).__init__(*args, **kwargs)
class UserProfileForm(forms.ModelForm):
    """A form for updating user's profile. Includes all the required
    fields, plus a repeated password."""
    # username is rendered read-only; clean_username() enforces this server-side
    username = forms.CharField(label='Username',
                               widget=forms.TextInput(attrs={'readonly': True}))
    email = forms.EmailField(label='Email')
    theme = forms.ChoiceField(label='Theme', choices=User.THEME_CHOICE)
    # both password fields are optional: leaving both empty keeps the old password
    password1 = forms.CharField(label='Password', required=False,
                                widget=forms.PasswordInput())
    password2 = forms.CharField(label='Password Confirmation', required=False,
                                widget=forms.PasswordInput())
    class Meta:
        model = User
        fields = ('username', 'email', 'theme', 'password1', 'password2')
    def clean_username(self):
        """Keep the stored username for existing users, ignoring tampering."""
        # username is primary key, should not be changed
        instance = getattr(self, 'instance', None)
        if instance and instance.pk:
            return instance.username
        else:
            return self.cleaned_data['username']
    def clean_password2(self):
        """Require the two password entries to agree (both set or both empty)."""
        # Check that the two password entries match
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError("Passwords don't match")
        # filling in exactly one of the two fields is also an error
        if (not password1) != (not password2):
            raise forms.ValidationError("Passwords can't be empty")
        return password2
    def save(self):
        """Persist the profile; hash a new password only when one was given."""
        if self.cleaned_data["password1"]:
            self.instance.set_password(self.cleaned_data["password1"])
        self.instance.save()
        return self.instance
class UserLevelForm(forms.ModelForm):
    """Form that lets privileged users change another user's level.

    The selectable levels depend on the requesting user's own authority:
    admins see every level, judges only Sub-judge and User.
    """

    def __init__(self, *args, **kwargs):
        requester = kwargs.pop('request_user', User())
        super(UserLevelForm, self).__init__(*args, **kwargs)
        self.fields['user_level'].label = 'User Level'
        if requester.has_admin_auth():
            # admins keep the model's full choice list
            return
        if requester.has_judge_auth():
            # judges may only promote a user to these two levels
            self.fields['user_level'].choices = (
                (User.SUB_JUDGE, 'Sub-judge'), (User.USER, 'User'))

    class Meta:
        model = User
        fields = ('user_level',)

    def is_valid(self, user):
        """Validate the form and check that *user* may assign the level."""
        # run the parent validation first
        if not super(UserLevelForm, self).is_valid():
            return False
        # admin can change user to all levels
        if user.has_admin_auth():
            return True
        # judge can change user to sub-judge or user only
        chosen = self.cleaned_data['user_level']
        return user.has_judge_auth() and chosen in (User.SUB_JUDGE, User.USER)
class UserForgetPasswordForm(forms.Form):
    """Form for password recovery: the username/email pair must match."""
    username = forms.CharField()
    email = forms.EmailField()

    def clean_email(self):
        """Ensure a user exists with exactly this username and email."""
        uname = self.cleaned_data['username']
        mail = self.cleaned_data['email']
        if not (uname and mail and User.objects.filter(username=uname, email=mail)):
            raise forms.ValidationError("Username and Email don't match")
        return mail
| 39.304348 | 86 | 0.648921 | 5,880 | 0.813053 | 0 | 0 | 0 | 0 | 0 | 0 | 2,391 | 0.330614 |
aa0ef04e32539da4111738a97b4fb91b9adbfeb7 | 296 | py | Python | arena/objects/line.py | syreal17/ARENA-py | 84f73d09ca5bcfec5973b366784ec8385d5e156d | [
"BSD-3-Clause"
] | null | null | null | arena/objects/line.py | syreal17/ARENA-py | 84f73d09ca5bcfec5973b366784ec8385d5e156d | [
"BSD-3-Clause"
] | null | null | null | arena/objects/line.py | syreal17/ARENA-py | 84f73d09ca5bcfec5973b366784ec8385d5e156d | [
"BSD-3-Clause"
] | null | null | null | from .arena_object import Object
from ..attributes import Position
class Line(Object):
"""
Class for Line in the ARENA.
"""
def __init__(self, start=Position(0,0,0), end=Position(10,10,10), **kwargs):
super().__init__(object_type="line", start=start, end=end, **kwargs)
| 26.909091 | 80 | 0.665541 | 226 | 0.763514 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.168919 |
aa110cd2edc58ec667902045505d003d64be93a9 | 1,067 | py | Python | pset_challenging_ext/exercises/p18.py | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 5 | 2019-04-08T20:05:37.000Z | 2019-12-04T20:48:45.000Z | pset_challenging_ext/exercises/p18.py | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 8 | 2019-04-15T15:16:05.000Z | 2022-02-12T10:33:32.000Z | pset_challenging_ext/exercises/p18.py | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 2 | 2019-04-10T00:14:42.000Z | 2020-02-26T20:35:21.000Z | """
A website requires the users to input username and password to register. Write a program to check the validity of password input by users.
"""
"""Question 18
Level 3
Question:
A website requires the users to input username and password to register. Write a program to check the validity of password input by users.
Following are the criteria for checking the password:
1. At least 1 letter between [a-z]
2. At least 1 number between [0-9]
1. At least 1 letter between [A-Z]
3. At least 1 character from [$#@]
4. Minimum length of transaction password: 6
5. Maximum length of transaction password: 12
Your program should accept a sequence of comma separated passwords and will check them according to the above criteria. Passwords that match the criteria are to be printed, each separated by a comma.
Example
If the following passwords are given as input to the program:
ABd1234@1,a F1#,2w3E*,2We3345
Then, the output of the program should be:
ABd1234@1
Hints:
In case of input data being supplied to the question, it should be assumed to be a console input.
""" | 42.68 | 199 | 0.776007 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,065 | 0.998126 |
aa11a5a6d1a64b23db22e2fa37320c9cd8481f08 | 14,561 | py | Python | tool/taint_analysis/summary_functions.py | cpbscholten/karonte | b989f7dfe9dbe002dd0dc4c4e5b0293dde61ae72 | [
"BSD-2-Clause"
] | 294 | 2019-11-14T13:14:55.000Z | 2022-03-22T08:28:56.000Z | tool/taint_analysis/summary_functions.py | cpbscholten/karonte | b989f7dfe9dbe002dd0dc4c4e5b0293dde61ae72 | [
"BSD-2-Clause"
] | 17 | 2019-12-23T09:32:00.000Z | 2022-03-17T20:00:13.000Z | tool/taint_analysis/summary_functions.py | cpbscholten/karonte | b989f7dfe9dbe002dd0dc4c4e5b0293dde61ae72 | [
"BSD-2-Clause"
] | 50 | 2019-11-25T02:27:04.000Z | 2021-12-10T04:46:26.000Z | """
Though karonte relies on angr's sim procedures, sometimes these add in the current state some constraints to make the
used analysis faster. For example, if a malloc has an unconstrained size, angr add the constraint
size == angr-defined.MAX_SIZE. Though this makes the analysis faster, it makes impossible to reason about the maximum
buffer sizes (as needed by karonte).
In this module we wrap sim procedures to avoid them to add such constraints.
Note however, that the semantic of an expression might get lost.
Eg. strlen(taint_x) = taint_y, taint_y is an unconstrained variable
"""
from taint_analysis.coretaint import *
def _get_function_name(addr, p):
    """
    Return the name of the PLT stub at the given address.

    :param addr: function address
    :param p: angr project
    :return: function name (presumably None when addr is not a known PLT
             stub — TODO confirm against angr's loader API)
    """
    return p.loader.find_plt_stub_name(addr)
def source_dummy(*_, **__):
    # No-op summary: accepts any call signature and does nothing.
    pass
def memcmp_unsized(_core, _, plt_path):
    """
    memcmp-like unsized (e.g., strlen) function summary

    :param _core: core taint engine
    :param _: not used
    :param plt_path: path to the plt (i.e., call_site.step())
    :return: None
    """
    p = _core.p
    state = plt_path.active[0]
    buf_a = _core.safe_load(plt_path, getattr(state.regs, arg_reg_name(p, 0)))
    buf_b = _core.safe_load(plt_path, getattr(state.regs, arg_reg_name(p, 1)))
    tainted_a = _core.is_tainted(buf_a, plt_path)
    tainted_b = _core.is_tainted(buf_b, plt_path)
    # comparing a tainted buffer against an untainted one sanitizes the
    # tainted side, so untaint it (recursively)
    if tainted_a and not tainted_b:
        _core.do_recursive_untaint(buf_a, plt_path)
    elif tainted_b and not tainted_a:
        _core.do_recursive_untaint(buf_b, plt_path)
    # first step enters the PLT stub, the second runs the hooked procedure
    plt_path.step()
    assert _core.p.is_hooked(plt_path.active[0].addr), (
        "memcmp_unsized: Summary function relies on angr's "
        "sim procedure, add option use_sim_procedures to the loader")
    plt_path.step()
def memcmp_sized(_core, _, plt_path):
    """
    memcmp-like sized (e.g., memcmp) function summary

    :param _core: core taint engine
    :param _: not used
    :param plt_path: path to the plt (i.e., call_site.step())
    :return: None
    """
    p = _core.p
    state = plt_path.active[0]
    buf_a = _core.safe_load(plt_path, getattr(state.regs, arg_reg_name(p, 0)))
    buf_b = _core.safe_load(plt_path, getattr(state.regs, arg_reg_name(p, 1)))
    size = _core.safe_load(plt_path, getattr(state.regs, arg_reg_name(p, 2)))
    # only an untainted size allows sanitization to be considered
    if not _core.is_tainted(size, plt_path):
        if not _core.is_tainted(buf_a, plt_path):
            buf_a = None
        if not _core.is_tainted(buf_b, plt_path):
            buf_b = None
    # if either of the two buffers is untainted, untaint the other
    if buf_a is not None and buf_b is None:
        _core.do_recursive_untaint(buf_a, plt_path)
    elif buf_b is not None and buf_a is None:
        _core.do_recursive_untaint(buf_b, plt_path)
    # first step enters the PLT stub, the second runs the hooked procedure
    plt_path.step()
    assert _core.p.is_hooked(plt_path.active[0].addr), (
        "memcmp_sized: Summary function relies on angr's "
        "sim procedure, add option use_sim_procedures to the loader")
    plt_path.step()
def memcpy_sized(_core, call_site_path, plt_path):
    """
    memcpy-like sized (e.g., memcpy) function summary

    Propagates taint from the source buffer to the destination and untaints
    the destination when the copy size is bounded by an untainted value.

    :param _core: core taint engine
    :param call_site_path: call site angr path
    :param plt_path: path to the plt (i.e., call_site.step())
    :return: None
    """
    p = _core.p
    # if the second parameter is tainted (or pointing to a tainted location)
    # or the third is tainted, we taint the first too
    dst_reg = arg_reg_name(p, 0)
    dst = getattr(plt_path.active[0].regs, dst_reg)
    dst_loaded = _core.safe_load(plt_path, dst)
    src_reg = arg_reg_name(p, 1)
    src = getattr(plt_path.active[0].regs, src_reg)
    src_loaded = _core.safe_load(plt_path, src)
    reg_n = arg_reg_name(p, 2)
    n = getattr(plt_path.active[0].regs, reg_n)
    # n_loaded = _core.safe_load(plt_path_cp, size)
    # step into the plt stub; the next address must be angr's sim procedure
    plt_path.step()
    assert _core.p.is_hooked(plt_path.active[0].addr), "memcpy_sized: Summary function relies on angr's " \
                                                       "sim procedure, add option use_sim_procedures to the loader"
    plt_path.step()
    if not plt_path.active:
        raise Exception("size of function has no active successors, not walking this path...")
    # apply taint to dst if source is tainted and constrain this buffer
    # TODO take N into account
    if _core.is_tainted(src_loaded, path=plt_path):
        # mirror the (estimated-size) source content into a fresh tainted
        # symbol, constrain it equal to the source, and store it at dst
        src_loaded_full = _core.safe_load(plt_path, src, estimate_size=True)
        new_dst_t = _core.get_sym_val(name=_core.taint_buf, bits=src_loaded_full.length).reversed
        _core.add_taint_glob_dep(new_dst_t, src_loaded_full, plt_path)
        plt_path.active[0].add_constraints(src_loaded_full == new_dst_t)
        plt_path.active[0].memory.store(dst, new_dst_t)
    # untaint if the size is constrained
    if (_core.is_tainted(dst, path=plt_path) or
            _core.is_tainted(dst_loaded, path=plt_path)) and \
            not _core.is_tainted(n, path=plt_path):
        # do untaint
        _core.do_recursive_untaint(dst_loaded, plt_path)
def memcpy_unsized(_core, call_site_path, plt_path):
    """
    memcpy-like unsize (e.g., strcpy) function summary

    Propagates taint from the source buffer to the destination; there is no
    size argument, so no untainting is performed here.

    :param _core: core taint engine
    :param call_site_path: call site angr path
    :param plt_path: path to the plt (i.e., call_site.step())
    :return: None
    """
    p = _core.p
    dst_reg = arg_reg_name(p, 0)
    dst = getattr(plt_path.active[0].regs, dst_reg)
    # dst_loaded = _core.safe_load(plt_path_cp, dst, estimate_size=True)
    src_reg = arg_reg_name(p, 1)
    src = getattr(plt_path.active[0].regs, src_reg)
    src_loaded = _core.safe_load(plt_path, src)
    # run the sim procedure
    plt_path.step()
    assert _core.p.is_hooked(plt_path.active[0].addr), "memcpy_unsized: Summary function relies on angr's " \
                                                       "sim procedure, add option use_sim_procedures to the loader"
    plt_path.step()
    if not plt_path.active:
        raise Exception("size of function has no active successors, not walking this path...")
    # apply taint to dst if source is tainted and constrain this buffer
    if _core.is_tainted(src_loaded, path=plt_path):
        # mirror the (estimated-size) source content into a fresh tainted
        # symbol, constrain it equal to the source, and store it at dst
        src_loaded_full = _core.safe_load(plt_path, src, estimate_size=True)
        new_dst_t = _core.get_sym_val(name=_core.taint_buf, bits=src_loaded_full.length).reversed
        _core.add_taint_glob_dep(new_dst_t, src_loaded_full, plt_path)
        plt_path.active[0].add_constraints(src_loaded_full == new_dst_t)
        plt_path.active[0].memory.store(dst, new_dst_t)
def is_size_taint(v):
    """Return True when *v*'s string form carries the size-taint marker."""
    text = str(v)
    return '__size__' in text
def sizeof(_core, call_site_path, plt_path):
    """
    sizeof-like (e.g., strlen) function summary

    If the measured buffer (or its pointer) is tainted, the return value is
    replaced by a fresh tainted symbol carrying the '__size__' marker.

    :param _core: core taint engine
    :param call_site_path: call site angr path
    :param plt_path: path to the plt (i.e., call_site.step())
    :return: None
    """
    p = _core.p
    n = getattr(plt_path.active[0].regs, arg_reg_name(p, 0))
    # taint_buf_size is in bits; use floor division so an *int* byte count is
    # passed to safe_load (true division would hand it a float)
    cnt = _core.safe_load(plt_path, n, _core.taint_buf_size // 8)
    # use the sim procedure to continue to the next state and add constraints
    plt_path.step()
    assert _core.p.is_hooked(plt_path.active[0].addr), "sizeof: Summary function relies on angr's " \
                                                       "sim procedure, add option use_sim_procedures to the loader"
    plt_path.step()
    if not plt_path.active:
        raise Exception("size of function has no active successors, not walking this path...")
    return_value = getattr(plt_path.active[0].regs, ret_reg_name(p))
    # TODO: check if the constraints set by angr sim procedure are correct
    # if there is a tainted buffer in one of the registers then also taint this variable
    if _core.is_tainted(cnt, path=plt_path) or _core.is_tainted(n, path=plt_path):
        t = _core.get_sym_val(name=(_core.taint_buf + '__size__'), bits=p.arch.bits).reversed
        _core.add_taint_glob_dep(t, cnt, plt_path)
        # constrain output of this variable equal to the output of sizeof and add it to the return register
        plt_path.active[0].add_constraints(return_value == t)
        setattr(plt_path.active[0].regs, ret_reg_name(p), t)
#
# Heap functions
#
def _malloc(_core, _, plt_path):
    """
    malloc function summary

    :param _core: core taint engine
    :param _: not used
    :param plt_path: path to the plt (i.e., call_site.step())
    :return: the (possibly symbolic) requested allocation size
    """
    p = _core.p
    state = plt_path.active[0]
    sim_size = getattr(state.regs, arg_reg_name(p, 0))
    # when the size is symbolic, choose the maximum size possible
    if state.solver.symbolic(sim_size):
        size = state.solver.max(sim_size)
        if size > state.libc.max_variable_size:
            size = state.libc.max_variable_size
        # concretize the size argument so the sim procedure allocates a fixed amount
        setattr(state.regs, arg_reg_name(p, 0), size)
    # use the sim procedure
    plt_path.step()
    assert _core.p.is_hooked(plt_path.active[0].addr), "malloc: Summary function relies on angr's " \
                                                       "sim procedure, add option use_sim_procedures to the loader"
    plt_path.step()
    return sim_size
def _realloc(_core, _, plt_path):
    """
    realloc function summary

    :param _core: core taint engine
    :param _: not used
    :param plt_path: path to the plt (i.e., call_site.step())
    :return: the (possibly symbolic) requested allocation size
    """
    p = _core.p
    state = plt_path.active[0]
    # realloc(ptr, size): the size is the *second* argument
    sim_size = getattr(state.regs, arg_reg_name(p, 1))
    # ptr = getattr(state.regs, arg_reg_name(p, 0))
    # when the size is symbolic, choose the maximum size possible
    if state.solver.symbolic(sim_size):
        size = state.solver.max(sim_size)
        if size > state.libc.max_variable_size:
            size = state.libc.max_variable_size
        # write the concretized size back to the register it was read from
        # (arg 1); the previous code wrote it to arg 0, clobbering the pointer
        setattr(state.regs, arg_reg_name(p, 1), size)
    # if the size is not tainted, use the sim procedure
    plt_path.step()
    assert _core.p.is_hooked(plt_path.active[0].addr), "realloc: Summary function relies on angr's " \
                                                       "sim procedure, add option use_sim_procedures to the loader"
    plt_path.step()
    return sim_size
def heap_alloc(_core, call_site_path, plt_path):
    """
    Heap allocation function stub

    :param _core: core taint engine
    :param call_site_path: call site angr path
    :param plt_path: path to the plt (i.e., call_site.step())
    :return: None
    """
    handlers = {'malloc': _malloc, 'realloc': _realloc}
    fname = _get_function_name(plt_path.active[0].addr, _core.p)
    handler = handlers.get(fname)
    if handler is None:
        print(f"Implement this heap alloc: {fname}")
        sim_size = None
    else:
        sim_size = handler(_core, call_site_path, plt_path)
    if sim_size is None:
        return
    # if the requested size is tainted through a single size-marked leaf,
    # remove that taint: the allocation consumed it
    tainted_leaves = [leaf for leaf in sim_size.recursive_leaf_asts
                      if _core.is_tainted(leaf, call_site_path)]
    if tainted_leaves and len(set(tainted_leaves)) == 1:
        leaf = tainted_leaves[0]
        if is_size_taint(leaf):
            _core.do_recursive_untaint(leaf, plt_path)
#
# Env function
#

# Module-level cache mapping the string form of a (symbolic) key expression to
# the value register last written by _setenv; read back by _getenv.
env_var = {}
def _setenv(_core, _, plt_path):
    """
    setenv function summary

    Records the value register under the string form of the key register in
    the module-level env_var cache, then steps past the call.

    :param _core: core taint engine
    :param _: not used
    :param plt_path: path to the plt (i.e., call_site.step())
    :return: None
    """
    global env_var
    p = _core.p
    plt_path_cp = plt_path.copy(deep=True)
    plt_state_cp = plt_path_cp.active[0]
    # add the environment variable to the list of env_variables with this key
    key = getattr(plt_path.active[0].regs, arg_reg_name(p, 0))
    env_var[str(key)] = getattr(plt_path.active[0].regs, arg_reg_name(p, 1))
    # this call can continue with an empty sim procedure since it does nothing
    # NOTE(review): stepping on the copied state yields angr successors, not a
    # single state -- confirm .addr is the intended hook target here
    next_state = plt_state_cp.step()
    _core.p.hook(next_state.addr, ReturnUnconstrained())
    plt_path.step().step()
def _getenv(_core, call_site_addr, plt_path):
    """
    getenv function summary

    Allocates a heap buffer for the variable's value: a tainted symbol when
    the key is attacker-controlled, the cached value when previously written
    via _setenv, otherwise a fresh symbolic value. The buffer address is
    placed in the return register.

    :param _core: core taint engine
    :param call_site_addr: call site angr path
    :param plt_path: path to the plt (i.e., call_site.step())
    :return: None
    """
    global env_var
    p = _core.p
    env_var_size = _core.taint_buf_size
    reg = getattr(plt_path.active[0].regs, arg_reg_name(p, 0))
    cnt_mem = _core.safe_load(plt_path, reg)
    key = str(reg)
    # this info is passed by some user controllable source
    if _core.is_tainted(reg, path=plt_path) or _core.is_tainted(cnt_mem, path=plt_path):
        to_store = _core.get_sym_val(name=_core.taint_buf, bits=env_var_size)
    # it was set before
    elif key in env_var:
        to_store = env_var[key]
    # fresh symbolic var
    else:
        to_store = _core.get_sym_val(name="env_var", bits=env_var_size)
    # store the symbolic buffer at the memory address
    addr = plt_path.active[0].heap.allocate(env_var_size)
    plt_path.active[0].memory.store(addr, to_store)
    # use an empty hook as sim procedure to continue with the program
    # NOTE(review): as in _setenv, step() returns successors -- confirm .addr
    plt_path_cp = plt_path.copy(deep=True)
    plt_state_cp = plt_path_cp.active[0]
    next_state = plt_state_cp.step()
    _core.p.hook(next_state.addr, ReturnUnconstrained())
    plt_path.step().step()
    # set the return address to the pointer
    setattr(plt_path.active[0].regs, ret_reg_name(p), addr)
def env(_core, call_site_path, plt_path):
    """
    Summarize environment functions (getenv, and setenv)

    :param _core: core taint engin
    :param call_site_path: call site angr path
    :param plt_path: path to the plt (i.e., call_site.step())
    :return:
    """
    dispatch = {'setenv': _setenv, 'getenv': _getenv}
    fname = _get_function_name(plt_path.active[0].addr, _core.p)
    summary = dispatch.get(fname)
    if summary is not None:
        summary(_core, call_site_path, plt_path)
    else:
        print(f"Implement this Env function: {fname}")
    # return the env_var if tainted to store for bug_finders
#
# Numerical
#
def atoi(_core, _, plt_path):
    """
    atoi-like function summary: if the argument is (or points to) tainted
    data, recursively untaint the pointed-to word, then step past the call.

    :param _core: core taint engine
    :param _: not used
    :param plt_path: path to the plt (i.e., call_site.step())
    :return: None
    """
    p = _core.p
    state = plt_path.active[0]
    val = getattr(state.regs, arg_reg_name(p, 0))
    if _core.is_or_points_to_tainted_data(val, plt_path):
        # load one machine word at the argument address and sanitize it
        addr = plt_path.active[0].memory.load(val, p.arch.bytes)
        _core.do_recursive_untaint(addr, plt_path)
    plt_path.step().step()
| 34.100703 | 117 | 0.665614 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,747 | 0.394684 |
aa11fb94d31ccf6c37536234d11058561f0102ae | 2,510 | py | Python | djangodash2013/settings.py | nnrcschmdt/djangodash2013 | 26e7a7ce62fdea976569144fed96d04cf631e32f | [
"BSD-3-Clause"
] | null | null | null | djangodash2013/settings.py | nnrcschmdt/djangodash2013 | 26e7a7ce62fdea976569144fed96d04cf631e32f | [
"BSD-3-Clause"
] | null | null | null | djangodash2013/settings.py | nnrcschmdt/djangodash2013 | 26e7a7ce62fdea976569144fed96d04cf631e32f | [
"BSD-3-Clause"
] | null | null | null | import os
# Django settings for the djangodash2013 project.
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))

# SECURITY WARNING: keep DEBUG = False in production deployments.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    ('Ernesto Rico-Schmidt', 'e.rico.schmidt@gmail.com'),
)
MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'data.sqlite'
    }
}

# SECURITY WARNING: '*' accepts requests for any Host header.
ALLOWED_HOSTS = ['*']

TIME_ZONE = 'Europe/Vienna'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True

MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = ''
STATIC_URL = '/static/'
STATICFILES_DIRS = (
)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'djangodash2013.urls'
WSGI_APPLICATION = 'djangodash2013.wsgi.application'

# BUG FIX: without the trailing comma this was a parenthesized *string*, not a
# one-element tuple, so Django would iterate it character by character.
TEMPLATE_DIRS = (
    os.path.join(PROJECT_DIR, 'templates'),
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'mocks',
)

SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'

LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}

# SECURITY WARNING: the fallback key below is public; real deployments must
# provide SECRET_KEY via a local_settings module.
try:
    from local_settings import SECRET_KEY
except ImportError:
    SECRET_KEY = 'this-is-not-empty'
| 22.212389 | 73 | 0.664143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,435 | 0.571713 |
aa1329750f3a0be83521b3103dee6f387d00d620 | 15,773 | py | Python | Commongen_code/TED/transformers_local/examples/run_generation.py | nlgandnlu/TED-code | 7c00fc39c4412d57e378a5b205bcec1737e51e2e | [
"MIT"
] | 1 | 2022-01-14T08:00:57.000Z | 2022-01-14T08:00:57.000Z | Commongen_code/TED/transformers_local/examples/run_generation.py | nlgandnlu/TED | c0b04ddbeaa6061cd1c888bf1bcf70ef717f6bee | [
"MIT"
] | null | null | null | Commongen_code/TED/transformers_local/examples/run_generation.py | nlgandnlu/TED | c0b04ddbeaa6061cd1c888bf1bcf70ef717f6bee | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Conditional text generation with the auto-regressive models of the library (GPT/GPT-2/CTRL/Transformer-XL/XLNet)
"""
import argparse
import logging
import numpy as np
import torch
from transformers import (
CTRLLMHeadModel,
CTRLTokenizer,
GPT2LMHeadModel,
GPT2Tokenizer,
OpenAIGPTLMHeadModel,
OpenAIGPTTokenizer,
TransfoXLLMHeadModel,
TransfoXLTokenizer,
XLMTokenizer,
XLMWithLMHeadModel,
XLNetLMHeadModel,
XLNetTokenizer,
)
from thop import profile
from torchstat import stat
from transformers import BartForConditionalGeneration, BartTokenizer
from transformers import T5ForConditionalGeneration, T5Tokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO,
)
logger = logging.getLogger(__name__)
MAX_LENGTH = int(10000) # Hardcoded max length to avoid infinite loop
MODEL_CLASSES = {
"gpt2": (GPT2LMHeadModel, GPT2Tokenizer),
"ctrl": (CTRLLMHeadModel, CTRLTokenizer),
"openai-gpt": (OpenAIGPTLMHeadModel, OpenAIGPTTokenizer),
"xlnet": (XLNetLMHeadModel, XLNetTokenizer),
"transfo-xl": (TransfoXLLMHeadModel, TransfoXLTokenizer),
"xlm": (XLMWithLMHeadModel, XLMTokenizer),
}
# Padding text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
# in https://github.com/rusiaaman/XLNet-gen#methodology
# and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e
PADDING_TEXT = """In 1991, the remains of Russian Tsar Nicholas II and his family
(except for Alexei and Maria) are discovered.
The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the
remainder of the story. 1883 Western Siberia,
a young Grigori Rasputin is asked by his father and a group of men to perform magic.
Rasputin has a vision and denounces one of the men as a horse thief. Although his
father initially slaps him for making such an accusation, Rasputin watches as the
man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous,
with people, even a bishop, begging for his blessing. <eod> </s> <eos>"""
def set_seed(args):
    """Seed the numpy and torch RNGs (and all CUDA devices when GPUs are used)."""
    seed = args.seed
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
#
# Functions to prepare models' input
#
def prepare_ctrl_input(args, _, tokenizer, prompt_text):
    """Warn about CTRL-specific pitfalls; the prompt itself is returned unchanged.

    The prompt is encoded only to check whether it starts with one of CTRL's
    control codes -- generation quality degrades badly otherwise.
    """
    if args.temperature > 0.7:
        logger.info("CTRL typically works better with lower temperatures (and lower top_k).")
    encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=False)
    if not any(encoded_prompt[0] == x for x in tokenizer.control_codes.values()):
        logger.info("WARNING! You are not starting your generation from a control code so you won't get good results")
    return prompt_text
def prepare_xlm_input(args, model, tokenizer, prompt_text):
    """Configure the model's language id for XLM; the prompt is returned unchanged.

    When the model uses language embeddings the language comes from
    --xlm_language, falling back to an interactive prompt if it is not one of
    the model's available languages.
    """
    # kwargs = {"language": None, "mask_token_id": None}
    # Set the language
    use_lang_emb = hasattr(model.config, "use_lang_emb") and model.config.use_lang_emb
    if hasattr(model.config, "lang2id") and use_lang_emb:
        available_languages = model.config.lang2id.keys()
        if args.xlm_language in available_languages:
            language = args.xlm_language
        else:
            language = None
            while language not in available_languages:
                language = input("Using XLM. Select language in " + str(list(available_languages)) + " >>> ")
        model.config.lang_id = model.config.lang2id[language]
        # kwargs["language"] = tokenizer.lang2id[language]
    # TODO fix mask_token_id setup when configurations will be synchronized between models and tokenizers
    # XLM masked-language modeling (MLM) models need masked token
    # is_xlm_mlm = "mlm" in args.model_name_or_path
    # if is_xlm_mlm:
    #     kwargs["mask_token_id"] = tokenizer.mask_token_id
    return prompt_text
def prepare_xlnet_input(args, _, tokenizer, prompt_text):
    """Prepend padding text (user-supplied or the default) to help XLNet with short prompts."""
    padding = args.padding_text or PADDING_TEXT
    return padding + prompt_text
def prepare_transfoxl_input(args, _, tokenizer, prompt_text):
    """Prepend padding text (user-supplied or the default) to help Transformer-XL with short prompts."""
    padding = args.padding_text or PADDING_TEXT
    return padding + prompt_text
# Maps model type -> prompt pre-processing function; model types absent from
# this table need no special prompt preparation.
PREPROCESSING_FUNCTIONS = {
    "ctrl": prepare_ctrl_input,
    "xlm": prepare_xlm_input,
    "xlnet": prepare_xlnet_input,
    "transfo-xl": prepare_transfoxl_input,
}
def adjust_length_to_model(length, max_sequence_length):
    """Clamp a requested generation length to the model's maximum sequence length.

    A negative *length* means "unspecified": it becomes the model maximum when
    one is known, otherwise the hardcoded MAX_LENGTH cap (avoids an infinite
    generation loop).
    """
    has_model_max = max_sequence_length > 0
    if length < 0:
        return max_sequence_length if has_model_max else MAX_LENGTH
    if has_model_max and length > max_sequence_length:
        return max_sequence_length  # no generation bigger than model size
    return length
def collate_batch(zhutis,examples,device):
    """Build the model-input dict for a single example.

    :param zhutis: concept/topic token ids, shape (1, L), using id 50259 as a
        group separator (see get_mask_and_position)
    :param examples: encoded prompt ids, assumed already on *device*
    :param device: torch device for the derived tensors
    :return: dict with the prompt ids, separator-free concept ids, and the
        attention/position tensors produced by get_mask_and_position
    """
    attention_mask, position_id, input_ids = get_mask_and_position(zhutis)
    attention_mask=attention_mask.to(device)
    position_id = position_id.to(device)
    input_ids = input_ids.to(device)
    # full attention from prompt tokens to concept tokens
    # NOTE(review): rows are sized by the raw concept length (incl. separators)
    # while columns use the separator-free length -- confirm this is intended
    f_attention_mask=torch.ones(zhutis.shape[-1],input_ids.shape[-1])
    f_attention_mask = f_attention_mask.to(device)
    return {"input_ids": examples, "zhutis": input_ids, "e_attention_mask": attention_mask,
            "e_position_ids": position_id, "f_attention_mask": f_attention_mask}
def get_mask_and_position(ids):
    """Split a (1, L) id tensor into separator-delimited groups.

    Token id 50259 acts as a group separator. Returns a tuple of:
      - mask: (1, L', L') block-diagonal attention mask (L' = L without
        separators); position 0 ([cls]) attends to and from everything
      - position: (1, L') relative positions restarting at 1 inside each group
      - new_input_ids: (1, L') the ids with the separators removed
    """
    # feed topic information instead of individual token embeddings
    # (ideographic comma = 544, full stop = 545)
    length=ids.shape[-1]
    num_sep=0
    for i in range(length):
        if ids[0][i].item() == 50259:
            num_sep+=1
    length_withoutsep=length-num_sep
    #id groups
    id_groups=[]
    #token groups
    token_groups=[]
    # print('local_rank')
    # print(local_rank)
    mask=torch.zeros([length_withoutsep,length_withoutsep])
    position=torch.arange(0, length_withoutsep, dtype=torch.long)
    #to get relation_position encoding
    relative_position=0
    # to determine whether come to the last token
    end_flag=True
    seps_now=0
    #[cls]
    id_groups.append(torch.tensor([0]))
    token_groups.append(ids.index_select(-1, torch.tensor([0])))
    mask[0, :] = 1.0
    mask[:, 0] = 1.0
    for i in range(length):
        if ids[0][i].item() == 50259:
            seps_now+=1
            ids_before=[]
            ids_convert=[]
            head=i
            # collect the tokens of the group that follows this separator,
            # recording both their original and separator-free indices
            for tail in range(i+1,length):
                if ids[0][tail].item() == 50259:
                    end_flag=False
                    break
                ids_convert.append(tail - seps_now)
                ids_before.append(tail)
            id_groups.append(torch.tensor(ids_convert))
            token_groups.append(ids.index_select(-1, torch.tensor(ids_before)))
            # mark the group's square block in the attention mask; when the
            # group runs to the end of the sequence, include the last token
            if end_flag==False:
                mask[head+1-seps_now:tail-seps_now,head+1-seps_now:tail-seps_now]=1.0
            else:
                mask[head+1-seps_now:tail+1-seps_now, head+1-seps_now:tail+1-seps_now] = 1.0
            # relative position record starts again
            relative_position = 1
            end_flag=True
        else:
            position[i-seps_now]=relative_position
            relative_position+=1
    new_input_ids=torch.cat((token_groups),-1)
    mask=mask.unsqueeze(0)
    position = position.unsqueeze(0)
    return mask,position,new_input_ids
def main():
    """Script entry point: decode one generation per (prompt, concept) pair.

    Prompts and concepts are read line-aligned from --input_file and
    --concept_file.  Consecutive duplicate prompts reuse the previous
    generation instead of decoding again.  Generations are written to
    --output_file; per-prompt CUDA decode times are printed and averaged.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        required=True,
        help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
    )
    parser.add_argument("--prompt", type=str, default="")
    parser.add_argument("--length", type=int, default=20)
    parser.add_argument("--stop_token", type=str, default=None, help="Token at which text generation is stopped")
    parser.add_argument(
        "--temperature",
        type=float,
        default=1.0,
        help="temperature of 1.0 has no effect, lower tend toward greedy sampling",
    )
    parser.add_argument(
        "--repetition_penalty", type=float, default=1.0, help="primarily useful for CTRL model; in that case, use 1.2"
    )
    parser.add_argument("--k", type=int, default=1)
    parser.add_argument("--p", type=float, default=0.9)
    parser.add_argument("--padding_text", type=str, default="", help="Padding text for Transfo-XL and XLNet.")
    parser.add_argument("--xlm_language", type=str, default="", help="Optional language when used with the XLM model.")
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
    parser.add_argument("--num_return_sequences", type=int, default=1, help="The number of samples to generate.")
    parser.add_argument("--input_file", type=str, default='inference_test.txt', help="input_file.")
    parser.add_argument("--concept_file", type=str, default='test_concept.txt', help="input_file.")
    parser.add_argument("--output_file", type=str, default='decode_result.txt', help="output_file.")
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    set_seed(args)
    # Initialize the model and tokenizer
    try:
        args.model_type = args.model_type.lower()
        model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    except KeyError:
        raise KeyError("the model {} you specified is not supported. You are welcome to add it and open a PR :)")
    tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path)
    model = model_class.from_pretrained(args.model_name_or_path)
    total_params = sum(p.numel() for p in model.parameters())
    print(f'{total_params:,} total parameters.')
    model.to(args.device)
    #args.length = adjust_length_to_model(args.length, max_sequence_length=model.config.max_position_embeddings)
    logger.info(args)
    results=[]
    with open(args.input_file,'r',encoding='utf-8') as f:
        prompts=f.readlines()
    with open(args.concept_file,'r',encoding='utf-8') as f:
        concepts=f.readlines()
    # `time` counts decoded sequences and indexes into `timings`
    time=0
    old_text=''
    starter,ender=torch.cuda.Event(enable_timing=True),torch.cuda.Event(enable_timing=True)
    timings=np.zeros((len(prompts),1))
    # to test bart model, open these codes and open teg env and close 283-287
    #model = BartForConditionalGeneration.from_pretrained("facebook/bart-large")
    #tokenizer = BartTokenizer.from_pretrained("facebook/bart-large")
    #model.to(args.device)
    # model = T5ForConditionalGeneration.from_pretrained("t5-base")
    # tokenizer = T5Tokenizer.from_pretrained("t5-base")
    # model.to(args.device)
    for prompt_text,concept in zip(prompts,concepts):
        if prompt_text != old_text:
            old_text=prompt_text
            prompt_text = prompt_text.strip()
            # Different models need different input formatting and/or extra arguments
            requires_preprocessing = args.model_type in PREPROCESSING_FUNCTIONS.keys()
            if requires_preprocessing:
                prepare_input = PREPROCESSING_FUNCTIONS.get(args.model_type)
                preprocessed_prompt_text = prepare_input(args, model, tokenizer, prompt_text)
                encoded_prompt = tokenizer.encode(
                    preprocessed_prompt_text, add_special_tokens=False, return_tensors="pt",
                    add_space_before_punct_symbol=True
                )
            else:
                encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=False, return_tensors="pt")
            concept_prompt=tokenizer.encode(concept, add_special_tokens=False, return_tensors="pt")
            encoded_prompt = encoded_prompt.to(args.device)
            dic = collate_batch(concept_prompt, encoded_prompt, args.device)
            # flops, params = profile(model, inputs=(dic["input_ids"],dic["zhutis"]))
            # print(flops / 1e9, params / 1e6) # flops in GFLOPs, params in millions
            #concept_prompt = concept_prompt.to(args.device)
            # for key in dic:
            #     print(key + ':' )
            #     print(dic[key])
            # time the beam-search decode with CUDA events
            starter.record()
            print(dic["input_ids"])
            output_sequences = model.generate(
                input_ids=dic["input_ids"],
                # eos_token_id=58,
                num_return_sequences=1,
                max_length=20 + len(encoded_prompt[0]),
                temperature=1.0,
                top_k=1,
                top_p=0.9,
                repetition_penalty=1.0,
                length_penalty=1,
                do_sample=False,
                num_beams=5,
                #no_repeat_ngram_size=3,
                use_cache=None,
                zhutis=dic["zhutis"],
                e_attention_mask=dic["e_attention_mask"],
                e_position_ids=dic["e_position_ids"],
                f_attention_mask=dic["f_attention_mask"],
            )
            ender.record()
            torch.cuda.synchronize()
            curr_time=starter.elapsed_time(ender)
            timings[time]=curr_time
            print(curr_time)
            # Remove the batch dimension when returning multiple sequences
            if len(output_sequences.shape) > 2:
                output_sequences.squeeze_()
            for generated_sequence_idx, generated_sequence in enumerate(output_sequences):
                print("=== GENERATED SEQUENCE {} ===".format(generated_sequence_idx + 1))
                generated_sequence = generated_sequence.tolist()
                # Decode text
                text = tokenizer.decode(generated_sequence, clean_up_tokenization_spaces=True)
                # Remove all text after the stop token
                text = text[: text.find(args.stop_token) if args.stop_token else None]
                #print(text)
                # Add the prompt at the beginning of the sequence. Remove the excess text that was used for pre-processing
                total_sequence = (
                    text[len(tokenizer.decode(encoded_prompt[0], clean_up_tokenization_spaces=True))+1:] + '\n'
                )
                results.append(total_sequence)
                print(prompt_text)
                print(tokenizer.encode(prompt_text))
                print(total_sequence)
                time = time + 1
        else:
            # duplicate prompt: reuse the generation from the previous
            # iteration (total_sequence still holds it)
            results.append(total_sequence)
            # print(prompt_text)
            # print(total_sequence)
    mean_syn=np.sum(timings)/time
    print(mean_syn)
    with open(args.output_file, 'w', encoding='utf-8') as f:
        f.writelines(results)


# Script entry point.
if __name__ == "__main__":
    main()
| 39.4325 | 122 | 0.669499 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,350 | 0.338372 |
aa135a9d2168bdcf4644fb75e14d15294cb6bf0d | 4,988 | py | Python | kenlm_training/tests/test_minify.py | ruinunca/data_tooling | 297e1f8c2898d00b523ccafb7bdd19c6d6aac9ff | [
"Apache-2.0"
] | 435 | 2019-11-04T22:35:50.000Z | 2022-03-29T20:15:07.000Z | kenlm_training/tests/test_minify.py | ruinunca/data_tooling | 297e1f8c2898d00b523ccafb7bdd19c6d6aac9ff | [
"Apache-2.0"
] | 331 | 2021-11-02T00:30:56.000Z | 2022-03-08T16:48:13.000Z | kenlm_training/tests/test_minify.py | ruinunca/data_tooling | 297e1f8c2898d00b523ccafb7bdd19c6d6aac9ff | [
"Apache-2.0"
] | 66 | 2019-11-06T01:28:12.000Z | 2022-03-01T09:18:32.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import json
from pathlib import Path
import pytest
import cc_net
import cc_net.minify as minify
from cc_net import jsonql, process_wet_file
from cc_net.minify import (
HASH_SIZE,
decode_hashes,
encode_hashes,
encode_line_ids,
get_hashes,
)
def test_encode_decode():
    """Round-tripping hashes through encode/decode must be lossless."""
    lines = ["Hello world !", "Is everyone happy in here ?"]
    original = get_hashes(lines)
    assert all(len(h) == HASH_SIZE for h in original)
    as_ints = [minify._b2i(h) for h in original]

    roundtripped = decode_hashes(encode_hashes(original))
    assert all(len(h) == HASH_SIZE for h in roundtripped)
    back_ints = [minify._b2i(h) for h in roundtripped]

    assert as_ints == back_ints
    assert original == roundtripped
def test_minify():
    """Minifier drops raw_content and encodes line ids, keeping metadata."""
    minifier = minify.Minifier()
    document = {
        "raw_content": "Hello world !\nIs everyone happy in here ?",
        "language": "en",
        "perplexity": 120.0,
        "line_ids": [0, 4],
    }
    assert minifier(document) == {
        "line_ids": "AAAEAA==",
        "language": "en",
        "perplexity": 120.0,
    }
@pytest.fixture
def http_from_disk(monkeypatch):
    """Patch cc_net's HTTP download to serve the checked-in sample WET file."""
    def read_sample_file(url: str, n_retry: int = 3) -> bytes:
        # tests must only ever request the sample segment
        expected_url = process_wet_file.WET_URL_ROOT + "/crawl-data/sample.warc.wet"
        assert expected_url == url
        file = Path(__file__).parent / "data" / "sample.warc.txt"
        return file.read_bytes()
    monkeypatch.setattr(cc_net.jsonql, "request_get_content", read_sample_file)
def test_minify_and_fetch(http_from_disk, tmp_path: Path):
    """Minify a processed document, then restore it from the original CC segment."""
    full_quotes = """Don't part with your illusions. When they are gone you may still exist, but you have ceased to live.
Education: that which reveals to the wise, and conceals from the stupid, the vast limits of their knowledge.
Facts are stubborn things, but statistics are more pliable.
Fiction is obliged to stick to possibilities. Truth isn't."""
    # We don't need no education.
    chosen_quotes = "\n".join(
        l for l in full_quotes.splitlines() if "Education" not in l
    )
    # cc_doc mimics the raw CommonCrawl document; ccnet_metadata the fields
    # added by the cc_net pipeline (line 1 filtered out -> line_ids [0, 2, 3])
    cc_doc = {
        "url": "http://sample_english.com",
        "date_download": "2019-03-18T00:00:00Z",
        "digest": "sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER",
        "source_domain": "sample_english.com",
        "title": "Famous Mark Twain Quotes",
        "raw_content": full_quotes,
        "cc_segment": "crawl-data/sample.warc.wet",
        "nlines": 4,
        "length": 353,
    }
    ccnet_metadata = {
        "language": "en",
        "language_score": 0.99,
        "perplexity": 151.5,
        "bucket": "head",
        "raw_content": chosen_quotes,
        "nlines": 3,
        "length": len(chosen_quotes),
        "original_nlines": 4,
        "original_length": 353,
        "line_ids": [0, 2, 3],
    }
    ccnet_doc = dict(cc_doc, **ccnet_metadata)
    mini = minify.Minifier()(ccnet_doc.copy())
    assert mini is not ccnet_doc
    # minification keeps only the metadata needed to re-fetch the document
    important_fields = [
        "url",
        "digest",
        "cc_segment",
        "language",
        "language_score",
        "perplexity",
        "bucket",
        "line_ids",
    ]
    expected = {k: ccnet_doc[k] for k in important_fields}
    expected["line_ids"] = encode_line_ids(expected["line_ids"])  # type: ignore
    assert expected == mini
    # fetching back from the (mocked) segment restores the full document
    with jsonql.open_write(tmp_path / "sample.json") as o:
        print(json.dumps(mini), file=o)
    fetcher = minify.MetadataFetcher(tmp_path)
    # line_ids is removed when unminifying
    ccnet_doc.pop("line_ids")
    assert ccnet_doc == fetcher(cc_doc)
def test_fetch(http_from_disk, tmp_path: Path):
    """MetadataFetcher restores raw_content and metadata from minified docs."""
    mini_docs = [
        {
            "url": "http://sample_chinese.com",
            "digest": "sha1:Y4E6URVYGIAFNVRTPZ5S3J64RTZTP6HJ",
            "cc_segment": "crawl-data/sample.warc.wet",
            "line_ids": encode_line_ids([2]),
            "bucket": "not_that_great",
        },
        {
            "url": "http://sample_english.com",
            "digest": "sha1:XQZHW7QWIG54HVAV3KPRW6MK5ILDNCER",
            "cc_segment": "crawl-data/sample.warc.wet",
            "line_ids": encode_line_ids([3]),
            "bucket": "top_notch",
        },
    ]
    with jsonql.open_write(tmp_path / "sample.json") as o:
        for mini in mini_docs:
            print(json.dumps(mini), file=o)
    # both docs live in the same segment, so it is downloaded only once
    fetcher = minify.MetadataFetcher(tmp_path)
    cc = process_wet_file.CCSegmentsReader(["crawl-data/sample.warc.wet"])
    docs = [d for d in fetcher.map(cc) if d is not None]
    assert cc.retrieved_segments == 1
    # Note: documents are retrieved as they are ordered in the .warc.wet file
    assert [
        "Facts are stubborn things, but statistics are more pliable.",
        "事實是固執的東西,但統計數字卻比較柔和。",
    ] == [d["raw_content"] for d in docs]
    assert ["top_notch", "not_that_great"] == [d["bucket"] for d in docs]
| 32.38961 | 121 | 0.632518 | 0 | 0 | 0 | 0 | 411 | 0.081742 | 0 | 0 | 1,952 | 0.388226 |
aa140311bd46c7b3499277f3a584d6f7846994ac | 647 | py | Python | credentials.py | feliposz/submissions-scraper-selenium | e12c4194bfa96738e9042f4b0dd62e0ee14b3af4 | [
"MIT"
] | 3 | 2020-07-24T08:14:42.000Z | 2021-06-25T12:46:15.000Z | credentials.py | th3c0d3br34ker/hackerrank-scraper-selenium | e12c4194bfa96738e9042f4b0dd62e0ee14b3af4 | [
"MIT"
] | 8 | 2020-06-24T14:37:47.000Z | 2021-06-02T02:04:05.000Z | credentials.py | th3c0d3br34ker/hackerrank-scraper-selenium | e12c4194bfa96738e9042f4b0dd62e0ee14b3af4 | [
"MIT"
] | 2 | 2020-10-26T02:53:45.000Z | 2022-03-11T06:21:24.000Z | class ACCOUNTS():
    def __init__(self):
        """Initialize one attribute per judge site, each a settings dict.

        Placeholder strings ("username"/"password") are meant to be replaced
        by the user's real credentials before running the scraper.
        """
        # CodeChef account: plain username/password login.
        self.CodeChef = {
            "username": "username",
            "password": "password"
        }
        # Hackerrank account: credentials plus the list of tracks to scrape.
        self.Hackerrank = {
            "username": "username",
            "password": "password",
            "tracks": ["python"]
            # Available (add as per your requirements):
            # Languages: "java", "python", "c", "cpp", "ruby", "shell", "sql", "fp",
            # Domains: "algorithms", "data-structures", "mathematics", "ai", "databases", "regex"
            # "tutorials"
        }
def getAccounts(self):
return vars(self)
| 32.35 | 100 | 0.462133 | 645 | 0.996909 | 0 | 0 | 0 | 0 | 0 | 0 | 328 | 0.506955 |
aa164872ebae4c5cbc063c4c63a1e4a0016f243f | 9,353 | py | Python | beast/observationmodel/observations.py | cmurray-astro/beast | cbbf6a663126367632c065ae63b341bea325e2ea | [
"BSD-3-Clause"
] | null | null | null | beast/observationmodel/observations.py | cmurray-astro/beast | cbbf6a663126367632c065ae63b341bea325e2ea | [
"BSD-3-Clause"
] | 1 | 2020-06-25T16:26:02.000Z | 2020-06-25T16:26:02.000Z | beast/observationmodel/observations.py | cmurray-astro/beast | cbbf6a663126367632c065ae63b341bea325e2ea | [
"BSD-3-Clause"
] | null | null | null | """
Defines a generic interface to observation catalog
"""
import numpy as np
from astropy.table import Table, Column
from beast.observationmodel.vega import Vega
__all__ = ["Observations", "gen_SimObs_from_sedgrid"]
class Observations(object):
    """
    A generic class that interfaces observation catalog in a standardized way

    Attributes
    ----------
    inputFile : str
        catalog source file
    filters : list
        list of filter names (internal standards)
    filter_aliases : dict
        alias of filter names between internal and external names
    desc : str
        description of the observations
    badvalue : float
        value that tags a bad measurement that should not be used in the
        fitting
    nObs : int
        number of observations in the catalog
    """

    def __init__(self, inputFile, filters, obs_colnames=None, vega_fname=None, desc=None):
        """
        Parameters
        ----------
        inputFile : str
            observation file
        filters : list
            internal filter names of the data
        obs_colnames : list, optional
            filter names in the observed catalog; defaults to the internal
            filter names when not provided
        vega_fname : str, optional
            name of the file with the vega model spectrum
        desc : str, optional
            description of the observations
        """
        if desc is None:
            self.desc = "GENERIC: %s" % inputFile
        else:
            self.desc = desc
        self.inputFile = inputFile
        self.setFilters(filters)
        # Map internal filter names to the catalog column names.
        # bugfix: the original code crashed with a TypeError when
        # obs_colnames was left at its default None; fall back to the
        # internal names in that case (backward compatible).
        if obs_colnames is None:
            obs_colnames = filters
        self.filter_aliases = {}
        for ik, k in enumerate(filters):
            self.filter_aliases[k] = obs_colnames[ik]
        self.readData()
        self.setVegaFluxes(filters, vega_fname=vega_fname)
        # some bad values smaller than expected
        # in physical flux units
        self.setBadValue(6e-40)

    @property
    def nObs(self):
        """int: number of observations in the catalog"""
        return len(self.data)

    def __len__(self):
        return self.nObs

    def __call__(self):
        """ Calling the object will show info """
        self.info()

    def info(self):
        """ Prints some information about the catalog """
        # NOTE(review): the summary is printed twice -- once line by line
        # and once through the accumulated `txt` template. Kept as-is to
        # preserve the existing console output.
        txt = "Data read from {s.inputFile:s}\n"
        if self.desc is not None:
            txt += "Description: {s.desc:s}\n"
        txt += "Number of records: {s.nObs:d}\n\n"
        txt += "Dataset contains:"
        print("Data read from %s " % self.inputFile)
        if self.desc is not None:
            print("Description: %s" % self.desc)
        print("Number of records: %d" % self.nObs)
        print("")
        print("Dataset contains:")
        for k in list(self.data.keys()):
            txt += "\t {0:s}\n".format(k)
        if self.filters is None:
            txt += "\n No filters given yet!"
        else:
            txt += "\n Using Filters: {s.filters}\n"
        print(txt.format(s=self))

    def __getitem__(self, *args, **kwargs):
        """ get item will generate a subsample """
        return self.data.__getitem__(*args, **kwargs)

    def keys(self):
        """ Returns dataset content names """
        return self.data.keys()

    def setDescription(self, txt):
        """ Set the free-form description of the observations """
        self.desc = txt

    def setBadValue(self, val):
        """ Set the flux value flagging bad measurements to exclude from fits """
        self.badvalue = val

    def getFilters(self):
        """ Return the internal filter names """
        return self.filters

    def setFilters(self, filters):
        """ Set the internal filter names """
        self.filters = filters

    def setVegaFluxes(self, filters, vega_fname=None):
        """
        Set vega reference fluxes for conversions

        Parameters
        ----------
        filters : list
            list of filters using the internally normalized namings
        vega_fname : str, optional
            name of the file with the vega model spectrum
        """
        # for optimization purpose: pre-compute
        with Vega(source=vega_fname) as v:
            _, vega_flux, _ = v.getFlux(filters)
        self.vega_flux = vega_flux

    def getFlux(self, num, units=False):
        """
        Flux of an observation computed from normalized vega fluxes

        Parameters
        ----------
        num : int
            index of the star in the catalog to get measurement from
        units : bool
            if set returns the fluxes as an astropy Quantity in
            erg/s/cm^2/A
        Returns
        -------
        flux : ndarray[dtype=float, ndim=1]
            Measured integrated flux values throughout the filters
            in erg/s/cm^2/A
        """
        if self.vega_flux is None:
            raise ValueError("vega_flux not set, can't return fluxes")
        # case for using '_flux' result
        d = self.data[num]
        flux = (
            np.array([d[self.filter_aliases[ok]] for ok in self.filters])
            * self.vega_flux
        )
        if units is True:
            # bugfix: `units` is the boolean flag of this method -- the
            # original code did `flux * units.erg / ...`, which raised an
            # AttributeError. Import astropy's units module under another
            # name instead (astropy is already a dependency of this file).
            from astropy import units as u
            return flux * u.erg / (u.s * u.cm * u.cm * u.angstrom)
        else:
            return flux

    def getFluxerr(self, num):
        """returns the error on the flux of an observation from the number of
        counts (not used in the analysis)"""
        fluxerr = np.empty(len(self.filters), dtype=float)
        for ek, ok in enumerate(self.filters):
            fluxerr[ek] = self.data[ok + "_err"][num]
        return fluxerr

    def getObs(self, num=0):
        """ returns the flux"""
        if self.filters is None:
            raise AttributeError("No filter set provided.")
        flux = self.getFlux(num)
        return flux

    def readData(self):
        """ read the dataset from the original source file """
        if isinstance(self.inputFile, str):
            self.data = Table.read(self.inputFile)
        else:
            # already an in-memory table-like object
            self.data = self.inputFile

    def iterobs(self):
        """ yield getObs """
        for k in range(self.nObs):
            yield self.getObs(k)

    def enumobs(self):
        """ yield (index, getObs) pairs """
        for k in range(self.nObs):
            yield k, self.getObs(k)
def gen_SimObs_from_sedgrid(
    sedgrid,
    sedgrid_noisemodel,
    nsim=100,
    compl_filter="F475W",
    ranseed=None,
    vega_fname=None,
    weight_to_use='weight',
):
    """
    Generate simulated observations using the physics and observation grids.
    The priors are sampled as they give the ensemble model for the stellar
    and dust distributions (IMF, Av distribution etc.).
    The physics model gives the SEDs based on the priors.
    The observation model gives the noise, bias, and completeness all of
    which are used in simulating the observations.

    Currently written to only work for the toothpick noisemodel.

    Parameters
    ----------
    sedgrid: grid.SEDgrid instance
        model grid
    sedgrid_noisemodel: beast noisemodel instance
        noise model data
    nsim : int
        number of observations to simulate
    compl_filter : str
        filter to use for completeness (required for toothpick model)
    ranseed : int
        used to set the seed to make the results reproducible,
        useful for testing
    vega_fname : string
        filename for the vega info, useful for testing
    weight_to_use : string (default='weight')
        Set to either 'weight' (prior+grid), 'prior_weight', or 'grid_weight' to
        choose the weighting for SED selection.

    Returns
    -------
    simtable : astropy Table
        table giving the simulated observed fluxes as well as the
        physics model parameters
    """
    flux = sedgrid.seds
    n_models, n_filters = flux.shape
    # hack to get things to run for now
    # (avoid shadowing the builtin `filter` here)
    short_filters = [filt.split(sep="_")[-1].upper() for filt in sedgrid.filters]
    if compl_filter.upper() not in short_filters:
        raise NotImplementedError(
            "Requested completeness filter not present:"
            + compl_filter.upper()
            + "\nPossible filters:"
            + "\n".join(short_filters)
        )
    filter_k = short_filters.index(compl_filter.upper())
    print("Completeness from %s" % sedgrid.filters[filter_k])

    # cache the noisemodel values
    model_bias = sedgrid_noisemodel["bias"]
    model_unc = np.fabs(sedgrid_noisemodel["error"])
    model_compl = sedgrid_noisemodel["completeness"]

    # the combined prior and grid weights
    # using both as the grid weight needed to account for the finite size
    # of each grid bin
    # if we change to interpolating between grid points, need to rethink this
    gridweights = sedgrid[weight_to_use] * model_compl[:, filter_k]
    # need to sum to 1
    gridweights = gridweights / np.sum(gridweights)

    # set the random seed - mainly for testing
    # bugfix: the original guard was `if not None:`, which is always True;
    # only seed the generator when a seed was actually requested.
    if ranseed is not None:
        np.random.seed(ranseed)

    # sample to get the indexes of the picked models
    indx = range(n_models)
    sim_indx = np.random.choice(indx, size=nsim, p=gridweights)

    # get the vega fluxes for the filters
    _, vega_flux, _ = Vega(source=vega_fname).getFlux(sedgrid.filters)

    # setup the output table
    ot = Table()
    qnames = list(sedgrid.keys())
    # simulated data: one "<FILTER>_RATE" column per band, in vega-flux units
    for k, filt in enumerate(sedgrid.filters):
        colname = "%s_RATE" % filt.split(sep="_")[-1].upper()
        simflux_wbias = flux[sim_indx, k] + model_bias[sim_indx, k]
        simflux = np.random.normal(loc=simflux_wbias, scale=model_unc[sim_indx, k])
        ot[colname] = Column(simflux / vega_flux[k])
    # model parameters of the sampled grid points
    for qname in qnames:
        ot[qname] = Column(sedgrid[qname][sim_indx])
    return ot
| 29.977564 | 90 | 0.610713 | 5,631 | 0.602053 | 204 | 0.021811 | 59 | 0.006308 | 0 | 0 | 4,602 | 0.492035 |
aa177fa6cb0e8f56b34cd0aa79227c8c06214fe1 | 85 | py | Python | src/lib/__init__.py | nekoffski/bachelor-thesis | 23f350e2c5e0a184620bf66f851be6e94df7cdbb | [
"MIT"
] | 2 | 2021-08-09T09:00:34.000Z | 2021-08-20T09:31:00.000Z | src/lib/__init__.py | nekoffski/bachelor-thesis | 23f350e2c5e0a184620bf66f851be6e94df7cdbb | [
"MIT"
] | null | null | null | src/lib/__init__.py | nekoffski/bachelor-thesis | 23f350e2c5e0a184620bf66f851be6e94df7cdbb | [
"MIT"
] | null | null | null | from .cvision import *
from .models import *
from .net import *
from .util import *
| 14.166667 | 22 | 0.705882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
aa1b3aaba80e142eb448dc289c39eaf7a010960f | 60 | py | Python | workflow/scripts/helpers/__init__.py | IMS-Bio2Core-Facility/single_snake_sequencing | dddf8fc8960da8938484f98a4ea94f74a4fd5b18 | [
"MIT"
] | null | null | null | workflow/scripts/helpers/__init__.py | IMS-Bio2Core-Facility/single_snake_sequencing | dddf8fc8960da8938484f98a4ea94f74a4fd5b18 | [
"MIT"
] | null | null | null | workflow/scripts/helpers/__init__.py | IMS-Bio2Core-Facility/single_snake_sequencing | dddf8fc8960da8938484f98a4ea94f74a4fd5b18 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""BIC083-Eunyoung-Lee analysis."""
| 20 | 35 | 0.583333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 58 | 0.966667 |
aa1b53928b03ab8946043bdbe847b07bb95b6c93 | 2,355 | py | Python | testsuite/driver/src/case/case_executor/clean.py | openmaple/MapleCompiler | 1648e63144766563f1ec44a25e0b618415648627 | [
"MulanPSL-1.0"
] | 5 | 2019-09-02T04:44:52.000Z | 2021-11-08T12:23:51.000Z | testsuite/driver/src/case/case_executor/clean.py | venshine/OpenArkCompiler | 264cd4463834356658154f0d254672ef559f245f | [
"MulanPSL-1.0"
] | 2 | 2020-07-21T01:22:01.000Z | 2021-12-06T08:07:16.000Z | testsuite/driver/src/case/case_executor/clean.py | venshine/OpenArkCompiler | 264cd4463834356658154f0d254672ef559f245f | [
"MulanPSL-1.0"
] | 4 | 2019-09-02T04:46:52.000Z | 2020-09-10T11:30:03.000Z | #
# Copyright (c) [2021] Huawei Technologies Co.,Ltd.All rights reserved.
#
# OpenArkCompiler is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
#
import os
from case.component import Component
from env_var import EnvVar
from basic_tools.file import get_sub_files
class Clean(Component):
    """Component that removes temporary files/folders left by a test-case run.

    Temporary state is identified in two ways:
    - files not listed in the case's ``.raw_file_list.txt`` snapshot,
    - folders whose name ends with ``_tmp@``.
    When ``detail`` is truthy, the executed ``rm -rf`` commands are echoed.
    """

    def __init__(self, input: dict):
        self.case_name = input["case_name"]
        self.case_path = os.path.join(EnvVar.TEST_SUITE_ROOT, self.case_name)
        self.detail = input["detail"]

    def rm_tmp_files(self):
        """Delete files not recorded in .raw_file_list.txt (top level + subdirs)."""
        all_cur_files = [name for name in os.listdir(self.case_path) if not name.endswith('_tmp@')]
        if '.raw_file_list.txt' not in all_cur_files:
            # no snapshot -> nothing to compare against
            return
        with open(os.path.join(self.case_path, '.raw_file_list.txt'), 'r') as f:
            # snapshot format: top-level list, a '-----' separator, sub-file list
            raw_cur_files, raw_sub_files = f.read().split('\n-----\n')
        tmp_cur_files = list(set(all_cur_files) - set(raw_cur_files.split('\n')))
        if tmp_cur_files:
            os.system('rm -rf %s' % (' '.join([os.path.join(self.case_path, f) for f in tmp_cur_files])))
        all_sub_files = [name for name in get_sub_files(self.case_path) if '_tmp@' not in name]
        tmp_sub_files = list(set(all_sub_files) - set(raw_sub_files.split('\n')))
        if tmp_sub_files:
            os.system('rm -rf %s' % (' '.join([os.path.join(self.case_path, f) for f in tmp_sub_files])))
        if self.detail and (tmp_cur_files or tmp_sub_files):
            print("\033[1;32m [[ CMD : rm -rf %s ]]\033[0m" % (' '.join(tmp_cur_files + tmp_sub_files)))

    def rm_tmp_folders(self):
        """Delete every '*_tmp@' folder in the case directory."""
        del_file_list = [os.path.join(self.case_path, f) for f in os.listdir(self.case_path) if f.endswith("_tmp@")]
        # bugfix: deletion used to run only when self.detail was truthy;
        # now deletion is unconditional and only the log line is gated on
        # detail, matching rm_tmp_files above.
        if del_file_list:
            os.system('rm -rf ' + " ".join(del_file_list))
            if self.detail:
                print("\033[1;32m [[ CMD : rm -rf %s ]]\033[0m" % (' '.join(del_file_list)))

    def execute(self):
        """Entry point: remove temporary files, then temporary folders."""
        self.rm_tmp_files()
        self.rm_tmp_folders()

    def get_output(self):
        # this component produces no output
        pass
| 41.315789 | 115 | 0.6569 | 1,721 | 0.730786 | 0 | 0 | 0 | 0 | 0 | 0 | 731 | 0.310403 |
aa1ba1822280fb470d703129aa4c411447b4d5e2 | 5,749 | py | Python | teraserver/python/modules/FlaskModule/API/user/UserLogin.py | introlab/opentera | bfc4de672c9de40b7c9a659be2138731e7ee4e94 | [
"Apache-2.0"
] | 10 | 2020-03-16T14:46:06.000Z | 2022-02-11T16:07:38.000Z | teraserver/python/modules/FlaskModule/API/user/UserLogin.py | introlab/opentera | bfc4de672c9de40b7c9a659be2138731e7ee4e94 | [
"Apache-2.0"
] | 114 | 2019-09-16T13:02:50.000Z | 2022-03-22T19:17:36.000Z | teraserver/python/modules/FlaskModule/API/user/UserLogin.py | introlab/opentera | bfc4de672c9de40b7c9a659be2138731e7ee4e94 | [
"Apache-2.0"
] | null | null | null | from flask import session, request
from flask_restx import Resource, reqparse
from flask_babel import gettext
from modules.LoginModule.LoginModule import user_http_auth
from modules.FlaskModule.FlaskModule import user_api_ns as api
from opentera.redis.RedisRPCClient import RedisRPCClient
from opentera.modules.BaseModule import ModuleNames
# model = api.model('Login', {
# 'websocket_url': fields.String,
# 'user_uuid': fields.String,
# 'user_token': fields.String
# })
# Parser definition(s)
get_parser = api.parser()
class UserLogin(Resource):
    """HTTP Basic Auth login endpoint for users.

    On success returns the websocket URL, the user's uuid and a signed
    token; also registers the session id in redis with a 60s expiration.
    """
    def __init__(self, _api, *args, **kwargs):
        Resource.__init__(self, _api, *args, **kwargs)
        # flaskModule gives access to config, redis and the logger
        self.module = kwargs.get('flaskModule', None)
        self.parser = reqparse.RequestParser()
    @user_http_auth.login_required
    @api.expect(get_parser)
    @api.doc(description='Login to the server using HTTP Basic Authentification (HTTPAuth)')
    def get(self):
        session.permanent = True
        # Redis key is handled in LoginModule
        # Default server name/port come from config; reverse-proxy headers
        # (X_EXTERNALHOST / X_EXTERNALPORT) override them when present.
        servername = self.module.config.server_config['hostname']
        port = self.module.config.server_config['port']
        if 'X_EXTERNALHOST' in request.headers:
            if ':' in request.headers['X_EXTERNALHOST']:
                servername, port = request.headers['X_EXTERNALHOST'].split(':', 1)
            else:
                servername = request.headers['X_EXTERNALHOST']
        if 'X_EXTERNALPORT' in request.headers:
            port = request.headers['X_EXTERNALPORT']
        # Get user token key from redis
        from opentera.redis.RedisVars import RedisVars
        token_key = self.module.redisGet(RedisVars.RedisVar_UserTokenAPIKey)
        # Get token for user
        from opentera.db.models.TeraUser import TeraUser
        current_user = TeraUser.get_user_by_uuid(session['_user_id'])
        # Verify if user already logged in
        rpc = RedisRPCClient(self.module.config.redis_config)
        online_users = rpc.call(ModuleNames.USER_MANAGER_MODULE_NAME.value, 'online_users')
        if current_user.user_uuid in online_users:
            # refuse concurrent logins for the same user
            self.module.logger.log_warning(self.module.module_name,
                                           UserLogin.__name__,
                                           'get', 403,
                                           'User already logged in', current_user.to_json(minimal=True))
            return gettext('User already logged in.'), 403
        current_user.update_last_online()
        user_token = current_user.get_token(token_key)
        print('Login - setting key with expiration in 60s', session['_id'], session['_user_id'])
        # session id -> user uuid mapping, consumed by the websocket handshake
        self.module.redisSet(session['_id'], session['_user_id'], ex=60)
        # Return reply as json object
        reply = {"websocket_url": "wss://" + servername + ":" + str(port) + "/wss/user?id=" + session['_id'],
                 "user_uuid": session['_user_id'],
                 "user_token": user_token}
        # Verify client version (optional for now)
        # And add info to reply
        if 'X-Client-Name' in request.headers and 'X-Client-Version' in request.headers:
            try:
                # Extract information
                client_name = request.headers['X-Client-Name']
                client_version = request.headers['X-Client-Version']
                client_version_parts = client_version.split('.')
                # Load known version from database.
                from opentera.utils.TeraVersions import TeraVersions
                versions = TeraVersions()
                versions.load_from_db()
                # Verify if we have client information in DB
                client_info = versions.get_client_version_with_name(client_name)
                if client_info:
                    # We have something stored for this client, let's verify version numbers
                    # For now, we still allow login even when version mismatch
                    # Reply full version information
                    reply['version_latest'] = client_info.to_dict()
                    if client_info.version != client_version:
                        reply['version_error'] = gettext('Client version mismatch')
                    # If major version mismatch, kill client, first part of the version
                    stored_client_version_parts = client_info.version.split('.')
                    if len(stored_client_version_parts) and len(client_version_parts):
                        if stored_client_version_parts[0] != client_version_parts[0]:
                            # return 426 = upgrade required
                            self.module.logger.log_warning(self.module.module_name,
                                                           UserLogin.__name__,
                                                           'get', 426,
                                                           'Client major version too old, not accepting login',
                                                           stored_client_version_parts[0],
                                                           client_version_parts[0])
                            return gettext('Client major version too old, not accepting login'), 426
                else:
                    return gettext('Invalid client name :') + client_name, 403
            # NOTE(review): BaseException also swallows KeyboardInterrupt /
            # SystemExit -- `except Exception` would likely be safer; confirm.
            except BaseException as e:
                self.module.logger.log_error(self.module.module_name,
                                             UserLogin.__name__,
                                             'get', 500, 'Invalid client version handler', str(e))
                return gettext('Invalid client version handler') + str(e), 500
        return reply
| 48.310924 | 115 | 0.578361 | 5,212 | 0.906592 | 0 | 0 | 4,976 | 0.865542 | 0 | 0 | 1,490 | 0.259176 |
aa1c025ad6114c52c33a5057e8bad9a15f3be813 | 10,790 | py | Python | yemek.py | raydingoz/cerrahapp | 81a242dcf20d525f44a54f025414e9e8775caa37 | [
"MIT"
] | 2 | 2017-12-14T10:30:45.000Z | 2022-01-11T19:19:33.000Z | yemek.py | raydingoz/cerrahapp | 81a242dcf20d525f44a54f025414e9e8775caa37 | [
"MIT"
] | null | null | null | yemek.py | raydingoz/cerrahapp | 81a242dcf20d525f44a54f025414e9e8775caa37 | [
"MIT"
] | null | null | null | ##bu python kodu, selenium ve chromedriver ile çalışmakta, siteyi normal kullanıcı gibi ziyaret edip, gerekli verileri parse ediyor
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import os
from bs4 import BeautifulSoup
import time, datetime
import json
import requests
import sys
import ftplib
# Headless Chrome setup: the cafeteria site is rendered client-side, so a
# real browser is driven instead of plain HTTP requests.
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--window-size=1920x1080")
chrome_driver = os.getcwd() +"\\chromedriver.exe"
browser = webdriver.Chrome(chrome_options=chrome_options, executable_path=chrome_driver) #replace with .Firefox(), or with the browser of your choice
url = "http://sks.istanbulc.edu.tr/tr/yemeklistesi"
browser.get(url) #navigate to the page
time.sleep(10)
# Tab buttons of the menu widget (breakfast / lunch / vegan / lunchbox).
# The dinner ("aksam") button is commented out; the lunch tab is reused
# for it further below.
kah_buton = browser.find_element_by_xpath('//*[@id="4E00590053005F006D004C00500035005500720059003100"]/div/div/div[2]/ul/li[1]')
ogle_buton = browser.find_element_by_xpath('//*[@id="4E00590053005F006D004C00500035005500720059003100"]/div/div/div[2]/ul/li[2]')
#aksam_buton = browser.find_element_by_xpath('//*[@id="4E00590053005F006D004C00500035005500720059003100"]/div/div/div[2]/ul/li[3]')
vegan_buton = browser.find_element_by_xpath('//*[@id="4E00590053005F006D004C00500035005500720059003100"]/div/div/div[2]/ul/li[6]')
kumanya_buton = browser.find_element_by_xpath('//*[@id="4E00590053005F006D004C00500035005500720059003100"]/div/div/div[2]/ul/li[4]')
# Accumulator for all scraped menu entries, later dumped to yemek.json.
son = {}
son["yemek_liste"] = []
def kah_json_olustur():
    """Scrape the breakfast ("Kahvaltı") tab and append one entry per day to `son`."""
    time.sleep(5)
    kah = browser.find_element_by_id("tab-kahvalti")
    bs = BeautifulSoup(kah.get_attribute('innerHTML'), "lxml")
    # one <table> per day: row 0 = date, row 1 = dishes, row 2 = calories
    for h in bs.find_all('table'):
        b = h.find_all('tr')
        dishes = []
        # The breakfast tab appears to carry extra leading entries, hence
        # indices 2-5 here (the other tabs use 1-4) -- TODO confirm.
        for idx in (2, 3, 4, 5):
            try:
                dish = b[1].text.split('\n')[idx]
            except Exception:
                dish = "---"
                print("Oops!", sys.exc_info()[0], "occured.")
            # placeholder for empty cells
            dishes.append(dish or "---")
        try:
            calori = b[2].text.replace("\n", "")
        except Exception:
            calori = "---"
            print("Oops!", sys.exc_info()[0], "occured.")
        # re-format the date as an SQL-style timestamp
        dt = datetime.datetime.strptime(b[0].text.replace("\n", ""), '%d.%m.%Y')
        dt = dt.strftime('%Y-%m-%d %H:%M:%S')
        ta = {"tarih": dt, "ogun": "Kahvaltı", "yemek1": dishes[0], "yemek2": dishes[1],
              "yemek3": dishes[2], "yemek4": dishes[3], "calori": calori}
        son["yemek_liste"].append(ta)
def ogle_json_olustur():
    """Scrape the lunch ("Öğle Yemeği") tab and append one entry per day to `son`."""
    time.sleep(5)
    kah = browser.find_element_by_id("tab-ogle")
    bs = BeautifulSoup(kah.get_attribute('innerHTML'), "lxml")
    # one <table> per day: row 0 = date, row 1 = dishes, row 2 = calories
    for h in bs.find_all('table'):
        b = h.find_all('tr')
        dishes = []
        for idx in (1, 2, 3, 4):
            try:
                dish = b[1].text.split('\n')[idx]
            except Exception:
                dish = "---"
                print("Oops!", sys.exc_info()[0], "occured.")
            # placeholder for empty cells
            dishes.append(dish or "---")
        try:
            calori = b[2].text.replace("\n", "")
        except Exception:
            calori = "---"
            print("Oops!", sys.exc_info()[0], "occured.")
        # re-format the date as an SQL-style timestamp
        dt = datetime.datetime.strptime(b[0].text.replace("\n", ""), '%d.%m.%Y')
        dt = dt.strftime('%Y-%m-%d %H:%M:%S')
        ta = {"tarih": dt, "ogun": "Öğle Yemeği", "yemek1": dishes[0], "yemek2": dishes[1],
              "yemek3": dishes[2], "yemek4": dishes[3], "calori": calori}
        son["yemek_liste"].append(ta)
def aksam_json_olustur():
    """Scrape the dinner ("Akşam Yemeği") menu and append one entry per day to `son`.

    NOTE(review): this reads the lunch tab ("tab-ogle"); the dedicated
    dinner button is commented out at the top of the script, so the site
    presumably serves the same menu for dinner -- confirm.
    """
    time.sleep(5)
    kah = browser.find_element_by_id("tab-ogle")
    bs = BeautifulSoup(kah.get_attribute('innerHTML'), "lxml")
    # one <table> per day: row 0 = date, row 1 = dishes, row 2 = calories
    for h in bs.find_all('table'):
        b = h.find_all('tr')
        dishes = []
        for idx in (1, 2, 3, 4):
            try:
                dish = b[1].text.split('\n')[idx]
            except Exception:
                dish = "---"
                print("Oops!", sys.exc_info()[0], "occured.")
            # placeholder for empty cells
            dishes.append(dish or "---")
        try:
            calori = b[2].text.replace("\n", "")
        except Exception:
            calori = "---"
            print("Oops!", sys.exc_info()[0], "occured.")
        # re-format the date as an SQL-style timestamp
        dt = datetime.datetime.strptime(b[0].text.replace("\n", ""), '%d.%m.%Y')
        dt = dt.strftime('%Y-%m-%d %H:%M:%S')
        ta = {"tarih": dt, "ogun": "Akşam Yemeği", "yemek1": dishes[0], "yemek2": dishes[1],
              "yemek3": dishes[2], "yemek4": dishes[3], "calori": calori}
        son["yemek_liste"].append(ta)
def vegan_json_olustur():
    """Scrape the vegan tab and append one entry per day to `son`."""
    time.sleep(5)
    kah = browser.find_element_by_id("tab-vegan")
    bs = BeautifulSoup(kah.get_attribute('innerHTML'), "lxml")
    # one <table> per day: row 0 = date, row 1 = dishes, row 2 = calories
    for h in bs.find_all('table'):
        b = h.find_all('tr')
        dishes = []
        for idx in (1, 2, 3, 4):
            try:
                dish = b[1].text.split('\n')[idx]
            except Exception:
                dish = "---"
                print("Oops!", sys.exc_info()[0], "occured.")
            # placeholder for empty cells
            dishes.append(dish or "---")
        try:
            calori = b[2].text.replace("\n", "")
        except Exception:
            calori = "---"
            print("Oops!", sys.exc_info()[0], "occured.")
        # re-format the date as an SQL-style timestamp
        dt = datetime.datetime.strptime(b[0].text.replace("\n", ""), '%d.%m.%Y')
        dt = dt.strftime('%Y-%m-%d %H:%M:%S')
        ta = {"tarih": dt, "ogun": "Vegan", "yemek1": dishes[0], "yemek2": dishes[1],
              "yemek3": dishes[2], "yemek4": dishes[3], "calori": calori}
        son["yemek_liste"].append(ta)
def kumanya_json_olustur():
    """Scrape the lunchbox ("Kumanya") tab and append one entry per day to `son`.

    NOTE(review): the stored meal label is "Öğle Yemeği", which looks like a
    copy-paste from the lunch scraper; kept as-is to preserve the emitted
    JSON -- confirm whether it should read "Kumanya".
    """
    time.sleep(5)
    kah = browser.find_element_by_id("tab-kumanya")
    bs = BeautifulSoup(kah.get_attribute('innerHTML'), "lxml")
    # one <table> per day: row 0 = date, row 1 = dishes, row 2 = calories
    for h in bs.find_all('table'):
        b = h.find_all('tr')
        dishes = []
        for idx in (1, 2, 3, 4):
            try:
                dish = b[1].text.split('\n')[idx]
            except Exception:
                dish = "---"
                print("Oops!", sys.exc_info()[0], "occured.")
            # placeholder for empty cells
            dishes.append(dish or "---")
        try:
            calori = b[2].text.replace("\n", "")
        except Exception:
            calori = "---"
            print("Oops!", sys.exc_info()[0], "occured.")
        # re-format the date as an SQL-style timestamp
        dt = datetime.datetime.strptime(b[0].text.replace("\n", ""), '%d.%m.%Y')
        dt = dt.strftime('%Y-%m-%d %H:%M:%S')
        ta = {"tarih": dt, "ogun": "Öğle Yemeği", "yemek1": dishes[0], "yemek2": dishes[1],
              "yemek3": dishes[2], "yemek4": dishes[3], "calori": calori}
        son["yemek_liste"].append(ta)
def dosya_olsutur():
    """Serialize the scraped menu (`son`) to yemek.json in the working directory."""
    with open('yemek.json', 'w') as outfile:
        outfile.write(json.dumps(son))
def mysql_isleri():
    """Ping the backend endpoint that refreshes the MySQL data (URL redacted in this copy)."""
    requests.get("*****")
def ftp_yukle():
    """Upload yemek.json to the (redacted) FTP server and trigger the DB refresh.

    Errors during login or upload are printed and otherwise ignored, so the
    script can finish its run even when the remote side is unavailable.
    """
    print("----------------")
    print(" ")
    print("ftp deneniyor...")
    # ftplib is already imported at module level; no local import needed
    ftp = ftplib.FTP()
    host = "****"
    port = 21
    ftp.connect(host, port)
    print(ftp.getwelcome())
    try:
        print("Giriş Yapılıyor...")
        ftp.login("****", "****")
        time.sleep(6)
        mysql_isleri()
        print("Başarılı")
    except Exception as e:
        print(e)
    try:
        # context manager so the local file is always closed (the original
        # leaked the handle)
        with open('yemek.json', 'rb') as file:
            ftp.storbinary('STOR yemek.json', file)  # send the file
    except Exception as e:
        print(e)
    ftp.quit()
    print(" ")
    print("----------------")
def sonuc():
    """Write the JSON output file, then upload it over FTP."""
    dosya_olsutur()
    ftp_yukle()
# Main flow: click each tab, scrape it, and keep going if any tab fails.
try:
    kah_buton.click()
    kah_json_olustur()
except:
    print("Kahvaltı oluşturalamadı", sys.exc_info()[0])
try:
    ogle_buton.click()
    ogle_json_olustur()
except:
    print("Öğle oluşturalamadı", sys.exc_info()[0])
# Dinner reuses the lunch tab (the dedicated button is commented out above).
try:
    ogle_buton.click()
    aksam_json_olustur()
except:
    print("Akşam oluşturalamadı", sys.exc_info()[0])
try:
    vegan_buton.click()
    vegan_json_olustur()
except:
    print("Vegan oluşturalamadı", sys.exc_info()[0])
try:
    kumanya_buton.click()
    kumanya_json_olustur()
except:
    print("Kumanya oluşturalamadı", sys.exc_info()[0])
browser.close()
print(json.dumps(son))
print("-------------")
# Persist + upload the results, then trigger the backend refresh once more.
sonuc()
time.sleep(5)
mysql_isleri()
print("-----Güncelleme Bitti----")
| 29.80663 | 149 | 0.510843 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,764 | 0.255335 |
aa1c0ae5f77bc0f8eac0e19d1462d0bcf251eb0b | 6,542 | py | Python | our_env.py | Venatoral/Slight | 0156ffa39011b035ff1523da4967b4b1c75108d4 | [
"Apache-2.0"
] | null | null | null | our_env.py | Venatoral/Slight | 0156ffa39011b035ff1523da4967b4b1c75108d4 | [
"Apache-2.0"
] | null | null | null | our_env.py | Venatoral/Slight | 0156ffa39011b035ff1523da4967b4b1c75108d4 | [
"Apache-2.0"
] | null | null | null | from typing import List, overload
from flow.envs.multiagent.traffic_light_grid import MultiTrafficLightGridPOEnv
from flow.envs.traffic_light_grid import TrafficLightGridPOEnv
from gym.spaces import Box, Discrete
import numpy as np
ID_IDX = 1
class SeqTraffiLightEnv(TrafficLightGridPOEnv):
    """Partially-observed traffic-light grid env that returns per-light
    observation vectors as a list (one entry per traffic light)."""

    def __init__(self, env_params, sim_params, network, simulator):
        super().__init__(env_params, sim_params, network, simulator=simulator)
        # number of nearest lights to observe, defaults to 4
        self.num_local_lights = env_params.additional_params.get(
            "num_local_lights", 4)
        # number of nearest edges to observe, defaults to 4
        self.num_local_edges = env_params.additional_params.get(
            "num_local_edges", 4)
    @property
    def observation_space(self):
        """State space that is partially observed.
        Velocities, distance to intersections, edge number (for nearby
        vehicles) from each direction, local edge information, and traffic
        light state.
        """
        # per-light feature length: 3 features x 4 approaches x num_observed
        # vehicles, plus density+speed for local edges, plus direction+yellow
        # for this light and its neighbors
        tl_box = Box(
            low=0.,
            high=1,
            shape=(
                self.num_traffic_lights,
                3 * 4 * self.num_observed +
                2 * self.num_local_edges +
                2 * (1 + self.num_local_lights),
            ),
            dtype=np.float32)
        return tl_box
    def get_state(self):
        """Observations for each traffic light agent.
        :return: dictionary which contains agent-wise observations as follows:
        - For the self.num_observed number of vehicles closest and incoming
        towards traffic light agent, gives the vehicle velocity, distance to
        intersection, edge number.
        - For edges in the network, gives the density and average velocity.
        - For the self.num_local_lights number of nearest lights (itself
        included), gives the traffic light information, including the last
        change time, light direction (i.e. phase), and a currently_yellow flag.
        """
        # Normalization factors
        max_speed = max(
            self.k.network.speed_limit(edge)
            for edge in self.k.network.get_edge_list())
        grid_array = self.net_params.additional_params["grid_array"]
        max_dist = max(grid_array["short_length"], grid_array["long_length"],
                       grid_array["inner_length"])
        # TODO(cathywu) refactor TrafficLightGridPOEnv with convenience
        # methods for observations, but remember to flatten for single-agent
        # Observed vehicle information
        speeds = []
        dist_to_intersec = []
        edge_number = []
        all_observed_ids = []
        # one pass per intersection: collect normalized speed / distance /
        # edge id for the closest vehicles on each incoming edge
        for _, edges in self.network.node_mapping:
            local_speeds = []
            local_dists_to_intersec = []
            local_edge_numbers = []
            for edge in edges:
                observed_ids = \
                    self.get_closest_to_intersection(edge, self.num_observed)
                all_observed_ids.append(observed_ids)
                # check which edges we have so we can always pad in the right
                # positions
                local_speeds.extend(
                    [self.k.vehicle.get_speed(veh_id) / max_speed for veh_id in
                     observed_ids])
                local_dists_to_intersec.extend([(self.k.network.edge_length(
                    self.k.vehicle.get_edge(
                        veh_id)) - self.k.vehicle.get_position(
                    veh_id)) / max_dist for veh_id in observed_ids])
                local_edge_numbers.extend([self._convert_edge(
                    self.k.vehicle.get_edge(veh_id)) / (
                    self.k.network.network.num_edges - 1) for veh_id in
                    observed_ids])
                if len(observed_ids) < self.num_observed:
                    # pad with neutral values so the feature vector keeps a
                    # fixed length
                    diff = self.num_observed - len(observed_ids)
                    local_speeds.extend([1] * diff)
                    local_dists_to_intersec.extend([1] * diff)
                    local_edge_numbers.extend([0] * diff)
            speeds.append(local_speeds)
            dist_to_intersec.append(local_dists_to_intersec)
            edge_number.append(local_edge_numbers)
        # Edge information
        density = []
        velocity_avg = []
        for edge in self.k.network.get_edge_list():
            ids = self.k.vehicle.get_ids_by_edge(edge)
            if len(ids) > 0:
                # TODO(cathywu) Why is there a 5 here?
                density += [5 * len(ids) / self.k.network.edge_length(edge)]
                velocity_avg += [np.mean(
                    [self.k.vehicle.get_speed(veh_id) for veh_id in
                     ids]) / max_speed]
            else:
                density += [0]
                velocity_avg += [0]
        density = np.array(density)
        velocity_avg = np.array(velocity_avg)
        self.observed_ids = all_observed_ids
        # Traffic light information
        direction = self.direction.flatten()
        currently_yellow = self.currently_yellow.flatten()
        # This is a catch-all for when the relative_node method returns a -1
        # (when there is no node in the direction sought). We add a last
        # item to the lists here, which will serve as a default value.
        # TODO(cathywu) are these values reasonable?
        direction = np.append(direction, [0])
        currently_yellow = np.append(currently_yellow, [1])
        obs = []
        # obs -> [num_light, observation]
        node_to_edges = self.network.node_mapping
        for rl_id in self.k.traffic_light.get_ids():
            # light id has the form "center<N>"; N indexes node_to_edges
            rl_id_num = int(rl_id.split("center")[ID_IDX])
            local_edges = node_to_edges[rl_id_num][1]
            local_edge_numbers = [self.k.network.get_edge_list().index(e)
                                  for e in local_edges]
            # this light plus its four grid neighbors (missing -> -1, which
            # selects the default values appended above)
            local_id_nums = [rl_id_num, self._get_relative_node(rl_id, "top"),
                             self._get_relative_node(rl_id, "bottom"),
                             self._get_relative_node(rl_id, "left"),
                             self._get_relative_node(rl_id, "right")]
            observation = np.array(np.concatenate(
                [speeds[rl_id_num], dist_to_intersec[rl_id_num],
                 edge_number[rl_id_num], density[local_edge_numbers],
                 velocity_avg[local_edge_numbers],
                 direction[local_id_nums], currently_yellow[local_id_nums]
                ]))
            obs.append(observation)
        return obs
| 42.480519 | 79 | 0.597371 | 6,295 | 0.962244 | 0 | 0 | 599 | 0.091562 | 0 | 0 | 1,686 | 0.257719 |
aa1c1132f05dc4d99bcca0197b212ef7e7e8de3b | 1,449 | py | Python | example/nodelabeled-unweighted.py | yhtang/GraphDot | 3d5ed4fbb2f6912052baa42780b436da76979691 | [
"BSD-3-Clause-LBNL"
] | 9 | 2020-02-14T18:07:39.000Z | 2021-12-15T12:07:31.000Z | example/nodelabeled-unweighted.py | yhtang/graphdot | 3d5ed4fbb2f6912052baa42780b436da76979691 | [
"BSD-3-Clause-LBNL"
] | 3 | 2020-03-19T19:07:26.000Z | 2021-02-24T06:08:51.000Z | example/nodelabeled-unweighted.py | yhtang/graphdot | 3d5ed4fbb2f6912052baa42780b436da76979691 | [
"BSD-3-Clause-LBNL"
] | 3 | 2019-10-17T06:11:18.000Z | 2021-05-07T11:56:33.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''An example of similarity comparison between node-labeled but unweighted
graphs using the marginalized graph kernel.'''
import numpy as np
import networkx as nx
from graphdot import Graph
from graphdot.kernel.marginalized import MarginalizedGraphKernel
from graphdot.microkernel import (
TensorProduct,
SquareExponential,
KroneckerDelta,
Constant
)
# Graph 1: a two-node pair -- {1.0, 1} -- {2.0, 1}
g1 = nx.Graph()
g1.add_nodes_from([
    (0, {'radius': 1.0, 'category': 1}),
    (1, {'radius': 2.0, 'category': 1}),
])
g1.add_edge(0, 1)
# Graph 2: a three-node chain -- {1.0, 1} -- {2.0, 1} -- {1.0, 2}
g2 = nx.Graph()
g2.add_nodes_from([
    (0, {'radius': 1.0, 'category': 1}),
    (1, {'radius': 2.0, 'category': 1}),
    (2, {'radius': 1.0, 'category': 2}),
])
g2.add_edges_from([(0, 1), (1, 2)])
# Graph 3: a three-node triangle
#   {1.0, 1} -- {2.0, 1}
#         \      /
#        {1.0, 2}
g3 = nx.Graph()
g3.add_nodes_from([
    (0, {'radius': 1.0, 'category': 1}),
    (1, {'radius': 2.0, 'category': 1}),
    (2, {'radius': 1.0, 'category': 2}),
])
g3.add_edges_from([(0, 1), (0, 2), (1, 2)])
# Node kernel: compare radius smoothly and category exactly; the edge
# kernel is uniform because the graphs carry no edge labels or weights.
knode = TensorProduct(radius=SquareExponential(0.5),
                      category=KroneckerDelta(0.5))
kedge = Constant(1.0)
# Compose the marginalized graph kernel and evaluate all pairwise similarities.
mlgk = MarginalizedGraphKernel(knode, kedge, q=0.05)
R = mlgk([Graph.from_networkx(g) for g in (g1, g2, g3)])
# Normalize so that K[i, j] = R[i, j] / sqrt(R[i, i] * R[j, j]).
d = np.diag(R)**-0.5
K = np.diag(d).dot(R).dot(np.diag(d))
print(K)
| 25.421053 | 74 | 0.675638 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 411 | 0.283644 |
aa1c3ea5d289362a0fc7eea0894a32edac8f196e | 9,735 | py | Python | sparse_causal_model_learner_rl/annealer/threshold.py | sergeivolodin/causality-disentanglement-rl | 5a41b4a2e3d85fa7e9c8450215fdc6cf954df867 | [
"CC0-1.0"
] | 2 | 2020-12-11T05:26:24.000Z | 2021-04-21T06:12:58.000Z | sparse_causal_model_learner_rl/annealer/threshold.py | sergeivolodin/causality-disentanglement-rl | 5a41b4a2e3d85fa7e9c8450215fdc6cf954df867 | [
"CC0-1.0"
] | 9 | 2020-04-30T16:29:50.000Z | 2021-03-26T07:32:18.000Z | sparse_causal_model_learner_rl/annealer/threshold.py | sergeivolodin/causality-disentanglement-rl | 5a41b4a2e3d85fa7e9c8450215fdc6cf954df867 | [
"CC0-1.0"
] | null | null | null | import gin
import torch
import logging
from sparse_causal_model_learner_rl.metrics import find_value, find_key
@gin.configurable
def AnnealerThresholdSelector(config, config_object, epoch_info, temp,
                              adjust_every=100,
                              multiplier=10, # margin over the non-sparse loss: added if additive=True, else multiplied
                              source_quality_key=None,
                              non_sparse_threshold_disable=None,
                              additive=True,
                              source_fit_loss_key='no_sparse_fit',
                              gin_variable='ThresholdAnnealer.fit_threshold',
                              **kwargs):
    """Adjust the fit threshold based on a non-sparse model's loss.

    Reads the non-sparse fit loss (metric ``source_fit_loss_key``) from
    ``epoch_info`` and, at most once every ``adjust_every`` epochs, rebinds
    the gin parameter named by ``gin_variable`` to that loss plus
    ``multiplier`` (``additive=True``) or times ``multiplier`` otherwise.
    If the non-sparse loss is still at or above
    ``non_sparse_threshold_disable`` the suggested threshold is forced to
    0.0 (per the original comment this disables annealing until the
    reference model reaches its target performance).

    ``config`` is returned unmodified; all state lives in ``temp`` and the
    gin binding. ``config_object`` and ``source_quality_key`` are unused
    here (accepted for callback-signature compatibility).
    """
    try:
        non_sparse_fit_loss = find_value(epoch_info, source_fit_loss_key)
        logging.info(f"Threshold detector found non-sparse loss {non_sparse_fit_loss}")
    except AssertionError as e:
        # Metric not present this epoch: nothing to do.
        return config
    if 'last_hyper_adjustment' not in temp:
        temp['last_hyper_adjustment'] = 0
    i = epoch_info['epochs']
    if additive:
        temp['suggested_hyper'] = non_sparse_fit_loss + multiplier
    else:
        temp['suggested_hyper'] = non_sparse_fit_loss * multiplier
    # disable annealing in case if target performance in terms of non-sparse loss is not reached
    if non_sparse_threshold_disable is not None and non_sparse_fit_loss >= non_sparse_threshold_disable:
        temp['suggested_hyper'] = 0.0
    # Apply the suggestion only after the cool-down period has elapsed.
    if temp.get('suggested_hyper', None) is not None and (i - temp['last_hyper_adjustment'] >= adjust_every):
        with gin.unlock_config():
            gin.bind_parameter(gin_variable, temp['suggested_hyper'])
        temp['suggested_hyper'] = None
        temp['last_hyper_adjustment'] = i
    return config
@gin.configurable
def turn_on_features(m, ctx, logits_on=1.5, gap_threshold=1.1, loss_fcn=None):
    """Turn on features giving better loss.

    For every (output feature, input feature) pair, temporarily forces the
    corresponding switch logits to the "0" state ``(5, -5)`` and then the
    "1" state ``(-5, 5)``, evaluating ``loss_fcn(**ctx)`` in each
    configuration; the original logits are restored after each probe.
    When the "off" loss exceeds the "on" loss by more than
    ``gap_threshold`` (ratio test), the switch is set to
    ``(-logits_on, logits_on)``.

    :param m: model exposing ``n_features``, ``n_additional_features``,
        the ``models`` / ``additional_models`` name lists, and a
        ``.switch.logits`` tensor on each sub-model.
    :param ctx: keyword-argument dict forwarded to ``loss_fcn``.
    :param loss_fcn: callable returning a scalar tensor or a dict with a
        ``'loss'`` entry.
    """
    with torch.no_grad():
        for fout in range(m.n_features + m.n_additional_features):
            # Additional features live in separate per-feature models.
            if fout >= m.n_features:
                fout_add = fout - m.n_features
                logits = getattr(m, m.additional_models[fout_add]).switch.logits
            else:
                logits = getattr(m, m.models[fout]).switch.logits
            for fin in range(m.n_features):
                orig_logits0, orig_logits1 = logits[0, fin].item(), logits[1, fin].item()
                # trying 0...
                logits[0, fin], logits[1, fin] = 5, -5
                loss_0 = loss_fcn(**ctx)
                if isinstance(loss_0, dict):
                    loss_0 = loss_0['loss']
                loss_0 = loss_0.item()
                # trying 1...
                logits[0, fin], logits[1, fin] = -5, 5
                loss_1 = loss_fcn(**ctx)
                if isinstance(loss_1, dict):
                    loss_1 = loss_1['loss']
                loss_1 = loss_1.item()
                # Restore the untouched state before deciding.
                logits[0, fin], logits[1, fin] = orig_logits0, orig_logits1
                # NOTE(review): divides by loss_1 without a zero guard —
                # assumes the loss is strictly positive; confirm.
                loss_ratio = loss_0 / loss_1
                if loss_ratio > gap_threshold:
                    logging.info(f'Turn on feature {fout} <- {fin}')
                    logits[0, fin], logits[1, fin] = -logits_on, logits_on
@gin.configurable
def ModelResetter(config, epoch_info, temp,
                  learner=None,
                  gin_annealer_cls='ThresholdAnnealer',
                  trainables=None,
                  reset_weights=True,
                  reset_logits=True,
                  reset_optimizers=False,
                  grace_epochs=2000, # give that many epochs to try to recover on its own
                  last_context=None,
                  reset_turn_on=False,
                  new_logits=0.0, **kwargs):
    """Re-initialize the model when the fit loss stays above threshold too long.

    Tracks in ``temp['first_not_good']`` the first epoch at which the metric
    named by ``<gin_annealer_cls>.source_metric_key`` exceeded
    ``<gin_annealer_cls>.fit_threshold``.  Once the condition has persisted
    for ``grace_epochs`` epochs, optionally re-initializes model weights,
    switch logits and optimizers, and re-runs ``turn_on_features``.
    Returns ``config`` unchanged.
    """
    source_metric_key = gin.query_parameter(f"{gin_annealer_cls}.source_metric_key")
    try:
        fit_loss = find_value(epoch_info, source_metric_key)
        # logging.warning("Cannot find loss with sparsity, defaulting to fit loss")
    except AssertionError as e:
        # Metric missing this epoch: nothing to do.
        return config
    if 'first_not_good' not in temp:
        temp['first_not_good'] = None
    fit_threshold = gin.query_parameter(f"{gin_annealer_cls}.fit_threshold")
    is_good = fit_loss <= fit_threshold
    i = epoch_info['epochs']
    logging.info(f"Resetter found loss {fit_loss} threshold {fit_threshold}, good {is_good} epoch {i} fng {temp['first_not_good']}")
    if is_good:
        # Recovered: forget any pending reset countdown.
        temp['first_not_good'] = None
    elif temp['first_not_good'] is None:
        # Just went bad: start the grace-period countdown.
        temp['first_not_good'] = i
    elif i - temp['first_not_good'] >= grace_epochs:
        if reset_weights:
            # Re-initialize every non-switch parameter: zeros for biases,
            # Xavier-uniform for everything else.
            for key, param in trainables.get('model').named_parameters():
                if 'switch' not in key:
                    logging.info(f'Resetting parameter {key}')
                    if 'bias' in key:
                        torch.nn.init.zeros_(param)
                    else:
                        torch.nn.init.xavier_uniform_(param)
        if reset_logits:
            # For switch entries whose row-1 logit is below -new_logits,
            # reset the pair to (row0=new_logits, row1=-new_logits).
            for p in trainables.get('model').switch__params:
                logging.info(f"Resetting switch parameter with shape {p.data.shape}")
                p_orig = p.data.detach().clone()
                p.data[1, p_orig[1] < -new_logits] = -new_logits
                p.data[0, p_orig[1] < -new_logits] = new_logits
        if reset_optimizers:
            learner.create_optimizers()
        if reset_turn_on:
            turn_on_features(m=learner.model, ctx=last_context)
        temp['first_not_good'] = None
@gin.configurable
def ThresholdAnnealer(config, epoch_info, temp,
                      fit_threshold=1e-2,
                      min_hyper=1e-5,
                      learner=None,
                      max_hyper=100,
                      freeze_time=100,
                      freeze_threshold_probas=0.8,
                      adjust_every=100,
                      reset_on_fail=False,
                      source_metric_key='with_sparse_fit',
                      factor=0.5, # if cool/warm not specified, use this one for both
                      factor_cool=None, # when increasing the coefficient (regularization -> cooling)
                      factor_heat=None, # when decreasing the coefficient (no reg -> warming)
                      emergency_heating=False,
                      **kwargs):
    """Increase sparsity if fit loss is low, decrease otherwise.

    Annealing-style controller for the sparsity coefficient
    ``config['losses']['sparsity']['coeff']``:

    * fit loss > ``fit_threshold``  -> "heat": multiply the coefficient by
      ``factor_heat`` (or jump straight to ``min_hyper`` if ``reset_on_fail``);
    * fit loss <= ``fit_threshold`` -> "cool": divide it by ``factor_cool``.

    The coefficient is clamped to [``min_hyper``, ``max_hyper``] and changed
    at most once every ``adjust_every`` epochs (``emergency_heating`` lets a
    heating step bypass the cool-down).  When the direction flips from
    cooling (or "same") to heating, switch probabilities are capped at
    ``freeze_threshold_probas`` for ``freeze_time`` epochs.  Mutates and
    returns ``config``.
    """
    try:
        fit_loss = find_value(epoch_info, source_metric_key)
        # logging.warning("Cannot find loss with sparsity, defaulting to fit loss")
        logging.info(f"Annealer found loss {fit_loss} {source_metric_key}")
    except AssertionError as e:
        #logging.warning(f"Annealer source metric not found: {source_metric_key}, {e}")
        return config
    # fit_loss = find_value(epoch_info, '/fit/value')
    if factor_cool is None:
        factor_cool = factor
    if factor_heat is None:
        factor_heat = factor
    need_heating = False
    if 'last_hyper_adjustment' not in temp:
        temp['last_hyper_adjustment'] = 0
    i = epoch_info['epochs']
    # While a freeze is active, cap switch probabilities and skip annealing.
    if temp.get('last_freeze_start', -1) >= 0:
        if i - temp.get('last_freeze_start') >= freeze_time:
            logging.warning(f"Freezing finished at {i}!")
            del temp['last_freeze_start']
        else:
            if freeze_threshold_probas is not None:
                p = learner.model.model.switch.probas
                p.data[p.data > freeze_threshold_probas] = freeze_threshold_probas
            return config
    if fit_loss > fit_threshold: # FREE ENERGY (loss) IS HIGH -> NEED WARMING (decrease regul coeff)
        if reset_on_fail:
            temp['suggested_hyper'] = min_hyper
        else:
            if config['losses']['sparsity']['coeff'] > min_hyper:
                temp['suggested_hyper'] = config['losses']['sparsity']['coeff'] * factor_heat
                need_heating = True
                temp['suggested_hyper'] = max(min_hyper, temp['suggested_hyper'])
    else: # FREE ENERGY (loss) is low -> CAN DO COOLING (increase regul coeff)
        if config['losses']['sparsity']['coeff'] < max_hyper:
            temp['suggested_hyper'] = config['losses']['sparsity']['coeff'] / factor_cool
            temp['suggested_hyper'] = min(max_hyper, temp['suggested_hyper'])
    epochs_enough = (i - temp['last_hyper_adjustment'] >= adjust_every)
    if emergency_heating and need_heating:
        epochs_enough = True
    if temp.get('suggested_hyper', None) is not None and epochs_enough:
        if temp['suggested_hyper'] < config['losses']['sparsity']['coeff']:
            direction = 'heat'
        elif temp['suggested_hyper'] > config['losses']['sparsity']['coeff']:
            direction = 'cool'
        else:
            direction = 'same'
        # if were cooling down but now have to warm...
        # freezing the model for some time
        if 'last_direction' in temp and temp['last_direction'] in ['cool', 'same'] and direction == 'heat':
            temp['last_freeze_start'] = i
            logging.warning(f"Starting model freeze at {i}")
        temp['last_direction'] = direction
        config['losses']['sparsity']['coeff'] = temp['suggested_hyper']
        temp['suggested_hyper'] = None
        temp['last_hyper_adjustment'] = i
    return config
@gin.configurable
def threshold_annealer_threshold(**kwargs):
    """Return the current gin-bound value of ThresholdAnnealer.fit_threshold."""
    return gin.query_parameter('ThresholdAnnealer.fit_threshold')
| 41.781116 | 133 | 0.576271 | 0 | 0 | 0 | 0 | 9,592 | 0.985311 | 0 | 0 | 2,666 | 0.273857 |
aa1d2d976b2cf58daaaccaed749a8600fff9d87f | 3,164 | py | Python | Episode01-Move_Player/Pygame/Shmup tutorial-01.py | Inksaver/Shmup_With_Pygame_Love2D_Monogame | 84838516d9dd9d6639b1b699dca546bfdfec73dc | [
"CC0-1.0"
] | 1 | 2022-02-01T04:05:04.000Z | 2022-02-01T04:05:04.000Z | Episode01-Move_Player/Pygame/Shmup tutorial-01.py | Inksaver/Shmup_With_Pygame_Love2D_Monogame | 84838516d9dd9d6639b1b699dca546bfdfec73dc | [
"CC0-1.0"
] | null | null | null | Episode01-Move_Player/Pygame/Shmup tutorial-01.py | Inksaver/Shmup_With_Pygame_Love2D_Monogame | 84838516d9dd9d6639b1b699dca546bfdfec73dc | [
"CC0-1.0"
] | null | null | null | # https://www.youtube.com/watch?v=nGufy7weyGY
'''
Only 1 player so use code module (static class) for player
and a separate shared code module for global variables
'''
# import libraries
import pygame, os
import shared, player
def process_events() -> (object, object, bool):
    """Poll pygame events; return (keystate, last keydown, quit requested)."""
    last_key_down = None
    wants_quit = False
    for event in pygame.event.get():
        if event.type == pygame.QUIT:           # window close button clicked
            wants_quit = True
        elif event.type == pygame.KEYDOWN:      # a key went down this frame
            last_key_down = event.key
    keystate = pygame.key.get_pressed()         # continuous key-state snapshot
    # Holding Escape also requests quit.
    wants_quit = wants_quit or bool(keystate[pygame.K_ESCAPE])
    # usage: if key_down == pygame.K_RETURN:, if keystate[pygame.K_UP]:
    return keystate, last_key_down, wants_quit
def load() -> None:
    """Set up pygame, the window, the clock, and the player object."""
    shared.game_folder = os.getcwd()                    # remember the working directory
    os.environ["SDL_VIDEO_CENTERED"] = "1"              # centre the Pygame window on screen
    pygame.init()                                       # initialise pygame and game window
    try:
        pygame.mixer.init()                             # start pygame sound library
    except pygame.error:
        # No/broken audio driver: keep running without sound.  The original
        # bare `except:` also swallowed KeyboardInterrupt and SystemExit.
        shared.audio_present = False
    shared.screen = pygame.display.set_mode((shared.WIDTH, shared.HEIGHT))
    pygame.display.set_caption(shared.window_title)     # the window title
    shared.clock = pygame.time.Clock()                  # keeps track of framerate etc.
    player.init(40, 50, 500, shared.GREEN)              # create a player (50x40 green rectangle)
def update() -> None:
    """Advance the game one frame: tick the clock, read input, update entities.

    clock.tick() returns the frame time in milliseconds; dividing by 1000
    yields the seconds-based delta-time convention of Love2D and Monogame.
    """
    dt = shared.clock.tick(shared.FPS) / 1000
    keystate, key_down, wants_quit = process_events()
    if wants_quit:
        shared.gamestate = shared.gamestates["quit"]
    elif shared.gamestate == shared.gamestates["play"]:
        player.update(keystate, dt)
def draw() -> None:
    """Clear the frame, draw active sprites for the current state, present it."""
    shared.screen.fill(shared.BLACK)                 # wipe to a black background
    in_play = shared.gamestate == shared.gamestates["play"]
    if in_play:
        player.draw()
    pygame.display.flip()                            # make the new frame visible
def main() -> None:
    """Configure the window, run the game loop, and shut pygame down."""
    shared.WIDTH = 480                       # default screen width: alter as required
    shared.HEIGHT = 600                      # default screen height: alter as required
    shared.window_title = "Shmup!"           # default window title: change as required
    load()                                   # set up window and game assets
    # Game loop: run until the gamestate reaches "quit".
    shared.gamestate = shared.gamestates["play"]
    while shared.gamestate < shared.gamestates["quit"]:
        update()
        draw()
    pygame.quit()
main() | 38.585366 | 105 | 0.677623 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,582 | 0.5 |
aa1ecc00932000826e0af0c980cbcdd6a456cc46 | 926 | py | Python | python/091-100/Interleaving String.py | KaiyuWei/leetcode | fd61f5df60cfc7086f7e85774704bacacb4aaa5c | [
"MIT"
] | 150 | 2015-04-04T06:53:49.000Z | 2022-03-21T13:32:08.000Z | python/091-100/Interleaving String.py | yizhu1012/leetcode | d6fa443a8517956f1fcc149c8c4f42c0ad93a4a7 | [
"MIT"
] | 1 | 2015-04-13T15:15:40.000Z | 2015-04-21T20:23:16.000Z | python/091-100/Interleaving String.py | yizhu1012/leetcode | d6fa443a8517956f1fcc149c8c4f42c0ad93a4a7 | [
"MIT"
class Solution:
    # @param {string} s1
    # @param {string} s2
    # @param {string} s3
    # @return {boolean}
    def isInterleave(self, s1, s2, s3):
        """Return True if s3 is an interleaving of s1 and s2.

        Classic O(m*n) dynamic program: table[i][j] is True when s3[:i+j]
        can be formed by interleaving s2[:i] with s1[:j].
        """
        m, n = len(s1), len(s2)
        if m + n != len(s3):
            return False
        table = [[False] * (m + 1) for _ in range(n + 1)]
        table[0][0] = True
        # First row: s3 built from s1 only.
        for j in range(1, m + 1):
            table[0][j] = table[0][j - 1] and s3[j - 1] == s1[j - 1]
        # First column: s3 built from s2 only.
        for i in range(1, n + 1):
            table[i][0] = table[i - 1][0] and s3[i - 1] == s2[i - 1]
        # General case: the next s3 character extends either string's prefix.
        for i in range(1, n + 1):
            for j in range(1, m + 1):
                table[i][j] = ((table[i - 1][j] and s3[i + j - 1] == s2[i - 1]) or
                               (table[i][j - 1] and s3[i + j - 1] == s1[j - 1]))
        return table[n][m]
aa1ed9f3c5b53362f9ffc3df9a734baef6b42e38 | 3,119 | py | Python | vvrest/vault.py | GRM-VisualVault/vvPyRest | 59c15d3377b9955f575cfdb1362facc48f253442 | [
"MIT"
] | 1 | 2019-01-31T10:41:35.000Z | 2019-01-31T10:41:35.000Z | vvrest/vault.py | GRM-VisualVault/vvPyRest | 59c15d3377b9955f575cfdb1362facc48f253442 | [
"MIT"
] | 5 | 2018-12-30T03:15:20.000Z | 2021-06-30T21:49:41.000Z | vvrest/vault.py | GRM-VisualVault/vvPyRest | 59c15d3377b9955f575cfdb1362facc48f253442 | [
"MIT"
] | null | null | null | from .token import Token
from .utilities import get_token_expiration
from .services.auth_service import AuthService
class Vault:
    """Handle to one VisualVault customer database.

    Authenticates on construction and exposes helpers for building request
    URLs and Authorization headers.
    """

    def __init__(self, url, customer_alias, database_alias, client_id, client_secret, user_web_token=None, jwt=None):
        """
        When ``user_web_token`` is supplied, authentication is performed on
        behalf of the user owning that token; otherwise it is performed on
        behalf of the user owning ``client_id``/``client_secret``.

        :param url: string, example: https://demo.visualvault.com
        :param customer_alias: str
        :param database_alias: str
        :param client_id: str, UUID(version=4)
        :param client_secret: str, example: khN18YAZPe6F3Z0tc2W0HXCb487jm0wgwe6kNffUNf0=
        :param user_web_token: str UUID(version=4), passed in for user impersonation
        :param jwt: string, JSON Web Token
        """
        self.url = url
        self.customer_alias = customer_alias
        self.database_alias = database_alias
        self.client_id = client_id
        self.client_secret = client_secret
        self.user_web_token = user_web_token
        self.jwt = jwt
        self.token = self.get_access_token()
        self.base_url = self.get_base_url()

    def get_access_token(self):
        """Request and return a fresh access token.

        :return: Token
        """
        service = AuthService(self.url, self.customer_alias, self.database_alias, self.client_id,
                              self.client_secret, self.user_web_token)
        if self.jwt:
            # A caller-supplied JWT is used as-is; expiry and refresh token
            # are unknown in that case.
            return Token(self.jwt, None, None)
        response = service.get_access_token()
        expires_at = get_token_expiration(response['expires_in'])
        return Token(response['access_token'], expires_at, response['refresh_token'])

    def get_base_url(self):
        """Build the API base URL for this customer/database.

        :return: string
        """
        return '{}/api/v1/{}/{}/'.format(self.url, self.customer_alias, self.database_alias)

    def refresh_access_token(self):
        """Replace ``self.token`` using its refresh token.

        :return: None
        """
        service = AuthService(self.url, self.customer_alias, self.database_alias, self.client_id,
                              self.client_secret, self.user_web_token)
        response = service.refresh_access_token(self.token.refresh_token)
        expires_at = get_token_expiration(response['expires_in'])
        self.token = Token(response['access_token'], expires_at, response['refresh_token'])

    def get_auth_headers(self):
        """Build the Authorization header dict for the current token.

        :return: dict
        """
        return {'Authorization': 'Bearer ' + self.token.access_token}
| 38.506173 | 117 | 0.643796 | 3,000 | 0.961847 | 0 | 0 | 0 | 0 | 0 | 0 | 1,064 | 0.341135 |
aa2023ac99077f9cb543158669d698802bc7ef7d | 2,000 | py | Python | setup.py | saimn/stsci.tools | b926e11b4f1afa09fe7cf6b7893ebf2a54182c94 | [
"BSD-3-Clause"
] | null | null | null | setup.py | saimn/stsci.tools | b926e11b4f1afa09fe7cf6b7893ebf2a54182c94 | [
"BSD-3-Clause"
] | null | null | null | setup.py | saimn/stsci.tools | b926e11b4f1afa09fe7cf6b7893ebf2a54182c94 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import os
import pkgutil
import sys
from setuptools import setup, find_packages
from subprocess import check_call, CalledProcessError
# Bootstrap the 'relic' versioning helper when it is not already importable:
# prefer an uninitialized git submodule, otherwise clone it from GitHub, then
# make the local checkout importable.
if not pkgutil.find_loader('relic'):
    relic_local = os.path.exists('relic')
    # True only when 'relic' exists as an *empty* directory next to a
    # .gitmodules file, i.e. a not-yet-initialized submodule checkout.
    relic_submodule = (relic_local and
                       os.path.exists('.gitmodules') and
                       not os.listdir('relic'))
    try:
        if relic_submodule:
            check_call(['git', 'submodule', 'update', '--init', '--recursive'])
        elif not relic_local:
            check_call(['git', 'clone', 'https://github.com/spacetelescope/relic.git'])
        # Must happen before 'import relic.release' below so the freshly
        # fetched checkout is found on sys.path.
        sys.path.insert(1, 'relic')
    except CalledProcessError as e:
        print(e)
        exit(1)
import relic.release
# Derive the package version from git metadata and write it into the package.
version = relic.release.get_info()
relic.release.write_template(version, 'lib/stsci/tools')
setup(
    name = 'stsci.tools',
    version = version.pep386,
    author = 'STScI',
    author_email = 'help@stsci.edu',
    description = 'Collection of STScI utility functions',
    url = 'https://github.com/spacetelescope/stsci.tools',
    classifiers = [
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Scientific/Engineering :: Astronomy',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    install_requires = [
        'astropy<=3.1',
        'numpy',
    ],
    setup_requires = [
        'pytest-runner'
    ],
    tests_require = [
        'pytest',
        'pytest-doctestplus'
    ],
    package_dir = {
        '': 'lib',
    },
    packages = find_packages('lib'),
    package_data = {
        '': ['LICENSE.txt'],
        'stsci/tools/tests': ['data/*.*']
    },
    entry_points = {
        'console_scripts': [
            'convertwaiveredfits=stsci.tools.convertwaiveredfits:main',
            'convertlog=stsci.tools.convertlog:main'
        ],
    },
)
| 28.169014 | 87 | 0.593 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 793 | 0.3965 |
aa2088862a2b3c7cee42f6b2595d03cd35464ec2 | 5,362 | py | Python | settings.py | Nayigiziki/apartment-finder | 0d95c67d1bbabe5ba99a8fee92b219b94726c19c | [
"MIT"
] | null | null | null | settings.py | Nayigiziki/apartment-finder | 0d95c67d1bbabe5ba99a8fee92b219b94726c19c | [
"MIT"
] | null | null | null | settings.py | Nayigiziki/apartment-finder | 0d95c67d1bbabe5ba99a8fee92b219b94726c19c | [
"MIT"
] | null | null | null | import os
# Listing filters: hard minimums a listing must satisfy.
# NOTE(review): the keys are consumed outside this module (presumably by the
# Craigslist query/scraper code) — confirm before renaming.
FILTERS = {
    'min_bathrooms': 1,
    'min_bedrooms': 3
}
## Location preferences
# The Craigslist site you want to search on.
# For instance, https://sfbay.craigslist.org is SF and the Bay Area.
# You only need the beginning of the URL.
CRAIGSLIST_SITE = 'sfbay'
# What Craigslist subdirectories to search on.
# For instance, https://sfbay.craigslist.org/eby/ is the East Bay, and https://sfbay.craigslist.org/sfc/ is San Francisco.
# You only need the last three letters of the URLs.
AREAS = ["sfc"]
# A list of neighborhoods and coordinates that you want to look for apartments in. Any listing that has coordinates
# attached will be checked to see which area it is in. If there's a match, it will be annotated with the area
# name. If no match, the neighborhood field, which is a string, will be checked to see if it matches
# anything in NEIGHBORHOODS.
# Each box is a pair of [lat, lon] corners — presumably [south-west, north-east];
# verify against the geo-filter code before editing.
BOXES = {
    "mission": [
        [37.747808, -122.429121],
        [37.772749, -122.407797]
    ]
}
# A list of neighborhood names to look for in the Craigslist neighborhood name field. If a listing doesn't fall into
# one of the boxes you defined, it will be checked to see if the neighborhood name it was listed under matches one
# of these. This is less accurate than the boxes, because it relies on the owner to set the right neighborhood,
# but it also catches listings that don't have coordinates (many listings are missing this info).
NEIGHBORHOODS = ["berkeley north", "berkeley", "rockridge", "adams point", "oakland lake merritt", "cow hollow", "piedmont", "pac hts", "pacific heights", "lower haight", "inner sunset", "outer sunset", "presidio", "palo alto", "richmond / seacliff", "haight ashbury", "alameda", "twin peaks", "noe valley", "bernal heights", "glen park", "sunset", "mission district", "potrero hill", "dogpatch"]
## Transit preferences
# The farthest you want to live from a transit stop.
MAX_TRANSIT_DIST = 2 # kilometers
# Transit stations you want to check against. Every coordinate here will be checked against each listing,
# and the closest station name will be added to the result and posted into Slack.
# NOTE: "18th & Dolores" used to appear twice in this literal; Python keeps
# only the LAST duplicate key, so the silently shadowed first entry
# ([37.76125, -122.42585]) has been removed.
GOOGLE_STOPS = {
    "Van Ness @ Union": [37.798656,-122.424156],
    "Van Ness @ Sacramento": [37.791363,-122.422707],
    "Columbus @ Powell": [37.800591,-122.410721],
    "San Francisco Office": [37.791172,-122.389923],
    "Soma": [37.777119,-122.395134],
    "Civic Center": [37.778316,-122.414398],
    "Stanyan @ Frederick": [37.766594,-122.45295],
    "Haight @ Divisadero": [37.771225,-122.436745],
    "Hayes @ Steiner": [37.775612,-122.432495],
    "24th @ Castro": [37.75124,-122.433762],
    "24th @ Church": [37.751598,-122.427704],
    "30th @ Dolores": [37.742188,-122.424614],
    "24th @ Valencia": [37.752033,-122.420387],
    "Park Presido @ Geary": [37.780266,-122.47245],
    "19th @ Kirkham": [37.759975,-122.476974],
    "19th @ Taraval": [37.743191,-122.475822],
    "Glen Park BART": [37.733131,-122.434143],
    "San Francisco Office Pickup": [37.789299,-122.388672],
    "Valencia @ 24th": [37.751945,-122.420769],
    "14th and Market (Late AM Quad, Sweep, & Evening Drop Off)": [37.768764,-122.427574],
    "18th & Castro": [37.760788,-122.434914],
    "201 Toland Street": [37.745743,-122.397133],
    "18th & Dolores": [37.761444,-122.426628],
    "Jackson Playground": [37.765011,-122.399948],
    "Potrero & 18th": [37.761635,-122.407318],
    "Potrero & 23rd": [37.753986,-122.406586],
    "Lombard @ Pierce": [37.799282,-122.439499],
    "Market @ Dolores": [37.768872,-122.427169]
}
FB_STOPS = {
    "SOMA-1": [37.785083,-122.419667],
    "SOMA-2": [37.778306,-122.414389],
    "SOMA-3": [37.778056,-122.397056],
    "SOMA-4": [37.774417,-122.404444],
    "Mission-1": [37.76427,-122.430571],
    "Mission-2": [37.748643,-122.420834],
    "Mission-3": [37.748095,-122.418281],
    "Mission-4": [37.751702,-122.427492],
    "Mission-5": [37.765028,-122.419278],
    "Hayes Valley-1": [37.773118,-122.44628],
    "Hayes Valley-2": [37.777639,-122.42325],
    "Hayes Valley-3": [37.773778,-122.432083],
    "Hayes Valley-4": [37.780352,-122.438784],
    "Hayes Valley-5": [37.784972,-122.424667],
    "Portero-1": [37.765028,-122.399861],
    "Portero-2": [37.761889,-122.41025],
    "Portero-3": [37.755722,-122.409528]
}
## Search type preferences
# The Craigslist section underneath housing that you want to search in.
# For instance, https://sfbay.craigslist.org/search/apa find apartments for rent.
# https://sfbay.craigslist.org/search/sub finds sublets.
# You only need the last 3 letters of the URLs.
CRAIGSLIST_HOUSING_SECTION = 'sub'
## System settings
# How long we should sleep between scrapes of Craigslist.
# Too fast may get rate limited.
# Too slow may miss listings.
SLEEP_INTERVAL = 20 * 60 # 20 minutes
# Which slack channel to post the listings into.
SLACK_CHANNEL = "#housing"
# The token that allows us to connect to slack.
# Should be put in private.py, or set as an environment variable.
SLACK_TOKEN = os.getenv('SLACK_TOKEN', "")
# Reference office address (fixed: stray trailing semicolon removed).
OFFICE_ADDRESS = '1965 Charleston Road Mountain View, CA 94043'
# Any private settings are imported here.
try:
    from private import *
except Exception:
    pass
# Any external private settings are imported from here.
try:
    from config.private import *
except Exception:
    pass
aa23e634e8fb312ea7c22cb42c2ae28b5cc0039c | 2,569 | py | Python | randomPanFlute.py | aleixcm/tensorflow-wavenet | e38d4b71fe905042ed02cceffd6d4a8c8c356302 | [
"MIT"
] | null | null | null | randomPanFlute.py | aleixcm/tensorflow-wavenet | e38d4b71fe905042ed02cceffd6d4a8c8c356302 | [
"MIT"
] | null | null | null | randomPanFlute.py | aleixcm/tensorflow-wavenet | e38d4b71fe905042ed02cceffd6d4a8c8c356302 | [
"MIT"
] | 1 | 2018-09-22T09:56:03.000Z | 2018-09-22T09:56:03.000Z | import os
import scipy.io.wavfile
import matplotlib.pyplot as plt
import numpy as np
import os
import random
'''
Create a random dataset of pan-flute clips assembled from seven pre-recorded
note samples (a440 through a1760) that are always in phase.
'''
# Sample rate (Hz) used when writing the generated wav files.
fs = 16000
# Pre-recorded single-note samples; scipy.io.wavfile.read returns
# (rate, data) and [1] keeps only the sample array.
x1 = scipy.io.wavfile.read('corpus/Analysis/a440.wav')[1]
x2 = scipy.io.wavfile.read('corpus/Analysis/c531.wav')[1]
x3 = scipy.io.wavfile.read('corpus/Analysis/e667.wav')[1]
x4 = scipy.io.wavfile.read('corpus/Analysis/a880.wav')[1]
x5 = scipy.io.wavfile.read('corpus/Analysis/c1056.wav')[1]
x6 = scipy.io.wavfile.read('corpus/Analysis/e1320.wav')[1]
x7 = scipy.io.wavfile.read('corpus/Analysis/a1760.wav')[1]
# Categories
# NOTE(review): a/b/c appear unused in the rest of this file — candidates
# for removal.
a = [0]
b = [1]
c = [2]
def createRandomSequence():
    """Draw a random note sequence for one clip.

    Returns (categories, lengths): 5-10 note category indices in 0..6 and,
    for each note, a sample count that is a random 1x-10x multiple of the
    1818-sample base duration.
    """
    base_len = 1818
    note_count = random.randint(5, 10)
    categories = []
    lengths = []
    for _ in range(note_count):
        categories.append(random.randint(0, 6))
        lengths.append(base_len * random.randint(1, 10))
    return categories, lengths
def genFile(sequence, sampleSequence, c):
    """Write the per-sample label sequence for clip *c* to a text file.

    Each category value sequence[i] is repeated sampleSequence[i] times so
    the output has one label line ('<int>,') per audio sample.
    NOTE(review): writes under 'panFluteBigDataset' while genSignals writes
    under 'panFluteBigDataset7freq', and the directory must already exist —
    this function is currently commented out in main().
    """
    newSequence = []
    fullSequence = []
    for i in range(len(sequence)):
        newSequence = int(sampleSequence[i]) * [sequence[i]]
        fullSequence = fullSequence + newSequence
    file00 = open(os.path.join('corpus', 'panFluteBigDataset', 'lc_train%s.txt' % c), 'w')
    for item in fullSequence:
        file00.write('%i,\n' % item)
    file00.close()
def case(x):
    """Map a note category index (0-6) to its pre-loaded waveform array.

    Raises KeyError for any other index, exactly like a dict lookup.
    """
    samples = {
        0: x1,
        1: x2,
        2: x3,
        3: x4,
        4: x5,
        5: x6,
        6: x7,
    }
    return samples[x]
def genSignals(sequence, sampleSequence, c):
    """Concatenate note samples for one clip, normalize, add noise, write wav.

    sequence[i] selects a pre-loaded sample via case(), truncated to
    sampleSequence[i] samples. The concatenation is normalized by its
    largest sample, 1% Gaussian noise is added, and the clip is written
    to the 7-freq dataset folder (which must already exist).
    """
    y=[]
    for i in range(len(sequence)):
        # convert categories to frequencies
        freq = case(sequence[i])
        #nSamples = np.arange(sampleSequence[i])
        #a = random.randint(25, 100)/100
        a = 1  # amplitude scaling disabled (see commented-out code above)
        #y0 = a*np.sin(2*np.pi*freq*nSamples / fs)
        y0= freq[:sampleSequence[i]]
        y = scipy.hstack((y, y0))
    # Normalize by the sample at the argmax (the largest value).
    y = y / y[np.argmax(y)]
    noise = 0.01*np.random.normal(0, 1, len(y))
    y = np.asarray(y) + noise
    scipy.io.wavfile.write(os.path.join('corpus', 'panFluteBigDataset7freq', 'lc_train%s.wav' % c), fs, y)
def main():
    """Generate 100 random clips and write each one out as a wav file."""
    for c in range(0,100):
        sequence, sampleSequence = createRandomSequence()
        #print(sequence, sampleSequence)
        #genFile(sequence, sampleSequence, c)
        genSignals(sequence, sampleSequence, c)
if __name__ == '__main__':
    main()
| 26.484536 | 106 | 0.626703 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 720 | 0.280265 |
aa23ed79c757b361839b4005aa702d809d5ea5a0 | 43 | py | Python | Very Easy/hello.py | Maverick-cmd/Python-Practice | 4dc3f1eb5d633e20057052531cfc6e04772bc061 | [
"MIT"
] | null | null | null | Very Easy/hello.py | Maverick-cmd/Python-Practice | 4dc3f1eb5d633e20057052531cfc6e04772bc061 | [
"MIT"
] | null | null | null | Very Easy/hello.py | Maverick-cmd/Python-Practice | 4dc3f1eb5d633e20057052531cfc6e04772bc061 | [
"MIT"
def hello():
    """Return the fixed greeting string expected by the edabit exercise."""
    greeting = "hello edabit.com"
    return greeting
| 14.333333 | 29 | 0.651163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.418605 |
aa24ad1cd629e57a49d0bc27903187170354eee3 | 3,162 | py | Python | py/scripts/ci_imstats.py | dstndstn/gfa_reduce | 410729fd3c652d96739299be4b196455a2490d33 | [
"BSD-3-Clause"
] | null | null | null | py/scripts/ci_imstats.py | dstndstn/gfa_reduce | 410729fd3c652d96739299be4b196455a2490d33 | [
"BSD-3-Clause"
] | 13 | 2019-02-08T02:11:01.000Z | 2019-09-10T02:11:28.000Z | py/scripts/ci_imstats.py | dstndstn/gfa_reduce | 410729fd3c652d96739299be4b196455a2490d33 | [
"BSD-3-Clause"
] | 2 | 2019-12-23T15:25:32.000Z | 2020-12-22T13:43:52.000Z | #!/usr/bin/env python
import argparse
import os
import gfa_reduce.io as io
import glob
import time
import numpy as np
import copy
import gfa_reduce.common as common
import astropy.io.fits as fits
import gfa_reduce.dark_current as dark_current
from gfa_reduce.analysis.sky import adu_to_surface_brightness
def print_imstats_1exp(imstats, fname_in, verbose=False):
    """Print a per-camera pixel-statistics summary table for one exposure.

    :param imstats: astropy-style table with one row per camera; needs at
        least 'camera', 'median', 'min', 'max' and 'sig_robust' columns.
    :param fname_in: raw exposure filename; re-read for EXPTIME and CCD
        temperatures.
    :param verbose: when True, also dump every column of every row.
    """
    # this is a small amount of unnecessary I/O but whatever
    h = fits.getheader(fname_in, extname='CI')
    exptime = h['EXPTIME']
    if verbose:
        for row in imstats:
            print(row['camera'], fname_in)
            for c in row.colnames:
                print(' '*5, '{:16}'.format(c), ' : ', row[c])
    cols = ['expid', 'camera', 'median', 'max', 'min', 'sigma', 'med-bias',
            'dark_tot_adu', 'sky_ab']
    # Work on a copy so the caller's table is not mutated.
    _imstats = copy.deepcopy(imstats)
    # save horizontal space on printouts
    _imstats['median'] = np.round(_imstats['median']).astype('int')
    _imstats['min'] = np.round(_imstats['min']).astype('int')
    _imstats['max'] = np.round(_imstats['max']).astype('int')
    _imstats['sigma'] = np.round(_imstats['sig_robust']).astype('int')
    _imstats['expid'] = common.expid_from_filename(fname_in)
    median_minus_bias = np.zeros(len(_imstats))
    total_dark_adu = np.zeros(len(_imstats))
    sky_mag_ab = np.zeros(len(_imstats))
    for i in range(len(_imstats)):
        median_minus_bias[i] = np.round(imstats['median'][i] - common.get_median_bias_adu(_imstats['camera'][i])).astype(int)
        ccdtemp = io.get_temperature_celsius(fname_in, _imstats['camera'][i])
        # should really get rid of the hardcoding of 7.5 Celsius below !!!
        # Scale the reference dark-current rate to this camera's temperature.
        total_dark_adu[i] = exptime*common.get_median_dark_current(_imstats['camera'][i])*dark_current.dark_current_rate(ccdtemp)/dark_current.dark_current_rate(7.5)
        sky_mag_ab[i] = adu_to_surface_brightness(median_minus_bias[i]-total_dark_adu[i], exptime,_imstats['camera'][i])
    _imstats['med-bias'] = median_minus_bias
    _imstats['dark_tot_adu'] = total_dark_adu
    _imstats['sky_ab'] = sky_mag_ab
    print(_imstats[cols])
    print('*sigma column is a robust standard deviation measurement')
    print('**all pixel values quoted are in ADU')
def imstats_1exp(fname_in, verbose=False):
    """Load one exposure (with a brief retry loop) and print its pixel stats."""
    # Retry a few times: in realtime mode the file may still be being written.
    for i in range(5):
        exp = io.load_exposure(fname_in, realtime=True)
        if exp is not None:
            break
        else:
            time.sleep(2.0)
    imstats = io.gather_pixel_stats(exp)
    # Rotate the last column to the front — presumably the camera label;
    # confirm against gather_pixel_stats' column order.
    colnames = imstats.colnames
    colnames.insert(0, colnames.pop())
    imstats = imstats[colnames]
    print_imstats_1exp(imstats, fname_in, verbose=verbose)
if __name__=="__main__":
    # works on simulated data
    #fname_in = '/project/projectdirs/desi/users/ameisner/CI/ci_data_challenge/sims/dci-01402.fits'
    # try a real frame
    #fname_in = '/project/projectdirs/desi/spectro/data/20190330/00002930/ci-00002930.fits.fz'
    flist = glob.glob('/project/projectdirs/desi/spectro/data/20190330/*/ci*.fits.fz')
    for f in flist:
        imstats_1exp(f)
    # Single-frame example (uncomment BOTH lines to use).  The call was
    # previously live while its 'fname' assignment was commented out,
    # which raised NameError after the loop finished.
    #fname = '/project/projectdirs/desi/spectro/data/20190403/00003746/ci-00003746.fits.fz'
    #imstats_1exp(fname)
| 35.133333 | 165 | 0.686591 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 924 | 0.29222 |
aa25080d95db6003a47cb7e9144e0411c4a11460 | 88 | py | Python | onnxmltools/convert/common/utils.py | xjarvik/onnxmltools | e4fbdc09814ceedc7655d85b6c4203ca21d8433a | [
"Apache-2.0"
] | 1 | 2022-01-28T04:59:37.000Z | 2022-01-28T04:59:37.000Z | onnxmltools/convert/common/utils.py | xjarvik/onnxmltools | e4fbdc09814ceedc7655d85b6c4203ca21d8433a | [
"Apache-2.0"
] | null | null | null | onnxmltools/convert/common/utils.py | xjarvik/onnxmltools | e4fbdc09814ceedc7655d85b6c4203ca21d8433a | [
"Apache-2.0"
] | 1 | 2021-07-05T23:51:56.000Z | 2021-07-05T23:51:56.000Z | # SPDX-License-Identifier: Apache-2.0
from onnxconverter_common.utils import * # noqa
| 22 | 48 | 0.772727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.488636 |
aa25c2e709cfea704e04eb7eb4828a1320e8fafc | 5,280 | py | Python | navegador5/time_utils.py | ihgazni2/navegador5 | 8d7bcf39f6adf27f6c3ebd1a803383cd3d9555ab | [
"MIT"
] | null | null | null | navegador5/time_utils.py | ihgazni2/navegador5 | 8d7bcf39f6adf27f6c3ebd1a803383cd3d9555ab | [
"MIT"
] | null | null | null | navegador5/time_utils.py | ihgazni2/navegador5 | 8d7bcf39f6adf27f6c3ebd1a803383cd3d9555ab | [
"MIT"
] | null | null | null | import re
import time
import datetime
import html
def in_ignoreUpper(lora,key):
    """Case-insensitive membership test.

    Returns (True, element) for the first element of `lora` that equals
    `key` ignoring case; otherwise returns (False, None).
    """
    wanted = key.lower()
    for candidate in lora:
        if candidate.lower() == wanted:
            return (True, candidate)
    return (False, None)
def s2hms(seconds):
    """Convert a seconds string (e.g. "3661.25") into hours/minutes/seconds.

    Prints the "h:m:s.frac" breakdown and returns a dict with integer
    'hours' and 'minutes' entries and a float 'seconds' entry that
    carries the fractional part.

    Fixed: inputs without a decimal point (e.g. "3661") used to raise
    IndexError on the split; they are now treated as having a zero
    fractional part.
    """
    whole, _, frac = seconds.partition(".")
    frac = frac or "0"
    h, rem = divmod(int(whole), 3600)
    m, s = divmod(rem, 60)
    print(str(h)+':'+str(m)+':'+str(s)+'.'+frac)
    return({'hours':int(h),'minutes':int(m),'seconds':float(str(s)+'.'+frac)})
def which_time_format(date_value):
    '''
    Detect which HTTP-date variant `date_value` is written in.

    Returns one of 'rfc1123_date', 'rfc1123_hypen_date', 'rfc850_date',
    'rfc850_date_a', 'asctime_date', or None if no variant matches.
    NOTE(review): these names carry a trailing '_date' (and '_a') relative
    to the keys of the TIMEFMT table below -- callers must strip the
    '_date' part before looking the name up there.

    ####################HTTP-date###############
    # HTTP-date = rfc1123-date | rfc850-date | asctime-date
    # rfc1123-date = wkday "," SP date1 SP time SP "GMT"
    # rfc850-date = weekday "," SP date2 SP time SP "GMT"
    # asctime-date = wkday SP date3 SP time SP 4DIGIT
    # date1 = 2DIGIT SP month SP 4DIGIT
    # ; day month year (e.g., 02 Jun 1982)
    # date2 = 2DIGIT "-" month "-" 2DIGIT
    # ; day-month-year (e.g., 02-Jun-82)
    # date3 = month SP ( 2DIGIT | ( SP 1DIGIT ))
    # ; month day (e.g., Jun 2)
    # time = 2DIGIT ":" 2DIGIT ":" 2DIGIT
    # ; 00:00:00 - 23:59:59
    # wkday = "Mon" | "Tue" | "Wed"
    # | "Thu" | "Fri" | "Sat" | "Sun"
    # weekday = "Monday" | "Tuesday" | "Wednesday"
    # | "Thursday" | "Friday" | "Saturday" | "Sunday"
    '''
    # Alternation fragments shared by all of the patterns below.
    month = 'Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec'
    weekday = 'Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday'
    wkday = 'Mon|Tue|Wed|Thu|Fri|Sat|Sun'
    # e.g. "Sun, 06 Nov 1994 08:49:37 GMT"
    rfc1123 = ''.join(("(",wkday,")",", ","[0-9]{2} ","(",month,")"," [0-9]{4} ","[0-9]{2}:[0-9]{2}:[0-9]{2} ","GMT"))
    regex_rfc1123 = re.compile(rfc1123)
    # rfc1123 with hyphen-separated date: "Sun, 06-Nov-1994 ..."
    rfc1123_hypen = ''.join(("(",wkday,")",", ","[0-9]{2}-","(",month,")","-[0-9]{4} ","[0-9]{2}:[0-9]{2}:[0-9]{2} ","GMT"))
    regex_rfc1123_hypen = re.compile(rfc1123_hypen)
    # e.g. "Sunday, 06-Nov-94 08:49:37 GMT"
    rfc850 = ''.join(("(",weekday,")",", ","[0-9]{2}-","(",month,")","-[0-9]{2} ","[0-9]{2}:[0-9]{2}:[0-9]{2} ","GMT"))
    regex_rfc850 = re.compile(rfc850)
    # rfc850 with the abbreviated weekday: "Sun, 06-Nov-94 ..."
    rfc850_a = ''.join(("(",wkday,")",", ","[0-9]{2}-","(",month,")","-[0-9]{2} ","[0-9]{2}:[0-9]{2}:[0-9]{2} ","GMT"))
    regex_rfc850_a = re.compile(rfc850_a)
    # e.g. "Sun Nov  6 08:49:37 1994" (single-digit day is space-padded)
    asctime = ''.join(("(",wkday,")"," ","(",month,")","(( [0-9]{2})|( [0-9]{1}))"," ","[0-9]{2}:[0-9]{2}:[0-9]{2} ","[0-9]{4}"))
    regex_asctime = re.compile(asctime)
    # First matching pattern wins; more specific variants are tried first.
    if(regex_rfc1123.search(date_value)):
        return('rfc1123_date')
    elif(regex_rfc1123_hypen.search(date_value)):
        return('rfc1123_hypen_date')
    elif(regex_rfc850.search(date_value)):
        return('rfc850_date')
    elif(regex_rfc850_a.search(date_value)):
        return('rfc850_date_a')
    elif(regex_asctime.search(date_value)):
        return('asctime_date')
    else:
        return(None)
# Bidirectional map between short format names and strptime/strftime
# format strings: both name -> format and format -> name live in the
# same dict.
# NOTE(review): which_time_format() returns names with a trailing
# "_date" suffix (e.g. 'rfc1123_date') that are NOT keys here; callers
# must strip the suffix before the lookup.
TIMEFMT = {
    'rfc1123':'%a, %d %b %Y %H:%M:%S GMT',
    'rfc1123_hypen':'%a, %d-%b-%Y %H:%M:%S GMT',
    'rfc850':'%A, %d-%b-%y %H:%M:%S GMT',
    'rfc850_a':'%a, %d-%b-%y %H:%M:%S GMT',
    'asctime':'%a, %b %d %H:%M:%S %Y',
    '%a, %d %b %Y %H:%M:%S GMT':'rfc1123',
    '%a, %d-%b-%Y %H:%M:%S GMT':'rfc1123_hypen',
    '%A, %d-%b-%y %H:%M:%S GMT':'rfc850',
    '%a, %d-%b-%y %H:%M:%S GMT':'rfc850_a',
    '%a, %b %d %H:%M:%S %Y':'asctime'
}
def fromat_asc(asc):
    """Zero-pad the space-padded day field of an asctime string.

    asctime pads a single-digit day with a space ("Jun  2") while the
    '%d' strptime directive expects a zero ("Jun 02"), so the double
    space is rewritten as " 0".  (The misspelled name is kept for
    compatibility with existing callers.)
    """
    return asc.replace("  ", " 0")
def standlize(s):
    """Collapse every run of whitespace in `s` into a single space."""
    return re.sub(r"\s+", " ", s)
def get_format_name(format):
    """Look up `format` in the bidirectional TIMEFMT table.

    Given a format string this yields its short name (and vice versa).

    Fixed: the original indexed TIMEFMT with the literal string
    'format' instead of the `format` argument, so every call raised
    KeyError.
    """
    return(TIMEFMT[format])
def ts2dt(ts):
    """Convert a POSIX timestamp to a local-time datetime.

    Only microsecond precision survives the conversion.
    """
    return datetime.datetime.fromtimestamp(ts)
def dt2ts(dt,**kwargs):
    """Convert a datetime to its POSIX timestamp.

    Keyword arguments are accepted for interface symmetry with the
    other converters in this module but are ignored.
    """
    return dt.timestamp()
def str2dt(s,**kwargs):
    """Parse a time string into a datetime.

    Uses kwargs['format'] when supplied; otherwise the format is
    auto-detected with which_time_format().

    Fixed: the original called the non-existent names
    detect_time_format() and format_asc() (the helpers defined in this
    module are which_time_format() and fromat_asc()), and it compared
    the format *string* against the name 'asctime', so the asctime
    day-padding fix-up could never run.
    """
    if('format' in kwargs):
        format = kwargs['format']
    else:
        # which_time_format() returns names suffixed with '_date'
        # (e.g. 'rfc1123_date'); strip the suffix to get a TIMEFMT key.
        format_name = which_time_format(s).replace('_date', '')
        format = TIMEFMT[format_name]
    if(format == TIMEFMT['asctime']):
        # zero-pad the space-padded asctime day so '%d' can parse it
        s = fromat_asc(s)
    else:
        s = standlize(s)
    return(datetime.datetime.strptime(s,format))
def dt2str(dt,**kwargs):
    """Format a datetime as a string.

    Priority: an explicit kwargs['format'] string, then a
    kwargs['format_name'] looked up in TIMEFMT, then the rfc1123
    default.
    """
    if 'format' in kwargs:
        fmt = kwargs['format']
    elif 'format_name' in kwargs:
        fmt = TIMEFMT[kwargs['format_name']]
    else:
        fmt = TIMEFMT['rfc1123']
    return dt.strftime(fmt)
def str2ts(s,**kwargs):
    """Parse a time string and return its POSIX timestamp.

    Uses kwargs['format'] when supplied; otherwise the format is
    auto-detected.

    Fixed: the original called the non-existent detect_time_format();
    the detector defined in this module is which_time_format(), whose
    return values carry a '_date' suffix that must be stripped before
    the TIMEFMT lookup.
    """
    if('format' in kwargs):
        format = kwargs['format']
    else:
        format_name = which_time_format(s).replace('_date', '')
        format = TIMEFMT[format_name]
    dt = str2dt(s,format=format)
    ts = dt2ts(dt)
    return(ts)
def ts2str(ts,**kwargs):
    """Format a POSIX timestamp as a string.

    Priority: an explicit kwargs['format'] string, then a
    kwargs['format_name'] looked up in TIMEFMT, then the rfc1123
    default.
    """
    moment = ts2dt(ts)
    if 'format' in kwargs:
        fmt = kwargs['format']
    elif 'format_name' in kwargs:
        fmt = TIMEFMT[kwargs['format_name']]
    else:
        fmt = TIMEFMT['rfc1123']
    return moment.strftime(fmt)
| 31.058824 | 131 | 0.485985 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,326 | 0.44053 |
aa2716777e5784947bfab68b664551c0c517f631 | 4,540 | py | Python | scripts/job/memcached_submit.py | Container-Projects/firmament | d0de5258f0805f8d17b45d70c0a8e6d6a67617c0 | [
"Apache-2.0",
"OpenSSL"
] | 287 | 2016-05-13T17:45:48.000Z | 2022-01-23T00:26:20.000Z | scripts/job/memcached_submit.py | Container-Projects/firmament | d0de5258f0805f8d17b45d70c0a8e6d6a67617c0 | [
"Apache-2.0",
"OpenSSL"
] | 33 | 2016-05-13T11:40:21.000Z | 2020-11-16T17:57:17.000Z | scripts/job/memcached_submit.py | Container-Projects/firmament | d0de5258f0805f8d17b45d70c0a8e6d6a67617c0 | [
"Apache-2.0",
"OpenSSL"
] | 64 | 2016-05-26T06:35:39.000Z | 2021-09-27T12:02:44.000Z | from base import job_desc_pb2
from base import task_desc_pb2
from base import reference_desc_pb2
from google.protobuf import text_format
import httplib, urllib, re, sys, random
import binascii
import time
import shlex
def add_worker_task(job_name, task, binary, args, worker_id, num_workers, extra_args):
    """Populate a TaskDescriptor protobuf for one worker of the job.

    The task runs `binary` under /usr/bin/python with the shared `args`,
    followed by its worker id, the total worker count and any
    `extra_args`.
    """
    task.uid = 0
    task.name = "%s/%d" % (job_name, worker_id)
    task.state = task_desc_pb2.TaskDescriptor.CREATED
    task.binary = "/usr/bin/python"
    worker_args = list(args) + [str(worker_id), str(num_workers), binary] + list(extra_args)
    task.args.extend(worker_args)
    task.inject_task_lib = True
# ---- Command-line parsing -------------------------------------------------
# Required: coordinator hostname, web UI port, memcached binary path.
# Optional: extra args (one shell-quoted string), worker count, job name.
if len(sys.argv) < 4:
    print "usage: memcached_submit.py <coordinator hostname> <web UI port> " \
          "<task binary> [<args>] [<num workers>] [<job name>]"
    sys.exit(1)
hostname = sys.argv[1]
port = int(sys.argv[2])
memcached_exe = sys.argv[3]
if len(sys.argv) > 4:
    # shlex keeps quoted argument groups together
    extra_args = shlex.split(sys.argv[4])
else:
    extra_args = []
if len(sys.argv) > 5:
    num_workers = int(sys.argv[5])
else:
    num_workers = 1
if len(sys.argv) > 6:
    job_name = sys.argv[6]
else:
    # default job name is made unique-ish via the submission timestamp
    job_name = "memcached_job_at_%d" % (int(time.time()))
# Arguments shared by every task: the napper wrapper script, the
# ZooKeeper endpoint and the job name.
basic_args = []
basic_args.append("/home/srguser/firmament-experiments/helpers/napper/napper_memcached.py")
basic_args.append("caelum-301:2181")
basic_args.append(job_name)
# ---- Build the job descriptor protobuf ------------------------------------
job_desc = job_desc_pb2.JobDescriptor()
job_desc.uuid = "" # UUID will be set automatically on submission
job_desc.name = job_name
# set up root task; it acts as worker 0 and mirrors add_worker_task() above
job_desc.root_task.uid = 0
job_desc.root_task.name = job_name + "/0"
job_desc.root_task.state = task_desc_pb2.TaskDescriptor.CREATED
job_desc.root_task.binary = "/usr/bin/python"
job_desc.root_task.args.extend(basic_args)
job_desc.root_task.args.append("0") # root task is worker ID 0
job_desc.root_task.args.append(str(num_workers))
job_desc.root_task.args.append(memcached_exe)
job_desc.root_task.args.extend(extra_args)
job_desc.root_task.inject_task_lib = True
# add workers 1..num_workers-1 as children spawned by the root task
for i in range(1, num_workers):
    task = job_desc.root_task.spawned.add()
    add_worker_task(job_name, task, memcached_exe, basic_args, i, num_workers, extra_args)
# ---- Input/output reference descriptors -----------------------------------
# Hard-coded 32-byte ids for the input blob and the two job outputs.
input_id = binascii.unhexlify('feedcafedeadbeeffeedcafedeadbeeffeedcafedeadbeeffeedcafedeadbeef')
output_id = binascii.unhexlify('db33daba280d8e68eea6e490723b02cedb33daba280d8e68eea6e490723b02ce')
output2_id = binascii.unhexlify('feedcafedeadbeeffeedcafedeadbeeffeedcafedeadbeeffeedcafedeadbeef')
job_desc.output_ids.append(output_id)
job_desc.output_ids.append(output2_id)
input_desc = job_desc.root_task.dependencies.add()
input_desc.id = input_id
input_desc.scope = reference_desc_pb2.ReferenceDescriptor.PUBLIC
input_desc.type = reference_desc_pb2.ReferenceDescriptor.CONCRETE
input_desc.non_deterministic = False
input_desc.location = "blob:/tmp/fib_in"
final_output_desc = job_desc.root_task.outputs.add()
final_output_desc.id = output_id
final_output_desc.scope = reference_desc_pb2.ReferenceDescriptor.PUBLIC
final_output_desc.type = reference_desc_pb2.ReferenceDescriptor.FUTURE
final_output_desc.non_deterministic = True
final_output_desc.location = "blob:/tmp/out1"
final_output2_desc = job_desc.root_task.outputs.add()
final_output2_desc.id = output2_id
final_output2_desc.scope = reference_desc_pb2.ReferenceDescriptor.PUBLIC
final_output2_desc.type = reference_desc_pb2.ReferenceDescriptor.FUTURE
final_output2_desc.non_deterministic = True
final_output2_desc.location = "blob:/tmp/out2"
# ---- Submit via HTTP POST to the coordinator's web UI ---------------------
#params = urllib.urlencode({'test': text_format.MessageToString(job_desc)})
params = 'jd=%s' % text_format.MessageToString(job_desc)
print "SUBMITTING job with parameters:"
print params
print ""
try:
    headers = {"Content-type": "application/x-www-form-urlencoded"}
    conn = httplib.HTTPConnection("%s:%s" % (hostname, port))
    conn.request("POST", "/job/submit/", params, headers)
    response = conn.getresponse()
except Exception as e:
    print "ERROR connecting to coordinator: %s" % (e)
    sys.exit(1)
# The coordinator replies with the job id embedded in the response body;
# extract it with a permissive hex/uuid pattern.
data = response.read()
match = re.search(r"([0-9a-f\-]+)", data, re.MULTILINE | re.S | re.I | re.U)
print "----------------------------------------------"
if match and response.status == 200:
    job_id = match.group(1)
    print "JOB SUBMITTED successfully!\nJOB ID is %s\nStatus page: " \
          "http://%s:%d/job/status/?id=%s" % (job_id, hostname, port, job_id)
else:
    print "ERROR submitting job -- response was: %s (Code %d)" % (response.reason,
                                                                  response.status)
print "----------------------------------------------"
conn.close()
| 36.910569 | 99 | 0.745154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,114 | 0.245374 |
aa276bd863f21ed290ee913192b1706b7cc21ea2 | 16,059 | py | Python | gala/dynamics/_genfunc/genfunc_3d.py | ltlancas/gala | 2621bb599d67e74a85446abf72d5930ef70ca181 | [
"MIT"
] | 1 | 2021-10-14T03:36:15.000Z | 2021-10-14T03:36:15.000Z | gala/dynamics/_genfunc/genfunc_3d.py | ltlancas/gala | 2621bb599d67e74a85446abf72d5930ef70ca181 | [
"MIT"
] | null | null | null | gala/dynamics/_genfunc/genfunc_3d.py | ltlancas/gala | 2621bb599d67e74a85446abf72d5930ef70ca181 | [
"MIT"
] | null | null | null | # Solving the series of linear equations for true action
# and generating function Fourier components
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
from matplotlib.ticker import MaxNLocator
import matplotlib.cm as cm
import time
# in units kpc, km/s and 10^11 M_solar
Grav = 430091.7270069976
Conv = 0.9777922216
import toy_potentials as toy
import test_potentials as pot
import solver
import visualize_surfaces as vs
from solver import unroll_angles as ua
def choose_NT(N_max,iffreq=True):
    """Number of time samples needed to constrain N_max Fourier modes.

    Implements equation (21) of Sanders & Binney (2014); a larger floor
    and prefactor apply when frequencies are also being fit
    (iffreq=True).
    """
    if iffreq:
        floor, samples = 200, 9 * N_max ** 3 / 4
    else:
        floor, samples = 100, N_max ** 3 / 2
    return max(floor, samples)
def check_angle_solution(ang,n_vec,toy_aa,timeseries):
    """ Plots the toy angle solution against the toy angles ---
        Takes true angles and frequencies ang,
        the Fourier vectors n_vec,
        the toy action-angles toy_aa
        and the timeseries

        Layout of ang: entries 0-2 are the initial angles, 3-5 the
        frequencies, and the remainder holds the generating-function
        derivatives, one contiguous third per angle coordinate.
    """
    f,a=plt.subplots(3,1)
    for i in range(3):
        # toy angle samples for coordinate i
        a[i].plot(toy_aa.T[i+3],'.')
        # number of Fourier coefficients per angle coordinate
        # NOTE(review): integer division under Python 2; under Python 3
        # this is a float and the slices below would raise -- confirm
        # the target interpreter.
        size = len(ang[6:])/3
        # generating-function correction summed over all Fourier modes
        AA = np.array([np.sum(ang[6+i*size:6+(i+1)*size]*np.sin(np.sum(n_vec*K,axis=1))) for K in toy_aa.T[3:].T])
        # linear-in-time true angle mapped back to the toy angle
        a[i].plot((ang[i]+ang[i+3]*timeseries-2.*AA) % (2.*np.pi),'.')
        a[i].set_ylabel(r'$\theta$'+str(i+1))
    a[2].set_xlabel(r'$t$')
    plt.show()
def check_target_angle_solution(ang,n_vec,toy_aa,timeseries):
    """ Plots the angle solution and the toy angles ---
        Takes true angles and frequencies ang,
        the Fourier vectors n_vec,
        the toy action-angles toy_aa
        and the timeseries

        Companion to check_angle_solution(): here the residual between
        the corrected toy angles and the linear-in-time model is plotted
        alongside the raw toy angles.
    """
    f,a=plt.subplots(3,1)
    for i in range(3):
        # a[i].plot(toy_aa.T[i+3],'.')
        # number of Fourier coefficients per angle coordinate
        # NOTE(review): Python-2 integer division -- a float under
        # Python 3, which would break the slices below.
        size = len(ang[6:])/3
        # generating-function correction for coordinate i
        AA = np.array([np.sum(ang[6+i*size:6+(i+1)*size]*np.sin(np.sum(n_vec*K,axis=1))) for K in toy_aa.T[3:].T])
        # residual: (toy angle + correction) minus the linear model
        a[i].plot(((toy_aa.T[i+3]+2.*AA) % (2.*np.pi))-(ang[i]+timeseries*ang[i+3]) % (2.*np.pi),'.')
        a[i].plot(toy_aa.T[i+3],'.')
        a[i].set_ylabel(r'$\theta$'+str(i+1))
    a[2].set_xlabel(r'$t$')
    plt.show()
def eval_mean_error_functions(act,ang,n_vec,toy_aa,timeseries,withplot=False):
    """ Calculates sqrt(mean(E)) and sqrt(mean(F))

        E measures the per-sample residual of the action solution and F
        that of the angle solution; the function returns
        [sqrt(mean E_k), sqrt(mean F_k)] per coordinate k.  When
        `withplot` is set the per-sample residuals are also plotted.
    """
    # accumulators: entries 0-2 hold E sums, 3-5 hold F sums
    Err = np.zeros(6)
    NT = len(timeseries)
    # number of Fourier coefficients per angle coordinate
    # NOTE(review): Python-2 integer division (float under Python 3).
    size = len(ang[6:])/3
    # unrolled (non-wrapped) toy angles
    UA = ua(toy_aa.T[3:].T,np.ones(3))
    fig,axis=None,None
    if(withplot):
        fig,axis=plt.subplots(3,2)
        plt.subplots_adjust(wspace=0.3)
    for K in range(3):
        # action residual: toy action minus (true action + Fourier series)
        ErrJ = np.array([(i[K]-act[K]-2.*np.sum(n_vec.T[K]*act[3:]*np.cos(np.dot(n_vec,i[3:]))))**2 for i in toy_aa])
        Err[K] = np.sum(ErrJ)
        # angle residual: linear model minus (unrolled toy angle + series)
        ErrT = np.array(((ang[K]+timeseries*ang[K+3]-UA.T[K]-2.*np.array([np.sum(ang[6+K*size:6+(K+1)*size]*np.sin(np.sum(n_vec*i,axis=1))) for i in toy_aa.T[3:].T])))**2)
        Err[K+3] = np.sum(ErrT)
        if(withplot):
            axis[K][0].plot(ErrJ,'.')
            axis[K][0].set_ylabel(r'$E$'+str(K+1))
            axis[K][1].plot(ErrT,'.')
            axis[K][1].set_ylabel(r'$F$'+str(K+1))
    if(withplot):
        for i in range(3):
            axis[i][0].set_xlabel(r'$t$')
            axis[i][1].set_xlabel(r'$t$')
        plt.show()
    # root-mean-square errors per coordinate
    EJ = np.sqrt(Err[:3]/NT)
    ET = np.sqrt(Err[3:]/NT)
    return np.array([EJ,ET])
def box_actions(results, times, N_matrix, ifprint):
    """
    Finds actions, angles and frequencies for box orbit.
    Takes a series of phase-space points from an orbit integration at times t and returns
    L = (act,ang,n_vec,toy_aa, pars) -- explained in find_actions() below.

    Returns None when no valid toy action-angle samples exist or the
    linear solve fails.
    """
    if(ifprint):
        print("\n=====\nUsing triaxial harmonic toy potential")
    t = time.time()
    # Find best toy parameters (harmonic-oscillator frequencies)
    omega = toy.findbestparams_ho(results)
    if(ifprint):
        print("Best omega "+str(omega)+" found in "+str(time.time()-t)+" seconds")
    # Now find toy actions and angles; drop samples containing NaNs
    AA = np.array([toy.angact_ho(i,omega) for i in results])
    AA = AA[~np.isnan(AA).any(1)]
    if(len(AA)==0):
        return
    t = time.time()
    act = solver.solver(AA, N_matrix)
    # BUG FIX: use `is None` -- `act == None` compares elementwise when
    # the solver returns a numpy array, which raises on truth-testing.
    if act is None:
        return
    if(ifprint):
        print("Action solution found for N_max = "+str(N_matrix)+", size "+str(len(act[0]))+" symmetric matrix in "+str(time.time()-t)+" seconds")
    # Persist the generating-function coefficients alongside their modes
    np.savetxt("GF.Sn_box",np.vstack((act[1].T,act[0][3:])).T)
    ang = solver.angle_solver(AA,times,N_matrix,np.ones(3))
    if(ifprint):
        print("Angle solution found for N_max = "+str(N_matrix)+", size "+str(len(ang))+" symmetric matrix in "+str(time.time()-t)+" seconds")
    # Just some checks
    if(len(ang)>len(AA)):
        print("More unknowns than equations")
    return act[0], ang, act[1], AA, omega
def loop_actions(results, times, N_matrix, ifprint):
    """
    Finds actions, angles and frequencies for loop orbit.
    Takes a series of phase-space points from an orbit integration at times t and returns
    L = (act,ang,n_vec,toy_aa, pars) -- explained in find_actions() below.
    results must be oriented such that circulation is about the z-axis

    Returns None when no valid toy action-angle samples exist or the
    linear solve fails.
    """
    if(ifprint):
        print("\n=====\nUsing isochrone toy potential")
    t = time.time()
    # First find the best set of toy parameters
    params = toy.findbestparams_iso(results)
    # NaN check (NaN != NaN); fall back to default parameters
    if(params[0]!=params[0]):
        params = np.array([10.,10.])
    if(ifprint):
        print("Best params "+str(params)+" found in "+str(time.time()-t)+" seconds")
    # Now find the toy angles and actions in this potential;
    # drop samples containing NaNs
    AA = np.array([toy.angact_iso(i,params) for i in results])
    AA = AA[~np.isnan(AA).any(1)]
    if(len(AA)==0):
        return
    t = time.time()
    act = solver.solver(AA, N_matrix,symNx = 1)
    # BUG FIX: use `is None` -- `act == None` compares elementwise when
    # the solver returns a numpy array, which raises on truth-testing.
    if act is None:
        return
    if(ifprint):
        print("Action solution found for N_max = "+str(N_matrix)+", size "+str(len(act[0]))+" symmetric matrix in "+str(time.time()-t)+" seconds")
    # Store Sn
    np.savetxt("GF.Sn_loop",np.vstack((act[1].T,act[0][3:])).T)
    # Find angles; the sign reflects the sense of circulation about z
    sign = np.array([1.,np.sign(results[0][0]*results[0][4]-results[0][1]*results[0][3]),1.])
    ang = solver.angle_solver(AA,times,N_matrix,sign,symNx = 1)
    if(ifprint):
        print("Angle solution found for N_max = "+str(N_matrix)+", size "+str(len(ang))+" symmetric matrix in "+str(time.time()-t)+" seconds")
    # Just some checks
    if(len(ang)>len(AA)):
        print("More unknowns than equations")
    return act[0], ang, act[1], AA, params
def angmom(x):
    """Angular momentum vector L = r x v of the phase-space point x.

    x is ordered (x, y, z, vx, vy, vz).
    """
    position = np.asarray(x[:3])
    velocity = np.asarray(x[3:])
    return np.cross(position, velocity)
def assess_angmom(X):
    """
    Checks for change of sign in each component of the angular momentum
    along the orbit X.  Entry i of the result is 1 if component i never
    changes sign and 0 if it does.
    Box = (0,0,0)
    S.A loop = (0,0,1)
    L.A loop = (1,0,0)
    """
    L_ref = angmom(X[0])
    keeps_sign = np.array([1, 1, 1])
    for point in X[1:]:
        L_now = angmom(point)
        for axis in range(3):
            if L_now[axis] * L_ref[axis] < 0.:
                keeps_sign[axis] = 0
    return keeps_sign
def flip_coords(X, loop):
    """ Align circulation with z-axis

    For a long-axis loop (loop[0] == 1) swap the x and z position and
    velocity components of every phase-space point; otherwise return X
    unchanged.

    Fixed: the original wrapped a `map` object in np.array, which under
    Python 3 produces a useless 0-d object array (map is lazy there);
    a list comprehension behaves identically under both interpreters.
    """
    if(loop[0]==1):
        return np.array([np.array([i[2],i[1],i[0],i[5],i[4],i[3]]) for i in X])
    else:
        return X
def find_actions(results, t, N_matrix=8, use_box=False, ifloop=False, ifprint = True):
    """
    Main routine:
    Takes a series of phase-space points from an orbit integration at times t and returns
    L = (act,ang,n_vec,toy_aa, pars) where act is the actions, ang the initial angles and
    frequencies, n_vec the n vectors of the Fourier modes, toy_aa the toy action-angle
    coords, and pars are the toy potential parameters
    N_matrix sets the maximum |n| of the Fourier modes used,
    use_box forces the routine to use the triaxial harmonic oscillator as the toy potential,
    ifloop=True returns orbit classification,
    ifprint=True prints progress messages.

    Returns None when the action/angle solve fails.
    """
    # Determine orbit class
    loop = assess_angmom(results)
    arethereloops = np.any(loop>0)
    if(arethereloops and not use_box):
        L = loop_actions(flip_coords(results,loop),t,N_matrix, ifprint)
        # BUG FIX: `is None` instead of `== None` (elementwise for arrays);
        # the Python-2-only `print "..."` statements are now print() calls,
        # consistent with the rest of this module.
        if L is None:
            if(ifprint):
                print("Failed to find actions for this orbit")
            return
        # Used for switching J_2 and J_3 for long-axis loop orbits
        # This is so the orbit classes form a continuous plane in action space
        # if(loop[0]):
        #     L[0][1],L[0][2]=L[0][2],L[0][1]
        #     L[1][1],L[1][2]=L[1][2],L[1][1]
        #     L[1][4],L[1][5]=L[1][5],L[1][4]
        #     L[3].T[1],L[3].T[2]=L[3].T[2],L[3].T[1]
    else:
        L = box_actions(results,t,N_matrix, ifprint)
        if L is None:
            if(ifprint):
                print("Failed to find actions for this orbit")
            return
    if(ifloop):
        return L,loop
    else:
        return L
###################
# Plotting tests #
###################
from solver import check_each_direction as ced
def plot_Sn_timesamples(PSP):
    """ Plots Fig. 5 from Sanders & Binney (2014)

        For each integration-window length T and each N_max, integrates
        the orbit starting at phase-space point PSP in the Stackel
        potential, recovers actions/frequencies and plots the log error
        of the third action and frequency against N_max.

        NOTE(review): this function references `print_max_average`,
        which is not defined anywhere in this module -- confirm where it
        is supposed to come from before running.
    """
    TT = pot.stackel_triax()
    f,a = plt.subplots(2,1,figsize=[3.32,3.6])
    plt.subplots_adjust(hspace=0.,top=0.8)
    # shortest fundamental period of this orbit (hard-coded frequency)
    LowestPeriod = 2.*np.pi/38.86564386
    Times = np.array([2.,4.,8.,12.])
    Sr = np.arange(2,14,2)
    # Loop over length of integration window
    for i,P,C in zip(Times,['.','s','D','^'],['k','r','b','g']):
        diffact = np.zeros((len(Sr),3))
        difffreq = np.zeros((len(Sr),3))
        MAXGAPS = np.array([])
        # Loop over N_max
        for k,j in enumerate(Sr):
            NT = choose_NT(j)
            timeseries=np.linspace(0.,i*LowestPeriod,NT)
            results = odeint(pot.orbit_derivs2,PSP,timeseries,args=(TT,),rtol=1e-13,atol=1e-13)
            act,ang,n_vec,toy_aa, pars = find_actions(results, timeseries,N_matrix=j,ifprint=False,use_box=True)
            # Check all modes
            checks,maxgap = ced(n_vec,ua(toy_aa.T[3:].T,np.ones(3)))
            if len(maxgap)>0:
                maxgap = np.max(maxgap)
            else:
                maxgap = 0
            # ratio of recovered to true actions
            diffact[k] = act[:3]/TT.action(results[0])
            print i,j,print_max_average(n_vec,toy_aa.T[3:].T,act[3:]),str(ang[3:6]-TT.freq(results[0])).replace('[','').replace(']',''),str(np.abs(act[:3]-TT.action(results[0]))).replace('[','').replace(']',''),len(checks),maxgap
            MAXGAPS = np.append(MAXGAPS,maxgap)
            difffreq[k] = ang[3:6]/TT.freq(results[0])
        size = 15
        if(P=='.'):
            size = 30
        # marker line width encodes the largest unconstrained mode gap
        # NOTE(review): np.array(map(...)) only works under Python 2.
        LW = np.array(map(lambda i: 0.5+i*0.5, MAXGAPS))
        a[0].scatter(Sr,np.log10(np.abs(diffact.T[2]-1)),marker=P,s=size, color=C,facecolors="none",lw=LW,label=r'$T =\,$'+str(i)+r'$\,T_F$')
        a[1].scatter(Sr,np.log10(np.abs(difffreq.T[2]-1)),marker=P,s=size, color=C,facecolors="none", lw=LW)
    a[1].get_yticklabels()[-1].set_visible(False)
    a[0].set_xticklabels([])
    a[0].set_xlim(1,13)
    a[0].set_ylabel(r"$\log_{10}|J_3^\prime/J_{3, \rm true}-1|$")
    leg = a[0].legend(loc='upper center',bbox_to_anchor=(0.5,1.4),ncol=2, scatterpoints = 1)
    leg.draw_frame(False)
    a[1].set_xlim(1,13)
    a[1].set_xlabel(r'$N_{\rm max}$')
    a[1].set_ylabel(r"$\log_{10}|\Omega_3^\prime/\Omega_{3,\rm true}-1|$")
    plt.savefig('Sn_T_box.pdf',bbox_inches='tight')
def plot3D_stacktriax(initial,final_t,N_MAT,file_output):
    """ For producing plots from paper

        Integrates the orbit starting at `initial` for `final_t` in the
        triaxial Stackel potential, recovers actions/angles with
        N_MAT Fourier modes, prints the comparison to the analytic
        values and saves the multi-panel figure to `file_output`.
        Returns the recovered action/frequency vector.

        NOTE(review): the `print "..."` statements below are Python-2
        syntax.
    """
    # Setup Stackel potential
    TT = pot.stackel_triax()
    times = choose_NT(N_MAT)
    timeseries=np.linspace(0.,final_t,times)
    # Integrate orbit
    results = odeint(pot.orbit_derivs2,initial,timeseries,args=(TT,),rtol=1e-13,atol=1e-13)
    # Find actions, angles and frequencies
    (act,ang,n_vec,toy_aa, pars),loop = find_actions(results, timeseries,N_matrix=N_MAT,ifloop=True)
    # Pick the toy potential matching the orbit class (loop -> isochrone,
    # box -> harmonic oscillator)
    toy_pot = 0
    if(loop[2]>0.5 or loop[0]>0.5):
        toy_pot = pot.isochrone(par=np.append(pars,0.))
    else:
        toy_pot = pot.harmonic_oscillator(omega=pars[:3])
    # Integrate initial condition in toy potential
    timeseries_2=np.linspace(0.,2.*final_t,3500)
    results_toy = odeint(pot.orbit_derivs2,initial,timeseries_2,args=(toy_pot,))
    print "True actions: ", TT.action(results[0])
    print "Found actions: ", act[:3]
    print "True frequencies: ",TT.freq(results[0])
    print "Found frequencies: ",ang[3:6]
    # and plot
    f,a = plt.subplots(2,3,figsize=[3.32,5.5])
    a[0,0] = plt.subplot2grid((3,2), (0, 0))
    a[1,0] = plt.subplot2grid((3,2), (0, 1))
    a[0,1] = plt.subplot2grid((3,2), (1, 0))
    a[1,1] = plt.subplot2grid((3,2), (1, 1))
    a[0,2] = plt.subplot2grid((3,2), (2, 0),colspan=2)
    plt.subplots_adjust(wspace=0.5,hspace=0.45)
    # xy orbit
    a[0,0].plot(results.T[0],results.T[1],'k')
    a[0,0].set_xlabel(r'$x/{\rm kpc}$')
    a[0,0].set_ylabel(r'$y/{\rm kpc}$')
    a[0,0].xaxis.set_major_locator(MaxNLocator(5))
    # xz orbit
    a[1,0].plot(results.T[0],results.T[2],'k')
    a[1,0].set_xlabel(r'$x/{\rm kpc}$')
    a[1,0].set_ylabel(r'$z/{\rm kpc}$')
    a[1,0].xaxis.set_major_locator(MaxNLocator(5))
    # toy orbits
    a[0,0].plot(results_toy.T[0],results_toy.T[1],'r',alpha=0.2,linewidth=0.3)
    a[1,0].plot(results_toy.T[0],results_toy.T[2],'r',alpha=0.2,linewidth=0.3)
    # Toy actions
    a[0,2].plot(Conv*timeseries,toy_aa.T[0],'k:',label='Toy action')
    a[0,2].plot(Conv*timeseries,toy_aa.T[1],'r:')
    a[0,2].plot(Conv*timeseries,toy_aa.T[2],'b:')
    # Arrows to show approx. actions
    arrow_end = a[0,2].get_xlim()[1]
    arrowd = 0.08*(arrow_end-a[0,2].get_xlim()[0])
    a[0,2].annotate('',(arrow_end+arrowd,act[0]),(arrow_end,act[0]),arrowprops=dict(arrowstyle='<-',color='k'),annotation_clip=False)
    a[0,2].annotate('',(arrow_end+arrowd,act[1]),(arrow_end,act[1]),arrowprops=dict(arrowstyle='<-',color='r'),annotation_clip=False)
    a[0,2].annotate('',(arrow_end+arrowd,act[2]),(arrow_end,act[2]),arrowprops=dict(arrowstyle='<-',color='b'),annotation_clip=False)
    # True actions
    a[0,2].plot(Conv*timeseries,TT.action(results[0])[0]*np.ones(len(timeseries)),'k',label='True action')
    a[0,2].plot(Conv*timeseries,TT.action(results[0])[1]*np.ones(len(timeseries)),'k')
    a[0,2].plot(Conv*timeseries,TT.action(results[0])[2]*np.ones(len(timeseries)),'k')
    a[0,2].set_xlabel(r'$t/{\rm Gyr}$')
    a[0,2].set_ylabel(r'$J/{\rm kpc\,km\,s}^{-1}$')
    leg = a[0,2].legend(loc='upper center',bbox_to_anchor=(0.5,1.2),ncol=3, numpoints = 1)
    leg.draw_frame(False)
    # Toy angle coverage
    a[0,1].plot(toy_aa.T[3]/(np.pi),toy_aa.T[4]/(np.pi),'k.',markersize=0.4)
    a[0,1].set_xlabel(r'$\theta_1/\pi$')
    a[0,1].set_ylabel(r'$\theta_2/\pi$')
    a[1,1].plot(toy_aa.T[3]/(np.pi),toy_aa.T[5]/(np.pi),'k.',markersize=0.4)
    a[1,1].set_xlabel(r'$\theta_1/\pi$')
    a[1,1].set_ylabel(r'$\theta_3/\pi$')
    plt.savefig(file_output,bbox_inches='tight')
    return act
if __name__=="__main__":
    # Example initial conditions (x, y, z, vx, vy, vz) for each orbit class.
    BoxP = np.array([0.1,0.1,0.1,142.,140.,251.])
    LoopP = np.array([10.,1.,8.,40.,152.,63.])
    ResP = np.array([0.1,0.1,0.1,142.,150.,216.5])
    LongP = np.array([-0.5,18.,0.5,25.,20.,-133.1])
    # Short-axis Loop (period from a hard-coded fundamental frequency)
    LowestPeriodLoop = 2*np.pi/15.30362865
    # Fig 1
    loop = plot3D_stacktriax(LoopP,8*LowestPeriodLoop,6,'genfunc_3d_example_LT_Stack_Loop.pdf')
    # Fig 3
    vs.Sn_plots('GF.Sn_loop','loop',loop,1)
    # Box
    LowestPeriodBox = 2.*np.pi/38.86564386
    # Fig 2
    box = plot3D_stacktriax(BoxP,8*LowestPeriodBox,6,'genfunc_3d_example_LT_Stack_Box.pdf')
    # Fig 4
    vs.Sn_plots('GF.Sn_box','box',box,0)
    # Res (resonant orbit; reuses the box period above)
    LowestPeriodRes = 2.*np.pi/42.182
    # Fig 5
    res = plot3D_stacktriax(ResP,8*LowestPeriodBox,6,'genfunc_3d_example_LT_Stack_Res.pdf')
    # vs.Sn_plots('GF.Sn_box','box',res,0)
    # Long-axis loop (unused below; kept for reference)
    LowestPeriodLong = 2.*np.pi/12.3
| 37.964539 | 229 | 0.600598 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,891 | 0.304564 |
aa27ad417a5f5847632ae2f34039763f50ca24d4 | 748 | py | Python | scripts/find-attorneys.py | kendallcorner/oscn | 6ba2d939b27d0d54af236e14f8a6b3f5f2aa995c | [
"MIT"
] | null | null | null | scripts/find-attorneys.py | kendallcorner/oscn | 6ba2d939b27d0d54af236e14f8a6b3f5f2aa995c | [
"MIT"
] | null | null | null | scripts/find-attorneys.py | kendallcorner/oscn | 6ba2d939b27d0d54af236e14f8a6b3f5f2aa995c | [
"MIT"
] | null | null | null | import sys
import time
import csv
import oscn
# Scrape OSCN case lists for the configured counties/years and dump, per
# county, one CSV with a [year, county, case_number] row followed by a row
# of that case's attorneys.
counties = ['tulsa', 'cimarron', 'adair', 'delaware']
years = ['2010']
for county in counties:
    csv_file = open(f'data/{county}-attorneys.csv', "w")
    # if this breaks, you may need to mkdir data
    writer = csv.writer(csv_file, delimiter=',')
    for year in years:
        sys.stdout.write(f'{county} {year}')
        # stop=25 limits the scrape to the first 25 cases per county/year
        case_iter = oscn.request.CaseList(county=county, year=year, stop=25)
        for case in case_iter:
            # progress output: case number followed by a dot per case
            sys.stdout.write(case.case_number)
            sys.stdout.flush()
            writer.writerow([year, county, case.case_number])
            writer.writerow(case.attorneys)
            sys.stdout.write('.')
            sys.stdout.flush()
    csv_file.close()
aa282f1ac7388b3707ced3a023f5c1233c6cf501 | 156 | py | Python | tests/middlewares/__init__.py | caputomarcos/mongorest | 57d6b28d75e18afed5cef7160522958153b5be15 | [
"BSD-3-Clause"
] | 16 | 2015-04-18T02:51:09.000Z | 2020-12-15T18:05:16.000Z | tests/middlewares/__init__.py | caputomarcos/mongorest | 57d6b28d75e18afed5cef7160522958153b5be15 | [
"BSD-3-Clause"
] | 8 | 2015-11-24T23:06:03.000Z | 2016-07-21T17:57:59.000Z | tests/middlewares/__init__.py | caputomarcos/mongorest | 57d6b28d75e18afed5cef7160522958153b5be15 | [
"BSD-3-Clause"
] | 2 | 2015-12-04T13:45:32.000Z | 2016-06-11T13:44:53.000Z | # -*- encoding: UTF-8 -*-
from __future__ import absolute_import, unicode_literals
from .authentication_middleware import *
from .cors_middleware import *
| 26 | 56 | 0.788462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.160256 |
aa2c05b3df99db71aa37cd2cc5fcd8d03b928ba5 | 628 | py | Python | katena_chain_sdk_py/serializer/bytes_field.py | katena-chain/sdk-py | 9e1344c6cf2b73680a39c533be58d668b172304f | [
"Apache-2.0"
] | null | null | null | katena_chain_sdk_py/serializer/bytes_field.py | katena-chain/sdk-py | 9e1344c6cf2b73680a39c533be58d668b172304f | [
"Apache-2.0"
] | null | null | null | katena_chain_sdk_py/serializer/bytes_field.py | katena-chain/sdk-py | 9e1344c6cf2b73680a39c533be58d668b172304f | [
"Apache-2.0"
] | null | null | null | """
Copyright (c) 2019, TransChain.
This source code is licensed under the Apache 2.0 license found in the
LICENSE file in the root directory of this source tree.
"""
from marshmallow import fields
from base64 import b64encode, b64decode
class BytesField(fields.Field):
    # Marshmallow field that serializes bytes to a base64 string and
    # deserializes a base64 string back to bytes.

    def _serialize(self, value: bytes, attr, obj, **kwargs) -> str:
        # None becomes the empty string rather than null
        return "" if value is None else b64encode(value).decode("utf-8")

    def _deserialize(self, value: str, attr, data, **kwargs) -> bytes:
        return b64decode(value.encode("utf-8"))
| 28.545455 | 70 | 0.691083 | 385 | 0.613057 | 0 | 0 | 0 | 0 | 0 | 0 | 247 | 0.393312 |
aa2fbcd8fca4fea0b2a9735c1802f1a2bfebbf80 | 717 | py | Python | requests_api/adapter.py | degagne/requests-api | e2d65503d7152293efb8105bb7f48bf46399cd44 | [
"MIT"
] | 1 | 2021-12-09T05:41:47.000Z | 2021-12-09T05:41:47.000Z | requests_api/adapter.py | degagne/requests-api | e2d65503d7152293efb8105bb7f48bf46399cd44 | [
"MIT"
] | null | null | null | requests_api/adapter.py | degagne/requests-api | e2d65503d7152293efb8105bb7f48bf46399cd44 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from typing import List, Dict, NoReturn, Any
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from requests_api.constants import (
BACKOFF_FACTOR,
STATUS_FORCELIST,
ALLOWED_METHODS
)
class RetryAdapter(HTTPAdapter):
    """Transport adapter that installs a shared urllib3 retry policy.

    Every session the adapter is mounted on retries failed requests with
    exponential backoff for the configured status codes and HTTP methods.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Initialize the adapter, then replace the integer
        ``max_retries`` set by HTTPAdapter with a full Retry policy.

        Fixed: the return annotation was ``NoReturn`` (which means the
        function never returns normally); a constructor returns None.
        ``*args``/``**kwargs`` are annotated with their per-item type,
        not ``List``/``Dict``.
        """
        super().__init__(*args, **kwargs)
        self.max_retries = Retry(
            total=self.max_retries,
            backoff_factor=BACKOFF_FACTOR,
            status_forcelist=STATUS_FORCELIST,
            allowed_methods=ALLOWED_METHODS)
| 29.875 | 79 | 0.693166 | 449 | 0.62622 | 0 | 0 | 0 | 0 | 0 | 0 | 75 | 0.104603 |
aa2fcb5d798f5d19c8f670fc3a91f498e60643c5 | 225 | py | Python | tests/engine/dice_test.py | slalom/slaloms-and-dragons | 3ae6a6ec3feb188d767983ab9277668baab18533 | [
"MIT"
] | 7 | 2019-03-28T15:56:21.000Z | 2021-11-08T09:02:05.000Z | tests/engine/dice_test.py | slalom/slaloms-and-dragons | 3ae6a6ec3feb188d767983ab9277668baab18533 | [
"MIT"
] | 2 | 2019-05-16T02:00:22.000Z | 2021-06-08T19:49:46.000Z | tests/engine/dice_test.py | slalom/slaloms-and-dragons | 3ae6a6ec3feb188d767983ab9277668baab18533 | [
"MIT"
] | 4 | 2019-04-02T17:49:30.000Z | 2019-04-04T00:38:54.000Z | import unittest
import game.engine.dice as dice
class DiceRollTest(unittest.TestCase):
    """Sanity checks for the dice module."""

    def test_dice_roll(self):
        """A single roll must land within the faces of a six-sided die."""
        outcome = dice.roll()
        self.assertGreaterEqual(outcome, 1)
        self.assertLessEqual(outcome, 6)
| 22.5 | 40 | 0.697778 | 174 | 0.773333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
aa33835c67781bbb57fb61d41ada81b8f333456f | 7,209 | py | Python | pepenc/data/peptide_encoder_training_dataset.py | bmmalone/peptide-encoder | c4b4d5a0dc68bd2442ab5046713ff15dc3cd4900 | [
"MIT"
] | null | null | null | pepenc/data/peptide_encoder_training_dataset.py | bmmalone/peptide-encoder | c4b4d5a0dc68bd2442ab5046713ff15dc3cd4900 | [
"MIT"
] | null | null | null | pepenc/data/peptide_encoder_training_dataset.py | bmmalone/peptide-encoder | c4b4d5a0dc68bd2442ab5046713ff15dc3cd4900 | [
"MIT"
] | null | null | null | """ This module contains a pytorch dataset for learning peptide embeddings.
In particular, each "instance" of the dataset comprises two peptide sequences,
as well as the sNebula similarity between them. The sNebula distance reflects
the BLOSSUM similarity transformed from 0 to 1.
"""
import logging
logger = logging.getLogger(__name__)
import numpy as np
import torch
import torch.utils.data
from lifesci.peptide_dataset import PeptideDataset
import lifesci.sequence_similarity_utils as sequence_similarity_utils
import pyllars.string_utils as string_utils
from typing import NamedTuple, Optional
class PeptideEncoderTrainingDatasetItem(NamedTuple):
    """A single training instance: two peptides and their similarity.

    Fields:
    * aa_sequence_xs / aa_sequence_ys: the raw amino acid sequences
    * encoded_xs / encoded_ys: the integer-encoded sequences
    * similarities: the sNebula similarity between the two peptides
    """
    aa_sequence_xs: str
    aa_sequence_ys: str
    encoded_xs: torch.IntTensor
    encoded_ys: torch.IntTensor
    similarities: torch.FloatTensor
# Defaults for the dataset constructor: the CSV column holding the
# sequences, the RNG seed, a logging name, and the pad/truncate length.
_DEFAULT_SEQUENCE_COLUMN = 'sequence'
_DEFAULT_SEED = 8675309
_DEFAULT_NAME = "PeptideEncoderTrainingDataset"
_DEFAULT_MAX_LEN = 25
class PeptideEncoderTrainingDataset(torch.utils.data.Dataset):
    """ Generate training samples from a list of amino acid sequences
    In particular, this class reads a list of peptides from `dataset_path`. It
    then draws pairs of peptides from the list and calculates the sNebula
    similarity score between them. Thus, each item from this dataset consists
    of two peptide sequences and the similarity score.
    In case the dataset object should be used for validation, the
    `is_validation` flag can be set to `True`. In that case, a fixed set of
    pairings will be selected for the peptides so that performance metrics are
    constant from iteration to iteration. Otherwise (i.e., for training), one
    member of each pair is randomly sampled.
    Parameters
    ----------
    dataset_path : str
        The path to the dataset. It should be compatible with `pandas.read_csv`
        and contain a column named `sequence_column` which includes the
        sequences. Other columns are ignored.
    aa_encoding_map : pyllars.string_utils.encoding_map_type
        A mapping from each amino acid to its integer index.
        N.B. This should **not** be a one-hot representation, but, as stated,
        the integer index. Further, the padding character must be "-".
    is_validation : bool
        Whether the dataset will be used for validation (or testing)
    sequence_column : str
        The name of the column which contains the amino acid sequences
    max_len : int
        The maximum length for a peptide. Peptides longer than this will be
        truncated, and shorter peptides will be padded to this length.
    seed : int
        Seed for the random number generator. This is used to randomly select
        the second sequence in each of the instances.
    name : str
        A name for the dataset instance. This is mostly used for logging.
    """
    def __init__(self,
            dataset_path:str,
            aa_encoding_map:string_utils.encoding_map_type,
            is_validation:bool=False,
            sequence_column:str=_DEFAULT_SEQUENCE_COLUMN,
            max_len:int=_DEFAULT_MAX_LEN,
            seed:int=_DEFAULT_SEED,
            name:str=_DEFAULT_NAME):
        self.aa_encoding_map = aa_encoding_map
        self.is_validation = is_validation
        self.sequence_column = sequence_column
        self.max_len = max_len
        self.seed = seed
        self.name = name
        self.rng = np.random.default_rng(self.seed)
        # keep only peptides made of the 20 standard amino acids
        df_peptides = PeptideDataset.load(dataset_path, sequence_column, filters=["standard_aa_only"])
        self.aa_sequences = df_peptides[self.sequence_column].values
        self.encoded_aa_sequences = string_utils.encode_all_sequences(
            sequences=self.aa_sequences,
            encoding_map=self.aa_encoding_map,
            maxlen=self.max_len,
            pad_value='-',
            same_length=False
        )
        self.encoded_aa_sequences = self.encoded_aa_sequences.astype(int)
        if self.is_validation:
            # Use the seeded generator so the fixed validation pairing is
            # actually reproducible. (Previously this called
            # np.random.permutation, which ignored `seed` entirely.)
            self._matching_validation_item = self.rng.permutation(len(self.aa_sequences))
    def log(self, msg:str, level:int=logging.INFO) -> None:
        """ Log `msg` using `level` using the module-level logger """
        msg = "[{}] {}".format(self.name, msg)
        logger.log(level, msg)
    def __len__(self) -> int:
        # one item per peptide; the partner is drawn per __getitem__ call
        return len(self.aa_sequences)
    def __getitem__(self, idx) -> PeptideEncoderTrainingDatasetItem:
        x = idx
        # and choose an appropriate matching index based on the dataset status
        if self.is_validation:
            y = self._matching_validation_item[idx]
        else:
            # select the second sequence randomly
            y = self.rng.integers(low=0, high=len(self), size=1)
            # the rng returns an array...
            y = y[0]
        encoded_xs = self.encoded_aa_sequences[x]
        encoded_ys = self.encoded_aa_sequences[y]
        peptide_xs = self.aa_sequences[x]
        peptide_ys = self.aa_sequences[y]
        similarities = sequence_similarity_utils.get_snebula_score(peptide_xs, peptide_ys)
        encoded_xs = torch.as_tensor(encoded_xs, dtype=torch.long)
        encoded_ys = torch.as_tensor(encoded_ys, dtype=torch.long)
        similarities = torch.as_tensor(similarities, dtype=torch.float32)
        ret = PeptideEncoderTrainingDatasetItem(
            peptide_xs, peptide_ys, encoded_xs, encoded_ys, similarities
        )
        return ret
    def get_trimmed_peptide_lengths(self, peptides) -> np.ndarray:
        """ Extract the trimmed length of the given peptides, which accounts for max_len """
        peptide_lengths = [len(p) for p in peptides]
        trimmed_peptide_lengths = np.clip(peptide_lengths, 0, self.max_len)
        return trimmed_peptide_lengths
    @classmethod
    def load(clazz,
            dataset_path:Optional[str],
            aa_encoding_map:string_utils.encoding_map_type,
            is_validation:bool,
            name:str) -> Optional["PeptideEncoderTrainingDataset"]:
        """ Load the dataset given by `dataset_path`
        Additionally, `name` will be used for the name of the dataset.
        Parameters
        ----------
        dataset_path : typing.Optional[str]
            The path to the dataset
        aa_encoding_map : pyllars.string_utils.encoding_map_type
            A mapping from each amino acid to its integer index.
        is_validation : bool
            Whether the dataset will be used for validation (or testing)
        name : str
            The name for the dataset. Example: "TrainingSet"
        Returns
        -------
        dataset : typing.Optional[PeptideEncoderTrainingDataset]
            If `dataset_path` is not None, then `dataset` will be the dataset
            object based on that file. Otherwise, this function returns `None`.
        """
        dataset = None
        if dataset_path is not None:
            dataset = PeptideEncoderTrainingDataset (
                dataset_path=dataset_path,
                aa_encoding_map=aa_encoding_map,
                is_validation=is_validation,
                name=name
            )
        return dataset
| 36.593909 | 102 | 0.674019 | 6,467 | 0.897073 | 0 | 0 | 1,414 | 0.196144 | 0 | 0 | 3,352 | 0.464974 |
aa338df0bf1425d20b2eaf00cec44ce15acc8852 | 3,961 | py | Python | src/chime_dash/app/components/__init__.py | nickcanz/chime | cb03218ee5cc71b92704c8be379924ac459259d7 | [
"MIT"
] | 2 | 2020-04-02T15:57:41.000Z | 2020-06-27T18:06:47.000Z | src/chime_dash/app/components/__init__.py | nickcanz/chime | cb03218ee5cc71b92704c8be379924ac459259d7 | [
"MIT"
] | 1 | 2020-04-05T17:23:45.000Z | 2020-04-05T17:23:45.000Z | src/chime_dash/app/components/__init__.py | nickcanz/chime | cb03218ee5cc71b92704c8be379924ac459259d7 | [
"MIT"
] | null | null | null | """Combines all components
The `sidebar` component combines all the inputs while other components potentially
have callbacks.
To add or remove components, adjust the `setup`.
If callbacks are present, also adjust `CALLBACK_INPUTS`, `CALLBACK_OUTPUTS` and
`callback_body`.
"""
from collections import OrderedDict
from dash_bootstrap_components import Row, Col
from dash_bootstrap_components.themes import BOOTSTRAP
from dash_html_components import Script, Div
from penn_chime.defaults import Constants
from penn_chime.models import SimSirModel
from chime_dash.app.components.base import Component, HTMLComponentError
from chime_dash.app.components.sidebar import Sidebar
from chime_dash.app.components.header import Header
from chime_dash.app.components.intro import Intro, ToolDetails
from chime_dash.app.components.additions import Additions
from chime_dash.app.components.visualizations import Visualizations
from chime_dash.app.components.definitions import Definitions
from chime_dash.app.components.footer import Footer
from chime_dash.app.components.navbar import Navbar
class Body(Component):
    """Top-level page component: wires the sidebar inputs to all display
    components and aggregates their dash callbacks.
    """
    external_stylesheets = [
        "https://www1.pennmedicine.org/styles/shared/penn-medicine-header.css",
        BOOTSTRAP,
    ]
    def __init__(self, language, defaults):
        """Instantiate every child component and collect their callback
        inputs/outputs so the page can register a single dash callback.
        """
        super().__init__(language, defaults)
        # Order matters: it is the top-to-bottom render order of the page.
        self.components = OrderedDict(
            sidebar=Sidebar(language, defaults),
            header=Header(language, defaults),
            intro=Intro(language, defaults),
            tool_details=ToolDetails(language, defaults),
            visualizations=Visualizations(language, defaults),
            additions=Additions(language, defaults),
            definitions=Definitions(language, defaults),
            footer=Footer(language, defaults),
            navbar=Navbar(language, defaults),
        )
        # Flatten the children's callback wiring into one list/dict.
        self.callback_outputs = []
        self.callback_inputs = OrderedDict()
        self.callback_keys = []
        for component in self.components.values():
            self.callback_outputs += component.callback_outputs
            self.callback_inputs.update(component.callback_inputs)
    def get_html(self):
        """Glues individual setup components together
        """
        # Layout: navbar on top, then a row with the sidebar (3 cols) and
        # the main content column (8 cols, all remaining components).
        return Div(
            children=self.components["navbar"].html
            + [
                Row(
                    children=[
                        Col(
                            id="sidebar",
                            children=self.components["sidebar"].html,
                            width=3,
                            className="mt-4",
                        ),
                        Col(width=1),
                        Col(
                            self.components["header"].html
                            + self.components["intro"].html
                            + self.components["tool_details"].html
                            + self.components["visualizations"].html
                            + self.components["additions"].html
                            + self.components["definitions"].html
                            + self.components["footer"].html,
                            width=8,
                            className="mt-4",
                        ),
                    ],
                    className="container",
                ),
            ]
        )
    def callback(self, *args, **kwargs):
        """Single dash callback: parse the sidebar form, build the SIR model,
        then delegate to each child component's callback in render order.
        """
        kwargs = dict(zip(self.callback_inputs, args))
        pars = self.components["sidebar"].parse_form_parameters(**kwargs)
        kwargs["model"] = SimSirModel(pars)
        kwargs["pars"] = pars
        callback_returns = []
        for component in self.components.values():
            try:
                callback_returns += component.callback(**kwargs)
            except Exception as error:
                # wrap so the failing component is identified in the traceback
                raise HTMLComponentError(component, error)
        return callback_returns
| 36.009091 | 82 | 0.58874 | 2,875 | 0.725827 | 0 | 0 | 0 | 0 | 0 | 0 | 593 | 0.14971 |
aa35c651bcb63b7652b7a85574cf82938974798d | 206 | py | Python | src/page/page_parser.py | baallezx/collect | 7156f239d133660e03bba334d716025b96d6b230 | [
"MIT"
] | 1 | 2016-02-08T10:53:48.000Z | 2016-02-08T10:53:48.000Z | src/page/page_parser.py | baallezx/collect | 7156f239d133660e03bba334d716025b96d6b230 | [
"MIT"
] | null | null | null | src/page/page_parser.py | baallezx/collect | 7156f239d133660e03bba334d716025b96d6b230 | [
"MIT"
] | null | null | null | # TODO: implement a page_parser that uses nlp and stats to get a good read of a file.
class page_parser(object):
"""
a multi purpose parser that can read these file types
"""
def __init__(self):
pass
| 25.75 | 85 | 0.728155 | 119 | 0.57767 | 0 | 0 | 0 | 0 | 0 | 0 | 148 | 0.718447 |
aa35ead0147cdaa5e16792a1159c28c73e8158c5 | 110 | py | Python | crawl_and_scrap/__main__.py | byung-u/GranXiSearch | 80a4a2cd19e39424013b7838aafbbbffd2a3574b | [
"MIT"
] | 1 | 2017-06-21T10:44:27.000Z | 2017-06-21T10:44:27.000Z | crawl_and_scrap/__main__.py | byung-u/GranXiSearch | 80a4a2cd19e39424013b7838aafbbbffd2a3574b | [
"MIT"
] | 5 | 2017-02-05T15:20:32.000Z | 2017-03-11T14:09:49.000Z | crawl_and_scrap/__main__.py | byung-u/FindTheTreasure | 80a4a2cd19e39424013b7838aafbbbffd2a3574b | [
"MIT"
] | null | null | null | """crwal_and_scrap trying to gathering news with web scrawl"""
from crwal_and_scrap.main import main
main()
| 18.333333 | 62 | 0.781818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.563636 |
aa38f5ac2a7fc58b2bcf11437fbfffc67543b98c | 1,525 | py | Python | output/models/ibm_data/valid/d3_4_6/d3_4_6v06_xsd/d3_4_6v06.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 1 | 2021-08-14T17:59:21.000Z | 2021-08-14T17:59:21.000Z | output/models/ibm_data/valid/d3_4_6/d3_4_6v06_xsd/d3_4_6v06.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | 4 | 2020-02-12T21:30:44.000Z | 2020-04-15T20:06:46.000Z | output/models/ibm_data/valid/d3_4_6/d3_4_6v06_xsd/d3_4_6v06.py | tefra/xsdata-w3c-tests | b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import List
__NAMESPACE__ = "a"
@dataclass
class Nametest:
    """Model with a repeated choice of string elements in namespace "a".
    The element names exercise unusual-but-legal XML NCNames (leading '_',
    embedded '-', '.', digits). Appears auto-generated (xsdata style) --
    do not hand-edit the field metadata.
    """
    # Each entry below describes one substitutable child element.
    choice: List[object] = field(
        default_factory=list,
        metadata={
            "type": "Elements",
            "choices": (
                {
                    "name": "_ele",
                    "type": str,
                    "namespace": "a",
                },
                {
                    "name": "_-",
                    "type": str,
                    "namespace": "a",
                },
                {
                    "name": "_.",
                    "type": str,
                    "namespace": "a",
                },
                {
                    "name": "_9",
                    "type": str,
                    "namespace": "a",
                },
                {
                    "name": "___",
                    "type": str,
                    "namespace": "a",
                },
                {
                    "name": "a_a",
                    "type": str,
                    "namespace": "a",
                },
                {
                    "name": "a.a",
                    "type": str,
                    "namespace": "a",
                },
                {
                    "name": "ele",
                    "type": str,
                    "namespace": "a",
                },
            ),
        }
    )
@dataclass
class Root(Nametest):
    """Document root element ``root`` in namespace "a"; content model
    inherited from Nametest."""
    class Meta:
        name = "root"
        namespace = "a"
| 23.828125 | 40 | 0.260984 | 1,411 | 0.925246 | 0 | 0 | 1,433 | 0.939672 | 0 | 0 | 283 | 0.185574 |
aa3a75a5b6e67e64763fd2f9004162b6d0e34789 | 1,505 | py | Python | grape_data.py | Dechorgnat/wine_app | e037bb71c92cbfa770f3a167b93eea898fbd7948 | [
"MIT"
] | null | null | null | grape_data.py | Dechorgnat/wine_app | e037bb71c92cbfa770f3a167b93eea898fbd7948 | [
"MIT"
] | 1 | 2019-05-02T06:57:54.000Z | 2019-05-02T11:59:23.000Z | grape_data.py | Dechorgnat/wine_app | e037bb71c92cbfa770f3a167b93eea898fbd7948 | [
"MIT"
] | 1 | 2019-04-30T21:19:34.000Z | 2019-04-30T21:19:34.000Z | import pprint
from app.models import Cepage
data = []
counter = 0
with open('static_data.tsv', 'r') as file:
for line in file:
if counter == 0:
headers = line.split('\t')
print(len(headers))
else:
print(len(line.split('\t')))
data.append(dict(zip(headers, line.replace('\u202f', '').split('\t'))))
counter += 1
pprint.pprint(data)
for wine in data:
try:
id_ = wine['id']
if len(id_) > 0:
id_ = int(id_)
name = wine[u'Nom du cépage']
regions = wine['Régions']
sous_regions = wine['Sous-régions']
superficie_france = wine['Superficie en France (ha)']
superficie_monde = wine['Superficie mondiale (ha)']
red = wine['Cépage'] == 'Noir'
vignobles = wine['Vignobles']
# changing types
superficie_france = int(superficie_france) if len(superficie_france) > 0 else None
superficie_monde = int(superficie_monde) if len(superficie_monde) > 0 else None
c = Cepage(
id=id_,
name=name,
regions=regions,
vignobles=vignobles,
sous_regions=sous_regions,
superficie_france=superficie_france,
superficie_monde=superficie_monde,
red=red
)
db.session.add(c)
db.session.commit()
except ValueError:
continue
| 29.509804 | 94 | 0.530897 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 183 | 0.121272 |
aa3b0940d3759c524f76718360bd7b3fac996f6c | 431 | py | Python | docs/build/docutils/test/functional/tests/math_output_html.py | mjtamlyn/django-braces | 8adc9bc4f5139e3d032d4e38657bf86413388b78 | [
"BSD-3-Clause"
] | 1 | 2015-03-22T16:49:07.000Z | 2015-03-22T16:49:07.000Z | docs/build/docutils/test/functional/tests/math_output_html.py | mjtamlyn/django-braces | 8adc9bc4f5139e3d032d4e38657bf86413388b78 | [
"BSD-3-Clause"
] | null | null | null | docs/build/docutils/test/functional/tests/math_output_html.py | mjtamlyn/django-braces | 8adc9bc4f5139e3d032d4e38657bf86413388b78 | [
"BSD-3-Clause"
] | null | null | null | # Source and destination file names.
test_source = "data/math.txt"
test_destination = "math_output_html.html"
# Keyword parameters passed to publish_file.
reader_name = "standalone"
parser_name = "rst"
writer_name = "html"
# Extra setting
settings_overrides['math_output'] = 'HTML'
settings_overrides['stylesheet_path'] = (
'../docutils/writers/html4css1/html4css1.css, '
'../docutils/writers/html4css1/math.css ')
| 25.352941 | 51 | 0.740139 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 280 | 0.649652 |
aa3c53fd6d46ee624b7f8501fe8550742fc44be3 | 1,083 | py | Python | craftroom/thefriendlystars/panels.py | davidjwilson/craftroom | 05721893350a8b554204e188c8413ee33a7768ad | [
"MIT"
] | 1 | 2019-11-25T21:19:03.000Z | 2019-11-25T21:19:03.000Z | craftroom/thefriendlystars/panels.py | davidjwilson/craftroom | 05721893350a8b554204e188c8413ee33a7768ad | [
"MIT"
] | 1 | 2018-03-14T04:26:54.000Z | 2018-03-14T04:26:54.000Z | craftroom/thefriendlystars/panels.py | davidjwilson/craftroom | 05721893350a8b554204e188c8413ee33a7768ad | [
"MIT"
] | 1 | 2021-09-10T21:24:43.000Z | 2021-09-10T21:24:43.000Z |
'''
Panel object contains
up to one image in the background,
and any number of catalogs plotted.
'''
import astroquery.skyview
class Panel:
    '''
    A single frame of a finder chart,
    that has up to one image in the background,
    and any number of catalogs plotted.
    NOTE(review): stub -- the constructor arguments are not yet stored.
    '''
    def __init__(self, image, catalogs=None):
        pass
        #???
# define the images that accessible to skyview
# Survey-name lists grouped by mission/band, for use with astroquery.skyview.
twomass = ['2MASS-J', '2MASS-H', '2MASS-K']
ukidss = ['UKIDSS-Y', 'UKIDSS-J', 'UKIDSS-H', 'UKIDSS-K']
wise = ['WISE 3.4', 'WISE 4.6', 'WISE 12', 'WISE 22']
dss1 = ['DSS1 Blue', 'DSS1 Red']
dss2 = ['DSS2 Blue', 'DSS2 Red']
GALEX = ['GALEX Far UV', 'GALEX Near UV']
class Image:
    '''
    This represents images that lines up with a given patch of the sky.
    '''
    def __init__(self, hdu, name=None):
        '''
        Initialize an image.
        Parameters
        ----------
        hdu : a PrimaryHDU file
            FITS file
        '''
        # NOTE(review): ``WCS`` is not imported anywhere in this module
        # (only astroquery.skyview is imported, and it is unused), so
        # constructing an Image raises NameError -- probably needs
        # ``from astropy.wcs import WCS``.
        self.header = hdu.header
        self.data = hdu.data
        self.wcs = WCS(hdu.header)
        self.name = name
aa3eab439fa860bc36dd604b78e20a3475abf660 | 2,437 | py | Python | lib/wqmc_to_newick_converter.py | pythonLoader/QT-GILD | 9134ca410d14c554543cc444d4d18f9515e44423 | [
"Apache-2.0"
] | null | null | null | lib/wqmc_to_newick_converter.py | pythonLoader/QT-GILD | 9134ca410d14c554543cc444d4d18f9515e44423 | [
"Apache-2.0"
] | 1 | 2021-12-09T09:22:09.000Z | 2021-12-09T09:22:09.000Z | lib/wqmc_to_newick_converter.py | pythonLoader/QT-GILD | 9134ca410d14c554543cc444d4d18f9515e44423 | [
"Apache-2.0"
] | null | null | null |
import os,sys
import time
# Convert weighted-quartet lines of the form "A,B|C,D:w" into newick form
# "((A,B),(C,D)); w", writing the result under <base_dir>/Quartets_Newick_format.
if len(sys.argv) < 3:
    print("Format -> handle.py <input_file> <base_directory>")
    exit()

input_file_name = sys.argv[1]
base_direc = sys.argv[2]

print("Working with", input_file_name)
start_time = time.time()

# Collect the converted records in a list and join once at the end:
# repeated string `+=` is quadratic on large quartet files. The weight
# token keeps its trailing newline, which also separates the records.
pieces = []
with open(input_file_name, "r") as input_:
    for line in input_:
        splt = line.split(":")
        weight = splt[1]
        parts = splt[0].split("|")
        left_taxa = parts[0].split(",")
        right_taxa = parts[1].split(",")
        pieces.append(
            "((" + left_taxa[0] + "," + left_taxa[1] + "),("
            + right_taxa[0] + "," + right_taxa[1] + ")); " + weight
        )
newick_format = "".join(pieces)
print("End_Time", time.time() - start_time)

output_dir = base_direc + "/Quartets_Newick_format"
if not os.path.exists(output_dir):
    os.mkdir(output_dir)

# Output name is derived from the 4th underscore-separated token of the
# input file's base name, e.g. "a_b_c_NAME.ext" -> "NAME_GT_newick.quartets".
out_file = input_file_name.split('/')[-1].split(".")[0].split('_')[3] + "_GT_newick.quartets"
with open(output_dir + "/" + out_file, "w+") as f:
    f.write(newick_format)
aa3f27f9eadc8df2dacbc14d927b6d3675997de3 | 2,849 | py | Python | plugin/rasa.py | mayflower/err-rasa | a3fa9c02c9434dd716caa72bd7a91f36ce0d8a2e | [
"Apache-2.0"
] | 1 | 2018-08-04T17:32:49.000Z | 2018-08-04T17:32:49.000Z | plugin/rasa.py | mayflower/err-rasa | a3fa9c02c9434dd716caa72bd7a91f36ce0d8a2e | [
"Apache-2.0"
] | 4 | 2019-12-26T16:41:50.000Z | 2020-03-24T15:43:45.000Z | plugin/rasa.py | mayflower/err-rasa | a3fa9c02c9434dd716caa72bd7a91f36ce0d8a2e | [
"Apache-2.0"
] | 2 | 2019-05-17T20:15:01.000Z | 2019-08-08T03:08:25.000Z | from rasa_core.interpreter import RasaNLUInterpreter
from rasa_core.agent import Agent
from rasa_core.interpreter import RegexInterpreter
from rasa_core.policies.keras_policy import KerasPolicy
from rasa_core.policies.memoization import MemoizationPolicy
import json
import config
from errbot import BotPlugin, botcmd
from plugin.rasa_slack import RasaSlack
class Rasa(BotPlugin):
    """Errbot plugin that forwards incoming chat messages to a trained rasa agent."""

    # Commands handled by this plugin itself; messages matching these must
    # not be forwarded to the rasa agent.
    OWN_COMMANDS = ['!learnonline']

    # Locations of the trained models and training configuration.
    dialog_model_dir = './models/dialogue'
    chat_model_dir = './models/nlu/default/chat'
    domain_file = './config/chat_domain.yml'
    training_data_file = 'config/stories.md'
    agent = None
    backend_adapter = None

    def activate(self):
        """Load the trained rasa agent and create the Slack output adapter."""
        super(Rasa, self).activate()
        # The model paths are already defined as class attributes above;
        # the previous duplicate re-assignment here was removed.
        self.agent = Agent.load(self.dialog_model_dir,
                                interpreter=RasaNLUInterpreter(self.chat_model_dir))
        self.backend_adapter = RasaSlack(self._bot)

    def callback_message(self, message):
        """Hook into the message stream and hand each message to rasa."""
        super(Rasa, self).callback_message(message)
        text = message.body
        # Check against OWN_COMMANDS instead of a duplicated literal so new
        # plugin commands only need to be registered in one place.
        if text in self.OWN_COMMANDS:
            self.log.debug("Do not send something as it is an own commmand: " + text)
            return
        token = config.BOT_IDENTITY['token']
        if token is None:
            raise Exception('No slack token')
        # Prefer the ACL attribute / user id when the chat backend provides them.
        frm = getattr(message.frm, 'aclattr', message.frm.person)
        userid = getattr(message.frm, 'userid', frm)
        self.backend_adapter.set_person_by_id(userid, message.frm)
        self.log.debug("From: {}".format(frm))
        self.agent.handle_message(message.body,
                                  sender_id=userid,
                                  output_channel=self.backend_adapter)

    @botcmd()
    def learnonline(self, msg, args):
        """Command to trigger train_online on the rasa agent."""
        token = config.BOT_IDENTITY['token']
        if token is None:
            raise Exception('No slack token')
        # Train interactively with a fresh agent built from the domain file.
        train_agent = Agent(self.domain_file,
                            policies=[MemoizationPolicy(max_history=2), KerasPolicy()],
                            interpreter=RegexInterpreter())
        training_data = train_agent.load_data(self.training_data_file)
        train_agent.train_online(training_data,
                                 input_channel=self.backend_adapter,
                                 batch_size=50,
                                 epochs=200,
                                 max_training_samples=300)
| 41.289855 | 83 | 0.636715 | 2,485 | 0.872236 | 0 | 0 | 737 | 0.258687 | 0 | 0 | 562 | 0.197262 |
aa400985ee38a6b0cabe3da8dcfdcdcd42b9662d | 2,943 | py | Python | pyboretum/tree/list_tree.py | picwell/pyboretum | dd2af812ea2156ddec57aa252c79333f3bd81b0d | [
"MIT"
] | 1 | 2019-04-05T01:57:12.000Z | 2019-04-05T01:57:12.000Z | pyboretum/tree/list_tree.py | picwell/pyboretum | dd2af812ea2156ddec57aa252c79333f3bd81b0d | [
"MIT"
] | null | null | null | pyboretum/tree/list_tree.py | picwell/pyboretum | dd2af812ea2156ddec57aa252c79333f3bd81b0d | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import math
from .base import (
Tree,
TreeIterator,
)
def _get_left_index(node_index):
return 2 * node_index + 1
def _get_right_index(node_index):
return 2 * node_index + 2
def _get_depth(node_index):
"""
The indices in depth d is
(# of nodes up to depths d - 1) <= indices <= (# of nodes up to depth d) - 1
where
(# of nodes up to depth d) = 2^(d + 1) - 1
:param node_index:
:return: int for depth
"""
return int(math.log(node_index + 1, 2))
class ListTree(Tree):
    """Array-backed binary tree: node i's children live at slots 2i+1 and 2i+2."""

    def __init__(self, node):
        self._array = [node]

    def get_root_id(self):
        # The root always occupies slot 0 of the backing list.
        return 0

    def get_iterator(self):
        return _ListTreeIterator(self)

    def _is_memory_allocated(self, node_id):
        # True when the backing list is long enough to hold this slot.
        return node_id < len(self._array)

    def _does_node_exist(self, node_id):
        # A slot may be allocated but empty (None placeholder).
        if not self._is_memory_allocated(node_id):
            return False
        return self._array[node_id] is not None

    def get_node(self, node_id):
        """Return (node, depth) for `node_id`, or (None, None) if absent."""
        if not self._does_node_exist(node_id):
            return None, None
        return self._array[node_id], _get_depth(node_id)

    def get_children_ids(self, node_id):
        """Return (left_id, right_id), or (None, None) for a leaf."""
        left_id, right_id = _get_left_index(node_id), _get_right_index(node_id)
        # Children are always inserted in pairs, so checking the right slot
        # is sufficient to decide whether this node has children.
        if not self._does_node_exist(right_id):
            return None, None
        return left_id, right_id

    def insert_children(self, node_id, left_node, right_node):
        """Attach both children of `node_id`, growing the array as needed."""
        assert self._does_node_exist(node_id), 'Node {} does not exist.'.format(node_id)
        left_id, right_id = _get_left_index(node_id), _get_right_index(node_id)
        assert not self._does_node_exist(left_id) and not self._does_node_exist(right_id), \
            'Children nodes of {} already exist.'.format(node_id)
        # Pad the backing list with empty slots up to the right child.
        missing = right_id - len(self._array) + 1
        if missing > 0:
            self._array.extend([None] * missing)
        self._array[left_id] = left_node
        self._array[right_id] = right_node
        return left_id, right_id
class _ListTreeIterator(TreeIterator):
    """Cursor over a ListTree; assumes the tree is not mutated while iterating."""

    def __init__(self, tree, index=0):
        self._tree = tree
        self._index = index

    def left_child(self):
        """Descend to the left child; asserts the current node is not a leaf."""
        child = _get_left_index(self._index)
        assert self._tree._does_node_exist(child), 'This is a leaf node.'
        self._index = child

    def right_child(self):
        """Descend to the right child; asserts the current node is not a leaf."""
        child = _get_right_index(self._index)
        assert self._tree._does_node_exist(child), 'This is a leaf node.'
        self._index = child

    def is_leaf(self):
        # Children are inserted in pairs, so a missing left child means leaf.
        return not self._tree._does_node_exist(_get_left_index(self._index))

    def get_node(self):
        """Return (node, depth) at the cursor position."""
        return self._tree._array[self._index], _get_depth(self._index)

    def get_id(self):
        return self._index
aa423bbb57ccb85157fec0324522a113dca2abda | 501 | py | Python | Data/CSV_naar_hashtable.py | Tomaat/Programmeerproject | f3fa7558891add6c46948afe778e1abf1cc0f565 | [
"MIT"
] | null | null | null | Data/CSV_naar_hashtable.py | Tomaat/Programmeerproject | f3fa7558891add6c46948afe778e1abf1cc0f565 | [
"MIT"
] | 1 | 2017-01-12T18:31:00.000Z | 2017-01-12T18:31:00.000Z | Data/CSV_naar_hashtable.py | Tomaat/Programmeerproject | f3fa7558891add6c46948afe778e1abf1cc0f565 | [
"MIT"
] | null | null | null | import csv
import json
from collections import defaultdict
# Aggregate counts from a ';'-separated CSV into a nested hash table keyed by
# column 7, with "oorsprong" (column 3) and "profiel" (column 6) sub-tables.
# NOTE: Python 2 module (print statements); the file handles are never closed.
f = open('DOOSTROOM_new.csv', 'rU')
h = defaultdict(lambda: defaultdict(lambda: defaultdict(int)))
for line in f:
    line_list = line.split(";")
    # column 12 holds the count that is accumulated per group
    h[line_list[7]]["oorsprong"][line_list[3]] += int(line_list[12])
    h[line_list[7]]["profiel"][line_list[6]] += int(line_list[12])
# Parse the CSV into JSON
out = json.dumps(h)
print "JSON parsed!"
# Save the JSON
f = open('data2015.json', 'w')
f.write(out)
print "JSON saved!"
aa45f0ddc8356bc41cfa28538ab685525dfeff79 | 2,138 | py | Python | python/version_creator.py | geoff-possum/lambda-versioning | 18f35945579cd9c1315c697444fc0d47aa66ea61 | [
"MIT"
] | 2 | 2020-12-07T05:18:56.000Z | 2021-04-03T08:13:58.000Z | python/version_creator.py | geoff-possum/lambda-versioning | 18f35945579cd9c1315c697444fc0d47aa66ea61 | [
"MIT"
] | null | null | null | python/version_creator.py | geoff-possum/lambda-versioning | 18f35945579cd9c1315c697444fc0d47aa66ea61 | [
"MIT"
] | null | null | null | import boto3
from botocore.vendored import requests
import json
from uuid import uuid4
def send(event, context, response_status, Reason=None, ResponseData=None, PhysicalResourceId=None):
    """Post a CloudFormation custom-resource response to the pre-signed
    ResponseURL from the request event.
    NOTE: Python 2 code (print statements, botocore.vendored.requests).
    """
    response_url = event.get('ResponseURL', "")
    json_body = json.dumps({
        'Status' : response_status,
        'Reason' : Reason or 'See the details in CloudWatch Log Stream: ' + context.log_stream_name,
        'PhysicalResourceId' : PhysicalResourceId or context.log_stream_name,
        'StackId' : event.get('StackId', ""),
        'RequestId' : event.get('RequestId', ""),
        'LogicalResourceId' : event.get('LogicalResourceId', ""),
        'NoEcho' : True,
        'Data' : ResponseData})
    # CloudFormation requires an empty content-type and an explicit length.
    headers = {
        'content-type' : '',
        'content-length' : str(len(json_body))
    }
    try:
        print json_body
        response = requests.put(response_url,data=json_body,headers=headers)
        print("Status code: " + response.reason)
    except Exception as e:
        # A failed response upload leaves the stack waiting until timeout;
        # log it for debugging.
        print("Failed to send response to CFN: error executing requests.put: " + str(e))
def new_version(lambda_arn, text):
    """Publish a new version of the Lambda function `lambda_arn`.
    Returns a (success, data, reason) tuple; on success `data` carries the
    fully qualified VersionArn of the freshly published version.
    """
    try:
        client = boto3.client('lambda')
        return (
            True,
            { "VersionArn": "{}:{}".format(lambda_arn, client.publish_version(FunctionName=lambda_arn)["Version"]) },
            "{} Successful".format(text)
        )
    except Exception as e:
        print e
        return (False, "", "Error during {}: {}".format(text, e))
def lambda_handler(event, context):
    """CloudFormation custom-resource handler that publishes a new Lambda
    version on Create/Update and reports SUCCESS/FAILED back to CFN.
    """
    print event
    properties = event.get('ResourceProperties', {})
    arn = properties.get('LambdaFunctionArn', "")
    # A fresh id per invocation; Delete reuses the id from the properties.
    physical_resource_id = str(uuid4())
    data = {}
    req_type = event.get('RequestType', "")
    if req_type == 'Create':
        res, data, reason = new_version(arn, "Create")
    elif req_type == 'Update':
        res, data, reason = new_version(arn, "Update")
    elif req_type == 'Delete':
        # Nothing to tear down: published versions are left in place.
        physical_resource_id = properties.get('PhysicalResourceId', '')
        res = True
        reason = "Delete Successful"
    else:
        res = False
        reason = "Unknown operation: " + req_type
    status = "FAILED"
    if res:
        status = "SUCCESS"
    send(event, context, status, Reason=reason, ResponseData=data, PhysicalResourceId=physical_resource_id)
| 34.483871 | 112 | 0.673994 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 553 | 0.258653 |
aa46d1dd10569615cec8459fd895776c5d59f9d9 | 6,459 | py | Python | musicdwh/musicdwh.py | dagmar-urbancova/musicdwh | d9960820e7bb77cbe20713b2a344e1fdf86be92a | [
"MIT"
] | null | null | null | musicdwh/musicdwh.py | dagmar-urbancova/musicdwh | d9960820e7bb77cbe20713b2a344e1fdf86be92a | [
"MIT"
] | null | null | null | musicdwh/musicdwh.py | dagmar-urbancova/musicdwh | d9960820e7bb77cbe20713b2a344e1fdf86be92a | [
"MIT"
] | null | null | null | """Main module."""
import os
import sys
import time
from datetime import datetime
import pandas as pd
import ipapi
import sqlalchemy as sqla
# Required configuration from the environment; abort start-up when missing.
# Only KeyError is caught: a bare ``except:`` would also swallow
# SystemExit/KeyboardInterrupt.
try:
    DATA_DATE = os.environ['DATA_DATE']
    print('Using data from {}'.format(DATA_DATE))
except KeyError:
    print('Envvar DATA_DATE not set.')
    sys.exit(1)
try:
    POSTGRES_PASSWORD = os.environ['POSTGRES_PASSWORD']
    print('Using password from POSTGRES_PASSWORD')
except KeyError:
    print('Envvar POSTGRES_PASSWORD not set.')
    sys.exit(1)
try:
    POSTGRES_USER = os.environ['POSTGRES_USER']
    print('Using password from POSTGRES_USER')
except KeyError:
    print('Envvar POSTGRES_USER not set.')
    sys.exit(1)
try:
    POSTGRES_DB = os.environ['POSTGRES_DB']
    print('Using password from POSTGRES_DB')
except KeyError:
    print('Envvar POSTGRES_DB not set.')
    sys.exit(1)
# define database connection
db_name = POSTGRES_DB
db_user = POSTGRES_USER
db_pass = POSTGRES_PASSWORD
db_host = 'database'  # presumably a docker-compose service name -- confirm
db_port = '5432'
DB_CONNECTION = 'postgresql://{}:{}@{}:{}/{}'.format(
    db_user, db_pass, db_host, db_port, db_name
)
# define constants
DATA_PATH = './data'  # root directory of the daily export files
DB_LAYER_0 = 'layer0'  # schema for raw imports
DB_LAYER_1 = 'layer1'  # second schema; not referenced in this part of the file
RETRY_COUNT = 5  # connection retry attempts
DELAY_TIME = 5  # seconds between retries
def import_hb(file_path):
    """Load the daily ``hb`` CSV export.

    Parameters
    ----------
    file_path : str
        Path to the ``hb.csv`` file.

    Returns
    -------
    pandas.DataFrame or None
        Parsed data, or ``None`` when the file cannot be read.
    """
    print('Reading hb data from {}'.format(file_path))
    try:
        return pd.read_csv(file_path)
    except Exception:
        # Keep the original best-effort behaviour (log and return None),
        # but avoid the bare ``except:`` that also swallowed SystemExit.
        print('hb data not accessible')
        return None
def import_wwc(file_path):
    """Load the daily ``wwc`` JSON-lines export and flatten list-valued columns.

    Each source column holds list values; element *i* of column ``c`` becomes
    a new column ``c_{i+1}``.

    Parameters
    ----------
    file_path : str
        Path to the ``wwc.json`` (JSON-lines) file.

    Returns
    -------
    pandas.DataFrame or None
        Flattened data, or ``None`` when the file cannot be read/parsed.
    """
    print('Reading wwc data from {}'.format(file_path))
    try:
        raw = pd.read_json(file_path, lines=True)
        # split each list-valued column into numbered scalar columns
        expanded = []
        for col in raw:
            part = pd.DataFrame(list(raw[col]))
            part.columns = ['{}_{}'.format(col, i + 1) for i in range(part.shape[1])]
            expanded.append(part)
        # axis must be passed by keyword: the positional axis argument
        # was removed in pandas 2.0 (pd.concat(dfs, 1) raises there).
        return pd.concat(expanded, axis=1)
    except Exception:
        print('wwc data not accessible')
        return None
def import_lov(file_path):
    """Load a list-of-values (LOV) CSV file.

    Parameters
    ----------
    file_path : str
        Path to the LOV CSV file.

    Returns
    -------
    pandas.DataFrame or None
        Parsed data, or ``None`` when the file cannot be read.
    """
    print('Reading LOV data from {}'.format(file_path))
    try:
        return pd.read_csv(file_path)
    except Exception:
        # Same best-effort contract as import_hb: log and return None.
        print('data not accessible')
        return None
def ip_convert_country(
        ip_address_series,
        batch,
        sleep_time=60):
    """Resolve a Series of IP addresses to country codes via the ip-api service.

    The free tier is rate limited (~45 requests/minute, ~1,000/day), so after
    every ``batch`` lookups the function sleeps for ``sleep_time`` seconds.

    Parameters
    ----------
    ip_address_series : pandas.Series
        IP addresses to look up.
    batch : int
        Number of lookups between rate-limit pauses.
    sleep_time : int
        Seconds to sleep between batches.

    Returns
    -------
    pandas.Series
        Country codes, with the string 'NaN' where a lookup failed.
    """
    codes = []
    since_pause = 0
    for address in ip_address_series.to_list():
        # pause once the free rate limit for this minute is exhausted
        if since_pause >= batch:
            since_pause = 0
            time.sleep(sleep_time)
        try:
            country_code = ipapi.location(address, output='country_code')
        except Exception:
            # network error / rate limiting: mark as unknown, keep going
            country_code = 'NaN'
        codes.append(country_code)
        since_pause += 1
    return pd.Series(codes)
def import_game(
        game_id,
        export_date,
        data_path
        ):
    """Load one game's daily export.

    Parameters
    ----------
    game_id : str
        Either 'wwc' or 'hb'.
    export_date : str
        Date of the export, in 'YYYY-MM-DD' format.
    data_path : str
        Root directory of the export files.

    Returns
    -------
    pandas.DataFrame or None
        The imported data (None when the underlying file was unreadable).

    Raises
    ------
    ValueError
        For an unknown ``game_id``.
    """
    try:
        export_date_d = datetime.strptime(export_date, '%Y-%m-%d')
    except (TypeError, ValueError):
        print('DATA_DATE is not in the right format. Please set in format YYYY-MM-DD.')
        sys.exit(1)
    date_y = export_date_d.strftime("%Y")
    date_m = export_date_d.strftime("%m")
    date_d = export_date_d.strftime("%d")
    wwc_path = '/wwc/{}/{}/{}/wwc.json'.format(date_y, date_m, date_d)
    hb_path = '/hb/{}/{}/{}/hb.csv'.format(date_y, date_m, date_d)
    if game_id == 'wwc':
        print('import wwc from: ' + data_path + wwc_path)
        imported_data = import_wwc(data_path + wwc_path)
    elif game_id == 'hb':
        print('import hb from: ' + data_path + hb_path)
        imported_data = import_hb(data_path + hb_path)
    else:
        # Previously this branch only printed a hint and then crashed with
        # UnboundLocalError on the return; fail explicitly instead.
        raise ValueError('Please choose a game to import: wwc / hb')
    return imported_data
def connect_to_db(db_con, retry_count, delay):
    """Create a SQLAlchemy engine for `db_con` in AUTOCOMMIT mode.

    NOTE(review): `retry_count` and `delay` are accepted but never used -
    no retry logic is implemented here; confirm whether retries are needed
    (create_engine is lazy and does not actually connect).
    """
    print('Connecting to {}'.format(db_con))
    engine = sqla.create_engine(db_con, isolation_level="AUTOCOMMIT")
    return engine
def upload_to_db(
        df,
        db_table,
        engine,
        db_schema
        ):
    """Replace the contents of `db_schema.db_table` with `df`.

    Truncates the target table first (failure is tolerated - e.g. the
    table does not exist yet), then appends the DataFrame rows.

    :param df: pandas.DataFrame to upload
    :param db_table: destination table name
    :param engine: SQLAlchemy engine (autocommit)
    :param db_schema: destination schema name
    """
    sql = sqla.text("TRUNCATE TABLE {}.{}".format(db_schema, db_table))
    try:
        engine.execute(sql)
    except Exception:  # was a bare except; keep the best-effort truncate
        print("{}.{} - Table does not exist.".format(db_schema, db_table))
    df.to_sql(db_table, engine, schema=db_schema, if_exists='append')
# main script
if __name__ == '__main__':
    print("================================= starting load =================================")
    # create database connection
    # NOTE(review): DB_CONNECTION and DATA_DATE are referenced below but not
    # defined in this section - presumably configured earlier in the file.
    engine = connect_to_db(DB_CONNECTION, RETRY_COUNT, DELAY_TIME)
    # populate LOVs
    LOV_PATH = '{}/LOVs'.format(DATA_PATH)
    #LOV_gender
    gender_df = import_lov('{}/LOV_gender.csv'.format(LOV_PATH))
    upload_to_db (gender_df, 'lov_gender', engine, DB_LAYER_0)
    #LOV_title
    title_df = import_lov('{}/LOV_title.csv'.format(LOV_PATH))
    upload_to_db (title_df, 'lov_title', engine, DB_LAYER_0)
    # load wwc data
    data_wwc = import_game ('wwc', DATA_DATE, DATA_PATH)
    # load hb data
    data_hb = import_game ('hb', DATA_DATE, DATA_PATH)
    # get country codes from IP address
    # (batches of 30 lookups with a 100 s pause - see ip_convert_country)
    ip_code_series = ip_convert_country(data_hb['ip_address'], 30, 100)
    # append country code to hb dataframe
    data_hb['country_code']=ip_code_series
    # upload daily data to database, schema L0
    upload_to_db (data_hb, 'import_data_hb', engine, DB_LAYER_0)
    upload_to_db (data_wwc, 'import_data_wwc', engine, DB_LAYER_0)
    # run load to Layer1
    with open('./sql_scripts/04_L0_L1_load.sql', 'r') as sql_file:
        script_string = sql_file.read()
    print('Running insert script L0_L1_load')
    db_script = engine.execute(script_string)
    # run updates on existing records
    with open('./sql_scripts/05_L0_L1_update.sql', 'r') as sql_file:
        script_string = sql_file.read()
    print('Running update script L0_L1_update')
    db_script = engine.execute(script_string)
| 29.226244 | 94 | 0.62471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,057 | 0.31847 |
aa471620e45f82f8795929b2bd7008f06f5d5d0e | 653 | py | Python | biserici_inlemnite/biserici/migrations/0033_auto_20210803_1623.py | ck-tm/biserici-inlemnite | c9d12127b92f25d3ab2fcc7b4c386419fe308a4e | [
"MIT"
] | null | null | null | biserici_inlemnite/biserici/migrations/0033_auto_20210803_1623.py | ck-tm/biserici-inlemnite | c9d12127b92f25d3ab2fcc7b4c386419fe308a4e | [
"MIT"
] | null | null | null | biserici_inlemnite/biserici/migrations/0033_auto_20210803_1623.py | ck-tm/biserici-inlemnite | c9d12127b92f25d3ab2fcc7b4c386419fe308a4e | [
"MIT"
] | null | null | null | # Generated by Django 3.1.13 on 2021-08-03 13:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds the optional free-text column 'solee_detalii'
    to 'descriere' and to its history table 'historicaldescriere'."""
    dependencies = [
        ('biserici', '0032_auto_20210803_1622'),
    ]
    operations = [
        migrations.AddField(
            model_name='descriere',
            name='solee_detalii',
            field=models.TextField(blank=True, null=True, verbose_name='Solee (observații)'),
        ),
        migrations.AddField(
            model_name='historicaldescriere',
            name='solee_detalii',
            field=models.TextField(blank=True, null=True, verbose_name='Solee (observații)'),
        ),
    ]
| 27.208333 | 93 | 0.614089 | 561 | 0.856489 | 0 | 0 | 0 | 0 | 0 | 0 | 187 | 0.285496 |
aa479376b304ad8b20c1dba4d6101b903b9d8d20 | 9,436 | py | Python | load_data.py | rlatjcj/Naver-AI-Vision | 059e60218cef1f7c37b4d546b02e6024789e9716 | [
"Unlicense"
] | null | null | null | load_data.py | rlatjcj/Naver-AI-Vision | 059e60218cef1f7c37b4d546b02e6024789e9716 | [
"Unlicense"
] | null | null | null | load_data.py | rlatjcj/Naver-AI-Vision | 059e60218cef1f7c37b4d546b02e6024789e9716 | [
"Unlicense"
] | null | null | null | # -*- coding: utf_8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import cv2
import pickle
import random
import numpy as np
def train_load1(data_path, img_size, output_path):
    """Walk `data_path`, load every readable image, and pickle the result.

    Each directory under `data_path` that contains files is treated as one
    class: all of its images share the same integer label.

    :param data_path: root directory with one sub-directory per class
    :param img_size: (width, height) tuple passed to cv2.resize
    :param output_path: pair of pickle paths: (images_file, labels_file)
    """
    label_list = []
    img_list = []
    label_idx = 0
    for root, dirs, files in os.walk(data_path):
        if not files:
            continue
        for filename in files:
            img_path = os.path.join(root, filename)
            try:
                img = cv2.imread(img_path, 1)
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                img = cv2.resize(img, img_size)
            except Exception:  # was a bare except; skip unreadable images
                continue
            label_list.append(label_idx)
            img_list.append(img)
        label_idx += 1
    # write output file for caching
    with open(output_path[0], 'wb') as img_f:
        pickle.dump(img_list, img_f)
    with open(output_path[1], 'wb') as label_f:
        pickle.dump(label_list, label_f)
# nsml test_data_loader
def test_data_loader(data_path):
    """Collect full paths of every query and reference file under
    <data_path>/test/test_data (nsml test-time loader)."""
    base_dir = os.path.join(data_path, 'test', 'test_data')
    query_dir = os.path.join(base_dir, 'query')
    reference_dir = os.path.join(base_dir, 'reference')
    # Prefix each directory entry with its folder to get absolute-style paths.
    queries_path = [os.path.join(query_dir, name) for name in os.listdir(query_dir)]
    references_path = [os.path.join(reference_dir, name) for name in os.listdir(reference_dir)]
    return queries_path, references_path
def siamese_loader(train_dataset_path, data_list, order, input_shape):
    """Build one balanced batch of siamese pairs for class `data_list[order]`.

    Half the rows pair the anchor image with other images of the same class
    (label 1), the other half pair it with one random image from each of as
    many other classes (label 0). Images are normalized to [0, 1] and both
    members of a pair get the same random flip augmentation.

    NOTE(review): after building, only pair[1] and target are shuffled with
    the permutation `p`; pair[0] keeps its original row order - confirm this
    is intended (all pair[0] rows are flip-variants of the same anchor image,
    so labels stay consistent, but flip states no longer line up per row).
    """
    datalist = data_list.copy()
    target_folder = os.path.join(train_dataset_path, datalist[order])
    datalist.pop(order)
    target_list = os.listdir(target_folder)
    random.shuffle(target_list)
    target_name = target_list[0]
    len_same_class = len(target_list[1:])
    # pair[0] = anchors, pair[1] = comparison images; target = 1/0 labels.
    pair = [np.zeros((len_same_class*2, input_shape[0], input_shape[1], 3)) for i in range(2)]
    target = np.zeros((len_same_class*2, 1))
    target_img = cv2.resize(cv2.cvtColor(cv2.imread(os.path.join(target_folder, target_name), 1), cv2.COLOR_RGB2BGR), input_shape) / 255
    # same
    for i in range(len_same_class):
        flag = random.random()
        compare_img = cv2.resize(cv2.cvtColor(cv2.imread(os.path.join(target_folder, target_list[i+1]), 1), cv2.COLOR_RGB2BGR), input_shape) / 255
        # Random augmentation: both flips, vertical only, horizontal only,
        # or none - applied identically to anchor and comparison image.
        if flag > 0.75:
            target_img = cv2.flip(target_img, 0)
            target_img = cv2.flip(target_img, 1)
            compare_img = cv2.flip(compare_img, 0)
            compare_img = cv2.flip(compare_img, 1)
        elif flag > 0.5:
            target_img = cv2.flip(target_img, 0)
            compare_img = cv2.flip(compare_img, 0)
        elif flag > 0.25:
            target_img = cv2.flip(target_img, 1)
            compare_img = cv2.flip(compare_img, 1)
        pair[0][i] = target_img
        pair[1][i] = compare_img
        target[i] = 1
        # print(i, os.path.join(target_folder, target_img), os.path.join(target_folder, target_list[i+1]), target[i])
    # different
    random.shuffle(datalist)
    for i in range(len_same_class, len_same_class*2):
        flag = random.random()
        dif_class = os.listdir(os.path.join(train_dataset_path, datalist[i]))
        random.shuffle(dif_class)
        compare_img = cv2.resize(cv2.cvtColor(cv2.imread(os.path.join(train_dataset_path, datalist[i], dif_class[0]), 1), cv2.COLOR_RGB2BGR), input_shape) / 255
        if flag > 0.75:
            target_img = cv2.flip(target_img, 0)
            target_img = cv2.flip(target_img, 1)
            compare_img = cv2.flip(compare_img, 0)
            compare_img = cv2.flip(compare_img, 1)
        elif flag > 0.5:
            target_img = cv2.flip(target_img, 0)
            compare_img = cv2.flip(compare_img, 0)
        elif flag > 0.25:
            target_img = cv2.flip(target_img, 1)
            compare_img = cv2.flip(compare_img, 1)
        pair[0][i] = target_img
        pair[1][i] = compare_img
        target[i] = 0
        # print(i, os.path.join(target_folder, target_img), os.path.join(train_dataset_path, datalist[i], dif_class[0]), target[i])
    p = np.random.permutation(len_same_class*2)
    pair[1] = pair[1][p]
    target = target[p]
    return pair, target
def siamese_generator(train_dataset_path, data_list, batch_size, input_shape):
    """Endless generator of balanced siamese batches.

    Yields ([anchors, comparisons], labels): the first batch_size//2 rows
    are same-class pairs (label 1), the rest different-class pairs
    (label 0); comparisons and labels are then shuffled together.

    NOTE(review): the second loop indexes p[i + batch_size] with i up to
    batch_size - 1, so this requires len(data_list) >= 2 * batch_size or
    it raises IndexError - confirm the dataset guarantees that.
    """
    def flip_img(target_img, compare_img, flag):
        # Apply the same random flip augmentation to both pair members.
        if flag > 0.75:
            target_img = cv2.flip(target_img, 0)
            target_img = cv2.flip(target_img, 1)
            compare_img = cv2.flip(compare_img, 0)
            compare_img = cv2.flip(compare_img, 1)
        elif flag > 0.5:
            target_img = cv2.flip(target_img, 0)
            compare_img = cv2.flip(compare_img, 0)
        elif flag > 0.25:
            target_img = cv2.flip(target_img, 1)
            compare_img = cv2.flip(compare_img, 1)
        return target_img, compare_img
    while True:
        pair = [np.zeros((batch_size,)+input_shape) for i in range(2)]
        target = np.zeros((batch_size, 1))
        p = np.random.permutation(len(data_list))
        # First half: two different images from the same class (label 1).
        for i in range(batch_size//2):
            flag = random.random()
            target_folder = os.path.join(train_dataset_path, data_list[p[i]])
            target_list = os.listdir(target_folder)
            random.shuffle(target_list)
            target_img = cv2.resize(cv2.cvtColor(cv2.imread(os.path.join(target_folder, target_list[0]), 1), cv2.COLOR_RGB2BGR), input_shape[:2]) / 255
            compare_img = cv2.resize(cv2.cvtColor(cv2.imread(os.path.join(target_folder, target_list[1]), 1), cv2.COLOR_RGB2BGR), input_shape[:2]) / 255
            pair[0][i], pair[1][i] = flip_img(target_img, compare_img, flag)
            target[i] = 1
        # Second half: images from two different classes (label 0).
        for i in range(batch_size//2, batch_size):
            flag = random.random()
            target_folder = os.path.join(train_dataset_path, data_list[p[i]])
            target_list = os.listdir(target_folder)
            compare_folder = os.path.join(train_dataset_path, data_list[p[i+batch_size]])
            compare_list = os.listdir(compare_folder)
            random.shuffle(target_list)
            random.shuffle(compare_list)
            target_img = cv2.resize(cv2.cvtColor(cv2.imread(os.path.join(target_folder, target_list[0]), 1), cv2.COLOR_RGB2BGR), input_shape[:2]) / 255
            compare_img = cv2.resize(cv2.cvtColor(cv2.imread(os.path.join(compare_folder, compare_list[0]), 1), cv2.COLOR_RGB2BGR), input_shape[:2]) / 255
            pair[0][i], pair[1][i] = flip_img(target_img, compare_img, flag)
            target[i] = 0
        # Shuffle comparisons and labels together (anchors keep their order).
        p = np.random.permutation(batch_size)
        pair[1] = pair[1][p]
        target = target[p]
        yield pair, target
        # yield pair[0].shape, pair[1].shape, target.shape
def triple_generator(train_dataset_path, data_list, batch_size, input_shape, regions):
    """Endless generator of (query, relevant, irrelevant, regions) triplets.

    For each row: query and relevant come from the same class, irrelevant
    from another class; all three share one random flip augmentation. The
    fourth input repeats the RMAC `regions` array per sample. Labels are
    all-zero placeholders (triplet losses typically ignore y_true).

    NOTE(review): indexes p[i + batch_size] with i up to batch_size - 1,
    so len(data_list) >= 2 * batch_size is required - confirm.
    """
    def flip_img(query, relevant, irrelevant, flag):
        # Apply the same random flip augmentation to all triplet members.
        if flag > 0.75:
            query = cv2.flip(query, 0)
            query = cv2.flip(query, 1)
            relevant = cv2.flip(relevant, 0)
            relevant = cv2.flip(relevant, 1)
            irrelevant = cv2.flip(irrelevant, 0)
            irrelevant = cv2.flip(irrelevant, 1)
        elif flag > 0.5:
            query = cv2.flip(query, 0)
            relevant = cv2.flip(relevant, 0)
            irrelevant = cv2.flip(irrelevant, 0)
        elif flag > 0.25:
            query = cv2.flip(query, 1)
            relevant = cv2.flip(relevant, 1)
            irrelevant = cv2.flip(irrelevant, 1)
        return query, relevant, irrelevant
    while True:
        pair = [np.zeros((batch_size,)+input_shape) for i in range(3)]
        # Fourth model input: one (len(regions), 4) region array per sample.
        pair += [np.zeros((batch_size, len(regions), 4))]
        target = np.zeros((batch_size, 1))
        p = np.random.permutation(len(data_list))
        for i in range(batch_size):
            flag = random.random()
            query_folder = os.path.join(train_dataset_path, data_list[p[i]])
            irrelevant_folder = os.path.join(train_dataset_path, data_list[p[i+batch_size]])
            query_list = os.listdir(query_folder)
            irrelevant_list = os.listdir(irrelevant_folder)
            random.shuffle(query_list)
            query = cv2.resize(cv2.cvtColor(cv2.imread(os.path.join(query_folder, query_list[0]), 1), cv2.COLOR_RGB2BGR), input_shape[:2]) / 255
            relevant = cv2.resize(cv2.cvtColor(cv2.imread(os.path.join(query_folder, query_list[1]), 1), cv2.COLOR_RGB2BGR), input_shape[:2]) / 255
            irrelevant = cv2.resize(cv2.cvtColor(cv2.imread(os.path.join(irrelevant_folder, irrelevant_list[0]), 1), cv2.COLOR_RGB2BGR), input_shape[:2]) / 255
            pair[0][i], pair[1][i], pair[2][i] = flip_img(query, relevant, irrelevant, flag)
            pair[3][i] = np.array(regions)
            target[i][0] = 0
            # print(i, os.path.join(query_folder, query_list[0]), os.path.join(query_folder, query_list[1]), os.path.join(irrelevant_folder, irrelevant_list[0]))
        yield pair, target
        # yield len(pair), target.shape
if __name__ == '__main__':
    # Smoke test: build RMAC regions for a 512x512 VGG feature map and
    # pull a few triplet batches (batch_size=16) from the training folder.
    from get_regions import rmac_regions, get_size_vgg_feat_map
    Wmap, Hmap = get_size_vgg_feat_map(512, 512)
    regions = rmac_regions(Wmap, Hmap, 3)
    train_dataset_path = './dataset/train/train_data'
    datalist = os.listdir(train_dataset_path)
    gen = triple_generator(train_dataset_path, datalist, 16, (512, 512, 3), regions)
    for i in range(10):
print(next(gen)) | 41.568282 | 161 | 0.627914 | 0 | 0 | 4,791 | 0.507736 | 0 | 0 | 0 | 0 | 673 | 0.071323 |
aa479b29bfe32b3b0d5ad18d34ca8b1c4f74dff8 | 639 | py | Python | data/train/python/aa479b29bfe32b3b0d5ad18d34ca8b1c4f74dff8api.py | harshp8l/deep-learning-lang-detection | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | [
"MIT"
] | 84 | 2017-10-25T15:49:21.000Z | 2021-11-28T21:25:54.000Z | data/train/python/aa479b29bfe32b3b0d5ad18d34ca8b1c4f74dff8api.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 5 | 2018-03-29T11:50:46.000Z | 2021-04-26T13:33:18.000Z | data/train/python/aa479b29bfe32b3b0d5ad18d34ca8b1c4f74dff8api.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 24 | 2017-11-22T08:31:00.000Z | 2022-03-27T01:22:31.000Z | from tastypie.api import Api
from encuestas.api.user import UserResource
from encuestas.api.encuesta import EncuestaResource
from encuestas.api.grupo import GrupoResource
from encuestas.api.pregunta import PreguntaResource
from encuestas.api.opcion import OpcionResource
from encuestas.api.link import LinkResource
from encuestas.api.respuesta import RespuestaResource
# Single versioned tastypie endpoint ('v1') exposing every survey resource.
v1_api = Api(api_name='v1')
v1_api.register(UserResource())
v1_api.register(EncuestaResource())
v1_api.register(GrupoResource())
v1_api.register(PreguntaResource())
v1_api.register(OpcionResource())
v1_api.register(LinkResource())
v1_api.register(RespuestaResource())
| 33.631579 | 53 | 0.843505 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.00626 |
aa47d21c6ff69f1572502674f4acbedbda768571 | 2,371 | py | Python | ex1/gerador.py | renzon/oo-inpe | 1b33939974f998badbeebd7bfe182070e77ef98f | [
"MIT"
] | null | null | null | ex1/gerador.py | renzon/oo-inpe | 1b33939974f998badbeebd7bfe182070e77ef98f | [
"MIT"
] | null | null | null | ex1/gerador.py | renzon/oo-inpe | 1b33939974f998badbeebd7bfe182070e77ef98f | [
"MIT"
] | null | null | null | from random import randint
from ex1.evento import Evento
class Gerador():
    """Abstract base of the event-generator hierarchy."""
    def __init__(self, msg):
        # Message carried by the events this generator produces.
        self.msg = msg
    def gerar_evento(self, tempo):
        """
        Generate an event, taking the execution time into account.
        :return: an Evento instance, or None when no event should be generated
        """
        raise NotImplementedError()
class GeradorDecorator(Gerador):
    """Decorator around another Gerador: every event the wrapped generator
    produces is forwarded to the dispatcher."""
    def __init__(self, msg, gerador, despachador):
        super().__init__(msg)
        self._gerador = gerador
        self._despachador = despachador
    def gerar_evento(self, tempo):
        # Delegate generation; dispatch only when an event was produced.
        produced = self._gerador.gerar_evento(tempo)
        if produced:
            self._despachador.despachar(produced)
class TempoEstrategia():
    """Strategy interface deciding *when* an event should be generated."""
    def deve_gerar_evento(self, tempo):
        """
        Abstract method returning True when an event must be generated at
        `tempo`, and False otherwise.
        :return: bool
        """
        raise NotImplementedError('Deve definir estratégia de tempo')
class Tempo5Segundos(TempoEstrategia):
    """Fixed-interval strategy: fire on every multiple of 5 time units."""
    def deve_gerar_evento(self, tempo):
        remainder = tempo % 5
        return remainder == 0
class TempoAleatorio(TempoEstrategia):
    """Random-interval strategy: fires once a random deadline is reached,
    then re-arms with a new random 1-10 unit delay."""
    def __init__(self):
        # Clock value at which the next event becomes due.
        self._proximo_tempo = randint(1, 10)
    def deve_gerar_evento(self, tempo):
        flag = self._proximo_tempo <= tempo
        if flag:
            # Re-arm: schedule the next event 1-10 units later.
            self._proximo_tempo += randint(1, 10)
        return flag
class TransformadorString():
    """Strategy interface for transforming an event message string."""
    def transformar(self, s, tempo):
        """
        Receive a string and transform it according to the strategy.
        :param s: string to be transformed
        :param tempo: time at which the string was transformed
        :return: the transformed string
        """
        raise NotImplementedError('Deve ser implementado')
class TransformadorNulo(TransformadorString):
    """Identity transformation: returns the message unchanged, ignoring time."""
    def transformar(self, s, tempo):
        return s
class TransformadorComTempo(TransformadorString):
    """Appends the generation time to the message."""
    def transformar(self, s, tempo):
        # Same output as '{}. Tempo={}'.format(s, tempo).
        return f'{s}. Tempo={tempo}'
class GeradorBridge(Gerador):
    """Bridge: combines a time strategy (when to fire) with a string
    transformer (how to format) to produce Evento instances."""
    def __init__(self, msg, tempo_strategia, transformador):
        super().__init__(msg)
        self._transformador = transformador
        self._tempo_strategia = tempo_strategia
    def gerar_evento(self, tempo):
        # Emit only when the time strategy says so; otherwise returns None.
        if self._tempo_strategia.deve_gerar_evento(tempo):
            s = self._transformador.transformar(self.msg, tempo)
            return Evento(s)
| 27.569767 | 95 | 0.660059 | 2,294 | 0.964271 | 0 | 0 | 0 | 0 | 0 | 0 | 581 | 0.24422 |
aa4862c705ed37c155085f64db48e3e7e6b55b7d | 4,221 | py | Python | tests/api/v1/test_bucketlist_endpoint.py | Elbertbiggs360/buckelist-api | 1fd6a857c45aafa55b39d3c215b1e69e0b23c33b | [
"MIT"
] | null | null | null | tests/api/v1/test_bucketlist_endpoint.py | Elbertbiggs360/buckelist-api | 1fd6a857c45aafa55b39d3c215b1e69e0b23c33b | [
"MIT"
] | 18 | 2017-08-22T11:44:19.000Z | 2021-02-02T07:16:59.000Z | tests/api/v1/test_bucketlist_endpoint.py | Elbertbiggs360/buckelist-api | 1fd6a857c45aafa55b39d3c215b1e69e0b23c33b | [
"MIT"
] | null | null | null | import json
from tests.base_test import BaseCase
from app.models.bucketlist import Bucketlist
class TestBucketlistEndpoint(BaseCase):
    ''' A class to test the bucketlist endpoints '''
    def setUp(self):
        # BaseCase.setUp() presumably seeds the database; asserts below rely
        # on two active bucketlists named 'sample_*' for user 1 - confirm.
        super(TestBucketlistEndpoint, self).setUp()
        self.bucketlist_data = {'name': 'Eat Sushi'}
    def test_post_bucketlists_adds_new_bucketlist(self):
        # POST creates a bucketlist and returns 201 plus a success message.
        with self.app.app_context():
            response = self.client().post(
                '/api/v1/bucketlists',
                data=json.dumps(self.bucketlist_data),
                headers=self.auth_headers())
        self.assertEqual(response.status_code, 201)
        self.assertEqual('Bucketlist created successfully!',
                         json.loads(response.data.decode('utf-8')).get('message'))
    def test_get_returns_all_bucketlists_for_user(self):
        with self.app.app_context():
            response = self.client().get('/api/v1/bucketlists',
                                         headers=self.auth_headers())
            result = response.data.decode('utf-8')
        self.assertEqual(response.status_code, 200)
        # Top-level JSON payload has 6 entries for the seeded user.
        self.assertEqual(len(json.loads(result)), 6)
    def test_get_returns_one_bucketlist_if_id_is_specified(self):
        with self.app.app_context():
            response = self.client().get('/api/v1/bucketlists/1',
                                         headers=self.auth_headers())
            result = json.loads(response.data.decode('utf-8'))
            # NOTE(review): expected_list is computed but never asserted on.
            expected_list = sorted(['id', 'name', 'date_created', 'date_modified', 'created_by', 'items'])
        self.assertEqual(response.status_code, 200)
        self.assertListEqual([result.get('name'), result.get('created_by')], ['sample_1', 1])
    def test_edit_updates_bucketlist_fields(self):
        # Confirm the original name first, then PUT an update and re-check.
        with self.app.app_context():
            response = self.client().get('/api/v1/bucketlists/1',
                                         headers=self.auth_headers())
            result = json.loads(response.data.decode('utf-8'))
            self.assertEqual(result.get('name'), 'sample_1')
        update_fields = {'name': 'Bungee Jump'}
        with self.app.app_context():
            response = self.client().put('/api/v1/bucketlists/1',
                                         data=json.dumps(update_fields),
                                         headers=self.auth_headers())
            result = json.loads(response.data.decode('utf-8'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(result.get('name'), update_fields.get('name'))
    def test_delete_removes_bucketlist_from_database(self):
        # Soft delete: the count of active bucketlists drops from 2 to 1.
        with self.app.app_context():
            self.assertEqual(len(Bucketlist.query.filter_by(active=True).all()), 2)
            response = self.client().delete('/api/v1/bucketlists/1',
                                            headers=self.auth_headers())
            result = json.loads(response.data.decode('utf-8'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(result.get('message'), 'Bucketlist with id 1 successfully deleted.')
        with self.app.app_context():
            self.assertEqual(len(Bucketlist.query.filter_by(active=True).all()), 1)
    def test_search_returns_bucketlists_whose_name_matches_a_search_term(self):
        # ?q= filters by name; both seeded lists match 'sample'.
        with self.app.app_context():
            response = self.client().get('/api/v1/bucketlists?q=sample',
                                         headers=self.auth_headers())
            result = json.loads(response.data.decode('utf-8')).get('data')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(result), 2)
    def test_pagination_of_bucketlists_when_you_pass_a_limit_parameter(self):
        # ?limit=1 wraps results in a pagination envelope with one item page.
        with self.app.app_context():
            response = self.client().get('/api/v1/bucketlists?limit=1',
                                         headers=self.auth_headers())
            result = json.loads(response.data.decode('utf-8'))
        self.assertEqual(response.status_code, 200)
        expected_result = sorted(['data', 'next_page','page', 'per_page', 'total_data', 'pages', 'prev_page'])
        self.assertListEqual(sorted(result.keys()), expected_result)
self.assertEqual(len(result.get('data')), 1) | 48.517241 | 110 | 0.615731 | 4,124 | 0.97702 | 0 | 0 | 0 | 0 | 0 | 0 | 618 | 0.146411 |
a4aaff7e2edd9e94de8ad787fca6f95714a7b0d5 | 6,256 | py | Python | lib/SetAPI/readsalignment/ReadsAlignmentSetInterfaceV1.py | r2sunita/SetAPI | 4ed769ed9678c057c7ded05fb93b9b7dc0874fc2 | [
"MIT"
] | null | null | null | lib/SetAPI/readsalignment/ReadsAlignmentSetInterfaceV1.py | r2sunita/SetAPI | 4ed769ed9678c057c7ded05fb93b9b7dc0874fc2 | [
"MIT"
] | null | null | null | lib/SetAPI/readsalignment/ReadsAlignmentSetInterfaceV1.py | r2sunita/SetAPI | 4ed769ed9678c057c7ded05fb93b9b7dc0874fc2 | [
"MIT"
] | null | null | null | """
An interface for handling sets of ReadsAlignments.
"""
from pprint import pprint
from SetAPI.generic.SetInterfaceV1 import SetInterfaceV1
from SetAPI import util
class ReadsAlignmentSetInterfaceV1:
    """Save/fetch sets of ReadsAlignments via the generic SetInterfaceV1,
    with compatibility handling for legacy non-KBaseSets alignment sets."""
    def __init__(self, workspace_client):
        # The workspace client is used both directly (legacy fetches) and
        # through the generic set interface.
        self.workspace_client = workspace_client
        self.set_interface = SetInterfaceV1(workspace_client)
    def save_reads_alignment_set(self, ctx, params):
        """Validate params['data'] and save it as a KBaseSets.ReadsAlignmentSet.
        Returns {'set_ref': 'ws/obj/ver', 'set_info': info_tuple}; raises
        ValueError when 'data' is missing/None or fails validation."""
        if 'data' in params and params['data'] is not None:
            self._validate_reads_alignment_set_data(params['data'])
        else:
            raise ValueError('"data" parameter field required to save a ReadsAlignmentSet')
        save_result = self.set_interface.save_set(
            'KBaseSets.ReadsAlignmentSet',
            ctx['provenance'],
            params
        )
        info = save_result[0]
        # Ref built from info[6]/info[0]/info[4] (ws id / obj id / version
        # per the KBase object_info tuple ordering - confirm).
        return {
            'set_ref': str(info[6]) + '/' + str(info[0]) + '/' + str(info[4]),
            'set_info': info
        }
    def _validate_reads_alignment_set_data(self, data):
        """Normalize and validate a set 'data' dict in place: default missing
        'description'/'label' fields to "", require at least one item, and
        require all items to share a single genome_id in object metadata."""
        # Normalize the object, make empty strings where necessary
        if "description" not in data:
            data["description"] = ""
        if "items" not in data or len(data.get("items", [])) == 0:
            raise ValueError("A ReadsAlignmentSet must contain at "
                             "least one ReadsAlignment reference.")
        refs = list()
        for item in data["items"]:
            refs.append(item["ref"])
            if "label" not in item:
                item["label"] = ""
        ref_list = list(map(lambda r: {"ref": r}, refs))
        # Get all the genome ids from our ReadsAlignment references (it's the genome_id key in
        # the object metadata). Make a set out of them.
        # If there's 0 or more than 1 item in the set, then either those items are bad, or they're
        # aligned against different genomes.
        info = self.workspace_client.get_object_info3({"objects": ref_list, "includeMetadata": 1})
        num_genomes = len(set([item[10]["genome_id"] for item in info["infos"]]))
        if num_genomes == 0 or num_genomes > 1:
            raise ValueError("All ReadsAlignments in the set must be aligned "
                             "against the same genome reference.")
    def get_reads_alignment_set(self, ctx, params):
        """
        If the set is a KBaseSets.ReadsAlignmentSet, it gets returned as-is.
        If it's a KBaseRNASeq.RNASeqAlignmentSet, a few things get juggled.
        1. We try to figure out the object references for the alignments (which are optional)
        2. From each ref, we try to figure out the condition, and apply those as labels (also
           might be optional)
        """
        set_type, obj_spec = self._check_get_reads_alignment_set_params(params)
        include_item_info = False
        if 'include_item_info' in params:
            if params['include_item_info'] == 1:
                include_item_info = True
        include_set_item_ref_paths = False
        if 'include_set_item_ref_paths' in params:
            if params['include_set_item_ref_paths'] == 1:
                include_set_item_ref_paths = True
        ref_path_to_set = []
        if 'ref_path_to_set' in params and len(params['ref_path_to_set']) > 0:
            ref_path_to_set = params['ref_path_to_set']
        if "KBaseSets" in set_type:
            # If it's a KBaseSets type, then we know the usual interface will work...
            return self.set_interface.get_set(
                params['ref'],
                include_item_info,
                ref_path_to_set,
                include_set_item_ref_paths
            )
        else:
            # ...otherwise, we need to fetch it directly from the workspace and tweak it into the
            # expected return object
            obj_data = self.workspace_client.get_objects2({"objects": [obj_spec]})["data"][0]
            obj = obj_data["data"]
            obj_info = obj_data["info"]
            alignment_ref_list = list()
            if "sample_alignments" in obj:
                alignment_ref_list = obj["sample_alignments"]
            else:
                # this is a list of dicts of random strings -> alignment refs
                # need them all as a set, then emit as a list.
                reads_to_alignments = obj["mapped_alignments_ids"]
                refs = set()
                for mapping in reads_to_alignments:
                    refs.update(mapping.values())
                alignment_ref_list = list(refs)
            alignment_items = [{"ref": i} for i in alignment_ref_list]
            item_infos = self.workspace_client.get_object_info3(
                {"objects": alignment_items, "includeMetadata": 1})["infos"]
            # Labels come from the 'condition' metadata key (None if absent;
            # note KBaseSets items above default labels to "" instead).
            for idx, ref in enumerate(alignment_items):
                alignment_items[idx]["label"] = item_infos[idx][10].get("condition", None)
                if include_item_info:
                    alignment_items[idx]["info"] = item_infos[idx]
            """
            If include_set_item_ref_paths is set, then add a field ref_path in alignment items
            """
            if include_set_item_ref_paths:
                util.populate_item_object_ref_paths(alignment_items, obj_spec)
            return {
                "data": {
                    "items": alignment_items,
                    "description": ""
                },
                "info": obj_info
            }
    def _check_get_reads_alignment_set_params(self, params):
        """Validate fetch params and return (object type string, object spec);
        the type string is element 2 of the referenced object's info tuple."""
        if 'ref' not in params or params['ref'] is None:
            raise ValueError('"ref" parameter field specifiying the reads alignment set is required')
        elif not util.check_reference(params['ref']):
            raise ValueError('"ref" parameter must be a valid workspace reference')
        if 'include_item_info' in params:
            if params['include_item_info'] not in [0, 1]:
                raise ValueError('"include_item_info" parameter field can only be set to 0 or 1')
        obj_spec = util.build_ws_obj_selector(params.get('ref'), params.get('ref_path_to_set', []))
        info = self.workspace_client.get_object_info3({"objects": [obj_spec]})
        return info["infos"][0][2], obj_spec
| 42.849315 | 101 | 0.597666 | 6,086 | 0.972826 | 0 | 0 | 0 | 0 | 0 | 0 | 2,211 | 0.353421 |
a4acbea04ae0ec385db1ca11483d127f5bbc49c8 | 3,193 | py | Python | datacatalog/linkedstores/process/store.py | SD2E/python-datacatalog | 51ab366639505fb6e8a14cd6b446de37080cd20d | [
"CNRI-Python"
] | null | null | null | datacatalog/linkedstores/process/store.py | SD2E/python-datacatalog | 51ab366639505fb6e8a14cd6b446de37080cd20d | [
"CNRI-Python"
] | 2 | 2019-07-25T15:39:04.000Z | 2019-10-21T15:31:46.000Z | datacatalog/linkedstores/process/store.py | SD2E/python-datacatalog | 51ab366639505fb6e8a14cd6b446de37080cd20d | [
"CNRI-Python"
] | 1 | 2019-10-15T14:33:44.000Z | 2019-10-15T14:33:44.000Z | import collections
import inspect
import json
import jsonschema
import os
import sys
from pprint import pprint
from slugify import slugify
from ...dicthelpers import data_merge
from ..basestore import LinkedStore, linkages
from ..basestore import HeritableDocumentSchema, JSONSchemaCollection, formatChecker
from ..basestore import CatalogUpdateFailure
from ...stores import abspath
from ...utils import normalize, normpath
from ...filetypes import infer_filetype
# Link fields exposed via ProcessStore.LINK_FIELDS (none for this store).
DEFAULT_LINK_FIELDS = list()
class ProcessUpdateFailure(CatalogUpdateFailure):
    """Raised when a process document cannot be created or updated."""
    pass
class ProcessDocument(HeritableDocumentSchema):
    """Defines metadata for a Process Entity"""
    def __init__(self, inheritance=True, **kwargs):
        # Load the (optionally inherited) schema, then refresh the
        # identifier derived from it.
        super(ProcessDocument, self).__init__(inheritance, **kwargs)
        self.update_id()
class ProcessRecord(collections.UserDict):
    """New document for ProcessStore with schema enforcement"""
    def __init__(self, value, *args, **kwargs):
        # if 'file_id' not in value:
        #     value['file_id'] = 'file.tacc.' + uuid.uuid1().hex
        # Work on a shallow copy so the caller's dict is not mutated.
        value = dict(value)
        self.schema = ProcessDocument()
        # Drop keys the schema marks as filtered before validating.
        for k in self.schema.filter_keys():
            try:
                del value[k]
            except KeyError:
                pass
        # Raises jsonschema.ValidationError when the value does not conform.
        jsonschema.validate(value, self.schema.to_dict(),
                            format_checker=formatChecker())
        super().__init__(value, *args, **kwargs)
class ProcessStore(LinkedStore):
    """Manage storage and retrieval of ProcessDocument records"""
    LINK_FIELDS = DEFAULT_LINK_FIELDS

    def __init__(self, mongodb, config=None, session=None, **kwargs):
        """Set up the store against `mongodb`.

        `config` effectively defaults to {}; a None sentinel is used so the
        default is not a shared mutable dict across instances.
        """
        if config is None:
            config = {}
        super(ProcessStore, self).__init__(mongodb, config, session)
        schema = ProcessDocument(**kwargs)
        super(ProcessStore, self).update_attrs(schema)
        self.setup(update_indexes=kwargs.get('update_indexes', False))

    def add_update_document(self, document_dict, uuid=None, token=None, strategy='merge'):
        """Insert or update a process document; requires a 'process_id' key,
        otherwise raises KeyError with a suggested identifier."""
        if 'process_id' not in document_dict:
            suggested_id = encode_name(document_dict['name'])
            # Fixed typo in the error message ('processe_id' -> 'process_id').
            raise KeyError("Process document must have a 'process_id'. " +
                           "Based on the provided name, here is a suggestion: {}".format(suggested_id))
        return super().add_update_document(document_dict,
                                           uuid=uuid, token=token,
                                           strategy=strategy)

    def get_typeduuid(self, payload, binary=False):
        """Derive the typed UUID from a path-normalized, linearized payload."""
        if isinstance(payload, dict):
            if 'name' in payload:
                # NOTE: mutates the caller's dict - preserved existing behavior.
                payload['name'] = normpath(payload['name'])
            identifier_string = self.get_linearized_values(payload)
        else:
            identifier_string = normpath(str(payload))
        return super().get_typeduuid(identifier_string, binary)
class StoreInterface(ProcessStore):
    """Alias exposing ProcessStore under the conventional StoreInterface
    name (presumably looked up by this fixed name - verify against loader)."""
    pass
def encode_name(textstring, separator='_', stopwords=[], case_insensitive=False):
    """Slugify `textstring` (optionally lowercasing and dropping stopwords)
    and join the resulting words with `separator` instead of '-'."""
    slugged = slugify(textstring, stopwords=stopwords,
                      lowercase=case_insensitive)
    return separator.join(slugged.split('-'))
| 36.701149 | 103 | 0.665832 | 2,463 | 0.771375 | 0 | 0 | 0 | 0 | 0 | 0 | 459 | 0.143752 |
a4af9051ce0c393008d96458e2c2f3dce09ae68a | 3,514 | py | Python | tests/clickhouse/test_columns.py | fpacifici/snuba | cf732b71383c948f9387fbe64e9404ca71f8e9c5 | [
"Apache-2.0"
] | null | null | null | tests/clickhouse/test_columns.py | fpacifici/snuba | cf732b71383c948f9387fbe64e9404ca71f8e9c5 | [
"Apache-2.0"
] | null | null | null | tests/clickhouse/test_columns.py | fpacifici/snuba | cf732b71383c948f9387fbe64e9404ca71f8e9c5 | [
"Apache-2.0"
] | null | null | null | from copy import deepcopy
import pytest
from snuba.clickhouse.columns import (
UUID,
AggregateFunction,
Array,
ColumnType,
Date,
DateTime,
Enum,
FixedString,
Float,
IPv4,
IPv6,
Nested,
ReadOnly,
)
from snuba.clickhouse.columns import SchemaModifiers as Modifier
from snuba.clickhouse.columns import String, UInt
# Each case bundles: a modified column type, its unmodified ("raw") twin,
# a type that must compare unequal, and the expected ClickHouse schema string.
TEST_CASES = [
    pytest.param(
        String(Modifier(nullable=True)),
        String(),
        String(),
        "Nullable(String)",
        id="strings",
    ),
    pytest.param(
        UUID(Modifier(readonly=True)),
        UUID(),
        UUID(Modifier(nullable=True)),
        "UUID",
        id="UUIDs",
    ),
    pytest.param(IPv4(None), IPv4(), IPv4(Modifier(nullable=True)), "IPv4", id="IPs",),
    pytest.param(IPv6(None), IPv6(), IPv6(Modifier(nullable=True)), "IPv6", id="IPs",),
    pytest.param(
        FixedString(32, Modifier(nullable=True)),
        FixedString(32),
        FixedString(64, Modifier(nullable=True)),
        "Nullable(FixedString(32))",
        id="fixed strings",
    ),
    pytest.param(
        UInt(8, Modifier(nullable=True)),
        UInt(8),
        UInt(16, Modifier(nullable=True)),
        "Nullable(UInt8)",
        id="integers",
    ),
    pytest.param(
        Float(64, Modifier(nullable=True)),
        Float(64),
        Float(32, Modifier(nullable=True)),
        "Nullable(Float64)",
        id="floats",
    ),
    pytest.param(Date(), Date(), Date(Modifier(nullable=True)), "Date", id="dates",),
    pytest.param(
        DateTime(),
        DateTime(),
        DateTime(Modifier(nullable=True)),
        "DateTime",
        id="datetimes",
    ),
    pytest.param(
        Array(String(Modifier(nullable=True))),
        Array(String()),
        Array(String()),
        "Array(Nullable(String))",
        id="arrays",
    ),
    pytest.param(
        Nested(
            [("key", String()), ("val", String(Modifier(nullable=True)))],
            Modifier(nullable=True),
        ),
        Nested([("key", String()), ("val", String())]),
        Nested([("key", String()), ("val", String())], Modifier(nullable=True)),
        "Nullable(Nested(key String, val Nullable(String)))",
        id="nested",
    ),
    pytest.param(
        AggregateFunction("uniqIf", [UInt(8), UInt(32)], Modifier(nullable=True)),
        AggregateFunction("uniqIf", [UInt(8), UInt(32)]),
        AggregateFunction("uniqIf", [UInt(8)], Modifier(nullable=True)),
        "Nullable(AggregateFunction(uniqIf, UInt8, UInt32))",
        id="aggregated",
    ),
    pytest.param(
        Enum([("a", 1), ("b", 2)], Modifier(nullable=True)),
        Enum([("a", 1), ("b", 2)]),
        Enum([("a", 1), ("b", 2)]),
        "Nullable(Enum('a' = 1, 'b' = 2))",
        id="enums",
    ),
]
@pytest.mark.parametrize("col_type, raw_type, different_type, for_schema", TEST_CASES)
def test_methods(
    col_type: ColumnType,
    raw_type: ColumnType,
    different_type: ColumnType,
    for_schema: str,
) -> None:
    """Exercise equality, schema rendering and modifier handling of a type."""
    assert col_type == deepcopy(col_type)
    assert col_type != different_type
    # Test it is not equal to a type of different class.
    assert col_type != ColumnType(Modifier(readonly=True))
    assert col_type.for_schema() == for_schema
    assert col_type.get_raw() == raw_type
    # set_modifiers must return a new but equal instance, not mutate in place.
    modified = col_type.set_modifiers(col_type.get_modifiers())
    assert modified is not col_type
    assert modified == col_type
    assert col_type.set_modifiers(Modifier(readonly=True)).has_modifier(ReadOnly)
| 28.803279 | 87 | 0.584804 | 0 | 0 | 0 | 0 | 722 | 0.205464 | 0 | 0 | 562 | 0.159932 |
a4b1c460b6c065a467161df65796be109740e64e | 5,851 | py | Python | src/paddle_prompt/templates/base_template.py | wj-Mcat/paddle-prompt | 3cc47c7cbda946fb9bc6e3032e8de17571e03cd4 | [
"Apache-2.0"
] | 1 | 2022-03-09T05:31:50.000Z | 2022-03-09T05:31:50.000Z | src/paddle_prompt/templates/base_template.py | wj-Mcat/paddle-prompt | 3cc47c7cbda946fb9bc6e3032e8de17571e03cd4 | [
"Apache-2.0"
] | null | null | null | src/paddle_prompt/templates/base_template.py | wj-Mcat/paddle-prompt | 3cc47c7cbda946fb9bc6e3032e8de17571e03cd4 | [
"Apache-2.0"
] | null | null | null | """Base Abstract Template class"""
from __future__ import annotations
import json
from abc import ABC
from collections import OrderedDict
from typing import Any, Dict, List
import numpy as np
import paddle
from paddle import nn
from paddlenlp.transformers.tokenizer_utils import PretrainedTokenizer
from paddle_prompt.config import Config
from paddle_prompt.schema import InputExample, InputFeature
from paddle_prompt.templates.engine import JinjaEngine
from paddle_prompt.utils import extract_and_stack_by_fields, lists_to_tensors
def _resize_prediction_mask(text: str, label_size: int) -> str:
mask_str = '[MASK]'
return text.replace(mask_str, ''.join([mask_str] * label_size))
def _load_label2words(file: str) -> Dict[str, List[str]]:
label2words = OrderedDict()
with open(file, 'r', encoding='utf-8') as f:
data = json.load(f)
for label, label_obj in data.items():
label2words[label] = label_obj['labels']
return label2words
class SoftMixin:
    """Mixin for templates containing soft (non-vocabulary) tokens."""

    def soft_token_ids(self) -> List[int]:
        """Return the ids identifying which template tokens are soft tokens.

        Template tokens are not always drawn from the vocabulary; some are
        sequences of soft tokens, in which case a subclass must override
        this method to report them.
        """
        raise NotImplementedError
class Template(nn.Layer):
    """
    Abstract class for templates in prompt learning.

    Renders examples through a Jinja template, tokenizes them, and builds
    the feature tensors consumed by a masked-language model (MLM).
    TODO: how to handle -> fill the target label in the mask place
    """
    def __init__(
        self,
        tokenizer: PretrainedTokenizer,
        config: Config,
        **kwargs
    ):
        super().__init__(**kwargs)
        # Jinja engine and label->words mapping are both read from the same
        # template file referenced by the config.
        self.render_engine = JinjaEngine.from_file(config.template_file)
        self.tokenizer: PretrainedTokenizer = tokenizer
        self.config: Config = config
        self.label2words: Dict[str, List[str]] = _load_label2words(
            config.template_file
        )
        self._init_max_token_num()
    def _init_max_token_num(self):
        # Longest label word (in characters) across all labels; stored on the
        # config so mask expansion and decoding agree on the mask span size.
        max_token_num = 0
        for words in self.label2words.values():
            for word in words:
                max_token_num = max(max_token_num, len(word))
        self.config.max_token_num = max_token_num
    def _get_mask_id(self) -> int:
        """Return the vocabulary id of the configured mask token."""
        # TODO: to be removed, this code is to fix the issue of paddlenlp
        special_tokens = [token for token in self.tokenizer.all_special_tokens if token != self.config.mask_token]
        special_ids: List[int] = self.tokenizer.convert_tokens_to_ids(special_tokens)
        ids = self.tokenizer.convert_tokens_to_ids([self.config.mask_token])
        # Drop ids that collide with other special tokens; exactly one must remain.
        ids = [id for id in ids if id not in special_ids]
        assert len(ids) == 1, 'can"t get [MASK] id from tokenizer'
        return ids[0]
    def wrap_examples(
        self,
        examples: List[InputExample],
        label2idx: Dict[str, int] = None
    ):
        """wrap examples with template and convert them to features
        which can be fed into the MLM
        Args:
            examples (List[InputExample]): the examples object
            label2idx (Dict[str, int], optional): label to index mapper.
                Defaults to None (falls back to ``config.label2idx``).
        Returns:
            List[Tensor]: the features which will be fed into the MLM
        """
        if not label2idx:
            label2idx = self.config.label2idx
        # 1. construct text or text pair dataset
        texts = [self.render_engine.render(example) for example in examples]
        texts = [_resize_prediction_mask(
            text, self.config.max_token_num) for text in texts]
        encoded_features = self.tokenizer.batch_encode(
            texts,
            max_seq_len=self.config.max_seq_length,
            pad_to_max_seq_len=True,
            return_token_type_ids=True,
        )
        fields = ['input_ids', 'token_type_ids']
        # 2. return different data based on label
        # Unlabeled batches (inference) only need the encoded inputs.
        has_label = examples[0].label is not None
        if not has_label:
            return extract_and_stack_by_fields(encoded_features, fields)
        label_ids = []
        is_multi_class = isinstance(examples[0].label, list)
        if not is_multi_class:
            label_ids = [label2idx[example.label] for example in examples]
        else:
            for example in examples:
                example_label_ids = [label2idx[label]
                                     for label in example.label]
                label_ids.append(example_label_ids)
        features = extract_and_stack_by_fields(encoded_features, fields)
        # 3. construct prediction mask
        # Flatten (row, col) positions of mask tokens into indices over the
        # flattened (batch * max_seq_length) input.
        mask_token_id = self._get_mask_id()
        mask_label_mask = np.array(features[0]) == mask_token_id
        np_prediction_mask = np.argwhere(mask_label_mask)
        prediction_mask = []
        for pre_mask in np_prediction_mask:
            prediction_mask.append(
                pre_mask[0] * self.config.max_seq_length + pre_mask[1])
        features.append(np.array(prediction_mask))
        # 4. construct mask_label_ids
        # Only the first label word per label is used here.
        mask_label_ids = []
        for example in examples:
            mask_label_ids.extend(
                self.tokenizer.convert_tokens_to_ids(
                    # TODO: to handle the multiple words?
                    list(self.label2words[example.label][0])
                )
            )
        features.append(np.array(mask_label_ids))
        # 5. add label ids data
        features.append(
            np.array(label_ids)
        )
        features = lists_to_tensors(features, self.config.place())
        return features
    def forward(self, *args, **kwargs) -> Any:
        """should handle the template mainforce logit
        Returns:
            Any: any result. TODO: define the forward result data structure.
        """
| 33.626437 | 114 | 0.635105 | 4,860 | 0.830627 | 0 | 0 | 0 | 0 | 0 | 0 | 1,355 | 0.231584 |
a4b236a4945b4b773f1daf37ffb005ccfb83b552 | 2,538 | py | Python | tests/test_eulerian.py | guojingyu/DeNovoAssembly | cd1f3e58c42f97197e73aed085ede07a924ebbdb | [
"MIT"
] | 4 | 2018-09-03T03:09:21.000Z | 2022-03-15T12:57:07.000Z | tests/test_eulerian.py | guojingyu/DeNovoAssembly | cd1f3e58c42f97197e73aed085ede07a924ebbdb | [
"MIT"
] | null | null | null | tests/test_eulerian.py | guojingyu/DeNovoAssembly | cd1f3e58c42f97197e73aed085ede07a924ebbdb | [
"MIT"
] | 3 | 2017-04-24T07:29:23.000Z | 2020-08-13T07:13:06.000Z | #!/usr/bin/env python
"""
Test eulerian functions including the random walk
Author : Jingyu Guo
"""
import unittest
from de_novo_assembly.de_bruijn_graph import DeBruijnGraph
from Bio.SeqRecord import SeqRecord
from de_novo_assembly.eulerian import has_euler_path, has_euler_circuit, \
make_contig_from_path, eulerian_random_walk
class EulerianTests(unittest.TestCase):
    """Euler path/circuit detection on small De Bruijn graphs."""

    def setUp(self):
        self.sequence_1 = "ATTAGACCTG"
        self.sequence_2 = "ATTTAGACCCTG"
        self.sequence_3 = "AGACCCTGAGTCG"
        self.test_seq_1 = {'seq_1': SeqRecord(self.sequence_1)}
        self.test_seq_2 = {'seq_2': SeqRecord(self.sequence_2),
                           'seq_3': SeqRecord(self.sequence_3)}
        self.dbg_1 = DeBruijnGraph(self.test_seq_1,k=4)
        # now link the dbg_1 to make a Eulerian circle
        self.dbg_1.G.add_edge("CTG", "ATT")
        self.dbg_2 = DeBruijnGraph(self.test_seq_2,k=6)
        # Expected Eulerian path through dbg_2 (reference data).
        self.reference_eulerian_path_2 = [('ATTT', 'TTTA'), ('TTTA', 'TTAG'),
                                          ('TTAG', 'TAGA'), ('TAGA', 'AGAC'),
                                          ('AGAC', 'GACC'), ('GACC', 'ACCC'),
                                          ('ACCC', 'CCCT'), ('CCCT', 'CCTG'),
                                          ('CCTG', 'CTGA'), ('CTGA', 'TGAG'),
                                          ('TGAG', 'GAGT'), ('GAGT', 'AGTC'),
                                          ('AGTC', 'GTCG')]

    def test_has_euler_path_function(self):
        # dbg_1 was closed into a cycle in setUp: circuit yes, open path no.
        assert has_euler_circuit(self.dbg_1.G) == True
        flag,_,_ = has_euler_path(self.dbg_1.G)
        assert flag == False

    def test_has_euler_circuit_function(self):
        assert has_euler_circuit(self.dbg_2.G) == False
        flag, _, _ = has_euler_path(self.dbg_2.G)
        assert flag == True

    def test_eulerian_random_walk(self):
        # Smoke test only. Fix: the original used the Python 2 `print`
        # statement, which is a SyntaxError under Python 3.
        print(eulerian_random_walk(self.dbg_2))
class PathToSequenceTests(unittest.TestCase):
    """make_contig_from_path should join edge tuples into a contig string."""

    def setUp(self):
        self.path_1 = [('1','2'),('2','3'),('3','4')]
        self.seq_1 = '1234'
        self.path_2 = [('1','2'),('2','3'),('3','4'),('4','1')]
        self.seq_2 = '12341'
        self.path_3 = [('1', '2'), ('2', '1')]
        self.seq_3 = '121'
        self.path_4 = [('1','2'),('2','3'),('3','1')]
        self.seq_4 = '1231'

    def test_path_to_sequence(self):
        assert self.seq_1 == make_contig_from_path(self.path_1)
        assert self.seq_2 == make_contig_from_path(self.path_2)
        assert self.seq_3 == make_contig_from_path(self.path_3)
        # Fix: path_4/seq_4 were defined in setUp but never exercised.
        assert self.seq_4 == make_contig_from_path(self.path_4)
| 35.25 | 77 | 0.559102 | 2,193 | 0.864066 | 0 | 0 | 0 | 0 | 0 | 0 | 469 | 0.184791 |
a4b259e11338f6d5c17641cccb402a501cd51358 | 25,351 | py | Python | gameevents/tests/test_gameevents.py | danilovbarbosa/sg-gameevents | 839de019fb6026c5a9d3178d3109cb2948f19f09 | [
"Apache-2.0"
] | null | null | null | gameevents/tests/test_gameevents.py | danilovbarbosa/sg-gameevents | 839de019fb6026c5a9d3178d3109cb2948f19f09 | [
"Apache-2.0"
] | null | null | null | gameevents/tests/test_gameevents.py | danilovbarbosa/sg-gameevents | 839de019fb6026c5a9d3178d3109cb2948f19f09 | [
"Apache-2.0"
] | 1 | 2018-09-28T00:03:29.000Z | 2018-09-28T00:03:29.000Z |
import unittest
import time
import datetime
import json
import sys
#import base64
#from werkzeug.wrappers import Response
sys.path.append("..")
#from flask import current_app
#from werkzeug.datastructures import Headers
from gameevents_app import create_app
#Extensions
from gameevents_app.extensions import db, LOG
from gameevents_app.models.session import Session
from gameevents_app.models.client import Client
from gameevents_app.models.gameevent import GameEvent
from uuid import UUID
import OpenSSL
#from gameevents_app.errors import InvalidGamingSession
#from sqlalchemy.orm.exc import NoResultFound
#from flask.ext.api.exceptions import AuthenticationFailed
class TestGameEvents(unittest.TestCase):
    """Integration tests for the game-events REST API: token issuing,
    game-event commits/reads, admin client creation and session listing.

    Fixes applied: deprecated ``assertEquals`` replaced with ``assertEqual``;
    misleading "200 OK" comments before 401 assertions corrected; dead
    commented-out assertions removed.
    """

    @classmethod
    def setUpClass(self):
        self.app = create_app(testing=True)
        self.app_context = self.app.app_context()
        self.app_context.push()
        self.client = self.app.test_client()
        LOG.info("Initializing tests.")
        # Create a brand new test db
        db.create_all()
        # Add a clientid and apikey
        new_client = Client("myclientid", "myapikey", "normal")
        new_admin_client = Client("dashboard", "dashboardapikey", "admin")
        db.session.add(new_client)
        db.session.add(new_admin_client)
        try:
            db.session.commit()
            LOG.info("=== Added clients ===")
        except Exception as e:
            LOG.error(e, exc_info=True)
        # Generating gaming sessions ids
        self.newsessionid = UUID(bytes = OpenSSL.rand.bytes(16)).hex
        self.newsessionid2 = UUID(bytes = OpenSSL.rand.bytes(16)).hex
        self.newsessionid3 = UUID(bytes = OpenSSL.rand.bytes(16)).hex  # session not in db
        self.unauthorized_sessionid = "ac52bb1d811356ab3a8e8711c5f7ac5d"
        new_session = Session(self.newsessionid, new_client.id)
        new_session2 = Session(self.newsessionid2, new_client.id)
        db.session.add(new_session)
        db.session.add(new_session2)
        try:
            db.session.commit()
            LOG.info("=== Added sessions ===")
            LOG.info("=== Session not in db: %s ===" % self.newsessionid3)
        except Exception as e:
            LOG.error(e, exc_info=True)
        # Generating tokens
        self.mytoken = new_client.generate_auth_token(self.newsessionid)
        self.myexpiredtoken = new_client.generate_auth_token(self.newsessionid, expiration=1)
        self.mytokennewsession = new_client.generate_auth_token(self.newsessionid3)
        self.myadmintoken = new_admin_client.generate_auth_token()
        self.myexpiredadmintoken = new_admin_client.generate_auth_token(expiration=1)
        self.mybadtoken = "badlogin" + self.mytoken.decode()[8:]
        self.mybadtoken = self.mybadtoken.encode("ascii")
        self.xml_valid_event = """<event><timestamp>2015-11-29T12:10:57Z</timestamp>
                    <action>STARTGAME</action><level></level><update></update><which_lix>
                    </which_lix><result></result></event>"""
        self.json_valid_event = """[{
                                    "timestamp": "2015-11-29T12:10:57Z",
                                    "action": "STARTGAME",
                                    "which_lix": ""
                                    }]"""
        self.xml_invalid_event = """<event>a
                    <action>STARTGAME</action>
                    <timestamp>2015-11-29T12:10:57Z</timestamp>
                    <which_lix />
                    </event>"""
        self.json_invalid_event = """
                    "timestamp": "2015-11-29T12:10:57Z",
                    "action": "STARTGAME",,
                    "which_lix": ""
                    """
        self.xml_multiple_events = """<event>
                    <action>STARTGAME</action>
                    <timestamp>2015-11-29T12:10:57Z</timestamp>
                    <which_lix />
                    </event>
                    <event>
                    <action>ENDGAME</action>
                    <timestamp>2015-11-29T13:10:57Z</timestamp>
                    <which_lix />
                    </event>"""
        self.json_multiple_events = """[{ "timestamp": "2015-11-29T12:10:57Z",
                    "action": "STARTGAME",
                    "which_lix": ""
                    }, {
                    "timestamp": "2015-11-29T13:10:57Z",
                    "action": "ENDGAME",
                    "which_lix": ""
                    }]"""
        time.sleep(3)  # let the short-lived (expiration=1) tokens expire
        new_gameevent = GameEvent(new_session.id,self.xml_valid_event)
        db.session.add(new_gameevent)
        try:
            db.session.commit()
            LOG.info("=== Added game event. All set up. ===")
        except Exception as e:
            LOG.error(e, exc_info=True)

    @classmethod
    def tearDownClass(self):
        LOG.info("======================Finished tests====================")
        db.session.remove()
        db.drop_all()
        self.app_context.pop()

    def test_token_existing_sessionid(self):
        '''
        Token request with valid credentials and existing sessionid.
        '''
        requestdata = json.dumps(dict(clientid="myclientid", apikey="myapikey", sessionid = self.newsessionid))
        response = self.client.post('/gameevents/api/v1.0/token',
                                    data=requestdata,
                                    content_type='application/json',
                                    follow_redirects=True)
        # Assert response is 200 OK.
        self.assertEqual(response.status, "200 OK")

    def test_token_nonexisting_but_valid_sessionid(self):
        '''
        Token request with valid credentials and a valid - but still not in the db - sessionid.
        '''
        requestdata = json.dumps(dict(clientid="myclientid", apikey="myapikey", sessionid = self.newsessionid3))
        response = self.client.post('/gameevents/api/v1.0/token',
                                    data=requestdata,
                                    content_type='application/json',
                                    follow_redirects=True)
        # Assert response is 200 OK.
        self.assertEqual(response.status, "200 OK")

    def test_token_invalid_sessionid(self):
        '''
        Token request with valid credentials but invalid sessionid.
        '''
        requestdata = json.dumps(dict(clientid="myclientid", apikey="myapikey", sessionid = "bablablabal"))
        response = self.client.post('/gameevents/api/v1.0/token',
                                    data=requestdata,
                                    content_type='application/json',
                                    follow_redirects=True)
        self.assertEqual(response.status, "400 BAD REQUEST")

    def test_token_unauthorized_sessionid(self):
        '''
        Token request with valid credentials but a sessionid the client
        is not authorized for.
        '''
        requestdata = json.dumps(dict(clientid="myclientid", apikey="myapikey", sessionid = self.unauthorized_sessionid))
        response = self.client.post('/gameevents/api/v1.0/token',
                                    data=requestdata,
                                    content_type='application/json',
                                    follow_redirects=True)
        self.assertEqual(response.status, "401 UNAUTHORIZED")

    def test_get_admin_token(self):
        '''
        Admin token request with valid credentials.
        '''
        requestdata = json.dumps(dict(clientid="dashboard", apikey="dashboardapikey"))
        response = self.client.post('/gameevents/api/v1.0/token',
                                    data=requestdata,
                                    content_type='application/json',
                                    follow_redirects=True)
        # Assert response is 200 OK.
        self.assertEqual(response.status, "200 OK")

    def test_token_badparams(self):
        '''
        Token request with missing parameters.
        '''
        requestdata = json.dumps(dict(clientid="myclientid"))
        response = self.client.post('/gameevents/api/v1.0/token',
                                    data=requestdata,
                                    content_type='application/json',
                                    follow_redirects=True)
        # Assert response is 400 BAD REQUEST.
        self.assertEqual(response.status, "400 BAD REQUEST")

    def test_token_invalid_apikey(self):
        '''
        Token request with invalid credentials and valid sessionid.
        '''
        requestdata = json.dumps(dict(clientid="myclientidaaaaa", apikey="myapikeyaaaa", sessionid=self.newsessionid))
        response = self.client.post('/gameevents/api/v1.0/token',
                                    data=requestdata,
                                    content_type='application/json',
                                    follow_redirects=True)
        # Assert response is 401 UNAUTHORIZED.
        self.assertEqual(response.status, "401 UNAUTHORIZED")

    def test_token_invalid_clientid(self):
        '''
        Token request with valid clientid but invalid apikey, and valid sessionid.
        '''
        requestdata = json.dumps(dict(clientid="myclientid", apikey="myapikeyaaaa", sessionid=self.newsessionid))
        response = self.client.post('/gameevents/api/v1.0/token',
                                    data=requestdata,
                                    content_type='application/json',
                                    follow_redirects=True)
        # Assert response is 401 UNAUTHORIZED.
        self.assertEqual(response.status, "401 UNAUTHORIZED")

    def test_commit_xmlgameevent_validtoken(self):
        '''
        Game event commit request with valid token and invalid game event (in XML instead of JSON).
        '''
        token = self.mytoken.decode()
        headers = {}
        sessionid = self.newsessionid
        headers['X-AUTH-TOKEN'] = token
        gameevent = self.xml_valid_event
        timestamp = str(datetime.datetime.now())
        requestdata = json.dumps(dict(timestamp=timestamp, events=gameevent))
        response = self.client.post('/gameevents/api/v1.0/sessions/%s/events' % sessionid,
                                    data=requestdata,
                                    headers=headers,
                                    content_type='application/json',
                                    follow_redirects=True)
        self.assertEqual(response.status, "400 BAD REQUEST")

    def test_commit_gameevent_incompletejsonrequest(self):
        '''
        Game event commit request with valid token and invalid game event (invalid JSON).
        '''
        token = self.mytoken.decode()
        headers = {}
        sessionid = self.newsessionid
        headers['X-AUTH-TOKEN'] = token
        requestdata = "{json:\"badlyformed\""
        response = self.client.post('/gameevents/api/v1.0/sessions/%s/events' % sessionid,
                                    data=requestdata,
                                    headers=headers,
                                    content_type='application/json',
                                    follow_redirects=True)
        self.assertEqual(response.status, "400 BAD REQUEST")

    def test_commit_jsongameevent_validtoken(self):
        '''
        Game event commit request with valid token and valid game event.
        '''
        token = self.mytoken.decode()
        headers = {}
        headers['X-AUTH-TOKEN'] = token
        sessionid = self.newsessionid
        gameevent = self.json_valid_event
        timestamp = str(datetime.datetime.now())
        requestdata = json.dumps(dict(timestamp=timestamp, events=gameevent))
        response = self.client.post('/gameevents/api/v1.0/sessions/%s/events' % sessionid,
                                    data=requestdata,
                                    headers=headers,
                                    content_type='application/json',
                                    follow_redirects=True)
        self.assertEqual(response.status, "201 CREATED")

    def test_commit_invalidjsongameevent_validtoken(self):
        '''
        Game event commit request with valid token and invalid game event (invalid JSON).
        '''
        token = self.mytoken.decode()
        headers = {}
        sessionid = self.newsessionid
        gameevent = self.json_invalid_event
        headers['X-AUTH-TOKEN'] = token
        timestamp = str(datetime.datetime.now())
        requestdata = json.dumps(dict(timestamp=timestamp, events=gameevent))
        response = self.client.post('/gameevents/api/v1.0/sessions/%s/events' % sessionid,
                                    data=requestdata,
                                    headers=headers,
                                    content_type='application/json',
                                    follow_redirects=True)
        self.assertEqual(response.status, "400 BAD REQUEST")

    def test_commit_invalidxmlgameevent_validtoken(self):
        '''
        Game event commit request with valid token and invalid game event (in invalid XML).
        '''
        token = self.mytoken.decode()
        headers = {}
        sessionid = self.newsessionid
        headers['X-AUTH-TOKEN'] = token
        gameevent = self.xml_invalid_event
        timestamp = str(datetime.datetime.now())
        requestdata = json.dumps(dict(timestamp=timestamp, events=gameevent))
        response = self.client.post('/gameevents/api/v1.0/sessions/%s/events' % sessionid,
                                    data=requestdata,
                                    headers=headers,
                                    content_type='application/json',
                                    follow_redirects=True)
        self.assertEqual(response.status, "400 BAD REQUEST")

    def test_commit_multiplexmlgameevent_validtoken(self):
        '''
        Game event commit request with valid token and multiple game events (but in XML, not JSON).
        '''
        token = self.mytoken.decode()
        headers = {}
        sessionid = self.newsessionid
        headers['X-AUTH-TOKEN'] = token
        gameevent = self.xml_multiple_events
        timestamp = str(datetime.datetime.now())
        requestdata = json.dumps(dict(timestamp=timestamp, events=gameevent))
        response = self.client.post('/gameevents/api/v1.0/sessions/%s/events' % sessionid,
                                    data=requestdata,
                                    headers=headers,
                                    content_type='application/json',
                                    follow_redirects=True)
        self.assertEqual(response.status, "400 BAD REQUEST")

    def test_commit_multiplejsongameevent_validtoken(self):
        '''
        Game event commit request with valid token and multiple valid game events.
        '''
        token = self.mytoken.decode()
        headers = {}
        sessionid = self.newsessionid
        headers['X-AUTH-TOKEN'] = token
        gameevent = self.json_multiple_events
        timestamp = str(datetime.datetime.now())
        requestdata = json.dumps(dict(timestamp=timestamp, events=gameevent))
        response = self.client.post('/gameevents/api/v1.0/sessions/%s/events' % sessionid,
                                    data=requestdata,
                                    headers=headers,
                                    content_type='application/json',
                                    follow_redirects=True)
        json_results = json.loads(response.get_data().decode())
        self.assertEqual(json_results["message"], "Created 2 new item(s).")
        self.assertEqual(response.status, "201 CREATED")

    def test_commit_gameevent_validtoken_newsessionid(self):
        '''
        Game event commit request with valid token but for a session not in the database.
        '''
        token = self.mytokennewsession.decode()
        sessionid = self.newsessionid3
        gameevent = self.json_valid_event
        timestamp = str(datetime.datetime.now())
        headers = {}
        headers['X-AUTH-TOKEN'] = token
        requestdata = json.dumps(dict(timestamp=timestamp, events=gameevent))
        response = self.client.post('/gameevents/api/v1.0/sessions/%s/events' % sessionid,
                                    data=requestdata,
                                    headers=headers,
                                    content_type='application/json',
                                    follow_redirects=True)
        self.assertEqual(response.status, "404 NOT FOUND")

    def test_commit_gameevent_expiredtoken(self):
        '''
        Game event commit request with expired token.
        '''
        token = self.myexpiredtoken.decode()
        sessionid = self.newsessionid
        gameevent = self.json_valid_event
        timestamp = str(datetime.datetime.now())
        requestdata = json.dumps(dict(timestamp=timestamp, events=gameevent))
        headers = {}
        headers['X-AUTH-TOKEN'] = token
        response = self.client.post('/gameevents/api/v1.0/sessions/%s/events' % sessionid,
                                    data=requestdata,
                                    headers=headers,
                                    content_type='application/json',
                                    follow_redirects=True)
        self.assertEqual(response.status, "401 UNAUTHORIZED")

    def test_commit_gameevent_badtoken(self):
        '''
        Game event commit request with bad token.
        '''
        sessionid = self.newsessionid
        token = self.mybadtoken.decode()
        gameevent = self.json_valid_event
        timestamp = str(datetime.datetime.now())
        headers = {}
        headers['X-AUTH-TOKEN'] = token
        requestdata = json.dumps(dict(timestamp=timestamp, events=gameevent))
        response = self.client.post('/gameevents/api/v1.0/sessions/%s/events' % sessionid,
                                    data=requestdata,
                                    headers=headers,
                                    content_type='application/json',
                                    follow_redirects=True)
        self.assertEqual(response.status, "401 UNAUTHORIZED")

    def test_getgameevents(self):
        '''
        List game events for a given session with valid token.
        '''
        token = self.mytoken.decode()
        sessionid = self.newsessionid
        headers = {}
        headers['X-AUTH-TOKEN'] = token
        response = self.client.get('/gameevents/api/v1.0/sessions/%s/events' % sessionid,
                                   headers=headers,
                                   content_type='application/json',
                                   follow_redirects=True)
        self.assertEqual(response.status, "200 OK")

    def test_getgameevents_badtoken(self):
        '''
        List game events for a given session with invalid token.
        '''
        token = self.mybadtoken.decode()
        headers = {}
        headers['X-AUTH-TOKEN'] = token
        sessionid = self.newsessionid
        response = self.client.get('/gameevents/api/v1.0/sessions/%s/events' % sessionid,
                                   headers=headers,
                                   content_type='application/json',
                                   follow_redirects=True)
        self.assertEqual(response.status, "401 UNAUTHORIZED")

    def test_newclient_admintoken(self):
        '''
        Add a new client to database, using admin token, with good parameters.
        '''
        token = self.myadmintoken.decode()
        headers = {}
        headers['X-AUTH-TOKEN'] = token
        requestdata = json.dumps(dict(clientid="testclientid", apikey="testapikey"))
        response = self.client.post('/gameevents/api/v1.0/admin/clients',
                                    data=requestdata,
                                    headers=headers,
                                    content_type='application/json',
                                    follow_redirects=True)
        self.assertEqual(response.status, "201 CREATED")

    def test_newclient_bad_request_missing_params(self):
        '''
        Try to add a new client to database, without a token.
        '''
        requestdata = json.dumps(dict(clientid="lix", apikey="lixapikey"))
        response = self.client.post('/gameevents/api/v1.0/admin/clients',
                                    data=requestdata,
                                    content_type='application/json',
                                    follow_redirects=True)
        self.assertEqual(response.status, "400 BAD REQUEST")

    def test_newexistingclient(self):
        '''
        Try to add client that already exists in database, using admin token, with good parameters.
        '''
        token = self.myadmintoken.decode()
        headers = {}
        headers['X-AUTH-TOKEN'] = token
        requestdata = json.dumps(dict(clientid="myclientid", apikey="testapikey"))
        response = self.client.post('/gameevents/api/v1.0/admin/clients',
                                    data=requestdata,
                                    headers=headers,
                                    content_type='application/json',
                                    follow_redirects=True)
        self.assertEqual(response.status, "409 CONFLICT")

    def test_newclient_nonadmintoken(self):
        '''
        Try to add a new client to database, with non-admin token.
        '''
        token = self.mytoken.decode()
        headers = {}
        headers['X-AUTH-TOKEN'] = token
        requestdata = json.dumps(dict(clientid="testclientid", apikey="testapikey"))
        response = self.client.post('/gameevents/api/v1.0/admin/clients',
                                    data=requestdata,
                                    headers=headers,
                                    content_type='application/json',
                                    follow_redirects=True)
        self.assertEqual(response.status, "401 UNAUTHORIZED")

    def test_newclient_expiredadmintoken(self):
        '''
        Try to add a new client to database, with expired admin token.
        '''
        token = self.myexpiredadmintoken.decode()
        headers = {}
        headers['X-AUTH-TOKEN'] = token
        requestdata = json.dumps(dict(clientid="testclientid", apikey="testapikey"))
        response = self.client.post('/gameevents/api/v1.0/admin/clients',
                                    data=requestdata,
                                    headers=headers,
                                    content_type='application/json',
                                    follow_redirects=True)
        self.assertEqual(response.status, "401 UNAUTHORIZED")

    def test_newclient_badtoken(self):
        '''
        Try to add a new client to database, with bad token.
        '''
        token = self.mybadtoken.decode()
        headers = {}
        headers['X-AUTH-TOKEN'] = token
        requestdata = json.dumps(dict(clientid="testclientid", apikey="testapikey"))
        response = self.client.post('/gameevents/api/v1.0/admin/clients',
                                    data=requestdata,
                                    headers=headers,
                                    content_type='application/json',
                                    follow_redirects=True)
        self.assertEqual(response.status, "401 UNAUTHORIZED")

    def test_getsessions_validtoken(self):
        '''
        Get list of active sessions, with valid admin token.
        '''
        token = self.myadmintoken.decode()
        headers = {}
        headers['X-AUTH-TOKEN'] = token
        response = self.client.get('/gameevents/api/v1.0/sessions',
                                   headers=headers,
                                   follow_redirects=True)
        self.assertEqual(response.status, "200 OK")

    def test_getsessions_notoken(self):
        '''
        Get list of active sessions, without a token.
        '''
        response = self.client.get('/gameevents/api/v1.0/sessions',
                                   follow_redirects=True)
        self.assertEqual(response.status, "400 BAD REQUEST")

    def test_getsessions_invalidtoken(self):
        '''
        Get list of active sessions, with expired admin token.
        '''
        token = self.myexpiredadmintoken.decode()
        headers = {}
        headers['X-AUTH-TOKEN'] = token
        response = self.client.get('/gameevents/api/v1.0/sessions',
                                   headers=headers,
                                   follow_redirects=True)
        self.assertEqual(response.status, "401 UNAUTHORIZED")
if __name__ == '__main__':
unittest.main() | 42.535235 | 122 | 0.549051 | 24,609 | 0.970731 | 0 | 0 | 4,107 | 0.162005 | 0 | 0 | 7,675 | 0.302749 |
a4b2c4045c07be3b84327e571cdbd1e25cc17fb5 | 7,810 | py | Python | regions/core/regions.py | dhomeier/regions | 5055128abda57c3b463f51ede0a6ac0ef5a0c698 | [
"BSD-3-Clause"
] | 46 | 2015-05-26T20:59:24.000Z | 2022-03-31T04:57:50.000Z | regions/core/regions.py | dhomeier/regions | 5055128abda57c3b463f51ede0a6ac0ef5a0c698 | [
"BSD-3-Clause"
] | 338 | 2015-05-15T20:33:35.000Z | 2022-03-31T21:43:43.000Z | regions/core/regions.py | dhomeier/regions | 5055128abda57c3b463f51ede0a6ac0ef5a0c698 | [
"BSD-3-Clause"
] | 49 | 2016-03-21T22:12:31.000Z | 2022-03-29T21:50:25.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides a Regions class.
"""
from .core import Region
from .registry import RegionsRegistry
# Public API of this module.
__all__ = ['Regions']
# Skip doctests whose examples read region files not shipped with the docs.
__doctest_skip__ = ['Regions.read', 'Regions.write', 'Regions.parse',
                    'Regions.serialize']
class Regions:
"""
Class to hold a list of `~regions.Region` objects.
This class provides a unified I/O interface that supports reading,
writing, parsing, and serializing many region data formats.
Parameters
----------
regions : list of `~regions.Region`
The list of region objects.
"""
def __init__(self, regions):
self.regions = regions
def __getitem__(self, index):
newregions = self.regions[index]
if isinstance(newregions, Region): # one item
return newregions
else:
newcls = object.__new__(self.__class__)
newcls.regions = newregions
return newcls
def __repr__(self):
cls_name = self.__class__.__name__
return f'<{cls_name}({repr(self.regions)})>'
def __str__(self):
return str(self.regions)
def __len__(self):
return len(self.regions)
def append(self, region):
"""
Append the region to the end of the list of regions.
Parameters
----------
region : `~regions.Region`
The region to append.
"""
self.regions.append(region)
def extend(self, regions):
"""
Extend the list of regions by appending elements from the
input regions.
Parameters
----------
regions : list of `~regions.Region`
A list of regions to include.
"""
self.regions.extend(regions)
def insert(self, index, region):
"""
Insert the region before index.
Parameters
----------
index : int
The list index.
region : `~regions.Region`
The region to insert.
"""
self.regions.insert(index, region)
def reverse(self):
"""
Reverse the list of regions in place.
"""
self.regions.reverse()
def pop(self, index=-1):
"""
Remove and return the region at index.
Parameters
----------
index : int, optional
The index of the region to remove.
Returns
-------
result : `~regions.Region`
"""
return self.regions.pop(index)
def copy(self):
"""
Return a shallow copy of this object.
"""
newcls = object.__new__(self.__class__)
newcls.regions = self.regions.copy()
return newcls
@classmethod
def get_formats(cls):
"""
Get the registered I/O formats as a Table.
"""
return RegionsRegistry.get_formats(cls)
@classmethod
def read(cls, filename, format=None, cache=False, **kwargs):
"""
Read and parse a region file and return as a Regions object.
This method allows reading a file in many supported data
formats, e.g.,::
>>> from regions import Regions
>>> reg1 = Regions.read('regions.reg', format='ds9')
>>> reg2 = Regions.read('regions.crtf', format='crtf')
>>> reg3 = Regions.read('regions.fits', format='fits')
A list of the available formats for `~regions.Regions` is
available using::
>>> Regions.get_formats()
Parameters
----------
filename : str
The filename or URL of the file to read.
format : str, optional
The file format specifier.
cache : bool or 'update', optional
Whether to cache the contents of remote URLs. If 'update',
check the remote URL for a new version but store the result
in the cache.
**kwargs : dict, optional
Keyword arguments passed to the data reader.
Returns
-------
result : `~regions.Regions`
A `~regions.Regions` object containing the file contents.
"""
return RegionsRegistry.read(filename, cls, format=format,
cache=cache, **kwargs)
@classmethod
def parse(cls, data, format=None, **kwargs):
"""
Parse a region string or table and return as a Regions object.
This method allows parsing region data in many supported data
formats, e.g.,::
>>> from regions import Regions
>>> reg1 = Regions.parse(regions_str, format='ds9')
>>> reg2 = Regions.parse(regions_str, format='crtf')
>>> reg3 = Regions.parse(regions_tbl, format='fits')
A list of the available formats for `~regions.Regions` is
available using::
>>> Regions.get_formats()
Parameters
----------
data : str or `~astropy.table.Table`
The region data to parse.
format : str, optional
The file format specifier.
**kwargs : dict, optional
Keyword arguments passed to the data parser.
Returns
-------
result : `~regions.Regions`
A `~regions.Regions` object containing the data contents.
"""
return RegionsRegistry.parse(data, cls, format=format,
**kwargs)
def write(self, filename, format=None, overwrite=False, **kwargs):
"""
Write the regions to a region file in the specified format.
This method allows writing a file in many supported data
formats, e.g.,::
>>> from regions import Regions
>>> reg = Regions.read('regions.reg', format='ds9')
>>> reg.write('new_regions.reg', format='ds9')
>>> reg.write('new_regions.crtf', format='crtf')
>>> reg.write('new_regions.fits', format='fits')
A list of the available formats for `~regions.Regions` is
available using::
>>> Regions.get_formats()
Parameters
----------
filename : str
The filename or URL of the file to write.
format : str, optional
The file format specifier.
overwrite : bool, optional
If True, overwrite the output file if it exists. Raises an
`OSError` if False and the output file exists. Default is
False.
**kwargs : dict, optional
Keyword arguments passed to the data writer.
"""
return RegionsRegistry.write(self.regions, filename,
self.__class__, format=format,
overwrite=overwrite, **kwargs)
def serialize(self, format=None, **kwargs):
    """
    Serialize the regions to a region string or table.

    This method supports serializing regions in all registered data
    formats, e.g.,::

        >>> from regions import Regions
        >>> reg = Regions.read('regions.reg', format='ds9')
        >>> reg1_str = reg.serialize(format='ds9')
        >>> reg2_str = reg.serialize(format='crtf')
        >>> reg3_tbl = reg.serialize(format='fits')

    The available formats for `~regions.Regions` can be listed with::

        >>> Regions.get_formats()

    Parameters
    ----------
    format : str, optional
        The file format specifier.
    **kwargs : dict, optional
        Keyword arguments passed to the data serializer.

    Returns
    -------
    result : str or `~astropy.table.Table`
        The serialized regions; the concrete type depends on ``format``.
    """
    # Dispatch through the unified I/O registry for the requested format.
    regions_cls = self.__class__
    return RegionsRegistry.serialize(self.regions, regions_cls,
                                     format=format, **kwargs)
| 29.360902 | 71 | 0.554802 | 7,499 | 0.960179 | 0 | 0 | 2,710 | 0.346991 | 0 | 0 | 5,606 | 0.717798 |
a4b494c20c5b44b90d242f37feb5161e7c33c729 | 9,254 | py | Python | main.py | rhaksar/control-percolation | d44d7e820eb49c9234cd3dc91383f63938ff67a8 | [
"MIT"
] | 1 | 2020-11-04T17:38:08.000Z | 2020-11-04T17:38:08.000Z | main.py | rhaksar/control-percolation | d44d7e820eb49c9234cd3dc91383f63938ff67a8 | [
"MIT"
] | null | null | null | main.py | rhaksar/control-percolation | d44d7e820eb49c9234cd3dc91383f63938ff67a8 | [
"MIT"
] | null | null | null | from collections import defaultdict
import itertools
import numpy as np
import pickle
import time
import warnings
from Analysis import binomial_pgf, BranchModel, StaticModel
from simulators.fires.UrbanForest import UrbanForest
from Policies import NCTfires, UBTfires, DWTfires, RHTfires, USTfires
from Utilities import fire_boundary, urban_boundary, forest_children, percolation_parameter, equivalent_percolation_control
# Turn every numpy floating-point anomaly (overflow, divide-by-zero,
# invalid op, underflow) into a raised FloatingPointError instead of a
# silent warning, so numerical bugs surface immediately.
np.seterr(all='raise')
def uniform():
    """
    Build uniform (spatially constant) fire and control parameter maps.

    Returns
    -------
    tuple of defaultdict
        ``(a, b, lattice_p, control_p, control_ab)`` where every map
        returns the same value for any key:

        - ``a`` : fire-spread parameter alpha,
        - ``b`` : tree persistence parameter beta,
        - ``lattice_p`` : percolation parameter p(alpha, beta),
        - ``control_p`` : equivalent percolation control delta_p,
        - ``control_ab`` : the ``(delta_alpha, delta_beta)`` control pair.

    Raises
    ------
    Warning
        If the uncontrolled process is not supercritical (p <= 0.5), or
        the control is insufficient to make it subcritical
        (p - dp >= 0.5).
    """
    # given alpha and beta, compute lattice probabilities for every
    # (parent, child) pair
    alpha_value = 0.2763
    beta_value = np.exp(-1/10)

    p = percolation_parameter(alpha_value, beta_value)
    if p <= 0.5:
        raise Warning('Percolation parameter {0:0.2f} is not supercritical'.format(p))

    lattice_p = defaultdict(lambda: p)

    # given (delta_alpha, delta_beta), construct the equivalent delta_p
    delta_a = 0
    delta_b = 0.4
    dp = equivalent_percolation_control(alpha_value, beta_value, delta_a, delta_b)
    if p - dp >= 0.5:
        raise Warning('Control is insufficient: p - dp = {0:0.2f} - {1:0.2f} = {2:0.2f}'.format(p, dp, p-dp))

    control_p = defaultdict(lambda: dp)
    control_ab = defaultdict(lambda: (delta_a, delta_b))

    # or given delta_p, construct the equivalent (delta_alpha, delta_beta)
    # delta_p = 0.4
    # control_percolation = defaultdict(lambda: delta_p)
    # control_gmdp = defaultdict(lambda: equivalent_gmdp_control(a, b, delta_p))

    # BUG FIX: the original code did ``a = defaultdict(lambda: a)``.
    # Python closures bind names late, so after the rebinding the lambda
    # returned the defaultdict itself (recursively) instead of the scalar
    # alpha/beta.  Binding the scalars under distinct names fixes this.
    a = defaultdict(lambda: alpha_value)
    b = defaultdict(lambda: beta_value)

    return a, b, lattice_p, control_p, control_ab
def nonuniform(simulation):
    """
    Build spatially varying fire parameters for an UrbanForest simulation.

    alpha (fire spread) increases linearly with column index from 0.2 to
    0.4; beta (persistence) is exp(-1/5) in the forest region and
    exp(-1/10) in the urban strip on the right edge.  Also constructs a
    small cross-shaped initial fire around the lattice center and the
    per-cell GMDP control map.

    Returns
    -------
    tuple
        ``(alpha_set, beta_set, initial_fire, control_gmdp, p_set)``:
        dicts keyed by (row, col) for alpha/beta/control, a list of
        (row, col) initially burning cells, and a dict of percolation
        parameters keyed by (tree, neighbor) pairs.
    """
    alpha_set = dict()
    # beta_set = defaultdict(lambda: np.exp(-1/9))
    beta_set = dict()
    p_set = dict()
    delta_beta = 0.35
    control_gmdp = dict()

    # alpha ramps linearly across columns from alpha_start to alpha_end.
    alpha_start = 0.2
    alpha_end = 0.4
    for r in range(simulation.dims[0]):
        for c in range(simulation.dims[1]):
            alpha_set[(r, c)] = alpha_start + (c/(simulation.dims[1]-1))*(alpha_end-alpha_start)

    # beta1 applies to the forest, beta2 to the urban strip (last
    # ``urban_width`` columns); the GMDP control caps delta_beta at the
    # cell's own beta so control never exceeds the parameter it reduces.
    beta1 = np.exp(-1/5)
    beta2 = np.exp(-1/10)
    for r in range(simulation.dims[0]):
        for c in range(simulation.dims[1]):
            if c < simulation.dims[1]-simulation.urban_width:
                beta_set[(r, c)] = beta1
            else:
                beta_set[(r, c)] = beta2

            control_gmdp[(r, c)] = {'healthy': (alpha_set[(r, c)], 0),
                                    'on_fire': (0, np.amin([delta_beta, beta_set[(r, c)]]))}

    # set initial condition: a plus-shaped patch of fire around the
    # center cell (corners and diagonals of the 5x5 neighborhood excluded).
    initial_fire = []
    r_center = np.floor((simulation.dims[0]-1)/2).astype(np.uint8)
    c_center = np.floor((simulation.dims[1]-1)/2).astype(np.uint8)
    delta_r = [k for k in range(-2, 3)]
    delta_c = [k for k in range(-2, 3)]
    deltas = itertools.product(delta_r, delta_c)
    for (dr, dc) in deltas:
        if dr == 0 and dc == 0:
            continue
        elif (dr == -2 or dr == 2) and (dc == -2 or dc == 2):
            continue
        elif dc == dr or dc == -dr:
            continue
        r, c = r_center + dr, c_center + dc
        initial_fire.append((r, c))

    # control_p = dict()
    # Percolation parameter for each (tree, neighbor) edge; warn (but do
    # not abort) when an edge is not supercritical.
    for tree_rc in simulation.group.keys():
        for neighbor in simulation.group[tree_rc].neighbors:
            p = percolation_parameter(alpha_set[neighbor], beta_set[tree_rc])
            if p <= 0.5:
                warnings.warn('p({0:0.2f}, {1:0.2f}) = {2:0.2f} <= 0.5'.format(alpha_set[neighbor],
                                                                               beta_set[tree_rc], p))

            p_set[(tree_rc, neighbor)] = p
            # control_p[(tree_rc, neighbor)] = dict()
            #
            # for k in control_gmdp[neighbor].keys():
            #     da, db = control_gmdp[neighbor][k]
            #     dp = equivalent_percolation_control(alpha_set[neighbor], beta_set[tree_rc], da, db)
            #     if p - dp >= 0.5:
            #         warnings.warn('p - dp = {0:0.2f} - {1:0.2f} = {2:0.2f} >= 0.5'.format(p, dp, p - dp))
            #
            #     control_p[(tree_rc, neighbor)][k] = dp

    return alpha_set, beta_set, initial_fire, control_gmdp, p_set
def benchmark(simulation, branchmodel, policy, num_generations=1, num_simulations=1):
    """
    Run repeated fire-control simulations under ``policy`` and save results.

    For each seed in ``range(num_simulations)`` the simulator is reset and
    stepped to completion, with the branching-process model re-fit to the
    fire boundary each step to drive the policy's control decisions.

    Parameters
    ----------
    simulation : simulator instance (e.g. UrbanForest)
        Reset and re-seeded for every run.
    branchmodel : BranchModel
        Branching-process approximation of the fire front.
    policy : policy object from ``Policies``
        Control policy; ``USTfires`` additionally uses a StaticModel of
        the urban boundary.
    num_generations : int, optional
        Branching-process generations evolved per simulator step.
    num_simulations : int, optional
        Number of independent runs (seeds 0..num_simulations-1).

    Side effects
    ------------
    Pickles per-seed statistics to
    ``results/<policy-name>_s<num_simulations>.pkl`` (the ``results``
    directory must already exist) and prints summary medians.
    """
    print('Running policy {0:s} with capacity {1:d} for {2:d} simulations'.format(policy.name,
                                                                                  policy.capacity,
                                                                                  num_simulations))
    print('started at {0:s}'.format(time.strftime('%d-%b-%Y %H:%M')))
    # BUG FIX: time.clock() was deprecated in Python 3.3 and removed in
    # Python 3.8; perf_counter() is the documented replacement for
    # measuring elapsed intervals.
    tic = time.perf_counter()

    results = dict()
    staticmodel = StaticModel()
    for seed in range(num_simulations):
        np.random.seed(seed)
        simulation.reset()
        simulation.rng = seed

        while not simulation.early_end:
            # Re-fit the branching model to the current fire boundary.
            branchmodel.reset()
            branchmodel.set_boundary(fire_boundary(simulation))

            if isinstance(policy, USTfires):
                staticmodel.set_boundary(urban_boundary(simulation))
                policy.urbanboundary = urban_boundary(simulation)

            def children_function(p):
                return forest_children(simulation, p)
            branchmodel.set_children_function(children_function)

            for _ in range(num_generations):
                # Cache lattice children for every current parent before
                # the policy consults the model.
                for process in branchmodel.GWprocesses.values():
                    for parent in process.current_parents:
                        if parent not in branchmodel.lattice_children:
                            branchmodel.lattice_children[parent] = branchmodel.children_function(parent)

                if not isinstance(policy, USTfires):
                    policy.generate_map(branchmodel)
                else:
                    policy.generate_map(branchmodel, staticmodel)
                branchmodel.next_generation(policy)

                if isinstance(policy, USTfires):
                    staticmodel.next_boundary(policy.control_decisions)

            # apply control and update simulator
            if not isinstance(policy, USTfires):
                control = policy.control(branchmodel)
            else:
                control = policy.control(branchmodel, staticmodel)
            simulation.update(control)

        if (seed+1) % 10 == 0:
            print('completed {0:d} simulations'.format((seed+1)))

        results[seed] = {'healthy_trees': simulation.stats_trees[0]/np.sum(simulation.stats_trees),
                         'healthy_urban': simulation.stats_urban[0]/np.sum(simulation.stats_urban),
                         'razed_urban': simulation.stats_urban[3]/np.sum(simulation.stats_urban)}

    toc = time.perf_counter()
    dt = toc - tic
    print('finished at {0:s}'.format(time.strftime('%d-%b-%Y %H:%M')))
    print('{0:0.2f}s = {1:0.2f}m = {2:0.2f}h elapsed'.format(dt, dt/60, dt/3600))

    filename = policy.name + '_s' + str(num_simulations) + '.pkl'
    # Context manager guarantees the pickle file is closed even if
    # pickling raises (original used open/close without try/finally).
    with open('results/' + filename, 'wb') as output:
        pickle.dump(results, output)

    print('median healthy trees: {0:0.2f}%'.format(100*np.median([results[s]['healthy_trees']
                                                                  for s in results.keys()])))
    print('median healthy urban developments: {0:0.2f}%'.format(100*np.median([results[s]['healthy_urban']
                                                                               for s in results.keys()])))
    print('median removed urban developments: {0:0.2f}%'.format(100*np.median([results[s]['razed_urban']
                                                                               for s in results.keys()])))

    # print('mean remaining trees: {0:0.2f}%'.format(100*np.mean(results)))
    # print('minimum {0:0.2f}, maximum {1:0.2f}'.format(100*np.amin(results), 100*np.amax(results)))
    # first, third = np.percentile(results, [25, 75])
    # print('1st quartile {0:0.2f}, 3rd quartile {1:0.2f}'.format(100*first, 100*third))
    return
if __name__ == '__main__':
    # forest parameters
    dimension = 50
    urban_width = 10

    # generate information for uniform or non-uniform case
    # alpha, beta, lattice_parameters, control_percolation, control_gmdp = uniform(LatticeForest(dimension))
    # alpha, beta, p_parameters, map_percolation, map_gmdp = nonuniform(LatticeForest(dimension))
    alpha, beta, initial_fire, map_gmdp, p_parameters = nonuniform(UrbanForest(dimension, urban_width))

    # sim = LatticeForest(dimension, alpha=alpha, beta=beta)
    sim = UrbanForest(dimension, urban_width, initial_fire=initial_fire, alpha=alpha, beta=beta)

    # define policy
    # cap: control capacity per time step — presumably the number of
    # cells the policy may treat each step; confirm against Policies.
    cap = 6
    pi = NCTfires(capacity=cap, alpha_set=alpha, beta_set=beta, control_map_gmdp=map_gmdp)
    # pi = UBTfires(capacity=cap, alpha_set=alpha, beta_set=beta, control_map_gmdp=map_gmdp)
    # pi = DWTfires(capacity=cap, alpha_set=alpha, beta_set=beta, control_map_gmdp=map_gmdp)
    # pi = RHTfires(capacity=cap, horizon=1, alpha_set=alpha, beta_set=beta, control_map_gmdp=map_gmdp)
    # pi = USTfires(capacity=cap, horizon=5, control_map_gmdp=map_gmdp, alpha_set=alpha, beta_set=beta)

    # create branching process model approximation
    bm = BranchModel(lattice_parameters=p_parameters, pgf=binomial_pgf)
    sm = StaticModel()

    benchmark(sim, bm, pi, num_generations=1, num_simulations=1000)
    print()
| 41.3125 | 123 | 0.600173 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,560 | 0.276637 |
a4b54184c984255e498a3de14e79351b3cf349d8 | 20,265 | py | Python | tests/functional/test_create.py | AgeOfLearning/uget-cli | ecaa54db9c9a6bd22f3ce2099b28828ea42cb853 | [
"MIT"
] | 1 | 2019-03-03T21:19:51.000Z | 2019-03-03T21:19:51.000Z | tests/functional/test_create.py | AgeOfLearning/uget-cli | ecaa54db9c9a6bd22f3ce2099b28828ea42cb853 | [
"MIT"
] | 3 | 2018-12-31T20:11:03.000Z | 2021-11-15T17:47:57.000Z | tests/functional/test_create.py | AgeOfLearning/uget-cli | ecaa54db9c9a6bd22f3ce2099b28828ea42cb853 | [
"MIT"
] | 2 | 2019-02-14T01:08:57.000Z | 2019-03-03T21:19:53.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Functional tests for `ugetcli` package - `create` command.
Tests functionality of the cli create command with various options.
"""
import os
import unittest
import json
from click.testing import CliRunner
from mock import MagicMock, patch
from ugetcli import cli
from ugetcli.utils import create_empty_file
class TestUGetCliCreate(unittest.TestCase):
    """Functional Tests for `ugetcli` package - `create` command.

    Each test patches the CsProj wrapper and the UnityPackageRunner so no
    real Unity/msbuild is needed, then invokes ``uget create`` inside a
    click isolated filesystem and asserts the exported .unitypackage path.
    """

    @patch('ugetcli.uget.CsProj')
    @patch('ugetcli.uget.UnityPackageRunner')
    def test_cli_uget_create(
            self, unitypackage_runner_mock, csproj_mock):
        """Test cli: uget create with default options"""
        # Single-element list so the nested mock can record the call
        # (a closure-safe mutable flag).
        invocation_results = [False]

        # Mock running Unity to export unity package
        def export_unitypackage_mock(*args, **kwargs):
            assert 'UnityProject' in args[0]  # In temp folder
            assert os.path.normpath('UnityProject/Assets/TestProject') in args[0]
            assert os.path.normpath('Output/TestProject_1.0.0_Release.unitypackage') in args[1]
            create_empty_file(args[1])
            invocation_results[0] = True
            return 0

        unitypackage_runner_instance = MagicMock()
        unitypackage_runner_instance.export_unitypackage = export_unitypackage_mock
        unitypackage_runner_mock.return_value = unitypackage_runner_instance

        csproj_instance = MagicMock()
        csproj_instance.get_assembly_name.return_value = "TestProject"
        csproj_instance.get_assembly_version.return_value = "1.0.0"
        csproj_instance.get_output_path.return_value = "bin/Output/Debug"
        csproj_instance.path = "TestProject.csproj"
        csproj_mock.return_value = csproj_instance
        csproj_mock.get_csproj_at_path.return_value = "TestProject.csproj"

        runner = CliRunner(env={})
        with runner.isolated_filesystem():
            os.makedirs("bin/Output/Debug")
            create_empty_file("bin/Output/Debug/TestProject.dll")
            create_empty_file("bin/Output/Debug/TestProject.pdb")
            result = runner.invoke(cli.ugetcli, ['create'], obj={})

        assert result.exit_code == 0, result
        unitypackage_runner_mock.assert_called_with(False)
        assert invocation_results[0], "did not invoke export_unitypackage_mock"

    @patch('ugetcli.uget.CsProj')
    @patch('ugetcli.uget.UnityPackageRunner')
    def test_cli_uget_create_with_path_directory(
            self, unitypackage_runner_mock, csproj_mock):
        """Test cli: uget create with --path option when path is a directory"""
        invocation_results = [False]

        # Mock running Unity to export unity package
        def export_unitypackage_mock(*args, **kwargs):
            assert 'UnityProject' in args[0]  # In temp folder
            assert os.path.normpath('UnityProject/Assets/TestProject') in args[0]
            assert os.path.normpath('Output/TestProject_1.0.0_Release.unitypackage') in args[1]
            create_empty_file(args[1])
            invocation_results[0] = True
            return 0

        unitypackage_runner_instance = MagicMock()
        unitypackage_runner_instance.export_unitypackage = export_unitypackage_mock
        unitypackage_runner_mock.return_value = unitypackage_runner_instance

        csproj_instance = MagicMock()
        csproj_instance.get_assembly_name.return_value = "TestProject"
        csproj_instance.get_assembly_version.return_value = "1.0.0"
        csproj_instance.get_output_path.return_value = "bin/Output/Debug"
        csproj_instance.path = "custom/MyProject.csproj"
        csproj_mock.return_value = csproj_instance

        runner = CliRunner(env={})
        with runner.isolated_filesystem():
            os.makedirs("custom/bin/Output/Debug")
            create_empty_file("custom/bin/Output/Debug/TestProject.dll")
            create_empty_file("custom/bin/Output/Debug/TestProject.pdb")
            result = runner.invoke(cli.ugetcli, ['create', '--path', 'custom/'], obj={})

        assert result.exit_code == 0, result
        unitypackage_runner_mock.assert_called_with(False)
        csproj_mock.assert_called_with('custom/')
        assert invocation_results[0], "did not invoke export_unitypackage_mock"

    @patch('ugetcli.uget.CsProj')
    @patch('ugetcli.uget.UnityPackageRunner')
    def test_cli_uget_create_with_path_file(
            self, unitypackage_runner_mock, csproj_mock):
        """Test cli: uget create with --path option when path is a .csproj file"""
        invocation_results = [False]

        # Mock running Unity to export unity package
        def export_unitypackage_mock(*args, **kwargs):
            assert 'UnityProject' in args[0]  # In temp folder
            assert os.path.normpath('UnityProject/Assets/TestProject') in args[0]
            assert os.path.normpath('Output/TestProject_1.0.0_Release.unitypackage') in args[1]
            create_empty_file(args[1])
            invocation_results[0] = True
            return 0

        unitypackage_runner_instance = MagicMock()
        unitypackage_runner_instance.export_unitypackage = export_unitypackage_mock
        unitypackage_runner_mock.return_value = unitypackage_runner_instance

        csproj_instance = MagicMock()
        csproj_instance.get_assembly_name.return_value = "TestProject"
        csproj_instance.get_assembly_version.return_value = "1.0.0"
        csproj_instance.get_output_path.return_value = "bin/Output/Debug"
        csproj_instance.path = "custom/MyProject.csproj"
        csproj_mock.return_value = csproj_instance

        runner = CliRunner(env={})
        with runner.isolated_filesystem():
            os.makedirs("custom/bin/Output/Debug")
            create_empty_file("custom/bin/Output/Debug/TestProject.dll")
            create_empty_file("custom/bin/Output/Debug/TestProject.pdb")
            result = runner.invoke(cli.ugetcli, ['create', '--path', 'custom/MyProject.csproj'], obj={})

        assert result.exit_code == 0, result
        unitypackage_runner_mock.assert_called_with(False)
        csproj_mock.assert_called_with('custom/MyProject.csproj')
        assert invocation_results[0], "did not invoke export_unitypackage_mock"

    @patch('ugetcli.uget.CsProj')
    @patch('ugetcli.uget.UnityPackageRunner')
    def test_cli_uget_create_with_output_dir(
            self, unitypackage_runner_mock, csproj_mock):
        """Test cli: uget create with --output-dir option"""
        invocation_results = [False]

        # Mock running Unity to export unity package
        def export_unitypackage_mock(*args, **kwargs):
            assert 'UnityProject' in args[0]  # In temp folder
            assert os.path.normpath('UnityProject/Assets/TestProject') in args[0]
            assert os.path.normpath('out/TestProject_1.0.0_Release.unitypackage') in args[1]
            create_empty_file(args[1])
            invocation_results[0] = True
            return 0

        unitypackage_runner_instance = MagicMock()
        unitypackage_runner_instance.export_unitypackage = export_unitypackage_mock
        unitypackage_runner_mock.return_value = unitypackage_runner_instance

        csproj_instance = MagicMock()
        csproj_instance.get_assembly_name.return_value = "TestProject"
        csproj_instance.get_assembly_version.return_value = "1.0.0"
        csproj_instance.get_output_path.return_value = "bin/Output/Debug"
        csproj_instance.path = "TestProject.csproj"
        csproj_mock.return_value = csproj_instance

        runner = CliRunner(env={})
        with runner.isolated_filesystem():
            os.makedirs("bin/Output/Debug")
            create_empty_file("bin/Output/Debug/TestProject.dll")
            create_empty_file("bin/Output/Debug/TestProject.pdb")
            result = runner.invoke(cli.ugetcli, ['create', '--output-dir', 'out'], obj={})

        assert result.exit_code == 0, result
        unitypackage_runner_mock.assert_called_with(False)
        assert invocation_results[0], "did not invoke export_unitypackage_mock"

    @patch('ugetcli.uget.CsProj')
    @patch('ugetcli.uget.UnityPackageRunner')
    def test_cli_uget_create_with_configuration(
            self, unitypackage_runner_mock, csproj_mock):
        """Test cli: uget create with --configuration option"""
        invocation_results = [False]

        # Mock running Unity to export unity package
        def export_unitypackage_mock(*args, **kwargs):
            assert 'UnityProject' in args[0]  # In temp folder
            assert os.path.normpath('UnityProject/Assets/TestProject') in args[0]
            assert os.path.normpath('Output/TestProject_1.0.0_Debug.unitypackage') in args[1]
            create_empty_file(args[1])
            invocation_results[0] = True
            return 0

        unitypackage_runner_instance = MagicMock()
        unitypackage_runner_instance.export_unitypackage = export_unitypackage_mock
        unitypackage_runner_mock.return_value = unitypackage_runner_instance

        csproj_instance = MagicMock()
        csproj_instance.get_assembly_name.return_value = "TestProject"
        csproj_instance.get_assembly_version.return_value = "1.0.0"
        csproj_instance.get_output_path.return_value = "bin/Output/Debug"
        csproj_instance.path = "TestProject.csproj"
        csproj_mock.return_value = csproj_instance

        runner = CliRunner(env={})
        with runner.isolated_filesystem():
            os.makedirs("bin/Output/Debug")
            create_empty_file("bin/Output/Debug/TestProject.dll")
            create_empty_file("bin/Output/Debug/TestProject.pdb")
            result = runner.invoke(cli.ugetcli, ['create', '--configuration', 'Debug'],
                                   obj={})

        assert result.exit_code == 0, result
        unitypackage_runner_mock.assert_called_with(False)
        assert invocation_results[0], "did not invoke export_unitypackage_mock"

    @patch('ugetcli.uget.CsProj')
    @patch('ugetcli.uget.UnityPackageRunner')
    def test_cli_uget_create_with_unity_project_path(
            self, unitypackage_runner_mock, csproj_mock):
        """Test cli: uget create with --unity-project-path"""
        invocation_results = [False]

        # Mock running Unity to export unity package
        # (note 'MyUnityProject' contains 'UnityProject', so both
        # substring asserts below can hold simultaneously)
        def export_unitypackage_mock(*args, **kwargs):
            assert 'MyUnityProject' in args[0]  # In temp folder
            assert os.path.normpath('UnityProject/Assets/TestProject') in args[0]
            assert os.path.normpath('Output/TestProject_1.0.0_Release.unitypackage') in args[1]
            create_empty_file(args[1])
            invocation_results[0] = True
            return 0

        unitypackage_runner_instance = MagicMock()
        unitypackage_runner_instance.export_unitypackage = export_unitypackage_mock
        unitypackage_runner_mock.return_value = unitypackage_runner_instance

        csproj_instance = MagicMock()
        csproj_instance.get_assembly_name.return_value = "TestProject"
        csproj_instance.get_assembly_version.return_value = "1.0.0"
        csproj_instance.get_output_path.return_value = "bin/Output/Debug"
        csproj_instance.path = "TestProject.csproj"
        csproj_mock.return_value = csproj_instance

        runner = CliRunner(env={})
        with runner.isolated_filesystem():
            os.makedirs("bin/Output/Debug")
            create_empty_file("bin/Output/Debug/TestProject.dll")
            create_empty_file("bin/Output/Debug/TestProject.pdb")
            result = runner.invoke(
                cli.ugetcli, ['create', '--unity-project-path', 'MyUnityProject'], obj={})

        assert result.exit_code == 0, result
        unitypackage_runner_mock.assert_called_with(False)
        assert invocation_results[0], "did not invoke export_unitypackage_mock"

    @patch('ugetcli.uget.CsProj')
    @patch('ugetcli.uget.UnityPackageRunner')
    def test_cli_uget_create_with_root_directory(
            self, unitypackage_runner_mock, csproj_mock):
        """Test cli: uget create with --root-dir"""
        invocation_results = [False]

        # Mock running Unity to export unity package
        def export_unitypackage_mock(*args, **kwargs):
            assert 'UnityProject' in args[0]  # In temp folder
            assert os.path.normpath('UnityProject/Assets/MyUnityPackageRoot') in args[0]
            assert os.path.normpath('Output/TestProject_1.0.0_Release.unitypackage') in args[1]
            create_empty_file(args[1])
            invocation_results[0] = True
            return 0

        unitypackage_runner_instance = MagicMock()
        unitypackage_runner_instance.export_unitypackage = export_unitypackage_mock
        unitypackage_runner_mock.return_value = unitypackage_runner_instance

        csproj_instance = MagicMock()
        csproj_instance.get_assembly_name.return_value = "TestProject"
        csproj_instance.get_assembly_version.return_value = "1.0.0"
        csproj_instance.get_output_path.return_value = "bin/Output/Debug"
        csproj_instance.path = "TestProject.csproj"
        csproj_mock.return_value = csproj_instance

        runner = CliRunner(env={})
        with runner.isolated_filesystem():
            os.makedirs("bin/Output/Debug")
            create_empty_file("bin/Output/Debug/TestProject.dll")
            create_empty_file("bin/Output/Debug/TestProject.pdb")
            result = runner.invoke(
                cli.ugetcli, ['create', '--root-dir', 'MyUnityPackageRoot'], obj={})

        assert result.exit_code == 0, result
        unitypackage_runner_mock.assert_called_with(False)
        assert invocation_results[0], "did not invoke export_unitypackage_mock"

    @patch('ugetcli.uget.CsProj')
    @patch('ugetcli.uget.UnityPackageRunner')
    def test_cli_uget_create_with_clean(
            self, unitypackage_runner_mock, csproj_mock):
        """Test cli: uget create with --clean"""
        invocation_results = [False]

        # Mock running Unity to export unity package
        def export_unitypackage_mock(*args, **kwargs):
            assert 'UnityProject' in args[0]  # In temp folder
            assert os.path.normpath('UnityProject/Assets/TestProject') in args[0]
            assert os.path.normpath('Output/TestProject_1.0.0_Release.unitypackage') in args[1]
            create_empty_file(args[1])
            invocation_results[0] = True
            return 0

        unitypackage_runner_instance = MagicMock()
        unitypackage_runner_instance.export_unitypackage = export_unitypackage_mock
        unitypackage_runner_mock.return_value = unitypackage_runner_instance

        csproj_instance = MagicMock()
        csproj_instance.get_assembly_name.return_value = "TestProject"
        csproj_instance.get_assembly_version.return_value = "1.0.0"
        csproj_instance.get_output_path.return_value = "bin/Output/Debug"
        csproj_instance.path = "TestProject.csproj"
        csproj_mock.return_value = csproj_instance

        runner = CliRunner(env={})
        with runner.isolated_filesystem():
            os.makedirs("bin/Output/Debug")
            create_empty_file("bin/Output/Debug/TestProject.dll")
            create_empty_file("bin/Output/Debug/TestProject.pdb")
            os.makedirs("Output/")
            create_empty_file("Output/TestProject_0.1.0_Release.unitypackage")  # Should be removed
            create_empty_file("Output/TestProject_0.1.1_Release.unitypackage")  # Should be removed
            create_empty_file("Output/TestProject_0.1.0_Debug.unitypackage")  # Should NOT be removed
            result = runner.invoke(
                cli.ugetcli, ['create', '--clean'], obj={})
            # --clean must only remove stale packages for the *same*
            # configuration (Release), leaving the Debug package intact.
            assert not os.path.isfile("Output/TestProject_0.1.0_Release.unitypackage")
            assert not os.path.isfile("Output/TestProject_0.1.1_Release.unitypackage")
            assert os.path.isfile("Output/TestProject_0.1.0_Debug.unitypackage")

        assert result.exit_code == 0, result
        unitypackage_runner_mock.assert_called_with(False)
        assert invocation_results[0], "did not invoke export_unitypackage_mock"

    @patch('ugetcli.uget.CsProj')
    @patch('ugetcli.uget.UnityPackageRunner')
    def test_cli_uget_create_with_unity_username(
            self, unitypackage_runner_mock, csproj_mock):
        """Test cli: uget create with --unity-username"""
        # NOTE(review): despite the docstring, this test invokes plain
        # ['create'] and never passes a --unity-username flag, so it
        # duplicates the default-options test — confirm intent and either
        # add the flag or remove the test.
        invocation_results = [False]

        # Mock running Unity to export unity package
        def export_unitypackage_mock(*args, **kwargs):
            assert 'UnityProject' in args[0]  # In temp folder
            assert os.path.normpath('UnityProject/Assets/TestProject') in args[0]
            assert os.path.normpath('Output/TestProject_1.0.0_Release.unitypackage') in args[1]
            create_empty_file(args[1])
            invocation_results[0] = True
            return 0

        unitypackage_runner_instance = MagicMock()
        unitypackage_runner_instance.export_unitypackage = export_unitypackage_mock
        unitypackage_runner_mock.return_value = unitypackage_runner_instance

        csproj_instance = MagicMock()
        csproj_instance.get_assembly_name.return_value = "TestProject"
        csproj_instance.get_assembly_version.return_value = "1.0.0"
        csproj_instance.get_output_path.return_value = "bin/Output/Debug"
        csproj_instance.path = "TestProject.csproj"
        csproj_mock.return_value = csproj_instance

        runner = CliRunner(env={})
        with runner.isolated_filesystem():
            os.makedirs("bin/Output/Debug")
            create_empty_file("bin/Output/Debug/TestProject.dll")
            create_empty_file("bin/Output/Debug/TestProject.pdb")
            result = runner.invoke(
                cli.ugetcli, ['create'], obj={})

        assert result.exit_code == 0, result
        unitypackage_runner_mock.assert_called_with(False)
        assert invocation_results[0], "did not invoke export_unitypackage_mock"

    @patch('ugetcli.uget.CsProj')
    @patch('ugetcli.uget.UnityPackageRunner')
    def test_cli_uget_create_with_config_json(
            self, unitypackage_runner_mock, csproj_mock):
        """Test cli: uget create with options loaded via config json"""
        invocation_results = [False]

        # Mock running Unity to export unity package
        def export_unitypackage_mock(*args, **kwargs):
            assert 'CustomUnityProject' in args[0]  # In temp folder
            assert os.path.normpath('CustomUnityProject/Assets/MyUnityPackage') in args[0]
            assert os.path.normpath('CustomOutput/TestProject_1.0.0_Debug.unitypackage') in args[1]
            create_empty_file(args[1])
            invocation_results[0] = True
            return 0

        unitypackage_runner_instance = MagicMock()
        unitypackage_runner_instance.export_unitypackage = export_unitypackage_mock
        unitypackage_runner_mock.return_value = unitypackage_runner_instance

        csproj_instance = MagicMock()
        csproj_instance.get_assembly_name.return_value = "TestProject"
        csproj_instance.get_assembly_version.return_value = "1.0.0"
        csproj_instance.get_output_path.return_value = "bin/Output/Debug"
        csproj_instance.path = "TestProject.csproj"
        csproj_mock.return_value = csproj_instance

        # Every CLI option supplied through --config JSON instead of flags.
        config_data = {
            "output_dir": "CustomOutput",
            "configuration": "Debug",
            "unity_project_path": "CustomUnityProject",
            "root_dir": "MyUnityPackage",
            "clean": True,
        }

        runner = CliRunner(env={})
        with runner.isolated_filesystem():
            os.makedirs("bin/Output/Debug")
            create_empty_file("bin/Output/Debug/TestProject.dll")
            create_empty_file("bin/Output/Debug/TestProject.pdb")
            os.makedirs("CustomOutput/")
            create_empty_file("CustomOutput/TestProject_0.1.0_Release.unitypackage")  # Should be removed
            result = runner.invoke(
                cli.ugetcli, ['create', '--config', json.dumps(config_data)], obj={})
            assert not os.path.isfile("Output/TestProject_0.1.0_Release.unitypackage")

        assert result.exit_code == 0, result
        unitypackage_runner_mock.assert_called_with(False)
        assert invocation_results[0], "did not invoke export_unitypackage_mock"
| 45.952381 | 105 | 0.68527 | 19,902 | 0.982087 | 0 | 0 | 19,730 | 0.9736 | 0 | 0 | 5,714 | 0.281964 |
a4b57a11801845ae45bfa0a0f8490e4b89315ce3 | 1,647 | py | Python | hackerrank/calc_missing.py | capac/python-exercises | 0fa58401d160a702a8cfba3f406275f857e7a203 | [
"MIT"
] | null | null | null | hackerrank/calc_missing.py | capac/python-exercises | 0fa58401d160a702a8cfba3f406275f857e7a203 | [
"MIT"
] | null | null | null | hackerrank/calc_missing.py | capac/python-exercises | 0fa58401d160a702a8cfba3f406275f857e7a203 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
import re
import pandas as pd
from numpy import interp
import os
from pathlib import Path
home = os.environ['HOME']
home_dir = Path(home)
work_dir = home_dir / 'Programming/Python/python-exercises/hackerrank'
# 12/14/2012 16:00:00 Missing_19
pattern = re.compile(r'(\d{1,2}/\d{1,2}/2012)\s+(16:00:00)\s+(Missing_\d{1,2})')
missing_list = []
with open(work_dir / 'data/readings.txt', 'r') as f:
lines = f.readlines()
for index, line in enumerate(lines):
# print(f'line: {line}')
missing_item = pattern.findall(line)
if missing_item:
# print(f'missing_item: {missing_item}')
date = pattern.sub(r'\1', line)
# print(f'date: {date}')
missing_list.append(date.rstrip())
reading_df = pd.read_csv(work_dir / 'data/readings.txt', sep=r'\s+',
names=['date', 'time', 'measurements'])
reading_df['date_time'] = reading_df[['date', 'time']].agg(' '.join, axis=1)
reading_df.drop(['date', 'time'], axis=1, inplace=True)
reading_df['date_time'] = pd.to_datetime(reading_df['date_time'], format=r'%m/%d/%Y %H:%M:%S')
# reading_df.set_index('date_time', inplace=True)
reading_df = reading_df[['date_time', 'measurements']]
print(reading_df.head())
print(reading_df.info())
# new_interp = interp(missing_list)
# print(f'missing_list: {missing_list}')
def calcMissing(readings):
pass
# if __name__ == '__main__':
# readings_count = int(input().strip())
# readings = []
# for _ in range(readings_count):
# readings_item = input()
# readings.append(readings_item)
# calcMissing(readings)
| 28.396552 | 94 | 0.64238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 788 | 0.478446 |
a4ba8f00534df85c91df0dc9eef6fe48f92deb79 | 3,777 | py | Python | qcloudsdkvod/ApplyUploadRequest.py | f3n9/qcloudcli | b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19 | [
"Apache-2.0"
] | null | null | null | qcloudsdkvod/ApplyUploadRequest.py | f3n9/qcloudcli | b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19 | [
"Apache-2.0"
] | null | null | null | qcloudsdkvod/ApplyUploadRequest.py | f3n9/qcloudcli | b965a4f0e6cdd79c1245c1d0cd2ca9c460a56f19 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class ApplyUploadRequest(Request):
    """Request wrapper for the Tencent Cloud VOD ``ApplyUpload`` API.

    Each API parameter is exposed through a get_*/set_* accessor pair
    that reads from / writes to the underlying request parameter dict
    maintained by the :class:`Request` base class.

    BUG FIX: the original class defined ``get_procedure``/``set_procedure``
    twice; the duplicate definitions (which silently shadowed the first
    pair) have been removed.
    """

    def __init__(self):
        super(ApplyUploadRequest, self).__init__(
            'vod', 'qcloudcliV1', 'ApplyUpload', 'vod.api.qcloud.com')

    def get_SubAppId(self):
        return self.get_params().get('SubAppId')

    def set_SubAppId(self, SubAppId):
        self.add_param('SubAppId', SubAppId)

    def get_classId(self):
        return self.get_params().get('classId')

    def set_classId(self, classId):
        self.add_param('classId', classId)

    def get_coverName(self):
        return self.get_params().get('coverName')

    def set_coverName(self, coverName):
        self.add_param('coverName', coverName)

    def get_coverSize(self):
        return self.get_params().get('coverSize')

    def set_coverSize(self, coverSize):
        self.add_param('coverSize', coverSize)

    def get_coverType(self):
        return self.get_params().get('coverType')

    def set_coverType(self, coverType):
        self.add_param('coverType', coverType)

    def get_expireTime(self):
        return self.get_params().get('expireTime')

    def set_expireTime(self, expireTime):
        self.add_param('expireTime', expireTime)

    def get_isCdnPusher(self):
        return self.get_params().get('isCdnPusher')

    def set_isCdnPusher(self, isCdnPusher):
        self.add_param('isCdnPusher', isCdnPusher)

    def get_isScreenshot(self):
        return self.get_params().get('isScreenshot')

    def set_isScreenshot(self, isScreenshot):
        self.add_param('isScreenshot', isScreenshot)

    def get_isTranscode(self):
        return self.get_params().get('isTranscode')

    def set_isTranscode(self, isTranscode):
        self.add_param('isTranscode', isTranscode)

    def get_isWatermark(self):
        return self.get_params().get('isWatermark')

    def set_isWatermark(self, isWatermark):
        self.add_param('isWatermark', isWatermark)

    def get_procedure(self):
        return self.get_params().get('procedure')

    def set_procedure(self, procedure):
        self.add_param('procedure', procedure)

    def get_sourceContext(self):
        return self.get_params().get('sourceContext')

    def set_sourceContext(self, sourceContext):
        self.add_param('sourceContext', sourceContext)

    def get_storageRegion(self):
        return self.get_params().get('storageRegion')

    def set_storageRegion(self, storageRegion):
        self.add_param('storageRegion', storageRegion)

    def get_videoFileId(self):
        return self.get_params().get('videoFileId')

    def set_videoFileId(self, videoFileId):
        self.add_param('videoFileId', videoFileId)

    def get_videoName(self):
        return self.get_params().get('videoName')

    def set_videoName(self, videoName):
        self.add_param('videoName', videoName)

    def get_videoSize(self):
        return self.get_params().get('videoSize')

    def set_videoSize(self, videoSize):
        self.add_param('videoSize', videoSize)

    def get_videoStoragePath(self):
        return self.get_params().get('videoStoragePath')

    def set_videoStoragePath(self, videoStoragePath):
        self.add_param('videoStoragePath', videoStoragePath)

    def get_videoType(self):
        return self.get_params().get('videoType')

    def set_videoType(self, videoType):
        self.add_param('videoType', videoType)

    def get_vodSessionKey(self):
        return self.get_params().get('vodSessionKey')

    def set_vodSessionKey(self, vodSessionKey):
        self.add_param('vodSessionKey', vodSessionKey)
| 29.053846 | 70 | 0.681758 | 3,708 | 0.981732 | 0 | 0 | 0 | 0 | 0 | 0 | 570 | 0.150913 |
a4bc05d694bb4b337955eefcda743bc5c3c76501 | 981 | py | Python | Merge Sorted Array.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | Merge Sorted Array.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | Merge Sorted Array.py | sugia/leetcode | 6facec2a54d1d9f133f420c9bce1d1043f57ebc6 | [
"Apache-2.0"
] | null | null | null | '''
Given two sorted integer arrays nums1 and nums2, merge nums2 into nums1 as one sorted array.
Note:
The number of elements initialized in nums1 and nums2 are m and n respectively.
You may assume that nums1 has enough space (size that is greater or equal to m + n) to hold additional elements from nums2.
Example:
Input:
nums1 = [1,2,3,0,0,0], m = 3
nums2 = [2,5,6], n = 3
Output: [1,2,2,3,5,6]
'''
class Solution(object):
    def merge(self, nums1, m, nums2, n):
        """Merge the first n elements of nums2 into nums1 in place.

        nums1 holds m sorted values followed by n slots of padding;
        after the call its first m + n entries are the sorted union.

        :type nums1: List[int]
        :type m: int
        :type nums2: List[int]
        :type n: int
        :rtype: void Do not return anything, modify nums1 in-place instead.
        """
        # Fill from the back so no unread element of nums1 is overwritten.
        write = m + n - 1
        i, j = m - 1, n - 1
        while i >= 0 and j >= 0:
            if nums2[j] > nums1[i]:
                nums1[write] = nums2[j]
                j -= 1
            else:
                # Ties take the nums1 element first, matching a stable merge.
                nums1[write] = nums1[i]
                i -= 1
            write -= 1
        # Leftovers of nums2 belong at the front; leftovers of nums1 are
        # already in position (j == -1 makes this a no-op).
        nums1[:j + 1] = nums2[:j + 1]
| 25.153846 | 127 | 0.522936 | 557 | 0.567788 | 0 | 0 | 0 | 0 | 0 | 0 | 616 | 0.627931 |
a4bd94902475f23a476e7f0700e3dc819593d4d1 | 275 | py | Python | test.py | stepan20000/MITx-6-00-1 | 850981013ab00cf476b01fa93130a38aaaea5aea | [
"MIT"
] | null | null | null | test.py | stepan20000/MITx-6-00-1 | 850981013ab00cf476b01fa93130a38aaaea5aea | [
"MIT"
] | null | null | null | test.py | stepan20000/MITx-6-00-1 | 850981013ab00cf476b01fa93130a38aaaea5aea | [
"MIT"
] | null | null | null | import string
def foo(shift):
    """Return a Caesar-cipher substitution table.

    Maps every ASCII letter to the letter `shift` positions later in the
    alphabet (wrapping around), preserving case. 52 entries total.
    """
    lower = string.ascii_lowercase
    upper = string.ascii_uppercase
    table = {c: lower[(i + shift) % 26] for i, c in enumerate(lower)}
    table.update({c: upper[(i + shift) % 26] for i, c in enumerate(upper)})
    return table
print(foo(1))
| 25 | 57 | 0.603636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a4be3796f527611e790afc6213becfff4d63182b | 1,936 | py | Python | deepreg/model/loss/deform.py | agrimwood/DeepRegFromMain20200714 | 1a1b82ca1e09ee03b1a04f35e192e3230be1c2eb | [
"Apache-2.0"
] | null | null | null | deepreg/model/loss/deform.py | agrimwood/DeepRegFromMain20200714 | 1a1b82ca1e09ee03b1a04f35e192e3230be1c2eb | [
"Apache-2.0"
] | null | null | null | deepreg/model/loss/deform.py | agrimwood/DeepRegFromMain20200714 | 1a1b82ca1e09ee03b1a04f35e192e3230be1c2eb | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
def local_displacement_energy(ddf, energy_type, **kwargs):
    """Smoothness regularization energy of a dense displacement field.

    :param ddf: dense displacement field; the `[..., i]` indexing with
        `axis=4` stacking implies shape (batch, x, y, z, 3) — the last
        axis holds the x/y/z displacement components.
    :param energy_type: "bending", "gradient-l2" or "gradient-l1"
    :param kwargs: accepted but unused (interface compatibility)
    :return: one energy value per batch element (all spatial and channel
        axes are reduced by tf.reduce_mean over [1, 2, 3, 4])
    :raises ValueError: for an unknown energy_type
    """
    # Central finite differences along each spatial axis. The other two
    # spatial axes are cropped by one voxel on each side (1:-1) so all
    # three gradient tensors share the same shape.
    def gradient_dx(fv):
        return (fv[:, 2:, 1:-1, 1:-1] - fv[:, :-2, 1:-1, 1:-1]) / 2
    def gradient_dy(fv):
        return (fv[:, 1:-1, 2:, 1:-1] - fv[:, 1:-1, :-2, 1:-1]) / 2
    def gradient_dz(fv):
        return (fv[:, 1:-1, 1:-1, 2:] - fv[:, 1:-1, 1:-1, :-2]) / 2
    # Apply a scalar-field gradient to each of the three displacement
    # components and stack the results into a new trailing axis.
    def gradient_txyz(Txyz, fn):
        return tf.stack([fn(Txyz[..., i]) for i in [0, 1, 2]], axis=4)
    # Mean L1 norm or mean squared L2 norm of the first derivatives.
    def compute_gradient_norm(displacement, l1=False):
        dTdx = gradient_txyz(displacement, gradient_dx)
        dTdy = gradient_txyz(displacement, gradient_dy)
        dTdz = gradient_txyz(displacement, gradient_dz)
        if l1:
            norms = tf.abs(dTdx) + tf.abs(dTdy) + tf.abs(dTdz)
        else:
            norms = dTdx ** 2 + dTdy ** 2 + dTdz ** 2
        return tf.reduce_mean(norms, [1, 2, 3, 4])
    # Bending energy: mean of squared second derivatives, with the mixed
    # partials counted twice (symmetric Hessian off-diagonals).
    def compute_bending_energy(displacement):
        dTdx = gradient_txyz(displacement, gradient_dx)
        dTdy = gradient_txyz(displacement, gradient_dy)
        dTdz = gradient_txyz(displacement, gradient_dz)
        dTdxx = gradient_txyz(dTdx, gradient_dx)
        dTdyy = gradient_txyz(dTdy, gradient_dy)
        dTdzz = gradient_txyz(dTdz, gradient_dz)
        dTdxy = gradient_txyz(dTdx, gradient_dy)
        dTdyz = gradient_txyz(dTdy, gradient_dz)
        dTdxz = gradient_txyz(dTdx, gradient_dz)
        return tf.reduce_mean(
            dTdxx ** 2
            + dTdyy ** 2
            + dTdzz ** 2
            + 2 * dTdxy ** 2
            + 2 * dTdxz ** 2
            + 2 * dTdyz ** 2,
            [1, 2, 3, 4],
        )
    if energy_type == "bending":
        return compute_bending_energy(ddf)
    elif energy_type == "gradient-l2":
        return compute_gradient_norm(ddf)
    elif energy_type == "gradient-l1":
        return compute_gradient_norm(ddf, l1=True)
    else:
        raise ValueError("Unknown regularizer.")
a4bf181a35293cbead4d6b8183631e557900dd73 | 1,271 | py | Python | sa/profiles/Generic/get_inventory.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | sa/profiles/Generic/get_inventory.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | sa/profiles/Generic/get_inventory.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ---------------------------------------------------------------------
# Generic.get_inventory
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinventory import IGetInventory
class Script(BaseScript):
    """Derive a single-chassis inventory entry from get_version output."""

    name = "Generic.get_inventory"
    interface = IGetInventory

    def get_inv_from_version(self):
        # Both the SNMP and CLI entry points delegate here: the inventory
        # is reconstructed entirely from the get_version script result.
        version = self.scripts.get_version()
        attrs = version["attributes"] if "attributes" in version else {}
        serial = attrs["Serial Number"] if "Serial Number" in attrs else None
        revision = attrs["HW version"] if "HW version" in attrs else None
        chassis = {
            "type": "CHASSIS",
            "vendor": version["vendor"],
            "part_no": [version["platform"]],
            "serial": serial,
            "revision": revision,
        }
        return [chassis]

    def execute_snmp(self):
        return self.get_inv_from_version()

    def execute_cli(self):
        return self.get_inv_from_version()
| 31 | 71 | 0.487018 | 843 | 0.663257 | 0 | 0 | 0 | 0 | 0 | 0 | 532 | 0.418568 |
a4c11ca7582fc66a6f41cd73075b425763f4d114 | 2,514 | py | Python | pirates/uberdog/DistributedAvatarManager.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | 3 | 2021-02-25T06:38:13.000Z | 2022-03-22T07:00:15.000Z | pirates/uberdog/DistributedAvatarManager.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | null | null | null | pirates/uberdog/DistributedAvatarManager.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | 1 | 2021-02-25T06:38:17.000Z | 2021-02-25T06:38:17.000Z | # uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.uberdog.DistributedAvatarManager
from otp.uberdog.OtpAvatarManager import OtpAvatarManager
from otp.otpbase import OTPGlobals
class DistributedAvatarManager(OtpAvatarManager):
    """Client-side avatar manager: forwards avatar management requests to
    the server via sendUpdate and broadcasts the replies on the global
    `messenger` event bus."""
    __module__ = __name__
    def sendAvIdList(self, avIds):
        # Intentionally a no-op in this implementation.
        pass
    def sendRequestFinalize(self, avId):
        # The leading 0 appears in every request below — presumably an
        # unused context/echo field; confirm against the DC definition.
        self.sendUpdate('requestFinalize', [0, avId])
    def sendRequestCreateAvatar(self, subId):
        self.sendUpdate('requestCreateAvatar', [0, subId])
    def sendRequestPopulateAvatar(self, avId, avatarData, usePattern, nicknameIndex, firstIndex, prefixIndex, suffixIndex):
        self.sendUpdate('requestPopulateAvatar', [0, avId, avatarData, usePattern, nicknameIndex, firstIndex, prefixIndex, suffixIndex])
    def sendRequestPatternName(self, avId, nicknameIndex, firstIndex, prefixIndex, suffixIndex):
        self.sendUpdate('requestPatternName', [0, avId, nicknameIndex, firstIndex, prefixIndex, suffixIndex])
    def populateAvatarResponse(self, success):
        # Only broadcast on success; failures are silently dropped here.
        if success:
            messenger.send('avatarPopulated')
    def patternNameResponse(self, success):
        if success:
            messenger.send('patternNameSet')
    def avatarListResponse(self, accounts, numInventoryManagers):
        # Translate the server's positional account/avatar tuples into
        # per-subscription lists of avatar dicts, then broadcast them.
        base.cr.createInventoryManagers(numInventoryManagers)
        finalData = {}
        for sub in accounts:
            # sub layout: (subId, numPending, maxAvatars, maxSlots, avatars)
            subId = sub[0]
            numPending = sub[1]
            maxAvatars = sub[2]
            maxSlots = sub[3]
            avatars = sub[4]
            avatarData = []
            for av in avatars:
                # av[1] is the DNA object; stamp the name onto it first.
                av[1].setName(av[0])
                avatarData.append({'name': av[0], 'dna': av[1], 'slot': av[2], 'id': av[3], 'creator': av[4], 'shared': av[5], 'online': av[6], 'wishName': av[7], 'wishState': av[8], 'defaultShard': av[9], 'lastLogout': av[10]})
            # Pad the list with placeholder markers: avatars still being
            # created, then open slots up to maxAvatars, then locked slots
            # up to maxSlots.
            if numPending > 0:
                avatarData += [OTPGlobals.AvatarPendingCreate] * numPending
            curNum = len(avatarData)
            if maxAvatars > curNum:
                avatarData += [OTPGlobals.AvatarSlotAvailable] * (maxAvatars - curNum)
            curNum = len(avatarData)
            if maxSlots > curNum:
                avatarData += [OTPGlobals.AvatarSlotUnavailable] * (maxSlots - curNum)
            finalData[subId] = avatarData
        messenger.send('avatarList', [finalData])
a4c132843d17f9507a14b314301300f42b750e05 | 240 | py | Python | src/the_tale/the_tale/game/roads/admin.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | 85 | 2017-11-21T12:22:02.000Z | 2022-03-27T23:07:17.000Z | src/the_tale/the_tale/game/roads/admin.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | 545 | 2017-11-04T14:15:04.000Z | 2022-03-27T14:19:27.000Z | src/the_tale/the_tale/game/roads/admin.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | 45 | 2017-11-11T12:36:30.000Z | 2022-02-25T06:10:44.000Z |
import smart_imports
smart_imports.all()
class RoadAdmin(django_admin.ModelAdmin):
    # Admin list view for roads: show both endpoints and the length,
    # and allow filtering by either endpoint.
    list_display = ('id', 'point_1', 'point_2', 'length')
    list_filter = ('point_1', 'point_2')
django_admin.site.register(models.Road, RoadAdmin)
| 17.142857 | 57 | 0.720833 | 141 | 0.5875 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.2 |
a4c21f79ab6644142bcaa20009e1b5182faf4d57 | 287 | py | Python | test.py | lexibank/asjp | 309d372b89c92d7013284d281023b23ca5d68524 | [
"CC-BY-4.0"
] | null | null | null | test.py | lexibank/asjp | 309d372b89c92d7013284d281023b23ca5d68524 | [
"CC-BY-4.0"
] | 15 | 2018-04-25T10:11:30.000Z | 2021-04-26T11:19:24.000Z | test.py | lexibank/asjp | 309d372b89c92d7013284d281023b23ca5d68524 | [
"CC-BY-4.0"
] | 1 | 2019-06-18T11:57:38.000Z | 2019-06-18T11:57:38.000Z |
def test_valid(cldf_dataset, cldf_logger):
    # cldf_dataset / cldf_logger are presumably pytest fixtures supplied by
    # the test harness. The dataset must pass CLDF structural validation.
    assert cldf_dataset.validate(log=cldf_logger)
def test_parameters(cldf_dataset):
    # The dataset is expected to define exactly 100 parameters (concepts).
    assert len(list(cldf_dataset["ParameterTable"])) == 100
def test_languages(cldf_dataset):
    # Sanity lower bound: the dataset covers more than 4000 language varieties.
    assert len(list(cldf_dataset["LanguageTable"])) > 4000
| 23.916667 | 59 | 0.763066 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.108014 |
a4c29d9eb840f43da6d451a47b01364e9c6ce46c | 5,040 | py | Python | nicos_mlz/spodi/setups/special/monitor-html.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 12 | 2019-11-06T15:40:36.000Z | 2022-01-01T16:23:00.000Z | nicos_mlz/spodi/setups/special/monitor-html.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 91 | 2020-08-18T09:20:26.000Z | 2022-02-01T11:07:14.000Z | nicos_mlz/spodi/setups/special/monitor-html.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 6 | 2020-01-11T10:52:30.000Z | 2022-02-25T12:35:23.000Z | description = 'setup for the status monitor'
group = 'special'
_expcolumn = Column(
Block('Experiment', [
BlockRow(
# Field(name='Proposal', key='exp/proposal', width=7),
# Field(name='Title', key='exp/title', width=20,
# istext=True, maxlen=20),
Field(name='Current status', key='exp/action', width=40,
istext=True, maxlen=40),
Field(name='Data file', key='exp/lastpoint'),
),
],
),
)
_sampletable = Column(
Block('Sample table', [
BlockRow(
Field(dev='omgs'),
),
BlockRow(
Field(dev='tths'),
),
],
),
)
_instrument = Column(
Block('Instrument', [
BlockRow(
Field(dev='wav'),
),
BlockRow(
Field(dev='slits'),
),
BlockRow(
Field(dev='mon'),
Field(name='Resosteps', key='adet/resosteps'),
Field(name='Step', key='adet/value[0]'),
),
],
),
)
_frm = Column(
Block('FRM II', [
BlockRow(
Field(dev='ReactorPower'),
),
],
),
Block('SPODI', [
BlockRow(
Field(name='O2', dev='o2_nguide'),
Field(name='O2 part', dev='o2part_nguide'),
),
BlockRow(
Field(name='p1 N-Guide', dev='p1_nguide'),
Field(name='p2 N-Guide', dev='p2_nguide'),
Field(name='p3 N-Guide', dev='p3_nguide'),
),
],
),
)
# generic CCR-stuff
ccrs = []
ccrsupps = []
ccrplots = []
_ccrnrs = [6,] + list(range(10, 22 + 1))
for i in _ccrnrs:
ccrs.append(
Block('CCR%d-Pulse tube' % i, [
BlockRow(
Field(dev='t_ccr%d_c' % i, name='Coldhead'),
Field(dev='t_ccr%d_d' % i, name='Regulation'),
Field(dev='t_ccr%d_b' % i, name='Sample'),
),
BlockRow(
Field(key='t_ccr%d/setpoint' % i, name='Setpoint'),
Field(key='t_ccr%d/p' % i, name='P', width=7),
Field(key='t_ccr%d/i' % i, name='I', width=7),
Field(key='t_ccr%d/d' % i, name='D', width=6),
),
],
setups='ccr%d and not cci3he0*' % i,
),
)
ccrsupps.append(
Block('CCR%d' % i, [
BlockRow(
Field(dev='T_ccr%d_A' % i, name='A'),
Field(dev='T_ccr%d_B' % i, name='B'),
Field(dev='T_ccr%d_C' % i, name='C'),
Field(dev='T_ccr%d_D' % i, name='D'),
),
BlockRow(
Field(key='t_ccr%d/setpoint' % i, name='SetP.', width=6),
Field(key='t_ccr%d/p' % i, name='P', width=4),
Field(key='t_ccr%d/i' % i, name='I', width=4),
Field(key='t_ccr%d/d' % i, name='D', width=3),
),
BlockRow(
Field(dev='ccr%d_p1' % i, name='P1'),
Field(dev='ccr%d_p2' % i, name='P2'),
),
],
setups='ccr%d' % i,
),
)
_cryo = Column(*ccrs)
_cryosup = Column(*ccrsupps)
_htf = Column(
Block('HTF', [
BlockRow(
Field(dev='T'),
Field(name='Power', key='T/heaterpower'),
),
],
setups='htf*',
),
)
_magnet = Column(
Block('Magnet', [
BlockRow(
Field(dev='B'),
),
],
setups='ccm*',
),
)
_sc = Column(
Block('Sample Changer', [
BlockRow(
Field(dev='sams'),
),
],
setups='samplechanger',
),
)
_e = Column(
Block('E field', [
BlockRow(
Field(dev='E'),
),
],
setups='efield',
),
)
_tension = Column(
Block('Tension rack', [
BlockRow(
Field(dev='teload'),
Field(dev='tepos'),
Field(dev='teext'),
Field(dev='topos'),
Field(dev='tomom'),
),
],
setups='tensile',
),
)
_nps =[1,2,3,10,11,12]
_npblocks = []
for i in _nps:
_npblocks.append(
Block('Newport', [
BlockRow(
Field(dev='sth_rsc%02d' % i),
),
],
setups='rsc%02d' % i,
),
)
_rsc = Column(*_npblocks)
devices = dict(
Monitor = device('nicos.services.monitor.html.Monitor',
title = 'SPODI status monitor',
loglevel = 'info',
interval = 10,
filename = '/spodicontrol/webroot/index.html',
cache = 'spodictrl.spodi.frm2',
font = 'Luxi Sans',
valuefont = 'Consolas',
prefix = 'nicos/',
padding = 0,
fontsize = 24,
layout = [
Row(_expcolumn),
Row(_frm, _instrument, _sampletable),
Row(_htf,),
Row(_cryosup),
Row(_tension),
Row(_magnet, _e,),
Row(_sc, _rsc),
],
noexpired = True,
),
)
| 23.551402 | 73 | 0.435317 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,188 | 0.235714 |
a4c36212e0c38956baeca1b3cd655252498a5ef0 | 4,400 | bzl | Python | tools/tsec.bzl | brkgyln/angular | 18dc2a9be021909f58ee21754a6ee09cec1ac1f0 | [
"MIT"
] | null | null | null | tools/tsec.bzl | brkgyln/angular | 18dc2a9be021909f58ee21754a6ee09cec1ac1f0 | [
"MIT"
] | null | null | null | tools/tsec.bzl | brkgyln/angular | 18dc2a9be021909f58ee21754a6ee09cec1ac1f0 | [
"MIT"
] | null | null | null | """Bazel rules and macros for running tsec over a ng_module or ts_library."""
load("@npm//@bazel/typescript/internal:ts_config.bzl", "TsConfigInfo")
load("@build_bazel_rules_nodejs//:providers.bzl", "DeclarationInfo")
load("@npm//tsec:index.bzl", _tsec_test = "tsec_test")
TsecTsconfigInfo = provider(fields = ["src", "exemption", "deps"])
def _tsec_config_impl(ctx):
    """Flatten the transitive tsconfig/exemption files of a tsec_config target.

    Collects every file reachable through `deps` (both ts_config and
    tsec_config targets) so a tsec invocation can resolve the full
    tsconfig "extends" chain.
    """
    deps = []
    # Gather all extended tsconfig files.
    if ctx.files.deps:
        deps.extend(ctx.files.deps)
    for d in ctx.attr.deps:
        # Gather all files from extended ts_config targets.
        if TsConfigInfo in d:
            deps.extend(d[TsConfigInfo].deps)
        # Gather all files from extended tsec_config targets.
        if TsecTsconfigInfo in d:
            deps.append(d[TsecTsconfigInfo].src)
            if d[TsecTsconfigInfo].exemption:
                deps.append(d[TsecTsconfigInfo].exemption)
            deps.extend(d[TsecTsconfigInfo].deps)
    src = ctx.file.src
    return [
        # We need $(rootpath tsec_config_target) to get the path
        # of the top-level config file as the argument for tsec
        # binary. Only `src` should be stored in the DefaultInfo
        # provider.
        DefaultInfo(files = depset([src])),
        TsecTsconfigInfo(
            src = src,
            exemption = ctx.file.exemption,
            deps = deps,
        ),
    ]
tsec_config = rule(
implementation = _tsec_config_impl,
attrs = {
"src": attr.label(
mandatory = True,
allow_single_file = [".json"],
doc = """The single tsconfig used for running tsec.""",
),
"deps": attr.label_list(
allow_files = [".json"],
doc = """Any configs extended by `src`.""",
),
"exemption": attr.label(
allow_single_file = [".json"],
doc = """The exemption list used by `src`.""",
),
},
doc = """Compute all transitive dependencies of a tsec_test config. """,
)
TsLibInfo = provider(fields = ["srcs", "deps"])
def _capture_tsec_deps_aspect_impl(target, ctx):
    """Forward `srcs` and `deps` of `ts_library` and `ng_module` macros to `_tsec_test`."""
    # `target` itself is unused; only the rule's attributes are captured.
    return [TsLibInfo(srcs = ctx.rule.attr.srcs, deps = ctx.rule.attr.deps)]
_capture_tsec_deps_aspect = aspect(
implementation = _capture_tsec_deps_aspect_impl,
)
def _all_transitive_deps_impl(ctx):
    """Collect every file a tsec run needs: the tsconfig chain plus the
    sources and transitive declarations of the checked TypeScript target."""
    files = []
    # The tsconfig attribute must come from tsec_config (carries the
    # flattened extends chain and the optional exemption list).
    if TsecTsconfigInfo not in ctx.attr.tsconfig:
        fail("`tsconfig` must be a tsec_config target")
    tsec_tsconfig_info = ctx.attr.tsconfig[TsecTsconfigInfo]
    files.append(tsec_tsconfig_info.src)
    if tsec_tsconfig_info.exemption:
        files.append(tsec_tsconfig_info.exemption)
    files.extend(tsec_tsconfig_info.deps)
    # TsLibInfo is attached by _capture_tsec_deps_aspect on ts_target.
    if TsLibInfo not in ctx.attr.ts_target:
        fail("`target` must be a ts_library or ng_module target")
    ts_target_info = ctx.attr.ts_target[TsLibInfo]
    for s in ts_target_info.srcs:
        if hasattr(s, "files"):
            files.extend(s.files.to_list())
    for d in ts_target_info.deps:
        # Transitive .d.ts declarations plus any direct output files.
        if DeclarationInfo in d:
            files.extend(d[DeclarationInfo].transitive_declarations.to_list())
        if hasattr(d, "files"):
            files.extend(d.files.to_list())
    return [DefaultInfo(files = depset(files))]
_all_transitive_deps = rule(
implementation = _all_transitive_deps_impl,
attrs = {
"tsconfig": attr.label(),
"ts_target": attr.label(aspects = [_capture_tsec_deps_aspect]),
},
doc = """Expand all transitive dependencies needed to run `_tsec_test`.""",
)
def tsec_test(name, target, tsconfig):
    """Run tsec over a ts_library or ng_module target to check its compatibility with Trusted Types.

    This rule DOES NOT check transitive dependencies.

    Args:
      name: name of the tsec test
      target: the ts_library or ng_module target to be checked
      tsconfig: the tsec_config target used for configuring tsec
    """
    all_transitive_deps_name = "%s_all_transitive_deps" % name
    # Materialize every runtime input of tsec (tsconfig chain, sources,
    # declarations) as one intermediate target consumed as `data` below.
    _all_transitive_deps(
        name = all_transitive_deps_name,
        testonly = True,
        tsconfig = tsconfig,
        ts_target = target,
        tags = ["tsec"],
    )
    # tsec itself only receives the top-level tsconfig path via -p; the
    # remaining files are made available through the data dependency.
    _tsec_test(
        name = name,
        data = [tsconfig, all_transitive_deps_name],
        tags = ["tsec"],
        templated_args = ["-p", "$(rootpath %s)" % tsconfig],
    )
| 32.835821 | 100 | 0.635455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,492 | 0.339091 |
a4c37699bb55af86cd094b707c4800bfc7f95014 | 805 | py | Python | simulation_scripts/Q2.py | szwieback/BayesianTripleCollocation | a288b2a9ac547fdae389714dbdae1e34789f197b | [
"ECL-2.0"
] | null | null | null | simulation_scripts/Q2.py | szwieback/BayesianTripleCollocation | a288b2a9ac547fdae389714dbdae1e34789f197b | [
"ECL-2.0"
] | null | null | null | simulation_scripts/Q2.py | szwieback/BayesianTripleCollocation | a288b2a9ac547fdae389714dbdae1e34789f197b | [
"ECL-2.0"
] | 2 | 2018-09-11T18:14:32.000Z | 2021-09-12T07:45:53.000Z | '''
Created on Jun 8, 2017
@author: zwieback
'''
import numpy as np
import os
from simulation_internal import simulation_internal
from simulation_paths import path
def Q2_simulation():
    """Run every Q2 scenario at each sample size, repeating each
    configuration 25 times with a reproducible RNG stream."""
    sample_sizes = [100, 250, 500]
    n_repetitions = 25
    n_iter = 2000
    n_chains = 2
    seed = 1234
    scenarios = ['Q2kappa_base', 'Q2lambdamu_base', 'Q2kappa_lambdamu', 'Q2spline_base']
    for scenario in scenarios:
        for sample_size in sample_sizes:
            # Re-seed per (scenario, sample size): all repetitions of one
            # configuration draw from a single reproducible stream.
            rng = np.random.RandomState(seed)
            for repetition in range(n_repetitions):
                out_dir = os.path.join(path, scenario, str(sample_size), str(repetition))
                simulation_internal(scenario, sample_size, numpy_rng=rng,
                                    pathout=out_dir, niter=n_iter, nchains=n_chains)
if __name__=='__main__':
Q2_simulation() | 27.758621 | 119 | 0.613665 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.152795 |
a4c5208b29478e543aa3e2775cb3f49127a4fde6 | 1,088 | py | Python | app.py | giao-cloude/card | 8ddc7b0cacd412a1cc15cb1c3d020463c79a33b6 | [
"MIT"
] | null | null | null | app.py | giao-cloude/card | 8ddc7b0cacd412a1cc15cb1c3d020463c79a33b6 | [
"MIT"
] | null | null | null | app.py | giao-cloude/card | 8ddc7b0cacd412a1cc15cb1c3d020463c79a33b6 | [
"MIT"
] | null | null | null | #coding:utf-8
import tensorflow as tf
import backward
import forward
import PreProcess as PP
def restore_model(testArr):
    # Rebuild the inference graph, restore the exponential-moving-average
    # shadow weights from the latest checkpoint, and classify testArr.
    # Returns the predicted class indices, or -1 if no checkpoint exists.
    # NOTE(review): the graph and session are rebuilt on every call — fine
    # for occasional use, slow if called per sample in a loop.
    with tf.Graph().as_default() as tg:
        # testArr is fed as (batch, INPUT_NODE) flattened samples.
        x = tf.placeholder(tf.float32, [None, forward.INPUT_NODE])
        y = forward.forward(x, None)
        preValue = tf.argmax(y, 1)
        # Restore the EMA shadow variables in place of the raw weights.
        variable_averages = tf.train.ExponentialMovingAverage(backward.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(backward.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                preValue = sess.run(preValue, feed_dict={x:testArr})
                return preValue
            else:
                # Sentinel error value; callers must check for -1.
                print("No checkpoint file found")
                return -1
def application(file_path):
    """Recognise the digits in the image at file_path and print the result."""
    samples = PP.image_process(file_path)
    label = ''
    if len(samples) == 0:
        # Preprocessing found nothing usable in the image.
        print("识别失败,请传入更清晰的图片")  # recognition failed; supply a clearer image
    else:
        print("正在识别......")  # recognising...
        for index in range(len(samples)):
            # Slice (not index) to keep the batch dimension for the model.
            digit = restore_model(samples[index:index + 1])[0]
            label = label + str(digit)
        print("识别结果:" + label)  # recognition result
| 26.536585 | 86 | 0.726103 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.107584 |
a4c5246b1bb457ccf2adc163a51a115f0a845803 | 56 | py | Python | utils/test_fm.py | dilum1995/DAugmentor | 6cc86dccf826415a88b8226265e16ae96b5cc05b | [
"MIT"
] | 1 | 2020-08-02T13:06:03.000Z | 2020-08-02T13:06:03.000Z | utils/test_fm.py | dilum1995/DAugmentor | 6cc86dccf826415a88b8226265e16ae96b5cc05b | [
"MIT"
] | null | null | null | utils/test_fm.py | dilum1995/DAugmentor | 6cc86dccf826415a88b8226265e16ae96b5cc05b | [
"MIT"
] | null | null | null | from utils import constants as const
print(const.PATH)
| 14 | 36 | 0.803571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
a4c65724407b507d3c3575879bb612a5d9d25dea | 20,619 | py | Python | jsk_recognition/jsk_perception/node_scripts/openpose/pose_net.py | VT-ASIM-LAB/autoware.ai | 211dff3bee2d2782cb10444272c5d98d1f30d33a | [
"Apache-2.0"
] | null | null | null | jsk_recognition/jsk_perception/node_scripts/openpose/pose_net.py | VT-ASIM-LAB/autoware.ai | 211dff3bee2d2782cb10444272c5d98d1f30d33a | [
"Apache-2.0"
] | null | null | null | jsk_recognition/jsk_perception/node_scripts/openpose/pose_net.py | VT-ASIM-LAB/autoware.ai | 211dff3bee2d2782cb10444272c5d98d1f30d33a | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import os
import itertools, pkg_resources, sys
from distutils.version import LooseVersion
if LooseVersion(pkg_resources.get_distribution("chainer").version) >= LooseVersion('7.0.0') and \
sys.version_info.major == 2:
print('''Please install chainer <= 7.0.0:
sudo pip install chainer==6.7.0
c.f https://github.com/jsk-ros-pkg/jsk_recognition/pull/2485
''', file=sys.stderr)
sys.exit(1)
if [p for p in list(itertools.chain(*[pkg_resources.find_distributions(_) for _ in sys.path])) if "cupy-" in p.project_name ] == []:
print('''Please install CuPy
sudo pip install cupy-cuda[your cuda version]
i.e.
sudo pip install cupy-cuda91
''', file=sys.stderr)
sys.exit(1)
import chainer
import chainer.functions as F
import chainer.links as L
base_url = 'http://posefs1.perception.cs.cmu.edu/OpenPose/models/pose/'
models = {
'auto': 'coco/pose_iter_440000.chainermodel',
'coco': 'coco/pose_iter_440000.chainermodel',
'mpi': 'mpi/pose_iter_160000.chainermodel',
}
class PoseNet(chainer.Chain):
def __init__(self, pretrained_model='auto'):
super(PoseNet, self).__init__()
with self.init_scope():
self.conv1_1 = L.Convolution2D(
in_channels=3, out_channels=64, ksize=3, stride=1, pad=1)
self.conv1_2 = L.Convolution2D(
in_channels=64, out_channels=64, ksize=3, stride=1, pad=1)
self.conv2_1 = L.Convolution2D(
in_channels=64, out_channels=128, ksize=3, stride=1, pad=1)
self.conv2_2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=3, stride=1, pad=1)
self.conv3_1 = L.Convolution2D(
in_channels=128, out_channels=256, ksize=3, stride=1, pad=1)
self.conv3_2 = L.Convolution2D(
in_channels=256, out_channels=256, ksize=3, stride=1, pad=1)
self.conv3_3 = L.Convolution2D(
in_channels=256, out_channels=256, ksize=3, stride=1, pad=1)
self.conv3_4 = L.Convolution2D(
in_channels=256, out_channels=256, ksize=3, stride=1, pad=1)
self.conv4_1 = L.Convolution2D(
in_channels=256, out_channels=512, ksize=3, stride=1, pad=1)
self.conv4_2 = L.Convolution2D(
in_channels=512, out_channels=512, ksize=3, stride=1, pad=1)
self.conv4_3_CPM = L.Convolution2D(
in_channels=512, out_channels=256, ksize=3, stride=1, pad=1)
self.conv4_4_CPM = L.Convolution2D(
in_channels=256, out_channels=128, ksize=3, stride=1, pad=1)
# stage1
self.conv5_1_CPM_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=3, stride=1, pad=1)
self.conv5_2_CPM_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=3, stride=1, pad=1)
self.conv5_3_CPM_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=3, stride=1, pad=1)
self.conv5_4_CPM_L1 = L.Convolution2D(
in_channels=128, out_channels=512, ksize=1, stride=1, pad=0)
self.conv5_5_CPM_L1 = L.Convolution2D(
in_channels=512, out_channels=38, ksize=1, stride=1, pad=0)
self.conv5_1_CPM_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=3, stride=1, pad=1)
self.conv5_2_CPM_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=3, stride=1, pad=1)
self.conv5_3_CPM_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=3, stride=1, pad=1)
self.conv5_4_CPM_L2 = L.Convolution2D(
in_channels=128, out_channels=512, ksize=1, stride=1, pad=0)
self.conv5_5_CPM_L2 = L.Convolution2D(
in_channels=512, out_channels=19, ksize=1, stride=1, pad=0)
# stage2
self.Mconv1_stage2_L1 = L.Convolution2D(
in_channels=185, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv2_stage2_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv3_stage2_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv4_stage2_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv5_stage2_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv6_stage2_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=1, stride=1, pad=0)
self.Mconv7_stage2_L1 = L.Convolution2D(
in_channels=128, out_channels=38, ksize=1, stride=1, pad=0)
self.Mconv1_stage2_L2 = L.Convolution2D(
in_channels=185, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv2_stage2_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv3_stage2_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv4_stage2_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv5_stage2_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv6_stage2_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=1, stride=1, pad=0)
self.Mconv7_stage2_L2 = L.Convolution2D(
in_channels=128, out_channels=19, ksize=1, stride=1, pad=0)
# stage3
self.Mconv1_stage3_L1 = L.Convolution2D(
in_channels=185, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv2_stage3_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv3_stage3_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv4_stage3_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv5_stage3_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv6_stage3_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=1, stride=1, pad=0)
self.Mconv7_stage3_L1 = L.Convolution2D(
in_channels=128, out_channels=38, ksize=1, stride=1, pad=0)
self.Mconv1_stage3_L2 = L.Convolution2D(
in_channels=185, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv2_stage3_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv3_stage3_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv4_stage3_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv5_stage3_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv6_stage3_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=1, stride=1, pad=0)
self.Mconv7_stage3_L2 = L.Convolution2D(
in_channels=128, out_channels=19, ksize=1, stride=1, pad=0)
# stage4
self.Mconv1_stage4_L1 = L.Convolution2D(
in_channels=185, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv2_stage4_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv3_stage4_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv4_stage4_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv5_stage4_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv6_stage4_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=1, stride=1, pad=0)
self.Mconv7_stage4_L1 = L.Convolution2D(
in_channels=128, out_channels=38, ksize=1, stride=1, pad=0)
self.Mconv1_stage4_L2 = L.Convolution2D(
in_channels=185, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv2_stage4_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv3_stage4_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv4_stage4_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv5_stage4_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv6_stage4_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=1, stride=1, pad=0)
self.Mconv7_stage4_L2 = L.Convolution2D(
in_channels=128, out_channels=19, ksize=1, stride=1, pad=0)
# stage5
self.Mconv1_stage5_L1 = L.Convolution2D(
in_channels=185, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv2_stage5_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv3_stage5_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv4_stage5_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv5_stage5_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv6_stage5_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=1, stride=1, pad=0)
self.Mconv7_stage5_L1 = L.Convolution2D(
in_channels=128, out_channels=38, ksize=1, stride=1, pad=0)
self.Mconv1_stage5_L2 = L.Convolution2D(
in_channels=185, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv2_stage5_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv3_stage5_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv4_stage5_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv5_stage5_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv6_stage5_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=1, stride=1, pad=0)
self.Mconv7_stage5_L2 = L.Convolution2D(
in_channels=128, out_channels=19, ksize=1, stride=1, pad=0)
# stage6
self.Mconv1_stage6_L1 = L.Convolution2D(
in_channels=185, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv2_stage6_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv3_stage6_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv4_stage6_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv5_stage6_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv6_stage6_L1 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=1, stride=1, pad=0)
self.Mconv7_stage6_L1 = L.Convolution2D(
in_channels=128, out_channels=38, ksize=1, stride=1, pad=0)
self.Mconv1_stage6_L2 = L.Convolution2D(
in_channels=185, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv2_stage6_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv3_stage6_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv4_stage6_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv5_stage6_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=7, stride=1, pad=3)
self.Mconv6_stage6_L2 = L.Convolution2D(
in_channels=128, out_channels=128, ksize=1, stride=1, pad=0)
self.Mconv7_stage6_L2 = L.Convolution2D(
in_channels=128, out_channels=19, ksize=1, stride=1, pad=0)
if pretrained_model in models.keys():
data_dir = chainer.dataset.get_dataset_directory('openpose/pose')
model_path = os.path.join(data_dir, models[pretrained_model])
try:
os.makedirs(os.path.dirname(model_path))
except OSError:
pass
chainer.dataset.cache_or_load_file(
model_path,
lambda f: _download_pretrained_model(pretrained_model, f),
lambda f: f)
chainer.serializers.load_npz(model_path, self)
elif pretrained_model is not None:
if not os.path.exists(pretrained_model):
raise OSError('model does not exists: "%s"' % pretrained_model)
chainer.serializers.load_npz(pretrained_model, self)
def __call__(self, x):
heatmaps = []
pafs = []
h = F.relu(self.conv1_1(x))
h = F.relu(self.conv1_2(h))
h = F.max_pooling_2d(h, ksize=2, stride=2)
h = F.relu(self.conv2_1(h))
h = F.relu(self.conv2_2(h))
h = F.max_pooling_2d(h, ksize=2, stride=2)
h = F.relu(self.conv3_1(h))
h = F.relu(self.conv3_2(h))
h = F.relu(self.conv3_3(h))
h = F.relu(self.conv3_4(h))
h = F.max_pooling_2d(h, ksize=2, stride=2)
h = F.relu(self.conv4_1(h))
h = F.relu(self.conv4_2(h))
h = F.relu(self.conv4_3_CPM(h))
h = F.relu(self.conv4_4_CPM(h))
feature_map = h
# stage1
h1 = F.relu(self.conv5_1_CPM_L1(feature_map)) # branch1
h1 = F.relu(self.conv5_2_CPM_L1(h1))
h1 = F.relu(self.conv5_3_CPM_L1(h1))
h1 = F.relu(self.conv5_4_CPM_L1(h1))
h1 = self.conv5_5_CPM_L1(h1)
h2 = F.relu(self.conv5_1_CPM_L2(feature_map)) # branch2
h2 = F.relu(self.conv5_2_CPM_L2(h2))
h2 = F.relu(self.conv5_3_CPM_L2(h2))
h2 = F.relu(self.conv5_4_CPM_L2(h2))
h2 = self.conv5_5_CPM_L2(h2)
pafs.append(h1)
heatmaps.append(h2)
# stage2
h = F.concat((h1, h2, feature_map), axis=1) # channel concat
h1 = F.relu(self.Mconv1_stage2_L1(h)) # branch1
h1 = F.relu(self.Mconv2_stage2_L1(h1))
h1 = F.relu(self.Mconv3_stage2_L1(h1))
h1 = F.relu(self.Mconv4_stage2_L1(h1))
h1 = F.relu(self.Mconv5_stage2_L1(h1))
h1 = F.relu(self.Mconv6_stage2_L1(h1))
h1 = self.Mconv7_stage2_L1(h1)
h2 = F.relu(self.Mconv1_stage2_L2(h)) # branch2
h2 = F.relu(self.Mconv2_stage2_L2(h2))
h2 = F.relu(self.Mconv3_stage2_L2(h2))
h2 = F.relu(self.Mconv4_stage2_L2(h2))
h2 = F.relu(self.Mconv5_stage2_L2(h2))
h2 = F.relu(self.Mconv6_stage2_L2(h2))
h2 = self.Mconv7_stage2_L2(h2)
pafs.append(h1)
heatmaps.append(h2)
# stage3
h = F.concat((h1, h2, feature_map), axis=1) # channel concat
h1 = F.relu(self.Mconv1_stage3_L1(h)) # branch1
h1 = F.relu(self.Mconv2_stage3_L1(h1))
h1 = F.relu(self.Mconv3_stage3_L1(h1))
h1 = F.relu(self.Mconv4_stage3_L1(h1))
h1 = F.relu(self.Mconv5_stage3_L1(h1))
h1 = F.relu(self.Mconv6_stage3_L1(h1))
h1 = self.Mconv7_stage3_L1(h1)
h2 = F.relu(self.Mconv1_stage3_L2(h)) # branch2
h2 = F.relu(self.Mconv2_stage3_L2(h2))
h2 = F.relu(self.Mconv3_stage3_L2(h2))
h2 = F.relu(self.Mconv4_stage3_L2(h2))
h2 = F.relu(self.Mconv5_stage3_L2(h2))
h2 = F.relu(self.Mconv6_stage3_L2(h2))
h2 = self.Mconv7_stage3_L2(h2)
pafs.append(h1)
heatmaps.append(h2)
# stage4
h = F.concat((h1, h2, feature_map), axis=1) # channel concat
h1 = F.relu(self.Mconv1_stage4_L1(h)) # branch1
h1 = F.relu(self.Mconv2_stage4_L1(h1))
h1 = F.relu(self.Mconv3_stage4_L1(h1))
h1 = F.relu(self.Mconv4_stage4_L1(h1))
h1 = F.relu(self.Mconv5_stage4_L1(h1))
h1 = F.relu(self.Mconv6_stage4_L1(h1))
h1 = self.Mconv7_stage4_L1(h1)
h2 = F.relu(self.Mconv1_stage4_L2(h)) # branch2
h2 = F.relu(self.Mconv2_stage4_L2(h2))
h2 = F.relu(self.Mconv3_stage4_L2(h2))
h2 = F.relu(self.Mconv4_stage4_L2(h2))
h2 = F.relu(self.Mconv5_stage4_L2(h2))
h2 = F.relu(self.Mconv6_stage4_L2(h2))
h2 = self.Mconv7_stage4_L2(h2)
pafs.append(h1)
heatmaps.append(h2)
# stage5
h = F.concat((h1, h2, feature_map), axis=1) # channel concat
h1 = F.relu(self.Mconv1_stage5_L1(h)) # branch1
h1 = F.relu(self.Mconv2_stage5_L1(h1))
h1 = F.relu(self.Mconv3_stage5_L1(h1))
h1 = F.relu(self.Mconv4_stage5_L1(h1))
h1 = F.relu(self.Mconv5_stage5_L1(h1))
h1 = F.relu(self.Mconv6_stage5_L1(h1))
h1 = self.Mconv7_stage5_L1(h1)
h2 = F.relu(self.Mconv1_stage5_L2(h)) # branch2
h2 = F.relu(self.Mconv2_stage5_L2(h2))
h2 = F.relu(self.Mconv3_stage5_L2(h2))
h2 = F.relu(self.Mconv4_stage5_L2(h2))
h2 = F.relu(self.Mconv5_stage5_L2(h2))
h2 = F.relu(self.Mconv6_stage5_L2(h2))
h2 = self.Mconv7_stage5_L2(h2)
pafs.append(h1)
heatmaps.append(h2)
# stage6
h = F.concat((h1, h2, feature_map), axis=1) # channel concat
h1 = F.relu(self.Mconv1_stage6_L1(h)) # branch1
h1 = F.relu(self.Mconv2_stage6_L1(h1))
h1 = F.relu(self.Mconv3_stage6_L1(h1))
h1 = F.relu(self.Mconv4_stage6_L1(h1))
h1 = F.relu(self.Mconv5_stage6_L1(h1))
h1 = F.relu(self.Mconv6_stage6_L1(h1))
h1 = self.Mconv7_stage6_L1(h1)
h2 = F.relu(self.Mconv1_stage6_L2(h)) # branch2
h2 = F.relu(self.Mconv2_stage6_L2(h2))
h2 = F.relu(self.Mconv3_stage6_L2(h2))
h2 = F.relu(self.Mconv4_stage6_L2(h2))
h2 = F.relu(self.Mconv5_stage6_L2(h2))
h2 = F.relu(self.Mconv6_stage6_L2(h2))
h2 = self.Mconv7_stage6_L2(h2)
pafs.append(h1)
heatmaps.append(h2)
return pafs, heatmaps
def _download_pretrained_model(model_type, dest_path):
from chainer.links import caffe
if os.path.exists(dest_path):
raise OSError('destination already exists: %s' % dest_path)
basename, ext = os.path.splitext(models[model_type])
url = base_url + basename + '.caffemodel'
caffe_model_path = chainer.dataset.cached_download(url)
if not os.path.exists(caffe_model_path):
raise OSError('caffe model does not exist: %s' % caffe_model_path)
print('Converting to chainer model')
caffe_model = caffe.CaffeFunction(caffe_model_path)
chainer_model = PoseNet(pretrained_model=None)
for link in chainer_model.links():
if not isinstance(link, chainer.Link) or not link.name:
continue
if eval('chainer_model.{0}.b.shape == caffe_model["{0}"].b.shape'.format(link.name)) and\
eval('chainer_model.{0}.W.shape == caffe_model["{0}"].W.shape'.format(link.name)):
exec('chainer_model.{0}.W.data = caffe_model["{0}"].W.data'.format(link.name))
exec('chainer_model.{0}.b.data = caffe_model["{0}"].b.data'.format(link.name))
print('Copied layer {0}'.format(link.name))
else:
print('Failed to copy layer {0}'.format(link.name))
chainer.serializers.save_npz(dest_path, chainer_model)
return True
| 49.564904 | 132 | 0.611135 | 18,242 | 0.884718 | 0 | 0 | 0 | 0 | 0 | 0 | 1,167 | 0.056598 |
a4c977d5b3a6032234335d5aeffe8536769352e7 | 4,073 | py | Python | wxtbx/phil_controls/text_base.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | 2 | 2021-03-18T12:31:57.000Z | 2022-03-14T06:27:06.000Z | wxtbx/phil_controls/text_base.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | null | null | null | wxtbx/phil_controls/text_base.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | 1 | 2020-02-04T15:39:06.000Z | 2020-02-04T15:39:06.000Z |
from __future__ import absolute_import, division, print_function
from wxtbx import phil_controls
import wxtbx
from libtbx.utils import Abort, to_unicode, to_str
from libtbx import Auto
import wx
import sys
class ValidatedTextCtrl(wx.TextCtrl, phil_controls.PhilCtrl):
def __init__(self, *args, **kwds):
saved_value = None
if (kwds.get('value', "") != ""):
saved_value = kwds['value']
kwds['value'] = ""
super(ValidatedTextCtrl, self).__init__(*args, **kwds)
font = wx.Font(wxtbx.default_font_size, wx.MODERN, wx.NORMAL, wx.NORMAL)
self.SetFont(font)
style = self.GetWindowStyle()
if (not style & wx.TE_PROCESS_ENTER):
style |= wx.TE_PROCESS_ENTER
self.SetWindowStyle(style)
self.SetValidator(self.CreateValidator())
self.Bind(wx.EVT_TEXT_ENTER, self.OnEnter, self)
self.Bind(wx.EVT_KILL_FOCUS, self.OnFocusLost, self)
if saved_value is not None:
if (type(saved_value) == str):
save_value = to_unicode(saved_value)
self.SetValue(saved_value)
def GetValue(self):
val = wx.TextCtrl.GetValue(self)
if wxtbx.is_unicode_build():
return to_str(val)
else :
assert isinstance(val, str)
return val
def OnEnter(self, evt=None):
#self.Validate()
self.DoSendEvent()
def OnFocusLost(self, event):
self.DoSendEvent()
event.Skip()
def CreateValidator(self):
raise NotImplementedError()
def Validate(self):
# XXX why doesn't self.Validate() work?
if self.GetValidator().Validate(self.GetParent()):
return True
else :
raise Abort()
def FormatValue(self, value):
raise NotImplementedError()
def GetPhilValue(self):
raise NotImplementedError()
def GetStringValue(self):
value = self.GetPhilValue()
if (value is not None) and (value is not Auto):
return self.FormatValue(value)
elif (self.UseAuto()) or (value is Auto):
return Auto
return None
def Enable(self, enable=True):
wx.TextCtrl.Enable(self, enable)
if enable :
self.SetBackgroundColour((255,255,255))
else :
self.SetBackgroundColour((200,200,200))
class TextCtrlValidator(wx.PyValidator):
def __init__(self):
wx.PyValidator.__init__(self)
self.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)
def Clone(self):
return self.__class__()
def TransferToWindow(self):
return True
def TransferFromWindow(self):
return True
def CheckFormat(self, value):
raise NotImplementedError()
def Validate(self, win):
ctrl = self.GetWindow()
try :
value = to_unicode(ctrl.GetValue())
# if isinstance(value, str):
# value = value.decode("utf-8")
if (value == ""):
ctrl.SetBackgroundColour(
wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW))
return True
reformatted = self.CheckFormat(value)
if isinstance(reformatted, str):
reformatted = to_unicode(reformatted)
ctrl.SetValue(reformatted)
ctrl.SetBackgroundColour(
wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW))
#ctrl.SetFocus()
ctrl.Refresh()
return True
except NotImplementedError :
raise
except Exception as e :
ctrl_name = str(ctrl.GetName())
msg = "Inappropriate value given for \"%s\": %s" %(ctrl_name,str(e))
if (type(e).__name__ == "UnicodeEncodeError"):
msg = ("You have entered characters which cannot be converted to "+
"Latin characters in the control '%s'; due to limitations of the "+
"underlying code, only the standard UTF-8 character set is "+
"allowed.") % ctrl_name
wx.MessageBox(caption="Format error", message=msg)
ctrl.SetBackgroundColour("red")
# Don't set focus on Windows since messagebox is modal and thus
# would automatically recapture focus leading to an endless UI loop
if (sys.platform != 'win32'):
ctrl.SetFocus()
ctrl.Refresh()
return False
def OnEnter(self, event):
#self.Validate(None)
ctrl = self.GetWindow()
ctrl.DoSendEvent()
| 29.729927 | 77 | 0.667076 | 3,862 | 0.948195 | 0 | 0 | 0 | 0 | 0 | 0 | 594 | 0.145838 |
a4c983cf47d6c5bcf243e3a9a793a1b99475e9b4 | 20,235 | py | Python | cogs/commands/misc/misc.py | DiscordGIR/Bloo | e23172950ebb664cc96d91222b35a90f7d9802c0 | [
"MIT"
] | 34 | 2021-10-30T16:48:28.000Z | 2022-03-25T03:22:12.000Z | cogs/commands/misc/misc.py | DiscordGIR/Bloo | e23172950ebb664cc96d91222b35a90f7d9802c0 | [
"MIT"
] | 9 | 2021-11-19T04:25:29.000Z | 2022-03-09T22:35:46.000Z | cogs/commands/misc/misc.py | DiscordGIR/Bloo | e23172950ebb664cc96d91222b35a90f7d9802c0 | [
"MIT"
] | 20 | 2021-11-05T21:14:59.000Z | 2022-03-30T21:15:40.000Z | import base64
import datetime
import io
import json
import traceback
import aiohttp
import discord
import pytimeparse
from data.services.guild_service import guild_service
from discord.commands import Option, slash_command, message_command, user_command
from discord.ext import commands
from discord.utils import format_dt
from PIL import Image
from utils.autocompleters import (bypass_autocomplete, get_ios_cfw,
rule_autocomplete)
from utils.config import cfg
from utils.context import BlooContext
from utils.logger import logger
from utils.menu import BypassMenu
from utils.permissions.checks import (PermissionsFailure, mod_and_up,
whisper, whisper_in_general)
from utils.permissions.permissions import permissions
from utils.permissions.slash_perms import slash_perms
from yarl import URL
class PFPView(discord.ui.View):
def __init__(self, ctx: BlooContext):
super().__init__(timeout=30)
self.ctx = ctx
async def on_timeout(self):
for child in self.children:
child.disabled = True
await self.ctx.respond_or_edit(view=self)
class PFPButton(discord.ui.Button):
def __init__(self, ctx: BlooContext, member: discord.Member):
super().__init__(label="Show other avatar", style=discord.ButtonStyle.primary)
self.ctx = ctx
self.member = member
self.other = False
async def callback(self, interaction: discord.Interaction):
if interaction.user != self.ctx.author:
return
if not self.other:
avatar = self.member.guild_avatar
self.other = not self.other
else:
avatar = self.member.avatar or self.member.default_avatar
self.other = not self.other
embed = interaction.message.embeds[0]
embed.set_image(url=avatar.replace(size=4096))
animated = ["gif", "png", "jpeg", "webp"]
not_animated = ["png", "jpeg", "webp"]
def fmt(format_):
return f"[{format_}]({avatar.replace(format=format_, size=4096)})"
if avatar.is_animated():
embed.description = f"View As\n {' '.join([fmt(format_) for format_ in animated])}"
else:
embed.description = f"View As\n {' '.join([fmt(format_) for format_ in not_animated])}"
await interaction.response.edit_message(embed=embed)
class BypassDropdown(discord.ui.Select):
def __init__(self, ctx, apps):
self.ctx = ctx
self.apps = {app.get("bundleId"): app for app in apps}
options = [
discord.SelectOption(label=app.get("name"), value=app.get("bundleId"), description="Bypasses found" if app.get("bypasses") else "No bypasses found", emoji='<:appstore:392027597648822281>') for app in apps
]
super().__init__(placeholder='Pick an app...',
min_values=1, max_values=1, options=options)
async def callback(self, interaction):
if interaction.user != self.ctx.author:
return
self.view.stop()
app = self.apps.get(self.values[0])
self.ctx.app = app
if not app.get("bypasses"):
await self.ctx.send_error("No bypasses found for this app!")
return
menu = BypassMenu(self.ctx, app.get("bypasses"), per_page=1,
page_formatter=format_bypass_page, whisper=self.ctx.whisper)
await menu.start()
async def on_timeout(self):
self.disabled = True
self.placeholder = "Timed out"
await self.ctx.edit(view=self._view)
def format_bypass_page(ctx, entries, current_page, all_pages):
ctx.current_bypass = entries[0]
embed = discord.Embed(title=ctx.app.get(
"name"), color=discord.Color.blue())
embed.set_thumbnail(url=ctx.app.get("icon"))
embed.description = f"You can use **{ctx.current_bypass.get('name')}**!"
if ctx.current_bypass.get("notes") is not None:
embed.add_field(name="Note", value=ctx.current_bypass.get('notes'))
embed.color = discord.Color.orange()
if ctx.current_bypass.get("version") is not None:
embed.add_field(name="Supported versions",
value=f"This bypass works on versions {ctx.current_bypass.get('version')} of the app")
embed.set_footer(
text=f"Powered by ios.cfw.guide • Bypass {current_page} of {len(all_pages)}")
return embed
class Misc(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.spam_cooldown = commands.CooldownMapping.from_cooldown(
3, 15.0, commands.BucketType.channel)
try:
with open('emojis.json') as f:
self.emojis = json.loads(f.read())
except:
raise Exception(
"Could not find emojis.json. Make sure to run scrape_emojis.py")
@whisper()
@slash_command(guild_ids=[cfg.guild_id], description="Send yourself a reminder after a given time gap")
async def remindme(self, ctx: BlooContext, reminder: Option(str, description="What do you want to be reminded?"), duration: Option(str, description="When do we remind you? (i.e 1m, 1h, 1d)")):
"""Sends you a reminder after a given time gap
Example usage
-------------
/remindme 1h bake the cake
Parameters
----------
dur : str
"After when to send the reminder"
reminder : str
"What to remind you of"
"""
now = datetime.datetime.now()
delta = pytimeparse.parse(duration)
if delta is None:
raise commands.BadArgument(
"Please give me a valid time to remind you! (i.e 1h, 30m)")
time = now + datetime.timedelta(seconds=delta)
if time < now:
raise commands.BadArgument("Time has to be in the future >:(")
reminder = discord.utils.escape_markdown(reminder)
ctx.tasks.schedule_reminder(ctx.author.id, reminder, time)
# natural_time = humanize.naturaldelta(
# delta, minimum_unit='seconds')
embed = discord.Embed(title="Reminder set", color=discord.Color.random(
), description=f"We'll remind you {discord.utils.format_dt(time, style='R')}")
await ctx.respond(embed=embed, ephemeral=ctx.whisper, delete_after=5)
@slash_command(guild_ids=[cfg.guild_id], description="Post large version of a given emoji")
async def jumbo(self, ctx: BlooContext, emoji: str):
"""Posts large version of a given emoji
Example usage
-------------
/jumbo <emote>
Parameters
----------
emoji : str
"Emoji to enlarge"
"""
# non-mod users will be ratelimited
bot_chan = guild_service.get_guild().channel_botspam
if not permissions.has(ctx.guild, ctx.author, 5) and ctx.channel.id != bot_chan:
bucket = self.spam_cooldown.get_bucket(ctx.interaction)
if bucket.update_rate_limit():
raise commands.BadArgument("This command is on cooldown.")
# is this a regular Unicode emoji?
try:
em = await commands.PartialEmojiConverter().convert(ctx, emoji)
except commands.PartialEmojiConversionFailure:
em = emoji
if isinstance(em, str):
async with ctx.typing():
emoji_url_file = self.emojis.get(em)
if emoji_url_file is None:
raise commands.BadArgument(
"Couldn't find a suitable emoji.")
im = Image.open(io.BytesIO(base64.b64decode(emoji_url_file)))
image_conatiner = io.BytesIO()
im.save(image_conatiner, 'png')
image_conatiner.seek(0)
_file = discord.File(image_conatiner, filename='image.png')
await ctx.respond(file=_file)
else:
await ctx.respond(em.url)
@whisper()
@slash_command(guild_ids=[cfg.guild_id], description="Get avatar of another user or yourself.")
async def avatar(self, ctx: BlooContext, member: Option(discord.Member, description="User to get avatar of", required=False)) -> None:
"""Posts large version of a given emoji
Example usage
-------------
/avatar member:<member>
Parameters
----------
member : discord.Member, optional
"Member to get avatar of"
"""
if member is None:
member = ctx.author
await self.handle_avatar(ctx, member)
@whisper()
@user_command(guild_ids=[cfg.guild_id], name="View avatar")
async def avatar_rc(self, ctx: BlooContext, member: discord.Member):
await self.handle_avatar(ctx, member)
@whisper()
@message_command(guild_ids=[cfg.guild_id], name="View avatar")
async def avatar_msg(self, ctx: BlooContext, message: discord.Message):
await self.handle_avatar(ctx, message.author)
async def handle_avatar(self, ctx, member: discord.Member):
embed = discord.Embed(title=f"{member}'s avatar")
animated = ["gif", "png", "jpeg", "webp"]
not_animated = ["png", "jpeg", "webp"]
avatar = member.avatar or member.default_avatar
def fmt(format_):
return f"[{format_}]({avatar.replace(format=format_, size=4096)})"
if member.display_avatar.is_animated():
embed.description = f"View As\n {' '.join([fmt(format_) for format_ in animated])}"
else:
embed.description = f"View As\n {' '.join([fmt(format_) for format_ in not_animated])}"
embed.set_image(url=avatar.replace(size=4096))
embed.color = discord.Color.random()
view = PFPView(ctx)
if member.guild_avatar is not None:
view.add_item(PFPButton(ctx, member))
view.message = await ctx.respond(embed=embed, ephemeral=ctx.whisper, view=view)
@whisper_in_general()
@slash_command(guild_ids=[cfg.guild_id], description="View information about a CVE")
async def cve(self, ctx: BlooContext, id: str):
"""View information about a CVE
Example usage
-------------
/cve <id>
Parameters
----------
id : str
"ID of CVE to lookup"
"""
try:
async with aiohttp.ClientSession() as client:
async with client.get(URL(f'https://cve.circl.lu/api/cve/{id}', encoded=True)) as resp:
response = json.loads(await resp.text())
embed = discord.Embed(title=response.get(
'id'), color=discord.Color.random())
embed.description = response.get('summary')
embed.add_field(name="Published", value=response.get(
'Published'), inline=True)
embed.add_field(name="Last Modified",
value=response.get('Modified'), inline=True)
embed.add_field(name="Complexity", value=response.get(
'access').get('complexity').title(), inline=False)
embed.set_footer(text="Powered by https://cve.circl.lu")
await ctx.respond(embed=embed, ephemeral=ctx.whisper)
except Exception:
raise commands.BadArgument("Could not find CVE.")
@whisper_in_general()
@slash_command(guild_ids=[cfg.guild_id], description="Find out how to bypass jailbreak detection for an app")
async def bypass(self, ctx: BlooContext, app: Option(str, description="Name of the app", autocomplete=bypass_autocomplete)):
await ctx.defer(ephemeral=ctx.whisper)
data = await get_ios_cfw()
bypasses = data.get('bypass')
matching_apps = [body for _, body in bypasses.items() if app.lower() in body.get("name").lower()]
if not matching_apps:
raise commands.BadArgument(
"The API does not recognize that app or there are no bypasses available.")
# matching_app = bypasses[matching_apps[0]]
# print(matching_app)
if len(matching_apps) > 1:
view = discord.ui.View(timeout=30)
apps = matching_apps[:25]
apps.sort(key=lambda x: x.get("name"))
menu = BypassDropdown(ctx, apps)
view.add_item(menu)
view.on_timeout = menu.on_timeout
embed = discord.Embed(
description="Which app would you like to view bypasses for?", color=discord.Color.blurple())
await ctx.respond(embed=embed, view=view, ephemeral=ctx.whisper)
else:
ctx.app = matching_apps[0]
bypasses = ctx.app.get("bypasses")
if not bypasses or bypasses is None or bypasses == [None]:
raise commands.BadArgument(
f"{ctx.app.get('name')} has no bypasses.")
menu = BypassMenu(ctx, ctx.app.get(
"bypasses"), per_page=1, page_formatter=format_bypass_page, whisper=ctx.whisper)
await menu.start()
@slash_command(guild_ids=[cfg.guild_id], description="Post the embed for one of the rules")
async def rule(self, ctx: BlooContext, title: Option(str, autocomplete=rule_autocomplete), user_to_mention: Option(discord.Member, description="User to mention in the response", required=False)):
if title not in self.bot.rule_cache.cache:
potential_rules = [r for r in self.bot.rule_cache.cache if title.lower() == r.lower(
) or title.strip() == f"{r} - {self.bot.rule_cache.cache[r].description}"[:100].strip()]
if not potential_rules:
raise commands.BadArgument(
"Rule not found! Title must match one of the embeds exactly, use autocomplete to help!")
title = potential_rules[0]
embed = self.bot.rule_cache.cache[title]
if user_to_mention is not None:
title = f"Hey {user_to_mention.mention}, have a look at this!"
else:
title = None
await ctx.respond_or_edit(content=title, embed=embed)
@slash_command(guild_ids=[cfg.guild_id], description="Get the topic for a channel")
async def topic(self, ctx: BlooContext, channel: Option(discord.TextChannel, description="Channel to get the topic from", required=False), user_to_mention: Option(discord.Member, description="User to mention in the response", required=False)):
"""get the channel's topic"""
channel = channel or ctx.channel
if channel.topic is None:
raise commands.BadArgument(f"{channel.mention} has no topic!")
if user_to_mention is not None:
title = f"Hey {user_to_mention.mention}, have a look at this!"
else:
title = None
embed = discord.Embed(title=f"#{channel.name}'s topic",
description=channel.topic, color=discord.Color.blue())
await ctx.respond_or_edit(content=title, embed=embed)
@mod_and_up()
@slash_command(guild_ids=[cfg.guild_id], description="Start a poll", permissions=slash_perms.mod_and_up())
async def poll(self, ctx: BlooContext, question: str, channel: Option(discord.TextChannel, required=False, description="Where to post the message") = None):
if channel is None:
channel = ctx.channel
embed=discord.Embed(description=question, color=discord.Color.random())
embed.timestamp = datetime.datetime.now()
embed.set_footer(text=f"Poll started by {ctx.author}")
message = await channel.send(embed=embed)
emojis = ['⬆️', '⬇️']
for emoji in emojis:
await message.add_reaction(emoji)
ctx.whisper = True
await ctx.send_success("Done!")
@slash_command(guild_ids=[cfg.guild_id], description="View the status of various Discord features")
@commands.guild_only()
async def dstatus(self, ctx):
async with aiohttp.ClientSession() as session:
async with session.get("https://discordstatus.com/api/v2/components.json") as resp:
if resp.status == 200:
components = await resp.json()
async with aiohttp.ClientSession() as session:
async with session.get("https://discordstatus.com/api/v2/incidents.json") as resp:
if resp.status == 200:
incidents = await resp.json()
api_status = components.get('components')[0].get('status').title() # API
mp_status = components.get('components')[4].get('status').title() # Media Proxy
pn_status = components.get('components')[6].get('status').title() # Push Notifications
s_status = components.get('components')[8].get('status').title() # Search
v_status = components.get('components')[11].get('status').title() # Voice
cf_status = components.get('components')[2].get('status').title() # Cloudflare
title = "All Systems Operational" if api_status == "Operational" and mp_status == "Operational" and pn_status == "Operational" and s_status == "Operational" and v_status == "Operational" and cf_status == "Operational" else "Known Incident"
color = discord.Color.green() if title == "All Systems Operational" else discord.Color.orange()
last_incident = incidents.get('incidents')[0].get('name')
last_status = incidents.get('incidents')[0].get('status').title()
last_created = datetime.datetime.strptime(incidents.get('incidents')[0].get('created_at'), "%Y-%m-%dT%H:%M:%S.%f%z")
last_update = datetime.datetime.strptime(incidents.get('incidents')[0].get('updated_at'), "%Y-%m-%dT%H:%M:%S.%f%z")
last_impact = incidents.get('incidents')[0].get('impact')
online = '<:status_online:942288772551278623>'
offline = '<:status_dnd:942288811818352652>'
incident_icons = {'none': '<:status_offline:942288832051679302>',
'maintenance': '<:status_total:942290485916073995>',
'minor': '<:status_idle:942288787000680499>',
'major': '<:status_dnd:942288811818352652>',
'critical': '<:status_dnd:942288811818352652>'}
embed = discord.Embed(title=title, description=f"""
{online if api_status == 'Operational' else offline} **API:** {api_status}
{online if mp_status == 'Operational' else offline} **Media Proxy:** {mp_status}
{online if pn_status == 'Operational' else offline} **Push Notifications:** {pn_status}
{online if s_status == 'Operational' else offline} **Search:** {s_status}
{online if v_status == 'Operational' else offline} **Voice:** {v_status}
{online if cf_status == 'Operational' else offline} **Cloudflare:** {cf_status}
__**Last outage information**__
**Incident:** {incident_icons.get(last_impact)} {last_incident}
**Status:** {online if last_status == 'Resolved' else offline} {last_status}
**Identified at:** {format_dt(last_created, style='F')}
**{'Resolved at' if last_status == 'Resolved' else 'Last updated'}:** {format_dt(last_update, style='F')}
""", color=color)
embed.set_footer(text="Powered by discordstatus.com")
await ctx.respond(embed=embed)
@topic.error
@rule.error
@poll.error
@bypass.error
@cve.error
@dstatus.error
@remindme.error
@jumbo.error
@avatar.error
async def info_error(self, ctx: BlooContext, error):
if isinstance(error, discord.ApplicationCommandInvokeError):
error = error.original
if (isinstance(error, commands.MissingRequiredArgument)
or isinstance(error, PermissionsFailure)
or isinstance(error, commands.BadArgument)
or isinstance(error, commands.BadUnionArgument)
or isinstance(error, commands.MissingPermissions)
or isinstance(error, commands.BotMissingPermissions)
or isinstance(error, commands.MaxConcurrencyReached)
or isinstance(error, commands.NoPrivateMessage)):
await ctx.send_error(error)
else:
await ctx.send_error("A fatal error occured. Tell <@109705860275539968> about this.")
logger.error(traceback.format_exc())
def setup(bot):
bot.add_cog(Misc(bot))
| 42.510504 | 247 | 0.629009 | 18,488 | 0.913213 | 0 | 0 | 14,292 | 0.705952 | 15,672 | 0.774117 | 5,593 | 0.276266 |
a4c9be815d012e1c1acce15131efcf8844daf07b | 14,699 | py | Python | src/preprocessing.py | smartdatalake/pathlearn | 9bf76e6ffc4c16800f53ed4e4985e6f6f3674756 | [
"Apache-2.0"
] | null | null | null | src/preprocessing.py | smartdatalake/pathlearn | 9bf76e6ffc4c16800f53ed4e4985e6f6f3674756 | [
"Apache-2.0"
] | null | null | null | src/preprocessing.py | smartdatalake/pathlearn | 9bf76e6ffc4c16800f53ed4e4985e6f6f3674756 | [
"Apache-2.0"
] | null | null | null | """This module provides various functions used to read/write and generate the data structures used for Path Learn"""
import os
import random as rnd
from collections import deque

import networkx as nx
import numpy as np
import pandas as pd
def find_single_paths(G, node, lim, paths_lim=float('inf')):
    """
    Enumerate simple paths starting at *node* via breadth-first expansion.

    Paths are represented as alternating node/edge-id sequences
    ``[n0, e0, n1, e1, n2, ...]``.

    :param G: A NetworkX (multi)graph; ``G[u][v]`` must map edge ids to data.
    :param node: Start node.
    :param lim: Maximum number of steps (edges) per path.
    :param paths_lim: Maximum number of paths to collect. The trivial seed
        path counts towards this limit, so at most ``paths_lim - 1`` paths
        are returned.
    :return: All paths of up to *lim* steps starting from *node*, excluding
        the trivial single-node path.
    """
    paths = []
    # deque gives O(1) popleft for the BFS frontier; list.pop(0) is O(n).
    to_extend = deque([[node]])
    while to_extend and len(paths) < paths_lim:
        cur_path = to_extend.popleft()
        paths.append(cur_path)
        # A path with k edges has 2*k + 1 entries (nodes interleaved with
        # edge ids); only extend paths still below the step limit.
        if len(cur_path) < 2 * lim + 1:
            last = cur_path[-1]
            for neigh in G[last]:
                if neigh not in cur_path:  # keep paths simple (no revisits)
                    for rel_id in G[last][neigh]:
                        to_extend.append(cur_path + [rel_id, neigh])
    # Drop the trivial seed path [node].
    return paths[1:]
def has_circles(path):
    """
    Check whether a path revisits any node.

    :param path: A sequence of alternating nodes/edges (nodes sit at even
        indices).
    :return: True if some node occurs more than once in the path.
    """
    # Nodes occupy the even positions of the alternating sequence; the
    # path has a circle exactly when a node value repeats.
    visited = path[::2]
    return len(visited) != len(set(visited))
def find_paths_between(G, start, end, length):
    """
    Find all simple paths up to a given length between two nodes.

    Uses a meet-in-the-middle strategy: forward paths from *start* are
    joined with reversed paths from *end* at their common last node.

    :param G: NetworkX graph.
    :param start: Start node.
    :param end: End node.
    :param length: Maximum path length (number of steps).
    :return: A set with all paths (tuples of alternating nodes/edge ids)
        of up to *length* steps from *start* to *end*.
    """
    # Split the step budget between the two endpoints; the forward half
    # gets the extra step when *length* is odd.
    length1 = (length + 1) // 2
    length2 = length // 2
    paths1 = find_single_paths(G, start, length1)
    paths2 = find_single_paths(G, end, length2)
    # Index backward paths by their last node so forward paths can be
    # joined with an O(1) lookup instead of a scan.
    path2_ind = {}
    for path2 in paths2:
        path2_ind.setdefault(path2[-1], []).append(path2)
    full_paths = set()
    # Direct edges between start and end (one-step paths) are added
    # explicitly, since the seed paths themselves are never joined.
    if end in G[start]:
        for edge_id in G[start][end]:
            full_paths.add((start, edge_id, end))
    for path1 in paths1:
        # Join with every backward path ending where this one does.
        # (Previously a bare except around the lookup silently swallowed
        # any exception, not just a missing key.)
        for ext_path in path2_ind.get(path1[-1], []):
            # Drop the shared meeting node from the backward path, then
            # reverse it so the joined sequence runs start -> end.
            full_path = tuple(path1 + list(reversed(ext_path[0:-1])))
            if not has_circles(full_path):
                full_paths.add(full_path)
    return full_paths
def find_pair_paths(G, all_pairs, length, rev_rel=None):
    """
    Find paths between a collection of node pairs.

    Paths that traverse the queried edge itself (in either direction) are
    discarded, so the target relation never serves as its own evidence.

    :param G: NetworkX graph whose edge data carries a 'type' attribute.
    :param all_pairs: A collection of (start, relation, end) triples.
    :param length: Maximum path length.
    :param rev_rel: Type of the reverse relation, for directed graphs.
        Defaults to the relation of the first pair.
    :return: A two-level dictionary T such that ``T[start][end]`` is the
        set of relation-type paths (tuples) connecting the pair.
    """
    T = {}
    if not rev_rel:
        rev_rel = all_pairs[0][1]
    for count, pair in enumerate(all_pairs):
        print('finding paths: ' + str(count) + '/' + str(len(all_pairs)))
        start, rel, end = pair[0], pair[1], pair[2]
        target_pair = (start, rel, end)
        rev_pair = (end, rev_rel, start)
        # Make sure the nested bucket exists even if no path survives.
        T.setdefault(start, {}).setdefault(end, set())
        paths = find_paths_between(G, start, end, length)
        for path in paths:
            path_ext = []
            dirty = False
            # Interior entries: even indices are nodes, odd indices are
            # edge ids; the endpoints themselves are not recorded.
            for i in range(1, len(path) - 1):
                if i % 2 == 0:
                    path_ext.append(path[i])
                else:
                    step_rel = G[path[i - 1]][path[i + 1]][path[i]]['type']
                    step_pair = (path[i - 1], step_rel, path[i + 1])
                    # Discard paths that use the queried edge itself (in
                    # either direction).
                    if step_pair == target_pair or step_pair == rev_pair:
                        dirty = True
                        break
                    path_ext.append(step_rel)
            if not dirty:
                T[start][end].add(tuple(path_ext))
    print('returning')
    return T
def find_single_pair_paths(G, pair, length, rev_rel=None):
    """
    Find all paths for a single pair of nodes.

    Paths that use the pair's own relation (or its reverse) as an edge are
    discarded.

    :param G: NetworkX graph.
    :param pair: The (start, relation, end) triple.
    :param length: Maximum path length.
    :param rev_rel: Reverse relation type, for directed graphs. Defaults
        to the pair's own relation.
    :return: A set of relation-type paths (tuples) between the pair.
    """
    if not rev_rel:
        rev_rel = pair[1]
    start, rel, end = pair[0], pair[1], pair[2]
    rev_pair = [end, rev_rel, start]
    paths_out = set()
    for path in find_paths_between(G, start, end, length):
        path_ext = []
        dirty = False
        # Walk the interior of the path: even indices hold nodes, odd
        # indices hold edge ids; endpoints are not recorded.
        for i in range(1, len(path) - 1):
            if i % 2 == 0:
                path_ext.append(path[i])
                continue
            # NOTE(review): edge types are read from 'elabel' here but
            # from 'type' in find_pair_paths — confirm which attribute
            # the graphs in use actually carry.
            step_rel = G[path[i - 1]][path[i + 1]][path[i]]['elabel']
            step_pair = [path[i - 1], step_rel, path[i + 1]]
            if step_pair == pair or step_pair == rev_pair:
                dirty = True
                break
            path_ext.append(step_rel)
        if not dirty:
            paths_out.add(tuple(path_ext))
    return paths_out
def add_paths(G, pair_set, steps, rev_rel, T):
    """
    Adds new paths to path dictionary T.
    :param G: Networkx graph.
    :param pair_set: A collection of pairs.
    :param steps: Maximum path length.
    :param rev_rel: Reverse relation type, for directed graphs.
    :param T: A path dictionary T.
    :return: A path dictionary T that includes paths for pairs in pair_set.
    """
    # Compute paths for the new pairs, then fold them into T in place.
    fresh = find_pair_paths(G, pair_set, steps, rev_rel)
    for src, targets in fresh.items():
        dst_map = T.setdefault(src, {})
        for dst, paths in targets.items():
            dst_map.setdefault(dst, set()).update(paths)
    return T
def add_Ts(T0, T1):
    """
    Merges two path dictionaries.
    :param T0: A path dictionary (mutated in place).
    :param T1: A path dictionary.
    :return: T0, extended with every path from T1.
    """
    for src, targets in T1.items():
        bucket = T0.setdefault(src, {})
        for dst, paths in targets.items():
            bucket.setdefault(dst, set()).update(paths)
    return T0
def graph_to_files(G, path):
    """
    Saves graph to file: one CSV per node type under <path>/nodes plus a
    single <path>/relations/relations.csv for all edges.
    :param G: NetworkX graph.
    :param path: Folder path.
    """
    nodes = list(G.nodes)
    node_types = [G.nodes[n]['type'] for n in nodes]
    # One CSV per node type (note: loop variable `type` shadows the builtin).
    for type in set(node_types):
        print('writing type '+str(type))
        node_feats = []
        nodes_of_type = [n for n in G if G.nodes[n]['type']==type]
        for n in nodes_of_type:
            # Fall back to a single 0 feature for nodes without features.
            node_feats.append([n] + (G.nodes[n]['features'] if 'features' in G.nodes[n] else [0]))
        # NOTE(review): column count is taken from the first row only —
        # assumes all nodes of one type share the feature length; confirm.
        column_names = ['id'] + ['feat_'+str(i) for i in range(len(node_feats[0])-1)]
        pd.DataFrame(node_feats,columns=column_names).fillna(0).to_csv(path+'/nodes/'+str(type)+'.csv',index=False)
    print('writing relations')
    edges = list(G.edges)
    edge_feats = []
    for e in edges:
        # e = (src, dst, key); the key indexes the multi-edge data dict.
        edge_feats.append([e[0], e[1], G[e[0]][e[1]][int(e[2])]['type']] + (G[e[0]][e[1]][int(e[2])]['features'] if 'features' in G[e[0]][e[1]][int(e[2])] else [0]))
    # Edge columns are sized to the longest feature row; shorter rows get
    # NaN-filled and then replaced with 0 by fillna.
    column_names = ['src', 'dst', 'type'] + ['feat_' + str(i) for i in range( max( [len(ef) for ef in edge_feats] ) - 3)]
    pd.DataFrame(edge_feats, columns=column_names).fillna(0).to_csv(path+'/relations/relations.csv',index=False)
def graph_from_files(path):
    """
    Reads graph from file (the layout written by graph_to_files).
    :param path: folder path.
    :return: NetworkX MultiDiGraph with node/edge 'type' and 'features'.
    """
    G = nx.MultiDiGraph()
    for file in os.listdir(path+'/nodes'):
        print('loading '+file)
        # Node type comes from the file name, e.g. 'gene.csv' -> 'gene'.
        # NOTE(review): uses split('.')[-2] while types_from_files uses
        # split('.')[0]; these differ for names with extra dots — confirm.
        node_type = file.split('.')[-2]
        nodes = pd.read_csv(path+'/nodes/'+file,dtype={0: str})
        for i, row in nodes.iterrows():
            row = list(row)
            # Column 0 is the node id; the rest are features.
            G.add_node(str(row[0]),type=node_type,features=row[1:])
    print('loading relations')
    edges = pd.read_csv(path+'/relations/relations.csv',dtype={0: str,1: str,2: str})
    for i, row in edges.iterrows():
        row = list(row)
        # Columns: src, dst, type, feat_0, feat_1, ...
        G.add_edge(str(row[0]),str(row[1]),type=str(row[2]),features=row[3:])
    return G
# def make_small_data(G):
# nodes = np.array(list(G.nodes))
# node_types = np.array([G.nodes[n]['type'] for n in nodes])
# sel_nodes = set()
# for type in set(node_types):
# print('writing type ' + str(type))
# node_feats = []
# for n in nodes[node_types == type][0:10000]:
# sel_nodes.add(n)
# node_feats.append([n] + G.nodes[n]['features'])
# column_names = ['id'] + ['feat_' + str(i) for i in range(len(node_feats[0]) - 1)]
# pd.DataFrame(node_feats, columns=column_names).fillna(0).to_csv('../data/small/' + '/nodes/' + str(type) + '.csv', index=False)
# print('writing relations')
# edges = list(G.edges)
# edge_feats = []
# for e in edges:
# if e[0] in sel_nodes and e[1] in sel_nodes:
# edge_feats.append([e[0], e[1], G[e[0]][e[1]][e[2]]['type']] + G[e[0]][e[1]][e[2]]['features'])
# column_names = ['src', 'dst', 'type'] + ['feat_' + str(i) for i in range(max([len(ef) for ef in edge_feats]) - 3)]
# pd.DataFrame(edge_feats, columns=column_names).fillna(0).to_csv('../data/small/' + '/relations/relations.csv', index=False)
def types_from_files(path):
    """
    Gets node/edge types of graph files.
    :param path: folder path.
    :return: dict with node/edge types.
    """
    # One node-type CSV per file: the type is the file-name stem.
    node_types = [fname.split('.')[0] for fname in os.listdir(path + '/nodes')]
    # Edge types live in the third column of the relations CSV.
    rel_col = pd.read_csv(path + '/relations/relations.csv').iloc[:, 2].astype(str)
    return {'node_types': node_types, 'edge_types': list(set(rel_col))}
def add_neg_samples(G, pos_pairs, samp_size, steps):
    """
    Performs negative sampling.
    :param G: NetworkX graph.
    :param pos_pairs: A collection of (head, rel_id, tail) pairs with edges.
    :param samp_size: Negative samples per existing edge (for each of the
        "near" and "far" pools).
    :param steps: Max length of random walk (ego-graph radius).
    :return: A list of positive and negative node pairs and their labels
        (1 for positive, 0 for negative).
    """
    pairs = []
    labels = []
    print(pos_pairs[0])
    # All negative tails share the node type of the first positive tail.
    tail_type = G.nodes[pos_pairs[0][2]]['type']
    print(tail_type)
    tails = set([n for n in G.nodes if G.nodes[n]['type'] == tail_type])
    for i, (head, rel_id, tail) in enumerate(pos_pairs):
        print('neg samples {}/{}'.format(i, len(pos_pairs)))
        pos = set(G[head])  # direct neighbours of the head node
        # "near" negatives: within `steps` hops but not directly connected.
        near = set(nx.ego_graph(G, head, steps).nodes) - pos
        # Bug fix: random.sample() requires a sequence — sampling from a
        # set was deprecated in Python 3.9 and removed in 3.11.
        near_samp = rnd.sample(list(near), min(len(near), samp_size))
        # "far" negatives: same node type, outside the ego graph.
        far = tails - pos - near
        far_samp = rnd.sample(list(far), min(len(far), samp_size))
        pairs.append([head, rel_id, tail])
        labels.append(1)
        for tail in near_samp:
            pairs.append([head, rel_id, tail])
            labels.append(0)
        for tail in far_samp:
            pairs.append([head, rel_id, tail])
            labels.append(0)
    return pairs, labels
def find_node_types(G, edge_type):
    """
    :param G: NetworkX graph.
    :param edge_type: Edge type.
    :return: Node types that correspond to the edge type; when both type
        labels are numeric strings, the numerically larger one is first.
    :raises NameError: if no edge of the requested type exists.
    """
    # Find one representative edge of the requested type.
    for e in G.edges:
        if G[e[0]][e[1]][e[2]]['type'] == edge_type:
            u, v = e[0], e[1]
            break
    utype = G.nodes[u]['type']
    vtype = G.nodes[v]['type']
    try:
        # Order numerically when both type labels parse as integers.
        if int(utype) > int(vtype):
            return utype, vtype
        else:
            return vtype, utype
    except (ValueError, TypeError):
        # Bug fix: was a bare `except:` which also swallowed unrelated
        # errors (e.g. KeyboardInterrupt).  Non-numeric type labels keep
        # the (source, target) order.
        return utype, vtype
def find_candidate_type(G, edge_type, src_node):
    """
    :param G: NetworkX graph.
    :param edge_type: An edge type.
    :param src_node: A source node.
    :return: The node type that is connected with edge_type to src_node.
    """
    src_type = G.nodes[src_node]['type']
    # Locate one representative edge of the requested type.
    for edge in G.edges:
        head, tail, key = edge[0], edge[1], edge[2]
        if G[head][tail][key]['type'] == edge_type:
            u, v = head, tail
            break
    head_type = G.nodes[u]['type']
    tail_type = G.nodes[v]['type']
    # Return the endpoint type on the opposite side of the source node.
    return tail_type if src_type == head_type else head_type
def filter_pairs(test_pairs, test_labels, pair_filter):
    '''
    :param test_pairs: Node pairs (lists, compared as tuples).
    :param test_labels: Labels, aligned with test_pairs.
    :param pair_filter: Set of pair tuples to exclude.
    :return: test_pairs and test_labels that do not exist in pair_filter
    '''
    new_pairs = []
    new_labels = []
    for pair, label in zip(test_pairs, test_labels):
        if tuple(pair) not in pair_filter:
            new_pairs.append(pair)
            new_labels.append(label)
    # Bug fix: previously returned the unfiltered inputs (test_pairs,
    # test_labels), so the filter had no effect at all.
    return new_pairs, new_labels
def make_train_data(G, edge_type, ntr, nvl, nts, steps=3, neg=5):
    """
    Builds train/validation/test link-prediction data for one edge type.
    :param G: NetworkX graph.
    :param edge_type: Edge type.
    :param ntr: Number of positive training pairs.
    :param nvl: Number of positive validation pairs.
    :param nts: Number of positive test pairs.
    :param steps: Maximum path length.
    :param neg: Negative samples per positive edge.
    :return: train pairs, train labels, validation pairs, validation
        labels, test pairs, test labels, path dictionary T
    """
    utype, vtype = find_node_types(G, edge_type)
    # Sample ntr+nvl+nts positive edges of the requested type whose
    # endpoints have the expected node types.
    sel_edges = rnd.sample([[e[0],edge_type,e[1]] for e in G.edges if G[e[0]][e[1]][e[2]]['type'] == edge_type
                            and G.nodes[e[0]]['type'] == utype and G.nodes[e[1]]['type'] == vtype], ntr+nvl+nts)
    # Each split: positives + negatives, then paths for every pair.
    train_pairs = sel_edges[0:ntr]
    train_pairs, train_labels = add_neg_samples(G,train_pairs, neg, steps)
    T_tr = find_pair_paths(G, train_pairs, steps)
    val_pairs = sel_edges[ntr:ntr+nvl]
    val_pairs, val_labels = add_neg_samples(G, val_pairs, neg, steps)
    T_vl = find_pair_paths(G, val_pairs, steps)
    test_pairs = sel_edges[ntr + nvl:ntr + nvl + nts]
    test_pairs, test_labels = add_neg_samples(G, test_pairs, neg, steps)
    # Drop test pairs (incl. negatives) already seen in train/validation.
    pair_filter = set([(u, r, v) for u, r, v in train_pairs + val_pairs])
    test_pairs, test_labels = filter_pairs(test_pairs, test_labels, pair_filter)
    T_ts = find_pair_paths(G, test_pairs, steps)
    # Single path dictionary covering all three splits.
    T = add_Ts(T_ts, add_Ts(T_tr, T_vl))
    return train_pairs, train_labels, val_pairs, val_labels, test_pairs, test_labels, T
| 32.023965 | 165 | 0.572148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,109 | 0.347575 |
a4cad8e8992ae30f9b134717c3b3a0717a48b60e | 9,781 | py | Python | refinery/bnpy/bnpy-dev/tests/merge/TestMergeHDPTopicModel.py | csa0001/Refinery | 0d5de8fc3d680a2c79bd0e9384b506229787c74f | [
"MIT"
] | 103 | 2015-01-13T00:48:14.000Z | 2021-11-08T10:53:22.000Z | refinery/bnpy/bnpy-dev/tests/merge/TestMergeHDPTopicModel.py | csa0001/Refinery | 0d5de8fc3d680a2c79bd0e9384b506229787c74f | [
"MIT"
] | 7 | 2015-02-21T04:03:40.000Z | 2021-08-23T20:24:54.000Z | refinery/bnpy/bnpy-dev/tests/merge/TestMergeHDPTopicModel.py | csa0001/Refinery | 0d5de8fc3d680a2c79bd0e9384b506229787c74f | [
"MIT"
] | 27 | 2015-01-23T00:54:31.000Z | 2020-12-30T14:30:50.000Z | '''
Unit tests for MergeMove.py for HDPTopicModels
Verifies that merging works as expected and produces valid models.
Attributes
------------
self.Data : K=4 simple WordsData object from AbstractBaseTestForHDP
self.hmodel : K=4 simple bnpy model from AbstractBaseTestForHDP
Coverage
-----------
* run_many_merge_moves
* fails to merge away any true comps
* successfully merges away all duplicated comps when chosen randomly
* successfully merges away all duplicated comps when chosen via marglik
* run_merge_move
* fails to merge away any true comps
* successfully merges away all duplicated comps when targeted specifically
* successfully merges away all duplicated comps when chosen randomly
* successfully merges away all duplicated comps when chosen via marglik
success rate > 95%
'''
import numpy as np
import unittest
from AbstractBaseTestForHDP import AbstractBaseTestForHDP
import bnpy
from bnpy.learnalg import MergeMove
from scipy.special import digamma
import copy
class TestMergeHDP(AbstractBaseTestForHDP):
    # NOTE: Python 2 test suite (uses `print` statements).  Exercises
    # MergeMove both on the ground-truth model (K=4, no merge should be
    # accepted) and on a model with every true component duplicated
    # (K=8, exactly the four (k, k+4) twin pairs should merge).

    def getSuffStatsPrepForMerge(self, hmodel):
        ''' With merge flags ENABLED,
            run Estep, calc suff stats, then do an Mstep
        '''
        LP = hmodel.calc_local_params(self.Data)
        flagDict = dict(doPrecompEntropy=True, doPrecompMergeEntropy=True)
        SS = hmodel.get_global_suff_stats(self.Data, LP, **flagDict)
        hmodel.update_global_params(SS)
        return LP, SS

    ######################################################### Test many moves
    #########################################################
    def test_run_many_merge_moves_trueModel_random(self):
        # On the true model no merge is ever accepted: every component
        # is needed to explain some of the data.
        LP, SS = self.getSuffStatsPrepForMerge(self.hmodel)
        PRNG = np.random.RandomState(0)
        mergeKwArgs = dict(mergename='random')
        a, b, c, MTracker = MergeMove.run_many_merge_moves(self.hmodel,
                                      self.Data, SS,
                                      nMergeTrials=100, randstate=PRNG,
                                      **mergeKwArgs)
        # All K*(K-1)/2 candidate pairs get tried, none accepted.
        assert MTracker.nTrial == SS.K * (SS.K-1)/2
        assert MTracker.nSuccess == 0

    def test_run_many_merge_moves_dupModel_random(self):
        # With duplicated comps, exactly the four twin pairs merge away.
        self.MakeModelWithDuplicatedComps()
        LP, SS = self.getSuffStatsPrepForMerge(self.dupModel)
        PRNG = np.random.RandomState(0)
        mergeKwArgs = dict(mergename='random')
        a, b, c, MTracker = MergeMove.run_many_merge_moves(self.dupModel,
                                      self.Data, SS,
                                      nMergeTrials=100, randstate=PRNG,
                                      **mergeKwArgs)
        assert MTracker.nSuccess == 4
        assert (0,4) in MTracker.acceptedOrigIDs
        assert (1,5) in MTracker.acceptedOrigIDs
        assert (2,6) in MTracker.acceptedOrigIDs
        assert (3,7) in MTracker.acceptedOrigIDs

    def test_run_many_merge_moves_dupModel_marglik(self):
        # Marginal-likelihood selection should find the four twin pairs
        # directly: 4 trials, 4 successes.
        self.MakeModelWithDuplicatedComps()
        LP, SS = self.getSuffStatsPrepForMerge(self.dupModel)
        PRNG = np.random.RandomState(456)
        mergeKwArgs = dict(mergename='marglik')
        a, b, c, MTracker = MergeMove.run_many_merge_moves(self.dupModel,
                                      self.Data, SS,
                                      nMergeTrials=100, randstate=PRNG,
                                      **mergeKwArgs)
        for msg in MTracker.InfoLog:
            print msg
        assert MTracker.nSuccess == 4
        assert MTracker.nTrial == 4
        assert (0,4) in MTracker.acceptedOrigIDs
        assert (1,5) in MTracker.acceptedOrigIDs
        assert (2,6) in MTracker.acceptedOrigIDs
        assert (3,7) in MTracker.acceptedOrigIDs

    ######################################################### run_merge_move
    ######################################################### full tests
    def test_model_matches_ground_truth_as_precheck(self):
        ''' Verify HDPmodel is able to learn ground truth parameters
            and maintain stable estimates after several E/M steps
        '''
        np.set_printoptions(precision=3,suppress=True)
        # Advance the model several iterations
        for rr in range(5):
            self.run_Estep_then_Mstep()
        for k in range(self.hmodel.obsModel.K):
            logtopicWordHat = self.hmodel.obsModel.comp[k].Elogphi
            topicWordHat = np.exp(logtopicWordHat)
            diffVec = np.abs(topicWordHat - self.Data.TrueParams['topics'][k])
            print diffVec
            print ' '
            # Each learned topic must stay close to its true counterpart.
            assert np.max(diffVec) < 0.04

    ######################################################### run_merge_move
    ######################################################### full tests
    def test_run_merge_move_on_true_comps_fails(self):
        ''' Should not be able to merge "true" components into one another
            Each is necessary to explain (some) data
        '''
        mergeFlags = dict(doPrecompEntropy=True, doPrecompMergeEntropy=True)
        LP = self.hmodel.calc_local_params(self.Data)
        SS = self.hmodel.get_global_suff_stats(self.Data, LP, **mergeFlags)
        for trial in range(10):
            newModel, newSS, newEv, MoveInfo = MergeMove.run_merge_move(self.hmodel, self.Data, SS, mergename='random')
            # K must be unchanged: no merge accepted.
            assert newModel.allocModel.K == self.hmodel.allocModel.K
            assert newModel.obsModel.K == self.hmodel.obsModel.K

    def test_run_merge_move_on_dup_comps_succeeds_with_each_ideal_pair(self):
        ''' Given the duplicated comps model,
            which has a redundant copy of each "true" component,
            We show that deliberately merging each pair does succeed.
            This is "ideal" since we know in advance which merge pair to try
        '''
        self.MakeModelWithDuplicatedComps()
        mergeFlags = dict(doPrecompEntropy=True, doPrecompMergeEntropy=True)
        LP = self.dupModel.calc_local_params(self.Data)
        SS = self.dupModel.get_global_suff_stats(self.Data, LP, **mergeFlags)
        for kA in [0,1,2,3]:
            kB = kA + 4  # Ktrue=4, so kA's best match is kA+4
            newModel, newSS, newEv, MoveInfo = MergeMove.run_merge_move(self.dupModel,
                                                  self.Data, SS, kA=kA, kB=kB)
            print MoveInfo['msg']
            # An accepted merge removes exactly one component.
            assert newModel.allocModel.K == self.dupModel.allocModel.K - 1
            assert newModel.obsModel.K == self.dupModel.obsModel.K - 1
            assert MoveInfo['didAccept'] == 1

    def test_run_merge_move_on_dup_comps_fails_with_nonideal_pairs(self):
        ''' Given the duplicated comps model,
            which has a redundant copy of each "true" component,
            verify that merging any pair that is NOT a (k, k+4) twin
            is rejected.
        '''
        self.MakeModelWithDuplicatedComps()
        mergeFlags = dict(doPrecompEntropy=True, doPrecompMergeEntropy=True)
        LP = self.dupModel.calc_local_params(self.Data)
        SS = self.dupModel.get_global_suff_stats(self.Data, LP, **mergeFlags)
        # Kstep == 4 is deliberately skipped: those are the twin pairs.
        for Kstep in [1,2,3,5,6,7]:
            for kA in range(8 - Kstep):
                kB = kA + Kstep
                newM, newSS, newEv, MoveInfo = MergeMove.run_merge_move(self.dupModel,
                                                   self.Data, SS, kA=kA, kB=kB)
                print MoveInfo['msg']
                assert MoveInfo['didAccept'] == 0

    def test_run_merge_move_on_dup_comps_succeeds_with_all_ideal_pairs(self):
        # Merge all four twin pairs in sequence on the same model/SS.
        self.MakeModelWithDuplicatedComps()
        mergeFlags = dict(doPrecompEntropy=True, doPrecompMergeEntropy=True)
        LP = self.dupModel.calc_local_params(self.Data)
        SS = self.dupModel.get_global_suff_stats(self.Data, LP, **mergeFlags)
        myModel = self.dupModel.copy()
        for kA in [3,2,1,0]:  # descend backwards so indexing still works
            kB = kA + 4  # Ktrue=4, so kA's best match is kA+4
            myModel, SS, newEv, MoveInfo = MergeMove.run_merge_move(myModel,
                                               self.Data, SS, kA=kA, kB=kB)
            print MoveInfo['msg']
            assert MoveInfo['didAccept'] == 1

    def test_run_merge_move_on_dup_comps_succeeds_with_random_choice(self):
        ''' Consider Duplicated Comps model.
            Out of (8 choose 2) = 28 possible pairs,
            exactly 4 produce sensible merges.
            Verify that over many random trials where kA,kB drawn uniformly,
            we obtain a success rate not too different from 4 / 28 = 0.142857
        '''
        self.MakeModelWithDuplicatedComps()
        mergeFlags = dict(doPrecompEntropy=True, doPrecompMergeEntropy=True)
        LP = self.dupModel.calc_local_params(self.Data)
        SS = self.dupModel.get_global_suff_stats(self.Data, LP, **mergeFlags)
        nTrial = 100
        nSuccess = 0
        PRNG = np.random.RandomState(0)
        for trial in range(nTrial):
            newModel, newSS, newEv, MoveInfo = MergeMove.run_merge_move(self.dupModel, self.Data, SS, mergename='random', randstate=PRNG)
            if MoveInfo['didAccept']:
                print MoveInfo['msg']
                nSuccess += 1
        assert nSuccess > 0
        rate = float(nSuccess)/float(nTrial)
        print "Expected rate: .1428"
        print "Measured rate: %.3f" % (rate)
        # Loose band around the theoretical 4/28 acceptance rate.
        assert rate > 0.1
        assert rate < 0.2

    def test_run_merge_move_on_dup_comps_succeeds_with_marglik_choice(self):
        ''' Consider Duplicated Comps model.
            Use marglik criteria to select candidates kA, kB.
            Verify that the merge accept rate is much higher than at random.
            The accept rate should actually be near perfect!
        '''
        self.MakeModelWithDuplicatedComps()
        mergeFlags = dict(doPrecompEntropy=True, doPrecompMergeEntropy=True)
        LP = self.dupModel.calc_local_params(self.Data)
        SS = self.dupModel.get_global_suff_stats(self.Data, LP, **mergeFlags)
        nTrial = 100
        nSuccess = 0
        PRNG = np.random.RandomState(0)
        for trial in range(nTrial):
            newModel, newSS, newEv, MoveInfo = MergeMove.run_merge_move(self.dupModel, self.Data, SS, mergename='marglik', randstate=PRNG)
            print MoveInfo['msg']
            if MoveInfo['didAccept']:
                nSuccess += 1
        assert nSuccess > 0
        rate = float(nSuccess)/float(nTrial)
        print "Expected rate: >.95"
        print "Measured rate: %.3f" % (rate)
        assert rate > 0.95
| 42.71179 | 132 | 0.661896 | 8,773 | 0.896943 | 0 | 0 | 0 | 0 | 0 | 0 | 2,971 | 0.303752 |
a4cb13969e913f20fb909e4c665d75e81e4a9503 | 5,309 | py | Python | bluzelle/codec/crud/Paging_pb2.py | hhio618/bluezelle-py | c38a07458a36305457680196e8c47372008db5ab | [
"MIT"
] | 3 | 2021-08-19T10:09:29.000Z | 2022-01-05T14:19:59.000Z | bluzelle/codec/crud/Paging_pb2.py | hhio618/bluzelle-py | c38a07458a36305457680196e8c47372008db5ab | [
"MIT"
] | null | null | null | bluzelle/codec/crud/Paging_pb2.py | hhio618/bluzelle-py | c38a07458a36305457680196e8c47372008db5ab | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: crud/Paging.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# NOTE: everything below is emitted by protoc from crud/Paging.proto —
# regenerate the file instead of editing these descriptors by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
    name="crud/Paging.proto",
    package="bluzelle.curium.crud",
    syntax="proto3",
    serialized_options=b"Z'github.com/bluzelle/curium/x/crud/types",
    create_key=_descriptor._internal_create_key,
    serialized_pb=b'\n\x11\x63rud/Paging.proto\x12\x14\x62luzelle.curium.crud"0\n\rPagingRequest\x12\x10\n\x08startKey\x18\x01 \x01(\t\x12\r\n\x05limit\x18\x02 \x01(\x04"0\n\x0ePagingResponse\x12\x0f\n\x07nextKey\x18\x01 \x01(\t\x12\r\n\x05total\x18\x02 \x01(\x04\x42)Z\'github.com/bluzelle/curium/x/crud/typesb\x06proto3',
)

# Message descriptor: PagingRequest { string startKey = 1; uint64 limit = 2; }
_PAGINGREQUEST = _descriptor.Descriptor(
    name="PagingRequest",
    full_name="bluzelle.curium.crud.PagingRequest",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="startKey",
            full_name="bluzelle.curium.crud.PagingRequest.startKey",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=b"".decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="limit",
            full_name="bluzelle.curium.crud.PagingRequest.limit",
            index=1,
            number=2,
            type=4,
            cpp_type=4,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=43,
    serialized_end=91,
)

# Message descriptor: PagingResponse { string nextKey = 1; uint64 total = 2; }
_PAGINGRESPONSE = _descriptor.Descriptor(
    name="PagingResponse",
    full_name="bluzelle.curium.crud.PagingResponse",
    filename=None,
    file=DESCRIPTOR,
    containing_type=None,
    create_key=_descriptor._internal_create_key,
    fields=[
        _descriptor.FieldDescriptor(
            name="nextKey",
            full_name="bluzelle.curium.crud.PagingResponse.nextKey",
            index=0,
            number=1,
            type=9,
            cpp_type=9,
            label=1,
            has_default_value=False,
            default_value=b"".decode("utf-8"),
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
        _descriptor.FieldDescriptor(
            name="total",
            full_name="bluzelle.curium.crud.PagingResponse.total",
            index=1,
            number=2,
            type=4,
            cpp_type=4,
            label=1,
            has_default_value=False,
            default_value=0,
            message_type=None,
            enum_type=None,
            containing_type=None,
            is_extension=False,
            extension_scope=None,
            serialized_options=None,
            file=DESCRIPTOR,
            create_key=_descriptor._internal_create_key,
        ),
    ],
    extensions=[],
    nested_types=[],
    enum_types=[],
    serialized_options=None,
    is_extendable=False,
    syntax="proto3",
    extension_ranges=[],
    oneofs=[],
    serialized_start=93,
    serialized_end=141,
)

DESCRIPTOR.message_types_by_name["PagingRequest"] = _PAGINGREQUEST
DESCRIPTOR.message_types_by_name["PagingResponse"] = _PAGINGRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message classes generated from the descriptors above.
PagingRequest = _reflection.GeneratedProtocolMessageType(
    "PagingRequest",
    (_message.Message,),
    {
        "DESCRIPTOR": _PAGINGREQUEST,
        "__module__": "crud.Paging_pb2"
        # @@protoc_insertion_point(class_scope:bluzelle.curium.crud.PagingRequest)
    },
)
_sym_db.RegisterMessage(PagingRequest)

PagingResponse = _reflection.GeneratedProtocolMessageType(
    "PagingResponse",
    (_message.Message,),
    {
        "DESCRIPTOR": _PAGINGRESPONSE,
        "__module__": "crud.Paging_pb2"
        # @@protoc_insertion_point(class_scope:bluzelle.curium.crud.PagingResponse)
    },
)
_sym_db.RegisterMessage(PagingResponse)

DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 30.687861 | 323 | 0.63722 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,258 | 0.236956 |
a4cb393ccfc40dfac226961281c5eae50416352c | 2,249 | py | Python | Python/Utilities/dater.py | S-c-r-a-t-c-h-y/coding-projects | cad33aedb72720c3e3a37c7529e55abd3edb291a | [
"MIT"
] | null | null | null | Python/Utilities/dater.py | S-c-r-a-t-c-h-y/coding-projects | cad33aedb72720c3e3a37c7529e55abd3edb291a | [
"MIT"
] | null | null | null | Python/Utilities/dater.py | S-c-r-a-t-c-h-y/coding-projects | cad33aedb72720c3e3a37c7529e55abd3edb291a | [
"MIT"
] | null | null | null | import sys
import time
from datetime import datetime, timedelta
import os
import pathlib
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
# Watched folders on the C: drive; files dropped into one of them get
# renamed with a date prefix.  Folder names are French:
creation = "C:\\Date de création"            # "creation date"
today = "C:\\Date d'aujourd'hui"             # "today's date"
last_modification = "C:\\Date de dernière modification"  # "last modification date"
class MyHandler(FileSystemEventHandler):
    """Renames files moved into a tracked folder with a date prefix
    (dd-mm-YYYY) and moves them back to where they came from."""

    # A filesystem "move" shows up as a delete followed by a create, so
    # on_deleted records (file_name, source_folder) pairs here for
    # on_created to look up.  Class-level: shared across instances.
    deleted_files = []
    tracked_folders = [creation, today, last_modification]

    def on_created(self, event):
        file_name = event.src_path.split("\\")[-1]
        folder = "\\".join(event.src_path.split("\\")[:-1])
        # Only react if this file was just deleted somewhere else,
        # i.e. this create completes a move.
        if file_name in (only_file_names := [couple[0] for couple in self.deleted_files]):
            couple = self.deleted_files[only_file_names.index(file_name)]
            if folder in self.tracked_folders:
                date = ""
                if folder == today:
                    date = datetime.today().strftime("%d-%m-%Y")
                elif folder == creation:
                    creation_date = datetime.fromtimestamp(pathlib.Path(event.src_path).stat().st_ctime).date()
                    modification_date = datetime.fromtimestamp(pathlib.Path(event.src_path).stat().st_mtime).date()
                    # Use whichever of ctime/mtime is older (moving a
                    # file can make st_ctime newer than st_mtime).
                    date = (
                        creation_date.strftime("%d-%m-%Y")
                        if (creation_date - modification_date) < timedelta()
                        else modification_date.strftime("%d-%m-%Y")
                    )
                elif folder == last_modification:
                    date = (
                        datetime.fromtimestamp(pathlib.Path(event.src_path).stat().st_mtime).date().strftime("%d-%m-%Y")
                    )
                # Move the file back to its source folder (couple[1]),
                # now prefixed with the chosen date.
                os.rename(event.src_path, f"{couple[1]}\\{date} - {file_name}")
                self.deleted_files.remove(couple)

    def on_deleted(self, event):
        # Remember (file_name, source_folder) so a following create can
        # be recognised as a move.
        self.deleted_files.append((event.src_path.split("\\")[-1], "\\".join(event.src_path.split("\\")[:-1])))
if __name__ == "__main__":
    # Watch the whole C: drive recursively; the handler itself filters
    # down to the three tracked folders.
    path = "C:\\"
    event_handler = MyHandler()
    observer = Observer()
    observer.schedule(event_handler, path, recursive=True)
    observer.start()
    try:
        # Keep the main thread alive; the observer works in background
        # threads until Ctrl-C.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
a4cbc8eb1a90dacf94afc3353aa00d270b02f07d | 7,956 | py | Python | blendhunter/blend.py | andrevitorelli/BlendHunter | 64e4c97e7e0995e2d3561300dff708ca86adb380 | [
"MIT"
] | 2 | 2019-07-01T15:12:50.000Z | 2021-03-03T18:57:06.000Z | blendhunter/blend.py | ablacan/BlendHunter | 2806648bcdd1b192457daf1c7ae1f90869deeca6 | [
"MIT"
] | 4 | 2021-04-27T11:31:17.000Z | 2021-07-13T09:01:42.000Z | blendhunter/blend.py | ablacan/BlendHunter | 2806648bcdd1b192457daf1c7ae1f90869deeca6 | [
"MIT"
] | 1 | 2022-02-10T13:39:54.000Z | 2022-02-10T13:39:54.000Z | # -*- coding: utf-8 -*-
""" BLEND
This module defines classes and methods for blending images.
:Author: Samuel Farrens <samuel.farrens@cea.fr>
"""
import numpy as np
from lmfit import Model
from lmfit.models import GaussianModel, ConstantModel
from modopt.base.np_adjust import pad2d
from sf_tools.image.stamp import postage_stamp
from sf_tools.image.distort import recentre
class Blender(object):
    """Creates blended (overlapping) galaxy postage stamps from pairs of
    input images.

    The first half of ``images`` is used as central objects, the second
    half as companions; each pair is combined either with the 'sf' method
    (Gaussian-fit centring plus a random shift) or the 'xwang' method.

    :param images: Array of images; an odd trailing image is dropped.
    :param ratio: Scales the shift radius in the 'sf' method.
    :param overlap: Whether the companion may overlap the central object.
    :param stamp_shape: Output postage-stamp shape.
    :param method: 'sf' or 'xwang'.
    :param xwang_sigma: Exclusion-box scale for the 'xwang' method.
    :param seed: Optional random seed for the 'sf' shifts.
    """

    def __init__(self, images, ratio=1.0, overlap=True, stamp_shape=(116, 116),
                 method='sf', xwang_sigma=0.15, seed=None):
        self.ratio = ratio
        self.overlap = overlap
        self.stamp_shape = np.array(stamp_shape)
        if method in ('sf', 'xwang'):
            self.method = method
        else:
            raise ValueError('Method must be "sf" or "xwang".')
        self.xwang_sigma = xwang_sigma
        self.seed = seed
        # Require an even number of images so they split into
        # (central, companion) pairs; drop the last one otherwise.
        if images.shape[0] % 2:
            images = images[:-1]
        half_sample = images.shape[0] // 2
        self._images = images
        self._centrals = images[:half_sample]
        self._companions = images[half_sample:]
        self.obj_centres = []

    @staticmethod
    def _fit_gauss(xval, yval):
        """Fit a 1D Gaussian (lmfit) to the profile (xval, yval)."""
        model = GaussianModel()
        result = model.fit(yval, model.guess(yval, x=xval,
                           amplitude=np.max(yval)), x=xval)
        return result

    @classmethod
    def _fit_image(cls, image):
        """Estimate the object centre and width from Gaussian fits to the
        image's summed x and y profiles."""
        sum_x = image.sum(axis=0)
        sum_y = image.sum(axis=1)
        x_vals = np.arange(sum_x.size)
        sum_x_fit = cls._fit_gauss(x_vals, sum_x)
        sum_y_fit = cls._fit_gauss(x_vals, sum_y)
        centre = (int(sum_x_fit.params['center'].value),
                  int(sum_y_fit.params['center'].value))
        width = min(sum_x_fit.params['fwhm'].value,
                    sum_y_fit.params['fwhm'].value)
        return centre, width

    @staticmethod
    def _random_shift(radius, outer_radius=None, seed=None):
        """Draw a random integer (x, y) shift inside `radius`, or inside
        the annulus [radius, outer_radius] when `outer_radius` is set."""
        if seed:
            np.random.seed(seed)
        theta = np.random.ranf() * 2 * np.pi
        if outer_radius:
            r = radius + np.random.ranf() * (outer_radius - radius)
        else:
            r = np.random.ranf() * radius
        x = int(np.around(r * np.cos(theta)))
        y = int(np.around(r * np.sin(theta)))
        return x, y

    @staticmethod
    def _pad_image_shift(image, shift):
        """Zero-pad `image` on the side implied by the sign of each
        component of `shift`."""
        pad = [(_shift, 0) if _shift >= 0 else (0, -_shift)
               for _shift in shift]
        return np.pad(image, pad, 'constant')

    @classmethod
    def _blend(cls, image1, image2, shift):
        """Add `image2`, offset by `shift`, onto `image1`."""
        dim = image1.shape
        image2 = cls._pad_image_shift(image2, shift)
        # Crop the padded companion back to image1's shape.
        image2 = image2[:dim[0]] if shift[0] >= 0 else image2[-shift[0]:]
        image2 = image2[:, :dim[1]] if shift[1] >= 0 else image2[:, -shift[1]:]
        return image1 + image2

    @staticmethod
    def _gal_size_xwang(image):
        """Extent (pixel count) of the non-zero region along each axis."""
        return np.array([np.count_nonzero(image.sum(axis=ax))
                         for ax in range(2)])

    @staticmethod
    def _area_prob(shape1, shape2):
        """Probabilities of the four sectors (a, b, c, d) surrounding the
        central exclusion box, proportional to their areas.  The four
        values sum to 1."""
        shape1, shape2 = np.array(shape1), np.array(shape2)
        area = np.prod(shape1) - np.prod(shape2)
        shape_diff = (shape1 - shape2) // 2
        prob_ab = shape_diff[1] * shape1[0] / area
        prob_cd = 0.5 - prob_ab
        return prob_ab, prob_ab, prob_cd, prob_cd

    @classmethod
    def _blend_pos_xwang(cls, centre, box, limits, overlap=True):
        """Draw a companion position: inside the `box` around `centre`
        when overlapping, otherwise in one of four sectors outside it."""
        centre, box, limits = np.array(centre), np.array(box), np.array(limits)
        if overlap:
            blend_pos = [np.random.randint(centre[i] - box[i],
                         centre[i] + box[i]) for i in range(2)]
        else:
            # Bug fix: the helper is named _area_prob (calling the
            # non-existent cls.area_prob raised AttributeError).
            sector = np.random.choice(['a', 'b', 'c', 'd'],
                                      p=cls._area_prob(centre * 2, box))
            blend_pos = [None, None]
            if sector == 'a':
                blend_pos[0] = np.random.randint(limits[0][0], limits[1][0])
                blend_pos[1] = np.random.randint(limits[0][1],
                                                 centre[1] - box[1])
            elif sector == 'b':
                blend_pos[0] = np.random.randint(limits[0][0], limits[1][0])
                blend_pos[1] = np.random.randint(centre[1] + box[1],
                                                 limits[1][1])
            elif sector == 'c':
                blend_pos[0] = np.random.randint(limits[0][0],
                                                 centre[0] - box[0])
                blend_pos[1] = np.random.randint(centre[1] - box[1],
                                                 centre[1] + box[1])
            elif sector == 'd':
                # Bug fix: the axis-0 upper bound is limits[1][0]
                # (was limits[1][1], the axis-1 bound — only equivalent
                # for square images).
                blend_pos[0] = np.random.randint(centre[0] + box[0],
                                                 limits[1][0])
                blend_pos[1] = np.random.randint(centre[1] - box[1],
                                                 centre[1] + box[1])
        return blend_pos

    @classmethod
    def _blend_xwang(cls, image1, image2, ps_shape=(116, 116), sigma=0.15,
                     overlap=True):
        """Blend `image2` into `image1` with the Xin Wang placement
        recipe and cut a postage stamp of shape `ps_shape`."""
        shape1, shape2 = np.array(image1.shape), np.array(image2.shape)
        rad2 = shape2 // 2
        ps_shape = np.array(ps_shape)
        shape_diff = (ps_shape - shape1) // 2 + shape2
        # Exclusion-box size scales with the combined object extents.
        dis = cls._gal_size_xwang(image1) + cls._gal_size_xwang(image2)
        box = np.around(sigma * dis).astype(int)
        padding = ((shape_diff[0], shape_diff[0]),
                   (shape_diff[1], shape_diff[1]))
        new_image = np.pad(image1, padding, 'constant')
        new_shape = np.array(new_image.shape)
        new_centre = new_shape // 2
        limits = rad2, new_shape - rad2
        # Bug fix: honour the caller's `overlap` flag (it was hard-coded
        # to True, making non-overlapping blends impossible).
        bp = cls._blend_pos_xwang(new_centre, box, limits, overlap=overlap)
        # NOTE(review): this slice spans 2 * (shape2 // 2) + 1 pixels, so
        # it only matches image2 for odd-sized companions — confirm inputs.
        blend_slice = [slice(bp[i] - shape2[i] // 2,
                             bp[i] + shape2[i] // 2 + 1) for i in range(2)]
        new_image[blend_slice[0], blend_slice[1]] += image2
        new_image = postage_stamp(new_image, pos=new_centre,
                                  pixel_rad=ps_shape // 2)
        return new_image

    def _pad_image(self, image):
        """Zero-pad `image` up to the configured stamp shape."""
        if not isinstance(image, np.ndarray):
            print(type(image))
        im_shape = np.array(image.shape)
        padding = (self.stamp_shape - im_shape) // 2
        return pad2d(image, padding)

    def _combine_images(self, image1, image2):
        """Blend one (central, companion) pair with the configured method
        and, for 'sf', record the two object centres."""
        if self.method == 'xwang':
            res = self._blend_xwang(image1, image2, ps_shape=self.stamp_shape,
                                    sigma=self.xwang_sigma,
                                    overlap=self.overlap)
        else:
            centre1, width1 = self._fit_image(image1)
            centre2, width2 = self._fit_image(image2)
            image1 = self._pad_image(recentre(image1, centre1))
            image2 = self._pad_image(recentre(image2, centre2))
            radius = self.ratio * (width1 + width2)
            outer_radius = image1.shape[0] / 2.
            if self.overlap:
                shift = self._random_shift(radius, seed=self.seed)
            else:
                shift = self._random_shift(radius, outer_radius=outer_radius,
                                           seed=self.seed)
            im1_cen = np.array(image1.shape) // 2
            # shift is (x, y); array coordinates are (row, col).
            im2_cen = np.copy(im1_cen) + np.array(shift)[::-1]
            self.obj_centres.append((tuple(im1_cen), tuple(im2_cen)))
            res = self._blend(image1, image2, shift)
        return res

    def blend(self):
        """Blend all (central, companion) pairs; returns one array."""
        blends = [self._combine_images(image1, image2) for image1, image2 in
                  zip(self._centrals, self._companions)]
        return np.array(blends)

    def pad(self):
        """Pad all images to the stamp shape without blending."""
        im1_cen = np.array(self._pad_image(self._images[0]).shape) // 2
        res = []
        for image in self._images:
            res.append(self._pad_image(image))
            # No companion: second centre is recorded as (None, None).
            self.obj_centres.append((tuple(im1_cen), (None, None)))
        return np.array(res)
| 31.571429 | 79 | 0.54085 | 7,573 | 0.95186 | 0 | 0 | 4,896 | 0.615385 | 0 | 0 | 275 | 0.034565 |
a4cc37aae42aa69844101a9983d06cd085b2f1ce | 1,159 | py | Python | Guia/4.0-Estados_conversacion.py | nicosiebert2/telegram-bot | 4d2a44382290f6fa3b94572cbeba344ef68c825d | [
"MIT"
] | null | null | null | Guia/4.0-Estados_conversacion.py | nicosiebert2/telegram-bot | 4d2a44382290f6fa3b94572cbeba344ef68c825d | [
"MIT"
] | null | null | null | Guia/4.0-Estados_conversacion.py | nicosiebert2/telegram-bot | 4d2a44382290f6fa3b94572cbeba344ef68c825d | [
"MIT"
] | null | null | null | import logging
from telegram import InlineKeyboardButton, InlineKeyboardMarkup
from telegram.ext import Updater, CommandHandler, CallbackQueryHandler, ConversationHandler
#Obtener la info de la sesion
logging.basicConfig(level = logging.INFO, format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger = logging.getLogger()
# COMMAND HANDLER FUNCTIONS
def start(update, context):
    """Handler for /start: greets the user and shows the button menu."""
    update.message.reply_text("Hola perro")
    botones0(update)
# BUTTON KEYBOARD FUNCTION
def botones0(update):
    """Sends an inline keyboard with a single 'Airdrop' button.

    The button's callback_data ('airdrop') is what a
    CallbackQueryHandler pattern can match on.
    """
    btn1 = InlineKeyboardButton(text = 'Airdrop', callback_data = 'airdrop')
    update.message.reply_text(
        text = 'heyyy',
        reply_markup = InlineKeyboardMarkup([
            [btn1]
        ]
        )
    )
# NOTE(review): the bot token is hard-coded here — move it to an
# environment variable and revoke this one.
updater = Updater("1899599200:AAEkbuuuoytTKpTyiuiTdggO3_ycLUUaEOs", use_context=True)
dp = updater.dispatcher
dp.add_handler(CommandHandler("start", start))
# Conversation states
# NOTE(review): `callback` must be a callable handler function, not the
# string 'airdrop' — as written this entry point cannot dispatch; confirm
# which function should handle the 'airdrop' button press.
dp.add_handler(ConversationHandler(
    entry_points = [
        CallbackQueryHandler(pattern = 'airdrop', callback= 'airdrop')
    ],
    states = {},
    fallbacks = []
))
updater.start_polling()
updater.idle()
a4cde45972393a744163a78ff712575ad44f9c88 | 42,133 | py | Python | src/radical/pilot/unit_manager.py | karahbit/radical.pilot | c611e1df781749deef899dcf5815728e1d8a962e | [
"MIT"
] | null | null | null | src/radical/pilot/unit_manager.py | karahbit/radical.pilot | c611e1df781749deef899dcf5815728e1d8a962e | [
"MIT"
] | null | null | null | src/radical/pilot/unit_manager.py | karahbit/radical.pilot | c611e1df781749deef899dcf5815728e1d8a962e | [
"MIT"
] | null | null | null |
__copyright__ = "Copyright 2013-2016, http://radical.rutgers.edu"
__license__ = "MIT"
import os
import time
import threading as mt
import radical.utils as ru
from . import utils as rpu
from . import states as rps
from . import constants as rpc
from . import compute_unit_description as rpcud
# bulk callbacks are implemented, but are currently not used nor exposed.
_USE_BULK_CB = False
if os.environ.get('RADICAL_PILOT_BULK_CB', '').lower() in ['true', 'yes', '1']:
_USE_BULK_CB = True
# ------------------------------------------------------------------------------
#
class UnitManager(rpu.Component):
"""
A UnitManager manages :class:`radical.pilot.ComputeUnit` instances which
represent the **executable** workload in RADICAL-Pilot. A UnitManager
connects the ComputeUnits with one or more :class:`Pilot` instances (which
represent the workload **executors** in RADICAL-Pilot) and a **scheduler**
which determines which :class:`ComputeUnit` gets executed on which
:class:`Pilot`.
**Example**::
s = rp.Session(database_url=DBURL)
pm = rp.PilotManager(session=s)
pd = rp.ComputePilotDescription()
pd.resource = "futuregrid.alamo"
pd.cores = 16
p1 = pm.submit_pilots(pd) # create first pilot with 16 cores
p2 = pm.submit_pilots(pd) # create second pilot with 16 cores
# Create a workload of 128 '/bin/sleep' compute units
compute_units = []
for unit_count in range(0, 128):
cu = rp.ComputeUnitDescription()
cu.executable = "/bin/sleep"
cu.arguments = ['60']
compute_units.append(cu)
# Combine the two pilots, the workload and a scheduler via
# a UnitManager.
um = rp.UnitManager(session=session, scheduler=rp.SCHEDULER_ROUND_ROBIN)
um.add_pilot(p1)
um.submit_units(compute_units)
The unit manager can issue notification on unit state changes. Whenever
state notification arrives, any callback registered for that notification is
fired.
NOTE: State notifications can arrive out of order wrt the unit state model!
"""
# --------------------------------------------------------------------------
#
    def __init__(self, session, cfg='default', scheduler=None):
        """
        Creates a new UnitManager and attaches it to the session.
        **Arguments:**
            * session [:class:`radical.pilot.Session`]:
              The session instance to use.
            * cfg (`dict` or `string`):
              The configuration or name of configuration to use.
            * scheduler (`string`):
              The name of the scheduler plug-in to use.
        **Returns:**
            * A new `UnitManager` object [:class:`radical.pilot.UnitManager`].
        """
        # registries for pilots and units owned by this manager, each guarded
        # by its own reentrant lock
        self._pilots      = dict()
        self._pilots_lock = ru.RLock('umgr.pilots_lock')
        self._units       = dict()
        self._units_lock  = ru.RLock('umgr.units_lock')
        # callback registry, keyed as [metric][uid][cb_name]
        self._callbacks   = dict()
        self._cb_lock     = ru.RLock('umgr.cb_lock')
        self._terminate   = mt.Event()
        self._closed      = False
        self._rec_id      = 0     # used for session recording
        self._uid         = ru.generate_id('umgr.%(item_counter)04d',
                                           ru.ID_CUSTOM, ns=session.uid)
        # pre-populate the callback registry for all known metrics
        for m in rpc.UMGR_METRICS:
            self._callbacks[m] = dict()
        # NOTE: `name` and `cfg` are overloaded, the user cannot point to
        #       a predefined config and amend it at the same time.  This might
        #       be ok for the session, but introduces a minor API inconsistency.
        #
        name = None
        if isinstance(cfg, str):
            name = cfg
            cfg  = None
        cfg           = ru.Config('radical.pilot.umgr', name=name, cfg=cfg)
        cfg.uid       = self._uid
        cfg.owner     = self._uid
        cfg.sid       = session.uid
        cfg.base      = session.base
        cfg.path      = session.path
        cfg.dburl     = session.dburl
        cfg.heartbeat = session.cfg.heartbeat
        if scheduler:
            # overwrite the scheduler from the config file
            cfg.scheduler = scheduler
        rpu.Component.__init__(self, cfg, session=session)
        self.start()
        self._log.info('started umgr %s', self._uid)
        self._rep.info('<<create unit manager')
        # create pmgr bridges and components, use session cmgr for that
        self._cmgr = rpu.ComponentManager(self._cfg)
        self._cmgr.start_bridges()
        self._cmgr.start_components()
        # The output queue is used to forward submitted units to the
        # scheduling component.
        self.register_output(rps.UMGR_SCHEDULING_PENDING,
                             rpc.UMGR_SCHEDULING_QUEUE)
        # the umgr will also collect units from the agent again, for output
        # staging and finalization
        if self._cfg.bridges.umgr_staging_output_queue:
            self._has_sout = True
            self.register_output(rps.UMGR_STAGING_OUTPUT_PENDING,
                                 rpc.UMGR_STAGING_OUTPUT_QUEUE)
        else:
            self._has_sout = False
        # register the state notification pull cb
        # FIXME: this should be a tailing cursor in the update worker
        self.register_timed_cb(self._state_pull_cb,
                               timer=self._cfg['db_poll_sleeptime'])
        # register callback which pulls units back from agent
        # FIXME: this should be a tailing cursor in the update worker
        self.register_timed_cb(self._unit_pull_cb,
                               timer=self._cfg['db_poll_sleeptime'])
        # also listen to the state pubsub for unit state changes
        self.register_subscriber(rpc.STATE_PUBSUB, self._state_sub_cb)
        # let session know we exist
        self._session._register_umgr(self)
        self._prof.prof('setup_done', uid=self._uid)
        self._rep.ok('>>ok\n')
# --------------------------------------------------------------------------
#
    def initialize(self):
        """Component hook: install fork handlers on manager start."""
        # the manager must not carry bridge and component handles across forks
        ru.atfork(self._atfork_prepare, self._atfork_parent, self._atfork_child)
# --------------------------------------------------------------------------
#
# EnTK forks, make sure we don't carry traces of children across the fork
#
    def _atfork_prepare(self): pass
    def _atfork_parent(self) : pass
    # in the child, drop inherited bridge/component handles: they belong to
    # the parent process and must not be used (or closed) from the child
    def _atfork_child(self) :
        self._bridges = dict()
        self._components = dict()
# --------------------------------------------------------------------------
#
    def finalize(self):
        """Component hook: shut down bridges and components on termination."""
        self._cmgr.close()
# --------------------------------------------------------------------------
#
    def close(self):
        """
        Shut down the UnitManager and all its components.

        Idempotent: returns immediately if the manager is already closed.
        """
        # we do not cancel units at this point, in case any component or pilot
        # wants to continue to progress unit states, which should indeed be
        # independent from the umgr life cycle.
        if self._closed:
            return
        # signal all registered callbacks to unregister themselves
        self._terminate.set()
        self._rep.info('<<close unit manager')
        # disable callbacks during shutdown
        with self._cb_lock:
            self._callbacks = dict()
            for m in rpc.UMGR_METRICS:
                self._callbacks[m] = dict()
        self._cmgr.close()
        self._log.info("Closed UnitManager %s." % self._uid)
        self._closed = True
        self._rep.ok('>>ok\n')
# --------------------------------------------------------------------------
#
def as_dict(self):
"""
Returns a dictionary representation of the UnitManager object.
"""
ret = {
'uid': self.uid,
'cfg': self.cfg
}
return ret
# --------------------------------------------------------------------------
#
    def __str__(self):
        """
        Returns a string representation of the UnitManager object
        (the stringified `as_dict()` result).
        """
        return str(self.as_dict())
# --------------------------------------------------------------------------
#
    def _pilot_state_cb(self, pilots, state=None):
        """
        Watch pilots owned by this umgr: when a pilot reaches a final state,
        pull its unfinished units back under umgr control, mark them FAILED,
        and resubmit copies of those flagged `restartable`.

        Returns `False` to unregister the callback (on termination), `True`
        otherwise.
        """
        if self._terminate.is_set():
            return False
        # we register this callback for pilots added to this umgr. It will
        # specifically look out for pilots which complete, and will make sure
        # that all units are pulled back into umgr control if that happens
        # prematurely.
        #
        # If we find units which have not completed the agent part of the unit
        # state model, we declare them FAILED. If they can be restarted, we
        # resubmit an identical unit, which then will get a new unit ID. This
        # avoids state model confusion (the state model is right now expected to
        # be linear), but is not intuitive for the application (FIXME).
        #
        # FIXME: there is a race with the umgr scheduler which may, just now,
        #        and before being notified about the pilot's demise, send new
        #        units to the pilot.
        # we only look into pilot states when the umgr is still active
        # FIXME: note that there is a race in that the umgr can be closed while
        #        we are in the cb.
        # FIXME: `self._closed` is not an `mt.Event`!
        if self._closed:
            self._log.debug('umgr closed, ignore pilot cb %s',
                            ['%s:%s' % (p.uid, p.state) for p in pilots])
            return True
        if not isinstance(pilots, list):
            pilots = [pilots]
        for pilot in pilots:
            state = pilot.state
            if state in rps.FINAL:
                self._log.debug('pilot %s is final - pull units', pilot.uid)
                # find units which were still under agent control on this pilot
                unit_cursor = self.session._dbs._c.find({
                    'type'    : 'unit',
                    'pilot'   : pilot.uid,
                    'umgr'    : self.uid,
                    'control' : {'$in' : ['agent_pending', 'agent']}})
                if not unit_cursor.count():
                    units = list()
                else:
                    units = list(unit_cursor)
                self._log.debug("units pulled: %3d (pilot dead)", len(units))
                if not units:
                    continue
                # update the units to avoid pulling them again next time.
                # NOTE: this needs not locking with the unit pulling in the
                #       _unit_pull_cb, as that will only pull umgr_pending
                #       units.
                uids = [unit['uid'] for unit in units]
                self._session._dbs._c.update({'type'  : 'unit',
                                              'uid'   : {'$in' : uids}},
                                             {'$set'  : {'control' : 'umgr'}},
                                             multi=True)
                to_restart = list()
                for unit in units:
                    unit['state'] = rps.FAILED
                    if not unit['description'].get('restartable'):
                        self._log.debug('unit %s not restartable', unit['uid'])
                        continue
                    self._log.debug('unit %s is restartable', unit['uid'])
                    unit['restarted'] = True
                    # restart as a *new* unit with an identical description
                    ud = rpcud.ComputeUnitDescription(unit['description'])
                    to_restart.append(ud)
                    # FIXME: increment some restart counter in the description?
                    # FIXME: reference the resulting new uid in the old unit.
                if to_restart and not self._closed:
                    self._log.debug('restart %s units', len(to_restart))
                    restarted = self.submit_units(to_restart)
                    for u in restarted:
                        self._log.debug('restart unit %s', u.uid)
                # final units are not pushed
                self.advance(units, publish=True, push=False)
        # keep cb registered
        return True
# --------------------------------------------------------------------------
#
def _state_pull_cb(self):
if self._terminate.is_set():
return False
# pull all unit states from the DB, and compare to the states we know
# about. If any state changed, update the unit instance and issue
# notification callbacks as needed. Do not advance the state (again).
# FIXME: we also pull for dead units. That is not efficient...
# FIXME: this needs to be converted into a tailed cursor in the update
# worker
units = self._session._dbs.get_units(umgr_uid=self.uid)
for unit in units:
if not self._update_unit(unit, publish=True, advance=False):
return False
return True
# --------------------------------------------------------------------------
#
    def _unit_pull_cb(self):
        """
        Timed callback: pull units which the agent handed back to the umgr
        (`control == 'umgr_pending'`), take ownership of them, and route them
        either to output staging or straight to finalization.
        """
        if self._terminate.is_set():
            return False
        # pull units from the agent which are about to get back
        # under umgr control, and push them into the respective queues
        # FIXME: this should also be based on a tailed cursor
        # FIXME: Unfortunately, 'find_and_modify' is not bulkable, so we have
        #        to use 'find'. To avoid finding the same units over and over
        #        again, we update the 'control' field *before* running the next
        #        find -- so we do it right here.
        unit_cursor = self.session._dbs._c.find({'type'    : 'unit',
                                                 'umgr'    : self.uid,
                                                 'control' : 'umgr_pending'})
        if not unit_cursor.count():
            # no units whatsoever...
          # self._log.info("units pulled: 0")
            return True  # this is not an error
        # update the units to avoid pulling them again next time.
        units = list(unit_cursor)
        uids = [unit['uid'] for unit in units]
        self._log.info("units pulled: %d", len(uids))
        for unit in units:
            unit['control'] = 'umgr'
        self._session._dbs._c.update({'type'  : 'unit',
                                      'uid'   : {'$in' : uids}},
                                     {'$set'  : {'control' : 'umgr'}},
                                     multi=True)
        self._log.info("units pulled: %4d", len(units))
        self._prof.prof('get', msg="bulk size: %d" % len(units), uid=self.uid)
        for unit in units:
            # we need to make sure to have the correct state:
            uid = unit['uid']
            self._prof.prof('get', uid=uid)
            old = unit['state']
            new = rps._unit_state_collapse(unit['states'])
            if old != new:
                self._log.debug("unit pulled %s: %s / %s", uid, old, new)
                unit['state'] = new
        # now we really own the CUs, and can start working on them (ie. push
        # them into the pipeline).
        to_stage = list()
        to_finalize = list()
        for unit in units:
            # only advance units to data stager if we need data staging
            # = otherwise finalize them right away
            if unit['description'].get('output_staging'):
                to_stage.append(unit)
            else:
                to_finalize.append(unit)
        # don't profile state transitions - those happened in the past
        if to_stage:
            if self._has_sout:
                # normal route: needs data stager
                self.advance(to_stage, publish=True, push=True, prof=False)
            else:
                self._log.error('output staging needed but not available!')
                for unit in to_stage:
                    unit['target_state'] = rps.FAILED
                    to_finalize.append(unit)
        if to_finalize:
            # shortcut, skip the data stager, but fake state transition
            self.advance(to_finalize, state=rps.UMGR_STAGING_OUTPUT,
                         publish=True, push=False)
            # move to final state
            for unit in to_finalize:
                unit['state'] = unit['target_state']
            self.advance(to_finalize, publish=True, push=False)
        return True
# --------------------------------------------------------------------------
#
    def _state_sub_cb(self, topic, msg):
        """
        State pubsub subscriber: apply incoming 'update' messages for units to
        the local unit registry and fire the resulting state callbacks (in
        bulk when `_USE_BULK_CB` is set).
        """
        if self._terminate.is_set():
            return False
        cmd = msg.get('cmd')
        arg = msg.get('arg')
        if cmd != 'update':
            self._log.debug('ignore state cb msg with cmd %s', cmd)
            return True
        # `arg` may carry a single thing or a bulk of things
        if isinstance(arg, list): things = arg
        else : things = [arg]
        cb_requests = list()
        for thing in things:
            if thing.get('type') == 'unit':
                # we got the state update from the state callback - don't
                # publish it again
                to_notify = self._update_unit(thing, publish=False,
                                              advance=False)
                if to_notify:
                    cb_requests += to_notify
            else:
                self._log.debug('umgr state cb ignores %s/%s', thing.get('uid'),
                                thing.get('state'))
        if cb_requests:
            if _USE_BULK_CB:
                self._bulk_cbs(set([unit for unit,state in cb_requests]))
            else:
                for unit,state in cb_requests:
                    self._unit_cb(unit, state)
        return True
# --------------------------------------------------------------------------
#
    def _update_unit(self, unit_dict, publish=False, advance=False):
        """
        Fold an incoming unit document into the locally registered unit and
        replay any intermediate states.

        Returns a list of `[unit, state]` pairs for which callbacks should be
        fired, or `None` if the unit is unknown or its state did not change.
        """
        uid = unit_dict['uid']
        # return information about needed callback and advance activities, so
        # that we don't break bulks here.
        # note however that individual unit callbacks are still being called on
        # each unit (if any are registered), which can lead to arbitrary,
        # application defined delays.
        to_notify = list()
        with self._units_lock:
            # we don't care about units we don't know
            if uid not in self._units:
                self._log.debug('umgr: unknown: %s', uid)
                return None
            unit = self._units[uid]
            # only update on state changes
            current = unit.state
            target = unit_dict['state']
            if current == target:
                self._log.debug('umgr: static: %s', uid)
                return None
            target, passed = rps._unit_state_progress(uid, current, target)
            if target in [rps.CANCELED, rps.FAILED]:
                # don't replay intermediate states
                passed = passed[-1:]
            for s in passed:
                unit_dict['state'] = s
                self._units[uid]._update(unit_dict)
                to_notify.append([unit, s])
                # we don't usually advance state at this point, but just keep up
                # with state changes reported from elsewhere
                if advance:
                    self.advance(unit_dict, s, publish=publish, push=False,
                                 prof=False)
        self._log.debug('umgr: notify: %s %s %s', len(to_notify), unit_dict,
                        unit_dict['state'])
        return to_notify
# --------------------------------------------------------------------------
#
def _unit_cb(self, unit, state):
with self._cb_lock:
uid = unit.uid
cb_dicts = list()
metric = rpc.UNIT_STATE
# get wildcard callbacks
cb_dicts += self._callbacks[metric].get('*', {}).values()
cb_dicts += self._callbacks[metric].get(uid, {}).values()
for cb_dict in cb_dicts:
cb = cb_dict['cb']
cb_data = cb_dict['cb_data']
try:
if cb_data: cb(unit, state, cb_data)
else : cb(unit, state)
except:
self._log.exception('cb error (%s)', cb.__name__)
# --------------------------------------------------------------------------
#
def _bulk_cbs(self, units, metrics=None):
if not metrics: metrics = [rpc.UNIT_STATE]
else : metrics = ru.as_list(metrics)
cbs = dict() # bulked callbacks to call
with self._cb_lock:
for metric in metrics:
# get wildcard callbacks
cb_dicts = self._callbacks[metric].get('*')
for cb_name in cb_dicts:
cbs[cb_name] = {'cb' : cb_dicts[cb_name]['cb'],
'cb_data': cb_dicts[cb_name]['cb_data'],
'units' : set(units)}
# add unit specific callbacks if needed
for unit in units:
uid = unit.uid
if uid not in self._callbacks[metric]:
continue
cb_dicts = self._callbacks[metric].get(uid, {})
for cb_name in cb_dicts:
if cb_name in cbs:
cbs[cb_name]['units'].add(unit)
else:
cbs[cb_name] = {'cb' : cb_dicts[cb_name]['cb'],
'cb_data': cb_dicts[cb_name]['cb_data'],
'units' : set([unit])}
for cb_name in cbs:
cb = cbs[cb_name]['cb']
cb_data = cbs[cb_name]['cb_data']
objs = cbs[cb_name]['units']
if cb_data: cb(list(objs), cb_data)
else : cb(list(objs))
# --------------------------------------------------------------------------
#
# FIXME: this needs to go to the scheduler
    def _default_wait_queue_size_cb(self, umgr, wait_queue_size):
        """Default `WAIT_QUEUE_SIZE` callback: log the new queue size."""
        # FIXME: this needs to come from the scheduler?
        if self._terminate.is_set():
            return False
        self._log.info("[Callback]: wait_queue_size: %s.", wait_queue_size)
# --------------------------------------------------------------------------
#
    @property
    def uid(self):
        """
        Returns the unique id of this unit manager (`string`).
        """
        return self._uid
# --------------------------------------------------------------------------
#
    @property
    def scheduler(self):
        """
        Returns the name of the configured scheduler plug-in (`string` or
        `None` if not set).
        """
        return self._cfg.get('scheduler')
# --------------------------------------------------------------------------
#
    def add_pilots(self, pilots):
        """
        Associates one or more pilots with the unit manager.
        **Arguments:**
            * **pilots** [:class:`radical.pilot.ComputePilot` or list of
              :class:`radical.pilot.ComputePilot`]: The pilot objects that will be
              added to the unit manager.

        Raises `ValueError` if the list is empty or a pilot was added before.
        """
        if not isinstance(pilots, list):
            pilots = [pilots]
        if len(pilots) == 0:
            raise ValueError('cannot add no pilots')
        with self._pilots_lock:
            # sanity check, and keep pilots around for inspection
            for pilot in pilots:
                pid = pilot.uid
                if pid in self._pilots:
                    raise ValueError('pilot %s already added' % pid)
                self._pilots[pid] = pilot
                # subscribe for state updates (to pull units back on pilot death)
                pilot.register_callback(self._pilot_state_cb)
        pilot_docs = [pilot.as_dict() for pilot in pilots]
        # publish to the command channel for the scheduler to pick up
        self.publish(rpc.CONTROL_PUBSUB, {'cmd' : 'add_pilots',
                                          'arg' : {'pilots': pilot_docs,
                                                   'umgr'  : self.uid}})
# --------------------------------------------------------------------------
#
def list_pilots(self):
"""
Lists the UIDs of the pilots currently associated with the unit manager.
**Returns:**
* A list of :class:`radical.pilot.ComputePilot` UIDs [`string`].
"""
with self._pilots_lock:
return list(self._pilots.keys())
# --------------------------------------------------------------------------
#
def get_pilots(self):
"""
Get the pilots instances currently associated with the unit manager.
**Returns:**
* A list of :class:`radical.pilot.ComputePilot` instances.
"""
with self._pilots_lock:
return list(self._pilots.values())
# --------------------------------------------------------------------------
#
    def remove_pilots(self, pilot_ids, drain=False):
        """
        Disassociates one or more pilots from the unit manager.
        After a pilot has been removed from a unit manager, it won't process
        any of the unit manager's units anymore. Calling `remove_pilots`
        doesn't stop the pilot itself.
        **Arguments:**
            * **drain** [`boolean`]: Drain determines what happens to the units
              which are managed by the removed pilot(s). If `True`, all units
              currently assigned to the pilot are allowed to finish execution.
              If `False` (the default), then non-final units will be canceled.

        Raises `RuntimeError` for `drain=True` (not implemented) and
        `ValueError` for an empty list or unknown pilot ids.
        """
        # TODO: Implement 'drain'.
        # NOTE: the actual removal of pilots from the scheduler is asynchron!
        if drain:
            raise RuntimeError("'drain' is not yet implemented")
        if not isinstance(pilot_ids, list):
            pilot_ids = [pilot_ids]
        if len(pilot_ids) == 0:
            raise ValueError('cannot remove no pilots')
        with self._pilots_lock:
            # sanity check, and keep pilots around for inspection
            for pid in pilot_ids:
                if pid not in self._pilots:
                    raise ValueError('pilot %s not removed' % pid)
                del(self._pilots[pid])
        # publish to the command channel for the scheduler to pick up
        self.publish(rpc.CONTROL_PUBSUB, {'cmd' : 'remove_pilots',
                                          'arg' : {'pids'  : pilot_ids,
                                                   'umgr'  : self.uid}})
# --------------------------------------------------------------------------
#
def list_units(self):
"""
Returns the UIDs of the :class:`radical.pilot.ComputeUnit` managed by
this unit manager.
**Returns:**
* A list of :class:`radical.pilot.ComputeUnit` UIDs [`string`].
"""
with self._pilots_lock:
return list(self._units.keys())
# --------------------------------------------------------------------------
#
    def submit_units(self, descriptions):
        """
        Submits one or more :class:`radical.pilot.ComputeUnit` instances to the
        unit manager.
        **Arguments:**
            * **descriptions** [:class:`radical.pilot.ComputeUnitDescription`
              or list of :class:`radical.pilot.ComputeUnitDescription`]: The
              description of the compute unit instance(s) to create.
        **Returns:**
              * A list of :class:`radical.pilot.ComputeUnit` objects (or a
                single object if a single description was passed).

        Raises `ValueError` on an empty list or a description without an
        executable.
        """
        from .compute_unit import ComputeUnit
        ret_list = True
        if not isinstance(descriptions, list):
            ret_list = False
            descriptions = [descriptions]
        if len(descriptions) == 0:
            raise ValueError('cannot submit no unit descriptions')
        # we return a list of compute units
        self._rep.progress_tgt(len(descriptions), label='submit')
        units = list()
        for ud in descriptions:
            if not ud.executable:
                raise ValueError('compute unit executable must be defined')
            unit = ComputeUnit(umgr=self, descr=ud)
            units.append(unit)
            # keep units around
            with self._units_lock:
                self._units[unit.uid] = unit
            # when session recording is enabled, dump the description to disk
            if self._session._rec:
                ru.write_json(ud.as_dict(), "%s/%s.batch.%03d.json"
                        % (self._session._rec, unit.uid, self._rec_id))
            self._rep.progress()
        self._rep.progress_done()
        if self._session._rec:
            self._rec_id += 1
        # insert units into the database, as a bulk.
        unit_docs = [u.as_dict() for u in units]
        self._session._dbs.insert_units(unit_docs)
        # Only after the insert can we hand the units over to the next
        # components (ie. advance state).
        self.advance(unit_docs, rps.UMGR_SCHEDULING_PENDING,
                     publish=True, push=True)
        if ret_list: return units
        else       : return units[0]
# --------------------------------------------------------------------------
#
def get_units(self, uids=None):
"""Returns one or more compute units identified by their IDs.
**Arguments:**
* **uids** [`string` or `list of strings`]: The IDs of the
compute unit objects to return.
**Returns:**
* A list of :class:`radical.pilot.ComputeUnit` objects.
"""
if not uids:
with self._units_lock:
ret = list(self._units.values())
return ret
ret_list = True
if (not isinstance(uids, list)) and (uids is not None):
ret_list = False
uids = [uids]
ret = list()
with self._units_lock:
for uid in uids:
if uid not in self._units:
raise ValueError('unit %s not known' % uid)
ret.append(self._units[uid])
if ret_list: return ret
else : return ret[0]
# --------------------------------------------------------------------------
#
    def wait_units(self, uids=None, state=None, timeout=None):
        """
        Returns when one or more :class:`radical.pilot.ComputeUnits` reach a
        specific state.
        If `uids` is `None`, `wait_units` returns when **all**
        ComputeUnits reach the state defined in `state`. This may include
        units which have previously terminated or waited upon.
        **Example**::
            # TODO -- add example
        **Arguments:**
            * **uids** [`string` or `list of strings`]
              If uids is set, only the ComputeUnits with the specified
              uids are considered. If uids is `None` (default), all
              ComputeUnits are considered.
            * **state** [`string`]
              The state that ComputeUnits have to reach in order for the call
              to return.
              By default `wait_units` waits for the ComputeUnits to
              reach a terminal state, which can be one of the following:
              * :data:`radical.pilot.rps.DONE`
              * :data:`radical.pilot.rps.FAILED`
              * :data:`radical.pilot.rps.CANCELED`
            * **timeout** [`float`]
              Timeout in seconds before the call returns regardless of Pilot
              state changes. The default value **None** waits forever.

        **Returns:**
            * The current state(s) of the waited-upon units (a list, or a
              single state string if a single uid was passed).
        """
        # default: wait on all units which are not final yet
        if not uids:
            with self._units_lock:
                uids = list()
                for uid,unit in self._units.items():
                    if unit.state not in rps.FINAL:
                        uids.append(uid)
        if not state : states = rps.FINAL
        elif not isinstance(state, list): states = [state]
        else : states = state
        # we simplify state check by waiting for the *earliest* of the given
        # states - if the unit happens to be in any later state, we are sure the
        # earliest has passed as well.
        check_state_val = rps._unit_state_values[rps.FINAL[-1]]
        for state in states:
            check_state_val = min(check_state_val,
                                  rps._unit_state_values[state])
        ret_list = True
        if not isinstance(uids, list):
            ret_list = False
            uids = [uids]
        start = time.time()
        to_check = None
        with self._units_lock:
            to_check = [self._units[uid] for uid in uids]
        # We don't want to iterate over all units again and again, as that would
        # duplicate checks on units which were found in matching states. So we
        # create a list from which we drop the units as we find them in
        # a matching state
        self._rep.progress_tgt(len(to_check), label='wait')
        while to_check and not self._terminate.is_set():
            # check timeout
            if timeout and (timeout <= (time.time() - start)):
                self._log.debug ("wait timed out")
                break
            time.sleep (0.1)
            # FIXME: print percentage...
            # print 'wait units: %s' % [[u.uid, u.state] for u in to_check]
            check_again = list()
            for unit in to_check:
                # we actually don't check if a unit is in a specific (set of)
                # state(s), but rather check if it ever *has been* in any of
                # those states
                if unit.state not in rps.FINAL and \
                    rps._unit_state_values[unit.state] < check_state_val:
                    # this unit does not match the wait criteria
                    check_again.append(unit)
                else:
                    # stop watching this unit
                    if unit.state in [rps.FAILED]:
                        self._rep.progress()  # (color='error', c='-')
                    elif unit.state in [rps.CANCELED]:
                        self._rep.progress()  # (color='warn', c='*')
                    else:
                        self._rep.progress()  # (color='ok', c='+')
            to_check = check_again
        self._rep.progress_done()
        # grab the current states to return
        state = None
        with self._units_lock:
            states = [self._units[uid].state for uid in uids]
        # report a per-state summary of the waited-upon units
        sdict = {state: states.count(state) for state in set(states)}
        for state in sorted(set(states)):
            self._rep.info('\t%-10s: %5d\n' % (state, sdict[state]))
        if to_check: self._rep.warn('>>timeout\n')
        else       : self._rep.ok ('>>ok\n')
        # done waiting
        if ret_list: return states
        else       : return states[0]
# --------------------------------------------------------------------------
#
    def cancel_units(self, uids=None):
        """
        Cancel one or more :class:`radical.pilot.ComputeUnits`.
        Note that cancellation of units is *immediate*, i.e. their state is
        immediately set to `CANCELED`, even if some RP component may still
        operate on the units. Specifically, other state transitions, including
        other final states (`DONE`, `FAILED`) can occur *after* cancellation.
        This is a side effect of an optimization: we consider this
        acceptable tradeoff in the sense "Oh, that unit was DONE at point of
        cancellation -- ok, we can use the results, sure!".
        If that behavior is not wanted, set the environment variable:
            export RADICAL_PILOT_STRICT_CANCEL=True
        **Arguments:**
            * **uids** [`string` or `list of strings`]: The IDs of the
              compute units objects to cancel; `None` cancels all known units.
        """
        if not uids:
            with self._units_lock:
                uids = list(self._units.keys())
        else:
            if not isinstance(uids, list):
                uids = [uids]
        # NOTE: We advance all units to cancelled, and send a cancellation
        #       control command. If that command is picked up *after* some
        #       state progression, we'll see state transitions after cancel.
        #       For non-final states that is not a problem, as it is equivalent
        #       with a state update message race, which our state collapse
        #       mechanism accounts for. For an eventual non-canceled final
        #       state, we do get an invalid state transition. That is also
        #       corrected eventually in the state collapse, but the point
        #       remains, that the state model is temporarily violated. We
        #       consider this a side effect of the fast-cancel optimization.
        #
        #       The env variable 'RADICAL_PILOT_STRICT_CANCEL == True' will
        #       disable this optimization.
        #
        # FIXME: the effect of the env var is not well tested
        if 'RADICAL_PILOT_STRICT_CANCEL' not in os.environ:
            with self._units_lock:
                units = [self._units[uid] for uid in uids ]
            unit_docs = [unit.as_dict() for unit in units]
            self.advance(unit_docs, state=rps.CANCELED, publish=True, push=True)
        # we *always* issue the cancellation command to the local components
        self.publish(rpc.CONTROL_PUBSUB, {'cmd' : 'cancel_units',
                                          'arg' : {'uids' : uids,
                                                   'umgr' : self.uid}})
        # we also inform all pilots about the cancelation request
        self._session._dbs.pilot_command(cmd='cancel_units', arg={'uids':uids})
        # In the default case of calling 'advance' above, we just set the state,
        # so we *know* units are canceled. But we nevertheless wait until that
        # state progression trickled through, so that the application will see
        # the same state on unit inspection.
        self.wait_units(uids=uids)
# --------------------------------------------------------------------------
#
    def register_callback(self, cb, cb_data=None, metric=None, uid=None):
        """
        Registers a new callback function with the UnitManager. Manager-level
        callbacks get called if the specified metric changes. The default
        metric `UNIT_STATE` fires the callback if any of the ComputeUnits
        managed by the PilotManager change their state.
        All callback functions need to have the same signature::
            def cb(obj, value)
        where ``object`` is a handle to the object that triggered the callback,
        ``value`` is the metric, and ``data`` is the data provided on
        callback registration. In the example of `UNIT_STATE` above, the
        object would be the unit in question, and the value would be the new
        state of the unit.
        If 'cb_data' is given, then the 'cb' signature changes to
            def cb(obj, state, cb_data)
        and 'cb_data' are passed unchanged.
        If 'uid' is given, the callback will be invoked only for the specified
        unit.
        Available metrics are:
          * `UNIT_STATE`: fires when the state of any of the units which are
            managed by this unit manager instance is changing. It communicates
            the unit object instance and the units new state.
          * `WAIT_QUEUE_SIZE`: fires when the number of unscheduled units (i.e.
            of units which have not been assigned to a pilot for execution)
            changes.

        Raises `ValueError` for an unknown metric or unit uid.
        """
        # FIXME: the signature should be (self, metrics, cb, cb_data)
        if not metric:
            metric = rpc.UNIT_STATE
        if metric not in rpc.UMGR_METRICS:
            raise ValueError ("Metric '%s' not available on the umgr" % metric)
        if not uid:
            uid = '*'
        elif uid not in self._units:
            raise ValueError('no such unit %s' % uid)
        with self._cb_lock:
            # callbacks are identified by function name, so re-registering the
            # same function replaces the earlier registration
            cb_name = cb.__name__
            if metric not in self._callbacks:
                self._callbacks[metric] = dict()
            if uid not in self._callbacks[metric]:
                self._callbacks[metric][uid] = dict()
            self._callbacks[metric][uid][cb_name] = {'cb'      : cb,
                                                     'cb_data' : cb_data}
# --------------------------------------------------------------------------
#
def unregister_callback(self, cb=None, metrics=None, uid=None):
if not metrics: metrics = [rpc.UMGR_METRICS]
else : metrics = ru.as_list(metrics)
if not uid:
uid = '*'
elif uid not in self._units:
raise ValueError('no such unit %s' % uid)
for metric in metrics:
if metric not in rpc.UMGR_METRICS :
raise ValueError ("invalid umgr metric '%s'" % metric)
with self._cb_lock:
for metric in metrics:
if metric not in rpc.UMGR_METRICS :
raise ValueError("cb metric '%s' unknown" % metric)
if metric not in self._callbacks:
raise ValueError("cb metric '%s' invalid" % metric)
if uid not in self._callbacks[metric]:
raise ValueError("cb target '%s' invalid" % uid)
if cb:
to_delete = [cb.__name__]
else:
to_delete = list(self._callbacks[metric][uid].keys())
for cb_name in to_delete:
if cb_name not in self._callbacks[uid][metric]:
raise ValueError("cb %s not registered" % cb_name)
del(self._callbacks[uid][metric][cb_name])
# ------------------------------------------------------------------------------
| 35.228261 | 84 | 0.515297 | 41,455 | 0.983908 | 0 | 0 | 245 | 0.005815 | 0 | 0 | 19,825 | 0.470534 |
a4cdf015552f19cc1e695e21a45142e408efd72b | 11,130 | py | Python | syncstream/file.py | cainmagi/sync-stream | 8d50bd2c9f13071b057dd2583b80f63229f89b0e | [
"MIT"
] | null | null | null | syncstream/file.py | cainmagi/sync-stream | 8d50bd2c9f13071b057dd2583b80f63229f89b0e | [
"MIT"
] | null | null | null | syncstream/file.py | cainmagi/sync-stream | 8d50bd2c9f13071b057dd2583b80f63229f89b0e | [
"MIT"
] | null | null | null | #!python
# -*- coding: UTF-8 -*-
'''
################################################################
# File-based stream synchronization.
# @ Sync-stream
# Produced by
# Yuchen Jin @ cainmagi@gmail.com,
# yjin4@uh.edu.
# Requirements: (Pay attention to version)
# python 3.6+
# fasteners 0.16+
# This module is based on the file-lock package (fasteners). It
# uses rotating files to record the message items.
################################################################
'''
import os
import glob
try:
from typing import Tuple, Sequence
except ImportError:
from builtins import tuple as Tuple
from collections.abc import Sequence
import fasteners
from .base import is_end_line_break
class LineFileBuffer:
    '''The file-locked line-based buffer handle.
    This buffer provides a rotating item storage for the text-based stream. The text is stored not
    by length, but by lines. The maximal line number of the storage is limited.
    The file-locked handle could be shared by different processes, but we do not recommend to do
    that. A better way to use this handle is to initialize it in each sub-processes (if needed).
    Note that this handle is process-safe, not thread-safe. In other words, each process should
    only maintain one INDEPENDENT LineFileBuffer. The LineFileBuffer should not be shared by
    either different threads or different processes.

    Storage layout (all paths derived from `file_path` with the suffix stripped):
      * ``<file_path>-<n>.log``        : one record per file; n=0 is the newest.
      * ``<file_path>-<tmp_id>.tmp``   : the current, not-yet-terminated line.
      * ``<file_path>.lock`` and ``<file_path>-<tmp_id>.lock`` : fasteners lock files.
    '''

    def __init__(self, file_path: str, maxlen: int = 20, tmp_id: str = 'tmp') -> None:
        '''Initialization.
        Arguments:
            file_path: the path of the record files. The file suffix would be automatically set
                       as `.log`.
            maxlen:    the maximal number of records. Each record would be saved as one file.
            tmp_id:    the identifier for the temporary file. Each process should holds one
                       unique id. A conflict id may cause the written flows from different
                       processes to interrupt each other.
        Raises:
            TypeError: if `maxlen` is not a positive int, or `file_path`/`tmp_id` is not a
                       non-empty str, or `file_path` has no file-name component.
        '''
        if not isinstance(maxlen, int) or maxlen < 1:
            raise TypeError('syncstream: The argument "maxlen" should be a positive integer.')
        if not isinstance(file_path, str) or file_path == '':
            raise TypeError('syncstream: The argument "file_path" should be a non-empty str.')
        tmp_id = str(tmp_id)
        if not isinstance(tmp_id, str) or tmp_id == '':
            raise TypeError('syncstream: The argument "tmp_id" should be a non-empty str.')
        # Strip any extension: log files get numbered `.log` suffixes appended later.
        self.__file_path = os.path.splitext(file_path)[0]
        file_dir, file_name = os.path.split(self.__file_path)
        if file_name == '':
            raise TypeError('syncstream: The argument "file_path" should contain a non-empty file name.')
        self.__file_dir = '.' if file_dir == '' else file_dir
        self.__file_name = file_name
        self.__tmp_id = tmp_id
        self.maxlen = maxlen
        # Two inter-process locks: one guards the rotating `.log` files, one
        # guards this process's private temporary (current-line) file.
        self.__file_lock = fasteners.InterProcessReaderWriterLock(self.__file_path + '.lock')
        self.__file_tmp_lock = fasteners.InterProcessReaderWriterLock(self.__file_path + '-{0}.lock'.format(self.__tmp_id))

    @property
    def __tmp_file_path(self) -> str:
        '''Get the temporary file path of this buffer.
        This property is private and should not be exposed to users.
        '''
        return '{0}-{1}.tmp'.format(self.__file_path, self.__tmp_id)

    def new_line(self) -> None:
        R'''Manually trigger a new line to the buffer. If the current stream is already
        a new line, do nothing.
        This method is equivalent to
        ```python
        if (last line is not empty):
            write('\n')
        ```
        '''
        if self.__get_last_line() != '':
            self.__write('\n')

    def clear(self) -> None:
        '''Clear all log files.
        This method would search and remove all log files, including the temporary file.
        However, the lock files would not be removed. A typical usage of this method is
        to clear files only in the main process.
        '''
        with self.__file_lock.write_lock():
            # Remove every rotated record file matching `<file_path>-*.log`.
            for fpath_remove in glob.iglob('{0}-*.log'.format(self.__file_path), recursive=False):
                os.remove(fpath_remove)
        with self.__file_tmp_lock.write_lock():
            tmp_path = self.__tmp_file_path
            if os.path.isfile(tmp_path):
                os.remove(tmp_path)

    def flush(self) -> None:
        '''Flush the current written line stream.
        No-op: data is persisted to files on every write, so there is nothing to flush.
        '''
        pass  # pylint: disable=unnecessary-pass

    def __update_records(self, lines: Sequence[str]) -> None:
        '''Update the log files.
        The log files would be updated by this method. Each line would be saved in one
        log file.
        This method is private and should not be exposed to users.
        Arguments:
            lines: the new lines to be written in the log files.
        '''
        # Lock the log files in writer mode.
        with self.__file_lock.write_lock():
            # Check the number of lines, and truncate the lines.
            n_lines = len(lines)
            if n_lines <= 0:
                return
            if n_lines >= self.maxlen:
                # Keep only the newest `maxlen` lines.
                lines = lines[-self.maxlen:]
                n_lines = self.maxlen
            # Check the number of log files (stop at the first gap in the sequence).
            log_files = os.listdir(self.__file_dir)
            n_current = 0  # Current number of log files.
            for n in range(self.maxlen):
                if '{0}-{1:d}.log'.format(self.__file_name, n) in log_files:
                    n_current += 1
                else:
                    break
            # Move the existing files: shift index n -> n + n_lines, oldest first,
            # so the newest slots [0, n_lines) become free.
            n_remain = min(n_current, self.maxlen - n_lines)
            for n in range(n_remain - 1, -1, -1):
                file_old = '{0}-{1:d}.log'.format(self.__file_path, n)
                file_new = '{0}-{1:d}.log'.format(self.__file_path, n + n_lines)
                if os.path.isfile(file_new):
                    os.remove(file_new)
                os.rename(file_old, file_new)
            # Write new log files in the reversed order (last line gets index 0).
            for n in range(n_lines):
                with open('{0}-{1:d}.log'.format(self.__file_path, n_lines - 1 - n), 'w') as fobj:
                    fobj.write(lines[n])

    def __get_last_line(self) -> str:
        '''Get the last (unterminated) line from the temporary file.
        Returns '' if the temporary file does not exist yet.
        This method is private and should not be exposed to users.
        '''
        file_name = self.__tmp_file_path
        with self.__file_tmp_lock.read_lock():
            if os.path.isfile(file_name):
                with open(file_name, 'r') as fobj:
                    last_line = fobj.read()
            else:
                last_line = ''
        return last_line

    def __clean_last_line(self) -> None:
        '''Clean the last line file (truncate it to empty).
        This method is used for optimizing the clearning operation for a single line.
        This method is private and should not be exposed to users.
        '''
        # Lock the log files in writer mode.
        with self.__file_tmp_lock.write_lock():
            # Opening in 'w' mode truncates the file.
            with open(self.__tmp_file_path, 'w'):
                pass

    def __write_last_line(self, line: str) -> int:
        '''Append message to the last line in the log file.
        Returns the number of characters written.
        This method is used for optimizing the writting operation for a single line.
        This method is private and should not be exposed to users.
        '''
        # Lock the log files in writer mode.
        with self.__file_tmp_lock.write_lock():
            with open(self.__tmp_file_path, 'a') as fobj:
                return fobj.write(line)

    def parse_lines(self, lines: Sequence[str]) -> None:
        '''Parse the lines.
        This method would be triggered when the new lines are written by `write()` method.
        The default behavior is writting the lines to the log files.
        Users could inherit this method and override it with their customized parsing method,
        like regular expression searching.
        Arguments:
            lines: the new lines to be written in the log files.
        '''
        self.__update_records(lines)

    def read(self, size: int = None) -> Tuple[str]:
        '''Read the records.
        Fetch the stored record items from the buffer. Using the `read()` method is process-
        safe and would not influence the cursor of `write()` method.
        If the current written line is not blank, the `read()` method would regard it as the
        last record item.
        Arguments:
            size: if set None, would return the whole storage.
                  if set a int value, would return the last `size` items.
        '''
        if isinstance(size, int) and size <= 0:
            return tuple()
        # Get the last line.
        last_line = self.__get_last_line()
        with self.__file_lock.read_lock():
            # Check the number of log files; reserve one slot for the partial
            # last line when it is non-empty.
            log_files = os.listdir(self.__file_dir)
            n_current = 0  # Current number of log files.
            for n in range(self.maxlen - 1 if last_line else self.maxlen):
                if '{0}-{1:d}.log'.format(self.__file_name, n) in log_files:
                    n_current += 1
                else:
                    break
            # Get the number of reading lines.
            # NOTE(review): when `last_line` is empty and `size` is given, this
            # returns at most `size - 1` items (the slot reserved for the partial
            # line is never filled) — looks like an off-by-one; confirm intent.
            if size is None:
                n_read = n_current
            else:
                n_read = min(size - 1, n_current)
            # Read the log files (oldest requested first, index 0 is newest).
            res = list()
            for n in range(n_read - 1, -1, -1):
                with open('{0}-{1:d}.log'.format(self.__file_path, n), 'r') as fobj:
                    res.append(fobj.read())
            if last_line:
                res.append(last_line)
            return tuple(res)

    def __write(self, data: str) -> int:
        '''The write() method without lock.
        This method is private and should not be used by users.
        '''
        # NOTE(review): when `data` is '' the branches below are all skipped and
        # this method implicitly returns None rather than an int — confirm.
        message_lines = data.splitlines()
        n_lines = len(message_lines)
        if n_lines == 1 and message_lines[0] == '':
            # `data` is a single line break: commit the pending line as a record.
            self.parse_lines((self.__get_last_line(), ))
            self.__clean_last_line()
            return 1
        elif is_end_line_break(data):
            # splitlines() drops a trailing line break; restore the empty tail
            # so the last completed line gets committed below.
            message_lines.append('')
            n_lines += 1
        if n_lines > 1:
            # Prefix the pending partial line onto the first new line, commit
            # all completed lines, and stash the trailing partial line.
            message_lines[0] = self.__get_last_line() + message_lines[0]
            last_line = message_lines.pop()
            self.parse_lines(message_lines)
            self.__clean_last_line()
            return self.__write_last_line(last_line)
        elif n_lines == 1:
            # No line break: just extend the pending partial line.
            return self.__write_last_line(message_lines[0])

    def write(self, data: str) -> int:
        '''Write the records.
        The source data is the same as that of a text-based IO. Each time when `data` contains
        a line break, a new record item would be pushed in the storage. The `write()` method
        is process-safe.
        Arguments:
            data: the data that would be written in the stream.
        '''
        return self.__write(data)
| 42.972973 | 123 | 0.591554 | 10,407 | 0.93504 | 0 | 0 | 252 | 0.022642 | 0 | 0 | 5,483 | 0.492633 |
a4d004f441fa442a8baa2ecc38cd15fb719c4976 | 1,636 | py | Python | spatialtis/_plotting/__init__.py | Mr-Milk/SpatialTis | bcdc6df5213b8b256cbe4c9a7c0f3b5e6d3c56b6 | [
"Apache-2.0"
] | 10 | 2020-07-14T13:27:35.000Z | 2021-11-24T21:41:30.000Z | spatialtis/_plotting/__init__.py | Mr-Milk/SpatialTis | bcdc6df5213b8b256cbe4c9a7c0f3b5e6d3c56b6 | [
"Apache-2.0"
] | 21 | 2021-01-10T09:39:25.000Z | 2022-03-12T01:04:52.000Z | spatialtis/_plotting/__init__.py | Mr-Milk/SpatialTis | bcdc6df5213b8b256cbe4c9a7c0f3b5e6d3c56b6 | [
"Apache-2.0"
] | null | null | null | import matplotlib as mpl
from matplotlib import cycler
from .api import (
NCDMarkers,
NMDMarkers,
cell_co_occurrence,
cell_components,
cell_density,
cell_map,
cell_morphology,
community_map,
expression_map,
neighborhood_analysis,
neighbors_map,
spatial_co_expression,
spatial_distribution,
spatial_enrichment_analysis,
spatial_heterogeneity,
)
from .base import get_colors, get_linear_colors
# Matplotlib rcParams overrides defining the SpatialTis house style:
# thin spine-less axes, dashed faint grid, a 10-color categorical palette,
# and uniform 10pt typography.  Applied globally on import (see below).
SPATIALTIS_STYLE = {
    # lines / markers
    "lines.linewidth": 2,
    "lines.markeredgecolor": "white",
    "lines.markeredgewidth": 1,
    "lines.markersize": 7,
    # patches
    "patch.linewidth": 1,
    "patch.facecolor": "C0",
    "patch.edgecolor": "black",
    "text.color": "#0b0b0b",
    # axes: grid on, all four spines hidden
    "axes.edgecolor": "#D0D0E0",
    "axes.grid": True,
    "axes.grid.axis": "both",
    "axes.titlesize": 10,
    "axes.labelsize": 10,
    "axes.labelcolor": "#0b0b0b",
    "axes.spines.left": False,
    "axes.spines.bottom": False,
    "axes.spines.top": False,
    "axes.spines.right": False,
    # default categorical color cycle (10 colors)
    "axes.prop_cycle": cycler(
        "color",
        [
            "#00BEFF",
            "#D4CA3A",
            "#FF6DAE",
            "#67E1B5",
            "#EBACFA",
            "#9E9E9E",
            "#F1988E",
            "#5DB15A",
            "#E28544",
            "#52B8AA",
        ],
    ),
    # ticks
    "xtick.color": "#0b0b0b",
    "xtick.labelsize": 10,
    "ytick.color": "#0b0b0b",
    "ytick.labelsize": 10,
    # faint dashed grid
    "grid.color": "#93939c",
    "grid.linestyle": "--",
    "grid.alpha": 0.2,
    # figure / fonts
    "figure.titlesize": 10,
    "figure.titleweight": "bold",
    "font.size": 10,
}

# Side effect at import time: every plot made after importing this package
# picks up the SpatialTis style.
mpl.rcParams.update(SPATIALTIS_STYLE)
| 23.371429 | 47 | 0.575183 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 661 | 0.404034 |
a4d084b6842b2f5652d3fa521fd4534554cd0a32 | 518 | py | Python | terminal.py | TeknohouseID/rumah_aria_graha_NEW_2018 | c3513100f68ce57be477372bbc7176ddbde607ad | [
"Unlicense"
] | null | null | null | terminal.py | TeknohouseID/rumah_aria_graha_NEW_2018 | c3513100f68ce57be477372bbc7176ddbde607ad | [
"Unlicense"
] | null | null | null | terminal.py | TeknohouseID/rumah_aria_graha_NEW_2018 | c3513100f68ce57be477372bbc7176ddbde607ad | [
"Unlicense"
] | null | null | null | import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
pin_terminal = [15,16] #definisi pin GPIO yg terhubung ke relay terminal
GPIO.setup(pin_terminal, GPIO.OUT)
def terminal_on(pin): #fungsi untuk menyalakan lampu (NC)
GPIO.output(pin, 1)
def terminal_off(pin): #fungsi untuk mematikan lampu (NC)
GPIO.output(pin, 0)
#Coded by Faisal Candrasyah H, Founder Teknohouse.ID, Co-founder and former CTO of Indisbuilding
#pin 15 = relay 4 = dispenser_cewek
#pin 16 = relay 5 = dispenser_cowok
| 24.666667 | 96 | 0.758687 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 284 | 0.548263 |
a4d08644e553bef96fa074dce2bce8a5b53b58cb | 722 | py | Python | ListaExercicios1/exercicio21.py | GabrielSouzaGit/PythonStudies | 49ec26d4ae45999695ab32f8e1f27587adb5ca4b | [
"MIT"
] | null | null | null | ListaExercicios1/exercicio21.py | GabrielSouzaGit/PythonStudies | 49ec26d4ae45999695ab32f8e1f27587adb5ca4b | [
"MIT"
] | null | null | null | ListaExercicios1/exercicio21.py | GabrielSouzaGit/PythonStudies | 49ec26d4ae45999695ab32f8e1f27587adb5ca4b | [
"MIT"
] | null | null | null | '''Escreva um programa que solicite ao usuário dois números e apresente na tela os resultados das
operações aritméticas (soma, subtração, multiplicação, divisão, resto da divisão, exponenciação, radiciação)'''
import math
num1 = float(input('Informe um numero: '))
num2 = float(input('Informe outro numero: '))
print(f'a soma dos numeros sao: {num1+num2}')
print(f'a subtracao dos numeros sao: {num1-num2}')
print(f'a multiplicacao dos numeros sao: {num1*num2}')
print(f'a divisao dos numeros sao: {num1/num2}')
print(f'o resto da divisao dos numeros sao: {num1%num2}')
print(f'a exponenciacao dos numeros sao: {math.exp(num1), math.exp(num2)}')
print(f'a radiciacao dos numeros sao: {math.sqrt(num1), math.sqrt(num2)}') | 55.538462 | 111 | 0.742382 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 624 | 0.846676 |
a4d104f25c19782240064f16d0ff882042f1b364 | 2,706 | py | Python | benchmarking/remote/django_url_printer.py | virtan/FAI-PEP | 8641a54b2328c343ab0470f195a42da1021d1392 | [
"Apache-2.0"
] | 1 | 2019-08-09T07:50:21.000Z | 2019-08-09T07:50:21.000Z | benchmarking/remote/django_url_printer.py | virtan/FAI-PEP | 8641a54b2328c343ab0470f195a42da1021d1392 | [
"Apache-2.0"
] | 1 | 2021-04-19T09:50:14.000Z | 2021-04-19T09:50:14.000Z | benchmarking/remote/django_url_printer.py | isabella232/FAI-PEP | a4089c79ab765e7f05080348c2978a07c3487d4c | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
import urllib
from remote.url_printer_base import URLPrinterBase
from remote.url_printer_base import registerResultURL
DJANGO_SUB_URL = "benchmark/visualize"
DISPLAY_COLUMNS = [
"identifier",
"metric",
"net_name",
"p10",
"p50",
"p90",
"platform",
"time",
"type",
"user_identifier",
]
class DjangoURLPrinter(URLPrinterBase):
    """Builds and prints a Django dashboard URL for visualizing benchmark results.

    The URL points at the `benchmark/visualize` view of the configured server
    and encodes the column selection, graph configuration, and a filter on the
    run's `user_identifier` as query parameters.
    """

    def __init__(self, args):
        self.args = args
        # Full visualization endpoint, e.g. "<server_addr>/benchmark/visualize".
        self.db_url = os.path.join(self.args.server_addr, DJANGO_SUB_URL)

    def getColumnSelParams(self):
        """Return the column-selection form entries, one per display column."""
        return [
            {'name': 'columns', 'value': display_column}
            for display_column in DISPLAY_COLUMNS
        ]

    def getGraphConfParams(self):
        """Return the graph configuration: a bar graph ranked by the p10 column."""
        return [
            {
                'name': 'graph-type-dropdown',
                'value': 'bar-graph',
            },
            {
                'name': 'rank-column-dropdown',
                'value': 'p10',
            },
        ]

    def getFilterParams(self, user_identifier):
        """Return a query-builder filter restricting rows to `user_identifier`.

        Returns an empty dict when no identifier is given (no filtering).
        """
        if user_identifier is None:
            return {}
        return {
            "condition": "AND",
            "rules": [
                {
                    "id": "user_identifier",
                    "field": "user_identifier",
                    "type": "string",
                    "input": "text",
                    "operator": "equal",
                    "value": str(user_identifier)
                }
            ],
            "valid": True
        }

    def getDjangoParams(self, user_identifier):
        """Assemble the full query-parameter dict expected by the Django view."""
        col_sel_params = self.getColumnSelParams()
        graph_conf_params = self.getGraphConfParams()
        filter_params = self.getFilterParams(user_identifier)
        return {
            'sort': '-p10',
            'selection_form': json.dumps(col_sel_params + graph_conf_params),
            'filters': json.dumps(filter_params),
        }

    def printURL(self, dataset, user_identifier, benchmarks):
        """Print the result-visualization URL for the given run.

        `dataset` and `benchmarks` are part of the printer interface but are
        not used when building the URL.
        """
        params = self.getDjangoParams(user_identifier)
        # Fix: the original called `urllib.urlencode` and fell back to
        # `urllib.parse.urlencode` under a broad `except Exception`.  A bare
        # `import urllib` does not reliably expose the `parse` submodule on
        # Python 3, and the broad except could mask real errors.  An explicit
        # conditional import handles both interpreter versions correctly.
        try:
            # Python 3
            from urllib.parse import urlencode
        except ImportError:
            # Python 2
            from urllib import urlencode
        param_string = urlencode(params)
        url = (
            self.db_url + "?{}"
        ).format(param_string)
        print("Result URL => " + url)
# Register this printer under the "django" key so the framework can select it.
registerResultURL("django", DjangoURLPrinter)
| 25.528302 | 82 | 0.546194 | 2,221 | 0.820769 | 0 | 0 | 0 | 0 | 0 | 0 | 424 | 0.156689 |
a4d11f16c93cfceb962bd09ed2bcc56346026e31 | 4,595 | py | Python | pynq_networking/lib/mqttsn_sw.py | Xilinx/PYNQ-Networking | b763121537e2973734d87e874c41012f044fb1f8 | [
"BSD-3-Clause"
] | 40 | 2017-10-19T20:36:59.000Z | 2022-02-16T08:04:29.000Z | pynq_networking/lib/mqttsn_sw.py | drichmond/PYNQ-Networking | d9baf100dfbf8672b7fe9da108cbd2d26a289771 | [
"BSD-3-Clause"
] | 12 | 2018-02-13T19:51:14.000Z | 2021-11-01T12:03:31.000Z | pynq_networking/lib/mqttsn_sw.py | drichmond/PYNQ-Networking | d9baf100dfbf8672b7fe9da108cbd2d26a289771 | [
"BSD-3-Clause"
] | 13 | 2018-01-05T01:39:35.000Z | 2020-12-10T16:21:07.000Z | # Copyright (c) 2017, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import struct
import logging
logging.getLogger("kamene.runtime").setLevel(logging.ERROR)
from kamene.all import *
from .mqttsn import *
__author__ = "Stephen Neuendorffer"
__copyright__ = "Copyright 2017, Xilinx"
__email__ = "stephenn@xilinx.com"
""" Kamene implementation of the MQTTSN protocol """
def valid_ack(ack, t):
    """Check that `ack` is a well-formed MQTTSN acknowledgement of type `t`.

    Prints a diagnostic and returns False when the IP payload is an ICMP
    error or when the MQTTSN payload is not an instance of `t`; returns
    True otherwise.
    """
    ip_payload = ack[IP].payload
    if isinstance(ip_payload, ICMP):
        print("Error response:")
        ip_payload.show()
        return False
    mqtt_payload = ack[MQTTSN].payload
    if isinstance(mqtt_payload, t):
        return True
    print("Unexpected response should have been " + str(t) + ":")
    ack.payload.show()
    return False
class MQTT_Client:
    """A minimal MQTT-SN client built on kamene packet crafting.

    Sends MQTT-SN control packets over UDP to a broker and validates the
    acknowledgements.  Usable as a context manager: entering connects,
    exiting disconnects.
    """

    def __init__(self, serverIP, serverPort, name, verbose=0):
        self.serverIP = serverIP
        self.serverPort = serverPort
        self.client = name
        self.verbose = verbose

    def __enter__(self):
        # Fix: the original wrapped connect() in
        # `try: ... except Exception: raise Exception`, which discarded the
        # original exception (and its traceback) and re-raised a bare,
        # message-less Exception.  Letting the original error propagate
        # preserves the diagnostic information.
        self.connect()
        return self

    def __exit__(self, type, value, traceback):
        self.disconnect()

    def connect(self):
        """Establish the connection.

        Sends a CONNECT packet and waits for the broker's response.
        Returns True iff a valid CONNACK was received.
        """
        connack = sr1(IP(dst=self.serverIP) /
                      UDP(sport=50000, dport=self.serverPort) /
                      MQTTSN() / MQTTSN_CONNECT(client=self.client),
                      verbose=self.verbose)
        return valid_ack(connack, MQTTSN_CONNACK)

    def disconnect(self):
        """Destroy the connection.

        Fire-and-forget: the rsmb broker tends to respond without the
        disconnect payload, so no acknowledgement is awaited.
        """
        _ = send(IP(dst=self.serverIP) /
                 UDP(sport=50000, dport=self.serverPort) /
                 MQTTSN() / MQTTSN_DISCONNECT(),
                 verbose=self.verbose)

    def register(self, topic):
        """Register the given topic with the broker.

        Returns the topicID the broker assigned.

        Raises:
            RuntimeError: if the broker did not acknowledge the registration.
        """
        regack = sr1(IP(dst=self.serverIP) /
                     UDP(sport=50000, dport=self.serverPort) /
                     MQTTSN() / MQTTSN_REGISTER(topic=topic),
                     verbose=self.verbose)
        if not valid_ack(regack, MQTTSN_REGACK):
            raise RuntimeError("register() not acknowledged.")
        return regack[MQTTSN_REGACK].topicID

    def publish(self, topicID, message, qos=1):
        """Publish `message` on the given topicID.

        With qos=1 the delivery is confirmed via PUBACK; with qos=0 the
        packet is sent without waiting for a response.

        Returns bool indicating success.  NOTE(review): qos values other
        than 0 and 1 send nothing and still return True — confirm intent.
        """
        frame = IP(dst=self.serverIP) / \
                UDP(sport=50000, dport=self.serverPort) / \
                MQTTSN() / MQTTSN_PUBLISH(qos=qos,
                                          topicID=topicID, message=message)
        if qos == 0:
            send(frame, verbose=self.verbose)
        if qos == 1:
            puback_frame = sr1(frame, verbose=self.verbose)
            if not valid_ack(puback_frame, MQTTSN_PUBACK):
                return False
        return True
| 35.346154 | 79 | 0.641785 | 2,349 | 0.511208 | 0 | 0 | 0 | 0 | 0 | 0 | 2,253 | 0.490316 |