| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
| tests/importer/votingparser_test.py | raphiz/bsAbstimmungen | 0 | 12783051 |
#!/usr/bin/env python
# coding=utf-8
import json
import pytest
from bsAbstimmungen.importer.votingimporter import VotingParser
from bsAbstimmungen.exceptions import AlreadyImportedException
from ..utils import mockdb
def test_raise_exception_when_already_parsed(mockdb):
parser = VotingParser(mockdb)
parser.parse(
'tests/data/Abst_0205_20130109_111125_0003_0000_sa.pdf'
)
with pytest.raises(AlreadyImportedException) as excinfo:
parser.parse('tests/data/Abst_0205_20130109_111125_0003_0000_sa.pdf')
def test_reuse_existing_councillors(mockdb):
parser = VotingParser(mockdb)
parser.parse(
'tests/data/Abst_0205_20130109_111125_0003_0000_sa.pdf'
)
parser.parse(
'tests/data/Abst_0147_20130605_090518_0001_0000_ab.pdf'
)
# Check the rough numbers
assert 2 == mockdb['votes'].count()
assert 124 == mockdb['councillors'].count()
def test_multiline_affairs(mockdb):
parser = VotingParser(mockdb)
parser.parse('tests/data/Abst_0205_20130109_111125_0003_0000_sa.pdf')
vote = mockdb['votes'].find()[0]
assert ('Bericht der Umwelt-, Verkehrs- und '
'Energiekommission zum Ratschlag Nr. 12.0788.01 '
'Rahmenausgabenbewilligung zur weiteren Umsetzung '
'von Tempo 30. Projektierung und Umsetzung von '
'Massnahmen aus dem aktualisierten Tempo 30-Konzept '
'sowie Bericht zu zehn Anzügen und zu zwei '
'Petitionen sowie Bericht der Kommissionsminderheit' ==
vote['affair'])
def test_parser_extracts_data(mockdb):
parser = VotingParser(mockdb)
parser.parse(
'tests/data/Abst_0147_20130605_090518_0001_0000_ab.pdf'
)
assert 1 == mockdb['votes'].count()
assert 100 == mockdb['councillors'].count()
    # Load verification details
    with open('tests/data/Abst_0147_20130605_090518_0001_0000_ab.json') as fh:
        verification = json.load(fh)
# Verify the imported vote
vote = mockdb['votes'].find_one({'nr': verification['vote']['nr']})
assert verification['vote']['timestamp'] == vote['timestamp'].isoformat()
assert verification['vote']['affair'] == vote['affair']
assert verification['vote']['proposal'] == vote['proposal']
assert verification['vote']['question'] == vote['question']
assert verification['vote']['type'] == vote['type']
    # Verify all councillors
for councillor in verification['votings']:
loaded = mockdb['councillors'].find_one({'fullname':
councillor['name']})
assert councillor['name'] == loaded['fullname']
assert councillor['fraction'] == loaded['fraction']
assert councillor['voting'] == loaded['votings'][0]['voting']
| 2.265625 | 2 |
| desktop/core/ext-py/urllib3-1.25.8/test/appengine/test_urlfetch.py | yetsun/hue | 5,079 | 12783052 |
"""These tests ensure that when running in App Engine standard with the
App Engine sandbox enabled that urllib3 appropriately uses the App
Engine-patched version of httplib to make requests."""
import httplib
import StringIO
from mock import patch
import pytest
from ..test_no_ssl import TestWithoutSSL
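# Minimal stand-in for the response object returned by
# google.appengine.api.urlfetch.fetch; it only carries the attributes these
# tests inspect.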
class MockResponse(object):
def __init__(self, content, status_code, content_was_truncated, final_url, headers):
self.content = content
self.status_code = status_code
self.content_was_truncated = content_was_truncated
self.final_url = final_url
self.header_msg = httplib.HTTPMessage(
StringIO.StringIO(
"".join(["%s: %s\n" % (k, v) for k, v in headers.iteritems()] + ["\n"])
)
)
self.headers = headers
@pytest.mark.usefixtures("sandbox")
class TestHTTP(TestWithoutSSL):
def test_urlfetch_called_with_http(self):
"""Check that URLFetch is used to fetch non-https resources."""
resp = MockResponse(
"OK", 200, False, "http://www.google.com", {"content-type": "text/plain"}
)
fetch_patch = patch("google.appengine.api.urlfetch.fetch", return_value=resp)
with fetch_patch as fetch_mock:
import urllib3
pool = urllib3.HTTPConnectionPool("www.google.com", "80")
r = pool.request("GET", "/")
assert r.status == 200, r.data
assert fetch_mock.call_count == 1
@pytest.mark.usefixtures("sandbox")
class TestHTTPS(object):
@pytest.mark.xfail(
reason="This is not yet supported by urlfetch, presence of the ssl "
"module will bypass urlfetch."
)
def test_urlfetch_called_with_https(self):
"""
Check that URLFetch is used when fetching https resources
"""
resp = MockResponse(
"OK", 200, False, "https://www.google.com", {"content-type": "text/plain"}
)
fetch_patch = patch("google.appengine.api.urlfetch.fetch", return_value=resp)
with fetch_patch as fetch_mock:
import urllib3
pool = urllib3.HTTPSConnectionPool("www.google.com", "443")
pool.ConnectionCls = urllib3.connection.UnverifiedHTTPSConnection
r = pool.request("GET", "/")
assert r.status == 200, r.data
assert fetch_mock.call_count == 1
| 2.109375 | 2 |
| bin/demo_disease2gene.py | cariaso/metapub | 28 | 12783053 |
from __future__ import absolute_import, print_function, unicode_literals
import sys
from tabulate import tabulate
from metapub import MedGenFetcher
try:
term = sys.argv[1]
except IndexError:
print('Supply a disease/syndrome/condition name in quotation marks as the argument to this script.')
sys.exit()
####
import logging
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("eutils").setLevel(logging.INFO)
####
fetch = MedGenFetcher()
uids = fetch.uids_by_term(term)
print(uids)
headers = ['CUI', 'Title', 'Semantic Type', 'MedGenUID',
'OMIM ID', 'Modes of Inheritance', 'Assoc Genes', ]
table = []
def _join_or_NA(some_list, select=None, joiner=','):
'returns a joined string or NA if empty'
if some_list and select:
return joiner.join(item[select] for item in some_list)
elif some_list:
return joiner.join([item for item in some_list])
else:
return 'NA'
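# Illustrative examples (not part of the original script):
#   _join_or_NA([{'hgnc': 'BRCA1'}, {'hgnc': 'BRCA2'}], select='hgnc') -> 'BRCA1,BRCA2'
#   _join_or_NA([]) -> 'NA'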
for this_id in uids:
concept = fetch.concept_by_uid(this_id)
#print concept.to_dict()
assert concept.medgen_uid == this_id
if concept.associated_genes:
line = [concept.CUI, concept.title, concept.semantic_type, concept.medgen_uid]
line.append(_join_or_NA(concept.OMIM))
line.append(_join_or_NA(concept.modes_of_inheritance, 'name'))
line.append(_join_or_NA(concept.associated_genes, 'hgnc'))
table.append(line)
else:
continue
print(tabulate(table, headers, tablefmt="simple"))
| 2.28125 | 2 |
| tests/ut/test_edge_volumes.py | CTERA-Networks/ctera-python-sdk | 5 | 12783054 |
from unittest import mock
from cterasdk import exception
from cterasdk.common import Object
from cterasdk.edge.enum import VolumeStatus
from cterasdk.edge import volumes
from tests.ut import base_edge
class TestEdgeVolumes(base_edge.BaseEdgeTest):
_drive_1 = 'SATA-1'
_drive_2 = 'SATA-2'
_drive_size = 81920
_mount_task_1 = (1, 'Task 1')
_mount_task_2 = (2, 'Task 2')
def setUp(self):
super().setUp()
self._volume_1_name = 'localcache'
self._volume_2_name = 'logs'
self._volumes = [self._volume_1_name, self._volume_2_name]
self._volume_passphrase = 'password'
self._mount_id = 'task id'
def test_get_all_volumes(self):
get_response = 'Success'
self._init_filer(get_response=get_response)
ret = volumes.Volumes(self._filer).get()
self._filer.get.assert_called_once_with('/config/storage/volumes')
self.assertEqual(ret, get_response)
def test_get_volume(self):
get_response = 'Success'
self._init_filer(get_response=get_response)
ret = volumes.Volumes(self._filer).get(self._volume_1_name)
self._filer.get.assert_called_once_with('/config/storage/volumes/' + self._volume_1_name)
self.assertEqual(ret, get_response)
def test_add_volume_default_args_single_device_success(self):
add_response = 'Success'
self._init_filer(add_response=add_response)
self._filer.get = mock.MagicMock(side_effect=TestEdgeVolumes._mock_no_arrays_single_drive)
track_volume_creation_status_mock = self.patch_call("cterasdk.edge.volumes.track")
ret = volumes.Volumes(self._filer).add(self._volume_1_name)
self._filer.get.assert_has_calls(
[
mock.call('/status/storage/arrays'),
mock.call('/status/storage/disks')
]
)
track_volume_creation_status_mock.assert_called_once_with(self._filer,
'/status/storage/volumes/' + self._volume_1_name + '/status',
[VolumeStatus.Ok],
[VolumeStatus.Formatting],
[VolumeStatus.Mounting, VolumeStatus.Checking, VolumeStatus.Repairing],
[VolumeStatus.Corrupted, VolumeStatus.Unknown])
self._filer.add.assert_called_once_with('/config/storage/volumes', mock.ANY)
expected_param = self._get_add_volume_param(device=TestEdgeVolumes._drive_1, size=TestEdgeVolumes._drive_size)
actual_param = self._filer.add.call_args[0][1]
self._assert_equal_objects(actual_param, expected_param)
self.assertEqual(ret, add_response)
def test_add_encrypted_volume_default_args_single_device_success(self):
add_response = 'Success'
self._init_filer(add_response=add_response)
self._filer.get = mock.MagicMock(side_effect=TestEdgeVolumes._mock_no_arrays_single_drive)
track_volume_creation_status_mock = self.patch_call("cterasdk.edge.volumes.track")
ret = volumes.Volumes(self._filer).add(self._volume_1_name, passphrase=self._volume_passphrase)
self._filer.get.assert_has_calls(
[
mock.call('/status/storage/arrays'),
mock.call('/status/storage/disks')
]
)
track_volume_creation_status_mock.assert_called_once_with(self._filer,
'/status/storage/volumes/' + self._volume_1_name + '/status',
[VolumeStatus.Ok],
[VolumeStatus.Formatting],
[VolumeStatus.Mounting, VolumeStatus.Checking, VolumeStatus.Repairing],
[VolumeStatus.Corrupted, VolumeStatus.Unknown])
self._filer.add.assert_called_once_with('/config/storage/volumes', mock.ANY)
expected_param = self._get_add_volume_param(device=TestEdgeVolumes._drive_1,
size=TestEdgeVolumes._drive_size,
passphrase=self._volume_passphrase)
actual_param = self._filer.add.call_args[0][1]
self._assert_equal_objects(actual_param, expected_param)
self.assertEqual(ret, add_response)
def test_add_volume_no_devices(self):
self._init_filer()
self._filer.get = mock.MagicMock(side_effect=TestEdgeVolumes._mock_no_devices)
with self.assertRaises(exception.CTERAException) as error:
volumes.Volumes(self._filer).add(self._volume_1_name)
self._filer.get.assert_has_calls(
[
mock.call('/status/storage/arrays'),
mock.call('/status/storage/disks')
]
)
self.assertEqual('Could not find any drives or arrays', error.exception.message)
def test_add_volume_invalid_device_name(self):
self._init_filer()
self._filer.get = mock.MagicMock(side_effect=TestEdgeVolumes._mock_no_arrays_multiple_drive)
with self.assertRaises(exception.InputError) as error:
volumes.Volumes(self._filer).add(self._volume_1_name, device='Invalid device name')
self._filer.get.assert_has_calls(
[
mock.call('/status/storage/arrays'),
mock.call('/status/storage/disks')
]
)
self.assertEqual('Invalid device name', error.exception.message)
def test_add_volume_must_specify_device_name(self):
self._init_filer()
self._filer.get = mock.MagicMock(side_effect=TestEdgeVolumes._mock_no_arrays_multiple_drive)
with self.assertRaises(exception.CTERAException) as error:
volumes.Volumes(self._filer).add(self._volume_1_name)
self._filer.get.assert_has_calls(
[
mock.call('/status/storage/arrays'),
mock.call('/status/storage/disks')
]
)
self.assertEqual('You must specify a drive or an array name', error.exception.message)
def test_add_volume_with_device_success(self):
add_response = 'Success'
self._init_filer(add_response=add_response)
self._filer.get = mock.MagicMock(side_effect=TestEdgeVolumes._mock_no_arrays_multiple_drive)
track_volume_creation_status_mock = self.patch_call("cterasdk.edge.volumes.track")
ret = volumes.Volumes(self._filer).add(self._volume_1_name, device=TestEdgeVolumes._drive_1)
self._filer.get.assert_has_calls(
[
mock.call('/status/storage/arrays'),
mock.call('/status/storage/disks')
]
)
track_volume_creation_status_mock.assert_called_once_with(self._filer,
'/status/storage/volumes/' + self._volume_1_name + '/status',
[VolumeStatus.Ok],
[VolumeStatus.Formatting],
[VolumeStatus.Mounting, VolumeStatus.Checking, VolumeStatus.Repairing],
[VolumeStatus.Corrupted, VolumeStatus.Unknown])
self._filer.add.assert_called_once_with('/config/storage/volumes', mock.ANY)
expected_param = self._get_add_volume_param(device=TestEdgeVolumes._drive_1, size=TestEdgeVolumes._drive_size)
actual_param = self._filer.add.call_args[0][1]
self._assert_equal_objects(actual_param, expected_param)
self.assertEqual(ret, add_response)
def test_add_volume_exceeding_drive_size(self):
self._init_filer()
self._filer.get = mock.MagicMock(side_effect=TestEdgeVolumes._mock_no_arrays_single_drive)
with self.assertRaises(exception.InputError) as error:
volumes.Volumes(self._filer).add(self._volume_1_name, size=999999999)
self._filer.get.assert_has_calls(
[
mock.call('/status/storage/arrays'),
mock.call('/status/storage/disks')
]
)
self.assertEqual('You cannot exceed the available storage capacity', error.exception.message)
def test_delete_volume_success(self):
delete_response = 'Success'
self._init_filer(delete_response=delete_response)
self._filer.tasks.by_name = mock.MagicMock(return_value=[TestEdgeVolumes._get_pending_mount_task(self._mount_id)])
self._filer.tasks.wait = mock.MagicMock()
ret = volumes.Volumes(self._filer).delete(self._volume_1_name)
self._filer.tasks.by_name.assert_called_once_with(' '.join(['Mounting', self._volume_1_name, 'file system']))
self._filer.tasks.wait.assert_called_once_with(self._mount_id)
self._filer.delete.assert_called_once_with('/config/storage/volumes/' + self._volume_1_name)
self.assertEqual(ret, delete_response)
def test_delete_volume_raise(self):
self._init_filer()
self._filer.delete = mock.MagicMock(side_effect=exception.CTERAException())
self._filer.tasks.by_name = mock.MagicMock(return_value=[])
with self.assertRaises(exception.CTERAException) as error:
volumes.Volumes(self._filer).delete(self._volume_1_name)
self._filer.tasks.by_name.assert_called_once_with(' '.join(['Mounting', self._volume_1_name, 'file system']))
self._filer.delete.assert_called_once_with('/config/storage/volumes/' + self._volume_1_name)
self.assertEqual('Volume deletion falied', error.exception.message)
def test_delete_all_volume_success(self):
delete_response = 'Success'
self._init_filer(get_response=self._get_volumes_response_param(), delete_response=delete_response)
self._filer.tasks.running = mock.MagicMock(return_value=TestEdgeVolumes._get_pending_mount_tasks())
self._filer.tasks.by_name = mock.MagicMock()
self._filer.tasks.wait = mock.MagicMock()
volumes.Volumes(self._filer).delete_all()
self._filer.get.assert_called_once_with('/config/storage/volumes')
self._filer.tasks.running.assert_called_once()
self._filer.delete.assert_has_calls(
[
mock.call('/config/storage/volumes/' + self._volume_1_name),
mock.call('/config/storage/volumes/' + self._volume_2_name)
]
)
def test_modify_volume_success(self):
before_volume_size = 1000
after_volume_size = 9999
put_response = 'Success'
self._init_filer(get_response=TestEdgeVolumes._get_volume_response(self._volume_1_name, before_volume_size),
put_response=put_response)
ret = volumes.Volumes(self._filer).modify(self._volume_1_name, 9999)
self._filer.get.assert_called_once_with('/config/storage/volumes/' + self._volume_1_name)
self._filer.put.assert_called_once_with('/config/storage/volumes/' + self._volume_1_name, mock.ANY)
expected_param = TestEdgeVolumes._get_volume_response(self._volume_1_name, after_volume_size)
actual_param = self._filer.put.call_args[0][1]
self._assert_equal_objects(actual_param, expected_param)
self.assertEqual(ret, put_response)
def test_modify_volume_not_found(self):
self._init_filer()
self._filer.get = mock.MagicMock(side_effect=exception.CTERAException())
with self.assertRaises(exception.CTERAException) as error:
volumes.Volumes(self._filer).modify(self._volume_1_name, 9999)
self._filer.get.assert_called_once_with('/config/storage/volumes/' + self._volume_1_name)
self.assertEqual('Failed to get the volume', error.exception.message)
@staticmethod
def _get_volume_response(name, size):
param = Object()
param.name = name
param.size = size
return param
def _get_volumes_response_param(self):
storage_volumes = []
for volume_name in self._volumes:
param = Object()
param.name = volume_name
storage_volumes.append(param)
return storage_volumes
def test_delete_no_volumes_found(self):
self._init_filer(get_response=[])
self._filer.tasks.running = mock.MagicMock(return_value=[])
volumes.Volumes(self._filer).delete_all()
self._filer.get.assert_called_once_with('/config/storage/volumes')
@staticmethod
def _get_pending_mount_tasks():
mount_id, task_name = TestEdgeVolumes._mount_task_1
task_1 = TestEdgeVolumes._get_pending_mount_task(mount_id, task_name)
mount_id, task_name = TestEdgeVolumes._mount_task_2
task_2 = TestEdgeVolumes._get_pending_mount_task(mount_id, task_name)
return [task_1, task_2]
@staticmethod
def _get_pending_mount_task(mount_id=None, name=None):
param = Object()
if mount_id:
param.id = mount_id
if name:
param.name = name
return param
@staticmethod
def _get_drive(name, capacity):
param = Object()
param.name = name
param.availableCapacity = capacity
return param
def _get_add_volume_param(self, device=None, size=None, passphrase=None):
param = Object()
param.name = self._volume_1_name
if device:
param.device = device
if size:
param.size = size
param.fileSystemType = 'xfs'
if passphrase:
param.encrypted = True
param.encPassphrase = passphrase
return param
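    # The _mock_* helpers below act as side effects for the mocked Gateway ``get``
    # call, returning canned listings for the storage arrays and disks status paths.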
@staticmethod
def _mock_no_devices(path):
if path == '/status/storage/arrays':
return []
if path == '/status/storage/disks':
return []
return None
@staticmethod
def _mock_no_arrays_single_drive(path):
if path == '/status/storage/arrays':
return []
if path == '/status/storage/disks':
return [TestEdgeVolumes._get_drive(TestEdgeVolumes._drive_1, TestEdgeVolumes._drive_size)]
return None
@staticmethod
def _mock_no_arrays_multiple_drive(path):
if path == '/status/storage/arrays':
return []
if path == '/status/storage/disks':
return [TestEdgeVolumes._get_drive(TestEdgeVolumes._drive_1, TestEdgeVolumes._drive_size),
TestEdgeVolumes._get_drive(TestEdgeVolumes._drive_2, TestEdgeVolumes._drive_size)]
return None
| 2.21875 | 2 |
| simple_model/utils.py | felipegr/simple-model | 76 | 12783055 |
import inspect
import re
import typing
NOT_WORD = re.compile(r'\W')
SNAKE_CASE = re.compile('([a-z0-9])([A-Z])')
SNAKE_CASE_AUX = re.compile('(.)([A-Z][a-z]+)')
_PRIVATE_ATTR_RE = re.compile(r'_[\w\d]+__[\w\d]')
def capitalize_first(string: str) -> str:
return string[0].upper() + string[1:] if string != '' else string
def camel_case(string: str) -> str:
string = capitalize_first(string)
for separator in ('_', '-', ' '):
if separator not in string:
continue
string = ''.join(capitalize_first(substr) for substr in string.split(separator))
return string
def coerce_to_alpha(string: str) -> str:
return NOT_WORD.sub('_', string)
def snake_case(string: str) -> str:
aux = SNAKE_CASE_AUX.sub(r'\1_\2', string)
return SNAKE_CASE.sub(r'\1_\2', aux).lower()
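# Illustrative examples (not from the original module):
#   camel_case('some_model_name') -> 'SomeModelName'
#   snake_case('SomeModelName')   -> 'some_model_name'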
def is_not_special_object(obj):
return not any((
inspect.isclass(obj),
inspect.ismethod(obj),
inspect.isfunction(obj),
inspect.isgeneratorfunction(obj),
inspect.isgenerator(obj),
inspect.isroutine(obj),
isinstance(obj, property),
))
def getkey(d: dict, key: typing.Any):
return d[key]
def remove_private_keys(d: dict) -> dict:
return {
k: v for k, v in d.items() if not k.startswith('__')
}
def is_private_attribute(name):
return _PRIVATE_ATTR_RE.match(name) is not None
| 2.796875 | 3 |
| githeart/settings.py | andressadotpy/template-projeto-selecao | 0 | 12783056 |
from pathlib import Path
import os
import django_heroku
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'requests',
'starred_repos',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_django',
]
TAGGIT_CASE_INSENSITIVE = True
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'social_django.middleware.SocialAuthExceptionMiddleware'
]
ROOT_URLCONF = 'githeart.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect'
],
},
},
]
WSGI_APPLICATION = 'githeart.wsgi.application'
DJANGO_SETTINGS_MODULE = 'githeart.settings'
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'githeart',
'USER': 'postgres',
'PASSWORD': '<PASSWORD>',
'HOST': 'localhost'
}
}
AUTHENTICATION_BACKENDS = (
'social_core.backends.github.GithubOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'githeart/static')
]
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
messages.ERROR: 'danger',
messages.SUCCESS: 'success'
}
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = 'home'
# GitHub Login
SOCIAL_AUTH_GITHUB_KEY = '<KEY>'
SOCIAL_AUTH_GITHUB_SECRET = '1917f9ce2cc914a7935e8eca01fb4fa3d3d771e9'
django_heroku.settings(locals())
| 1.9375 | 2 |
| _05_FUNCTIONS_ADVANCED_LAB/_05_min_max_sum.py | Andrey-V-Georgiev/PythonAdvanced | 1 | 12783057 |
def print_min_max_sum(*args):
num_list = list(args[0])
min_num = min(num_list)
max_num = max(num_list)
sum_num = sum(num_list)
print(f'The minimum number is {min_num}')
print(f'The maximum number is {max_num}')
print(f'The sum number is: {sum_num}')
input_string = input()
input_numbers = map(int, input_string.split(' '))
print_min_max_sum(input_numbers)
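# Example (illustrative): for the input "1 2 3" the script prints
# "The minimum number is 1", "The maximum number is 3" and "The sum number is: 6".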
| 3.90625 | 4 |
| test/test_acd.py | LCAV/PyLocus | 13 | 12783058 |
#!/usr/bin/env python
# module TEST_ACD
import unittest
import numpy as np
from .test_common import BaseCommon
from pylocus.point_set import PointSet
from pylocus.algorithms import reconstruct_acd
from pylocus.simulation import create_noisy_edm
class TestACD(BaseCommon.TestAlgorithms):
def setUp(self):
BaseCommon.TestAlgorithms.setUp(self)
self.create_points()
self.n_it = 10
def create_points(self, N=5, d=2):
print('TestACD:create_points')
self.pts = PointSet(N, d)
self.pts.set_points('random')
self.pts.init()
self.index = 0
def call_method(self, method=''):
print('TestACD:call_method')
Xhat, costs = reconstruct_acd(self.pts.edm,
W=np.ones(self.pts.edm.shape),
X0=self.pts.points,
print_out=False, sweeps=3)
return Xhat
def add_noise(self, noise=1e-6):
self.pts.edm = create_noisy_edm(self.pts.edm, noise)
if __name__ == "__main__":
unittest.main()
| 2.59375 | 3 |
| components/studio/monitor/urls.py | ScilifelabDataCentre/stackn | 0 | 12783059 |
from django.urls import path
from . import views
app_name = 'monitor'
urlpatterns = [
path('', views.overview, name='overview'),
path('<resource_type>/cpuchart', views.cpuchart, name='cpuchart'),
path('lab/delete/<uid>', views.delete_lab, name='delete_lab'),
path('serve/delete/<model_id>', views.delete_deployment, name='delete_deployment'),
]
| 1.546875 | 2 |
| test/test_cli.py | bninja/rump | 6 | 12783060 |
import json
import threading
import mock
import pytest
import requests
import time
from rump import dumps, cli, Settings
@pytest.fixture
def config_path(request):
return request.config.fixtures_path.join('settings', 'main.conf')
@pytest.fixture
def requests_path(request):
return request.config.fixtures_path.join('requests')
@pytest.fixture
def parser(config_path):
return cli.parser(conf_file=str(config_path))
@pytest.fixture
def settings(config_path):
return Settings.from_file(str(config_path))
def test_list(capsys, parser):
args = parser.parse_args(['list'])
cli.setup(args)
args.command(args)
out, err = capsys.readouterr()
assert out == '\n'.join([
'router1',
'router2',
'router3',
'',
])
assert err == ''
def test_show(capsys, parser):
args = parser.parse_args(['show', 'router1'])
cli.setup(args)
args.command(args)
out, err = capsys.readouterr()
assert out == dumps(args.settings.routers[0]) + '\n'
assert err == ''
def test_check(capsys, parser):
args = parser.parse_args(['check', 'router1'])
cli.setup(args)
rc = args.command(args)
assert rc == 0
out, err = capsys.readouterr()
assert out == ''
assert err == ''
def test_edit(tmpdir, parser):
path = tmpdir.join('edit1.json')
path.write(data=json.dumps({'compile_rules': False}), ensure=True)
args = parser.parse_args(['edit', 'router2', str(path)])
cli.setup(args)
args.command(args)
path = tmpdir.join('edit2.json')
path.write(data=json.dumps({'compile_rules': True}), ensure=True)
args = parser.parse_args(['edit', 'router2', str(path)])
cli.setup(args)
args.command(args)
def test_watch(capsys, tmpdir, parser):
def _watch():
args = parser.parse_args(['watch', 'router2', '-n', '1', '-t', '10'])
cli.setup(args)
args.command(args)
thd = threading.Thread(target=_watch)
thd.start()
time.sleep(1.0)
path = tmpdir.join('edit1.json')
path.write(data=json.dumps({'compile_rules': False}), ensure=True)
args = parser.parse_args(['edit', 'router2', str(path)])
cli.setup(args)
args.command(args)
thd.join()
out, _ = capsys.readouterr()
assert out == 'router changed - router2\n'
def test_eval(capsys, tmpdir, parser, requests_path):
requests = tmpdir.join('requests.http')
for request_path in requests_path.listdir():
requests.write(request_path.read(), ensure=True)
args = parser.parse_args(['eval', '-r', str(requests)])
cli.setup(args)
args.command(args)
out, _ = capsys.readouterr()
assert out == '\n'.join([
'https://www.google.com',
'https://www.yahoo.com',
'http://dev.google.com',
'',
])
def test_serve(parser, settings):
def _serve():
args = parser.parse_args(['serve'])
cli.setup(args)
with mock.patch('rump.cli.server_for') as patched:
patched.return_value = server
args.command(args)
server = cli.server_for(host='localhost', port=0, mode=None)
thd = threading.Thread(target=_serve)
thd.daemon = True
thd.start()
time.sleep(1.0)
try:
root = 'http://{0}:{1}'.format(*server.server_address)
for case in [
{
'path': '/1/a',
'headers': {'Host': 'google.example.com'},
'x-rump-forward': 'https://www.google.com',
'x-rump-redir-proto': 'https',
'x-rump-redir-host': 'www.google.com',
}, {
'path': '/2/b',
'headers': {'Host': 'yahoo.example.com'},
'x-rump-forward': 'https://www.yahoo.com',
'x-rump-redir-proto': 'https',
'x-rump-redir-host': 'www.yahoo.com',
}, {
'path': '/3/c',
'headers': {'Host': 'dev.google.com'},
'x-rump-forward': 'http://dev.google.com',
'x-rump-redir-proto': 'http',
'x-rump-redir-host': 'dev.google.com',
},
]:
resp = requests.get(root + case['path'], headers=case['headers'])
assert resp.status_code == 200
assert 'x-rump-forward' in resp.headers
assert resp.headers['x-rump-forward'] == case['x-rump-forward']
assert 'x-rump-redir-proto' in resp.headers
assert resp.headers['x-rump-redir-proto'] == case['x-rump-redir-proto']
assert 'x-rump-redir-host' in resp.headers
assert resp.headers['x-rump-redir-host'] == case['x-rump-redir-host']
finally:
server.shutdown()
thd.join()
| 2.15625 | 2 |
| mmtbx/regression/tst_sequence_validation.py | jbeilstenedmands/cctbx_project | 0 | 12783061 |
from __future__ import division
from libtbx import easy_mp
from libtbx import easy_pickle
from libtbx.utils import Sorry, null_out
import os
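# exercise() below walks through a series of sequence-vs-model validation checks:
# alignment of protein and nucleic-acid chains against reference sequences,
# mmCIF struct_ref/struct_ref_seq output, and sequence/chain copy-number estimates.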
def exercise () :
import libtbx.utils
if (libtbx.utils.detect_multiprocessing_problem() is not None) :
print "multiprocessing not available, skipping this test"
return
if (os.name == "nt"):
print "easy_mp fixed_func not supported under Windows, skipping this test"
return
from mmtbx.validation.sequence import validation, get_sequence_n_copies, \
get_sequence_n_copies_from_files
import iotbx.bioinformatics
import iotbx.pdb
from iotbx import file_reader
import libtbx.load_env # import dependency
from libtbx.test_utils import Exception_expected, contains_lines, approx_equal
from cStringIO import StringIO
pdb_in = iotbx.pdb.input(source_info=None, lines="""\
ATOM 2 CA ARG A 10 -6.299 36.344 7.806 1.00 55.20 C
ATOM 25 CA TYR A 11 -3.391 33.962 7.211 1.00 40.56 C
ATOM 46 CA ALA A 12 -0.693 34.802 4.693 1.00 67.95 C
ATOM 56 CA ALA A 13 0.811 31.422 3.858 1.00 57.97 C
ATOM 66 CA GLY A 14 4.466 31.094 2.905 1.00 49.24 C
ATOM 73 CA ALA A 15 7.163 28.421 2.671 1.00 54.70 C
ATOM 83 CA ILE A 16 6.554 24.685 2.957 1.00 51.79 C
ATOM 102 CA LEU A 17 7.691 23.612 6.406 1.00 42.30 C
ATOM 121 CA PTY A 18 7.292 19.882 5.861 1.00 36.68 C
ATOM 128 CA PHE A 19 5.417 16.968 4.327 1.00 44.99 C
ATOM 148 CA GLY A 20 3.466 14.289 6.150 1.00 41.99 C
ATOM 155 CA GLY A 21 1.756 11.130 4.965 1.00 35.77 C
ATOM 190 CA ALA A 24 1.294 19.658 3.683 1.00 47.02 C
ATOM 200 CA VAL A 24A 2.361 22.009 6.464 1.00 37.13 C
ATOM 216 CA HIS A 25 2.980 25.633 5.535 1.00 42.52 C
ATOM 234 CA LEU A 26 4.518 28.425 7.577 1.00 47.63 C
ATOM 253 CA ALA A 27 2.095 31.320 7.634 1.00 38.61 C
ATOM 263 CA ARG A 28 1.589 34.719 9.165 1.00 37.04 C
END""")
seq1 = iotbx.bioinformatics.sequence("MTTPSHLSDRYELGEILGFGGMSEVHLARD".lower())
v = validation(
pdb_hierarchy=pdb_in.construct_hierarchy(),
sequences=[seq1],
log=null_out(),
nproc=1)
out = StringIO()
v.show(out=out)
assert contains_lines(out.getvalue(), """\
sequence identity: 76.47%
13 residue(s) missing from PDB chain (9 at start, 1 at end)
2 gap(s) in chain
4 mismatches to sequence
residue IDs: 12 13 15 24""")
cif_block = v.as_cif_block()
assert list(cif_block['_struct_ref.pdbx_seq_one_letter_code']) == [
'MTTPSHLSDRYELGEILGFGGMSEVHLARD']
assert approx_equal(cif_block['_struct_ref_seq.pdbx_auth_seq_align_beg'],
['10', '14', '16', '19', '24'])
assert approx_equal(cif_block['_struct_ref_seq.pdbx_auth_seq_align_end'],
['11', '14', '17', '21', '28'])
assert approx_equal(cif_block['_struct_ref_seq.db_align_beg'],
['10', '14', '16', '19', '25'])
assert approx_equal(cif_block['_struct_ref_seq.db_align_end'],
['11', '14', '17', '21', '29'])
assert cif_block['_struct_ref_seq.pdbx_seq_align_beg_ins_code'][4] == 'A'
seq2 = iotbx.bioinformatics.sequence("MTTPSHLSDRYELGEILGFGGMSEVHLA")
v = validation(
pdb_hierarchy=pdb_in.construct_hierarchy(),
sequences=[seq2],
log=null_out(),
nproc=1)
out = StringIO()
v.show(out=out)
assert contains_lines(out.getvalue(), """\
1 residues not found in sequence
residue IDs: 28""")
try :
v = validation(
pdb_hierarchy=pdb_in.construct_hierarchy(),
sequences=[],
log=null_out(),
nproc=1)
except AssertionError :
pass
else :
raise Exception_expected
cif_block = v.as_cif_block()
assert list(cif_block['_struct_ref.pdbx_seq_one_letter_code']) == [
'MTTPSHLSDRYELGEILGFGGMSEVHLA-']
assert approx_equal(cif_block['_struct_ref_seq.pdbx_auth_seq_align_end'],
['11', '14', '17', '21', '27'])
assert approx_equal(cif_block['_struct_ref_seq.db_align_end'],
['11', '14', '17', '21', '28'])
#
pdb_in2 = iotbx.pdb.input(source_info=None, lines="""\
ATOM 2 CA ARG A 10 -6.299 36.344 7.806 1.00 55.20 C
ATOM 25 CA TYR A 11 -3.391 33.962 7.211 1.00 40.56 C
ATOM 46 CA ALA A 12 -0.693 34.802 4.693 1.00 67.95 C
ATOM 56 CA ALA A 13 0.811 31.422 3.858 1.00 57.97 C
ATOM 66 CA GLY A 14 4.466 31.094 2.905 1.00 49.24 C
ATOM 73 CA ALA A 15 7.163 28.421 2.671 1.00 54.70 C
ATOM 83 CA ILE A 16 6.554 24.685 2.957 1.00 51.79 C
ATOM 102 CA LEU A 17 7.691 23.612 6.406 1.00 42.30 C
TER
ATOM 1936 P G B 2 -22.947 -23.615 15.323 1.00123.20 P
ATOM 1959 P C B 3 -26.398 -26.111 19.062 1.00110.06 P
ATOM 1979 P U B 4 -29.512 -30.638 21.164 1.00101.06 P
ATOM 1999 P C B 5 -30.524 -36.109 21.527 1.00 92.76 P
ATOM 2019 P U B 6 -28.684 -41.458 21.223 1.00 87.42 P
ATOM 2062 P G B 8 -18.396 -45.415 21.903 1.00 80.35 P
ATOM 2085 P A B 9 -13.852 -43.272 24.156 1.00 77.76 P
ATOM 2107 P G B 10 -8.285 -44.242 26.815 1.00 79.86 P
END
""")
seq3 = iotbx.bioinformatics.sequence("AGCUUUGGAG")
v = validation(
pdb_hierarchy=pdb_in2.construct_hierarchy(),
sequences=[seq2,seq3],
log=null_out(),
nproc=1,
extract_coordinates=True)
out = StringIO()
v.show(out=out)
cif_block = v.as_cif_block()
assert approx_equal(cif_block['_struct_ref.pdbx_seq_one_letter_code'],
['MTTPSHLSDRYELGEILGFGGMSEVHLA', 'AGCUUUGGAG'])
assert approx_equal(cif_block['_struct_ref_seq.pdbx_auth_seq_align_beg'],
['10', '14', '16', '2', '6', '8'])
assert approx_equal(cif_block['_struct_ref_seq.pdbx_auth_seq_align_end'],
['11', '14', '17', '4', '6', '10'])
assert (len(v.chains[0].get_outliers_table()) == 3)
assert (len(v.get_table_data()) == 4)
assert approx_equal(
v.chains[0].get_mean_coordinate_for_alignment_range(11,11),
(-0.693, 34.802, 4.693))
assert approx_equal(
v.chains[0].get_mean_coordinate_for_alignment_range(11,14),
(2.93675, 31.43475, 3.53175))
assert (v.chains[0].get_highlighted_residues() == [11,12,14])
assert contains_lines(out.getvalue(), """\
3 mismatches to sequence
residue IDs: 12 13 15""")
assert contains_lines(out.getvalue(), """\
sequence identity: 87.50%
2 residue(s) missing from PDB chain (1 at start, 0 at end)
1 gap(s) in chain
1 mismatches to sequence
residue IDs: 5""")
s = easy_pickle.dumps(v)
seq4 = iotbx.bioinformatics.sequence("")
try :
v = validation(
pdb_hierarchy=pdb_in2.construct_hierarchy(),
sequences=[seq4],
log=null_out(),
nproc=1,
extract_coordinates=True)
except AssertionError :
pass
else :
raise Exception_expected
# check that nucleic acid chain doesn't get aligned against protein sequence
pdb_in = iotbx.pdb.input(source_info=None, lines="""\
ATOM 18932 P B DG D 1 -12.183 60.531 25.090 0.50364.79 P
ATOM 18963 P B DG D 2 -9.738 55.258 20.689 0.50278.77 P
ATOM 18994 P B DA D 3 -10.119 47.855 19.481 0.50355.17 P
ATOM 19025 P B DT D 4 -13.664 42.707 21.119 0.50237.06 P
ATOM 19056 P B DG D 5 -19.510 39.821 21.770 0.50255.45 P
ATOM 19088 P B DA D 6 -26.096 40.001 21.038 0.50437.49 P
ATOM 19120 P B DC D 7 -31.790 41.189 18.413 0.50210.00 P
ATOM 19149 P B DG D 8 -34.639 41.306 12.582 0.50313.99 P
ATOM 19179 P B DA D 9 -34.987 38.244 6.813 0.50158.92 P
ATOM 19210 P B DT D 10 -32.560 35.160 1.082 0.50181.38 P
HETATM19241 P BTSP D 11 -27.614 30.137 0.455 0.50508.17 P
""")
sequences, _ = iotbx.bioinformatics.fasta_sequence_parse.parse(
""">4GFH:A|PDBID|CHAIN|SEQUENCE
MSTEPVSASDKYQKISQLEHILKRPDTYIGSVETQEQLQWIYDEETDCMIEKNVTIVPGLFKIFDEILVNAADNKVRDPS
MKRIDVNIHAEEHTIEVKNDGKGIPIEIHNKENIYIPEMIFGHLLTSSNYDDDEKKVTGGRNGYGAKLCNIFSTEFILET
ADLNVGQKYVQKWENNMSICHPPKITSYKKGPSYTKVTFKPDLTRFGMKELDNDILGVMRRRVYDINGSVRDINVYLNGK
SLKIRNFKNYVELYLKSLEKKRQLDNGEDGAAKSDIPTILYERINNRWEVAFAVSDISFQQISFVNSIATTMGGTHVNYI
TDQIVKKISEILKKKKKKSVKSFQIKNNMFIFINCLIENPAFTSQTKEQLTTRVKDFGSRCEIPLEYINKIMKTDLATRM
FEIADANEENALKKSDGTRKSRITNYPKLEDANKAGTKEGYKCTLVLTEGDSALSLAVAGLAVVGRDYYGCYPLRGKMLN
VREASADQILKNAEIQAIKKIMGLQHRKKYEDTKSLRYGHLMIMTDQDHDGSHIKGLIINFLESSFPGLLDIQGFLLEFI
TPIIKVSITKPTKNTIAFYNMPDYEKWREEESHKFTWKQKYYKGLGTSLAQEVREYFSNLDRHLKIFHSLQGNDKDYIDL
AFSKKKADDRKEWLRQYEPGTVLDPTLKEIPISDFINKELILFSLADNIRSIPNVLDGFKPGQRKVLYGCFKKNLKSELK
VAQLAPYVSECTAYHHGEQSLAQTIIGLAQNFVGSNNIYLLLPNGAFGTRATGGKDAAAARYIYTELNKLTRKIFHPADD
PLYKYIQEDEKTVEPEWYLPILPMILVNGAEGIGTGWSTYIPPFNPLEIIKNIRHLMNDEELEQMHPWFRGWTGTIEEIE
PLRYRMYGRIEQIGDNVLEITELPARTWTSTIKEYLLLGLSGNDKIKPWIKDMEEQHDDNIKFIITLSPEEMAKTRKIGF
YERFKLISPISLMNMVAFDPHGKIKKYNSVNEILSEFYYVRLEYYQKRKDHMSERLQWEVEKYSFQVKFIKMIIEKELTV
TNKPRNAIIQELENLGFPRFNKEGKPYYGSPNDEIAEQINDVKGATSDEEDEESSHEDTENVINGPEELYGTYEYLLGMR
IWSLTKERYQKLLKQKQEKETELENLLKLSAKDIWNTDLKAFEVGYQEFLQRDAEAR
>4GFH:D|PDBID|CHAIN|SEQUENCE
GGATGACGATX
""")
v = validation(
pdb_hierarchy=pdb_in.construct_hierarchy(),
sequences=sequences,
log=null_out(),
nproc=1,)
out = StringIO()
v.show(out=out)
assert v.chains[0].n_missing == 0
assert v.chains[0].n_missing_end == 0
assert v.chains[0].n_missing_start == 0
assert len(v.chains[0].alignment.matches()) == 11
#
pdb_in = iotbx.pdb.input(source_info=None, lines="""\
ATOM 2 CA GLY A 1 1.367 0.551 0.300 1.00 7.71 C
ATOM 6 CA CYS A 2 2.782 3.785 1.683 1.00 5.18 C
ATOM 12 CA CYS A 3 -0.375 5.128 3.282 1.00 5.21 C
ATOM 18 CA SER A 4 -0.870 2.048 5.492 1.00 7.19 C
ATOM 25 CA LEU A 5 2.786 2.056 6.642 1.00 6.78 C
ATOM 33 CA PRO A 6 3.212 4.746 9.312 1.00 7.03 C
ATOM 40 CA PRO A 7 6.870 5.690 8.552 1.00 7.97 C
ATOM 47 CA CYS A 8 6.021 6.070 4.855 1.00 6.48 C
ATOM 53 CA ALA A 9 2.812 8.041 5.452 1.00 7.15 C
ATOM 58 CA LEU A 10 4.739 10.382 7.748 1.00 8.36 C
ATOM 66 CA SER A 11 7.292 11.200 5.016 1.00 7.00 C
ATOM 73 CA ASN A 12 4.649 11.435 2.264 1.00 5.40 C
ATOM 81 CA PRO A 13 1.879 13.433 3.968 1.00 5.97 C
ATOM 88 CA ASP A 14 0.485 15.371 0.986 1.00 7.70 C
ATOM 96 CA TYR A 15 0.565 12.245 -1.180 1.00 6.55 C
ATOM 108 CA CYS A 16 -1.466 10.260 1.363 1.00 7.32 C
ATOM 113 N NH2 A 17 -2.612 12.308 2.058 1.00 8.11 N
""")
seq = iotbx.bioinformatics.sequence("GCCSLPPCALSNPDYCX")
v = validation(
pdb_hierarchy=pdb_in.construct_hierarchy(),
sequences=[seq],
log=null_out(),
nproc=1,)
out = StringIO()
v.show(out=out)
assert v.chains[0].n_missing == 0
assert v.chains[0].n_missing_end == 0
assert v.chains[0].n_missing_start == 0
assert len(v.chains[0].alignment.matches()) == 17
#
pdb_in = iotbx.pdb.input(source_info=None, lines="""\
ATOM 2518 CA PRO C 3 23.450 -5.848 45.723 1.00 85.24 C
ATOM 2525 CA GLY C 4 20.066 -4.416 44.815 1.00 79.25 C
ATOM 2529 CA PHE C 5 19.408 -0.913 46.032 1.00 77.13 C
ATOM 2540 CA GLY C 6 17.384 -1.466 49.208 1.00 83.44 C
ATOM 2544 CA GLN C 7 17.316 -5.259 49.606 1.00 89.25 C
ATOM 2553 CA GLY C 8 19.061 -6.829 52.657 1.00 90.67 C
""")
sequences, _ = iotbx.bioinformatics.fasta_sequence_parse.parse(
""">1JN5:A|PDBID|CHAIN|SEQUENCE
MASVDFKTYVDQACRAAEEFVNVYYTTMDKRRRLLSRLYMGTATLVWNGNAVSGQESLSEFFEMLPSSEFQISVVDCQPV
HDEATPSQTTVLVVICGSVKFEGNKQRDFNQNFILTAQASPSNTVWKIASDCFRFQDWAS
>1JN5:B|PDBID|CHAIN|SEQUENCE
APPCKGSYFGTENLKSLVLHFLQQYYAIYDSGDRQGLLDAYHDGACCSLSIPFIPQNPARSSLAEYFKDSRNVKKLKDPT
LRFRLLKHTRLNVVAFLNELPKTQHDVNSFVVDISAQTSTLLCFSVNGVFKEVDGKSRDSLRAFTRTFIAVPASNSGLCI
VNDELFVRNASSEEIQRAFAMPAPTPSSSPVPTLSPEQQEMLQAFSTQSGMNLEWSQKCLQDNNWDYTRSAQAFTHLKAK
GEIPEVAFMK
>1JN5:C|PDBID|CHAIN|SEQUENCE
GQSPGFGQGGSV
""")
v = validation(
pdb_hierarchy=pdb_in.construct_hierarchy(),
sequences=sequences,
log=null_out(),
nproc=1,)
out = StringIO()
v.show(out=out)
assert v.chains[0].n_missing_start == 3
assert v.chains[0].n_missing_end == 3
assert v.chains[0].identity == 1.0
assert v.chains[0].alignment.match_codes == 'iiimmmmmmiii'
#
pdb_in = iotbx.pdb.input(source_info=None, lines="""\
ATOM 2 CA ALA A 2 -8.453 57.214 -12.754 1.00 52.95 C
ATOM 7 CA LEU A 3 -8.574 59.274 -9.471 1.00 24.33 C
ATOM 15 CA ARG A 4 -12.178 60.092 -8.575 1.00 28.40 C
ATOM 26 CA GLY A 5 -14.170 61.485 -5.667 1.00 26.54 C
ATOM 30 CA THR A 6 -17.784 60.743 -4.783 1.00 31.78 C
ATOM 37 CA VAL A 7 -19.080 64.405 -4.464 1.00 21.31 C
""")
seq = iotbx.bioinformatics.sequence("XALRGTV")
v = validation(
pdb_hierarchy=pdb_in.construct_hierarchy(),
sequences=[seq],
log=null_out(),
nproc=1,)
out = StringIO()
v.show(out=out)
assert v.chains[0].n_missing_start == 1
assert v.chains[0].n_missing_end == 0
assert v.chains[0].identity == 1.0
assert v.chains[0].alignment.match_codes == 'immmmmm'
#
pdb_in = iotbx.pdb.input(source_info=None, lines="""\
ATOM 2171 CA ASP I 355 5.591 -11.903 1.133 1.00 41.60 C
ATOM 2175 CA PHE I 356 7.082 -8.454 0.828 1.00 39.82 C
ATOM 2186 CA GLU I 357 5.814 -6.112 -1.877 1.00 41.12 C
ATOM 2195 CA GLU I 358 8.623 -5.111 -4.219 1.00 42.70 C
ATOM 2199 CA ILE I 359 10.346 -1.867 -3.363 1.00 43.32 C
ATOM 2207 CA PRO I 360 11.658 0.659 -5.880 1.00 44.86 C
ATOM 2214 CA GLU I 361 14.921 -0.125 -7.592 1.00 44.32 C
ATOM 2219 CA GLU I 362 15.848 3.489 -6.866 1.00 44.27 C
HETATM 2224 CA TYS I 363 16.482 2.005 -3.448 1.00 44.52 C
""")
seq = iotbx.bioinformatics.sequence("NGDFEEIPEEYL")
v = validation(
pdb_hierarchy=pdb_in.construct_hierarchy(),
sequences=[seq],
log=null_out(),
nproc=1,)
out = StringIO()
v.show(out=out)
assert v.chains[0].n_missing_start == 2
assert v.chains[0].n_missing_end == 1
assert v.chains[0].identity == 1.0
pdb_in = iotbx.pdb.input(source_info=None, lines="""\
ATOM 450 CA ASN A 1 37.242 41.665 44.160 1.00 35.89 C
ATOM 458 CA GLY A 2 37.796 38.269 42.523 1.00 30.13 C
HETATM 463 CA AMSE A 3 35.878 39.005 39.326 0.54 22.83 C
HETATM 464 CA BMSE A 3 35.892 39.018 39.323 0.46 22.96 C
ATOM 478 CA ILE A 4 37.580 38.048 36.061 1.00 22.00 C
ATOM 486 CA SER A 5 37.593 40.843 33.476 1.00 18.73 C
ATOM 819 CA ALA A 8 25.982 34.781 27.220 1.00 18.43 C
ATOM 824 CA ALA A 9 23.292 32.475 28.614 1.00 19.60 C
HETATM 830 CA BMSE A 10 22.793 30.814 25.223 0.41 22.60 C
HETATM 831 CA CMSE A 10 22.801 30.850 25.208 0.59 22.54 C
ATOM 845 CA GLU A 11 26.504 30.054 24.966 1.00 25.19 C
ATOM 854 CA GLY A 12 25.907 28.394 28.320 1.00 38.88 C
""")
seq = iotbx.bioinformatics.sequence("NGMISAAAAMEG")
v = validation(
pdb_hierarchy=pdb_in.construct_hierarchy(),
sequences=[seq],
log=null_out(),
nproc=1,)
out = StringIO()
v.show(out=out)
assert v.chains[0].alignment.a == 'NGMISXXAAMEG'
assert v.chains[0].alignment.b == 'NGMISAAAAMEG'
pdb_in = iotbx.pdb.input(source_info=None, lines="""\
ATOM 4615 CA ALA C 1 1.000 1.000 1.000 1.00 10.00
ATOM 4622 CA ALA C 2 1.000 1.000 1.000 1.00 10.00
ATOM 4627 CA ALA C 3 1.000 1.000 1.000 1.00 10.00
ATOM 4634 CA ALA C 4 1.000 1.000 1.000 1.00 10.00
ATOM 4646 CA ALA C 5 1.000 1.000 1.000 1.00 10.00
ATOM 4658 CA ALA C 6 1.000 1.000 1.000 1.00 10.00
ATOM 4664 CA ALA C 7 1.000 1.000 1.000 1.00 10.00
ATOM 4669 CA ALA C 8 1.000 1.000 1.000 1.00 10.00
ATOM 4680 CA ARG C 9 1.000 1.000 1.000 1.00 10.00
ATOM 4690 CA GLY C 10 1.000 1.000 1.000 1.00 10.00
ATOM 4698 CA PRO C 11 1.000 1.000 1.000 1.00 10.00
ATOM 4705 CA LYS C 12 1.000 1.000 1.000 1.00 10.00
ATOM 4712 CA TRP C 13 1.000 1.000 1.000 1.00 10.00
ATOM 4726 CA GLU C 14 1.000 1.000 1.000 1.00 10.00
ATOM 4738 CA SER C 15 1.000 1.000 1.000 1.00 10.00
ATOM 4744 CA THR C 16 1.000 1.000 1.000 1.00 10.00
ATOM 4751 CA GLY C 17 1.000 1.000 1.000 1.00 10.00
ATOM 4755 CA TYR C 18 1.000 1.000 1.000 1.00 10.00
ATOM 4767 CA PHE C 19 1.000 1.000 1.000 1.00 10.00
ATOM 4778 CA ALA C 20 1.000 1.000 1.000 1.00 10.00
ATOM 4786 CA ALA C 21 1.000 1.000 1.000 1.00 10.00
ATOM 4798 CA TRP C 22 1.000 1.000 1.000 1.00 10.00
ATOM 4812 CA GLY C 23 1.000 1.000 1.000 1.00 10.00
ATOM 4816 CA GLN C 24 1.000 1.000 1.000 1.00 10.00
ATOM 4822 CA GLY C 25 1.000 1.000 1.000 1.00 10.00
ATOM 4826 CA THR C 26 1.000 1.000 1.000 1.00 10.00
ATOM 4833 CA LEU C 27 1.000 1.000 1.000 1.00 10.00
ATOM 4841 CA VAL C 28 1.000 1.000 1.000 1.00 10.00
ATOM 4848 CA THR C 29 1.000 1.000 1.000 1.00 10.00
ATOM 4855 CA VAL C 30 1.000 1.000 1.000 1.00 10.00
ATOM 4862 CA SER C 31 1.000 1.000 1.000 1.00 10.00
ATOM 4868 CA SER C 32 1.000 1.000 1.000 1.00 10.00
END
""")
seq = iotbx.bioinformatics.sequence(
"AAAAAAAARGKWESPAALLKKAAWCSGTLVTVSSASAPKWKSTSGCYFAAPWNKRALRVTVLQSS")
v = validation(
pdb_hierarchy=pdb_in.construct_hierarchy(),
sequences=[seq],
log=null_out(),
nproc=1,)
out = StringIO()
v.show(out=out)
# all tests below here have additional dependencies
if (not libtbx.env.has_module("ksdssp")) :
print "Skipping advanced tests (require ksdssp module)"
return
pdb_file = libtbx.env.find_in_repositories(
relative_path="phenix_regression/pdb/1ywf.pdb",
test=os.path.isfile)
if (pdb_file is not None) :
seq = iotbx.bioinformatics.sequence("MGSSHHHHHHSSGLVPRGSHMAVRELPGAWNFRDVADTATALRPGRLFRSSELSRLDDAGRATLRRLGITDVADLRSSREVARRGPGRVPDGIDVHLLPFPDLADDDADDSAPHETAFKRLLTNDGSNGESGESSQSINDAATRYMTDEYRQFPTRNGAQRALHRVVTLLAAGRPVLTHCFAGKDRTGFVVALVLEAVGLDRDVIVADYLRSNDSVPQLRARISEMIQQRFDTELAPEVVTFTKARLSDGVLGVRAEYLAAARQTIDETYGSLGGYLRDAGISQATVNRMRGVLLG")
pdb_in = file_reader.any_file(pdb_file, force_type="pdb")
hierarchy = pdb_in.file_object.hierarchy
v = validation(
pdb_hierarchy=hierarchy,
sequences=[seq],
log=null_out(),
nproc=1,
include_secondary_structure=True,
extract_coordinates=True)
out = StringIO()
v.show(out=out)
aln1, aln2, ss = v.chains[0].get_alignment(include_sec_str=True)
assert ("HHH" in ss) and ("LLL" in ss) and ("---" in ss)
cif_block = v.as_cif_block()
assert cif_block['_struct_ref.pdbx_seq_one_letter_code'] == seq.sequence
assert list(
cif_block['_struct_ref_seq.pdbx_auth_seq_align_beg']) == ['4', '117']
assert list(
cif_block['_struct_ref_seq.pdbx_auth_seq_align_end']) == ['85', '275']
assert list(cif_block['_struct_ref_seq.seq_align_beg']) == ['1', '114']
assert list(cif_block['_struct_ref_seq.seq_align_end']) == ['82', '272']
# determine relative counts of sequences and chains
n_seq = get_sequence_n_copies(
pdb_hierarchy=hierarchy,
sequences=[seq] * 4,
copies_from_xtriage=4,
out=null_out())
assert (n_seq == 1)
hierarchy = hierarchy.deep_copy()
chain2 = hierarchy.only_model().chains()[0].detached_copy()
hierarchy.only_model().append_chain(chain2)
n_seq = get_sequence_n_copies(
pdb_hierarchy=hierarchy,
sequences=[seq] * 4,
copies_from_xtriage=2,
out=null_out())
assert (n_seq == 1)
n_seq = get_sequence_n_copies(
pdb_hierarchy=hierarchy,
sequences=[seq],
copies_from_xtriage=2,
out=null_out())
assert (n_seq == 4)
try :
n_seq = get_sequence_n_copies(
pdb_hierarchy=hierarchy,
sequences=[seq] * 3,
copies_from_xtriage=2,
out=null_out())
except Sorry, s :
assert ("round number" in str(s))
else :
raise Exception_expected
n_seq = get_sequence_n_copies(
pdb_hierarchy=hierarchy,
sequences=[seq] * 3,
copies_from_xtriage=2,
force_accept_composition=True,
out=null_out())
assert (n_seq == 1)
try :
n_seq = get_sequence_n_copies(
pdb_hierarchy=hierarchy,
sequences=[seq] * 4,
copies_from_xtriage=1,
out=null_out())
except Sorry, s :
assert ("less than" in str(s))
else :
raise Exception_expected
n_seq = get_sequence_n_copies(
pdb_hierarchy=hierarchy,
sequences=[seq] * 4,
copies_from_xtriage=1,
assume_xtriage_copies_from_sequence_file=True,
out=null_out())
assert (n_seq == 0.5)
hierarchy = hierarchy.deep_copy()
chain2 = hierarchy.only_model().chains()[0].detached_copy()
hierarchy.only_model().append_chain(chain2)
try :
n_seq = get_sequence_n_copies(
pdb_hierarchy=hierarchy,
sequences=[seq] * 2,
copies_from_xtriage=2,
out=null_out())
except Sorry, s :
assert ("round number" in str(s))
else :
raise Exception_expected
n_seq = get_sequence_n_copies(
pdb_hierarchy=hierarchy,
sequences=[seq],
copies_from_xtriage=1,
out=null_out())
assert (n_seq == 3)
hierarchy = hierarchy.deep_copy()
chain2 = hierarchy.only_model().chains()[0].detached_copy()
hierarchy.only_model().append_chain(chain2)
n_seq = get_sequence_n_copies(
pdb_hierarchy=hierarchy,
sequences=[seq] * 2,
copies_from_xtriage=2,
out=null_out())
assert (n_seq == 4)
# now with files as input
seq_file = "tmp_mmtbx_validation_sequence.fa"
open(seq_file, "w").write(">1ywf\n%s" % seq.sequence)
n_seq = get_sequence_n_copies_from_files(
pdb_file=pdb_file,
seq_file=seq_file,
copies_from_xtriage=4,
out=null_out())
try :
assert (n_seq == 4)
finally :
os.remove(seq_file)
if (__name__ == "__main__") :
exercise()
print "OK"
| 1.96875 | 2 |
| solutions/PE700.py | KerimovEmil/ProjectEuler | 1 | 12783062 |
"""
PROBLEM
Leonhard Euler was born on 15 April 1707.
Consider the sequence 1504170715041707n mod 4503599627370517.
An element of this sequence is defined to be an Eulercoin if it is strictly smaller than all previously found Eulercoins.
For example, the first term is 1504170715041707 which is the first Eulercoin. The second term is 3008341430083414 which
is greater than 1504170715041707 so is not an Eulercoin. However, the third term is 8912517754604 which is small enough
to be a new Eulercoin.
The sum of the first 2 Eulercoins is therefore 1513083232796311.
Find the sum of all Eulercoins.
ANSWER:
1517926517777556
Solve time ~0.003 seconds
"""
import unittest
from util.utils import timeit
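# A minimal brute-force cross-check (not part of the original solution): it scans the
# first few terms of 1504170715041707*n mod 4503599627370517 and keeps the strictly
# decreasing records, i.e. the Eulercoins defined above. Its first two results,
# 1504170715041707 and 8912517754604, sum to 1513083232796311 as in the problem statement.
def _first_eulercoins_bruteforce(a=1504170715041707, m=4503599627370517, terms=10):
    coins, current, best = [], 0, None
    for _ in range(terms):
        current = (current + a) % m
        if best is None or current < best:
            best = current
            coins.append(current)
    return coins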
# Explanation from ProjectEuler user RubiksCube:
# After brute-forcing the first 15 Eulercoins I tried the Euclidean algorithm and found that its step-by-step
# remainders give every coin as well as the distance between consecutive coins.
#
# Short example:
# Start with 2 steps and use the last right hand side to get the first coin.
# 4503599627370517 = 1504170715041707 * 2 + 1495258197287103
# 1504170715041707 = 1495258197287103 * 1 + 8912517754604
#
# First coin: 1495258197287103 * 1 + 8912517754604 = 1504170715041707
#
# Do two steps again:
# 1495258197287103 = 8912517754604 * 167 + 6867732268235
# 8912517754604 = 6867732268235 * 1 + 2044785486369
#
# Second coin: 6867732268235 * 1 + 2044785486369 = 8912517754604
#
# Do two more steps, note the "2" giving us 2 coins.
# 6867732268235 = 2044785486369 * 3 + 733375809128
# 2044785486369 = 733375809128 * 2 + 578033868113
#
# Third coin: 733375809128 * 2 + 578033868113 = 2044785486369
# Fourth coin: 733375809128 * 1 + 578033868113 = 1311409677241
#
# Repeat until the Euclidean algorithm is finished
class Problem700:
@timeit
def solve(self, a, m):
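        # Each value taken by ``a`` in this loop is the next (strictly smaller)
        # Eulercoin: the update ``a, m = -m % a, a`` follows the Euclidean-algorithm
        # remainders described in the comment block above, so summing the values
        # of ``a`` yields the sum of all Eulercoins.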
res = 0
while a > 0:
res += a
a, m = -m % a, a
return res
class Solution700(unittest.TestCase):
def setUp(self):
self.problem = Problem700()
def test_solution(self):
a = 1504170715041707 # 17 × 1249 × 12043 × 5882353
m = 4503599627370517 # prime number
self.assertEqual(1517926517777556, self.problem.solve(a, m))
if __name__ == '__main__':
unittest.main()
# [(euler_coin, n)]
# [(1504170715041707, 1), (8912517754604, 3), (2044785486369, 506), (1311409677241, 2527), (578033868113, 4548),
# (422691927098, 11117), (267349986083, 17686), (112008045068, 24255), (68674149121, 55079), (25340253174, 85903),
# (7346610401, 202630), (4046188430, 724617), (745766459, 1246604), (428410324, 6755007), (111054189, 12263410),
# (15806432, 42298633), (15397267, 326125654), (14988102, 609952675), (14578937, 893779696), (14169772, 1177606717),
# (13760607, 1461433738), (13351442, 1745260759), (12942277, 2029087780), (12533112, 2312914801), (12123947, 2596741822),
# (11714782, 2880568843), (11305617, 3164395864), (10896452, 3448222885), (10487287, 3732049906), (10078122, 4015876927),
# (9668957, 4299703948), (9259792, 4583530969), (8850627, 4867357990), (8441462, 5151185011), (8032297, 5435012032),
# (7623132, 5718839053), (7213967, 6002666074), (6804802, 6286493095), (6395637, 6570320116), (5986472, 6854147137),
# (5577307, 7137974158), (5168142, 7421801179), (4758977, 7705628200), (4349812, 7989455221), (3940647, 8273282242),
# (3531482, 8557109263), (3122317, 8840936284), (2713152, 9124763305), (2303987, 9408590326), (1894822, 9692417347),
# (1485657, 9976244368), (1076492, 10260071389), (667327, 10543898410), (258162, 10827725431), (107159, 21939277883),
# (63315, 54990108218), (19471, 88040938553), (14569, 297173645994), (9667, 506306353435), (4765, 715439060876),
# (4628, 1640010829193), (4491, 2564582597510), (4354, 3489154365827), (4217, 4413726134144), (4080, 5338297902461),
# (3943, 6262869670778), (3806, 7187441439095), (3669, 8112013207412), (3532, 9036584975729), (3395, 9961156744046),
# (3258, 10885728512363), (3121, 11810300280680), (2984, 12734872048997), (2847, 13659443817314), (2710, 14584015585631),
# (2573, 15508587353948), (2436, 16433159122265), (2299, 17357730890582), (2162, 18282302658899), (2025, 19206874427216),
# (1888, 20131446195533), (1751, 21056017963850), (1614, 21980589732167), (1477, 22905161500484), (1340, 23829733268801),
# (1203, 24754305037118), (1066, 25678876805435), (929, 26603448573752), (792, 27528020342069), (655, 28452592110386),
# (518, 29377163878703), (381, 30301735647020), (244, 31226307415337), (107, 32150879183654), (77, 65226330135625),
# (47, 98301781087596), (17, 131377232039567), (4, 295829915031105), (3, 1347772343115958), (2, 2399714771200811),
# (1, 3451657199285664)]
| 3.53125 | 4 |
| tests/test_generate_trashs.py | PTank/trashtalk | 0 | 12783063 |
from pathlib import Path
from trashtalk import generate_trashs
from tests.init_test import generate_trash
import pwd
def test_add_profil_info(tmpdir):
test_file = tmpdir.join('.trashtalk')
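    # The profile file holds one KEY=VALUE entry per line: MEDIA_PATH adds an extra
    # media directory and "TRASH_PATH=<path> , <name>" registers a named trash
    # location, as the assertions below expect.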
s = "MEDIA_PATH=/testmediapath\nTRASH_PATH=/testtrashpath , bob"
test_file.write(s)
f = Path(str(test_file))
generate_trashs.MEDIA_DIR = ['/media']
generate_trashs.TRASHS_PATH = []
generate_trashs.add_profil_info(f.open())
assert generate_trashs.MEDIA_DIR == ['/media', '/testmediapath']
assert generate_trashs.TRASHS_PATH == [('bob', '/testtrashpath')]
def test_get_media_trashs(generate_trash, tmpdir, monkeypatch):
trash = generate_trash
generate_trashs.MEDIA_DIR = [str(tmpdir)]
generate_trashs.TRASHS_PATH = [("test", trash.path)]
def mockgetpwnam(user):
return [1, 2, '0000']
monkeypatch.setattr(pwd, 'getpwnam', mockgetpwnam)
trashs, err = generate_trashs.get_media_trashs("remy")
assert trashs[0].path == str(tmpdir) + "/media/.Trash-0000"
assert trashs[0].name == "media"
assert trashs[1].path == trash.path
assert trashs[1].name == "test"
trashs, err = generate_trashs.get_media_trashs("remy", ['desk'])
assert trashs == []
assert bool(err) is True
generate_trashs.TRASHS_PATH = [("error", str(tmpdir) + "/fail")]
trashs, err = generate_trashs.get_media_trashs("remy", ['error'])
assert trashs == []
assert bool(err) is True
| 2.234375 | 2 |
| 2018/bamboofox/baby-lea/hack.py | ss8651twtw/CTF | 12 | 12783064 |
#!/usr/bin/env python
from pwn import *
import base64
r = remote('bamboofox.cs.nctu.edu.tw', 58789)
token = 'user=<PASSWORD>00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xc0user=<PASSWORD>'
auth = '<PASSWORD>'
r.recvuntil('input your token: ')
r.sendline(base64.b64encode(token))
r.recvuntil('input your authentication code: ')
r.sendline(auth)
r.interactive()
| 2.171875 | 2 |
| aggregator/admin.py | EndlessTrax/spondy-news | 1 | 12783065 |
from django.contrib import admin
from .models import Entry
def publish_selected(modeladmin, request, queryset):
queryset.update(is_published=True)
publish_selected.short_description = "Publish the selected posts"
@admin.register(Entry)
class EntryAdmin(admin.ModelAdmin):
list_display = ("pub_date", "title", "category", "is_featured", "is_published")
actions = [publish_selected]
ordering = ("-pub_date",)
| 1.945313 | 2 |
| z2/part3/updated_part2_batch/jm/parser_errors_2/912800483.py | kozakusek/ipp-2020-testy | 1 | 12783066 |
from part1 import (
gamma_board,
gamma_busy_fields,
gamma_delete,
gamma_free_fields,
gamma_golden_move,
gamma_golden_possible,
gamma_move,
gamma_new,
)
"""
scenario: test_random_actions
uuid: 912800483
"""
"""
random actions, total chaos
"""
board = gamma_new(4, 4, 2, 7)
assert board is not None
assert gamma_move(board, 1, 0, 1) == 1
assert gamma_move(board, 1, 0, 3) == 1
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_free_fields(board, 2) == 14
assert gamma_move(board, 1, 2, 1) == 1
assert gamma_move(board, 2, 1, 1) == 1
assert gamma_golden_possible(board, 2) == 1
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_golden_move(board, 1, 1, 1) == 1
assert gamma_move(board, 2, 2, 0) == 1
assert gamma_move(board, 2, 0, 2) == 1
assert gamma_free_fields(board, 2) == 10
assert gamma_move(board, 1, 2, 2) == 1
board654287219 = gamma_board(board)
assert board654287219 is not None
assert board654287219 == ("1...\n"
"2.1.\n"
"111.\n"
"..2.\n")
del board654287219
board654287219 = None
assert gamma_move(board, 2, 2, 2) == 0
assert gamma_move(board, 1, 1, 3) == 1
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_move(board, 2, 1, 0) == 1
assert gamma_move(board, 2, 0, 3) == 0
assert gamma_free_fields(board, 2) == 7
assert gamma_move(board, 1, 3, 0) == 1
assert gamma_move(board, 1, 0, 3) == 0
assert gamma_golden_possible(board, 1) == 0
assert gamma_move(board, 2, 3, 3) == 1
assert gamma_move(board, 1, 2, 3) == 1
board867387183 = gamma_board(board)
assert board867387183 is not None
assert board867387183 == ("1112\n"
"2.1.\n"
"111.\n"
".221\n")
del board867387183
board867387183 = None
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 1, 0, 1) == 0
assert gamma_move(board, 2, 2, 3) == 0
assert gamma_move(board, 1, 1, 1) == 0
board637069540 = gamma_board(board)
assert board637069540 is not None
assert board637069540 == ("1112\n"
"2.1.\n"
"111.\n"
".221\n")
del board637069540
board637069540 = None
assert gamma_move(board, 2, 3, 2) == 1
assert gamma_busy_fields(board, 2) == 5
assert gamma_move(board, 1, 0, 0) == 1
assert gamma_move(board, 2, 1, 3) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 1, 2, 0) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 2, 1, 3) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 2, 1, 3) == 0
assert gamma_move(board, 2, 0, 1) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 1, 3, 1) == 1
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 1, 1, 3) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_move(board, 1, 2, 1) == 0
assert gamma_move(board, 1, 3, 2) == 0
assert gamma_move(board, 2, 2, 1) == 0
assert gamma_busy_fields(board, 2) == 5
assert gamma_golden_possible(board, 2) == 1
gamma_delete(board)
| 2.015625
| 2
|
CorbanDallas1982/Home_work_3/Ex-3.py
|
kolyasalubov/Lv-639.pythonCore
| 0
|
12783067
|
var_1=int(input('Enter first variable:'))
var_2=int(input('Enter second variable:'))
print(f"First variable {var_1}, second variable {var_2}")
var_1,var_2=var_2,var_1
print(f"After change: The first variable is {var_1} and the second variable is {var_2}")
| 4.03125
| 4
|
app_questions/models.py
|
Audiotuete/backend_kassel_api
| 0
|
12783068
|
<reponame>Audiotuete/backend_kassel_api
from django.apps import apps as django_apps
from django.db import models
from django.contrib.postgres.fields import ArrayField
# from django.db.transaction import atomic
from ordered_model.models import OrderedModel
class Question(OrderedModel):
poll = models.ForeignKey('app_polls.Poll', on_delete=models.CASCADE, verbose_name="Poll (not changeable after creation)")
question_type = models.CharField(max_length=150, default='')
question_text = models.TextField(max_length=250)
# question_videolink = models.CharField(max_length=150, null=True, blank=True)
question_imagelink = models.CharField(max_length=150, null=True, blank=True)
pub_date = models.DateTimeField(auto_now_add=True)
__poll_pre_save = None
order_class_path = __module__ + '.Question'
class Meta:
ordering = ('order',)
def __str__(self):
return self.question_text
# When Question doesn't already exist create UserAnswer for every user in the poll
def save(self, *args, **kwargs):
question_model_name = self.__class__.__name__
models_dict = {
'QuestionOpen': 'UserAnswerOpen',
'QuestionYesOrNo': 'UserAnswerYesOrNo',
'QuestionMultiple': 'UserAnswerMultiple',
}
# Check if question_model_name is not "Question" to allow ordering inside the Questions Admin
# Because ordering inside Question Admin sets question_model_name = "Question".
if question_model_name == 'Question':
UserAnswerModel = django_apps.get_model('app_user_answers', models_dict['Question' + self.question_type])
else:
UserAnswerModel = django_apps.get_model('app_user_answers', models_dict[question_model_name])
UserPoll = django_apps.get_model('app_user_polls', 'UserPoll')
if self.pk is None:
super(Question, self).save(*args, **kwargs)
all_user_polls = UserPoll.objects.filter(poll = self.poll)
user_answer_list = []
for user_poll in all_user_polls:
user_answer_list.append(UserAnswerModel(user_id = user_poll.user_id, poll_id = user_poll.poll_id, question = self))
UserAnswerModel.objects.bulk_create(user_answer_list)
elif not self.poll_changed(self):
pass
# changing_user_answers = UserAnswerModel.objects.filter(question = self)
# @atomic
# def saves_user_answers(changing_user_answers):
# for user_answer in changing_user_answers:
# print('Poll for ' + user_answer.__class__.__name__ + ' changed from ' + user_answer.poll.poll_name + ' to ' + self.poll.poll_name)
# user_answer.poll = self.poll
# user_answer.save()
# saves_user_answers(changing_user_answers)
# super(Question, self).save(*args, **kwargs)
else:
super(Question, self).save(*args, **kwargs)
if not(self.question_type):
self.question_type = question_model_name[8:]
self.save()
def poll_changed(instance, *args, **kwargs):
pre_save_poll_id = Question.objects.get(pk = instance.pk).poll_id
if pre_save_poll_id == instance.poll_id:
return True
else:
return False
# To check if the question moved to another poll, we need to save the poll value before save inside "init" and compare it with the current poll.
# See elif above "self.__poll_pre_save is not self.poll"
# def __init__(self, *args, **kwargs):
# super(Question, self).__init__(*args, **kwargs)
# self.__poll_id_pre_save = self.poll
class QuestionYesOrNo(Question):
pass
class QuestionOpen(Question):
pass
class QuestionMultiple(Question):
options = ArrayField(models.CharField(max_length=150, blank=True), default=list, null=True, size=6)
| 2.421875
| 2
|
tests/q01.py
|
sophiarora/CS61A-Hog
| 0
|
12783069
|
test = {
'names': [
'q01',
'1',
'q1'
],
'points': 1,
'suites': [
[
{
'locked': True,
'test': """
>>> roll_dice(2, make_test_dice(4, 6, 1))
0d67364f3a6639e82e67af0673b4cc6e
# locked
""",
'type': 'doctest'
},
{
'locked': True,
'test': """
>>> roll_dice(3, make_test_dice(4, 6, 1))
e2f636ebfe71bb770b320ce6f799139c
# locked
""",
'type': 'doctest'
},
{
'locked': True,
'test': """
>>> roll_dice(3, make_test_dice(1, 2, 3))
e2f636ebfe71bb770b320ce6f799139c
# locked
""",
'type': 'doctest'
},
{
'locked': True,
'test': """
>>> counted_dice = make_test_dice(4, 1, 2, 6)
>>> roll_dice(3, counted_dice)
e2f636ebfe71bb770b320ce6f799139c
# locked
>>> roll_dice(1, counted_dice) # Make sure you call dice exactly num_rolls times!
414f04076138a0647c6470ad3afd249d
# locked
""",
'type': 'doctest'
}
]
]
}
| 2.15625
| 2
|
hparams.py
|
twiet/LM-LSTM-CRF
| 0
|
12783070
|
<reponame>twiet/LM-LSTM-CRF
class hparams:
checkpoint_dir = "./checkpoint/"
load_check_point = False
rand_embedding = False
gpu = 0
batch_size = 10
unk = "unk"
char_hidden = 300
word_hidden = 300
drop_out = 0.55
epoch = 200
start_epoch = 0
caseless = True
char_dim = 30
word_dim = 100
char_layers = 1
word_layers = 1
lr = 0.015
lr_decay = 0.05
fine_tune = False
load_opt = True
update = "sgd"
momentum = 0.9
clip_grad = 5
small_crf = True
mini_count = 5
lambda0 = 1
co_train = True
patience = 15
high_way = True
highway_layers = 1
eva_matrix = "fa"
least_iters = 50
shrink_embedding = True
decode_type = "label"
| 1.40625
| 1
|
getlivechatemailstotxt.py
|
54853315/livechatinc_chats_export
| 2
|
12783071
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'konakona'
__VERSION__ = "v1.0"
import os,sys,os.path,pycurl,cStringIO,json
python_version = sys.version_info[0]
if(python_version !=2):
print("本系统依赖于python2.7,您的系统不兼容, goodbye!")
exit()
start_date = raw_input("1.请输入开始日期(YYYY-MM-DD):")
end_date = raw_input("2.请输入结束日期(YYYY-MM-DD):")
start_page = raw_input("3.请输入需要从第几页开始(可以不填):")
end_page = raw_input("4.请输入需要结束页(可以不填):")
echo_flag = raw_input("5.输入'Y'代表保存文件,输入'N'代表打印结果:")
if(echo_flag!='Y' and echo_flag!='N'):
print("您的输入有误")
exit()
if(start_date=="" or end_date == ""):
print("请输入开始和结束日期")
exit()
if (start_date > end_date):
print("开始日期不能大于结束日期!")
exit()
if(not start_page.isdigit()):
start_page = 2
else:
start_page = int(start_page)
if(start_page < 2):
start_page = 2
print("程序初始化...")
content = cStringIO.StringIO()
buf = cStringIO.StringIO()
file = open('emails.txt','w')
# Replace with your own email and API key
config = ['input your email','input your key']
def runCurl(start_date,end_date,page = 1):
api_url = 'https://api.livechatinc.com/chats?date_from='+start_date+"&date_to="+end_date+"&page="+bytes(page)+"&has_goal=1"
c = pycurl.Curl()
c.setopt(c.URL, api_url)
c.setopt(c.WRITEFUNCTION, buf.write)
c.setopt(c.USERPWD, config[0]+":"+config[1])
c.setopt(c.CONNECTTIMEOUT, 0)
c.setopt(c.TIMEOUT, 0)
# c.setopt(c.PROXY, 'http://www.crazyphper.com') # uncomment if you need to go through a proxy
c.perform()
if(c.getinfo(c.RESPONSE_CODE) != 200):
print("[Warring] 在请求第"+page+"页时,失败了!")
# return
c.close()
# HTTP response code, e.g. 200.
# print('Status: %d' % c.getinfo(c.RESPONSE_CODE))
# Elapsed time for the transfer.
# print('Status: %f' % c.getinfo(c.TOTAL_TIME))
def saveContent(email):
if(email != ""):
content.write(email+"\n")
def saveFile(email):
if(email != ""):
file.write(email+"\n")
#----------------- main program starts -----------------
runCurl(start_date,end_date)
json_val = json.loads(buf.getvalue())
buf.seek(0)
buf.truncate()
if(not end_page.isdigit()):
totalPage = json_val["pages"]
else:
end_page = int(end_page)
if(end_page > 2 and end_page <= json_val["pages"]):
totalPage = end_page
else:
totalPage = json_val["pages"]
if(start_page >2):
pass
else:
for kk in json_val["chats"]:
if(echo_flag == 'Y'):
saveFile(kk["prechat_survey"][1]["value"])
else:
saveContent(kk["prechat_survey"][1]["value"])
for page in range(start_page,totalPage):
runCurl(start_date,end_date,page)
json_val = json.loads(buf.getvalue())
buf.seek(0)
buf.truncate()
for kk in json_val["chats"]:
if(echo_flag == 'Y'):
saveFile(kk["prechat_survey"][1]["value"])
else:
saveContent(kk["prechat_survey"][1]["value"])
if(echo_flag == 'Y'):
file.close()
else:
print(content.getvalue())
content.close()
if(raw_input("程序执行完毕,请按任意键结束...")):
exit()
| 2.890625
| 3
|
deepNeuralNetwork.py
|
fy-meng/lunarlander-saliency
| 0
|
12783072
|
<reponame>fy-meng/lunarlander-saliency<gh_stars>0
"""
File name: deepNeuralNetwork.py
Deep Neural Network Class implementation with Keras and Tensorflow [1].
Author: <NAME>
email: <EMAIL>
License: MIT
Date last modified: 03.12.2019
References:
[1] https://keras.io
Python Version: 3.6
"""
# Keras modules
from keras.models import Sequential
from keras.layers import Dense
import keras.optimizers as opt
from keras.models import load_model
import deepQNetwork
# Disable TensorFlow Information printouts
import warnings
warnings.filterwarnings('ignore')
'''
Constants
'''
C_VERBOSE_NONE = 0 # Printing is disabled
C_VERBOSE_INFO = 1 # Only information printouts (constructor)
C_VERBOSE_DEBUG = 2 # Debugging printing level (all printouts)
class DeepNeuralNetwork(object):
"""
Summary:
Deep Neural Network Class implementation with Keras and Tensorflow.
Private Attributes:
__verbose: int
Verbose level (0: None, 1: INFO, 2: DEBUG, see CONSTANTS section)
__model: Keras model
The Deep Neural Network model created using keras.
Public Attributes:
-
Private Methods:
__init__(inputs, file_name, outputs, hidden_layers, hidden_layers_size, optimizer_learning_rate,
seed, verbose): returns none
Class constructor. Creates a Deep Neural Network using Keras (frontend) and TensorFlow
(backend). In case file_name is present, then the model is loaded from the given file.
Public Methods:
train(X instances data, Y instances labels): returns none
Trains the Deep NN model.
predict(X instances data): returns a numpy array of labels
Predicts the label value for the input instances.
saveModel(file_name):
Saves the model.
"""
def __init__(self, file_name=None, inputs=None, outputs=None, hidden_layers=None, hidden_layers_size=None,
optimizer_learning_rate=0.001, seed=None, verbose=C_VERBOSE_NONE):
"""
Summary:
Class constructor. Creates a Deep Neural Network using Keras (frontend) and TensorFlow
(backend). In case file_name is present, then the model is loaded from the given file.
Args:
file_name: string
The model to be loaded. Rest parameters (except verbose) are ignored if the file_name is not
None.
inputs: int
The number of inputs of the Deep Neural Network.
outputs: int
The number of outputs of the Deep Neural Network.
hidden_layers: int
The number of hidden layers of the Deep Neural Network. Not including the first
and last layer.
hidden_layers_size: int
The size of each hidden layer of the Neural Network.
optimizer_learning_rate: float (Default 0.001)
The Adam optimizer learning rate.
seed: int
Optional Seed to be used with the Keras and Tensor Flow environments, for results
reproducibility.
verbose: int
Verbose level (0: None, 1: INFO, 2: DEBUG, see CONSTANTS section)
Raises:
-
Returns:
-
notes:
Considerations for a next version:
Pass activation function and optimizer as input parameters to the constructor.
"""
self.__verbose = verbose
# If file_name is present, just loads the model from the file
if file_name is not None:
if self.__verbose > C_VERBOSE_NONE:
print('\nDeep Neural Network object initialization (file_name = ', file_name, ')', sep='')
self.__model = load_model(file_name)
return None
if self.__verbose > C_VERBOSE_NONE:
print('\nDeep Neural Network object initialization (inputs = ', inputs, ', outputs = ', outputs,
', hidden_layers = ', hidden_layers, ', hidden_layers_size = ', hidden_layers_size,
', optimizer_learning_rate = ', optimizer_learning_rate, ', seed = ', seed, ')', sep='')
# Applies the given seed to the Keras (with Tensor Flow backend)
if seed is not None:
self.__applySeed(seed)
# Create a sequential model
self.__model = Sequential()
# Create first layer (use 'relu' as activation function, hardcoded)
self.__model.add(Dense(units=hidden_layers_size, activation='relu', input_dim=inputs))
# Create hidden layers (use 'relu' as activation function, hardcoded)
for i in range(hidden_layers):
self.__model.add(Dense(units=hidden_layers_size, activation='relu'))
# Create last layer (use 'linear' as activation function, hardcoded)
self.__model.add(Dense(units=outputs, activation='linear'))
# Compile model, optimizer used is Adam with its defaults values, only learning rate is passed
# for experimenting during the model complexity analysis.
self.__model.compile(loss='mse', optimizer=opt.Adam(lr=optimizer_learning_rate))
def __applySeed(self, seed):
"""
Summary:
Applies the given seed to the Keras with Tensor Flow backend, environment.
Args:
seed: int
Seed value.
Raises:
-
Returns:
-
notes:
see:
https://keras.io/getting-started/faq/#how-can-i-obtain-reproducible-results-using-keras-during-development
"""
if self.__verbose > C_VERBOSE_INFO:
print('Apply Seed to the \'Keras\' with the \'Tensor Flow\' Backend environment (seed = ', seed, ')',
sep='')
import tensorflow
from keras import backend
# Sets random seed for the tensor flow and limits the parallel threds to one.
tensorflow.set_random_seed(seed)
backend.set_session(tensorflow.Session(graph=tensorflow.get_default_graph(),
config=tensorflow.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1)))
def train(self, X, Y):
"""
Summary:
Trains the model with the input instance(s).
Args:
X: numpy array
The training instances (data).
Y: numpy array
The training labels.
Raises:
-
Returns:
-
notes:
-
"""
if self.__verbose > C_VERBOSE_INFO:
print('Deep Neural Network Train (training_instances = ', X.shape[0], ')', sep='')
# Train the model using all the default values, presented here for experimental reasons
# and future use. Verbose is disabled.
self.__model.fit(x=X, y=Y, batch_size=None, epochs=1, verbose=False, callbacks=None,
validation_split=0.0, validation_data=None, shuffle=True, class_weight=None,
sample_weight=None,
initial_epoch=0, steps_per_epoch=None, validation_steps=None)
def predict(self, X):
"""
Summary:
Predicts the label value for the input instance(s).
Args:
X: numpy array
The instance(s) (data) for which a label prediction is requested.
Raises:
-
Returns:
Y: numpy array
The prediction(s).
notes:
-
"""
if self.__verbose > C_VERBOSE_INFO:
print('Deep Neural Network Predict (prediction_instances = ', X.shape, ')', sep='')
# If there is only one instance reshape the array so each column holds each one of the feature values
# See keras predict function.
if len(X.shape) == 1:
return self.__model.predict(x=X.reshape(1, X.shape[0]), batch_size=None, verbose=0, steps=None)
else:
return self.__model.predict(x=X, batch_size=None, verbose=0, steps=None)
def get_qvalues(self, X):
assert isinstance(self.__model, deepQNetwork.DeepQNetwork)
if len(X.shape) == 1:
X = X.reshape(1, X.shape[0])
return self.__model.qValues(X)
def saveModel(self, file_name):
"""
Summary:
Saves the model.
Args:
file_name: string
The file in which the model should be saved.
Raises:
-
Returns:
-
notes:
-
"""
if self.__verbose > C_VERBOSE_INFO:
print('Deep Neural Network Model Saved (file_name = ', file_name, ')', sep='')
self.__model.save(file_name)
| 3.09375
| 3
|
codes/models/archs/arch_gcpnet.py
|
GuoShi28/GCP-Net
| 24
|
12783073
|
''' network architecture for backbone '''
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import models.archs.arch_util as arch_util
import numpy as np
import math
import pdb
from torch.nn.modules.utils import _pair
from models.archs.dcn.deform_conv import ModulatedDeformConvPack as DCN
class SimpleBlock(nn.Module):
def __init__(self, depth=3, n_channels=64, input_channels=3, output_channel=64, kernel_size=3):
super(SimpleBlock, self).__init__()
padding = 1
layers = []
layers.append(nn.Conv2d(in_channels=input_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding, bias=True))
layers.append(nn.LeakyReLU(negative_slope=0.1, inplace=True))
for _ in range(depth - 2):
layers.append(nn.Conv2d(in_channels=n_channels, out_channels=n_channels, kernel_size=kernel_size, padding=padding, bias=False))
layers.append(nn.LeakyReLU(negative_slope=0.1, inplace=True))
layers.append(nn.Conv2d(in_channels=n_channels, out_channels=output_channel, kernel_size=kernel_size, padding=padding, bias=False))
self.simple_block = nn.Sequential(*layers)
self._initialize_weights()
def forward(self, x):
out = self.simple_block(x)
return out
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.orthogonal_(m.weight)
print('init weight')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
class DualABlock(nn.Module):
def __init__(self, res_num=5, n_channels=64, input_channels=3, output_channel=64, kernel_size=3):
super(DualABlock, self).__init__()
padding = 1
self.res_num = res_num
self.square_conv = nn.Conv2d(in_channels=input_channels, out_channels=n_channels, \
kernel_size=(kernel_size, kernel_size), padding=(padding, padding), bias=False)
self.relu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
self.extract_conv = nn.Conv2d(in_channels=n_channels, out_channels=n_channels, \
kernel_size=kernel_size, padding=padding, bias=True)
self.res_block1 = SimpleBlock(depth=2, n_channels=n_channels, input_channels=n_channels, \
output_channel=n_channels, kernel_size=3) # 64, H, W
self.res_block2 = SimpleBlock(depth=2, n_channels=n_channels, input_channels=n_channels, \
output_channel=n_channels, kernel_size=3) # 64, H, W
self.down = nn.Conv2d(in_channels=n_channels, out_channels=int(n_channels/2), kernel_size=1, stride=1, bias=True)
self.up = nn.Conv2d(in_channels=int(n_channels/2), out_channels=n_channels, kernel_size=1, stride=1, bias=True)
self.spatial_att = nn.Conv2d(in_channels=n_channels, out_channels=1, kernel_size=7, stride=1, padding=3,bias=True)
self._initialize_weights()
def forward(self, x):
x_temp = self.square_conv(x)
x_temp = self.relu(x_temp)
x_temp = self.extract_conv(x_temp)
x_temp = x + x_temp
x_temp2 = self.res_block1(x_temp)
x_temp = x_temp + x_temp2
x_temp2 = self.res_block2(x_temp)
x_temp = x_temp + x_temp2
# channel attention
x_se = F.avg_pool2d(x_temp, kernel_size=(x_temp.size(2), x_temp.size(3)))
x_se = self.down(x_se)
x_se = self.relu(x_se)
x_se = self.up(x_se)
x_se = F.sigmoid(x_se)
x_se = x_se.repeat(1, 1, x_temp.size(2), x_temp.size(3))
# spatial attention
x_sp = F.sigmoid(self.spatial_att(x_temp))
x_temp = x_temp + x_temp * x_se + x_temp * x_sp
return x_temp
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.orthogonal_(m.weight)
print('init weight')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
class GCABlock(nn.Module):
def __init__(self, res_num=5, n_channels=64, input_channels=3, output_channel=64, kernel_size=3):
super(GCABlock, self).__init__()
padding = 1
self.res_num = res_num
self.square_conv = nn.Conv2d(in_channels=input_channels, out_channels=n_channels, \
kernel_size=kernel_size, padding=padding, bias=False)
self.relu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
self.extract_conv = nn.Conv2d(in_channels=n_channels, out_channels=n_channels, \
kernel_size=kernel_size, padding=padding, bias=True)
self.res_block1 = SimpleBlock(depth=2, n_channels=n_channels, input_channels=n_channels, \
output_channel=n_channels, kernel_size=3) # 64, H, W
self.res_block2 = SimpleBlock(depth=2, n_channels=n_channels, input_channels=n_channels, \
output_channel=n_channels, kernel_size=3) # 64, H, W
self.down = nn.Conv2d(in_channels=n_channels, out_channels=int(n_channels/2), kernel_size=1, stride=1, bias=True)
self.up = nn.Conv2d(in_channels=int(n_channels/2), out_channels=n_channels, kernel_size=1, stride=1, bias=True)
self.spatial_att = nn.Conv2d(in_channels=n_channels, out_channels=1, kernel_size=7, stride=1, padding=3,bias=True)
self._initialize_weights()
def forward(self, x, guided_lam, guided_beta):
x_temp = self.square_conv(x)
x_temp = x_temp.mul(guided_lam) + guided_beta
x_temp = self.relu(x_temp)
x_temp = self.extract_conv(x_temp)
x_temp = x + x_temp
x_temp2 = self.res_block1(x_temp)
x_temp = x_temp + x_temp2
x_temp2 = self.res_block2(x_temp)
x_temp = x_temp + x_temp2
# channel attention
x_se = F.avg_pool2d(x_temp, kernel_size=(x_temp.size(2), x_temp.size(3)))
x_se = self.down(x_se)
x_se = self.relu(x_se)
x_se = self.up(x_se)
x_se = F.sigmoid(x_se)
x_se = x_se.repeat(1, 1, x_temp.size(2), x_temp.size(3))
# spatial attention
x_sp = F.sigmoid(self.spatial_att(x_temp))
x_temp = x_temp + x_temp * x_se + x_temp * x_sp
return x_temp
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.orthogonal_(m.weight)
print('init weight')
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
class SimpleLSTM(nn.Module):
def __init__(self, input_dim, hidden_dim):
super(SimpleLSTM, self).__init__()
self.nf = input_dim
self.hf = hidden_dim
self.conv = nn.Conv2d(self.nf+self.hf, 4*self.hf, 3, 1, 1, bias=True)
def forward(self, input_tensor, h_cur, c_cur):
if h_cur is None:
tensor_size = (input_tensor.size(2),input_tensor.size(3))
h_cur = self._init_hidden(batch_size=input_tensor.size(0),tensor_size=tensor_size)
c_cur = self._init_hidden(batch_size=input_tensor.size(0),tensor_size=tensor_size)
combined = torch.cat([input_tensor, h_cur], dim=1) # concatenate along channel axis
combined_conv = self.conv(combined)
cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hf, dim=1)
i = torch.sigmoid(cc_i)
f = torch.sigmoid(cc_f)
o = torch.sigmoid(cc_o)
g = torch.tanh(cc_g)
c_next = f * c_cur + i * g
h_next = o * torch.tanh(c_next)
return h_next, c_next
def _init_hidden(self, batch_size, tensor_size):
height, width = tensor_size
return torch.zeros(batch_size, self.hf, height, width).cuda()
class PCD_Align(nn.Module):
''' Alignment module using Pyramid, Cascading and Deformable convolution
with 3 pyramid levels.
'''
def __init__(self, nf=64, groups=8):
super(PCD_Align, self).__init__()
# L3: level 3, 1/4 spatial size
self.L3_offset_conv1 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True) # concat for diff
self.L3_rnn = SimpleLSTM(nf, int(nf/2))
self.L3_rnn_conv = nn.Conv2d(nf+int(nf/2), nf, 3, 1, 1, bias=True)
self.L3_offset_conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.L3_dcnpack = DCN(nf, nf, 3, stride=1, padding=1, dilation=1, deformable_groups=groups,
extra_offset_mask=True)
# L2: level 2, 1/2 spatial size
self.L2_offset_conv1 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True) # concat for diff
self.L2_rnn = SimpleLSTM(nf, int(nf/2))
self.L2_rnn_conv = nn.Conv2d(nf+int(nf/2), nf, 3, 1, 1, bias=True)
self.L2_offset_conv2 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True) # concat for offset
self.L2_offset_conv3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.L2_dcnpack = DCN(nf, nf, 3, stride=1, padding=1, dilation=1, deformable_groups=groups,
extra_offset_mask=True)
self.L2_fea_conv = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True) # concat for fea
# L1: level 1, original spatial size
self.L1_offset_conv1 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True) # concat for diff
self.L1_rnn = SimpleLSTM(nf, int(nf/2))
self.L1_rnn_conv = nn.Conv2d(nf+int(nf/2), nf, 3, 1, 1, bias=True)
self.L1_offset_conv2 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True) # concat for offset
self.L1_offset_conv3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.L1_dcnpack = DCN(nf, nf, 3, stride=1, padding=1, dilation=1, deformable_groups=groups,
extra_offset_mask=True)
self.L1_fea_conv = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True) # concat for fea
# Cascading DCN
self.cas_offset_conv1 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True) # concat for diff
self.cas_offset_conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.cas_dcnpack = DCN(nf, nf, 3, stride=1, padding=1, dilation=1, deformable_groups=groups,
extra_offset_mask=True)
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
def forward(self, nbr_fea_l, ref_fea_l, guided_nbr_fea_l, guided_ref_fea_l, h_cur, c_cur):
'''align other neighboring frames to the reference frame in the feature level
nbr_fea_l, ref_fea_l: [L1, L2, L3], each with [B,C,H,W] features
'''
h_next = []
c_next = []
# L3
L3_comine = torch.cat((guided_nbr_fea_l[2], guided_ref_fea_l[2]), dim=1)
L3_offset = self.lrelu(self.L3_offset_conv1(L3_comine))
L3_offset_temp, c_out = self.L3_rnn(L3_offset, h_cur[0], c_cur[0])
h_next.append(L3_offset_temp)
c_next.append(c_out)
L3_offset = torch.cat((L3_offset, L3_offset_temp), dim=1)
L3_offset = self.lrelu(self.L3_rnn_conv(L3_offset))
L3_offset = self.lrelu(self.L3_offset_conv2(L3_offset))
L3_fea = self.lrelu(self.L3_dcnpack([nbr_fea_l[2], L3_offset]))
# L2
L2_comine = torch.cat((guided_nbr_fea_l[1], guided_ref_fea_l[1]), dim=1)
L2_offset = self.lrelu(self.L2_offset_conv1(L2_comine))
L2_offset_temp, c_out = self.L2_rnn(L2_offset, h_cur[1], c_cur[1])
h_next.append(L2_offset_temp)
c_next.append(c_out)
L2_offset = torch.cat((L2_offset, L2_offset_temp), dim=1)
L2_offset = self.lrelu(self.L2_rnn_conv(L2_offset))
L3_offset = F.interpolate(L3_offset, scale_factor=2, mode='bilinear', align_corners=False)
L2_offset = self.lrelu(self.L2_offset_conv2(torch.cat([L2_offset, L3_offset * 2], dim=1)))
L2_offset = self.lrelu(self.L2_offset_conv3(L2_offset))
L2_fea = self.L2_dcnpack([nbr_fea_l[1], L2_offset])
L3_fea = F.interpolate(L3_fea, scale_factor=2, mode='bilinear', align_corners=False)
L2_fea = self.lrelu(self.L2_fea_conv(torch.cat([L2_fea, L3_fea], dim=1)))
# L1
L1_comine = torch.cat((guided_nbr_fea_l[0], guided_ref_fea_l[0]), dim=1)
L1_offset = self.L1_offset_conv1(L1_comine)
L1_offset_temp, c_out = self.L1_rnn(L1_offset, h_cur[2], c_cur[2])
h_next.append(L1_offset_temp)
c_next.append(c_out)
L1_offset = torch.cat((L1_offset, L1_offset_temp), dim=1)
L1_offset = self.lrelu(self.L1_rnn_conv(L1_offset))
L2_offset = F.interpolate(L2_offset, scale_factor=2, mode='bilinear', align_corners=False)
L1_offset = self.lrelu(self.L1_offset_conv2(torch.cat([L1_offset, L2_offset * 2], dim=1)))
L1_offset = self.lrelu(self.L1_offset_conv3(L1_offset))
L1_fea = self.L1_dcnpack([nbr_fea_l[0], L1_offset])
L2_fea = F.interpolate(L2_fea, scale_factor=2, mode='bilinear', align_corners=False)
L1_fea = self.L1_fea_conv(torch.cat([L1_fea, L2_fea], dim=1))
# Cascading
offset_comine = torch.cat((L1_fea, ref_fea_l[0]), dim=1)
offset = self.lrelu(self.cas_offset_conv1(offset_comine))
offset = self.lrelu(self.cas_offset_conv2(offset))
L1_fea = self.lrelu(self.cas_dcnpack([L1_fea, offset]))
return L1_fea, h_next, c_next
class GCPNet(nn.Module):
def __init__(self, nf=64, nframes=5, groups=8, in_channel=1, output_channel=1, center=None):
super(GCPNet, self).__init__()
self.nf = nf
self.center = nframes // 2 if center is None else center
self.nframes = nframes
## GCP Branch
self.feature_guided1 = SimpleBlock(depth=3, n_channels=nf, input_channels=in_channel*2*2, \
output_channel=nf, kernel_size=3) # 64, H, W
self.feature_guided1_lam = nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=True, padding=1)
self.feature_guided1_beta = nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=True, padding=1)
self.feature_guided2 = SimpleBlock(depth=3, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H, W
self.feature_guided2_lam = nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=True, padding=1)
self.feature_guided2_beta = nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=True, padding=1)
self.feature_guided3 = SimpleBlock(depth=3, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H, W
self.feature_guided3_lam = nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=True, padding=1)
self.feature_guided3_beta = nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=True, padding=1)
self.feature_guided4 = SimpleBlock(depth=3, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H, W
self.feature_guided4_lam = nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=True, padding=1)
self.feature_guided4_beta = nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=True, padding=1)
self.feature_guided5 = SimpleBlock(depth=3, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H, W
self.feature_guided6 = SimpleBlock(depth=3, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H, W
self.feature_guided6_up = nn.ConvTranspose2d(in_channels=nf, out_channels=nf,\
kernel_size=2, stride=2, padding=0, bias=True) # 64, H*2, W*2
self.feature_guided6_lam = nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=True, padding=1)
self.feature_guided6_beta = nn.Conv2d(in_channels=nf, out_channels=nf, kernel_size=3, stride=1, bias=True, padding=1)
## IntraF Module
self.feature_extract = SimpleBlock(depth=5, n_channels=nf, input_channels=in_channel*4*2, \
output_channel=nf, kernel_size=3) # 64, H, W
self.feature_extract_acse1 = GCABlock(res_num=2, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H, W
self.feature_extract_acse2 = GCABlock(res_num=2, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H, W
self.feature_extract_acse3 = GCABlock(res_num=2, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H*2, W*2
self.feature_extract_acse4 = GCABlock(res_num=2, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H*2, W*2
## InterF Module
self.fea_L2_conv1 = nn.Conv2d(nf, nf, 3, 2, 1, bias=True)
self.fea_L2_conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.fea_L3_conv1 = nn.Conv2d(nf, nf, 3, 2, 1, bias=True)
self.fea_L3_conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.pcd_align = PCD_Align(nf=nf, groups=groups)
self.merge = nn.Conv2d(nf*nframes, nf, 3, 1, 1, bias=True)
self.feature_up = nn.ConvTranspose2d(in_channels=nf, out_channels=nf,\
kernel_size=2, stride=2, padding=0, bias=True) # 64, H*2, W*2
# encoder
self.conv_block_s1 = SimpleBlock(depth=2, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H*2, W*2
self.acse_block_s1 = DualABlock(res_num=2, n_channels=nf, input_channels=nf, \
output_channel=nf, kernel_size=3) # 64, H*2, W*2
self.pool1 = nn.Conv2d(nf, 2*nf, 3, 2, 1, bias=True) # 128
self.conv_block_s2 = SimpleBlock(depth=2, n_channels=2*nf, input_channels=2*nf, \
output_channel=2*nf, kernel_size=3) # 128, H, W
self.acse_block_s2 = DualABlock(res_num=2, n_channels=2*nf, input_channels=2*nf, \
output_channel=2*nf, kernel_size=3) # 128, H, W
self.pool2 = nn.Conv2d(2*nf, 4*nf, 3, 2, 1, bias=True) # 256
self.conv_block_s3 = SimpleBlock(depth=2, n_channels=4*nf, input_channels=4*nf, \
output_channel=4*nf, kernel_size=3) # 256, H//2, W//2
self.acse_block_s3 = DualABlock(res_num=2, n_channels=4*nf, input_channels=4*nf, \
output_channel=4*nf, kernel_size=3) # 256, H//2, W//2
self.conv_block_s3_2 = SimpleBlock(depth=2, n_channels=4*nf, input_channels=4*nf, \
output_channel=4*nf, kernel_size=3) # 256, H//2, W//2
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
# decoder
self.up1 = nn.ConvTranspose2d(in_channels=4*nf, out_channels=2*nf,\
kernel_size=2, stride=2, padding=0, bias=True) # 128, H, W
### With SkipConnection
# cat with conv_block_s4 # 128, H, W
self.conv_block_s4 = SimpleBlock(depth=2, n_channels=2*nf, input_channels=4*nf, \
output_channel=2*nf, kernel_size=3) # 128, H, W
self.acse_block_s4 = DualABlock(res_num=2, n_channels=2*nf, input_channels=2*nf, \
output_channel=2*nf, kernel_size=3) # 128, H, W
self.up2 = nn.ConvTranspose2d(in_channels=2*nf, out_channels=nf,\
kernel_size=2, stride=2, padding=0, bias=True) # 64, H*2, W*2
# cat with conv_block_s3 # 64, H*2, W*2
self.conv_block_s5 = SimpleBlock(depth=3, n_channels=nf, input_channels=2*nf, \
output_channel=output_channel, kernel_size=3) # 64, H*2, W*2
def forward(self, x, nmap):
B, N, C, H, W = x.size() # N video frames, C is 4 response to RGGB channel
# GCP Branch
x_gr = x[:,:,1:3,:,:].clone()
x_gr_map = nmap[:,:,1:3,:,:].clone()
x_gr = x_gr.view(-1, int(C/2), H, W)
x_gr_map = x_gr_map.view(-1, int(C/2), H, W)
temp = torch.cat([x_gr, x_gr_map], dim=1)
x_gr1 = self.feature_guided1(temp)
x_gr1_lam = self.feature_guided1_lam(x_gr1)
x_gr1_beta = self.feature_guided1_beta(x_gr1)
x_gr2 = self.feature_guided2(x_gr1)
x_gr2_lam = self.feature_guided2_lam(x_gr2)
x_gr2_beta = self.feature_guided2_beta(x_gr2)
x_gr3 = self.feature_guided3(x_gr2)
x_gr3_lam = self.feature_guided3_lam(x_gr3)
x_gr3_beta = self.feature_guided3_beta(x_gr3)
x_gr4 = self.feature_guided4(x_gr3)
x_gr4_lam = self.feature_guided4_lam(x_gr4)
x_gr4_beta = self.feature_guided4_beta(x_gr4)
x_gr5 = self.feature_guided5(x_gr4)
x_gr5 = x_gr5.view(B, N, -1, H, W)
x_gr6 = self.feature_guided6(x_gr5[:, self.center, :, :, :])
x_gr5 = x_gr5.view(B*N, -1, H, W)
x_gr6 = self.feature_guided6_up(x_gr6)
x_gr6_lam = self.feature_guided6_lam(x_gr6)
x_gr6_beta = self.feature_guided6_beta(x_gr6)
# IntraF Module
x_temp = x.view(-1, C, H, W)
x_nm_temp = nmap.view(-1, C, H, W)
temp = torch.cat([x_temp, x_nm_temp], dim=1)
x_s1 = self.feature_extract(temp) # B*N, fea_C, H, W
x_s1 = self.feature_extract_acse1(x_s1, x_gr1_lam, x_gr1_beta)
x_s1 = self.feature_extract_acse2(x_s1, x_gr2_lam, x_gr2_beta)
x_s1 = self.feature_extract_acse3(x_s1, x_gr3_lam, x_gr3_beta) # B*N, fea_C, H, W
x_s1 = self.feature_extract_acse4(x_s1, x_gr4_lam, x_gr4_beta) # B*N, fea_C, H, W
# InterF Module
x_s1 = self.align_feature(x_s1, x_gr5, B, N, self.nf, H, W) # [B*N, fea, H, W] -> [B, N, fea, H, W]
x_s1 = self.merge(x_s1.view(-1, self.nf*N, H, W))
# Merge Module: encoder -- decoder
x_s1 = self.feature_up(x_s1) # B, fea_C, H*2, W*2
x_s1 = x_s1.mul(x_gr6_lam) + x_gr6_beta
###
x_s1 = self.conv_block_s1(x_s1) # 64, H*2, W*2
x_s1 = self.acse_block_s1(x_s1)
###
L1_temp = x_s1.clone()
###
x_s2 = self.pool1(x_s1) # 128, H, W
x_s2 = self.conv_block_s2(x_s2) # 128, H, W
x_s2 = self.acse_block_s2(x_s2) # 128, H, W
###
L2_temp = x_s2.clone()
###
x_s3 = self.pool2(x_s2) # 256, H//2, W//2
x_s3 = self.conv_block_s3(x_s3) # 256, H//2, W//2
x_s3 = self.acse_block_s3(x_s3) # 256, H//2, W//2
x_s3 = self.conv_block_s3_2(x_s3) # 256, H//2, W//2
# decoder
out = self.up1(x_s3) # 128, H, W
out = torch.cat((out, L2_temp), 1) # 256, H, W
out = self.conv_block_s4(out) # 128, H, W
out = self.acse_block_s4(out) # 128, H, W
out = self.up2(out) # 64, H*2, W*2
out = torch.cat((out, L1_temp), 1) # 128, H*2, W*2
out = self.conv_block_s5(out) # out_ch, H, W
return out
def align_feature(self, feature, guided_feature, B, N, C, H, W):
feature_temp = torch.cat([feature, guided_feature], dim=0)
# L2
L2_fea = self.lrelu(self.fea_L2_conv1(feature_temp)) # H//2, W//2
L2_fea = self.lrelu(self.fea_L2_conv2(L2_fea))
# L3
L3_fea = self.lrelu(self.fea_L3_conv1(L2_fea)) # H//4, W//4
L3_fea = self.lrelu(self.fea_L3_conv2(L3_fea))
L1_fea = feature_temp.view(2*B, N, -1, H, W)
L2_fea = L2_fea.view(2*B, N, -1, H // 2, W // 2)
L3_fea = L3_fea.view(2*B, N, -1, H // 4, W // 4)
#### align using DConv
# ref feature list
ref_fea_l = [
L1_fea[0:B, self.center, :, :, :].clone(), L2_fea[0:B, self.center, :, :, :].clone(),
L3_fea[0:B, self.center, :, :, :].clone()
]
ref_fea_l_g = [
L1_fea[B:, self.center, :, :, :].clone(), L2_fea[B:, self.center, :, :, :].clone(),
L3_fea[B:, self.center, :, :, :].clone()
]
aligned_fea = []
h_cur = [None, None, None]
c_cur = [None, None, None]
for i in range(N):
nbr_fea_l = [
L1_fea[0:B, i, :, :, :].clone(), L2_fea[0:B, i, :, :, :].clone(),
L3_fea[0:B, i, :, :, :].clone()
]
nbr_fea_l_g = [
L1_fea[B:, i, :, :, :].clone(), L2_fea[B:, i, :, :, :].clone(),
L3_fea[B:, i, :, :, :].clone()
]
a_fea, h_cur, c_cur = self.pcd_align(nbr_fea_l, ref_fea_l, nbr_fea_l_g, ref_fea_l_g, h_cur, c_cur)
aligned_fea.append(a_fea)
aligned_fea = torch.stack(aligned_fea, dim=1) # [B, N, C, H, W]
return aligned_fea
| 2.546875
| 3
|
crims2s/training/infer.py
|
crim-ca/crims2s
| 7
|
12783074
|
import hydra
import logging
import os
import torch
import tqdm
import xarray as xr
from ..dataset import S2SDataset, TransformedDataset
from ..transform import ExampleToPytorch, CompositeTransform
from ..util import ECMWF_FORECASTS, collate_with_xarray
from .lightning import S2STercilesModule
from .util import find_checkpoint_file
_logger = logging.getLogger(__name__)
def terciles_pytorch_to_xarray(
t2m, tp, example_forecast, dims=["category", "lead_time", "latitude", "longitude"]
):
t2m_array = xr.DataArray(data=t2m.detach().numpy(), dims=dims, name="t2m")
tp_array = xr.DataArray(data=tp.detach().numpy(), dims=dims, name="tp")
dataset = xr.Dataset(data_vars={"t2m": t2m_array, "tp": tp_array,})
dataset = dataset.assign_coords(
{
"forecast_year": example_forecast.forecast_year.data,
"forecast_monthday": example_forecast.forecast_monthday.data,
"lead_time": example_forecast.lead_time.data,
"valid_time": example_forecast.valid_time.data,
"forecast_time": example_forecast.forecast_time.data,
"latitude": example_forecast.latitude.data,
"longitude": example_forecast.longitude.data,
"category": ["below normal", "near normal", "above normal"],
}
).expand_dims(["forecast_year", "forecast_monthday"])
return dataset
def concat_predictions(predictions):
yearly_predictions = {}
for p in predictions:
year = int(p.forecast_year.data)
yearly_list = yearly_predictions.get(year, [])
yearly_list.append(p)
yearly_predictions[year] = yearly_list
nested_datasets = [yearly_predictions[k] for k in sorted(yearly_predictions.keys())]
yearly_datasets = []
for l in nested_datasets:
l = sorted(l, key=lambda x: str(x.forecast_monthday[0]))
d = xr.concat(l, dim="forecast_monthday")
yearly_datasets.append(d)
return xr.concat(yearly_datasets, dim="forecast_year")
def fix_dims_for_output(forecast_dataset):
"""Manipulate the dimensions of the dataset of a single forecast so that we
can concatenate them easily."""
return (
forecast_dataset.stack(
{"forecast_label": ["forecast_year", "forecast_monthday"]}
)
.expand_dims("forecast_time")
.drop("forecast_label")
.squeeze("forecast_label")
)
def example_to_cuda(example):
new_example = {}
for k in example:
if k not in ["monthday", "month", "year"]:
new_example[k] = example[k].cuda()
else:
new_example[k] = example[k]
return new_example
@hydra.main(config_path="conf", config_name="infer")
def cli(cfg):
transform = hydra.utils.instantiate(cfg.experiment.transform)
# Find where we convert to pytorch. For inference we delay the conversion to pytorch
# because we want to use the xarray data as a template to generate the output file.
for i, t in enumerate(transform.transforms):
if isinstance(t, ExampleToPytorch):
pytorch_transform_idx = i
last_transform = CompositeTransform(transform.transforms[pytorch_transform_idx:])
transform.transforms = transform.transforms[:pytorch_transform_idx]
years = list(range(cfg.begin, cfg.end))
if cfg.experiment.dataset.index is not None:
month, day = ECMWF_FORECASTS[cfg.experiment.dataset.index]
label = f"{month:02}{day:02}.nc"
_logger.info("Targetting monthday %s", label)
name_filter = lambda x: x.endswith(label)
else:
name_filter = None
dataset = TransformedDataset(
S2SDataset(
hydra.utils.to_absolute_path(cfg.test_dataset_dir),
years=years,
name_filter=name_filter,
include_features=cfg.experiment.dataset.load_features,
),
transform,
)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=1,
batch_sampler=None,
collate_fn=collate_with_xarray,
num_workers=int(cfg.num_workers),
shuffle=False,
)
checkpoint_path = find_checkpoint_file(
hydra.utils.to_absolute_path(cfg.checkpoint_dir)
)
_logger.info(f"Will run on checkpoint {checkpoint_path}")
model = hydra.utils.instantiate(cfg.experiment.model)
optimizer = hydra.utils.call(cfg.experiment.optimizer, model)
lightning_module = S2STercilesModule.load_from_checkpoint(
checkpoint_path, model=model, optimizer=optimizer
)
lightning_module.eval()
lightning_module.freeze()
lightning_module.cuda()
datasets_of_examples = []
for example in tqdm.tqdm(dataloader):
example_forecast = example["terciles"]
pytorch_example = last_transform(example)
pytorch_example = example_to_cuda(pytorch_example)
t2m_terciles, tp_terciles, *_ = lightning_module(pytorch_example)
dataset = terciles_pytorch_to_xarray(
t2m_terciles.cpu(),
tp_terciles.cpu(),
example_forecast,
dims=["batch", "category", "lead_time", "latitude", "longitude"],
)
datasets_of_examples.append(fix_dims_for_output(dataset))
sorted_datasets = sorted(
datasets_of_examples, key=lambda x: str(x.forecast_time.data[0])
)
ml_prediction = (
xr.concat(sorted_datasets, dim="forecast_time")
.drop("valid_time")
.squeeze("batch")
)
_logger.info(f"Outputting forecasts to {os.getcwd() + '/' + cfg.output_file}.")
ml_prediction.to_netcdf(cfg.output_file)
if __name__ == "__main__":
cli()
| 2.28125
| 2
|
cli/files.py
|
bcarld/vision-tools
| 0
|
12783075
|
<reponame>bcarld/vision-tools
#!/usr/bin/env python
# IBM_PROLOG_BEGIN_TAG
#
# Copyright 2019,2020 IBM International Business Machines Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# IBM_PROLOG_END_TAG
import sys
import json
import paiv
import paiv_cli_utils
from paiv_cli_utils import reportSuccess, reportApiError, translate_flags
# All of the PAIV CLI requires Python 3.6 due to the use of f-strings
# Make the check in a common location
if sys.hexversion < 0x03060000:
sys.exit("Python 3.6 or newer is required to run this program.")
server = None
# Common flag and description strings for usage statements
ds_file_flags = "(--dsid=<dataset-id>) (--fileid=<file-id>)"
ds_file_description = """ --dsid Required parameter identifying the dataset to which
the file belongs
--fileid Required parameter identifying the targeted file"""
#--- Upload Operation ---------------------------------------------
upload_usage = """
Usage: files upload --dsid=<dataset_id> <file_paths>...
Where:
--dsid Required parameter that identifies the dataset into which the
file(s) are to be loaded
<file_paths> Space separated list of file paths to upload
Uploads one or more files to a dataset.
Note that at this time, directories of files are not supported."""
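# A hypothetical invocation of the upload operation documented above; the dataset id
# and file names are made-up placeholders, not values from this project:
#   files upload --dsid=abc123 image_001.jpg image_002.jpg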
def upload(params):
"""Handles the 'upload' operation for loading files into a dataset.
The "<file_paths>" from 'params' is passed to the library.
"""
dsid = params.get("--dsid", "missing_id")
rsp = server.files.upload(dsid, params["<file_paths>"])
if rsp is None:
try:
results = server.json()["resultList"]
total = len(results)
success = sum([1 for x in results if x["result"] == "success"])
fail = sum([1 for x in results if x["result"] == "fail"])
except:
total = "?"
success = "?"
fail = "?"
reportApiError(server,
f"Failure uploading files to dataset {dsid}; total={total}, successes={success}, fails={fail}")
else:
try:
results = server.json()["resultList"]
total = len(results)
except:
total = "?"
reportSuccess(server, f"Successfully uploaded {total} files to dataset {dsid}")
#--- Change/Update Operation --------------------------------------
change_usage = f"""
Usage: files change {ds_file_flags} [--catid=<category_id>]
Where:
{ds_file_description}
--catid Optional parameter to change the category with which the file
is associated. The category must already exist. An
empty string ("") for category id will disassociate the file
from its current category
Modifies metadata for a file. Currently the only modification available
through this operation is the category association."""
def update(params):
"""Handles the 'change' operation for modifying a file.
Expected flags in 'params' are translated to Json Field names to identify modifications to be made"""
dsid = params.get("--dsid", "missing_id")
fileid = params.get("--fileid", "missing_id")
expectedArgs = {'--catid': 'target_category_id'}
kwargs = translate_flags(expectedArgs, params)
kwargs["action"] = "change_category"
rsp = server.files.action(dsid, fileid, **kwargs)
if rsp is None:
reportApiError(server, f"Failure attempting to change file id '{fileid}' in dataset '{dsid}'")
else:
reportSuccess(server, f"Changed file id '{fileid}' in dataset '{dsid}'")
#--- Delete Operation ---------------------------------------------
delete_usage = f"""
Usage: files delete {ds_file_flags}
Where:
{ds_file_description}
Deletes the indicated file. At this time, only 1 file can be
deleted at a time.
"""
def delete(params):
"""Deletes one file identified by the --dsid and --fileid parameters.
Future work should allow a list of files."""
dsid = params.get("--dsid", "missing_id")
fileid = params.get("--fileid", "missing_id")
rsp = server.files.delete(dsid, fileid)
if rsp is None:
reportApiError(server, f"Failure attempting to delete file id '{fileid}' in dataset '{dsid}'")
else:
reportSuccess(server, f"Deleted file id '{fileid}' in dataset '{dsid}'")
#--- List/Report Operation ----------------------------------------
list_usage = f"""
Usage: files list --dsid=<dataset_id> [--catid=<category_id>] [--parentid=<parent_id>]
[--sort=<string>] [--summary]
{paiv_cli_utils.limit_skip_flags}
Where:
--dsid Required parameter that identifies the dataset to which the files
belong.
--catid Optional parameter to filter results to files belonging to the
indicated category. Only 1 category can be specified
--parentid Optional parameter to filter results to files with the
indicated parent. Only 1 parent can be specified.
--sort Comma separated string of field names on which to sort.
Add " DESC" after a field name to change to a descending sort.
If adding " DESC", the field list must be enclosed in quotes.
--summary Flag requesting only summary output for each dataset returned
Generates a JSON list of files matching the input criteria."""
def report(params):
"""Handles the 'list' operation.
'params' flags are translated into query-parameter variable names."""
summaryFields = None
if params["--summary"]:
summaryFields = ["_id", "original_file_name", "file_type"]
dsid = params.get("--dsid", "missing_id")
expectedArgs = {'--catid': 'category_id',
'--parentid': 'parent_id',
'--sort': 'sortby',
'--limit': 'limit',
'--skip': 'skip'}
kwargs = translate_flags(expectedArgs, params)
rsp = server.files.report(dsid, **kwargs)
if rsp is None:
reportApiError(server, "Failure attempting to list files")
else:
reportSuccess(server, None, summaryFields=summaryFields)
#--- Show Operation -----------------------------------------------
show_usage = f"""
Usage: files show --dsid=<dataset_id> --fileid=<file_id>
Where:
{ds_file_description}
Shows detail metadata information for the indicated file."""
def show(params):
"""Handles the 'show' operation to show details of a single file"""
dsid = params.get("--dsid", "missing_id")
fileid = params.get("--fileid", "missing_id")
rsp = server.files.show(dsid, fileid)
if rsp is None:
reportApiError(server, f"Failure attempting to get file id '{fileid}' in dataset id '{dsid}'")
else:
reportSuccess(server)
# --- Download Operation -------------------------------------------
download_usage = f"""
Usage: files download --dsid=<dataset_id> --fileid=<file_id> [--thumbnail]
Where:
{ds_file_description}
--thumbnail Optional parameter to download the thumbnail instead of
the file.
Downloads the image associated with the indicated file."""
def download(params):
"""Handles the 'download' operation to show details of a single file"""
dsid = params.get("--dsid", "missing_id")
fileid = params.get("--fileid", "missing_id")
print("'download' operation not yet implemented", file=sys.stderr)
return -1
#--- savelabels Operation -----------------------------------------
savelabels_usage = f"""
Usage: files savelabels --dsid=<dataset_id> --fileid=<file_id>
(--label_file=<json_file> | <json_string>)
Where:
{ds_file_description}
--label_file Optional parameter identifying the file that contains the
Json label information. Only one of '--label_file' or
'<json_string>' must be supplied.
<json_string> Optional string containing the json for the label.
Only one of '--label_file' or '<json_string>' must be supplied.
Saves a group of labels belonging to the indicated file. This command replaces
all labels currently associated with the file with those labels provided. For
adding a single label, see the 'object-labels' command."""
def savelabels(params):
"""Handles the 'savelabels' operation"""
dsid = params.get("--dsid", "missing_id")
fileid = params.get("--fileid", "missing_id")
file_name = params.get("--label_file")
if file_name is not None:
try:
with open(file_name) as json_file:
data = json.load(json_file)
except Exception as e:
print(f"ERROR: failed to read json data from file '{file_name}'; {e}", file=sys.stderr)
return -1
else:
try:
data = json.loads(params.get("<json_string>", ""))
except Exception as e:
print(f"ERROR: Failed to convert label input to json; {e}", file=sys.stderr)
return -1
rsp = server.object_labels.create(dsid, fileid, data)
if rsp is None:
reportApiError(server, f"Failed to save labels for file {fileid} in dataset {dsid}")
else:
reportSuccess(server, f"Successfully created labels for file {fileid} in dataset {dsid}.")
# --- getlabels Operation -------------------------------------------
getlabels_usage = f"""
Usage: files getlabels --dsid=<dataset_id> --fileid=<file_id>
Where:
{ds_file_description}
Gets labels for the given file in "old style" format."""
def getlabels(params):
"""Handles the 'getlabels' operation"""
dsid = params.get("--dsid", "missing_id")
fileid = params.get("--fileid", "missing_id")
rsp = server.object_labels.getlabels(dsid, fileid)
if rsp is None:
reportApiError(server, f"Failed to get labels for dataset {dsid} and file {fileid}")
else:
reportSuccess(server, None)
cmd_usage = f"""
Usage: files {paiv_cli_utils.common_cmd_flags} <operation> [<args>...]
Where: {paiv_cli_utils.common_cmd_flag_descriptions}
<operation> is required and must be one of:
upload -- upload file(s) to a dataset
list -- report a list of files
change -- change certain metadata attributes of a file
delete -- delete one or more files
show -- show a metadata for a specific file
download -- download a file
savelabels -- save object labels to a file
getlabels -- get object labels associated with a file
Use 'files <operation> --help' for more information on a specific command."""
# Usage statement map -- indexed by CLI operation name
usage_stmt = {
"usage": cmd_usage,
"upload": upload_usage,
"list": list_usage,
"change": change_usage,
"delete": delete_usage,
"show": show_usage,
"download": download_usage,
"getlabels": getlabels_usage,
"savelabels": savelabels_usage
}
# Operation map to map CLI operation name to function implementing that operation
operation_map = {
"upload": upload,
"list": report,
"change": update,
"delete": delete,
"show": show,
"download": download,
"getlabels": getlabels,
"savelabels": savelabels
}
def main(params, cmd_flags=None):
global server
args = paiv_cli_utils.get_valid_input(usage_stmt, operation_map, argv=params,cmd_flags=cmd_flags)
if args is not None:
server = paiv.connect_to_server(paiv_cli_utils.host_name, paiv_cli_utils.token)
args.operation(args.op_params)
if __name__ == "__main__":
main(None)
| 1.625
| 2
|
main.py
|
liatrio/bitbucket-elasticsearch-connector
| 0
|
12783076
|
<filename>main.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from index import index_repos
import logging
import os
import time
import requests
import json
import getpass
import sys, signal
import atexit
try:
from argparse import ArgumentParser
except ImportError:
logging.error("argparse is required to run this script")
exit(1)
try:
from elasticsearch import Elasticsearch
except ImportError:
logging.error("Elasticsearch is required to run this script")
exit(1)
try:
from requests_oauthlib import OAuth1
except ImportError:
logging.error("requests-oauthlib is required to run this script")
exit(1)
lastrun = None
def check_es_configs(config):
if 'host' not in config.keys():
raise KeyError("Elasticsearch host is missing in elasticsearch.conf")
exit(1)
if 'repo_index' not in config.keys():
raise KeyError("Elasticsearch repo_index is missing in elasticsearch.conf")
exit(1)
if 'file_index' not in config.keys():
raise KeyError("Elasticsearch file_index is missing in elasticsearch.conf")
exit(1)
if 'commit_index' not in config.keys():
raise KeyError("Elasticsearch commit_index is missing in elasticsearch.conf")
exit(1)
def check_bitbucket_configs(config):
if 'token' not in config.keys():
raise KeyError("Bitbucket token is missing in bitbucket.conf")
exit(1)
if 'api_endpoint' not in config.keys():
raise KeyError("Bitbucket api_endpoint is missing in bitbucket.conf")
exit(1)
def last_run():
'''
reads from .bitbucketHistory when bitbucket content was last indexed
'''
if os.path.isfile(".bitbucketHistory"):
sincestr = open(".bitbucketHistory").read()
since = time.strptime(sincestr, '%Y-%m-%dT%H:%M:%S')
else:
since = 0
return since
def write_history(lastrun):
'''
writes the timestamp when bitbucket content was last indexed or updated
uses a file named '.bitbucketHistory' to save the timestamp for next run
'''
if lastrun:
history_file = open(".bitbucketHistory", 'w')
history_file.write(lastrun)
history_file.close()
def init_elasticsearch():
config = {}
execfile("elasticsearch.conf", config)
check_es_configs(config)
try:
es_conn = Elasticsearch(config['host'], max_retries=8)
except:
logging.error("elasticsearch is not running")
exit(1)
return es_conn
def main():
global lastrun
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
bitbucket_config = {}
execfile("bitbucket.conf", bitbucket_config)
check_bitbucket_configs(bitbucket_config)
headers = {"Authorization":"Bearer "+bitbucket_config['token']}
## Bitbucket connection:
s = requests.Session()
s.headers = headers
## elasticsearch connection:
es_conn = init_elasticsearch()
argparser = ArgumentParser(description=__doc__)
argparser.add_argument('index', default='index',
help='index, update or pindex')
args = argparser.parse_args()
if args.index == "index":
lastrun = time.strftime('%Y-%m-%dT%H:%M:%S', time.gmtime())
index_repos(s, es_conn)
write_history(lastrun)
else:
raise ValueError("Unknown mode. Please use one of the following:\n index")
atexit.register(write_history, lastrun)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
#lastrun = time.strftime('%Y-%m-%dT%H:%M:%S', time.gmtime())
print lastrun
write_history(lastrun)
sys.exit()
| 2.640625
| 3
|
pymailer/__init__.py
|
complexsum/pymailer
| 1
|
12783077
|
from .__about__ import __version__
from .utils.message import build_message
from .core.email_service import EmailService
| 0.921875
| 1
|
genesys/genesys/simulation/sysarray_models.py
|
VeriGOOD-ML/public
| 6
|
12783078
|
# This file contains the functions for the data access and cycle count models for the Layers executed on the Systolic Array
import logging
import math
import numpy as np
from data_objects import HardwareObject, SAResult_Inflayer, SIMDResult_Inflayer
from layer_object import LayerObject
def conv_access_model(Hardware_param, LayerObj, SysResult_inflayer):
# data access model for convolution layer
#unpacking the parameters. Doing this unpacking at the beginning of each function
# Although repetition of code, doing this since some parameters such bit-width fusion etc may come from a different object
# In future to address such changes, only this unpacking part will need to be modified and the main body of the function will be untouched
bw_filter = LayerObj.bw_filter; bw_ifmap = LayerObj.bw_ifmap; bw_ofmap = LayerObj.bw_ofmap
bw_psum = LayerObj.bw_psum; bw_bias = LayerObj.bw_bias
OW = LayerObj.OW
OH = LayerObj.OH
OC = LayerObj.OC
KW = LayerObj.KW
KH = LayerObj.KH
IC = LayerObj.IC
IW = LayerObj.IW
IH = LayerObj.IH
Batch = LayerObj.Batch
DTile_ow = LayerObj.DTile_ow
DTile_oh = LayerObj.DTile_oh
DTile_oc = LayerObj.DTile_oc
DTile_kw = LayerObj.DTile_kw
DTile_kh = LayerObj.DTile_kh
DTile_ic = LayerObj.DTile_ic
DTile_iw = LayerObj.DTile_iw
DTile_ih = LayerObj.DTile_ih
DTile_batch = LayerObj.DTile_batch
Loop_order = LayerObj.Loop_order
fusion_status = LayerObj.fusion_status
#print(Size_IBUF)
#print(Loop_order)
#print(Pad)
# Determining which dataflow out of the three dataflow class form the input loop order
WS_key = ['ow', 'oh', 'n']
OS_key = ['kw', 'kh', 'ic']
IS_key = ['oc']
for key in WS_key:
if Loop_order[0] == key:
dataflow = "weight_stationary"
break
for key in OS_key:
if Loop_order[0] == key:
dataflow = "output_stationary"
break
for key in IS_key:
if Loop_order[0] == key:
dataflow = "input_stationary"
break
#print(dataflow)
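    # Illustrative example (not from the original comments): with the GeneSys loop order
    # ['ow', 'oh', 'kw', 'kh', 'ic', 'n', 'oc'], Loop_order[0] == 'ow' is in WS_key,
    # so the classification above selects dataflow = "weight_stationary".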
######### Model for DRAM accesses
if (fusion_status == "NoFusion"):
if dataflow == "weight_stationary":
#imap access
ifmap_access_DRAM = (DTile_iw * DTile_ih * DTile_ic * DTile_batch) * (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) \
* (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh) * math.ceil((OC/DTile_oc)) * bw_ifmap # in bit
#######filter access
#common multiplier regardless of the variant of WS dataflow
filter_access_common = (DTile_kw * DTile_kh * DTile_ic * DTile_oc) * (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh) * (OC/DTile_oc)
ow_multiplier = OW/DTile_ow
oh_multiplier = OH/DTile_oh
n_multiplier = Batch/DTile_batch
WS_Dict = {'ow': ow_multiplier, 'oh': oh_multiplier, 'n': n_multiplier}
#print("WS_Dict:", WS_Dict)
# First determining how many keys from the innermost loop matched in the given loop order
loopids = {'first': "match", 'second': "nomatch", 'third': "nomatch"}
#the first loop id will always be one of the keys since this is under WS category, hence first one is matched by default.
#beginning with no match for the second and third ids and will change them to match depending on the cases
#print("BEFORE:", loopids)
for key in WS_key:
if Loop_order[1] == key:
loopids['second'] = "match"
if loopids['second'] == "nomatch":
WScase = "oneKey" #case determined, only one key match, case 1, no further calculation needed
else:
for key in WS_key:
if Loop_order[2] == key:
loopids['third'] = "match"
if loopids['third'] == "nomatch":
WScase = "twoKey" #case determined, two keys matched, case 2, no further calculation needed
else:
WScase = "threeKey" #case determined, all three keys matched, case 3
#print("AFTER:", loopids)
#print("WS Case:", WScase)
#Depending on the WScase, now determining filter multiplier based on how many innermost loops matches the WS_keys
if WScase == "threeKey":
filter_multiplier = 1 # all three key matched, so optimal WS, filter multiplier is 1
elif WScase == "twoKey":
for key in WS_key:
if key != Loop_order[0] and key != Loop_order[1]: # tow key matched and one key does not match
mulkey = key
#print("mulkey:", mulkey)
filter_multiplier = WS_Dict[mulkey]
elif WScase == "oneKey":
mulkey1 = "empty"
mulkey2 = "empty"
for key in WS_key:
if key != Loop_order[0]: # only one key matched, hence two unmatched key to identify
if mulkey1 == "empty" and mulkey2 == "empty":
mulkey1 = key # one unmatched key is placed in mulkey 1
else:
mulkey2 = key # another unmatched key is placed in mulkey 2
print("mulkey1:", mulkey1)
print("mulkey2:", mulkey2)
filter_multiplier = WS_Dict[mulkey1] * WS_Dict[mulkey2]
#print("filter_multiplier:", filter_multiplier)
filter_access_DRAM = filter_access_common * filter_multiplier * bw_filter # in bit
#psum access
ofpsm_access_DRAM = (DTile_ow * DTile_oh * DTile_oc * DTile_batch) * (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (OC/DTile_oc) \
* (2 * math.ceil(IC/DTile_ic) * math.ceil(KW/DTile_kw) * math.ceil(KH/DTile_kh) - 1)
psum_access_DRAM = (ofpsm_access_DRAM - (OW * OH * OC * Batch)) * bw_psum #in bit
#ofmap access
ofmap_access_DRAM = OW * OH * OC * Batch * bw_ofmap # in bit
#bias access
bias_access_DRAM = DTile_oc * (OC/DTile_oc) * bw_bias
#print("ifmap_access_DRAM:", ifmap_access_DRAM)
#print("filter_access_DRAM:", filter_access_DRAM)
#print("ofpsm_access_DRAM:", ofpsm_access_DRAM)
#print("psum_access_DRAM:", psum_access_DRAM)
#print("ofmap_access_DRAM:", ofmap_access_DRAM)
#print("bias_access_DRAM:", bias_access_DRAM)
elif dataflow == "output_stationary":
print("will do")
elif dataflow == "input_stationary":
print("will do")
else:
print("Invalid dataflow")
else:
print("model for fusion do not exist yet")
##### Model for SRAM accesses (Original SRAM access do not depend on fusion)
SRAM_stationary_flag = "NoStationary" # current genesys systolic PE hardware does not support any stationary logic for SRAM accesses
if SRAM_stationary_flag == "NoStationary":
conv_SRAM_access_NoStationary(Hardware_param, LayerObj, SysResult_inflayer)
else:
print("will write generic code for SRAM stationary logic based on dataflow")
SysResult_inflayer.DRAM_access['filter'] = filter_access_DRAM
SysResult_inflayer.DRAM_access['ifmap'] = ifmap_access_DRAM
SysResult_inflayer.DRAM_access['ofmap'] = ofmap_access_DRAM
SysResult_inflayer.DRAM_access['psum'] = psum_access_DRAM
SysResult_inflayer.DRAM_access['bias'] = bias_access_DRAM
def conv_SRAM_access_NoStationary(Hardware_param, LayerObj, SysResult_inflayer):
# Current genesys PE hardware does not support any stationary logic for SRAM accesses
# Hence SRAM access pattern does not depend on loop order or dataflow and this function gives the SRAM access pattern for this scenario
# unpacking the parameters
bw_filter = LayerObj.bw_filter; bw_ifmap = LayerObj.bw_ifmap; bw_ofmap = LayerObj.bw_ofmap
bw_psum = LayerObj.bw_psum; bw_bias = LayerObj.bw_bias
OW = LayerObj.OW
OH = LayerObj.OH
OC = LayerObj.OC
KW = LayerObj.KW
KH = LayerObj.KH
IC = LayerObj.IC
IW = LayerObj.IW
IH = LayerObj.IH
Batch = LayerObj.Batch
DTile_ow = LayerObj.DTile_ow
DTile_oh = LayerObj.DTile_oh
DTile_oc = LayerObj.DTile_oc
DTile_kw = LayerObj.DTile_kw
DTile_kh = LayerObj.DTile_kh
DTile_ic = LayerObj.DTile_ic
DTile_iw = LayerObj.DTile_iw
DTile_ih = LayerObj.DTile_ih
DTile_batch = LayerObj.DTile_batch
Stile_ow = LayerObj.Stile_ow
Stile_oh = LayerObj.Stile_oh
Stile_oc = LayerObj.Stile_oc
Stile_kw = LayerObj.Stile_kw
Stile_kh = LayerObj.Stile_kh
Stile_ic = LayerObj.Stile_ic
Stile_iw = LayerObj.Stile_iw
Stile_ih = LayerObj.Stile_ih
Stile_batch = LayerObj.Stile_batch
#ifmap access
ifmap_DRAM_loop_mul = (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh) * math.ceil((OC/DTile_oc))
ifmap_access_SRAM = (Stile_iw * Stile_ih * Stile_ic * Stile_batch) * (DTile_ow/Stile_ow) * (DTile_oh/Stile_oh) * (DTile_batch/Stile_batch) \
* (DTile_ic/Stile_ic) * (DTile_kw/Stile_kw) * (DTile_kh/Stile_kh) * math.ceil((DTile_oc/Stile_oc)) * ifmap_DRAM_loop_mul * bw_ifmap # in bit
# filter access
filter_DRAM_loop_mul = math.ceil((OW/DTile_ow)) * math.ceil((OH/DTile_oh)) * math.ceil((Batch/DTile_batch)) \
* (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh) * (OC/DTile_oc)
filter_access_SRAM = (Stile_kw * Stile_kh * Stile_ic * Stile_oc) * (DTile_ic/Stile_ic) * (DTile_kw/Stile_kw) * (DTile_kh/Stile_kh) * (DTile_oc/Stile_oc) \
* math.ceil((DTile_ow/Stile_ow)) * math.ceil((DTile_oh/Stile_oh)) * math.ceil((DTile_batch/Stile_batch)) \
* filter_DRAM_loop_mul * bw_filter # in bit
# psum access
pDRAM_loop_mula = math.ceil(IC/DTile_ic) * math.ceil(KW/DTile_kw) * math.ceil(KH/DTile_kh)
pDRAM_loop_mulb = (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (OC/DTile_oc)
psum_access_SRAM = (Stile_ow * Stile_oh * Stile_oc * Stile_batch) * (DTile_ow/Stile_ow) * (DTile_oh/Stile_oh) * (DTile_batch/Stile_batch) * (DTile_oc/Stile_oc) \
* (2 * math.ceil(DTile_ic/Stile_ic) * math.ceil(DTile_kw/Stile_kw) * math.ceil(DTile_kh/Stile_kh) * pDRAM_loop_mula - 1) \
* pDRAM_loop_mulb * bw_psum # in bit
# bias access, for each ofmap location, bias term need to be added once,
bias_DRAM_loop_mul = (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (OC/DTile_oc)
bias_access_SRAM = (Stile_oc) * DTile_ow * DTile_oh * DTile_batch * (DTile_oc/Stile_oc) * bias_DRAM_loop_mul * bw_bias # in bit
#print("ifmap_access_SRAM:", ifmap_access_SRAM)
#print("filter_access_SRAM:", filter_access_SRAM)
#print("psum_access_SRAM:", psum_access_SRAM)
#print("bias_access_SRAM:", bias_access_SRAM)
SysResult_inflayer.SRAM_access['filter'] = filter_access_SRAM
SysResult_inflayer.SRAM_access['ifmap'] = ifmap_access_SRAM
SysResult_inflayer.SRAM_access['psum'] = psum_access_SRAM
SysResult_inflayer.SRAM_access['bias'] = bias_access_SRAM
def conv_cycle_model(Hardware_param, LayerObj, SysResult_inflayer):
#compute cycle and DRAM stall cycle count model for the convolution layer
# unpacking the parameters
SysArray_row = Hardware_param.SysArray_row; SysArray_col = Hardware_param.SysArray_col
OW = LayerObj.OW
OH = LayerObj.OH
OC = LayerObj.OC
KW = LayerObj.KW
KH = LayerObj.KH
IC = LayerObj.IC
IW = LayerObj.IW
IH = LayerObj.IH
Batch = LayerObj.Batch
DTile_ow = LayerObj.DTile_ow
DTile_oh = LayerObj.DTile_oh
DTile_oc = LayerObj.DTile_oc
DTile_kw = LayerObj.DTile_kw
DTile_kh = LayerObj.DTile_kh
DTile_ic = LayerObj.DTile_ic
DTile_iw = LayerObj.DTile_iw
DTile_ih = LayerObj.DTile_ih
DTile_batch = LayerObj.DTile_batch
Stile_ow = LayerObj.Stile_ow
Stile_oh = LayerObj.Stile_oh
Stile_oc = LayerObj.Stile_oc
Stile_kw = LayerObj.Stile_kw
Stile_kh = LayerObj.Stile_kh
Stile_ic = LayerObj.Stile_ic
Stile_iw = LayerObj.Stile_iw
Stile_ih = LayerObj.Stile_ih
Stile_batch = LayerObj.Stile_batch
fusion_status = LayerObj.fusion_status
### determining the on-chip compute cycles, compute cycles do not depend on loop order, or fusion
cycle_oneTile = (DTile_ow/Stile_ow) * (DTile_oh/Stile_oh) * (DTile_kw/Stile_kw) * (DTile_kh/Stile_kh) * (DTile_batch/Stile_batch) \
* math.ceil(DTile_ic/Stile_ic) * math.ceil(DTile_oc/Stile_oc)
#print(cycle_oneTile)
#pipeline overhead for each DRAM tile
pipe_overhead_tile = (SysArray_row - 1) + (SysArray_col - 1) #using PE row and col
#for now omitting the use of any ceil since DRAM tile size will be integer multiple of loops
Number_of_Tile = (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh) * (OC/DTile_oc)
compute_cycles = math.ceil((cycle_oneTile + pipe_overhead_tile) * Number_of_Tile) # giving the outer ceil to avoid fraction cycle numbers
#print("compute_cycles:", compute_cycles)
SysResult_inflayer.cycles['compute'] = compute_cycles
    #of cycles to compute one tile including the pipeline setup overhead, need this variable to compute DRAM stall cycles
ComputeTile_cycles = cycle_oneTile + pipe_overhead_tile
######## model for the DRAM stall cycles, depends on loop order, fusion etc
if (fusion_status == "NoFusion"): #Model for the version where there is no fusion
DRAM_stall_cycles = conv_stall_model_nofu(Hardware_param, LayerObj, ComputeTile_cycles, SysResult_inflayer)
else:
print("model for fusion do not exist yet")
SysResult_inflayer.cycles['total'] = compute_cycles + DRAM_stall_cycles
####### Counting number of MAC operations: writing in a generic way for future extension (ceiling affects cycle count and #of MAC differently)
PE_tile_mac = (Stile_ow * Stile_oh * Stile_oc * Stile_batch) * (Stile_ic * Stile_kw * Stile_kh)
SRAM_tile_mac = PE_tile_mac * (DTile_ow/Stile_ow) * (DTile_oh/Stile_oh) * (DTile_kw/Stile_kw) * (DTile_kh/Stile_kh) * (DTile_batch/Stile_batch) \
* (DTile_ic/Stile_ic) * (DTile_oc/Stile_oc)
Nos_of_mac = SRAM_tile_mac * (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh) * (OC/DTile_oc)
print("Nos of MAC:", Nos_of_mac)
SysResult_inflayer.arithmetic['mac'] = Nos_of_mac
def conv_stall_model_nofu(Hardware_param, LayerObj, ComputeTile_cycles, SysResult_inflayer):
#DRAM stall cycle count model for the convolution layer when there is no fusion
bw_filter = LayerObj.bw_filter; bw_ifmap = LayerObj.bw_ifmap; bw_ofmap = LayerObj.bw_ofmap
bw_psum = LayerObj.bw_psum; bw_bias = LayerObj.bw_bias
SysArray_row = Hardware_param.SysArray_row; SysArray_col = Hardware_param.SysArray_col
RBW_DRAM_to_WBUF = Hardware_param.RBW_DRAM_to_WBUF # in bit/cycle, bias is also loaded through the same AXI interface
RBW_DRAM_to_IBUF = Hardware_param.RBW_DRAM_to_IBUF
RBW_DRAM_to_OBUF = Hardware_param.RBW_DRAM_to_OBUF
WBW_OBUF_to_DRAM = Hardware_param.WBW_OBUF_to_DRAM
OW = LayerObj.OW
OH = LayerObj.OH
OC = LayerObj.OC
KW = LayerObj.KW
KH = LayerObj.KH
IC = LayerObj.IC
IW = LayerObj.IW
IH = LayerObj.IH
Batch = LayerObj.Batch
DTile_ow = LayerObj.DTile_ow
DTile_oh = LayerObj.DTile_oh
DTile_oc = LayerObj.DTile_oc
DTile_kw = LayerObj.DTile_kw
DTile_kh = LayerObj.DTile_kh
DTile_ic = LayerObj.DTile_ic
DTile_iw = LayerObj.DTile_iw
DTile_ih = LayerObj.DTile_ih
DTile_batch = LayerObj.DTile_batch
Loop_order = LayerObj.Loop_order
# Determining which dataflow out of the three dataflow class form the input loop order
WS_key = ['ow', 'oh', 'n']
OS_key = ['kw', 'kh', 'ic']
IS_key = ['oc']
for key in WS_key:
if Loop_order[0] == key:
dataflow = "weight_stationary"
break
for key in OS_key:
if Loop_order[0] == key:
dataflow = "output_stationary"
break
for key in IS_key:
if Loop_order[0] == key:
dataflow = "input_stationary"
break
#print("Dataflow:", dataflow)
if dataflow == "weight_stationary":
# The current DRAM stall model is valid for any WS loop order with oc at the outermost loop (DUE TO SOME CORNER SITUATIONs, EXTENSION IS POSSIBLE)
Loop_order1 = ['ow', 'oh', 'kw', 'kh', 'ic', 'n', 'oc'] # current GeneSys Loop order
        Loop_order2 = ['ow', 'oh', 'n', 'kw', 'kh', 'ic', 'oc'] # an optimal WS loop order; there are equivalent variants of these loop orders, WILL ADD LATER IN CODE
if (Loop_order == Loop_order1 and (OW/DTile_ow * OH/DTile_oh) > 2) or (Loop_order == Loop_order2 and (OW/DTile_ow * OH/DTile_oh * Batch/DTile_batch) > 2):
            # The tiling condition ensures that the number of WS tiles is at least 3, so the 3-stage double-buffered DRAM pipeline can execute normally
No_Warning = "True"
else:
print("WARNING: Number of WS tile is less than 3")
print("Nos of WS tile:", (OW/DTile_ow * OH/DTile_oh * Batch/DTile_batch))
print("Nos of DRAM WS + OS tiles:", (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh))
print("Nos of total DRAM tiles:", (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh) * (OC/DTile_oc))
#print("OH:", OH, "OW:", OW, "DTile_oh:", DTile_oh, "DTile_ow:", DTile_ow)
if (Loop_order == Loop_order1) or (Loop_order == Loop_order2):
if Loop_order == Loop_order1:
filter_multiplier = Batch/DTile_batch
elif Loop_order == Loop_order2:
filter_multiplier = 1
#print(filter_multiplier)
#of tiles where weights are being loaded (regardless of bias)
NT_weight = (KW/DTile_kw) * (KH/DTile_kh) * (IC/DTile_ic) * (OC/DTile_oc) * filter_multiplier
#of tiles where (weight + bias) are being loaded. (bias is loaded with the oc loop)
NT_wgt_bias = OC/DTile_oc
#of tiles where only weights are being loaded
NT_wgt_only = NT_weight - NT_wgt_bias
#of tiles where psum is written to the DRAM
NT_ps_wrt = (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh) * (OC/DTile_oc)
#of tiles where psum write only happens
NT_ps_wrtonly = (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (OC/DTile_oc)
#of tiles where both psum read and write occur
NT_ps_rdwrt = NT_ps_wrt - NT_ps_wrtonly
#print("NT_weight:", NT_weight, ";", "NT_wgt_bias:", NT_wgt_bias, ";", "NT_wgt_only:", NT_wgt_only)
#print("NT_ps_wrt:", NT_ps_wrt, ";", "NT_ps_wrtonly:", NT_ps_wrtonly, ";", "NT_ps_rdwrt:", NT_ps_rdwrt)
## Performing CASE counts
#CASE-5: #of tiles where weight+bias is being loaded (exclude the first tile)
NT_case5 = NT_wgt_bias - 1
#CASE-4: #of tiles where only weight is being loaded
NT_case4 = NT_wgt_only
#CASE-1: #of tiles where ifmap read and psum write happens (exclude the last 2 tiles)
NT_case1 = (NT_ps_wrtonly - 2) - NT_case5
#CASE-2: #of tiles where ifmap read and psum read+write happens
NT_case2 = NT_ps_rdwrt - NT_case4
#print("NT_case1:", NT_case1, "NT_case2:", NT_case2, "NT_case4:", NT_case4, "NT_case5:", NT_case5)
## place condition to address the situation when tiles from ic, kw, kh, oc, n loops are equal to their original dimensions
if (NT_case2 + NT_case4 == 0):
NT_case2 = 0
NT_case4 = 0
print("NT_case1:", NT_case1, "NT_case2:", NT_case2, "NT_case4:", NT_case4, "NT_case5:", NT_case5)
            #The following two tiles are placed as separate cases for future exception code when WS tiles can be < 3. There it is possible for these cases to be zero
NT_case7 = 1 # The second tile
NT_case8 = 1 # The second last tile
#of cycles required to load/store each tile of each kind of data
WgtTile_load_cycles = math.ceil((DTile_kw * DTile_kh * DTile_ic * DTile_oc * bw_filter) / RBW_DRAM_to_WBUF)
BiasTile_load_cycles = math.ceil((DTile_oc * bw_bias) / RBW_DRAM_to_WBUF)
ifmapTile_load_cycles = math.ceil((DTile_iw * DTile_ih * DTile_ic * DTile_batch * bw_ifmap) / RBW_DRAM_to_IBUF)
psumTile_load_cycles = math.ceil((DTile_ow * DTile_oh * DTile_oc * DTile_batch * bw_psum) / RBW_DRAM_to_OBUF)
psumTile_store_cycles = math.ceil((DTile_ow * DTile_oh * DTile_oc * DTile_batch * bw_psum) / WBW_OBUF_to_DRAM)
            #No need to use an 8-bit ofmap, even in the no-fusion version. Since SIMD operations are 32 bit and there is always at least a ReLU layer after each
            #Conv layer, the output of conv will go to SIMD and the quantization from 32 to 8 bit happens at SIMD. Hence the ofmap from a conv will be 32 bit
#print("computeTile_cycles:", ComputeTile_cycles)
#print("WgtTile_load_cycles:", WgtTile_load_cycles)
#print("BiasTile_load_cycles:", BiasTile_load_cycles)
#print("ifmapTile_load_cycles:", ifmapTile_load_cycles)
#print("psumTile_load_cycles:", psumTile_load_cycles)
#print("psumTile_store_cycles:", psumTile_store_cycles)
# Determining the #of stall cycles for each case
#Case1
L11 = ifmapTile_load_cycles - ComputeTile_cycles
L12 = psumTile_store_cycles - ComputeTile_cycles
stall_case1 = max(0, L11, L12) * NT_case1
#Case2
L21 = ifmapTile_load_cycles - ComputeTile_cycles
L22 = (psumTile_load_cycles + psumTile_store_cycles - ComputeTile_cycles) #one AXI for both read and write of psum.
stall_case2 = max(0, L21, L22) * NT_case2
#Case4
L41 = ifmapTile_load_cycles - ComputeTile_cycles
L42 = WgtTile_load_cycles - ComputeTile_cycles
L43 = (psumTile_load_cycles + psumTile_store_cycles - ComputeTile_cycles) #one AXI for both read and write of psum.
stall_case4 = max(0, L41, L42, L43) * NT_case4
#Case5
L51 = ifmapTile_load_cycles - ComputeTile_cycles
L52 = psumTile_store_cycles - ComputeTile_cycles
L53 = (WgtTile_load_cycles + BiasTile_load_cycles) - ComputeTile_cycles
stall_case5 = max(0, L51, L52, L53) * NT_case5
print("stall_case1:", stall_case1, "; stall_case2:", stall_case2, "; stall_case4:", stall_case4, "; stall_case5:", stall_case5)
#First tile
Lf1 = ifmapTile_load_cycles
Lf2 = WgtTile_load_cycles + BiasTile_load_cycles
stall_first = max(Lf1, Lf2)
#Last tile
stall_last = psumTile_store_cycles
#Case7
L71 = ifmapTile_load_cycles - ComputeTile_cycles
stall_case7 = max(0, L71) * NT_case7
#Case8
L81 = psumTile_store_cycles - ComputeTile_cycles
stall_case8 = max(0, L81)
print("stall_first:", stall_first, "; stall_last:", stall_last, "; stall_case7:", stall_case7, "; stall_case8:", stall_case8)
#of total DRAM stall cycles
DRAM_stall_cycles = stall_case1 + stall_case2 + stall_case4 + stall_case5 + stall_case7 + stall_case8 + stall_first + stall_last
#print("DRAM_stall_cycles:", DRAM_stall_cycles)
SysResult_inflayer.cycles['DRAM_Stall'] = DRAM_stall_cycles
else:
print("WS DRAM stall model do not exist for the input loop order")
elif dataflow == "output_stationary":
print("DARM stall model do not exist yet")
elif dataflow == "input_stationary":
print("DRAM stall model do not exist yet")
else:
print("Invalid dataflow")
return DRAM_stall_cycles
def gemm_access_model(Hardware_param, LayerObj, SysResult_inflayer):
# data access model for fully connected layer (i.e., gemm)
#unpacking the parameters. Doing this unpacking at the beginning of each function
bw_filter = LayerObj.bw_filter; bw_ifmap = LayerObj.bw_ifmap; bw_ofmap = LayerObj.bw_ofmap
bw_psum = LayerObj.bw_psum; bw_bias = LayerObj.bw_bias
OC = LayerObj.OC
IC = LayerObj.IC
Batch = LayerObj.Batch
DTile_oc = LayerObj.DTile_oc
DTile_ic = LayerObj.DTile_ic
DTile_batch = LayerObj.DTile_batch
Loop_order = LayerObj.Loop_order
fusion_status = LayerObj.fusion_status
# Current implementation is for one loop order only, this is sort of the most optimal loop order for gemm analytically.
# So probably no need to implement the support for any loop order for gemm
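    # Illustrative mapping (an assumption about how the layer object is populated, not stated
    # here): a gemm with Batch = N, IC = in_features, OC = out_features is treated as a 1x1
    # convolution, i.e. OW = OH = KW = KH = 1, so the conv model reused below reduces to the
    # matrix-multiply cost.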
if Batch > 1 and Loop_order == ['n', 'ic', 'oc']: # weight stationary category
LayerObj.Loop_order = ['ow', 'oh', 'n', 'kw', 'kh', 'ic', 'oc'] # converting FC loop order to convolution loop order
conv_access_model(Hardware_param, LayerObj, SysResult_inflayer)
LayerObj.Loop_order = Loop_order # doing this to retain the original Loop order in LayerObj so that it can be used in later function calls
elif Batch == 1 and Loop_order == ['n', 'ic', 'oc']: #output stationary category
###### Model for DRAM access cost
if (fusion_status == "NoFusion"):
# ifmap access
if math.ceil(IC/DTile_ic) == 1:
ifmap_oc_multiplier = 1 # the loop becomes input stationary wrt DRAM access
else:
ifmap_oc_multiplier = OC/DTile_oc
#print(ifmap_oc_multiplier)
            ifmap_access_DRAM = (DTile_ic) * (IC/DTile_ic) * ifmap_oc_multiplier * bw_ifmap
# filter access
filter_access_DRAM = (DTile_ic * DTile_oc) * (IC/DTile_ic) * (OC/DTile_oc) * bw_filter # in bit
            # ofmap access, no psum DRAM access since output stationary
ofmap_access_DRAM = (DTile_oc) * (OC/DTile_oc) * bw_ofmap
# bias access
bias_access_DRAM = (DTile_oc) * (OC/DTile_oc) * bw_bias
else:
print("model for fusion do not exist yet")
##### Model for SRAM accesses (Original SRAM access do not depend on fusion)
SRAM_stationary_flag = "NoStationary" # current genesys systolic PE hardware does not support any stationary logic for SRAM accesses
if SRAM_stationary_flag == "NoStationary":
conv_SRAM_access_NoStationary(Hardware_param, LayerObj, SysResult_inflayer)
else:
print("will write generic code for SRAM stationary logic based on dataflow")
SysResult_inflayer.DRAM_access['filter'] = filter_access_DRAM
SysResult_inflayer.DRAM_access['ifmap'] = ifmap_access_DRAM
SysResult_inflayer.DRAM_access['ofmap'] = ofmap_access_DRAM
#SysResult_inflayer.DRAM_access['psum'] = psum_access_DRAM
SysResult_inflayer.DRAM_access['bias'] = bias_access_DRAM
else:
print("The input loop order is not optimal and not supported")
def gemm_cycle_model(Hardware_param, LayerObj, SysResult_inflayer):
    #compute cycle and DRAM stall cycle count model for the fully connected layer
# unpacking the parameters
SysArray_row = Hardware_param.SysArray_row; SysArray_col = Hardware_param.SysArray_col
OW = LayerObj.OW
OH = LayerObj.OH
OC = LayerObj.OC
KW = LayerObj.KW
KH = LayerObj.KH
IC = LayerObj.IC
IW = LayerObj.IW
IH = LayerObj.IH
Batch = LayerObj.Batch
DTile_ow = LayerObj.DTile_ow
DTile_oh = LayerObj.DTile_oh
DTile_oc = LayerObj.DTile_oc
DTile_kw = LayerObj.DTile_kw
DTile_kh = LayerObj.DTile_kh
DTile_ic = LayerObj.DTile_ic
DTile_iw = LayerObj.DTile_iw
DTile_ih = LayerObj.DTile_ih
DTile_batch = LayerObj.DTile_batch
Stile_ow = LayerObj.Stile_ow
Stile_oh = LayerObj.Stile_oh
Stile_oc = LayerObj.Stile_oc
Stile_kw = LayerObj.Stile_kw
Stile_kh = LayerObj.Stile_kh
Stile_ic = LayerObj.Stile_ic
Stile_iw = LayerObj.Stile_iw
Stile_ih = LayerObj.Stile_ih
Stile_batch = LayerObj.Stile_batch
Loop_order = LayerObj.Loop_order
fusion_status = LayerObj.fusion_status
# Current implementation is for one loop order only, this is sort of the most optimal loop order for gemm analytically.
# So no need to implement the support for any loop order for gemm
if Batch > 1 and Loop_order == ['n', 'ic', 'oc']: # weight stationary category
LayerObj.Loop_order = ['ow', 'oh', 'n', 'kw', 'kh', 'ic', 'oc'] # converting FC loop order to convolution loop order
conv_cycle_model(Hardware_param, LayerObj, SysResult_inflayer)
LayerObj.Loop_order = Loop_order # doing this to retain the original Loop order in LayerObj so that it can be used in later function calls if needed
elif Batch == 1 and Loop_order == ['n', 'ic', 'oc']:
        ### determining computing cycles, reusing the convolution equations since they apply here
#determining the on-chip compute cycles, compute cycles do not depend on loop order, or fusion
cycle_oneTile = (DTile_ow/Stile_ow) * (DTile_oh/Stile_oh) * (DTile_kw/Stile_kw) * (DTile_kh/Stile_kh) * (DTile_batch/Stile_batch) \
* math.ceil(DTile_ic/Stile_ic) * math.ceil(DTile_oc/Stile_oc)
#print("cycle_oneTile:", cycle_oneTile)
#pipeline overhead for each DRAM tile
pipe_overhead_tile = (SysArray_row - 1) + (SysArray_col - 1) #using PE row and col,
#for now omitting the use of any ceil since DRAM tile size will be integer multiple of loops,
Number_of_Tile = (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh) * (OC/DTile_oc)
compute_cycles = math.ceil((cycle_oneTile + pipe_overhead_tile) * Number_of_Tile) # giving the outer ceil to avoid fraction cycle numbers
#print("compute_cycles:", compute_cycles)
SysResult_inflayer.cycles['compute'] = compute_cycles
        #of cycles to compute one tile including the pipeline setup overhead, need this variable to compute DRAM stall cycles
ComputeTile_cycles = cycle_oneTile + pipe_overhead_tile
######## model for the DRAM stall cycles, depends on loop order, fusion etc
if (fusion_status == "NoFusion"): #Model for the version where there is no fusion
DRAM_stall_cycles = gemmb1_stall_model_nofu(Hardware_param, LayerObj, ComputeTile_cycles, SysResult_inflayer) # stall model for batch = 1, output stationary
else:
print("model for fusion do not exist yet")
SysResult_inflayer.cycles['total'] = compute_cycles + DRAM_stall_cycles
        ####### Counting number of MAC operations: reusing the convolution equations since they apply here (ceiling affects cycle count and #of MAC differently)
PE_tile_mac = (Stile_ow * Stile_oh * Stile_oc * Stile_batch) * (Stile_ic * Stile_kw * Stile_kh)
SRAM_tile_mac = PE_tile_mac * (DTile_ow/Stile_ow) * (DTile_oh/Stile_oh) * (DTile_kw/Stile_kw) * (DTile_kh/Stile_kh) * (DTile_batch/Stile_batch) \
* (DTile_ic/Stile_ic) * (DTile_oc/Stile_oc)
Nos_of_mac = SRAM_tile_mac * (OW/DTile_ow) * (OH/DTile_oh) * (Batch/DTile_batch) * (IC/DTile_ic) * (KW/DTile_kw) * (KH/DTile_kh) * (OC/DTile_oc)
print("Nos of MAC:", Nos_of_mac)
SysResult_inflayer.arithmetic['mac'] = Nos_of_mac
else:
print("The input loop order is not optimal and not supported")
def gemmb1_stall_model_nofu(Hardware_param, LayerObj, ComputeTile_cycles, SysResult_inflayer):
#DRAM stall cycle count model for the gemm layer when there is no fusion, batch size = 1, output stationary
bw_filter = LayerObj.bw_filter; bw_ifmap = LayerObj.bw_ifmap; bw_ofmap = LayerObj.bw_ofmap
bw_psum = LayerObj.bw_psum; bw_bias = LayerObj.bw_bias
RBW_DRAM_to_WBUF = Hardware_param.RBW_DRAM_to_WBUF # in bit/cycle, bias is also loaded through the same AXI interface
RBW_DRAM_to_IBUF = Hardware_param.RBW_DRAM_to_IBUF
RBW_DRAM_to_OBUF = Hardware_param.RBW_DRAM_to_OBUF
WBW_OBUF_to_DRAM = Hardware_param.WBW_OBUF_to_DRAM
OC = LayerObj.OC
IC = LayerObj.IC
Batch = LayerObj.Batch
DTile_oc = LayerObj.DTile_oc
DTile_ic = LayerObj.DTile_ic
DTile_batch = LayerObj.DTile_batch
#of cycles required to load/store each tile of each kind of data
WgtTile_load_cycles = math.ceil((DTile_ic * DTile_oc * bw_filter) / RBW_DRAM_to_WBUF)
BiasTile_load_cycles = math.ceil((DTile_oc * bw_bias) / RBW_DRAM_to_WBUF)
ifmapTile_load_cycles = math.ceil((DTile_ic * bw_ifmap) / RBW_DRAM_to_IBUF)
ofmapTile_store_cycles = math.ceil((DTile_oc * bw_ofmap) / WBW_OBUF_to_DRAM)
    #No need to use an 8-bit ofmap, even in the no-fusion version. Since SIMD operations are 32 bit and there is always at least a ReLU layer after each
    #Conv layer, the output of conv will go to SIMD and the quantization from 32 to 8 bit happens at SIMD. Hence the ofmap from a conv will be 32 bit
#print("ComputeTile_cycles:", ComputeTile_cycles)
#print("WgtTile_load_cycles:", WgtTile_load_cycles)
if math.ceil(IC/DTile_ic) == 1:
dataflow = "input_stationary"
else:
dataflow = "output_stationary"
if dataflow == "input_stationary":
## Performing CASE counts, there is only one case
#Case 1: #of tiles where weight+bias is loaded, and ofmap write occurs, except the first two and last two tiles
NT_case1 = (OC/DTile_oc) - 2
        # Using this condition to separately address the situation when OC/DTile_oc is also 1 and NT_case1 becomes negative
NT_case1_flag = "None"
if NT_case1 < 0:
NT_case1 = 0
NT_case1_flag = "Negative"
#print("NT_case1:", NT_case1)
# Determining the #of stall cycles for each case
#Case1
L11 = (WgtTile_load_cycles + BiasTile_load_cycles) - ComputeTile_cycles
L12 = ofmapTile_store_cycles - ComputeTile_cycles
stall_case1 = max(0, L11, L12) * NT_case1
#First tile
Lf1 = ifmapTile_load_cycles
Lf2 = WgtTile_load_cycles + BiasTile_load_cycles
stall_first = max(Lf1, Lf2)
#Second tile
L2nd = (WgtTile_load_cycles + BiasTile_load_cycles) - ComputeTile_cycles
stall_second = max(0, L2nd)
#Second last tile
L2ndlst = ofmapTile_store_cycles - ComputeTile_cycles
stall_secondlast = max(0, L2ndlst)
#Last tile
stall_last = ofmapTile_store_cycles
#print("stall_case1:", stall_case1, "; stall_first:", stall_first, "; stall_second:", stall_second, "; stall_secondlast:", stall_secondlast,\
# "; stall_last:", stall_last)
#of total DRAM stall cycles
if NT_case1_flag == "Negative":
DRAM_stall_cycles = stall_first + stall_last
else:
DRAM_stall_cycles = stall_case1 + stall_first + stall_second + stall_secondlast + stall_last
elif dataflow == "output_stationary":
#of tiles where weights are being loaded (regardless of bias)
NT_weight = (IC/DTile_ic) * (OC/DTile_oc)
#of tiles where (weight + bias) are being loaded. (bias is loaded with the oc loop)
NT_wgt_bias = OC/DTile_oc
#of tiles where only weights are being loaded
NT_wgt_only = NT_weight - NT_wgt_bias
#of tiles where ofmap is written to the DRAM
NT_ofmap_wrt = (OC/DTile_oc)
#print("NT_weight:", NT_weight, ";", "NT_wgt_bias:", NT_wgt_bias, ";", "NT_wgt_only:", NT_wgt_only, "; NT_ofmap_wrt:", NT_ofmap_wrt)
## Performing CASE counts
#CASE-1: #of tiles where weight+bias is being loaded (exclude the first tile)
NT_case1 = NT_wgt_bias - 1
#CASE-4: #of tiles where ofmap write occurs (excluding the last tile, ofmap write does not happen at the second last tile)
NT_case4 = NT_ofmap_wrt - 1
#CASE-3: #of tiles where weightonly read and ifmap read happens (excluding the second tile)
NT_case3 = (NT_wgt_only - 1) - NT_case4
#print("NT_case1:", NT_case1, "NT_case3:", NT_case3, "NT_case4:", NT_case4)
# Determining the #of stall cycles for each case
#Case1
L11 = (WgtTile_load_cycles + BiasTile_load_cycles) - ComputeTile_cycles
L12 = ifmapTile_load_cycles - ComputeTile_cycles
stall_case1 = max(0, L11, L12) * NT_case1
#Case3
L31 = WgtTile_load_cycles - ComputeTile_cycles
L32 = ifmapTile_load_cycles - ComputeTile_cycles
stall_case3 = max(0, L31, L32) * NT_case3
#Case4
L41 = WgtTile_load_cycles - ComputeTile_cycles
L42 = ifmapTile_load_cycles - ComputeTile_cycles
L43 = ofmapTile_store_cycles - ComputeTile_cycles
stall_case4 = max(0, L41, L42, L43) * NT_case4
#print("stall_case1:", stall_case1, "; stall_case3:", stall_case3, "; stall_case4:", stall_case4)
#First tile
Lf1 = ifmapTile_load_cycles
Lf2 = WgtTile_load_cycles + BiasTile_load_cycles
stall_first = max(Lf1, Lf2)
#Second tile
L2nd1 = WgtTile_load_cycles - ComputeTile_cycles
L2nd2 = ifmapTile_load_cycles - ComputeTile_cycles
stall_second = max(0, L2nd1, L2nd2)
#Second last tile, there is no data read/write for the second last tile, only compute
stall_secondlast = 0
#Last tile
stall_last = ofmapTile_store_cycles
#print("stall_first:", stall_first, "; stall_second:", stall_second, "; stall_secondlast:", stall_secondlast, "; stall_last:", stall_last)
#of total DRAM stall cycles
DRAM_stall_cycles = stall_case1 + stall_case3 + stall_case4 + stall_first + stall_second + stall_secondlast + stall_last
#print("DRAM_stall_cycles:", DRAM_stall_cycles)
SysResult_inflayer.cycles['DRAM_Stall'] = DRAM_stall_cycles
return DRAM_stall_cycles
| 2.515625
| 3
|
RecoEcal/EgammaClusterProducers/python/egammaRechitFilter_cfi.py
|
ckamtsikis/cmssw
| 852
|
12783079
|
<filename>RecoEcal/EgammaClusterProducers/python/egammaRechitFilter_cfi.py
import FWCore.ParameterSet.Config as cms
#
# module for filtering of rechits. user provides noise threshold in GeV units
# Author: <NAME>, University of Rome & INFN
#
rechitFilter = cms.EDProducer("RecHitFilter",
noiseEnergyThreshold = cms.double(0.08),
noiseChi2Threshold = cms.double(40),
hitCollection = cms.InputTag('EcalRecHit','EcalRecHitsEB'),
reducedHitCollection = cms.string('FilteredEcalRecHitCollection')
)
| 1.515625
| 2
|
freedom/reco/i3freedom.py
|
JanWeldert/freeDOM
| 0
|
12783080
|
<filename>freedom/reco/i3freedom.py
"""Provides I3Module(s) for FreeDOM reco"""
from freedom.reco.crs_reco import (
timed_fit,
zero_track_fit,
DEFAULT_SEARCH_LIMITS,
DEFAULT_INIT_RANGE,
)
from freedom.reco import transforms
from freedom.utils import i3frame_dataloader
from freedom.llh_service.llh_client import LLHClient
import numpy as np
DEFAULT_N_LIVE_POINTS = 97
DEFAULT_BATCH_SIZE = 12
DEFAULT_MAX_ITER = 10000
DEFAULT_SPHERICAL_INDICES = [[4, 5]]
TRACK_M_PER_GEV = 15 / 3.3
"""Borrowed from Retro's muon_hypo.py"""
class I3FreeDOMClient:
"""FreeDOM client IceTray module. Connects to LLHServices started elsewhere"""
def __init__(self, ctrl_addrs, conf_timeout, rng=None):
"""initialize FreeDOM client, connect to LLH service(s)"""
if isinstance(ctrl_addrs, str):
ctrl_addrs = [ctrl_addrs]
self._llh_clients = [LLHClient(addr, conf_timeout) for addr in ctrl_addrs]
if rng is None:
self._rng = np.random.default_rng(None)
else:
self._rng = rng
def __call__(
self,
frame,
geo,
reco_pulse_series_names,
ug_geo=None,
mdom_directions=None,
suffix="",
init_range=DEFAULT_INIT_RANGE,
search_limits=DEFAULT_SEARCH_LIMITS,
n_live_points=DEFAULT_N_LIVE_POINTS,
do_postfit=True,
store_all=False,
truth_seed=False,
batch_size=DEFAULT_BATCH_SIZE,
par_transforms=None,
do_track_dllh=False,
**crs_fit_kwargs,
):
"""reconstruct an event stored in an i3frame"""
event = i3frame_dataloader.load_event(
frame, geo, reco_pulse_series_names, ug_geo, mdom_directions
)
fit_kwargs = dict(
event=event,
clients=self._llh_clients,
rng=self._rng,
init_range=init_range,
search_limits=search_limits,
n_live_points=n_live_points,
do_postfit=do_postfit,
store_all=store_all,
truth_seed=truth_seed,
param_transforms=par_transforms,
batch_size=batch_size,
spherical_indices=DEFAULT_SPHERICAL_INDICES,
max_iter=DEFAULT_MAX_ITER,
**crs_fit_kwargs,
)
full_res = timed_fit(**fit_kwargs)
if event["params"] is not None:
full_res["truth_LLH"] = 0
for i, client in enumerate(self._llh_clients):
full_res["truth_LLH"] += client.eval_llh(
event["hit_data"][i], event["evt_data"][i], event["params"]
)
prefix = f"FreeDOM_{suffix}_"
store_fit_result(
frame, prefix, full_res, par_transforms, store_i3_particles=True
)
if do_track_dllh:
# do not conduct postfit for zero_track fits
fit_kwargs["do_postfit"] = False
no_track_res, E_only_res = zero_track_fit(full_res, **fit_kwargs)
store_dllh(frame, prefix, full_res, no_track_res, E_only_res)
store_fit_result(frame, prefix + "no_track_", no_track_res, par_transforms)
store_fit_result(frame, prefix + "E_only_", E_only_res, par_transforms)
def store_fit_result(
frame, prefix, fit_res, par_transforms=None, store_i3_particles=False,
):
"""store reco output in an i3frame"""
from icecube.dataclasses import I3VectorString, I3VectorDouble, I3Double
from icecube.icetray import I3Int, I3Bool
fixed_params = fit_res.get("fixed_params", None)
par_names = transforms.free_par_names(par_transforms, fixed_params)
frame[f"{prefix}par_names"] = to_i3_vec(par_names, I3VectorString)
frame[f"{prefix}best_fit"] = to_i3_vec(fit_res["x"], I3VectorDouble)
frame[f"{prefix}success"] = I3Bool(fit_res["success"])
for double_p, double_frame_name in [("fun", "best_LLH"), ("delta", "delta_T")]:
frame[f"{prefix}{double_frame_name}"] = I3Double(float(fit_res[double_p]))
try:
frame[f"{prefix}truth_LLH"] = I3Double(float(fit_res["truth_LLH"]))
except KeyError:
pass
for int_p, int_frame_name in [
("n_calls", "n_llh_calls"),
("nit", "n_crs_iters"),
("stopping_flag", "stopping_flag"),
]:
frame[f"{prefix}{int_frame_name}"] = I3Int(fit_res[int_p])
postfit_res = fit_res.get("postfit", {})
for key, val in postfit_res.items():
if key == "envs":
for i, env_ps in enumerate(zip(*val)):
frame[f"{prefix}env_p{i}"] = to_i3_vec(env_ps, I3VectorDouble)
else:
frame[f"{prefix}{key}"] = to_i3_vec(val, I3VectorDouble)
if store_i3_particles:
trans = par_transforms["trans"] if par_transforms is not None else None
best_fit_pars = transforms.apply_transform(trans, fit_res["x"], fixed_params)
reco_pars = {
name: val
for name, val in zip(i3frame_dataloader.DEFAULT_LABELS, best_fit_pars)
}
reco_pars["success"] = fit_res["success"]
for particle_type in ("neutrino", "cascade", "track"):
frame[f"{prefix}{particle_type}"] = build_i3_particle(
reco_pars, particle_type
)
def store_dllh(frame, prefix, full_res, no_track_res, E_only_res):
"""store no-track delta LLH info in an i3frame"""
from icecube.dataclasses import I3Double
frame[f"{prefix}notrack_dllh"] = I3Double(
float(no_track_res["fun"] - full_res["fun"])
)
frame[f"{prefix}notrack_E_only_dllh"] = I3Double(
float(E_only_res["fun"] - full_res["fun"])
)
def to_i3_vec(array, i3_vec_type):
"""convert a list/array to an I3Vec"""
i3_vec = i3_vec_type()
i3_vec.extend(array)
return i3_vec
_energy_getters = dict(
cascade=lambda pars: pars["cascade_energy"],
track=lambda pars: pars["track_energy"],
neutrino=lambda pars: pars["track_energy"] + pars["cascade_energy"],
)
def build_i3_particle(reco_pars, particle_type):
"""build an I3Particle from reco parameters"""
from icecube.dataclasses import I3Particle, I3Constants, I3Position, I3Direction
from icecube.icetray import I3Units
shape_map = dict(
cascade=I3Particle.ParticleShape.Cascade,
track=I3Particle.ParticleShape.ContainedTrack,
neutrino=I3Particle.ParticleShape.Primary,
)
particle = I3Particle()
if reco_pars["success"]:
particle.fit_status = I3Particle.FitStatus.OK
else:
particle.fit_status = I3Particle.GeneralFailure
particle.dir = I3Direction(reco_pars["zenith"], reco_pars["azimuth"])
particle.energy = _energy_getters[particle_type](reco_pars) * I3Units.GeV
particle.pdg_encoding = I3Particle.ParticleType.unknown
particle.pos = I3Position(*(reco_pars[d] * I3Units.m for d in ("x", "y", "z")))
particle.shape = shape_map[particle_type]
particle.time = reco_pars["time"] * I3Units.ns
particle.speed = I3Constants.c
if particle_type == "track":
particle.length = particle.energy * TRACK_M_PER_GEV * I3Units.m
else:
particle.length = np.nan
return particle
| 1.9375
| 2
|
tools/launch_tensorboard.py
|
isn-dev/imagenet18
| 716
|
12783081
|
<filename>tools/launch_tensorboard.py
#!/usr/bin/env python
# Usage:
# ./launch_tensorboard.py
#
# This will launch r5.large machine on AWS with tensoboard, and print URL
# in the console
import ncluster
ncluster.use_aws()
task = ncluster.make_task('tensorboard',
instance_type='r5.large',
image_name='Deep Learning AMI (Ubuntu) Version 13.0')
task.run('source activate tensorflow_p36')
task.run(f'tensorboard --logdir={task.logdir}/..', non_blocking=True)
print(f"Tensorboard at http://{task.public_ip}:6006")
| 2.21875
| 2
|
labs/mapit.py
|
chris-skud/madison-transit-api
| 0
|
12783082
|
<reponame>chris-skud/madison-transit-api<filename>labs/mapit.py
import os
import wsgiref.handlers
import logging
from operator import itemgetter
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext import db
from google.appengine.ext.db import GeoPt
from google.appengine.ext.webapp import template
from google.appengine.api.labs.taskqueue import Task
from google.appengine.runtime import apiproxy_errors
from data_model import PhoneLog
from data_model import StopLocation
from data_model import RouteListing
class MapHandler(webapp.RequestHandler):
def get(self):
# review the results for popular stops
reqs = getRequestedStops();
stops_stats = []
keyLookup = []
totalReqs = 0
for key,value in reqs.items():
stops_stats.append({'stopID':key,
'count':value,
})
totalReqs += value
keyLookup.append(key+':loc')
# do we have the stop locations?
stopLocations = memcache.get_multi(keyLookup) #reqs.keys())
if stopLocations is None or len(stopLocations) == 0:
logging.error("unable to find stop locations!?")
# create an event to go get this data
task = Task(url='/labs/maptask', params={'clean':'1',})
task.add('crawler')
msg = "no data"
else:
logging.debug('yes... found cached copies of the stop locations!')
msg = "your data"
locations = []
median = 2.5
logging.debug('found a total of %s requests with a median %s' % (str(totalReqs),str(median)))
for key,value in stopLocations.items():
if value is None:
continue;
stopID = key.split(':')[0]
                # normalized value = count/median + (count - median) + base (75)
weight = (float(reqs[stopID]) / median) + float(reqs[stopID]) - median + 75.0
logging.debug('%s / %s weight is %s' % (stopID,reqs[stopID],str(weight)))
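                # Illustrative example (hypothetical counts): for a stop requested 5 times with
                # median 2.5, weight = 5/2.5 + (5 - 2.5) + 75 = 79.5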
locations.append({'stopID':stopID,
'location':value,
'count':reqs[stopID],
'weight':weight,
})
template_values = {'stops':stops_stats,
'locations':locations,
'message':msg,
}
        # render the map page from the mapit.html template
path = os.path.join(os.path.dirname(__file__), 'mapit.html')
self.response.out.write(template.render(path,template_values))
## end MapHandler()
class DisplayStops(webapp.RequestHandler):
def get(self):
reqs = getRequestedStops();
stops_stats = []
for key,value in reqs.items():
# try to figure out if we have a stoplocation for each id
stop = db.GqlQuery("SELECT * FROM StopLocation where stopID = :1", key).get()
if stop is None:
stops_stats.append({'stopID':key,
'count':value,
})
template_values = {'stops':stops_stats,
}
# create a page that provides a form for sending an SMS message
path = os.path.join(os.path.dirname(__file__), 'stops.html')
self.response.out.write(template.render(path,template_values))
## end DisplayStops
class FixStopForm(webapp.RequestHandler):
def get(self):
template_values = {'stopID':self.request.get('stopID')}
# create a page that provides a form for sending an SMS message
path = os.path.join(os.path.dirname(__file__), 'fixstop.html')
self.response.out.write(template.render(path,template_values))
## end FixStop
class FixStop(webapp.RequestHandler):
def post(self):
stopID = self.request.get('stopID')
lat = self.request.get('lat')
lon = self.request.get('lon')
stop = StopLocation()
stop.stopID = stopID
stop.routeID = '00'
stop.intersection = self.request.get('intersection').upper()
stop.location = GeoPt(lat,lon)
stop.update_location()
stop.direction = '00'
logging.debug('created new stoplocation for %s' % stopID)
stop.put()
routeQ = db.GqlQuery("SELECT * FROM RouteListing WHERE stopID = :1", stopID)
routes = routeQ.fetch(100)
if len(routes) > 0:
for r in routes:
logging.debug('updating route %s with new location' % r.route)
r.stopLocation = stop
r.put()
self.redirect('http://smsmybus.com/labs/displaystops')
## end FixStop
class CollectorHandler(webapp.RequestHandler):
def post(self):
self.get()
def get(self):
# do some analysis on the request history...
reqs = getRequestedStops()
# find that lat/longs for all the stops
validStops = reqs.keys()
stopLocs = memcache.get_multi(validStops)
if self.request.get('clean') or stopLocs is None:
memcache.delete_multi(validStops)
logging.debug("logging stop locations!")
locations = dict()
cursor = None
# Start a query for all stop locations
q = StopLocation.all()
while q is not None:
# If the app stored a cursor during a previous request, use it.
if cursor:
q.with_cursor(cursor)
# Perform the query to get results.
locationQuery = q.fetch(1000)
cursor = q.cursor()
if len(locationQuery) > 0:
logging.debug('just read in another chunk of stop locations...')
for l in locationQuery:
location = l.location
stopKey = l.stopID + ':loc'
if l.stopID in validStops and stopKey not in stopLocs:
logging.debug('adding location %s for stopID %s' % (location,l.stopID))
stopLocs[stopKey] = location
else:
logging.debug('No more stop locations left in the query!')
break
memcache.set_multi(stopLocs)
return
## end CollectorHandler
# @todo memcache this list
# @todo create a task that periodically refreshes this list
def getRequestedStops():
# do some analysis on the request history...
reqs = dict()
cursor = None
# Start a query for all Person entities.
q = PhoneLog.all()
while q is not None:
# If the app stored a cursor during a previous request, use it.
if cursor:
q.with_cursor(cursor)
logQuery = q.fetch(1000)
cursor = q.cursor()
if len(logQuery) > 0:
# run through all of the results and add up the number of
# requests for each stopID
#
for e in logQuery:
# add up all of the unique stop IDs
requestString = e.body.split()
if len(requestString) >= 2:
stopID = requestString[1]
elif len(requestString) > 0:
stopID = requestString[0]
if len(requestString) > 0 and stopID.isdigit() and len(stopID) == 4:
if stopID in reqs:
reqs[stopID] += 1
else:
#logging.debug('new stop found... %s' % stopID)
reqs[stopID] = 1
else:
logging.debug('nothing left!')
break
return reqs
## end
class SpawnCollector(webapp.RequestHandler):
def post(self):
self.get()
def get(self):
# create an event to go get this data
task = Task(url='/labs/maptask', params={'clean':'1',})
task.add('crawler')
def main():
logging.getLogger().setLevel(logging.DEBUG)
application = webapp.WSGIApplication([('/labs/map', MapHandler),
('/labs/maptask', CollectorHandler),
('/labs/spawncollector', SpawnCollector),
('/labs/displaystops', DisplayStops),
('/labs/fixstop', FixStop),
('/labs/fixstopform', FixStopForm),
],
debug=True)
wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
| 2.046875
| 2
|
spiel/footer.py
|
JoshKarpel/spiel
| 3
|
12783083
|
from dataclasses import dataclass
from pendulum import now
from rich.console import ConsoleRenderable
from rich.style import Style
from rich.table import Column, Table
from rich.text import Text
from spiel.modes import Mode
from spiel.rps import RPSCounter
from spiel.state import State
from spiel.utils import drop_nones, filter_join
@dataclass
class Footer:
state: State
rps_counter: RPSCounter
@property
def longest_slide_number_length(self) -> int:
num_slides = len(self.state.deck)
return len(str(num_slides))
def __rich__(self) -> ConsoleRenderable:
grid = Table.grid(
*drop_nones(
Column(
style=Style(dim=True),
justify="left",
),
Column(
style=Style(bold=True),
justify="center",
),
Column(
style=Style(dim=True),
justify="right",
)
if self.state.options.profiling
else None,
Column(
style=Style(dim=True),
justify="right",
),
Column(
style=Style(dim=True),
justify="right",
),
),
expand=True,
padding=1,
)
grid.add_row(
*drop_nones(
Text(
filter_join(
" | ",
[
self.state.deck.name,
self.state.current_slide.title
if self.state.mode is Mode.SLIDE
else None,
],
)
),
self.state.message,
Text(
f"Render Time: {self.rps_counter.last_elapsed_render_time() * 1e3:>3.3f} ms | {self.rps_counter.renders_per_second():.2f} RPS"
)
if self.state.options.profiling
else None,
now().format(self.state.options.footer_time_format),
Text(
f"[{self.state.current_slide_idx + 1:>0{self.longest_slide_number_length}d} / {len(self.state.deck)}]"
)
if self.state.mode is not Mode.HELP
else Text(Mode.HELP.value, style=Style(italic=True)),
)
)
return grid
| 2.265625
| 2
|
tests/MacPortsRequirementTest.py
|
yzgyyang/dependency_management
| 4
|
12783084
|
<filename>tests/MacPortsRequirementTest.py
import unittest
import unittest.mock
import sarge
from dependency_management.requirements.MacPortsRequirement import (
MacPortsRequirement)
class MacPortsRequirementTestCase(unittest.TestCase):
def test__str__(self):
self.assertEqual(str(MacPortsRequirement('figlet')), 'figlet')
def test_installed_requirement(self):
with unittest.mock.patch('dependency_management.requirements.' +
'MacPortsRequirement.run') as mock:
patched = unittest.mock.Mock(spec=sarge.Pipeline)
patched.returncode = 0
mock.return_value = patched
self.assertTrue(MacPortsRequirement(
'some_good_package').is_installed())
def test_not_installed_requirement(self):
with unittest.mock.patch('dependency_management.requirements.' +
'MacPortsRequirement.run') as mock:
patched = unittest.mock.Mock(spec=sarge.Pipeline)
patched.returncode = 1
mock.return_value = patched
self.assertFalse(MacPortsRequirement(
'some_bad_package').is_installed())
| 2.84375
| 3
|
setup.py
|
alliander-opensource/report_Tprognoses_quality
| 1
|
12783085
|
<reponame>alliander-opensource/report_Tprognoses_quality
import setuptools
import os
from setuptools import setup
pkg_dir = os.path.dirname(os.path.realpath(__file__))
# package description
with open(os.path.join(pkg_dir, "README.md")) as f:
long_description = f.read()
with open(os.path.join(pkg_dir, "requirements.txt")) as f:
required = f.read().splitlines()
with open(os.path.join(pkg_dir, "PACKAGENAME")) as f:
pkg_name = f.read().strip().strip("\n")
with open(os.path.join(pkg_dir, "VERSION")) as f:
version = f.read().strip().strip("\n")
if "BETA" in os.environ:
version += f"b-{version}"
print(f"Make beta version number: {version}")
setup(
name=pkg_name,
version=version,
author="<NAME>",
author_email="<EMAIL>",
description="prognoses monitoring package",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/bramtennet/prognoses_monitoring_reports_code",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
)
| 1.8125
| 2
|
alembic/versions/3e59d406d707_update_waze_api_fields.py
|
shaysw/anyway
| 69
|
12783086
|
<reponame>shaysw/anyway
"""update waze api fields
Revision ID: 3e59d406d707
Revises: <PASSWORD>
Create Date: 2020-10-19 16:28:02.553501
"""
# revision identifiers, used by Alembic.
revision = '3e59d406d707'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
import geoalchemy2
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('waze_traffic_jams',
sa.Column('id', sa.BigInteger(), nullable=False),
sa.Column('level', sa.Integer(), nullable=True),
sa.Column('line', sa.Text(), nullable=True),
sa.Column('speed_kmh', sa.Integer(), nullable=True),
sa.Column('turn_type', sa.Integer(), nullable=True),
sa.Column('length', sa.Float(), nullable=True),
sa.Column('type', sa.Text(), nullable=True),
sa.Column('uuid', sa.Text(), nullable=True),
sa.Column('speed', sa.Integer(), nullable=True),
sa.Column('segments', sa.Text(), nullable=True),
sa.Column('road_type', sa.Integer(), nullable=True),
sa.Column('delay', sa.Integer(), nullable=True),
sa.Column('street', sa.Text(), nullable=True),
sa.Column('city', sa.Text(), nullable=True),
sa.Column('end_node', sa.Text(), nullable=True),
sa.Column('blocking_alert_uuid', sa.Text(), nullable=True),
sa.Column('start_node', sa.Text(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('geom', geoalchemy2.types.Geometry(geometry_type='LINESTRING', from_text='ST_GeomFromEWKT', name='geometry'), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_waze_traffic_jams_created_at'), 'waze_traffic_jams', ['created_at'], unique=False)
op.create_index(op.f('ix_waze_traffic_jams_uuid'), 'waze_traffic_jams', ['uuid'], unique=True)
# drop old table
op.drop_index('idx_waze_trafic_jams_geom', table_name='waze_trafic_jams')
op.drop_index('ix_waze_trafic_jams_created_at', table_name='waze_trafic_jams')
op.drop_index('ix_waze_trafic_jams_uuid', table_name='waze_trafic_jams')
op.drop_table('waze_trafic_jams')
op.add_column('waze_alerts', sa.Column('jam_uuid', sa.Text(), nullable=True))
op.add_column('waze_alerts', sa.Column('longitude', sa.Float(), nullable=True))
op.add_column('waze_alerts', sa.Column('report_by_municipality_user', sa.Boolean(), nullable=True))
op.add_column('waze_alerts', sa.Column('report_description', sa.Text(), nullable=True))
op.drop_column('waze_alerts', 'lontitude')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('waze_alerts', sa.Column('lontitude', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.drop_column('waze_alerts', 'report_description')
op.drop_column('waze_alerts', 'report_by_municipality_user')
op.drop_column('waze_alerts', 'longitude')
op.drop_column('waze_alerts', 'jam_uuid')
op.create_table('waze_trafic_jams',
sa.Column('id', sa.BIGINT(), autoincrement=True, nullable=False),
sa.Column('level', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('line', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('speed_kmh', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('turn_type', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('length', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True),
sa.Column('type', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('uuid', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('speed', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('segments', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('road_type', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('delay', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('street', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('city', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('end_node', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('blocking_alert_uuid', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('start_node', sa.TEXT(), autoincrement=False, nullable=True),
sa.Column('created_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
sa.Column('geom', geoalchemy2.types.Geometry(geometry_type='LINESTRING', from_text='ST_GeomFromEWKT', name='geometry'), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('id', name='waze_trafic_jams_pkey')
)
op.create_index('ix_waze_trafic_jams_uuid', 'waze_trafic_jams', ['uuid'], unique=True)
op.create_index('ix_waze_trafic_jams_created_at', 'waze_trafic_jams', ['created_at'], unique=False)
op.create_index('idx_waze_trafic_jams_geom', 'waze_trafic_jams', ['geom'], unique=False)
op.drop_index(op.f('ix_waze_traffic_jams_uuid'), table_name='waze_traffic_jams')
op.drop_index(op.f('ix_waze_traffic_jams_created_at'), table_name='waze_traffic_jams')
op.drop_table('waze_traffic_jams')
# ### end Alembic commands ###
| 1.578125
| 2
|
core-python/Core_Python/multithreading/RLockDemo.py
|
theumang100/tutorials-1
| 9
|
12783087
|
<gh_stars>1-10
import threading
''' With a plain Lock this program would never produce output: once a Lock is acquired
it must be released before it can be acquired again, even by the thread that already holds it,
so the second acquire() below would block forever.'''
'''lock = threading.Lock()
i = 0
lock.acquire()
i += 1
lock.acquire()
i +=2
lock.release()
print(i)'''
''' We can solve this issue by using RLock'''
lock = threading.RLock()
i = 0
lock.acquire()
i += 1
lock.acquire()
i +=2
lock.release()
print(i)
''' For theoretical reference (RLock vs. Lock) : https://www.geeksforgeeks.org/python-difference-between-lock-and-rlock-objects/ '''
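''' Illustrative sketch (not part of the original demo): the same re-entrant behaviour is
usually written with nested "with" blocks, e.g. a function that holds the lock and calls a
helper which acquires it again. The function names below are hypothetical. '''
def _helper(rlock, value):
    with rlock:  # re-acquired by the same thread; fine because rlock is an RLock
        return value + 2

def increment(rlock, value):
    with rlock:  # first acquisition
        return _helper(rlock, value + 1)

print(increment(threading.RLock(), 0))  # prints 3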
| 3.671875
| 4
|
smartystreets_python_sdk/response.py
|
jasonrfarkas/smartystreets-python-sdk
| 19
|
12783088
|
class Response:
def __init__(self, payload, status_code, error=None):
self.payload = payload
self.status_code = status_code
self.error = error
| 2.359375
| 2
|
xastropy/spec/continuum.py
|
nhmc/xastropy
| 0
|
12783089
|
<reponame>nhmc/xastropy<gh_stars>0
"""
#;+
#; NAME:
#; continuum
#; Version 1.0
#;
#; PURPOSE:
#; Module for continuum code
#; 20-Aug-2015 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
import os, imp
import astropy as apy
from astropy import units as u
from astropy import constants as const
from astropy.io import fits, ascii
from linetools.spectra.xspectrum1d import XSpectrum1D
from xastropy.xutils import xdebug as xdb
xa_path = imp.find_module('xastropy')[1]
def init_conti_dict(Norm=0., tilt=0., piv_wv=0., igm='None'):
'''Initialize a continuum conti_dict
Parameters:
----------
Norm: float, optional
      Normalization
tilt: float, optional
Power-law tilt to continuum
piv_wv: float, optional
Pivot wave for tilt. Best kept *without* units
igm: str, optional
Adopt average IGM model? ['None']
Returns:
---------
conti_dict: dict
Useful for simple modeling. Keep as a dict for JSON writing
'''
conti_dict = dict(Norm=Norm, tilt=tilt, piv_wv=piv_wv, igm=igm)
#
return conti_dict
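# Illustrative usage (hypothetical values): init_conti_dict(Norm=1., tilt=-0.5, piv_wv=1450.)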
def get_telfer_spec(zqso=0., igm=False):
'''Generate a Telfer QSO composite spectrum
    Parameters:
----------
zqso: float, optional
Redshift of the QSO
igm: bool, optional
Include IGM opacity? [False]
Returns:
--------
telfer_spec: XSpectrum1D
Spectrum
'''
# Read
telfer = ascii.read(
xa_path+'/data/quasar/telfer_hst_comp01_rq.ascii', comment='#')
scale = telfer['flux'][(telfer['wrest'] == 1450.)]
telfer_spec = XSpectrum1D.from_tuple((telfer['wrest']*(1+zqso),
telfer['flux']/scale[0])) # Observer frame
# IGM?
if igm is True:
'''The following is quite experimental.
Use at your own risk.
'''
import multiprocessing
from xastropy.igm.fN import model as xifm
from xastropy.igm import tau_eff as xit
fN_model = xifm.default_model()
# Expanding range of zmnx (risky)
fN_model.zmnx = (0.,5.)
# Parallel
igm_wv = np.where(telfer['wrest']<1220.)[0]
adict = []
for wrest in telfer_spec.dispersion[igm_wv].value:
tdict = dict(ilambda=wrest, zem=zqso, fN_model=fN_model)
adict.append(tdict)
# Run
#xdb.set_trace()
pool = multiprocessing.Pool(4) # initialize thread pool N threads
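        # evaluate the effective IGM optical depth for each rest wavelength in parallel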
ateff = pool.map(xit.map_etl, adict)
# Apply
telfer_spec.flux[igm_wv] *= np.exp(-1.*np.array(ateff))
# Return
return telfer_spec
## #################################
## #################################
## TESTING
## #################################
if __name__ == '__main__':
flg_tst = 0
flg_tst += 2**0 # Simple Telfer
#if (flg_fig % 2**4) >= 2**3:
| 1.960938
| 2
|
0x04-python-more_data_structures/6-print_sorted_dictionary.py
|
malu17/alx-higher_level_programming
| 0
|
12783090
|
#!/usr/bin/python3
def print_sorted_dictionary(a_dictionary):
for i, j in sorted(a_dictionary.items()):
print("{}: {}".format(i, j))
| 4.0625
| 4
|
crawlerflow/contrib/extensions/timeseries.py
|
EXTREMOPHILARUM/crawlerflow
| 15
|
12783091
|
<filename>crawlerflow/contrib/extensions/timeseries.py
from scrapy.extensions.logstats import LogStats
from datetime import datetime
import os
import yaml
class CrawlerFlowTimeSeriesStats(LogStats):
def log(self, spider):
item_scraped_count = self.stats.get_value('item_scraped_count', 0)
requests_count = self.stats.get_value('downloader/request_count', 0)
response_received_count = self.stats.get_value('response_received_count', 0)
datum = {
"item_scraped_count": item_scraped_count,
"response_received_count": response_received_count,
"requests_count": requests_count,
"time": str(datetime.now())
}
path = os.getcwd()
log_director = '{}/.logs'.format(path)
if not os.path.exists(log_director):
os.makedirs(log_director)
timeseries_log_file = '{}/timeseries-log.txt'.format(log_director)
with open(timeseries_log_file, 'a') as fh:
line = ",".join([str(v) for k, v in datum.items()])
fh.write("{}\n".format(line))
| 2.859375
| 3
|
mathematics/probability/bday-gift.py
|
PingHuskar/hackerrank
| 41
|
12783092
|
# Mathematics > Probability > B'day Gift
# Whats the price Isaac has to pay for HackerPhone
#
# https://www.hackerrank.com/challenges/bday-gift/problem
# https://www.hackerrank.com/contests/nov13/challenges/bday-gift
#
# each ball has a probability of 0.5 of being picked up
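# by linearity of expectation, the expected total is half the sum of all ball values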
n = int(input())
e = sum(int(input()) for _ in range(n))
print(e / 2)
| 3.25
| 3
|
projects/mmdet3d_plugin/models/utils/dgcnn_attn.py
|
XiangTodayEatsWhat/detr3d
| 237
|
12783093
|
<reponame>XiangTodayEatsWhat/detr3d<filename>projects/mmdet3d_plugin/models/utils/dgcnn_attn.py
import math
import torch
import torch.nn as nn
from mmcv.cnn.bricks.registry import ATTENTION
from mmcv.runner.base_module import BaseModule
@ATTENTION.register_module()
class DGCNNAttn(BaseModule):
    """A wrapper for DGCNN-type self-attention.
Args:
embed_dims (int): The embedding dimension.
num_heads (int): Parallel attention heads. Same as
`nn.MultiheadAttention`.
        dropout (float): A Dropout layer on attn_output_weights. Default: 0..
init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
Default: None.
"""
def __init__(self,
embed_dims,
num_heads,
dropout=0.,
init_cfg=None,
**kwargs):
super(DGCNNAttn, self).__init__(init_cfg)
self.embed_dims = embed_dims
self.num_heads = num_heads
self.dropout = dropout
self.conv1 = nn.Sequential(nn.Conv2d(self.embed_dims*2, self.embed_dims, kernel_size=1, bias=False),
nn.BatchNorm2d(self.embed_dims),
nn.ReLU(inplace=True))
self.conv2 = nn.Sequential(nn.Conv2d(self.embed_dims*2, self.embed_dims, kernel_size=1, bias=False),
nn.BatchNorm2d(self.embed_dims),
nn.ReLU(inplace=True))
self.K = kwargs['K']
self.dropout = nn.Dropout(dropout)
def forward(self,
query,
key=None,
value=None,
residual=None,
query_pos=None,
key_pos=None,
attn_mask=None,
key_padding_mask=None,
**kwargs):
"""Forward function for `DGCNN`.
**kwargs allow passing a more general data flow when combining
with other operations in `DGCNN`.
Args:
query (Tensor): The input query with shape [num_queries, bs,
embed_dims]. Same in `nn.MultiheadAttention.forward`.
residual (Tensor): This tensor, with the same shape as x,
will be used for the residual link.
If None, `x` will be used. Defaults to None.
query_pos (Tensor): The positional encoding for query, with
the same shape as `x`. If not None, it will
be added to `x` before forward function. Defaults to None.
Returns:
Tensor: forwarded results with shape [num_queries, bs, embed_dims].
"""
if residual is None:
residual = query
if query_pos is not None:
query = query + query_pos
query = query.permute(1, 0, 2) # [bs, num_queries, embed_dims]
edge_feats = self.edge_feats(query, K=self.K)
edge_feats1 = self.conv1(edge_feats)
edge_feats1 = edge_feats1.max(dim=-1)[0]
out = edge_feats1
edge_feats1 = self.edge_feats(edge_feats1.permute(0, 2, 1))
edge_feats2 = self.conv2(edge_feats1)
edge_feats2 = edge_feats2.max(dim=-1)[0]
out = out + edge_feats2
out = out.permute(2, 0, 1)
return residual + self.dropout(out)
def edge_feats(self, query, K=16):
# (B, N, N)
affinity = torch.cdist(query, query)
# (B, N, K)
_, topk = torch.topk(affinity, k=K, dim=2)
B, N, C = query.size()
idx_base = torch.arange(0, B, device=query.device).view(-1, 1, 1) * N
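        # offset the per-sample neighbor indices by batch so they index into the flattened (B*N, C) tensor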
idx = topk + idx_base
idx = idx.view(-1)
query = query.reshape(B*N, C)
query_neighbor = query[idx, :].view(B, N, K, C)
query = query.reshape(B, N, 1, C).repeat(1, 1, K, 1)
out = torch.cat((query_neighbor, query), dim=-1).permute(0, 3, 1, 2).contiguous()
return out
| 2.21875
| 2
|
main.py
|
INTJT/conway
| 0
|
12783094
|
if __name__ == "__main__":
from core.editor import Editor
from widgets.editor_window import EditorWindow
from PyQt5 import QtWidgets
editor = Editor()
application = QtWidgets.QApplication([])
window = EditorWindow()
window.show()
application.exec()
| 1.929688
| 2
|
load/benchmark_database.py
|
cirrostratus1/benchmark-database
| 0
|
12783095
|
<reponame>cirrostratus1/benchmark-database<gh_stars>0
# Copyright (c) 2019 fortiss GmbH
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import os
import pandas as pd
import logging
import shutil
logging.getLogger().setLevel(logging.INFO)
import pickle
import zipfile
from modules.runtime.scenario.scenario_generation.scenario_generation import ScenarioGeneration
from modules.runtime.commons.parameters import ParameterServer
from serialization.scenario_set_serializer import ScenarioSetSerializer
FILE_EXTENSION_SCENARIO_SET = "bark_scenarios"
# The BenchmarkDatabase recursively collects all serialized scenario sets and their
# info dictionaries within a database folder (or zip release) into a pandas table
class BenchmarkDatabase:
def __init__(self, database_root=None, dataframe=None):
if database_root and not isinstance(dataframe, pd.DataFrame):
self._init_from_database_root(database_root)
elif isinstance(dataframe, pd.DataFrame) and database_root:
self._init_from_dataframe(dataframe, database_root)
else:
raise ValueError("Invalid argument combination \
for initialization of database")
def _init_from_database_root(self, database_root):
self.database_root = database_root
if not os.path.exists(database_root):
logging.error("Given database root does not exist")
return
if database_root.endswith("zip"):
logging.info("extracting zipped-database {} to temporary directory /tmp/database".format(database_root))
            shutil.rmtree("/tmp/database", ignore_errors=True)  # the directory may not exist on first run
os.makedirs("/tmp/database")
with zipfile.ZipFile(database_root, 'r') as zip_obj:
zip_obj.extractall('/tmp/database')
self.database_root = '/tmp/database'
# parse recursively all info dictionaries in database into pandas table
self.dataframe = pd.DataFrame()
for root, dirs, files in os.walk(self.database_root):
for file in files:
if ScenarioSetSerializer.scenario_set_info_fileprefix() in file:
logging.info("Found info dict {}".format(file))
with open(os.path.join(root,file), "rb") as f:
info_dict = pickle.load(f)
self.dataframe = self.dataframe.append(info_dict, ignore_index=True)
logging.info("The following scenario sets are available")
logging.info("\n"+self.dataframe.to_string())
def _init_from_dataframe(self, dataframe, database_root):
self.database_root = database_root
self.dataframe = dataframe
def get_num_scenario_sets(self):
return len(self.dataframe.index)
def get_scenario_generator(self, scenario_set_id):
serialized_file_name = self.dataframe.iloc[scenario_set_id]["Serialized"]
if os.path.exists(serialized_file_name):
serialized_file_path = serialized_file_name
else:
serialized_file_path = os.path.join(self.database_root, serialized_file_name)
if os.path.exists(self.database_root):
# move into database root that map files can be found
os.chdir(self.database_root)
param_file_name = self.dataframe.iloc[scenario_set_id]["Params"]
if not param_file_name:
logging.warning("No param file found for scenario set {}. Using defaults...".format(
self.dataframe.iloc[scenario_set_id]["SetName"]))
params = ParameterServer()
else:
params=ParameterServer(filename=param_file_name)
scenario_generation = ScenarioGeneration(params=params)
scenario_generation.load_scenario_list(filename=serialized_file_name)
scenario_set_name = self.dataframe.iloc[scenario_set_id]["SetName"]
return scenario_generation, scenario_set_name
def __iter__(self):
self.current_iter_idx=0
# An iterator interface to loop over all contained scenario sets
return self
def __next__(self):
if self.current_iter_idx < self.get_num_scenario_sets():
scenario_generator = self.get_scenario_generator(self.current_iter_idx)
self.current_iter_idx += 1
return scenario_generator
else:
raise StopIteration
def apply_filter(self, pattern, **kwargs):
dataframe = self.dataframe[self.dataframe['SetName'].str.contains(pat=pattern, **kwargs)]
return BenchmarkDatabase(database_root=self.database_root,
dataframe=dataframe)
| 2
| 2
|
meiduo_mell/meiduo_mell/apps/oauth/models.py
|
fader-zm/kong
| 0
|
12783096
|
from django.db import models
from meiduo_mell.utils.models import BaseModel
from users.models import User
# Create your models here.
class OauthQQUser(BaseModel):
"""
    QQ login user data
"""
    # ForeignKey: defines a foreign key
    # on_delete: specifies how rows referencing the parent table are handled when the parent row is deleted
    # CASCADE: cascade delete - removing the parent row also removes the referencing rows in this table
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name='用户')
    # db_index: if True, an index is created for this field in the table (default is False)
    # indexing frequently queried fields improves query performance
openid = models.CharField(max_length=64, verbose_name='openid', db_index=True)
class Meta:
        db_table = 'tb_oauth_qq'  # database table name
        verbose_name = '用户登录数据'  # name displayed in the admin site ("user login data")
        verbose_name_plural = verbose_name  # plural form of the displayed name
| 2.359375
| 2
|
0015 Painting Houses.py
|
ansabgillani/binarysearchcomproblems
| 1
|
12783097
|
<gh_stars>1-10
from math import inf


class Solution:
def solve(self, matrix):
if not matrix or not matrix[0]: return 0
min1,min2 = 0,0
min_i = -1
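        # min1/min2: smallest and second-smallest accumulated costs so far; min_i: column index of min1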
for row in matrix:
new_min1,new_min2 = inf,inf
new_min_i = -1
for i,cost in enumerate(row):
new_cost = cost + (min2 if i == min_i else min1)
if new_cost < new_min1:
new_min2 = new_min1
new_min1 = new_cost
new_min_i = i
elif new_cost < new_min2:
new_min2 = new_cost
min1,min2,min_i = new_min1,new_min2,new_min_i
return min1
| 3.03125
| 3
|
icv/data/labelme.py
|
dmxj/icv
| 5
|
12783098
|
# -*- coding: UTF-8 -*-
from .dataset import IcvDataSet
from ..utils import is_seq, is_dir
from ..image import imwrite
from ..data.core.bbox import BBox
from ..data.core.polys import Polygon
from ..data.core.sample import Sample, Anno
from ..data.core.meta import AnnoMeta,SampleMeta
from ..vis.color import VIS_COLOR
import random
import os
import json
import shutil
from tqdm import tqdm
class LabelMe(IcvDataSet):
def __init__(self, image_anno_path_list, split="trainval", keep_no_anno_image=True, categories=None,
one_index=False):
assert is_seq(image_anno_path_list)
image_anno_path_list = list(image_anno_path_list)
image_path_list, anno_path_list = list(zip(*image_anno_path_list))
self.split = split
self.keep_no_anno_image = keep_no_anno_image
self.one_index = one_index
self.ids = [os.path.basename(_).rsplit(".", 1)[0] for _ in image_path_list]
self.id2imgpath = {id: image_path_list[ix] for ix, id in enumerate(self.ids)}
self.id2annopath = {id: anno_path_list[ix] for ix, id in enumerate(self.ids)}
self.sample_db = {}
self.color_map = {}
self.get_samples()
self.categories = categories if categories is not None else self.parse_categories()
super(LabelMe, self).__init__(self.ids, self.categories, self.keep_no_anno_image, one_index)
        print("there are %d samples in the LabelMe dataset" % len(self.ids))
        print("there are %d categories in the LabelMe dataset" % len(self.categories))
def save(self, output_dir, reset_dir=False, split=None):
anno_path, image_path = LabelMe.reset_dir(output_dir, reset=reset_dir)
for id in self.ids:
self._write(self.get_sample(id), anno_path, image_path)
@staticmethod
def reset_dir(dist_dir, reset=False):
if not reset:
assert is_dir(dist_dir)
if reset and os.path.exists(dist_dir):
shutil.rmtree(dist_dir)
anno_path = os.path.join(dist_dir, "annotations")
image_path = os.path.join(dist_dir, "images")
for _path in [anno_path, image_path]:
if reset or not is_dir(_path):
os.makedirs(_path)
return anno_path, image_path
    def _get_bbox_from_points(self, points):
        """
        Compute the bounding box from polygon vertices.
        :param points: list of (x, y) vertices
        :return: (xmin, ymin, xmax, ymax)
        """
x_list = [p[0] for p in points]
y_list = [p[1] for p in points]
xmin = min(x_list)
ymin = min(y_list)
xmax = max(x_list)
ymax = max(y_list)
return xmin, ymin, xmax, ymax
def get_sample(self, id):
"""
get sample
:param id: image name
:return:
"""
if id in self.sample_db:
return self.sample_db[id]
anno_file = self.id2annopath[id]
anno_data = json.load(open(anno_file, "r"))
img_file = self.id2imgpath[id]
annos = []
if "shapes" in anno_data:
shapes = anno_data["shapes"]
for shape in shapes:
if "shape_type" not in shape or "points" not in shape or "label" not in shape:
continue
points = shape["points"]
xmin, ymin, xmax, ymax = self._get_bbox_from_points(points)
label = shape["label"]
if label not in self.color_map:
self.color_map[label] = random.choice(VIS_COLOR)
anno = Anno(
bbox=BBox(xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax, label=label),
label=label,
color=self.color_map[label],
polys=Polygon.init_from(points, label=label) if shape["shape_type"] == "polygon" else None,
meta=AnnoMeta()
)
annos.append(anno)
sample = Sample(
name=id,
image=img_file,
annos=annos,
meta=SampleMeta()
)
self.sample_db[id] = sample
return sample
def _write(self, anno_sample, anno_path, img_path):
assert isinstance(anno_sample, Sample)
if is_dir(anno_path):
anno_path = os.path.join(anno_path, "%s.json" % anno_sample.name)
if is_dir(img_path):
img_path = os.path.join(img_path, "%s.jpg" % anno_sample.name)
imwrite(anno_sample.image, img_path)
anno_json = {
"shapes": [],
"imagePath": img_path,
"imageHeight": anno_sample.height,
"imageWidth": anno_sample.width
}
for anno in anno_sample.annos:
shape = {
"label": anno.label,
"shape_type": "polygon" if anno.seg_mode else "rectangle"
}
if anno.seg_mode_polys:
shape["points"] = anno.polys.exterior.tolist()
elif anno.seg_mode_mask:
shape["points"] = anno.mask.to_ploygons().exterior.tolist()
else:
shape["points"] = [[anno.bbox.xmin, anno.bbox.ymin], [anno.bbox.xmax, anno.bbox.ymax]]
anno_json["shapes"].append(shape)
json.dump(anno_json, open(anno_path, "w"))
def vis(self, id=None, with_bbox=True, with_seg=True, is_show=False, save_dir=None, reset_dir=False):
if save_dir is not None:
if not os.path.exists(save_dir):
os.makedirs(save_dir)
elif reset_dir:
shutil.rmtree(save_dir)
os.makedirs(save_dir)
if id is not None:
sample = self.get_sample(id)
save_path = None if save_dir is None else os.path.join(save_dir, "%s.jpg" % sample.name)
return sample.vis(with_bbox=with_bbox, with_seg=with_seg, is_show=is_show, save_path=save_path)
image_vis = []
for id in tqdm(self.ids):
sample = self.get_sample(id)
save_path = None if save_dir is None else os.path.join(save_dir, "%s.jpg" % sample.name)
image = sample.vis(with_bbox=with_bbox, with_seg=with_seg, is_show=False, save_path=save_path)
image_vis.append(image)
return image_vis
| 2.140625
| 2
|
debug/plot_learning_curves.py
|
DavidSabbagh/meeg_power_regression
| 1
|
12783099
|
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import config as cfg
sns.set_style('darkgrid')
sns.set_context('notebook')
sns.despine(trim=True)
plt.close('all')
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
scores = np.load(op.join(cfg.path_outputs,
'all_scores_learning_curves.npy')).item()
train_sizes = scores['train_sizes']
train_scores = scores['train_scores']
test_scores = scores['test_scores']
train_mean = - np.mean(train_scores, axis=1)
train_std = - np.std(train_scores, axis=1)
test_mean = - np.mean(test_scores, axis=1)
test_std = - np.std(test_scores, axis=1)
ax.plot(train_sizes, train_mean, 'b--', lw=2, label="Training score")
ax.fill_between(train_sizes, train_mean - train_std, train_mean + train_std,
alpha=0.1)
ax.plot(train_sizes, test_mean, 'b-', label="CV score")
ax.fill_between(train_sizes, test_mean - test_std, test_mean + test_std,
alpha=0.1, color="b")
# ax.set_xticks(train_sizes)
ax.set_xlabel("Number of training examples")
ax.set_ylabel("MAE", rotation=0)
# ax.set_title('Learning Curve (SpatialFilter + Riemann)')
ax.legend()
plt.tight_layout()
plt.savefig(op.join(cfg.path_outputs, 'plot_MAE_learning_curves.png'),
dpi=300)
| 2.234375
| 2
|
bottleneck_transformer_pytorch/mish_activ.py
|
mydre/MultiLayerGRU
| 0
|
12783100
|
import pdb
import torch
from torch import nn
import torch.nn.functional as F
class Mish(nn.Module):
def __init__(self):
super().__init__()
print('Mish activation loaded....')
def forward(self, x):
x = x * (torch.tanh(F.softplus(x)))
return x
| 2.734375
| 3
|
api/api.py
|
deborae/faq-chatbot
| 0
|
12783101
|
<gh_stars>0
from flask import Flask
from atlassian import Confluence
import os
CONFLUENCE_URL = os.getenv('CONFLUENCE_URL')
USER_EMAIL = os.getenv('USER_EMAIL')
USER_TOKEN = os.getenv('USER_TOKEN')
import re
import random
import string
import json
import nltk
from langdetect import detect
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import warnings
warnings.filterwarnings('ignore')
noSimilarityResponse = 'I apologize, I did not understand :( Please try to rephrase your message.'
#Download the punkt package
nltk.download('punkt', quiet=True)
nltk.download('stopwords', quiet=True)
# Confluence connection
try:
confluenceUrl = CONFLUENCE_URL
email = USER_EMAIL
apiToken = USER_TOKEN
confluence = Confluence(
url=confluenceUrl,
username=email,
password=<PASSWORD>,
cloud=True)
except Exception as e:
print (e)
#Clean up user input
def tokenize_user_message(user_input):
words_list = user_input.lower().split()
# remove non alphabetic values
words_list = [word for word in words_list if word.isalpha()]
if(len(words_list)):
        if detect(user_input) == 'de':
stop_words = set(nltk.corpus.stopwords.words('german'))
else:
stop_words = set(nltk.corpus.stopwords.words('english'))
words_list = [w for w in words_list if not w in stop_words]
words = ' '.join(words_list)
return words
#Get the intents
with open("intents.json") as file:
data = json.load(file)
#A function to return a random greeting response to a users greeting
def greeting_response(user_input):
text_list = user_input.lower().split()
#Bots greeting response
bot_greetings = ['howdy', 'hi', 'hey', 'hello', 'ola']
#Users greeting
user_greetings = ['mekie', 'hey', 'hello', 'hi', 'greetings']
for word in text_list:
if word in user_greetings:
return random.choice(bot_greetings)
# query confluence
def query_confluence(user_input):
try:
cql= 'text ~ "' + user_input + '" and type = page'
# html removal
regex = re.compile('<.*?>')
articles_list = []
response = confluence.cql(cql, start=0, limit=None, expand=None, include_archived_spaces=None, excerpt=None)
for result in response["results"]:
pageUrl = confluenceUrl + 'wiki' + result["url"]
pageId = result['content']['id']
page = confluence.get_page_by_id(pageId, expand="body.storage", status=None, version=None)
pageContent = page['body']['storage']['value']
cleanPageContent = re.sub(regex, '', pageContent)
articles_list.append(cleanPageContent)
return articles_list
except Exception as e:
print (e)
#Sorts the indexes to go from highest to lowest similarity score
def index_sort(list_var):
length = len(list_var)
list_index = list(range(0, length))
x = list_var
for i in range(length):
for j in range(length):
if x[list_index[i]] > x[list_index[j]]:
#Swap
temp_var = list_index[i]
list_index[i] = list_index[j]
                list_index[j] = temp_var
return list_index
#Query the bot
def ask_the_bot(user_input):
confluenceResponse = query_confluence(user_input)
confluenceResponse.append(user_input)
bot_response = ''
countMatrix = CountVectorizer().fit_transform(confluenceResponse)
similarity_scores = cosine_similarity(countMatrix[-1], countMatrix)
similarity_scores_list = similarity_scores.flatten()
index = index_sort(similarity_scores_list)
index= index[1:]
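    # drop the first index: it is the user's own message, which matches itself with similarity 1.0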
response_flag = 0
j = 0
for i in range(len(index)):
if similarity_scores_list[index[i]] > 0.0:
bot_response = bot_response+''+confluenceResponse[index[i]]
response_flag = 1
j = j+1
if j > 2:
break
if response_flag == 0:
bot_response = bot_response+' '+ noSimilarityResponse
return bot_response
# API
app = Flask(__name__)
@app.route('/send-message/<userMessage>', methods = ['POST'])
def chat(userMessage):
robot_greeting = greeting_response(userMessage)
if robot_greeting:
response = robot_greeting
else:
tokenized_user_message = tokenize_user_message(userMessage)
if tokenized_user_message:
response = ask_the_bot(tokenized_user_message)
else:
response = noSimilarityResponse
return { "messages": [response] }
| 3.078125
| 3
|
models/backbones.py
|
smallflyingpig/seeking-the-shape-of-sound
| 12
|
12783102
|
<reponame>smallflyingpig/seeking-the-shape-of-sound
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os.path as osp
from models.base_model import BaseModel
from models.nn.ir_se_model import IR_50
from models.nn.res_se_34l_model import ResNetSE34
from models.nn.module import NormalizeConv, MultiLayerPerceptron, Conv1dModule, LinearModule
class SEResNet50IR(BaseModel):
def __init__(self, args):
super(SEResNet50IR, self).__init__(args)
output_channel = self.args.output_channel
self.model = IR_50([112, 112], pretrained='pretrained_models/backbone_ir50_ms1m_epoch120.pth')
self.fc = nn.Sequential(
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Linear(512, output_channel, bias=False),
nn.Dropout(0.5)
)
self.cls = Classifier(output_channel, self.args.num_classes, self.args.vote)
def forward(self, x, y=None):
x = self.model(x)
x = self.fc(x)
return x
class ThinResNet34(BaseModel):
def __init__(self, args):
super(ThinResNet34, self).__init__(args)
output_channel = self.args.output_channel
self.model = ResNetSE34(pretrained='pretrained_models/baseline_lite_ap.model')
self.fc = nn.Sequential(
nn.BatchNorm1d(512),
nn.ReLU(inplace=True),
nn.Linear(512, output_channel, bias=False),
nn.Dropout(0.5)
)
def forward(self, x):
x = self.model(x)
x = self.fc(x)
return x
class Classifier(nn.Module):
def __init__(self, nin, nout, vote=False):
super(Classifier, self).__init__()
self.weight = nn.Parameter(torch.FloatTensor(nout, nin))
nn.init.xavier_uniform_(self.weight)
def dist(self, a, b):
dist = (a * b).sum(-1)
return dist
def arc_margin(self, x, y, margin):
dist = self.dist(self.weight.unsqueeze(0), x.unsqueeze(1)) # N x M
one_hot = torch.zeros(dist.size()).to(x.device)
one_hot.scatter_(1, y.view(-1, 1).long(), 1)
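        # one_hot marks the ground-truth class position for each sample, so the margin is only applied to the target logit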
if margin is None:
logit = (one_hot * (dist)) + ((1.0 - one_hot) * dist)
else:
logit = (one_hot * (dist - margin.unsqueeze(1))) + ((1.0 - one_hot) * dist)
return logit
def cross_logit(self, x, v):
dist = self.dist(F.normalize(x).unsqueeze(0), v.unsqueeze(1))
one_hot = torch.zeros(dist.size()).to(x.device)
one_hot.scatter_(1, torch.arange(len(x)).view(-1, 1).long().to(x.device), 1)
pos = (one_hot * dist).sum(-1, keepdim=True)
logit = (1.0 - one_hot) * (dist - pos)
loss = torch.log(1 + torch.exp(logit).sum(-1) + 3.4)
return loss
def forward(self, x, y, margin=None):
logit = self.arc_margin(x, y, margin)
return logit
| 2.171875
| 2
|
starthinker_ui/website/management/commands/log.py
|
dvandra/starthinker
| 1
|
12783103
|
###########################################################################
#
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
from django.core.management.base import BaseCommand, CommandError
from django.template.loader import render_to_string
from starthinker_worker.log import log_get
class Command(BaseCommand):
help = 'Render Log HTML'
def add_arguments(self, parser):
parser.add_argument(
'--recipe',
action='store',
dest='recipe',
required=True,
help='Recipe to pull log for.',
)
def handle(self, *args, **kwargs):
log = log_get(kwargs['recipe'])
        print(render_to_string('log.html', {'log': log}))
| 2.046875
| 2
|
src/main/resources/tf_graphs/lenet_tf.py
|
farizrahman4u/dl4j-test-resources
| 0
|
12783104
|
""" Convolutional Neural Network.
Build and train a convolutional neural network with TensorFlow.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
This example is using TensorFlow layers API, see 'convolutional_network_raw'
example for a raw implementation with variables.
Author: <NAME>
Project: https://github.com/aymericdamien/TensorFlow-Examples/
"""
from __future__ import division, print_function, absolute_import
from tensorflow.python.tools import freeze_graph
import numpy as np
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
import tensorflow as tf
# Training Parameters
learning_rate = 0.001
num_steps = 2000
batch_size = 128
# Network Parameters
num_input = 784 # MNIST data input (img shape: 28*28)
num_classes = 10 # MNIST total classes (0-9 digits)
dropout = 0.75 # Dropout, probability to keep units
# Create the neural network
def conv_net(x_dict, n_classes, dropout, reuse, is_training):
# Define a scope for reusing the variables
# TF Estimator input is a dict, in case of multiple inputs
x = x_dict['images']
# MNIST data input is a 1-D vector of 784 features (28*28 pixels)
# Reshape to match picture format [Height x Width x Channel]
# Tensor input become 4-D: [Batch Size, Height, Width, Channel]
x = tf.reshape(x, shape=[-1, 28, 28, 1])
# Convolution Layer with 32 filters and a kernel size of 5
conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv1 = tf.layers.max_pooling2d(conv1, 2, 2)
# Convolution Layer with 64 filters and a kernel size of 3
conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv2 = tf.layers.max_pooling2d(conv2, 2, 2)
# Flatten the data to a 1-D vector for the fully connected layer
fc1 = tf.contrib.layers.flatten(conv2)
# Fully connected layer (in tf contrib folder for now)
fc1 = tf.layers.dense(fc1, 1024)
# Apply Dropout (if is_training is False, dropout is not applied)
fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)
# Output layer, class prediction
out = tf.layers.dense(fc1, n_classes)
return out
from tensorflow.python.estimator.export import export
with tf.Session() as sess:
# Build the Estimator
feature_spec = {'images': tf.constant(mnist.train.images)}
serving_input_fn = export.build_raw_serving_input_receiver_fn(feature_spec)
# Train the Model
# Evaluate the Model
# Define the input function for evaluating
input_fn = tf.estimator.inputs.numpy_input_fn(
x={'images': mnist.test.images}, y=mnist.test.labels,
batch_size=batch_size, shuffle=False)
# Define a scope for reusing the variables
# TF Estimator input is a dict, in case of multiple inputs
x = mnist.test.images
is_training = False
n_classes = 10
# MNIST data input is a 1-D vector of 784 features (28*28 pixels)
# Reshape to match picture format [Height x Width x Channel]
# Tensor input become 4-D: [Batch Size, Height, Width, Channel]
x = tf.reshape(x, shape=[-1, 28, 28, 1])
# Convolution Layer with 32 filters and a kernel size of 5
conv1 = tf.layers.conv2d(x, 32, 5, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv1 = tf.layers.max_pooling2d(conv1, 2, 2)
# Convolution Layer with 64 filters and a kernel size of 3
conv2 = tf.layers.conv2d(conv1, 64, 3, activation=tf.nn.relu)
# Max Pooling (down-sampling) with strides of 2 and kernel size of 2
conv2 = tf.layers.max_pooling2d(conv2, 2, 2)
# Flatten the data to a 1-D vector for the fully connected layer
fc1 = tf.contrib.layers.flatten(conv2)
# Fully connected layer (in tf contrib folder for now)
fc1 = tf.layers.dense(fc1, 1024)
# Apply Dropout (if is_training is False, dropout is not applied)
fc1 = tf.layers.dropout(fc1, rate=dropout, training=is_training)
# Use the Estimator 'evaluate' method
# Output layer, class prediction
out = tf.layers.dense(fc1, n_classes,name='output')
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.global_variables())
tf.train.write_graph(sess.graph_def, 'lenet_dir', 'lenet.pbtxt',as_text=True)
saver.save(sess, 'lenet_dir/test3.ckpt',write_meta_graph=False)
| 4.375
| 4
|
usermanager/users/urls.py
|
yasminhillis/user-manager
| 1
|
12783105
|
from rest_framework import routers
from .api import UserViewSet
router = routers.DefaultRouter()
router.register('users', UserViewSet, 'users')
urlpatterns = router.urls
| 1.617188
| 2
|
blocklenium/main.py
|
jpunkt/blocklenium
| 0
|
12783106
|
<gh_stars>0
import logging
import sys
import click
from blocklenium import settings
from blocklenium.blocklenium import Blocklenium
logger = logging.getLogger('blocklenium.main')
if __name__ == '__main__':
print('Please use the click entry-point.')
pass
@click.command()
@click.option('--plc_id', default=settings.PLC_ID, show_default=True,
help='ADS Id of PLC')
@click.option('--plc_flag', 'plc_start_flag',
default=settings.PLC_START_FLAG, show_default=True,
help='PLC variable name of flag to start browser (BOOL)')
@click.option('--plc_errorflag', 'plc_error_flag',
default=settings.PLC_ERROR_FLAG, show_default=True,
help='PLC variable name of flag set TRUE if an error occurs.'
)
@click.option('--plc_errormsg', 'plc_error_msg',
default=settings.PLC_ERROR_MSG, show_default=True,
help='PLC variable name to hold error messages')
@click.option('--webdriver', 'chromedriver_path',
default=settings.CHROMEDRIVER_PATH, show_default=True,
type=click.Path(exists=True, file_okay=True, dir_okay=False),
help='Path to selenium webdriver for chrome')
@click.option('--insecure-con', 'browser_insecure_certs', is_flag=True,
help='Suppress warning for insecure site certificates')
@click.option('--login-required', 'desk_login_required', is_flag=True,
help='''Target URL requires a login. Requires the login
credentials to be set to the appropriate variables in
the PLC (see below for default values)''')
@click.option('--plc-desk-user', 'plc_desk_user', show_default=True,
default=settings.PLC_DESK_USER,
help='''Change the PLC variable which stores the username for
the target URL''')
@click.option('--plc-desk-password', '<PASSWORD>',
show_default=True,
default=settings.PLC_DESK_PW,
help='''Change the PLC variable which stores the password for
the target URL''')
@click.option('-b', '--bookmarklet', 'bookmarklet_path',
required=True,
type=click.Path(exists=True, file_okay=True, dir_okay=False),
help='Path to bookmarklet javascript code')
@click.option('-u', '--url', 'desk_url',
required=True,
help='URL to run bookmarklet on')
def main(plc_id,
plc_start_flag,
plc_error_flag,
plc_error_msg,
chromedriver_path,
browser_insecure_certs,
desk_login_required,
plc_desk_user,
plc_desk_pw,
bookmarklet_path,
desk_url):
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
logger.info('Initializing blocklenium...')
# Build config dictionary from passed arguments
config = {key.upper(): value for key, value in locals().items()}
if logger.level == logging.DEBUG:
for k in config.keys():
logger.debug('config[{0}]={1}'.format(k, config[k]))
blknm = Blocklenium(config)
blknm.start()
| 2.6875
| 3
|
aggregator/static_importer.py
|
olivmaurel/termsearch
| 0
|
12783107
|
<gh_stars>0
import csv
import os
import logging
import sys
import django
LOCAL_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(os.path.abspath(os.path.dirname('../')))
sys.path.append(os.path.abspath(os.path.dirname('.')))
django.setup()
from aggregator.models import Language
# Get an instance of a logger
logger = logging.getLogger(__name__)
def import_languages_from_csv(path):
logger.info("Importing languages...")
with open(path) as f:
reader = csv.reader(f)
next(reader) # skip headers
for row in reader:
_, created = Language.objects.get_or_create(
name = row[0],
code2d = row[1],
code3d = row[2],)
logger.info("{} found. Created {}".format(row[0], created))
logger.info("Imported {} successfully".format(path))
def export_csv_to_json(csvpath, jsonpath):
logger.info("Converting {} to JSON...".format(csvpath))
with open(csvpath, 'r') as f:
csvfile = csv.reader(f)
jsonfile = open(jsonpath, 'w')
# fieldnames = csvfile # todo finish this
pass
def order_csv_alphabetically(file):
pass
if __name__ == "__main__":
language_csv_path = os.path.join(LOCAL_DIR, 'static/aggregator/csv/language.csv')
language_json_path = os.path.join(LOCAL_DIR, 'aggregator/fixtures/language.json')
import_languages_from_csv(language_csv_path)
export_csv_to_json(language_csv_path, language_json_path)
| 2.3125
| 2
|
config/__init__.py
|
kunal-sanghvi/flask-app
| 0
|
12783108
|
from .settings import *
from .constants import *
| 1.179688
| 1
|
tests/data/token_indexers/elmo_indexer_test.py
|
unendin/allennlp
| 1
|
12783109
|
# pylint: disable=no-self-use
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary
from allennlp.data.token_indexers import ELMoTokenCharactersIndexer
class TestELMoTokenCharactersIndexer(AllenNlpTestCase):
def test_bos_to_char_ids(self):
indexer = ELMoTokenCharactersIndexer()
indices = indexer.token_to_indices(Token('<S>'), Vocabulary())
expected_indices = [259, 257, 260, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261]
assert indices == expected_indices
def test_eos_to_char_ids(self):
indexer = ELMoTokenCharactersIndexer()
indices = indexer.token_to_indices(Token('</S>'), Vocabulary())
expected_indices = [259, 258, 260, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261]
assert indices == expected_indices
def test_unicode_to_char_ids(self):
indexer = ELMoTokenCharactersIndexer()
indices = indexer.token_to_indices(Token(chr(256) + 't'), Vocabulary())
expected_indices = [259, 197, 129, 117, 260, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261]
assert indices == expected_indices
def test_elmo_as_array_produces_token_sequence(self): # pylint: disable=invalid-name
indexer = ELMoTokenCharactersIndexer()
indices = [
indexer.token_to_indices(Token(token), Vocabulary())
for token in ['Second', '.']
]
padded_tokens = indexer.pad_token_sequence(indices,
desired_num_tokens=3,
padding_lengths={})
expected_padded_tokens = [[259, 84, 102, 100, 112, 111, 101, 260, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261],
[259, 47, 260, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261, 261, 261, 261, 261,
261, 261, 261, 261, 261],
[0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0]]
assert padded_tokens == expected_padded_tokens
| 2.1875
| 2
|
membership/management/commands/csvtestdata.py
|
guaq/sikteeri
| 0
|
12783110
|
# encoding: UTF-8
"""
Generates test data for CSV import.
"""
from __future__ import print_function
from __future__ import with_statement
# http://www.python.org/dev/peps/pep-3101/ # unicode.format()
# http://www.python.org/dev/peps/pep-3105/ # print function
import codecs
from uuid import uuid4
from datetime import datetime, timedelta
from sys import stdout
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from membership.models import *
header_row = u'''Kirjauspäivä;Arvopäivä;Määrä EUROA;Tapahtumalajikoodi;Selitys;Saaja/Maksaja;Saajan tilinumero;Viite;Viesti;Arkistotunnus;'''
row = u'''{0[date]};{0[date]};{0[sum]};106;TILISIIRTO;{0[payer]};{0[account]};{0[reference]};{0[message]};{0[id]};'''
def dict_for_cycle(cycle):
payment_date = cycle.last_bill().due_date - timedelta(days=1)
if payment_date > datetime.now():
payment_date = datetime.now()
return {
'date': payment_date.strftime('%d.%m.%Y'),
'sum': cycle.sum,
'payer': cycle.membership.name(),
'account': settings.IBAN_ACCOUNT_NUMBER,
'reference': cycle.reference_number,
'message': "Maksu",
'id': str(uuid4())
}
def print_csv(stream=stdout, count=10):
print(header_row, file=stream)
short_sum = False
high_sum = False
wrong_reference = False
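    # the first rows are deliberately perturbed (underpayment, overpayment, malformed reference) so the payment import can be tested against imperfect data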
for cycle in BillingCycle.objects.filter(is_paid=False):
if count == 0:
break
d = dict_for_cycle(cycle)
if short_sum is False:
d['sum'] -= 5
short_sum = True
elif high_sum is False:
d['sum'] += 5
high_sum = True
elif wrong_reference is False:
d['reference'] = d['reference'][2:]
wrong_reference = True
print(row.format(d), file=stream)
count -= 1
paid_cycle = BillingCycle.objects.filter(is_paid=True)[0]
print(row.format(dict_for_cycle(paid_cycle)), file=stream)
class Command(BaseCommand):
args = '<file_to_write_to>'
help = 'Generate payments CSV to be used for testing out payment import' \
+ ' form'
def handle(self, *args, **options):
if len(args) > 0:
with codecs.open(args[0], 'w', encoding='iso-8859-1') as f:
if len(args) > 1:
print_csv(stream=f, count=int(args[1]))
else:
print_csv(stream=f)
else:
print_csv()
'''Kirjauspäivä;Arvopäivä;Määrä EUROA;Tapahtumalajikoodi;Selitys;Saaja/Maksaja;Saajan tilinumero;Viite;Viesti;Arkistotunnus;
21.05.2008;21.05.2008;-66,50;106;TILISIIRTO;MATTI MEIKÄLÄINEN;211135-00302106;;VUOSIKOKOUKSEN JA YLLÄPITOMATKAN MATKAKORVAUKSET. HALKO 3/2008. ;20080521593497O10031;
03.08.2008;03.08.2008;-33,00;106;TILISIIRTO;MATTI MEIKÄLÄINEN;211135-00302106;;POSTIKULUKORVAUS LASKUTETTU POSTIKULUSTA. HYVÄKSYTTYHALKOSSA 07/2008 24.7.2008. ;20080803593497AK0018;
27.01.2009;27.01.2009;30,00;588;VIITESIIRTO;MEIKÄLÄINEN MATTI JOHANNES;;00000000000007009017; ;200901252588NGNO0290;
21.01.2010;21.01.2010;-1063,35;106;TILISIIRTO;MATTI MEIKÄLÄINEN;211135-00302106;;HALKO 3/2010 KEVÄTKICKOFF TARVIKKEITA. KUPLAMUOVIA XOBIIN ;20100121593497690187;
21.01.2010;21.01.2010;-73,10;106;TILISIIRTO;MATTI MEIKÄLÄINEN;211135-00302106;;HALKO 3/2010 SIKTEERIVIIKONLOPUN MATKOJA. ;201001215934979N0174;
25.01.2010;25.01.2010;30,00;588;VIITESIIRTO;MEIKÄLÄINEN MATTI JOHANNES;;00000000000001110012;SEPA-MAKSU SAAJA/MOTTAG./BEN: Kapsi Internet-kKULUKOODI: SLEV ALKUP.MÄÄRÄ EUR 30.00+ EUR 30.00+;201001255UTZ00002150;
21.04.2010;21.04.2010;20,00;588;VIITESIIRTO;MEIKÄLÄINEN MATTI JOHANNES;;00000000000000032094; ;201004202588NGN52047;
'''
| 2.078125
| 2
|
dupdeletergui.py
|
ocdocdocd/DupDeleter
| 0
|
12783111
|
from gi.repository import Gtk, GdkPixbuf, GLib, Pango
import dhash
import os
import collections
import threading
import Queue
class mainWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="DupDeleter")
self.set_border_width(10)
self.grid = Gtk.Grid()
self.grid.set_column_homogeneous(True)
self.grid.set_row_homogeneous(True)
self.add(self.grid)
# set up the model
        # columns = [selected (bool), imgName, imgLocation, # of Dups]
self.dupe_store = Gtk.TreeStore(bool, str, str, int)
self.current_filter_ext = None
self.ext_filter = self.dupe_store.filter_new()
self.ext_filter.set_visible_func(self.ext_filter_func)
# Create model's view
self.treeview = Gtk.TreeView.new_with_model(self.ext_filter)
check_renderer = Gtk.CellRendererToggle()
check_renderer.set_activatable(True)
check_renderer.set_active(False)
check_renderer.connect("toggled", self.on_toggled)
column = Gtk.TreeViewColumn("", check_renderer, active=0)
self.treeview.append_column(column)
for i, column_title in enumerate(["Name", "Location", "# of Dups"]):
renderer = Gtk.CellRendererText()
column = Gtk.TreeViewColumn(column_title, renderer, text=i+1)
column.set_sort_column_id(i)
column.set_fixed_width(200)
column.set_resizable(True)
self.treeview.append_column(column)
self.treeview.connect("cursor-changed", self.on_row_changed)
self.scrollable_treelist = Gtk.ScrolledWindow()
self.scrollable_treelist.set_vexpand(True)
# Create buttons for filtering results by image extension
self.extensions = ("jpg", "gif", "png", "tiff", "All")
self.buttons = list()
for ext in self.extensions:
button = Gtk.Button(ext)
self.buttons.append(button)
button.connect("clicked", self.on_selection_button_clicked)
self.create_toolbar()
# Create labels for showing scan progress
self.scan_status_label = Gtk.Label("No Scan in Progress")
self.scan_status_label.set_halign(3) # 3 = CENTER
self.file_scan_label = Gtk.Label(None)
self.file_scan_label.set_ellipsize(Pango.EllipsizeMode.START)
self.file_scan_label.set_width_chars(30)
self.file_scan_label.set_halign(3) # 3 = CENTER
# Make a frame to hold image previews
self.img_frame = Gtk.Frame(label="Selected Image")
self.img_frame.set_label_align(0.5, 0.5)
self.img_frame.set_shadow_type(Gtk.ShadowType.ETCHED_OUT)
self.img_frame.set_size_request(200, 200)
# Assemble the GUI
self.grid.attach(self.scrollable_treelist, 0, 1, 8, 10)
self.grid.attach_next_to(self.img_frame, self.scrollable_treelist,
Gtk.PositionType.RIGHT, 5, 6)
self.grid.attach_next_to(self.buttons[0], self.scrollable_treelist,
Gtk.PositionType.BOTTOM, 1, 1)
for i, button in enumerate(self.buttons[1:]):
self.grid.attach_next_to(button, self.buttons[i],
Gtk.PositionType.RIGHT, 1, 1)
self.scrollable_treelist.add(self.treeview)
self.grid.attach_next_to(self.scan_status_label, self.buttons[3],
Gtk.PositionType.BOTTOM, 6, 1)
self.grid.attach_next_to(self.file_scan_label, self.scan_status_label,
Gtk.PositionType.BOTTOM, 6, 1)
self.grid.set_column_spacing(10)
self.grid.set_row_spacing(5)
self.queue = Queue.Queue() # Queue for holding fetched images
self.delete_list = list() # List for holding to-be-deleted items
self.show_all()
def getDups(self, path, queue):
'''Collects all image duplicates starting from PATH.
Fills a queue with lists of lists
containing image names and locations.'''
images = collections.defaultdict(list)
image_exts = ('.jpg', '.png', '.gif', '.tiff')
GLib.idle_add(self.scan_status_label.set_text, "Scanning...")
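        # GTK widgets must be updated from the main loop; GLib.idle_add schedules these updates from the worker thread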
for root, dirs, files in os.walk(path):
for name in files:
GLib.idle_add(self.file_scan_label.set_text,
root)
                if name.lower().endswith(image_exts):  # also matches 5-char extensions like .tiff
img_loc = os.path.join(root, name)
img_hash = dhash.hash(img_loc)
if img_hash != -1:
# Have to add False at beginning because of
# togglebutton status.
images[img_hash].append([False, name, root])
GLib.idle_add(self.scan_status_label.set_text, "Done")
for group in images:
if len(images[group]) > 1:
queue.put(images[group])
GLib.idle_add(self.generateModelData)
def generateModelData(self):
'''Fills treeModel rows with found duplicates'''
while not self.queue.empty():
image_set = self.queue.get()
parent = ''
no_of_dups = len(image_set)
for img in image_set:
img.append(no_of_dups)
if not parent:
parent = self.dupe_store.append(None, img)
else:
self.dupe_store.append(parent, img)
def on_button_clicked_open(self, widget):
'''Brings up the file browser window.
Returns the path of the root folder.'''
dialog = Gtk.FileChooserDialog("Select the root folder", self,
Gtk.FileChooserAction.SELECT_FOLDER,
(Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
"Select", Gtk.ResponseType.OK))
dialog.set_default_size(800, 400)
response = dialog.run()
if response == Gtk.ResponseType.OK:
root = dialog.get_uri()[8:] # have to remove file:///
thread = threading.Thread(target=self.getDups,
args=(root, self.queue))
thread.daemon = True
thread.start()
GLib.timeout_add(200, self.generateModelData)
dialog.destroy()
elif response == Gtk.ResponseType.CANCEL:
dialog.destroy()
def ext_filter_func(self, model, iter, data):
'''Tests if the image extension in the row is the one in the filter'''
if self.current_filter_ext is None or self.current_filter_ext == "All":
return True
else:
            return model[iter][1].lower().endswith(self.current_filter_ext.lower())  # column 1 holds the image name
def create_toolbar(self):
toolbar = Gtk.Toolbar()
self.grid.attach(toolbar, 0, 0, 8, 1)
button_open = Gtk.ToolButton.new_from_stock(Gtk.STOCK_OPEN)
button_open.set_tooltip_text("Scan Directory")
toolbar.insert(button_open, 0)
button_delete = Gtk.ToolButton.new_from_stock(Gtk.STOCK_DELETE)
button_delete.set_tooltip_text("Delete Selected Images")
toolbar.insert(button_delete, 1)
button_auto_prune = Gtk.ToolButton.new_from_stock(Gtk.STOCK_NO)
button_auto_prune.set_tooltip_text("Auto-Prune")
toolbar.insert(button_auto_prune, 2)
button_exit = Gtk.ToolButton.new_from_stock(Gtk.STOCK_QUIT)
button_exit.set_tooltip_text("Exit")
toolbar.insert(button_exit, 3)
button_open.connect("clicked", self.on_button_clicked_open)
button_auto_prune.connect("clicked", self.on_button_clicked_auto_prune)
button_delete.connect("clicked", self.on_button_clicked_delete)
button_exit.connect("clicked", self.on_button_clicked_exit)
def on_selection_button_clicked(self, widget):
'''Called on any selection button click'''
self.current_filter_ext = widget.get_label()
self.ext_filter.refilter()
def on_button_clicked_auto_prune(self, widget):
'''
Automatically deletes all files except for parents
'''
dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.WARNING,
Gtk.ButtonsType.OK_CANCEL,
"Delete all selected images?")
response = dialog.run()
if response == Gtk.ResponseType.OK:
rootiter = self.dupe_store.get_iter_first()
self.prune_helper(rootiter, False)
dialog.destroy()
elif response == Gtk.ResponseType.CANCEL:
dialog.destroy()
def prune_helper(self, treeiter, toDelete):
'''
Deletes all files except for parents
toDelete indicates whether or not treeiter should be deleted.
It should be set to False on call unless you want to delete
everything.
'''
deleted = 0
isValid = True
while (treeiter is not None) and isValid:
if self.dupe_store.iter_has_child(treeiter):
childiter = self.dupe_store.iter_children(treeiter)
deleted += self.prune_helper(childiter, True)
self.dupe_store[treeiter][3] = self.dupe_store.iter_n_children(treeiter)
if toDelete:
path = os.path.join(self.dupe_store[treeiter][2],
self.dupe_store[treeiter][1])
os.remove(path)
isValid = self.dupe_store.remove(treeiter)
deleted += 1
# If treestore.remove() is successful iter is automatically
# updated to the next iter, so we just need to check to
# make sure that isn't the case before using iter_next()
else:
treeiter = self.dupe_store.iter_next(treeiter)
return deleted
def on_button_clicked_delete(self, widget):
'''Deletes all selected files'''
dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.WARNING,
Gtk.ButtonsType.OK_CANCEL,
"Delete all selected images?")
response = dialog.run()
if response == Gtk.ResponseType.OK:
rootiter = self.dupe_store.get_iter_first()
self.delete_helper(rootiter)
dialog.destroy()
elif response == Gtk. ResponseType.CANCEL:
dialog.destroy()
def delete_helper(self, treeiter):
'''
Recursively searches through all rows searching for files to
delete.
'''
deleted = 0
isValid = True
while (treeiter is not None) and isValid:
if self.dupe_store.iter_has_child(treeiter):
childiter = self.dupe_store.iter_children(treeiter)
deleted += self.delete_helper(childiter)
if self.dupe_store[treeiter][0]:
path = os.path.join(self.dupe_store[treeiter][2],
self.dupe_store[treeiter][1])
if self.dupe_store.iter_has_child(treeiter):
child = self.dupe_store.iter_children(treeiter)
isValid = self.childToParent(treeiter, child)
else:
isValid = self.dupe_store.remove(treeiter)
os.remove(path)
deleted += 1
self.dupe_store[treeiter][3] = self.dupe_store.iter_n_children(treeiter)
else:
treeiter = self.dupe_store.iter_next(treeiter)
return deleted
def on_button_clicked_exit(self, widget):
'''Exits the program'''
dialog = Gtk.MessageDialog(self, 0, Gtk.MessageType.WARNING,
Gtk.ButtonsType.OK_CANCEL,
"Are you sure you want to exit?")
response = dialog.run()
if response == Gtk.ResponseType.OK:
Gtk.main_quit()
elif response == Gtk.ResponseType.CANCEL:
dialog.destroy()
def childToParent(self, parent_iter, child_iter):
'''
Replaces parent_iter with child_iter, effectively moving child to
parent's position.
Returns the next iter after parent, or invalidates it if there
isn't one.
'''
for col in xrange(self.dupe_store.get_n_columns()):
childval = self.dupe_store.get_value(child_iter, col)
self.dupe_store.set_value(parent_iter, col, childval)
self.dupe_store.remove(child_iter)
return self.dupe_store.iter_next(parent_iter)
def on_row_changed(self, widget):
(model, pathlist) = widget.get_selection().get_selected_rows()
tree_iter = model.get_iter(pathlist[0])
img_name = model.get_value(tree_iter, 1)
img_root = model.get_value(tree_iter, 2)
img_loc = os.path.join(img_root, img_name)
child = self.img_frame.get_child()
if child:
self.img_frame.remove(child)
alloc = self.img_frame.get_allocation()
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_scale(img_loc,
alloc.width - 20,
alloc.height - 20,
True)
image = Gtk.Image.new_from_pixbuf(pixbuf)
image.show()
self.img_frame.add(image)
def on_toggled(self, widget, path):
'''
Adds or removes the row's treeiter to a list that designates it
for removal.
'''
self.dupe_store[path][0] = not self.dupe_store[path][0]
win = mainWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
| 2.453125
| 2
|
tests/util/sqlalchemy_util_test.py
|
candango/firenado
| 13
|
12783112
|
<reponame>candango/firenado<filename>tests/util/sqlalchemy_util_test.py
#!/usr/bin/env python
#
# Copyright 2015-2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from firenado.util.sqlalchemy_util import Base, base_to_dict
from sqlalchemy import Column, String
from sqlalchemy.types import Integer, DateTime
from sqlalchemy.sql import text
import unittest
class TestBase(Base):
__tablename__ = "test"
id = Column("id", Integer, primary_key=True)
username = Column("username", String(150), nullable=False)
first_name = Column("first_name", String(150), nullable=False)
last_name = Column("last_name", String(150), nullable=False)
password = Column("password", String(150), nullable=False)
email = Column("email", String(150), nullable=False)
created = Column("created", DateTime, nullable=False,
server_default=text("now()"))
modified = Column("modified", DateTime, nullable=False,
server_default=text("now()"))
class BaseToDictTestCase(unittest.TestCase):
def setUp(self):
self.test_object = TestBase()
self.test_object.id = 1
self.test_object.username = "anusername"
self.test_object.password = "<PASSWORD>"
self.test_object.first_name = "Test"
self.test_object.last_name = "Object"
self.test_object.email = "<EMAIL>"
def test_base_to_dict(self):
dict_from_base = base_to_dict(self.test_object)
self.assertEqual(dict_from_base['id'], self.test_object.id)
self.assertEqual(dict_from_base['username'], self.test_object.username)
self.assertEqual(dict_from_base['password'], self.test_object.password)
self.assertEqual(dict_from_base['first_name'],
self.test_object.first_name)
self.assertEqual(dict_from_base['last_name'],
self.test_object.last_name)
self.assertEqual(dict_from_base['email'], self.test_object.email)
self.assertEqual(dict_from_base['created'], self.test_object.created)
self.assertEqual(dict_from_base['modified'], self.test_object.modified)
    def test_base_to_dict_with_include_attributes(self):
dict_from_base = base_to_dict(self.test_object,
["id", "username", "first_name"])
self.assertEqual(dict_from_base['id'], self.test_object.id)
self.assertEqual(dict_from_base['username'], self.test_object.username)
self.assertEqual(dict_from_base['first_name'],
self.test_object.first_name)
self.assertTrue("password" not in dict_from_base)
self.assertTrue("last_name" not in dict_from_base)
self.assertTrue("email" not in dict_from_base)
self.assertTrue("created" not in dict_from_base)
self.assertTrue("modified" not in dict_from_base)
| 2.46875
| 2
|
application/handlers/api/apikmldownloader.py
|
opengovt/openroads-geostore
| 1
|
12783113
|
import json
from application.handlers.base import BaseHandler
class APIKMLDownloader(BaseHandler):
def get(self):
if self.request.get('project_code'):
project_code = self.request.get('project_code')
self.response.headers['Content-Type'] = 'application/json'
self.response.write(json.dumps({'project_code': project_code}))
| 2.40625
| 2
|
compare.py
|
MichaelRiabzev/directoriesCompare
| 0
|
12783114
|
<filename>compare.py
import hashlib
import os
import datetime
import sys
def md5(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def compareTables(A,B):
onlyInA = []
onlyInB = []
correlation = []
for k in A.keys():
if k in B:
correlation += [(A[k],B[k])]
del B[k]
else: onlyInA += [A[k]]
for k in B.keys():
onlyInB += [B[k]]
return (onlyInA, onlyInB, correlation)
def addDirToTable(path, table):
for root, subdirs, files in os.walk(path):
for f in files:
fullPath = os.path.join(root,f)
md5Val = md5(fullPath)
table[md5Val] = fullPath
srcPaths = [sys.argv[1]]
srcTable = {}
dstPaths = [sys.argv[2]]
dstTable = {}
for p in srcPaths:
print "collecting from " + p + " ; " + str(datetime.datetime.now())
sys.stdout.flush()
addDirToTable(p, srcTable)
for p in dstPaths:
print "collecting from " + p + " ; " + str(datetime.datetime.now())
sys.stdout.flush()
addDirToTable(p, dstTable)
print "Comparing collections " + str(datetime.datetime.now())
sys.stdout.flush()
onlyInA , onlyInB, correlation = compareTables(srcTable, dstTable)
print "Files present in both sets:"
sys.stdout.flush()
for f in correlation:
print f
print "Files only in " + str(srcPaths) + " :"
sys.stdout.flush()
for f in onlyInA:
print f
print "Files only in " + str(dstPaths) + " :"
sys.stdout.flush()
for f in onlyInB:
print f
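# --- Hedged usage note (editor's addition, not part of the original script) ---
# The script expects two directory paths on the command line; paths below are
# illustrative only:
#   python compare.py /path/to/source_dir /path/to/backup_dir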
| 2.609375
| 3
|
GameOfThrones/__init__.py
|
dhillonr/db
| 0
|
12783115
|
#!/usr/bin/env python
# encoding: utf-8
# The MIT License (MIT)
# Copyright (c) 2017 CNRS
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# AUTHORS
# <NAME> - http://herve.niderb.fr
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from pyannote.database import Database
from pyannote.database.protocol import SpeakerDiarizationProtocol
from pyannote.parser import MDTMParser
import os.path as op
from pyannote.core import Segment, Annotation
class GameOfThronesSpeakerDiarizationProtocol(SpeakerDiarizationProtocol):
"""Base speaker diarization protocol for GameOfThrones database
This class should be inherited from, not used directly.
Parameters
----------
preprocessors : dict or (key, preprocessor) iterable
        When provided, each protocol item (dictionary) is preprocessed, such
that item[key] = preprocessor(**item). In case 'preprocessor' is not
callable, it should be a string containing placeholder for item keys
(e.g. {'wav': '/path/to/{uri}.wav'})
"""
def __init__(self, preprocessors={}, **kwargs):
super(GameOfThronesSpeakerDiarizationProtocol, self).__init__(
preprocessors=preprocessors, **kwargs)
self.mdtm_parser_ = MDTMParser()
@staticmethod
def get_audio_path(uri):
return op.join(
op.dirname(op.realpath(__file__)),
'data', 'audio', '{uri}.txt'.format(uri=uri))
def load_speaker(self, uri):
speaker = Annotation(uri=uri)
path = self.get_audio_path(uri)
with open(path, 'r') as fp:
for line in fp:
start, duration, name, _, _ = line.strip().split()
start = float(start)
end = start + float(duration)
speaker[Segment(start, end)] = name
return speaker.smooth()
def _subset(self, protocol, subset):
data_dir = op.join(op.dirname(op.realpath(__file__)), 'data')
# load annotations
path = op.join(
data_dir,
'{protocol}.{subset}.lst'.format(subset=subset, protocol=protocol))
with open(path, 'r') as fp:
for line in fp:
uri = line.strip()
annotation = self.load_speaker(uri)
item = {
'database': 'GameOfThrones',
'uri': uri,
'annotation': annotation}
yield item
class Season1(GameOfThronesSpeakerDiarizationProtocol):
"""Season 1
* Training set: episode #1, #2, #3, #4, #5
* Development set: episode #6
* Test set: episode #7, #8, #9, #10
"""
def trn_iter(self):
return self._subset('Season1', 'trn')
def dev_iter(self):
return self._subset('Season1', 'dev')
def tst_iter(self):
return self._subset('Season1', 'tst')
class Season1Test(GameOfThronesSpeakerDiarizationProtocol):
"""Season 1
* Training set: -- not available --
* Development set: -- not available --
* Test set: episode #1, #2, #3, #4, #5, #6, #7, #8, #9, #10
"""
def tst_iter(self):
return self._subset('Season1Test', 'tst')
class GameOfThrones(Database):
"""GameOfThrones corpus
Parameters
----------
preprocessors : dict or (key, preprocessor) iterable
        When provided, each protocol item (dictionary) is preprocessed, such
that item[key] = preprocessor(**item). In case 'preprocessor' is not
callable, it should be a string containing placeholder for item keys
(e.g. {'wav': '/path/to/{uri}.wav'})
Reference
---------
Citation
--------
Website
-------
"""
def __init__(self, preprocessors={}, **kwargs):
super(GameOfThrones, self).__init__(preprocessors=preprocessors, **kwargs)
self.register_protocol(
'SpeakerDiarization', 'Season1', Season1)
self.register_protocol(
'SpeakerDiarization', 'Season1Test', Season1Test)
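# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Iterating the Season1 training items directly through the protocol methods
# defined above; printed fields match the dictionaries yielded by _subset().
# protocol = Season1()
# for item in protocol.trn_iter():
#     print(item['uri'], item['annotation'])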
| 1.515625
| 2
|
envelope/constants.py
|
affan2/django-envelope
| 0
|
12783116
|
<reponame>affan2/django-envelope
from django.utils.translation import ugettext_lazy as _
STATE_TYPES = (
(-1, _('Deleted')),
(1, _('Replied')),
(2, _('Pending')),
)
COMPANY_CONTACT_CHOICES = (
("", u''),
("First choice", _('First choice')),
("Second choice", _('Second choice')),
("Third choice", _('Third choice')),
)
PRODUCT_CONTACT_CHOICES = (
("", u''),
("First choice", _('First choice')),
("Second choice", _('Second choice')),
("Third choice", _('Third choice')),
)
SOLUTIONS_CONTACT_CHOICES = (
("", u''),
("First choice", _('First choice')),
("Second choice", _('Second choice')),
("Third choice", _('Third choice')),
)
| 1.84375
| 2
|
spellchecker/Spellcheck.py
|
sa91/Ideas
| 0
|
12783117
|
<gh_stars>0
from Preprocess import pre_process
from Getembedding import embed
from Predictor import predictor
"""
The input of this file can be changed to a word stream or anything else that can be used to check spelling.
For now:
It takes a word and preprocesses it,
gets the embedding of the word,
and sends it to the predictor.
Ideas:
Take a stream around the word and preprocess each word,
get the embedding of each word,
and send the stream encoding to the predictor.
"""
word = input("Input the spelling: ")
word = pre_process(word)
k = input("Number of suggestion: ")
embedding = embed(word)
print("Suggestions: ", predictor(embedding, int(k)))
| 3.578125
| 4
|
tests/bioc/test_json_encoder.py
|
datummd/bioc
| 10
|
12783118
|
<filename>tests/bioc/test_json_encoder.py
import io
import tempfile
from pathlib import Path
import pytest
from bioc import BioCFileType
from bioc.biocjson.encoder import toJSON
import bioc
from bioc.biocjson import BioCJsonIterWriter
from tests.utils import assert_everything
file = Path(__file__).parent / 'everything.json'
def test_dump():
with open(file, encoding='utf8') as fp:
collection = bioc.load(fp, BioCFileType.BIOC_JSON)
tmp = tempfile.mktemp()
with open(tmp, 'w', encoding='utf8') as fp:
bioc.dump(collection, fp, BioCFileType.BIOC_JSON)
with open(tmp, encoding='utf8') as fp:
collection = bioc.load(fp, BioCFileType.BIOC_JSON)
assert_everything(collection)
def test_dumps():
with open(file, encoding='utf8') as fp:
collection = bioc.load(fp, BioCFileType.BIOC_JSON)
s = bioc.dumps(collection, BioCFileType.BIOC_JSON)
collection = bioc.loads(s, BioCFileType.BIOC_JSON)
assert_everything(collection)
def test_level():
with pytest.raises(ValueError):
BioCJsonIterWriter(io.StringIO(), level=-1)
with open(file, encoding='utf8') as fp:
collection = bioc.load(fp, BioCFileType.BIOC_JSON)
with pytest.raises(ValueError):
writer = BioCJsonIterWriter(io.StringIO(), level=bioc.SENTENCE)
writer.write(collection.documents[0])
with pytest.raises(ValueError):
writer = BioCJsonIterWriter(io.StringIO(), level=bioc.PASSAGE)
writer.write(collection.documents[0])
with pytest.raises(ValueError):
writer = BioCJsonIterWriter(io.StringIO(), level=bioc.DOCUMENT)
writer.write(collection.documents[0].passages[0])
def test_toJSON():
with open(file, encoding='utf8') as fp:
collection = bioc.load(fp, BioCFileType.BIOC_JSON)
obj = toJSON(collection)
assert obj['documents'][0]['id'] == '1'
with pytest.raises(TypeError):
toJSON({})
| 2.28125
| 2
|
toolkit/autodiff/scalar/FloatScalar.py
|
joseph-ai/aitoolkit
| 0
|
12783119
|
import toolkit.autodiff.math.scalar as am
import toolkit.autodiff.math as m
from ..math import IdentityOp
from ..CalcFlow import CalcFlow
class FloatScalar(CalcFlow):
def __init__(self, value):
super().__init__(value)
def _calc_unary(self, func):
calc_val = FloatScalar(func.calculate())
super(FloatScalar, self).__calc_unary__(calc_val, func)
return calc_val
def _calc_binary(self, other, func):
calc_val = FloatScalar(func.calculate())
super(FloatScalar, self).__calc_binary__(calc_val, other, func)
return calc_val
@classmethod
def create(cls, value):
v = FloatScalar(value)
math_func = IdentityOp(v)
calc_val = v._calc_unary(math_func)
calc_val.identity = math_func
return calc_val
def __mul__(self, other):
if not CalcFlow.is_calc_flow(other):
raise ValueError("Not CalcFlow")
math_func = m.MultiplyOp(self, other)
return self._calc_binary(other, math_func)
def __add__(self, other):
if not CalcFlow.is_calc_flow(other):
raise ValueError("Not CalcFlow")
math_func = m.AdditionOp(self, other)
return self._calc_binary(other, math_func)
def __sub__(self, other):
if not CalcFlow.is_calc_flow(other):
raise ValueError("Not CalcFlow")
math_func = m.SubtractionOp(self, other)
return self._calc_binary(other, math_func)
def __pow__(self, other):
if not CalcFlow.is_calc_flow(other):
raise ValueError("Not CalcFlow")
math_func = m.ExponentOp(self, other)
return self._calc_binary(other, math_func)
def __div__(self, other):
if not CalcFlow.is_calc_flow(other):
raise ValueError("Not CalcFlow")
math_func = m.DivideOp(self, other)
return self._calc_binary(other, math_func)
def sin(self):
math_func = am.SinOp(self)
return self._calc_unary(math_func)
def exp(self):
math_func = am.ExpOp(self)
return self._calc_unary(math_func)
def ln(self):
math_func = am.LnOp(self)
return self._calc_unary(math_func)
def __truediv__(self, other):
return self.__div__(other)
def __str__(self):
return "[%s] %s" % (self.__id__, self.value)
| 2.9375
| 3
|
scraping.py
|
NaulaN/EDT
| 0
|
12783120
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from datetime import datetime
class Scraping(object):
driver = webdriver.Chrome()
hours: str
edt = {}
def __init__(self, username, password):
        # Open the page in Chrome
self.driver.delete_all_cookies()
self.driver.get('https://ent.iut.univ-paris8.fr/edt/presentations.php')
        # Log in
self.driver.find_element(By.ID, "username").send_keys(username)
self.driver.find_element(By.ID, "password").send_keys(password)
self.driver.find_element(By.NAME, "submit").click()
self.getEDT()
def getWhatDayWeek(self) -> int:
""" Récupère de 0..7, le jours de la semaine. """
return datetime.today().weekday()
def writeCours(self, element, elementHeure):
""" Écrit l'heure en index et crée une liste contenant les infos concernant le cours """
hoursIndex = elementHeure.find_element(By.CLASS_NAME,"fright").text.split("h")
hoursIndex = float(f"{hoursIndex[0]}.{hoursIndex[1]}") if len(hoursIndex) == 2 else int(hoursIndex[0])
self.edt[hoursIndex] = []
for n,el in enumerate(element.text.split("\n")):
self.edt[hoursIndex].append(el)
def getEDT(self, todayDate=int(datetime.today().weekday())):
""" Update l'emploie du temps """
        # Click the timetable entry matching the current day of the week
for element in self.driver.find_elements(By.CLASS_NAME, f"jours.jour{todayDate+1}.plageDIVn"):
if len(element.text) > 0:
allElementsCanFind = self.driver.find_elements(By.CLASS_NAME,"ligne.petit") + self.driver.find_elements(By.CLASS_NAME,"lignegrey") + self.driver.find_elements(By.CLASS_NAME,"plage.petit") # ((todayDate if todayDate not in [5,6] else 0)*20)+6.6667 => only use for B group
print(element.get_attribute("style"))
print(element.text)
if element.text.count("Amphi") >= 1 or element.text.count("Amphi2") >= 1:
for elementHeure in allElementsCanFind:
if abs(int(elementHeure.get_attribute("style").split(";")[0].replace("top: ", "").replace("px", ""))-int(element.get_attribute("style").split(';')[0].replace("top: ", "").replace("px", ""))) in [30, 10]:
print(elementHeure.text)
self.writeCours(element, elementHeure)
break
else:
for style in element.get_attribute("style").split(";"):
print(style)
print(style.count(f" margin-left: {((todayDate+1 if todayDate not in [5,6] else 0)*10)+3.3333}"))
if style.count(f" margin-left: {((todayDate+1 if todayDate not in [5,6] else 0)*10)+3.3333}") >= 1:
for elementHeure in allElementsCanFind:
print(abs(int(elementHeure.get_attribute("style").split(";")[0].replace("top: ","").replace("px","")) - int(element.get_attribute("style").split(';')[0].replace("top: ","").replace("px",""))))
if abs(int(elementHeure.get_attribute("style").split(";")[0].replace("top: ","").replace("px","")) - int(element.get_attribute("style").split(';')[0].replace("top: ","").replace("px",""))) in [30,10]:
print(elementHeure.text)
self.writeCours(element, elementHeure)
break
print(self.edt)
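# --- Hedged usage sketch (editor's addition, not part of the original script) ---
# Credentials below are placeholders; constructing Scraping logs in and fills .edt.
# scraper = Scraping("my_username", "my_password")
# print(scraper.edt)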
| 3.484375
| 3
|
qsar/tests.py
|
kingrichard2005/qsarweb-public
| 0
|
12783121
|
from django.test import TestCase
# Create your tests here.
class IndexViewsTestCase(TestCase):
def test_index(self):
resp = self.client.get('/qsar/')
self.assertEqual(resp.status_code, 200)
| 2.109375
| 2
|
dev/park_python/camera/capture.py
|
remij1/Park_Detection
| 0
|
12783122
|
<filename>dev/park_python/camera/capture.py
##################################################################
# MEASURING PARKING LOT OCCUPANCY RATES USING                    #
# VIDEO CAMERAS                                                  #
# -------------------------------------------------------------- #
# <NAME> - TB 2018 - HEIG-VD #
# <EMAIL> #
# https://github.com/remij1/TB_2018 #
# July 2018 #
# -------------------------------------------------------------- #
# Helpers and classes to connect to a camera and retrieve        #
# images. #
##################################################################
import requests
from requests.auth import HTTPBasicAuth
from io import BytesIO
from apscheduler.schedulers.background import BlockingScheduler, BackgroundScheduler
import logging
from datetime import datetime, time
class CameraClient:
"""
Can be used to interact with the camera over the network.
This has been especially designed to work with the Wanscam HW0029 camera.
Requirement: apscheduler
"""
SNAP_ENDPOINT = "/web/tmpfs/snap.jpg"
WEB_PORT = 80
def __init__(self, host, username="admin", password="<PASSWORD>"):
"""
Creates a CameraClient which could be used to capture frame from the camera
Arguments:
host {str} -- The ip or hostname of the camera
Keyword Arguments:
username {str} -- The username to use to connect (default: {"admin"})
password {str} -- The password to use to connect (default: {"<PASSWORD>"})
"""
self.host = host
self.username = username
self.password = password
# Creating a basic authentification from the arguments
self.auth = HTTPBasicAuth(self.username, self.password)
# Defining the http request url which can be used to request an image to the camera
self.url = f"http://{self.host}:{self.WEB_PORT}{self.SNAP_ENDPOINT}"
def capture_raw(self):
"""
Could be used to get a single image from the camera. It is described by bytes.
A skimage/opencv image (a simple [x, y, 3] numpy array) can be easily created with io.imread(camera.capture_raw()).
Returns:
BytesIO -- the byte stream describing the image
Raises:
            RequestException -- if the connection was not successfully completed, e.g. if the host cannot be reached.
            BadCredentialsError -- if the credentials cannot be used to connect to the camera
            BadResponseFormat -- if something went wrong with the camera response
"""
# requesting the camera
response = requests.get(self.url, auth=self.auth)
# handling bad responses
if response.status_code == 401: # bad credentials
raise self.BadCredentialsError()
elif not str(response.status_code).startswith("2"): # bad response code
raise self.BadResponseFormat()
# returning the image as a byte stream
content = response.content
response.close()
return BytesIO(content)
# ------- ERRORS ------- #
class Error(Exception):
pass
class BadCredentialsError(Error):
def __init__(self):
super().__init__("The credentials cannot be used to connect to the camera")
class BadResponseFormat(Error):
def __init__(self):
super().__init__("Something went wrong with the camera response")
class CameraAgent:
"""
Used to request the camera for a shot periodically. It handles connection loss.
"""
def __init__(self, camera_client, handle_image_callback, hours = 0, minutes = 0, seconds = 0, running_time=None, blocking=True):
"""
        Creates an image agent. It requests an image from the camera_client provided as an argument once every
        interval, which is defined by the hours, minutes and seconds parameters. For the agent to start, use camera_agent.start().
        For every image that has been received, 'handle_image_callback(image_bytes_stream)' is called. The image_bytes_stream
        can be easily converted to a skimage/opencv image with image = io.imread(image_bytes_stream).
        A running time (i.e. when the images have to be fetched) can be passed as a parameter. If None, it is always running.
        Arguments:
            camera_client {CameraClient} -- The camera from which we want to retrieve images
handle_image_callback {Function} -- A function that has the following signature: func(image_bytes_stream)
Keyword Arguments:
hours {int} -- Hours between each request (default: {0})
minutes {int} -- Minutes between each request (default: {0})
seconds {int} -- Seconds between each request (default: {0})
running_time {(time, time)} -- The start and stop hours of the agent. (default: {None})
"""
self.camera_client = camera_client
self.handle_image_callback = handle_image_callback
# saving the base interval
self.interval = (hours, minutes, seconds)
self.running_time = running_time
        # Creating a scheduler. It will ask the camera for an image at the interval provided as arguments
if blocking:
self.scheduler = BlockingScheduler()
else:
self.scheduler = BackgroundScheduler()
self.job = self.scheduler.add_job(self._request_image, 'interval', hours = hours, minutes = minutes, seconds = seconds, id="cam_capture")
        # This is set to True when a connection error occurred.
self.connection_error = False
# logging
self.logger = logging.getLogger("capture_module")
self.logger.setLevel(logging.INFO)
def _request_image(self):
"""
        Used locally to request an image from the camera. This is executed once every interval
"""
# Checking if we are currently running
if not self._is_running():
self.logger.info("Currently not running - not fetching an image")
return
# capturing the image
try:
image = self.camera_client.capture_raw()
if self.connection_error == True:
# There is no more error now, we can use the main interval
self.logger.warning("The camera has been reconnected after a loss of connection !")
self.connection_error = False
self.job.reschedule(trigger='interval', hours = self.interval[0], minutes = self.interval[1], seconds = self.interval[2])
self.logger.info("Image fetched")
self.handle_image_callback(image)
except requests.RequestException :
# Here, the camera could not be found
# Retrying once every minute
if self.connection_error == False: # New error, logging it
self.logger.error("Connection to camera lost, retrying in 1 minute")
self.connection_error = True
self.job.reschedule(trigger='interval', minutes = 1)
else: # This error is not new, logging it as an info
self.logger.info("Camera still not connected, retrying in 1 minute")
except (CameraClient.BadCredentialsError, CameraClient.BadResponseFormat, Exception):
self.logger.exception("An error has occured while retreiving the image")
# Doing nothing, retrying the next time
def _is_running(self):
if self.running_time == None:
return True
cur_t = datetime.now().time()
start_t = self.running_time[0]
stop_t = self.running_time[1]
if start_t <= stop_t: # classic range of time
return start_t <= cur_t <= stop_t
else: # the range covers the 00:00 hour
return start_t <= cur_t or cur_t <= stop_t
def start(self):
"""
        Start capturing images from the camera
"""
# we firstly get one image immediately
self._request_image()
# then, we start the scheduler
self.scheduler.start()
def get_logger(self):
"""
        Returns the logger of this agent. Can be useful for monitoring
Returns:
Logger -- The current logger
"""
return self.logger
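# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Host, credentials, schedule and the callback below are illustrative placeholders.
# def handle_image(image_bytes_stream):
#     with open("latest.jpg", "wb") as out:
#         out.write(image_bytes_stream.read())
# client = CameraClient("192.168.1.50", username="admin", password="placeholder")
# agent = CameraAgent(client, handle_image, minutes=5,
#                     running_time=(time(7, 0), time(19, 0)), blocking=True)
# agent.start()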
| 2.375
| 2
|
docker-jans-configurator/scripts/bootstrap.py
|
duttarnab/jans
| 0
|
12783123
|
<gh_stars>0
import json
import logging.config
import os
import random
import socket
import time
from functools import partial
from uuid import uuid4
import click
from jans.pycloudlib import get_manager
from jans.pycloudlib import wait_for
from jans.pycloudlib.utils import get_random_chars
from jans.pycloudlib.utils import get_sys_random_chars
from jans.pycloudlib.utils import encode_text
from jans.pycloudlib.utils import exec_cmd
from jans.pycloudlib.utils import generate_base64_contents
from jans.pycloudlib.utils import safe_render
from jans.pycloudlib.utils import ldap_encode
from jans.pycloudlib.utils import get_server_certificate
from jans.pycloudlib.utils import generate_ssl_certkey
from jans.pycloudlib.utils import generate_ssl_ca_certkey
from jans.pycloudlib.utils import generate_signed_ssl_certkey
from jans.pycloudlib.utils import as_boolean
from jans.pycloudlib.utils import generate_keystore
from parameter import params_from_file
from settings import LOGGING_CONFIG
DEFAULT_SIG_KEYS = "RS256 RS384 RS512 ES256 ES384 ES512 PS256 PS384 PS512"
DEFAULT_ENC_KEYS = "RSA1_5 RSA-OAEP"
DEFAULT_CONFIG_FILE = "/app/db/config.json"
DEFAULT_SECRET_FILE = "/app/db/secret.json"
DEFAULT_GENERATE_FILE = "/app/db/generate.json"
logging.config.dictConfig(LOGGING_CONFIG)
logger = logging.getLogger("configurator")
manager = get_manager()
def encode_template(fn, ctx, base_dir="/app/templates"):
path = os.path.join(base_dir, fn)
# ctx is nested which has `config` and `secret` keys
data = {}
for _, v in ctx.items():
data.update(v)
with open(path) as f:
return generate_base64_contents(safe_render(f.read(), data))
def generate_openid_keys(passwd, jks_path, jwks_path, dn, exp=365, sig_keys=DEFAULT_SIG_KEYS, enc_keys=DEFAULT_ENC_KEYS):
cmd = " ".join([
"java",
"-Dlog4j.defaultInitOverride=true",
"-cp /app/javalibs/*",
"io.jans.as.client.util.KeyGenerator",
"-enc_keys", enc_keys,
"-sig_keys", sig_keys,
"-dnname", "{!r}".format(dn),
"-expiration", "{}".format(exp),
"-keystore", jks_path,
"-keypasswd", <PASSWORD>,
])
out, err, retcode = exec_cmd(cmd)
if retcode == 0:
with open(jwks_path, "w") as f:
f.write(out.decode())
return out, err, retcode
def generate_openid_keys_hourly(passwd, jks_path, jwks_path, dn, exp=48, sig_keys=DEFAULT_SIG_KEYS, enc_keys=DEFAULT_ENC_KEYS):
cmd = " ".join([
"java",
"-Dlog4j.defaultInitOverride=true",
"-cp /app/javalibs/*",
"io.jans.as.client.util.KeyGenerator",
"-enc_keys", enc_keys,
"-sig_keys", sig_keys,
"-dnname", "{!r}".format(dn),
"-expiration_hours", "{}".format(exp),
"-keystore", jks_path,
"-keypasswd", <PASSWORD>,
])
out, err, retcode = exec_cmd(cmd)
if retcode == 0:
with open(jwks_path, "w") as f:
f.write(out.decode())
return out, err, retcode
def export_openid_keys(keystore, keypasswd, alias, export_file):
cmd = " ".join([
"java",
"-Dlog4j.defaultInitOverride=true",
"-cp /app/javalibs/*",
"io.jans.as.client.util.KeyExporter",
"-keystore {}".format(keystore),
"-keypasswd {}".format(keypasswd),
"-alias {}".format(alias),
"-exportfile {}".format(export_file),
])
return exec_cmd(cmd)
def generate_pkcs12(suffix, passwd, hostname):
# Convert key to pkcs12
cmd = " ".join([
"openssl",
"pkcs12",
"-export",
"-inkey /etc/certs/{}.key".format(suffix),
"-in /etc/certs/{}.crt".format(suffix),
"-out /etc/certs/{}.pkcs12".format(suffix),
"-name {}".format(hostname),
"-passout pass:{}".format(passwd),
])
_, err, retcode = exec_cmd(cmd)
assert retcode == 0, "Failed to generate PKCS12 file; reason={}".format(err)
class CtxManager:
def __init__(self, manager):
self.manager = manager
self.ctx = {"config": {}, "secret": {}}
self._remote_config_ctx = None
self._remote_secret_ctx = None
@property
def remote_config_ctx(self):
if not self._remote_config_ctx:
self._remote_config_ctx = self.manager.config.get_all()
return self._remote_config_ctx
@property
def remote_secret_ctx(self):
if not self._remote_secret_ctx:
self._remote_secret_ctx = self.manager.secret.get_all()
return self._remote_secret_ctx
def set_config(self, key, value, reuse_if_exists=True):
if reuse_if_exists and key in self.remote_config_ctx:
logger.info(f"re-using config {key}")
self.ctx["config"][key] = self.remote_config_ctx[key]
return self.ctx["config"][key]
logger.info(f"adding config {key}")
if callable(value):
value = value()
self.ctx["config"][key] = value
return self.ctx["config"][key]
def set_secret(self, key, value, reuse_if_exists=True):
if reuse_if_exists and key in self.remote_secret_ctx:
logger.info(f"re-using secret {key}")
self.ctx["secret"][key] = self.remote_secret_ctx[key]
return self.ctx["secret"][key]
logger.info(f"adding secret {key}")
if callable(value):
value = value()
self.ctx["secret"][key] = value
return value
def get_config(self, key, default=None):
return self.ctx["config"].get(key) or default
def get_secret(self, key, default=None):
return self.ctx["secret"].get(key) or default
class CtxGenerator:
def __init__(self, manager, params):
self.params = params
self.manager = manager
self.ctx_manager = CtxManager(self.manager)
@property
def ctx(self):
return self.ctx_manager.ctx
def set_config(self, key, value, reuse_if_exists=True):
return self.ctx_manager.set_config(key, value, reuse_if_exists)
def set_secret(self, key, value, reuse_if_exists=True):
return self.ctx_manager.set_secret(key, value, reuse_if_exists)
def get_config(self, key, default=None):
return self.ctx_manager.get_config(key, default)
def get_secret(self, key, default=None):
return self.ctx_manager.get_secret(key, default)
def base_ctx(self):
self.set_secret("encoded_salt", partial(get_random_chars, 24))
self.set_config("orgName", self.params["org_name"])
self.set_config("country_code", self.params["country_code"])
self.set_config("state", self.params["state"])
self.set_config("city", self.params["city"])
self.set_config("hostname", self.params["hostname"])
self.set_config("admin_email", self.params["email"])
# self.set_config("jetty_base", "/opt/jans/jetty")
self.set_config("admin_inum", lambda: f"{uuid4()}")
self.set_secret("encoded_admin_password", partial(ldap_encode, self.params["admin_pw"]))
opt_scopes = self.params["optional_scopes"]
self.set_config("optional_scopes", list(set(opt_scopes)), False)
def ldap_ctx(self):
encoded_salt = self.get_secret("encoded_salt")
# self.set_secret("encoded_ldap_pw", ldap_encode(self.params["admin_pw"]))
self.set_secret(
"encoded_ox_ldap_pw",
partial(encode_text, self.params["ldap_pw"], encoded_salt),
)
self.set_config("ldap_init_host", "localhost")
self.set_config("ldap_init_port", 1636)
self.set_config("ldap_port", 1389)
self.set_config("ldaps_port", 1636)
self.set_config("ldap_binddn", "cn=directory manager")
self.set_config("ldap_site_binddn", "cn=directory manager")
ldap_truststore_pass = self.set_secret("ldap_truststore_pass", get_random_chars)
self.set_config("ldapTrustStoreFn", "/etc/certs/opendj.pkcs12")
hostname = self.get_config("hostname")
generate_ssl_certkey(
"opendj",
self.get_config("admin_email"),
hostname,
self.get_config("orgName"),
self.get_config("country_code"),
self.get_config("state"),
self.get_config("city"),
extra_dns=["ldap"],
)
with open("/etc/certs/opendj.pem", "w") as fw:
with open("/etc/certs/opendj.crt") as fr:
ldap_ssl_cert = fr.read()
self.set_secret(
"ldap_ssl_cert",
partial(encode_text, ldap_ssl_cert, encoded_salt),
)
with open("/etc/certs/opendj.key") as fr:
ldap_ssl_key = fr.read()
self.set_secret(
"ldap_ssl_key",
partial(encode_text, ldap_ssl_key, encoded_salt),
)
ldap_ssl_cacert = "".join([ldap_ssl_cert, ldap_ssl_key])
fw.write(ldap_ssl_cacert)
self.set_secret(
"ldap_ssl_cacert",
partial(encode_text, ldap_ssl_cacert, encoded_salt),
)
generate_pkcs12("opendj", ldap_truststore_pass, hostname)
with open(self.get_config("ldapTrustStoreFn"), "rb") as fr:
self.set_secret(
"ldap_pkcs12_base64",
partial(encode_text, fr.read(), encoded_salt),
)
self.set_secret(
"encoded_ldapTrustStorePass",
partial(encode_text, ldap_truststore_pass, encoded_salt),
)
def redis_ctx(self):
# TODO: move this to persistence-loader
self.set_secret("redis_pw", self.params.get("redis_pw", ""))
def auth_ctx(self):
encoded_salt = self.get_secret("encoded_salt")
self.set_config("default_openid_jks_dn_name", "CN=Janssen Auth CA Certificates")
self.set_secret("pairwiseCalculationKey", partial(get_sys_random_chars, random.randint(20, 30)))
self.set_secret("pairwiseCalculationSalt", partial(get_sys_random_chars, random.randint(20, 30)))
auth_openid_jks_fn = self.set_config("auth_openid_jks_fn", "/etc/certs/auth-keys.jks")
self.set_secret("auth_openid_jks_pass", get_random_chars)
auth_openid_jwks_fn = self.set_config("auth_openid_jwks_fn", "/etc/certs/auth-keys.json")
self.set_config("auth_legacyIdTokenClaims", "false")
self.set_config("auth_openidScopeBackwardCompatibility", "false")
# get user-input signing keys
allowed_sig_keys = DEFAULT_SIG_KEYS.split()
sig_keys = []
for k in self.params.get("auth_sig_keys", "").split():
k = k.strip()
if k not in allowed_sig_keys:
continue
sig_keys.append(k)
# if empty, fallback to default
sig_keys = sig_keys or allowed_sig_keys
sig_keys = " ".join(sig_keys)
self.set_config("auth_sig_keys", sig_keys)
# get user-input encryption keys
allowed_enc_keys = DEFAULT_ENC_KEYS.split()
enc_keys = []
for k in self.params.get("auth_enc_keys", "").split():
k = k.strip()
if k not in allowed_enc_keys:
continue
enc_keys.append(k)
# if empty, fallback to default
enc_keys = enc_keys or allowed_enc_keys
enc_keys = " ".join(enc_keys)
self.set_config("auth_enc_keys", enc_keys)
# default exp = 2 hours + token lifetime (in hour)
exp = int(2 + (3600 / 3600))
_, err, retcode = generate_openid_keys_hourly(
self.get_secret("auth_openid_jks_pass"),
self.get_config("auth_openid_jks_fn"),
auth_openid_jwks_fn,
self.get_config("default_openid_jks_dn_name"),
exp=exp,
sig_keys=sig_keys,
enc_keys=enc_keys,
)
if retcode != 0:
logger.error(f"Unable to generate auth keys; reason={err}")
raise click.Abort()
basedir, fn = os.path.split(auth_openid_jwks_fn)
self.set_secret(
"auth_openid_key_base64",
partial(encode_template, fn, self.ctx, basedir),
)
# auth keys
self.set_config("auth_key_rotated_at", lambda: int(time.time()))
with open(auth_openid_jks_fn, "rb") as fr:
self.set_secret(
"auth_jks_base64",
partial(encode_text, fr.read(), encoded_salt),
)
def config_api_ctx(self):
self.set_config("jca_client_id", lambda: f"1801.{uuid4()}")
jca_client_pw = self.set_secret("jca_client_pw", get_random_chars)
self.set_secret(
"jca_client_encoded_pw",
partial(encode_text, jca_client_pw, self.get_secret("encoded_salt"))
)
def passport_rs_ctx(self):
encoded_salt = self.get_secret("encoded_salt")
self.set_config("passport_rs_client_id", lambda: f"1501.{uuid4()}")
passport_rs_client_jks_fn = self.set_config("passport_rs_client_jks_fn", "/etc/certs/passport-rs.jks")
passport_rs_client_jwks_fn = self.set_config("passport_rs_client_jwks_fn", "/etc/certs/passport-rs-keys.json")
passport_rs_client_jks_pass = self.set_secret("passport_rs_client_jks_pass", get_random_chars)
self.set_secret(
"passport_rs_client_jks_pass_encoded",
partial(encode_text, passport_rs_client_jks_pass, encoded_salt),
)
out, err, retcode = generate_openid_keys(
passport_rs_client_jks_pass,
passport_rs_client_jks_fn,
passport_rs_client_jwks_fn,
self.get_config("default_openid_jks_dn_name"),
)
if retcode != 0:
logger.error(f"Unable to generate Passport RS keys; reason={err}")
raise click.Abort()
passport_rs_client_cert_alg = self.set_config("passport_rs_client_cert_alg", "RS512")
cert_alias = ""
for key in json.loads(out)["keys"]:
if key["alg"] == passport_rs_client_cert_alg:
cert_alias = key["kid"]
break
self.set_config("passport_rs_client_cert_alias", cert_alias)
basedir, fn = os.path.split(passport_rs_client_jwks_fn)
self.set_secret(
"passport_rs_client_base64_jwks",
partial(encode_template, fn, self.ctx, basedir),
)
with open(passport_rs_client_jks_fn, "rb") as fr:
self.set_secret(
"passport_rs_jks_base64",
partial(encode_text, fr.read(), encoded_salt),
)
self.set_config("passport_resource_id", lambda: f"1504.{uuid4()}")
def passport_rp_ctx(self):
encoded_salt = self.get_secret("encoded_salt")
self.set_config("passport_rp_client_id", lambda: f"1502.{uuid4()}")
self.set_config("passport_rp_ii_client_id", lambda: f"1503.{uuid4()}")
passport_rp_client_jks_pass = self.set_secret("passport_rp_client_jks_pass", get_random_chars)
passport_rp_client_jks_fn = self.set_config("passport_rp_client_jks_fn", "/etc/certs/passport-rp.jks")
passport_rp_client_jwks_fn = self.set_config("passport_rp_client_jwks_fn", "/etc/certs/passport-rp-keys.json")
passport_rp_client_cert_fn = self.set_config("passport_rp_client_cert_fn", "/etc/certs/passport-rp.pem")
passport_rp_client_cert_alg = self.set_config("passport_rp_client_cert_alg", "RS512")
out, err, code = generate_openid_keys(
passport_rp_client_jks_pass,
passport_rp_client_jks_fn,
passport_rp_client_jwks_fn,
self.get_config("default_openid_jks_dn_name"),
)
if code != 0:
logger.error(f"Unable to generate Passport RP keys; reason={err}")
raise click.Abort()
cert_alias = ""
for key in json.loads(out)["keys"]:
if key["alg"] == passport_rp_client_cert_alg:
cert_alias = key["kid"]
break
self.set_config("passport_rp_client_cert_alias", cert_alias)
_, err, retcode = export_openid_keys(
passport_rp_client_jks_fn,
passport_rp_client_jks_pass,
cert_alias,
passport_rp_client_cert_fn,
)
if retcode != 0:
logger.error(f"Unable to generate Passport RP client cert; reason={err}")
raise click.Abort()
basedir, fn = os.path.split(passport_rp_client_jwks_fn)
self.set_secret("passport_rp_client_base64_jwks", partial(encode_template, fn, self.ctx, basedir))
with open(passport_rp_client_jks_fn, "rb") as fr:
self.set_secret(
"passport_rp_jks_base64",
partial(encode_text, fr.read(), encoded_salt),
)
with open(passport_rp_client_cert_fn) as fr:
self.set_secret(
"passport_rp_client_cert_base64",
partial(encode_text, fr.read(), encoded_salt),
)
def passport_sp_ctx(self):
encoded_salt = self.get_secret("encoded_salt")
# passportSpKeyPass = self.set_secret("passportSpKeyPass", get_random_chars()) # noqa: N806
_ = self.set_secret("passportSpKeyPass", get_random_chars) # noqa: N806
self.set_config("passportSpTLSCACert", '/etc/certs/passport-sp.pem')
passportSpTLSCert = self.set_config("passportSpTLSCert", '/etc/certs/passport-sp.crt') # noqa: N806
passportSpTLSKey = self.set_config("passportSpTLSKey", '/etc/certs/passport-sp.key') # noqa: N806
self.set_secret("passportSpJksPass", get_random_chars)
self.set_config("passportSpJksFn", '/etc/certs/passport-sp.jks')
generate_ssl_certkey(
"passport-sp",
self.get_config("admin_email"),
self.get_config("hostname"),
self.get_config("orgName"),
self.get_config("country_code"),
self.get_config("state"),
self.get_config("city"),
)
with open(passportSpTLSCert) as f:
self.set_secret(
"passport_sp_cert_base64",
partial(encode_text, f.read(), encoded_salt),
)
with open(passportSpTLSKey) as f:
self.set_secret(
"passport_sp_key_base64",
partial(encode_text, f.read(), encoded_salt),
)
def web_ctx(self):
ssl_cert = "/etc/certs/web_https.crt"
ssl_key = "/etc/certs/web_https.key"
ssl_csr = "/etc/certs/web_https.csr"
ssl_ca_cert = "/etc/certs/ca.crt"
ssl_ca_key = "/etc/certs/ca.key"
# get cert and key (if available) with priorities below:
#
# 1. from mounted files
# 2. from fronted (key file is an empty file)
# 3. self-generate files
logger.info(f"Resolving {ssl_cert} and {ssl_key}")
# check from mounted files
if not (os.path.isfile(ssl_cert) and os.path.isfile(ssl_key)):
# no mounted files, hence download from frontend
ingress_addr = ""
if "CN_INGRESS_ADDRESS" in os.environ:
ingress_addr = os.environ.get("CN_INGRESS_ADDRESS")
ingress_servername = os.environ.get("CN_INGRESS_SERVERNAME") or ingress_addr
if ingress_addr and ingress_servername:
logger.warning(
f"Unable to find mounted {ssl_cert} and {ssl_key}; "
f"trying to download from {ingress_addr}:443 (servername {ingress_servername})" # noqa: C812
)
try:
# cert will be downloaded into `ssl_cert` path
get_server_certificate(ingress_addr, 443, ssl_cert, ingress_servername)
# since cert is downloaded, key must mounted
# or generate empty file
if not os.path.isfile(ssl_key):
with open(ssl_key, "w") as f:
f.write("")
except (socket.gaierror, socket.timeout, OSError) as exc:
# address not resolved or timed out
logger.warning(f"Unable to download cert; reason={exc}")
# no mounted nor downloaded files, hence we need to create self-generated files
if not (os.path.isfile(ssl_cert) and os.path.isfile(ssl_key)):
hostname = self.get_config("hostname")
email = self.get_config("admin_email")
org_name = self.get_config("orgName")
country_code = self.get_config("country_code")
state = self.get_config("state")
city = self.get_config("city")
logger.info(f"Creating self-generated {ssl_ca_cert} and {ssl_ca_key}")
ca_cert, ca_key = generate_ssl_ca_certkey(
"ca",
email,
"Janssen CA",
org_name,
country_code,
state,
city,
)
logger.info(f"Creating self-generated {ssl_csr}, {ssl_cert}, and {ssl_key}")
generate_signed_ssl_certkey(
"web_https",
ca_key,
ca_cert,
email,
hostname,
org_name,
country_code,
state,
city,
)
try:
with open(ssl_ca_cert) as f:
self.set_secret("ssl_ca_cert", f.read)
except FileNotFoundError:
self.set_secret("ssl_ca_cert", "")
try:
with open(ssl_ca_key) as f:
self.set_secret("ssl_ca_key", f.read)
except FileNotFoundError:
self.set_secret("ssl_ca_key", "")
try:
with open(ssl_csr) as f:
self.set_secret("ssl_csr", f.read)
except FileNotFoundError:
self.set_secret("ssl_csr", "")
with open(ssl_cert) as f:
self.set_secret("ssl_cert", f.read)
with open(ssl_key) as f:
self.set_secret("ssl_key", f.read)
def oxshibboleth_ctx(self):
encoded_salt = self.get_secret("encoded_salt")
hostname = self.get_config("hostname")
admin_email = self.get_config("admin_email")
orgName = self.get_config("orgName") # noqa: N806
country_code = self.get_config("country_code")
state = self.get_config("state")
city = self.get_config("city")
self.set_config("idp_client_id", lambda: f"1101.{uuid4()}")
self.set_secret(
"idpClient_encoded_pw",
partial(encode_text, get_random_chars(), encoded_salt),
)
shibJksFn = self.set_config("shibJksFn", "/etc/certs/shibIDP.jks") # noqa: N806
shibJksPass = self.set_secret("shibJksPass", get_random_chars) # noqa: N806
self.set_secret(
"encoded_shib_jks_pw",
partial(encode_text, shibJksPass, encoded_salt),
)
generate_ssl_certkey(
"shibIDP",
admin_email,
hostname,
orgName,
country_code,
state,
city,
)
generate_keystore("shibIDP", hostname, shibJksPass)
with open("/etc/certs/shibIDP.crt") as f:
self.set_secret(
"shibIDP_cert",
partial(encode_text, f.read(), encoded_salt),
)
with open("/etc/certs/shibIDP.key") as f:
self.set_secret(
"shibIDP_key",
partial(encode_text, f.read(), encoded_salt),
)
with open(shibJksFn, "rb") as f:
self.set_secret(
"shibIDP_jks_base64",
partial(encode_text, f.read(), encoded_salt),
)
self.set_config("shibboleth_version", "v3")
self.set_config("idp3Folder", "/opt/shibboleth-idp")
idp3_signing_cert = "/etc/certs/idp-signing.crt"
idp3_signing_key = "/etc/certs/idp-signing.key"
generate_ssl_certkey(
"idp-signing",
admin_email,
hostname,
orgName,
country_code,
state,
city,
)
with open(idp3_signing_cert) as f:
self.set_secret("idp3SigningCertificateText", f.read)
with open(idp3_signing_key) as f:
self.set_secret("idp3SigningKeyText", f.read)
idp3_encryption_cert = "/etc/certs/idp-encryption.crt"
idp3_encryption_key = "/etc/certs/idp-encryption.key"
generate_ssl_certkey(
"idp-encryption",
admin_email,
hostname,
orgName,
country_code,
state,
city,
)
with open(idp3_encryption_cert) as f:
self.set_secret("idp3EncryptionCertificateText", f.read)
with open(idp3_encryption_key) as f:
self.set_secret("idp3EncryptionKeyText", f.read)
_, err, code = gen_idp3_key(shibJksPass)
if code != 0:
            logger.warning(f"Unable to generate Shibboleth sealer; reason={err}")
raise click.Abort()
with open("/etc/certs/sealer.jks", "rb") as f:
self.set_secret(
"sealer_jks_base64",
partial(encode_text, f.read(), encoded_salt),
)
with open("/etc/certs/sealer.kver") as f:
self.set_secret(
"sealer_kver_base64",
partial(encode_text, f.read(), encoded_salt),
)
def radius_ctx(self):
encoded_salt = self.get_secret("encoded_salt")
self.set_config("jans_radius_client_id", f'1701.{uuid4()}')
self.set_secret(
"jans_ro_encoded_pw",
partial(encode_text, get_random_chars(), encoded_salt),
)
radius_jwt_pass = self.set_secret(
"radius_jwt_pass",
partial(encode_text, get_random_chars(), encoded_salt),
)
out, err, code = generate_openid_keys(
radius_jwt_pass,
"/etc/certs/jans-radius.jks",
"/etc/certs/jans-radius.keys",
self.get_config("default_openid_jks_dn_name"),
)
if code != 0:
logger.error(f"Unable to generate Radius keys; reason={err}")
raise click.Abort()
for key in json.loads(out)["keys"]:
if key["alg"] == "RS512":
self.set_config("radius_jwt_keyId", key["kid"])
break
with open("/etc/certs/jans-radius.jks", "rb") as fr:
self.set_secret(
"radius_jks_base64",
partial(encode_text, fr.read(), encoded_salt),
)
basedir, fn = os.path.split("/etc/certs/jans-radius.keys")
self.set_secret(
"jans_ro_client_base64_jwks",
partial(encode_template, fn, self.ctx, basedir),
)
def scim_ctx(self):
self.set_config("scim_client_id", lambda: f"1201.{uuid4()}")
scim_client_pw = self.set_secret("scim_client_pw", get_random_chars)
self.set_secret(
"scim_client_encoded_pw",
partial(encode_text, scim_client_pw, self.get_secret("encoded_salt"))
)
def couchbase_ctx(self):
# TODO: move this to persistence-loader?
self.set_config("couchbaseTrustStoreFn", "/etc/certs/couchbase.pkcs12")
self.set_secret("couchbase_shib_user_password", get_random_chars)
self.set_secret("couchbase_password", self.params["couchbase_pw"])
self.set_secret("couchbase_superuser_password", self.params["couchbase_superuser_pw"])
def jackrabbit_ctx(self):
# self.set_secret("jca_pw", get_random_chars())
# self.set_secret("jca_pw", "admin")
pass
def fido2_ctx(self):
# TODO: hardcoded in persistence-loader?
self.set_config("fido2ConfigFolder", "/etc/jans/conf/fido2")
def sql_ctx(self):
self.set_secret("sql_password", self.params["sql_pw"])
def casa_ctx(self):
self.set_config("casa_client_id", lambda: f"1902.{uuid4()}")
casa_client_pw = self.set_secret("casa_client_pw", get_random_chars)
self.set_secret(
"casa_client_encoded_pw",
partial(encode_text, casa_client_pw, self.get_secret("encoded_salt"))
)
def generate(self):
opt_scopes = self.params["optional_scopes"]
self.base_ctx()
self.auth_ctx()
self.config_api_ctx()
self.web_ctx()
if "ldap" in opt_scopes:
self.ldap_ctx()
if "redis" in opt_scopes:
self.redis_ctx()
# self.passport_rs_ctx()
# self.passport_rp_ctx()
# self.passport_sp_ctx()
# self.oxshibboleth_ctx()
# self.radius_ctx()
if "scim" in opt_scopes:
self.scim_ctx()
if "couchbase" in opt_scopes:
self.couchbase_ctx()
# self.jackrabbit_ctx()
if "fido2" in opt_scopes:
self.fido2_ctx()
if "sql" in opt_scopes:
self.sql_ctx()
if "casa" in opt_scopes:
self.casa_ctx()
# populated config
return self.ctx
def gen_idp3_key(storepass):
cmd = " ".join([
"java",
"-classpath '/app/javalibs/*'",
"net.shibboleth.utilities.java.support.security.BasicKeystoreKeyStrategyTool",
"--storefile /etc/certs/sealer.jks",
"--versionfile /etc/certs/sealer.kver",
"--alias secret",
"--storepass {}".format(storepass),
])
return exec_cmd(cmd)
def _save_generated_ctx(manager, data, type_):
if type_ == "config":
backend = manager.config
else:
backend = manager.secret
logger.info("Saving {} to backend".format(type_))
backend.set_all(data)
def _load_from_file(manager, filepath, type_):
ctx_manager = CtxManager(manager)
if type_ == "config":
setter = ctx_manager.set_config
backend = manager.config
else:
setter = ctx_manager.set_secret
backend = manager.secret
logger.info(f"Loading {type_} from {filepath}")
with open(filepath, "r") as f:
data = json.loads(f.read())
ctx = data.get(f"_{type_}")
if not ctx:
logger.warning(f"Missing '_{type_}' key")
return
    # tolerance before checking existing key
time.sleep(5)
data = {k: setter(k, v) for k, v in ctx.items()}
backend.set_all(data)
def _dump_to_file(manager, filepath, type_):
if type_ == "config":
backend = manager.config
else:
backend = manager.secret
logger.info("Saving {} to {}".format(type_, filepath))
data = {"_{}".format(type_): backend.get_all()}
data = json.dumps(data, sort_keys=True, indent=4)
with open(filepath, "w") as f:
f.write(data)
# ============
# CLI commands
# ============
@click.group()
def cli():
pass
@cli.command()
@click.option(
"--generate-file",
type=click.Path(exists=False),
help="Absolute path to file containing parameters for generating config and secret",
default=DEFAULT_GENERATE_FILE,
show_default=True,
)
@click.option(
"--config-file",
type=click.Path(exists=False),
help="Absolute path to file contains config",
default=DEFAULT_CONFIG_FILE,
show_default=True,
)
@click.option(
"--secret-file",
type=click.Path(exists=False),
help="Absolute path to file contains secret",
default=DEFAULT_SECRET_FILE,
show_default=True,
)
def load(generate_file, config_file, secret_file):
"""Loads config and secret from JSON files (generate if not exist).
"""
deps = ["config_conn", "secret_conn"]
wait_for(manager, deps=deps)
# check whether config and secret in backend have been initialized
should_skip = as_boolean(os.environ.get("CN_CONFIGURATION_SKIP_INITIALIZED", False))
if should_skip and manager.config.get("hostname") and manager.secret.get("ssl_cert"):
# config and secret may have been initialized
logger.info("Config and secret have been initialized")
return
# there's no config and secret in backend, check whether to load from files
if os.path.isfile(config_file) and os.path.isfile(secret_file):
# load from existing files
logger.info(f"Re-using config and secret from {config_file} and {secret_file}")
_load_from_file(manager, config_file, "config")
_load_from_file(manager, secret_file, "secret")
return
# no existing files, hence generate new config and secret from parameters
logger.info(f"Loading parameters from {generate_file}")
params, err, code = params_from_file(generate_file)
if code != 0:
logger.error(f"Unable to load parameters; reason={err}")
raise click.Abort()
logger.info("Generating new config and secret")
ctx_generator = CtxGenerator(manager, params)
ctx = ctx_generator.generate()
# save config to its backend and file
_save_generated_ctx(manager, ctx["config"], "config")
_dump_to_file(manager, config_file, "config")
# save secret to its backend and file
_save_generated_ctx(manager, ctx["secret"], "secret")
_dump_to_file(manager, secret_file, "secret")
@cli.command()
@click.option(
"--config-file",
type=click.Path(exists=False),
help="Absolute path to file to save config",
default=DEFAULT_CONFIG_FILE,
show_default=True,
)
@click.option(
"--secret-file",
type=click.Path(exists=False),
help="Absolute path to file to save secret",
default=DEFAULT_SECRET_FILE,
show_default=True,
)
def dump(config_file, secret_file):
"""Dumps config and secret into JSON files.
"""
deps = ["config_conn", "secret_conn"]
wait_for(manager, deps=deps)
_dump_to_file(manager, config_file, "config")
_dump_to_file(manager, secret_file, "secret")
if __name__ == "__main__":
cli(prog_name="configurator")
| 1.820313
| 2
|
src/util/config/view.py
|
Mathtin/mc-discord-bot
| 0
|
12783124
|
<reponame>Mathtin/mc-discord-bot
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2021-present Daniel [Mathtin] Shiko <<EMAIL>>
Project: Minecraft Discord Bot
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__author__ = "Mathtin"
import typing
from ..exceptions import InvalidConfigException
from typing import Any, Callable, Dict, Type, get_type_hints
class ConfigView(object):
# Class fields
_type_constructor_map: Dict[Type[Any], Callable[[Any, str], Any]] = {
int: lambda v, p: int(v),
float: lambda v, p: float(v),
bool: lambda v, p: bool(v),
str: lambda v, p: str(v),
list: lambda v, p: list(v),
dict: lambda v, p: dict(v),
}
_field_constructor_map: Dict[str, Callable[[Any, str], Any]] = None
# Instance fields
_path_prefix: str
def __init__(self, values: typing.Optional[Dict[str, Any]] = None, path_prefix: str = '') -> None:
if values is None:
values = {}
self._path_prefix = path_prefix
# Build {_field_constructor_map} for each class implementation
if self._field_constructor_map is None:
types = get_type_hints(self.__class__)
self.__class__._field_constructor_map = {
field: self.get_type_constructor(type_)
for field, type_ in types.items()
if not field.startswith('_')
}
# Construct each field value provided by {values}
for key, value in values.items():
if key not in self._field_constructor_map:
raise InvalidConfigException(f"Invalid key: {key}", self.path(key))
if value is not None:
constructor = self._field_constructor_map[key]
field_value = constructor(value, self.path(key))
setattr(self, key, field_value)
def get_type_constructor(self, type_: Type[Any]) -> Callable[[Any, str], Any]:
if type_ not in self._type_constructor_map:
self._type_constructor_map[type_] = self._resolve_constructor(type_)
return self._type_constructor_map[type_]
def _resolve_constructor(self, type_: Type[Any]) -> Callable[[Any, str], Any]:
# Primitive types already exist, only ConfigView and complex List/Dict type-hints are supported
if isinstance(type_, typing._GenericAlias):
# Resolve complex List type-hint
if type_._name == 'List':
sub_constructor = self.get_type_constructor(type_.__args__[0])
return lambda l, p: [sub_constructor(e, f'{p}[{i}]') for i, e in enumerate(l)]
# Resolve complex Dict type-hint
elif type_._name == 'Dict':
# Check key type
if type_.__args__[0] is not str:
raise TypeError(f"Unsupported dict key type hint: {type_.__args__[0]}")
sub_constructor = self.get_type_constructor(type_.__args__[1])
return lambda d, p: {k: sub_constructor(v, f'{p}.{k}') for k, v in d.items()}
# Other type-hints are not supported
raise TypeError(f"Unsupported type hint: {type_}")
# ConfigView are constructor-ready
if issubclass(type_, ConfigView):
return type_
raise TypeError(f"Unsupported type: {type_}")
def path(self, sub_path: str) -> str:
return f'{self._path_prefix}.{sub_path}' if self._path_prefix else sub_path
def get(self, path: str) -> Any:
if path == '.':
return self
parts = path.split('.')
node = self
for part in parts:
if not hasattr(node, part):
raise KeyError(f"Invalid path: {path}")
node = getattr(node, part)
return node
def to_dict(self) -> dict:
res = {}
for field in self._field_constructor_map:
value = getattr(self, field)
res[field] = self.deconstruct_obj(value)
return res
@staticmethod
def deconstruct_obj(o: Any) -> Any:
if isinstance(o, ConfigView):
return o.to_dict()
elif isinstance(o, list):
return [ConfigView.deconstruct_obj(v) for v in o]
elif isinstance(o, dict):
return {k: ConfigView.deconstruct_obj(v) for k, v in o.items()}
return o
def __iter__(self):
for field in self._field_constructor_map:
yield field, getattr(self, field)
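# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Field names below are illustrative; fields are declared as class-level type
# hints and populated from the values dict passed to the constructor.
# class ServerConfig(ConfigView):
#     host: str = "localhost"
#     port: int = 8080
#     tags: typing.List[str] = []
# cfg = ServerConfig({"host": "example.org", "port": 9000, "tags": ["a", "b"]})
# assert cfg.get("port") == 9000
# assert cfg.to_dict()["host"] == "example.org"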
| 2.3125
| 2
|
Lib/test/test_async.py
|
pyparallel/pyparallel
| 652
|
12783125
|
import os
import sys
import atexit
import unittest
import tempfile
import async
import _async
import socket
from socket import (
AF_INET,
SOCK_STREAM,
)
def tcpsock():
return socket.socket(AF_INET, SOCK_STREAM)
CHARGEN = [
r""" !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefg""",
r"""!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefgh""",
r""""#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghi""",
r"""#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghij""",
r"""$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijk""",
]
QOTD = 'An apple a day keeps the doctor away.\r\n'
ECHO_HOST = ('echo.snakebite.net', 7)
QOTD_HOST = ('qotd.snakebite.net', 17)
DISCARD_HOST = ('discard.snakebite.net', 9)
DAYTIME_HOST = ('daytime.snakebite.net', 13)
CHARGEN_HOST = ('chargen.snakebite.net', 19)
SERVICES_IP = socket.getaddrinfo(*ECHO_HOST)[0][4][0]
ECHO_IP = (SERVICES_IP, 7)
DISCARD_IP = (SERVICES_IP, 9)
DAYTIME_IP = (SERVICES_IP, 13)
CHARGEN_IP = (SERVICES_IP, 19)
NO_CB = None
NO_EB = None
HOST = '192.168.3.11'
ADDR = (HOST, 0)
TEMPDIR = None
def rmtempdir():
if TEMPDIR:
TEMPDIR.cleanup()
def get_tempfile():
    # Renamed from tempfile() so the helper no longer shadows the tempfile module,
    # and TEMPDIR is declared global so the lazy initialisation actually persists.
    global TEMPDIR
    if not TEMPDIR:
        TEMPDIR = tempfile.TemporaryDirectory()
        atexit.register(rmtempdir)
    assert os.path.isdir(TEMPDIR.name)
    f = tempfile.NamedTemporaryFile(dir=TEMPDIR.name, delete=False)
    assert os.path.isfile(f.name)
    return f
def tempfilename():
    f = get_tempfile()
    f.close()
    return f.name
class TestBasic(unittest.TestCase):
def test_calling_run_with_no_events_fails(self):
self.assertRaises(AsyncRunCalledWithoutEventsError, _async.run_once)
class TestSubmitWork(unittest.TestCase):
def test_submit_simple_work(self):
def f(i):
return i * 2
def cb(r):
_async.call_from_main_thread(
self.assertEqual,
(r, 4),
)
_async.submit_work(f, 2, None, cb, None)
_async.run()
def test_value_error_in_callback(self):
def f():
return laksjdflaskjdflsakjdfsalkjdf
_async.submit_work(f, None, None, None, None)
self.assertRaises(NameError, _async.run)
def test_value_error_in_callback_then_run(self):
def f():
return laksjdflaskjdflsakjdfsalkjdf
_async.submit_work(f, None, None, None, None)
self.assertRaises(NameError, _async.run)
_async.run()
def test_multiple_value_errors_in_callback_then_run(self):
def f():
return laksjdflaskjdflsakjdfsalkjdf
_async.submit_work(f, None, None, None, None)
_async.submit_work(f, None, None, None, None)
self.assertRaises(NameError, _async.run)
self.assertRaises(NameError, _async.run)
_async.run()
def test_call_from_main_thread(self):
d = {}
def f(i):
_async.call_from_main_thread_and_wait(
d.__setitem__,
('foo', i*2),
)
return _async.call_from_main_thread_and_wait(
d.__getitem__, 'foo'
)
def cb(r):
_async.call_from_main_thread(
self.assertEqual,
(r, 4),
)
_async.submit_work(f, 2, None, cb, None)
_async.run()
def test_call_from_main_thread_decorator(self):
@async.call_from_main_thread
def f():
self.assertFalse(_async.is_parallel_thread)
_async.submit_work(f, None, None, None, None)
_async.run()
def test_submit_simple_work_errback_invoked(self):
def f():
return laksjdflaskjdflsakjdfsalkjdf
def test_e(et, ev, eb):
try:
f()
except NameError as e2:
self.assertEqual(et, e2.__class__)
self.assertEqual(ev, e2.args[0])
self.assertEqual(eb.__class__, e2.__traceback__.__class__)
else:
self.assertEqual(0, 1)
def cb(r):
_async.call_from_main_thread(self.assertEqual, (0, 1))
def eb(e):
_async.call_from_main_thread_and_wait(test_e, e)
_async.submit_work(f, None, None, cb, eb)
_async.run()
class TestSubmitFileIO(unittest.TestCase):
def test_write(self):
n = tempfilename()
        f = open(n, 'wb')
_async.submit_io(f.write, b'foo', None, None, None)
_async.run()
f.close()
        with open(n, 'rb') as f:
self.assertEqual(f.read(), b'foo')
def test_read(self):
@async.call_from_main_thread
def cb(d):
self.assertEqual(d, b'foo')
n = tempfilename()
        with open(n, 'wb') as f:
f.write(b'foo')
        f = open(n, 'rb')
_async.submit_io(f.read, None, None, cb, None)
_async.run()
class TestConnectSocketIO(unittest.TestCase):
def test_backlog(self):
sock = tcpsock()
port = sock.bind(ADDR)
sock.listen(100)
self.assertEqual(sock.backlog, 100)
sock.close()
def test_connect(self):
@async.call_from_main_thread
def cb():
self.assertEqual(1, 1)
sock = tcpsock()
_async.connect(sock, DISCARD_IP, 1, None, cb, NO_EB)
_async.run()
def test_connect_with_data(self):
@async.call_from_main_thread
def cb(sock):
self.assertEqual(1, 1)
sock = tcpsock()
_async.connect(sock, DISCARD_IP, 1, b'buf', cb, NO_EB)
_async.run()
def test_connect_then_recv(self):
@async.call_from_main_thread
def _check(data):
self.assertEqual(data, QOTD)
def read_cb(sock, data):
_check(data)
def connect_cb(sock):
_async.recv(sock, read_cb, NO_EB)
sock = tcpsock()
_async.connect(sock, QOTD_IP, 1, None, connect_cb, NO_EB)
_async.run()
def test_connect_with_data_then_recv(self):
@async.call_from_main_thread
def _check(data):
self.assertEqual(data, b'hello')
def read_cb(sock, data):
_check(data)
def connect_cb(sock):
_async.recv(sock, read_cb, NO_EB)
sock = tcpsock()
_async.connect(sock, ECHO_IP, 1, b'hello', connect_cb, NO_EB)
_async.run()
def test_connect_then_send_then_recv(self):
@async.call_from_main_thread
def _check(data):
self.assertEqual(data, b'hello')
def read_cb(sock, data):
_check(data)
def connect_cb(sock):
_async.recv(sock, read_cb, NO_EB)
_async.send(sock, b'hello', NO_CB, NO_EB)
sock = tcpsock()
_async.connect(sock, ECHO_IP, 1, None, connect_cb, NO_EB)
_async.run()
def test_recv_before_connect_with_data_then_recv(self):
@async.call_from_main_thread
def _check(data):
self.assertEqual(data, b'hello')
def read_cb(sock, data):
_check(data)
sock = tcpsock()
_async.recv(sock, read_cb, NO_EB)
_async.connect(sock, ECHO_IP, 1, b'hello', NO_CB, NO_EB)
_async.run()
def test_recv_before_connect_then_send_then_recv(self):
@async.call_from_main_thread
def _check(data):
self.assertEqual(data, b'hello')
def read_cb(sock, data):
_check(data)
def connect_cb(sock):
_async.send(sock, b'hello', NO_CB, NO_EB)
sock = tcpsock()
_async.recv(sock, read_cb, NO_EB)
_async.connect(sock, ECHO_IP, 1, None, connect_cb, NO_EB)
_async.run()
class TestAcceptSocketIO(unittest.TestCase):
def test_accept(self):
@async.call_from_main_thread
def new_connection(sock, data):
self.assertEqual(data, b'hello')
sock = tcpsock()
port = sock.bind(ADDR)
addr = sock.getsockname()
sock.listen(1)
_async.accept(sock, new_connection, NO_EB)
client = tcpsock()
_async.connect(client, addr, 1, b'hello', NO_CB, NO_EB)
_async.run()
sock.close()
def test_accept_backlog2(self):
        counter = 0
        @async.call_from_main_thread
        def new_connection(sock, data):
            nonlocal counter
            self.assertEqual(data, b'hello')
            counter += 1
sock = tcpsock()
port = sock.bind(ADDR)
addr = sock.getsockname()
sock.listen(2)
_async.accept(sock, new_connection, NO_EB)
client = tcpsock()
_async.connect(client, addr, 2, b'hello', NO_CB, NO_EB)
_async.run()
self.assertEqual(counter, 2)
if __name__ == '__main__':
unittest.main()
# vim:set ts=8 sw=4 sts=4 tw=78 et:
| 2.484375
| 2
|
relezoo/cli/cli.py
|
Ohtar10/rele-zoo
| 0
|
12783126
|
<gh_stars>0
import logging
import click
from relezoo.__version__ import __version__
from relezoo.algorithms import reinforce
from relezoo.utils.console import start_python_console
def docstring_parameter(*sub):
"""Decorate the main click command to format the docstring."""
def dec(obj):
obj.__doc__ = obj.__doc__.format(*sub)
return obj
return dec
@click.group()
@click.option("--debug/--no-debug", default=False, help="Enable debug output.")
@click.option("-wd", "--work-dir", default=".", help="Working directory to drop output.")
@click.pass_context
@docstring_parameter(__version__)
def relezoo(ctx, debug, work_dir):
"""ReleZoo {0}"""
logging.basicConfig(format="%(levelname)s - %(asctime)s: %(message)s ",
level=logging.INFO if not debug else logging.DEBUG)
ctx.ensure_object(dict)
ctx.obj['WORK_DIR'] = work_dir
@relezoo.command()
@click.pass_context
def shell(ctx):
"""Run interactive shell with preloaded module resources."""
start_python_console(banner='ReleZoo shell')
relezoo.add_command(reinforce.command)
| 2.140625
| 2
|
decode/struct_SHELL_LINK_HEADER.py
|
SkyLined/headsup
| 1
|
12783127
|
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import C;
LINK_CLSID = '{00021401-0000-0000-C000-000000000046}';
VALID_SHOW_COMMAND_VALUES = {
1: 'SW_SHOWNORMAL',
3: 'SW_SHOWMAXIMIZED',
7: 'SW_SHOWMINNOACTIVE',
};
VALID_HOTKEY_LOWBYTE_VALUES = {
0x90: 'NumLock',
0x91: 'ScrollLock',
};
for i in range(0x30, 0x3A):
VALID_HOTKEY_LOWBYTE_VALUES[i] = chr(i); # 0 - 9
for i in range(0x41, 0x5B):
VALID_HOTKEY_LOWBYTE_VALUES[i] = chr(i); # A - Z
for i in range(1, 25):
VALID_HOTKEY_LOWBYTE_VALUES[i + 0x6F] = 'F%d' % i; # F1 - F24 (!)
# bitflags_LinkFlags
def bitflags_LinkFlags(stream, offset, max_size, parent, name):
result = C.BITFIELD(stream, offset, max_size, parent, name, \
('Reserved', 5),
('KeepLocalIDListForUNCTarget', 1),
('PreferEnvironmentPath', 1),
('UnaliasOnSave', 1),
('AllowLinkToLink', 1),
('DisableKnownFolderAlias', 1),
('DisableKnownFolderTracking', 1),
('DisableLinkPathTracking', 1),
('EnableTargetMetadata', 1),
('ForceNoLinkTrack', 1),
('RunWithShimLayer', 1),
('Unused2', 1),
('NoPidlAlis', 1),
('HasExpIcon', 1),
('RunAsUser', 1),
('HasDarwinID', 1),
('Unused1', 1),
('RunInSeperateProcess', 1),
('HasExpString', 1),
('ForceNoLinkInfo', 1),
('IsUnicode', 1),
('HasIconLocation', 1),
('HasArguments', 1),
('HasWorkingDir', 1),
('HasRelativePath', 1),
('HasName', 1),
('HasLinkInfo', 1),
('HasLinkTargetIDList', 1)
);
result.dump_simplified = True;
if result._Unused1.value != 0:
result._Unused1.warnings.append('Expected value to be 0');
if result._Unused2.value != 0:
result._Unused2.warnings.append('Expected value to be 0');
if result._Reserved.value != 0:
result._Reserved.warnings.append('Expected value to be 0');
return result;
# bitflags_FileAttributes
def bitflags_FileAttributes(stream, offset, max_size, parent, name):
result = C.BITFIELD(stream, offset, max_size, parent, name, \
('Unused', 17),
('FILE_ATTRIBUTE_ENCRYPTED', 1),
('FILE_ATTRIBUTE_NOT_CONTENT_INDEXED', 1),
('FILE_ATTRIBUTE_OFFLINE', 1),
('FILE_ATTRIBUTE_COMPRESSED', 1),
('FILE_ATTRIBUTE_REPARSE_POINT', 1),
('FILE_ATTRIBUTE_SPARSE_FILE', 1),
('FILE_ATTRIBUTE_TEMPORARY', 1),
('FILE_ATTRIBUTE_NORMAL', 1),
('Reserved2', 1),
('FILE_ATTRIBUTE_ARCHIVE', 1),
('FILE_ATTRIBUTE_DIRECTORY', 1),
('Reserved1', 1),
('FILE_ATTRIBUTE_SYSTEM', 1),
('FILE_ATTRIBUTE_HIDDEN', 1),
('FILE_ATTRIBUTE_READONLY', 1)
);
result.dump_simplified = True;
if result._Reserved1.value != 0:
result._Reserved1.warnings.append('Expected value to be 0');
if result._Reserved2.value != 0:
result._Reserved2.warnings.append('Expected value to be 0');
return result;
# struct_HotKeyFlags
def struct_HotKeyFlags(stream, offset, max_size, parent, name):
result = C.STRUCT(stream, offset, max_size, parent, name, \
'HotKeyFlags', \
('LowByte', C.BYTE),
('HighByte', {C.BITFIELD: (
('Reserved', 5),
('HOTKEYF_ALT', 1),
('HOTKEYF_CONTROL', 1),
('HOTKEYF_SHIFT', 1),
)})
);
  if result._LowByte.value in VALID_HOTKEY_LOWBYTE_VALUES:
result._LowByte.notes.append( \
VALID_HOTKEY_LOWBYTE_VALUES[result._LowByte.value]);
else:
result._LowByte.warnings.append('Unrecognized value');
  if result._HighByte._Reserved.value > 0:
    result._HighByte._Reserved.warnings.append('Expected value to be 0');
return result;
# http://download.microsoft.com/download/B/0/B/B0B199DB-41E6-400F-90CD-C350D0C14A53/%5BMS-SHLLINK%5D.pdf
def struct_SHELL_LINK_HEADER(stream, offset, max_size, parent, name):
import C;
from struct_GUID import struct_GUID;
result = C.STRUCT(stream, offset, max_size, parent, name, \
'LNK_HEADER', \
('HeaderSize', C.DWORD),
('LinkCLSID', struct_GUID),
('LinkFlags', bitflags_LinkFlags),
('FileAttributes', bitflags_FileAttributes),
('CreationTime', C.QWORD),
('AccessTime', C.QWORD),
('WriteTime', C.QWORD),
('FileSize', C.UINT),
('IconIndex', C.INT),
('ShowCommand', C.UINT),
('HotKey', C.WORD),
('Reserved1', C.WORD),
('Reserved2', C.DWORD),
('Reserved3', C.DWORD)
);
if result._HeaderSize.value != 0x4C:
result._HeaderSize.warnings.append(
'expected value to be 0x4C');
if result._LinkCLSID.string_value != LINK_CLSID:
result._LinkCLSID.warnings.append('expected value to be "%s"' % LINK_CLSID);
if result._ShowCommand.value in VALID_SHOW_COMMAND_VALUES:
result._ShowCommand.notes.append( \
VALID_SHOW_COMMAND_VALUES[result._ShowCommand.value]);
else:
    valid_values = [str(value) for value in sorted(VALID_SHOW_COMMAND_VALUES)];
    valid_values = '%s or %s' % \
      (', '.join(valid_values[:-1]), valid_values[-1]);
result._ShowCommand.warnings.append( \
'Expected value to be %s' % valid_values);
if result._Reserved1.value != 0:
result._Reserved1.warnings.append('Expected value to be 0');
if result._Reserved2.value != 0:
result._Reserved2.warnings.append('Expected value to be 0');
return result;
| 1.796875
| 2
|
python/Exercicios/ex094.py
|
Robert-Marchinhaki/primeiros-passos-Python
| 0
|
12783128
|
<filename>python/Exercicios/ex094.py
grupo_pessoas = []
pessoas = {}
med_idade_grupo = 0
mulheres = []
p_acima_media = []
while True:
nome = str(input('Nome: '))
idade = int(input('Idade: '))
sexo = str(input('Sexo [F/M/O]: ')).upper().strip()
while sexo not in 'FMO':
print('Erro! Use somente [F/M/O].')
sexo = str(input('Sexo [F/M/O]: ')).upper().strip()
    if sexo == 'F':
mulheres.append(nome)
pessoas['nome'] = nome
pessoas['idade'] = idade
pessoas['sexo'] = sexo
grupo_pessoas.append(pessoas.copy())
med_idade_grupo += idade
parada = str(input('Quer continuar? [S/N]: ')).upper().strip()
print('-=' * 20)
while parada not in 'SN':
        print('Erro! Use somente [S/N].')
parada = str(input('Quer continuar? [S/N]: ')).upper().strip()
print('-=' * 20)
    if parada == 'N':
break
med_idade_grupo /= len(grupo_pessoas)
for c in range(0, len(grupo_pessoas)):
if grupo_pessoas[c]['idade'] > med_idade_grupo:
p_acima_media.append(grupo_pessoas[c]['nome'])
print('-=' * 20)
print(grupo_pessoas)
print(f'Ao todo {len(grupo_pessoas)} pessoas foram cadastradas')
print(f'As mulheres cadastradas foram: {mulheres}')
print(f'A média de idade do grupo é {med_idade_grupo}')
print(f'As pessoas que tem idade acima da média são: {p_acima_media}')
print(pessoas)
| 3.5625
| 4
|
calendar_service.py
|
jantheprogrammer/event-email-service
| 0
|
12783129
|
<reponame>jantheprogrammer/event-email-service
from datetime import datetime
import pickle
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
# If modifying these scopes, delete the file token.pickle.
SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']
def get_events():
"""Shows basic usage of the Google Calendar API.
Prints the start and name of the next 10 events on the user's calendar.
"""
creds = None
# The file token.pickle stores the user's access and refresh tokens, and is
# created automatically when the authorization flow completes for the first
# time.
if os.path.exists('token.pickle'):
with open('token.pickle', 'rb') as token:
creds = pickle.load(token)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
'credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open('token.pickle', 'wb') as token:
pickle.dump(creds, token)
service = build('calendar', 'v3', credentials=creds)
now = datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
now_in_ms = datetime.now().timestamp() * 1000
# add to_date, one month from now
one_day_in_ms = 86400000
month_in_ms = 30 * one_day_in_ms
to_date = str(datetime.fromtimestamp((now_in_ms + month_in_ms) / 1000).isoformat()) + 'Z'
# call the Calendar API
events_result = service.events().list(calendarId='primary', timeMin=now, timeMax=to_date,
maxResults=100, singleEvents=True,
orderBy='startTime').execute()
events = events_result.get('items', [])
return format_events(events)
def format_events(events):
formatted_events = []
for event in events:
start = event['start'].get('dateTime', event['start'].get('date'))
formatted_events.append({'description': event['summary'], 'date': start[5:].replace('-', '.') + '.', 'summary': event['description']})
return formatted_events
| 2.90625
| 3
|
rompy/__init__.py
|
pbranson/rompy
| 1
|
12783130
|
<reponame>pbranson/rompy
#-----------------------------------------------------------------------------
# Copyright (c) 2020 - 2021, CSIRO
#
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
import logging
import warnings
logger = logging.getLogger('rompy')
import os
import intake
here = os.path.abspath(os.path.dirname(__file__))
cat = intake.open_catalog(os.path.join(here, 'catalogs', 'master.yaml'))
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 1.5625
| 2
|
functions/id_check.py
|
joao-lanzarini/sales-analytics-python
| 0
|
12783131
|
<gh_stars>0
import csv
def check(file='inventory', action=''):
    line = 0
    while True:
        found = False
        i = input(f'Product ID to {action} [0 to exit]: ')
        if i == '0':
            line = 0
            break
        with open(f'./{file}.csv') as csv_file:
            reader = csv.reader(csv_file)
            lines = list(reader)
        line = 0
        for row in lines:
            if row[0] == str(i):
                found = True
                break
            else:
                line += 1
        if found:
            break
        print('ID NOT FOUND!')
    return line
| 3.203125
| 3
|
nicos/devices/datasinks/text.py
|
jkrueger1/nicos
| 0
|
12783132
|
<gh_stars>0
# -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# <NAME> <<EMAIL>>
#
# *****************************************************************************
from gzip import GzipFile as StdGzipFile
import numpy
from nicos.core.data.sink import GzipFile
from nicos.devices.datasinks.image import ImageFileReader, ImageSink, \
MultipleFileSinkHandler
from nicos.utils import File
class NPImageSinkHandler(MultipleFileSinkHandler):
"""Numpy text format filesaver using `numpy.savetxt`"""
filetype = "txt"
def writeData(self, fp, image):
numpy.savetxt(fp, numpy.asarray(image), fmt="%u")
class NPFileSink(ImageSink):
handlerclass = NPImageSinkHandler
class NPImageFileReader(ImageFileReader):
filetypes = [("txt", "Numpy Text Format (*.txt)")]
@classmethod
def fromfile(cls, filename):
"""Reads numpy array from .txt file."""
return numpy.loadtxt(File(filename, 'r'))
class NPGZImageSinkHandler(NPImageSinkHandler):
"""Compressed Numpy text format filesaver using `numpy.savetxt`"""
filetype = "npgz"
fileclass = GzipFile
def writeData(self, fp, image):
numpy.savetxt(fp, numpy.asarray(image), fmt="%u")
class NPGZFileSink(ImageSink):
handlerclass = NPGZImageSinkHandler
class NPGZImageFileReader(ImageFileReader):
filetypes = [("npgz", "Compressed Numpy Text Format (*.gz)")]
@classmethod
def fromfile(cls, filename):
"""Reads numpy array from .gz file."""
return numpy.loadtxt(StdGzipFile(filename, 'r'))
| 1.898438
| 2
|
tests/core/config/models/bdk_server_config_test.py
|
symphony-elias/symphony-bdk-python
| 17
|
12783133
|
<reponame>symphony-elias/symphony-bdk-python<filename>tests/core/config/models/bdk_server_config_test.py<gh_stars>10-100
from symphony.bdk.core.config.model.bdk_server_config import BdkServerConfig, BdkProxyConfig
def test_get_base_path():
config = BdkServerConfig(scheme="https", host="dev.symphony.com", port=123, context="context")
assert config.get_base_path() == "https://dev.symphony.com:123/context"
def test_wrong_input_types():
config = BdkServerConfig(scheme=2, host=2, port="port", context=2)
assert config.get_formatted_context() == ""
assert config.get_base_path() == "2://2:port"
def test_get_port_as_string():
config = BdkServerConfig(port=None)
assert config.get_port_as_string() == f":{config.DEFAULT_HTTPS_PORT}"
config = BdkServerConfig(port=2)
assert config.get_port_as_string() == ":2"
config.port = "884"
assert config.get_port_as_string() == ":884"
config.port = None
assert config.get_port_as_string() == ""
def test_proxy_config_no_credentials():
proxy = BdkProxyConfig("proxy.symphony.com", 1234)
assert proxy.get_url() == "http://proxy.symphony.com:1234"
assert not proxy.are_credentials_defined()
def test_proxy_config_with_credentials():
proxy = BdkProxyConfig("proxy.symphony.com", 1234, "user", "password")
assert proxy.get_url() == "http://proxy.symphony.com:1234"
assert proxy.are_credentials_defined()
assert proxy.get_credentials() == "user:password"
| 1.96875
| 2
|
maintenance/tests_old/test_ale_consistency.py
|
sfpd/rlreloaded
| 0
|
12783134
|
import alepy
import numpy as np
import os.path as osp
from control3 import CTRL_ROOT
# import cv2
world = alepy.AtariWorld(osp.join(CTRL_ROOT,"domain_data/atari_roms/space_invaders.bin"))
for j in xrange(5):
x0 = world.GetInitialState(np.random.randint(0,50))
u0 = np.array([0],'uint8')
y,r,o,d = world.Step(x0,u0)
for i in xrange(3):
y1,r1,o1,d1 = world.Step(x0,u0)
assert (y==y1).all() and (r==r1) and (np.array(o)==np.array(o1)).all()
nsteps = np.random.randint(10)
x = x0
for t in xrange(nsteps):
u = np.array([np.random.randint(0,10)],dtype='uint8')
x,_,_,_ = world.Step(x,u)
| 2.375
| 2
|
src/models/XGboost/Model_Training_XGboost-MLflow.py
|
shunbolt/BDA-Project-Tran-Torrado-EFREI
| 0
|
12783135
|
import pandas as pd
import argparse
import mlflow
from sklearn.utils import resample
from sklearn.model_selection import train_test_split
import xgboost as xgb
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from urllib.parse import urlparse
if __name__ == "__main__":
df_train = pd.read_csv('../../../data/processed/processed_application_train.csv')
df_test = pd.read_csv('../../../data/processed/processed_application_test.csv')
# get argument for the model
def parse_args():
parser = argparse.ArgumentParser(description="XGBoost example")
parser.add_argument(
"--learning-rate",
type=float,
default=0.3,
help="learning rate to update step size at each boosting step (default: 0.3)",
)
parser.add_argument(
"--n-estimators",
type=int,
default=10,
help="Number of boosting rounds. (default: 10)",
)
return parser.parse_args()
args = parse_args()
# Separate majority and minority classes
df_majority = df_train[df_train["TARGET"] == 0]
df_minority = df_train[df_train["TARGET"] == 1]
# Downsample majority class
df_majority_downsampled = resample(df_majority,
replace=False, # sample without replacement
n_samples=50000, # to match minority class
random_state=123) # reproducible results
# Combine minority class with downsampled majority class
df_downsampled = pd.concat([df_majority_downsampled, df_minority])
X = df_downsampled.drop(columns="TARGET")
y = df_downsampled['TARGET']
# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test, label=y_test)
#Run mlflow
with mlflow.start_run():
xgb_model = xgb.XGBClassifier(n_estimators=args.n_estimators,learning_rate=args.learning_rate,random_state=42)
xgb_model.fit(X_train, y_train)
xgb_pred = xgb_model.predict(X_test)
print("This is the accuracy score for XGBClassifier : ")
acc = accuracy_score(y_test, xgb_pred)
print(acc)
print("This is the confusion matrix score for XGBClassifier : ")
cm = confusion_matrix(y_test, xgb_pred)
print(confusion_matrix(y_test, xgb_pred))
#log metric confusion metrix
t_n, f_p, f_n, t_p = cm.ravel()
mlflow.log_metric("true_negative", t_n)
mlflow.log_metric("false_positive", f_p)
mlflow.log_metric("false_negative", f_n)
mlflow.log_metric("true_positive", t_p)
mlflow.log_metrics({"accuracy": acc})
mlflow.log_param("learning_rate",args.learning_rate)
mlflow.log_param("estimators",args.n_estimators)
tracking_url_type_store = urlparse(mlflow.get_tracking_uri()).scheme
# Model registry does not work with file store
if tracking_url_type_store != "file":
# Register the model
mlflow.sklearn.log_model(xgb_model, "model", registered_model_name="XGboost")
else:
mlflow.sklearn.log_model(xgb_model, "model")
| 3.078125
| 3
|
examples/02_advanced_examples/plot_custom_arguments.py
|
jhosoume/pymfe
| 86
|
12783136
|
"""
Customizing measures arguments
==============================
In this example we will show you how to custorize the measures.
"""
# Load a dataset
from sklearn.datasets import load_iris
from pymfe.mfe import MFE
data = load_iris()
y = data.target
X = data.data
###############################################################################
# Custom Arguments
# ----------------
#
# It is possible to pass custom arguments to every meta-feature using PyMFE
# extract method kwargs. The keywords must be the target meta-feature name, and
# the value must be a dictionary in the format {argument: value}, i.e., each
# key in the dictionary is a target argument with its respective value. In the
# example below, the extraction of metafeatures ``min`` and ``max`` happens as
# usual, but the meta-features ``sd``, ``nr_norm`` and ``nr_cor_attr`` will
# receive user custom argument values, which will interfere in each metafeature
# result.
# Extract measures with custom user arguments
mfe = MFE(features=["sd", "nr_norm", "nr_cor_attr", "min", "max"])
mfe.fit(X, y)
ft = mfe.extract(
sd={"ddof": 0},
nr_norm={"method": "all", "failure": "hard", "threshold": 0.025},
nr_cor_attr={"threshold": 0.6},
)
print("\n".join("{:50} {:30}".format(x, y) for x, y in zip(ft[0], ft[1])))
| 3.125
| 3
|
pingsweep.py
|
strohmy86/Scripts
| 1
|
12783137
|
#!/usr/bin/env python3
# MIT License
# Copyright (c) 2020 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import ipaddress
from subprocess import DEVNULL, Popen
class Color:
PURPLE = "\033[95m"
CYAN = "\033[96m"
DARKCYAN = "\033[36m"
BLUE = "\033[94m"
GREEN = "\033[92m"
YELLOW = "\033[93m"
RED = "\033[91m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
END = "\033[0m"
def cred():
print(
Color.DARKCYAN
+ "\n"
+ "*********************************\n"
+ "* Pingsweep script *\n"
+ "* *\n"
+ "* Written and maintained by: *\n"
+ "* <NAME> *\n"
+ "* <EMAIL> *\n"
+ "* https://github.com/strohmy86 *\n"
+ "* *\n"
+ "*********************************\n"
+ "\n"
+ Color.END
)
def main(net_addr, file):
net = net_addr.replace(".", "_")
net = net.replace("/", "-")
# Create the network
ip_net = ipaddress.ip_network(net_addr)
# Get all hosts on that network
all_hosts = list(ip_net.hosts())
p = {} # ip -> process
for n in range(len(all_hosts)): # start ping process
ip = str(all_hosts[n])
p[ip] = Popen(
["ping", "-n", "-c", "1", "-w", "2", ip],
stdout=DEVNULL,
stderr=DEVNULL,
)
t = [] # List for active IP addresses
if file is True:
f = open("/home/lstrohm/ActiveIps-" + net + ".txt", "w")
f.close()
while p:
for ip, proc in p.items():
if proc.poll() is not None: # ping finished
del p[ip] # remove from the process list
if proc.returncode == 0 and file is False:
print("%s active" % ip)
t.append(ip)
                elif proc.returncode == 0 and file is True:
                    f = open("/home/lstrohm/ActiveIps-" + net + ".txt", "a")
                    f.write("%s\n" % ip)
                    f.close()
# else:
# print('%s error' % ip)
break
# Count total number of active IP addresses
if file is True:
fr = open("/home/lstrohm/ActiveIps-" + net + ".txt", "r")
total = len(fr.readlines())
fr.close()
fw = open("/home/lstrohm/ActiveIps-" + net + ".txt", "a")
fw.write("Total Active Devices: %s" % total)
fw.close()
print(
Color.CYAN
+ "Saved list to ~/ActiveIps-"
+ net
+ ".txt"
+ Color.END
)
elif file is False:
print(Color.YELLOW + "Total Active Devices: %s" % len(t) + Color.END)
# Starts the script.
parser = argparse.ArgumentParser(description="Script ping sweep a subnet")
parser.add_argument(
"-f",
"--file",
default=False,
action="store_const",
const=True,
help="Write results to a text file.",
)
parser.add_argument(
"net",
metavar="Network Subnet",
default="",
type=str,
help="network address in CIDR format (ex.192.168.1.0/24)",
)
args = parser.parse_args()
net_addr = args.net
file = args.file
cred()
main(net_addr, file)
| 1.992188
| 2
|
Zip_Codes/Zipcodebase/unit_tests.py
|
Jay4C/API
| 1
|
12783138
|
import unittest
import requests
endpoint = "https://app.zipcodebase.com/api/v1"
apikey = ""
# https://app.zipcodebase.com/documentation
class UnitTestsZipcodebaseWithTorNetwork(unittest.TestCase):
def test_Authentification_Remaining_Credits(self):
print("test_Authentification_Remaining_Credits")
headers = {
'apikey': apikey
}
url = endpoint + "/status"
r = requests.get(url, headers=headers)
print(r.text)
def test_Postal_code_to_location_information(self):
print("test_Postal_code_to_location_information")
headers = {
'apikey': apikey
}
params = (
("codes", "10005,51503"),
)
url = endpoint + "/search"
r = requests.get(url, headers=headers, params=params)
print(r.text)
def test_Distance_calculation_between_postal_codes(self):
print("test_Distance_calculation_between_postal_codes")
headers = {
'apikey': apikey
}
params = (
("code", "10005"),
("compare", "10006,10007"),
("country", "us"),
)
url = endpoint + "/distance"
r = requests.get(url, headers=headers, params=params)
print(r.text)
def test_Postal_codes_within_a_radius(self):
print("test_Postal_codes_within_a_radius")
headers = {
'apikey': apikey
}
params = (
("code", "10005"),
("radius", "100"),
("country", "us"),
)
url = endpoint + "/radius"
r = requests.get(url, headers=headers, params=params)
print(r.text)
def test_Postal_codes_within_a_certain_distance(self):
print("test_Postal_codes_within_a_certain_distance")
headers = {
'apikey': apikey
}
params = (
("codes", "10005,10006,10009,90001"),
("distance", "100"),
("country", "us"),
)
url = endpoint + "/match"
r = requests.get(url, headers=headers, params=params)
print(r.text)
def test_Postal_codes_by_city(self):
print("test_Postal_codes_by_city")
headers = {
'apikey': apikey
}
params = (
("city", "Amsterdam"),
("state_name", "Noord-Holland"),
("country", "nl"),
)
url = endpoint + "/code/city"
r = requests.get(url, headers=headers, params=params)
print(r.text)
def test_Postal_codes_by_state(self):
print("test_Postal_codes_by_state")
headers = {
'apikey': apikey
}
params = (
("state_name", "Noord-Holland"),
("country", "nl"),
)
url = endpoint + "/code/state"
r = requests.get(url, headers=headers, params=params)
print(r.text)
def test_Provinces_states_of_a_country(self):
print("test_Provinces_states_of_a_country")
headers = {
'apikey': apikey
}
params = (
("country", "de"),
)
url = endpoint + "/country/province"
r = requests.get(url, headers=headers, params=params)
print(r.text)
if __name__ == '__main__':
unittest.main()
| 3.03125
| 3
|
cameraServer.py
|
thedropbears/vision-2019
| 0
|
12783139
|
#!/usr/bin/env python3
import json
import sys
import numpy as np
import cv2
import math
import time
from collections import namedtuple
from cscore import CameraServer
from networktables import NetworkTables
# Magic Numbers
lowerGreen = (50, 120, 130) # Our Robot's Camera
higherGreen = (100, 220, 220)
minContourArea = 10
angleOffset = 10
rightAngleSize = -14
leftAngleSize = -75.5
screenX = 320
screenY = 240
screenSize = (screenX, screenY)
distance_away = 110
realTapeDistance = 0.2 # metres between closest tape points
focal_length = 325
# Initialisation
configFile = "/boot/frc.json"
CameraConfig = namedtuple("CameraConfig", ["name", "path", "config"])
def readCameraConfig(config):
"""Read single camera configuration."""
return CameraConfig(config["name"], config["path"], config)
def readConfig():
"""Read configuration file."""
# parse file
with open(configFile) as f:
j = json.load(f)
# cameras
cameras = j["cameras"]
cameras = [readCameraConfig(camera) for camera in cameras]
return cameras
# Our code begins here
def startCamera(config):
"""Start running the camera."""
cs = CameraServer.getInstance()
camera = cs.startAutomaticCapture(name=config.name, path=config.path)
camera.setConfigJson(json.dumps(config.config))
return cs, camera
# Process Functions
def getDistance(boxes):
if boxes is None:
return math.nan, math.nan
Lpoint = max(boxes[0], key=lambda x: x[0])
Rpoint = min(boxes[1], key=lambda x: x[0])
width = abs(Lpoint[0] - Rpoint[0])
mid = (Rpoint[0] + Lpoint[0]) / 2
distance_from_center = mid - screenX / 2
offset = getOffset(width, distance_from_center)
if width > 0:
dist = (realTapeDistance * focal_length) / width
return dist, offset
else:
return math.nan, offset
def getOffset(width, x):
# if width = 20cm then what is x in cm
offset = x / (width / (realTapeDistance))
return -offset
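# Worked example (a sanity-check sketch, not part of the original pipeline; the
# helper name and pixel values below are made up). With realTapeDistance = 0.2 m
# and focal_length = 325 px, tapes measured 65 px apart should sit about
# (0.2 * 325) / 65 = 1.0 m away, and a pair centred 65 px right of the screen
# centre maps to an offset of -(65 / (65 / 0.2)) = -0.2 m.
def _distance_offset_example():
    width_px = 65          # pixel distance between the closest tape points
    centre_error_px = 65   # pixel distance of the pair centre from screen centre
    dist = (realTapeDistance * focal_length) / width_px  # -> 1.0 (metres)
    offset = getOffset(width_px, centre_error_px)        # -> -0.2 (metres)
    return dist, offset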
def createAnnotatedDisplay(
frame: np.array, pairs: list, closestToMiddle: tuple, circle: tuple
) -> np.array:
frame = cv2.line(frame, (160, 0), (160, 240), (255, 0, 0), thickness=1)
for pair in pairs:
if (pair[0][1][0] == closestToMiddle[0][0]).all():
colour = (0, 255, 0) #Green
frame = cv2.circle(
frame, (int(circle[0][0]), int(circle[0][1])), int(circle[1]), colour
)
else:
colour = (0, 0, 255) #Red
for tape in pair:
frame = cv2.drawContours(
frame, [np.int0(tape[1])], 0, colour, thickness=2
)
return frame
def getRetroPos(frame: np.array, annotated: bool, hsv: np.array, mask: np.array) -> (np.array, float, float):
"""Function for finding retro-reflective tape"""
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV, dst=hsv)
# Convert to HSV to make the mask easier
mask = cv2.inRange(hsv, lowerGreen, higherGreen, dst=mask)
# Create a mask of everything in between the greens
_, contours, _ = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# Find the contours
    if len(contours) <= 1:
        return frame, math.nan, math.nan
    # Get contours with area above magic number 10 and append its smallest rectangle
rects = []
for cnt in contours:
if cv2.contourArea(cnt) > minContourArea:
rects.append(cv2.minAreaRect(cnt))
boxed_and_angles = []
for rect in rects:
if math.isclose(rect[2], leftAngleSize, abs_tol=angleOffset):
boxed_and_angles.append([False, np.array(cv2.boxPoints(rect)), cv2.contourArea(cv2.boxPoints(rect))])
elif math.isclose(rect[2], rightAngleSize, abs_tol=angleOffset):
boxed_and_angles.append([True, np.array(cv2.boxPoints(rect)), cv2.contourArea(cv2.boxPoints(rect))])
pairs = []
leftRect = None
for rect in sorted(
boxed_and_angles, key=lambda x: max(x[1][:, 0]) if x[0] else min(x[1][:, 0])
): # Get rectangle pairs
if not rect[0]:
leftRect = rect
elif leftRect and math.isclose(leftRect[2], rect[2], abs_tol=0.3*leftRect[2]):
pairs.append((leftRect, rect))
leftRect = None
if len(pairs) < 1:
return frame, math.nan, math.nan
closestToMiddle = list(min(
pairs, key=lambda x: abs(np.mean([x[0][1][:,0] + x[1][1][:,0]]) - screenSize[0])
))
closestToMiddle = [closestToMiddle[0][1], closestToMiddle[1][1]]
(x, y), radius = cv2.minEnclosingCircle(np.array(closestToMiddle).reshape(-1, 2))
if annotated:
frame = createAnnotatedDisplay(frame, pairs, closestToMiddle, ((x, y), radius))
dist, offset = getDistance(closestToMiddle)
return (
frame,
dist,
offset,
)
if __name__ == "__main__":
if len(sys.argv) >= 2:
configFile = sys.argv[1]
# read configuration
cameraConfigs = readConfig()
# start NetworkTables
NetworkTables.initialize(server="10.47.74.2")
NetworkTables.setUpdateRate(1)
nt = NetworkTables.getTable("/vision")
ping = nt.getEntry("ping")
raspi_pong = nt.getEntry("raspi_pong")
rio_pong = nt.getEntry("rio_pong")
entry_game_piece = nt.getEntry("game_piece")
entry_dist = nt.getEntry("fiducial_x")
entry_offset = nt.getEntry("fiducial_y")
entry_fiducial_time = nt.getEntry("fiducial_time")
entry_camera = nt.getEntry("using_cargo_camera")
# start cameras
cameras = []
for cameraConfig in cameraConfigs:
cameras.append(startCamera(cameraConfig))
cargo_rocket_sink = cameras[0][0].getVideo(camera=cameras[0][1])
hatch_sink = cameras[1][0].getVideo(camera=cameras[1][1])
source = cameras[0][0].putVideo("Driver_Stream", screenX, screenY)
frame = np.zeros(shape=(screenSize[1], screenSize[0], 3), dtype=np.uint8)
image = np.zeros(shape=(screenSize[1], screenSize[0], 3), dtype=np.uint8)
hsv = np.zeros(shape=(screenSize[1], screenSize[0], 3), dtype=np.uint8)
mask = np.zeros(shape=(screenSize[1], screenSize[0]), dtype=np.uint8)
img = np.zeros(shape=(screenSize[1], screenSize[0], 3), dtype=np.uint8)
old_ping_time = 0
while True:
ping_time = ping.getNumber(0)
if abs(ping_time - old_ping_time) > 0.00000001:
raspi_pong.setNumber(time.monotonic())
rio_pong.setNumber(ping_time)
old_ping_time = ping_time
game_piece = entry_game_piece.getBoolean(0)
fiducial_time = time.monotonic()
sink = hatch_sink if game_piece == 0 else cargo_rocket_sink
entry_camera.setBoolean(False if not game_piece else True)
frame_time, frame = sink.grabFrameNoTimeout(image=frame)
if frame_time == 0:
print(sink.getError(), file=sys.stderr)
source.notifyError(sink.getError())
outtake = False
percent = math.nan
else:
image, dist, offset = getRetroPos(frame, True, hsv, mask)
source.putFrame(image)
if not math.isnan(dist):
if game_piece == 1:
dist *= -1
offset *= -1
entry_dist.setNumber(dist)
entry_offset.setNumber(offset)
entry_fiducial_time.setNumber(fiducial_time)
NetworkTables.flush()
| 2.3125
| 2
|
kite-python/kite_common/kite/ioutils/stream.py
|
kiteco/kiteco-public
| 17
|
12783140
|
<reponame>kiteco/kiteco-public
import json
def loadjson(f, bufsize=1000000, **kwargs):
"""
Load a sequence of json objects from a stream
"""
decoder = json.JSONDecoder(**kwargs)
last_error = None
cur = ""
while True:
buf = f.read(bufsize)
if buf:
cur += buf.decode()
cur = cur.lstrip() # in rare cases, there are spaces in front causing problems decoding
elif last_error is not None:
raise last_error
else:
return
while cur:
# Consume an object if possible
try:
last_error = None
obj, consumed = decoder.raw_decode(cur)
cur = cur[consumed:]
except ValueError as ex:
last_error = ex
break
# consume whitespace, if any
offset = 0
while offset < len(cur) and cur[offset].isspace():
offset += 1
cur = cur[offset:]
# yield the object that was consumed
yield obj
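# Minimal usage sketch (hypothetical data, guarded so importing the module stays
# side-effect free): loadjson expects a binary stream and yields each decoded
# object as soon as enough bytes have been read, even across buffer boundaries.
if __name__ == "__main__":
    import io
    stream = io.BytesIO(b'{"a": 1} {"b": 2}\n{"c": 3}')
    for obj in loadjson(stream, bufsize=8):
        print(obj)  # {'a': 1}, then {'b': 2}, then {'c': 3}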
| 2.921875
| 3
|
vheLC/determinePrior/determinePrior.py
|
orelgueta/blazar-variability-study
| 1
|
12783141
|
#!/usr/bin/python
import os.path
import os
import sys
import glob
import subprocess
import numpy as np
import numpy.lib.recfunctions as rfn
from astropy.io import fits
from astropy.stats import bayesian_blocks
import argparse
from collections import defaultdict
def checkDatFile(datFileName):
if not os.path.isfile(datFileName):
print(datFileName + ' is not a file!\n')
return False
return True
def extractErrors(errorStr):
errors = errorStr.replace('(', '').replace(')', '').split(' - ')
return float(errors[0]), float(errors[1])
def calibrate_ncp_prior(flux=None, fluxerr=None, time=None, timebin=None,
p_0=[0.05], n_sims=1000, min_prior=0.2, max_prior=4,
n_steps=20, outPrefix=None):
# path='./gammas/', exp='VERITAS', source=''):
# Calibration of ncp_prior:
# input:
# flux, fluxerr, time, timebin : Lightcurve in format numpy.ndarray or pandas.Series
# p_0 : FPR input array
# n_sims : float
# min_prior : float/int
# max_prior : float/int
# n_stepts : number of steps in [min_prior, max_prior]
sourceNow = outPrefix.split('/')[0]
falsecount = np.zeros(n_steps)
ncp_priors = np.linspace(min_prior, max_prior, n_steps)
result = {}
best = {}
# distance between points not relevant but should be ordered
x = np.arange(len(flux))
average = np.average(flux, weights=fluxerr)
# simulating lightcurves for n_sims times and applying algorithem
# in n_steps steps between min_prior and max_prior. Afterwards
# false positive rate is calculated if a block was detected.
for k in range(n_sims):
if k % 10 == 0:
print(sourceNow, 'current simulation: {}'.format(k))
# simulate the flux values
datapoints = np.random.normal(average, fluxerr, len(fluxerr))
# aply bayesian block and count fpr
for l, ncp_prior in enumerate(ncp_priors):
gamma = 10**(-ncp_prior)
bb = bayesian_blocks(x, datapoints, fluxerr, fitness='measures', gamma=gamma)
if len(bb) > 2:
falsecount[l] += 1
fp_rate = falsecount/n_sims
# Final result of FPR in dependency of ncp_prior
result = np.core.records.fromarrays([ncp_priors, fp_rate], names='ncp, fp')
# Calculation of best results for the values in p_0
for p0 in p_0:
best[str(p0)] = result[(np.abs(result.fp - p0)).argmin()]
# Saving result and best to txt file
with open(outPrefix + '_result.txt', 'wb') as fOut:
np.savetxt(fOut, result)
# with open(outPrefix + '_results_best.txt', 'wb') as fOut:
# np.savetxt(fOut, [best])
return(result, best)
def readSwiftLC(swiftFileName, rebin, veritasObs):
swiftFile = open(swiftFileName, 'r')
date, dateErrUp, dateErrDn = list(), list(), list()
rate, rateErrUp, rateErrDn = list(), list(), list()
mode = list()
for line in swiftFile:
if '!' in line:
if 'WT data' in line:
modeNow = 'WT'
continue
if 'PC data' in line:
modeNow = 'PC'
continue
if 'Upper limit' in line:
break
if '!' not in line and len(line) > 1 and 'NO' not in line and 'READ' not in line:
date.append(float(line.split()[0].strip()))
dateErrUp.append(abs(float(line.split()[1].strip())))
dateErrDn.append(abs(float(line.split()[2].strip())))
rate.append(float(line.split()[3].strip()))
rateErrUp.append(abs(float(line.split()[4].strip())))
rateErrDn.append(abs(float(line.split()[5].strip())))
mode.append(modeNow)
swiftData = np.c_[date, dateErrDn, dateErrUp,
rate, rateErrDn, rateErrUp,
mode]
headersType = {'names': ('Date', 'Date error down', 'Date error up',
'Rate', 'Rate error down', 'Rate error up',
'mode'),
'formats': ('f8', 'f8', 'f8',
'f8', 'f8', 'f8',
'U40')}
swiftData = np.core.records.fromarrays(swiftData.transpose(), dtype=headersType)
if rebin == 'monthly' or rebin == 'weekly' or rebin == 'yearly':
if rebin == 'yearly':
# Take only contemporaneous observations
swiftMask = list()
for swiftObsNow in swiftData['Date']:
keepSwift = False
for veritasObsNow in veritasObs:
if abs(swiftObsNow - veritasObsNow) < 1:
keepSwift = True
swiftMask.append(keepSwift)
swiftData = swiftData[swiftMask]
nDays = 28
if rebin == 'yearly':
nDays = 365
if rebin == 'weekly':
nDays = 7
mjd_min = 53423 # This is exactly 147 weeks before the start day of Fermi
mjd_max = 58465 # This is ~today
nBins = int((mjd_max - mjd_min)/nDays)
timeBins = np.linspace(mjd_min, mjd_max, nBins, False)
date, dateErrDn, dateErrUp = list(), list(), list()
rate, rateErrDn, rateErrUp = list(), list(), list()
mode = list()
for i_bin, edgeDn in enumerate(timeBins):
edgeUp = 1e6
if i_bin < len(timeBins) - 1:
edgeUp = timeBins[i_bin+1]
# TODO - should we divide into the different modes?
tempSwiftData = swiftData[(edgeDn <= swiftData['Date']) & (swiftData['Date'] < edgeUp)]
if len(tempSwiftData) > 0:
date.append(np.average(tempSwiftData['Date']))
dateErrDn.append(date[-1] - np.min(tempSwiftData['Date']))
dateErrUp.append(np.max(tempSwiftData['Date'] - date[-1]))
totalError = tempSwiftData['Rate error down'] + tempSwiftData['Rate error up']
rate.append(np.average(tempSwiftData['Rate'], weights=1./totalError))
rateErrDn.append(np.sqrt(np.sum(np.power(tempSwiftData['Rate error down'], 2))))
rateErrUp.append(np.sqrt(np.sum(np.power(tempSwiftData['Rate error up'], 2))))
mode.append('Combined')
swiftData = np.c_[date, dateErrDn, dateErrUp,
rate, rateErrDn, rateErrUp,
mode]
swiftData = np.core.records.fromarrays(swiftData.transpose(), dtype=headersType)
return swiftData
def rebinFermi(fermiLC, veritasObs):
# First convert to numpy array to make it easier
fermiLC = np.c_[fermiLC['tmax_mjd'], fermiLC['tmin_mjd'], fermiLC['flux'], fermiLC['flux_err']]
headersType = {'names': ('tmax_mjd', 'tmin_mjd', 'flux', 'flux_err'),
'formats': ('f8', 'f8', 'f8', 'f8')}
fermiLC = np.core.records.fromarrays(fermiLC.transpose(), dtype=headersType)
# Take only contemporaneous observations (in this case, within a month)
# fermiBlocks = bayesian_blocks(fermiLC['tmax_mjd'], fermiLC['flux'], fermiLC['flux_err']
fermiMask = list()
for fermiDataPoint in fermiLC['tmax_mjd']:
keepFermi = False
for veritasObsNow in veritasObs:
if abs(fermiDataPoint - veritasObsNow) < 28:
keepFermi = True
fermiMask.append(keepFermi)
fermiLC = fermiLC[fermiMask]
nDays = 365
mjd_min = 53423 # This is exactly 147 weeks before the start day of Fermi
mjd_max = 58465 # This is ~today
nBins = int((mjd_max - mjd_min)/nDays)
timeBins = np.linspace(mjd_min, mjd_max, nBins, False)
rebinnedFermi = defaultdict(list)
for i_bin, edgeDn in enumerate(timeBins):
edgeUp = 1e6
if i_bin < len(timeBins) - 1:
edgeUp = timeBins[i_bin+1]
tempFermiData = fermiLC[(edgeDn <= fermiLC['tmax_mjd']) & (fermiLC['tmax_mjd'] < edgeUp)]
if len(tempFermiData) > 0:
rebinnedFermi['tmax_mjd'].append(np.average(tempFermiData['tmax_mjd']))
rebinnedFermi['tmin_mjd'].append(np.average(tempFermiData['tmin_mjd']))
rebinnedFermi['flux'].append(np.average(tempFermiData['flux'],
weights=1./tempFermiData['flux_err']))
rebinnedFermi['flux_err'].append(np.sqrt(np.sum(np.power(tempFermiData['flux_err'],
2))))
fermiLC = np.c_[rebinnedFermi['tmax_mjd'], rebinnedFermi['tmin_mjd'],
rebinnedFermi['flux'], rebinnedFermi['flux_err']]
fermiLC = np.core.records.fromarrays(fermiLC.transpose(), dtype=headersType)
return fermiLC
def readCorrTable(corrTableFile):
headersType = {'names': ('Left edges', 'Right edges',
'Correction factor', 'CorrFactorError',
'CorrFactorErrorCons'),
'formats': ('f8', 'f8', 'f8', 'f8', 'f8')}
return np.loadtxt(corrTableFile, dtype=headersType)
def correctFluxesFromCrabLC(origLC, corrTable):
corrLC = np.copy(origLC)
for i_point, dateNow in enumerate(corrLC['DateMJD']):
corrBin = np.argmax(dateNow < corrTable['Right edges'])
if corrTable['Correction factor'][corrBin] != 1:
corrLC['Flux'][i_point] = (corrLC['Flux'][i_point] /
corrTable['Correction factor'][corrBin])
corrLC['Flux Error'][i_point] = np.sqrt(np.power(corrLC['Flux Error'][i_point], 2) +
np.power(corrTable['CorrFactorError'][corrBin] *
corrLC['Flux'][i_point], 2))
return corrLC
def correctFluxes(origLC, corrTable):
corrLC = correctFluxesFromCrabLC(origLC, corrTable)
# We increased the threshold, so no need to add a systematic uncertainty anymore
return corrLC
def determinePriors(veritasDatFileName, fermiFile, swiftFullFileName, corrTable,
veritasObsFile, sourceNow, binning):
for fileNow in [veritasDatFileName, fermiFile, swiftFullFileName]:
if not checkDatFile(fileNow):
return
veritasDatFile = open(veritasDatFileName, 'r')
headersType = {'names': ('DateMJD', 'Date Error',
'Flux', 'Flux Error'),
'formats': ('f8', 'f8',
'f8', 'f8')}
veritasData = np.loadtxt(veritasDatFile, dtype=headersType)
veritasFluxes = veritasData[veritasData['Flux Error'] > 0]
veritasFluxes = correctFluxes(veritasFluxes, corrTable)
nsims = 15000
n_steps = 40
experiment = 'veritas'
outPrefix = '{}/{}_{}_{}'.format(sourceNow,
experiment,
binning,
str(nsims))
result, best = calibrate_ncp_prior(flux=veritasFluxes['Flux'],
fluxerr=veritasFluxes['Flux Error'],
time=veritasFluxes['DateMJD'],
timebin=veritasFluxes['Date Error'],
p_0=[0.01, 0.05], n_sims=nsims,
min_prior=0.2, max_prior=4,
n_steps=n_steps, outPrefix=outPrefix)
gamma = 10**(- best[str(0.01)].ncp)
print(sourceNow, 'VERITAS', 'gamma - ', gamma)
if binning == 'yearly' or binning == 'monthly':
fermiDatFile = open(fermiFile, 'rb')
fermiLC = np.load(fermiDatFile, encoding='latin1').flat[0]
if binning == 'yearly':
headersType = {'names': ('run', 'date', 'flux', 'fluxError',
'significance', 'ze'),
'formats': ('f8', 'f8', 'f8', 'f8',
'f8', 'f8')}
veritasObs = np.loadtxt(veritasObsFile, dtype=headersType)
fermiLC = rebinFermi(fermiLC, veritasObs['date'])
experiment = 'fermi'
outPrefix = '{}/{}_{}_{}'.format(sourceNow,
experiment,
binning,
str(nsims))
result, best = calibrate_ncp_prior(flux=fermiLC['flux'],
fluxerr=fermiLC['flux_err'],
time=fermiLC['tmax_mjd'],
timebin=fermiLC['tmax_mjd'] - fermiLC['tmin_mjd'],
p_0=[0.01, 0.05], n_sims=nsims,
min_prior=0.2, max_prior=4,
n_steps=n_steps, outPrefix=outPrefix)
gamma = 10**(- best[str(0.01)].ncp)
print(sourceNow, 'Fermi', 'gamma - ', gamma)
swiftBinnings = [binning]
if binning == 'yearly': # run also the daily for Swift in this case
swiftBinnings = ['daily', 'yearly']
for swiftBinNow in swiftBinnings:
if swiftBinNow == 'yearly':
veritasObsDates = veritasObs['date']
else:
veritasObsDates = list()
        swiftData = readSwiftLC(swiftFullFileName, swiftBinNow, veritasObsDates)
experiment = 'swift'
outPrefix = '{}/{}_{}_{}'.format(sourceNow,
experiment,
swiftBinNow,
str(nsims))
swiftRateErrorAverage = (swiftData['Rate error down'] + swiftData['Rate error up'])/2.
result, best = calibrate_ncp_prior(flux=swiftData['Rate'],
fluxerr=swiftRateErrorAverage,
time=swiftData['Date'],
timebin=(swiftData['Date error down'] +
swiftData['Date error up']),
p_0=[0.01, 0.05], n_sims=nsims,
min_prior=0.2, max_prior=4,
n_steps=n_steps, outPrefix=outPrefix)
gamma = 10**(- best[str(0.01)].ncp)
print(sourceNow, 'Swift', swiftBinNow, 'gamma - ', gamma)
return
if __name__ == '__main__':
np.random.seed(1234)
parser = argparse.ArgumentParser(description=('Calculate optimal '
'priors for Bayesian blocks.'))
parser.add_argument('source')
parser.add_argument('binning')
args = parser.parse_args()
sources = {'1ES0033': '1ES 0033+595',
'1ES0502': '1ES 0502+675',
'1ES1011': '1ES 1011+496',
'1ES1218': '1ES 1218+304',
'1ES0229': '1ES 0229+200',
'RGBJ0710': 'RGB J0710+591',
'PG1553': 'PG 1553+113',
'PKS1424': 'PKS 1424+240'
}
if args.source not in sources:
print('Source', args.source, 'not known')
hdulist = fits.open(('/afs/ifh.de/group/cta/scratch/ogueta/sw/anaconda/envs/fermi/'
'lib/python2.7/site-packages/fermipy/data/catalogs/gll_psc_8year_v5.fit'))
sourceCatalog = hdulist[1].data
workDir = os.getcwd() + '/'
fermiPrefix = '/lustre/fs19/group/cta/users/ogueta/fermi/variabilityStudy/'
veritasPrefix = '/afs/ifh.de/group/cta/scratch/ogueta/vts/variabilityStudy/makeLC/'
swiftPrefix = '/afs/ifh.de/group/cta/scratch/ogueta/vts/variabilityStudy/swift/onlineTool/'
corrTableFile = ('/afs/ifh.de/group/cta/scratch/ogueta/vts/variabilityStudy/'
'crabStability/plotLC/correctionFactors.txt')
veritasObsPrefix = '/afs/ifh.de/group/cta/scratch/ogueta/vts/variabilityStudy/spectra/'
for i_src, sourceTeV in enumerate(sourceCatalog['ASSOC_TEV']):
if sources[args.source] in sourceTeV:
fermiLC = sourceCatalog['Source_Name'][i_src].replace(' ', '_').lower()
fermiLC += '_lightcurve.npy'
fermiBinning = args.binning
if fermiBinning != 'monthly':
fermiBinning = 'monthly'
fermiFile = os.path.join(fermiPrefix, '{}LightCurves'.format(fermiBinning),
args.source, args.source, fermiLC)
veritasDirectory = os.path.join(veritasPrefix, args.source)
veritasLC = glob.glob(os.path.join(veritasDirectory,
'{}*fullEnergyRange*.txt'.format(args.binning)))[0]
veritasFile = os.path.join(veritasDirectory, veritasLC)
corrTable = readCorrTable(corrTableFile)
veritasObsFile = os.path.join(os.path.join(veritasObsPrefix, args.source), 'fluxPerRun.txt')
swiftFile = os.path.join(swiftPrefix, args.source,
'dailyBins', '{}_lightcurve.qdp'.format(args.source))
try:
subprocess.check_call(['mkdir', '-p', args.source])
except subprocess.CalledProcessError as e:
print('Could not create output directory')
sys.exit(1)
determinePriors(veritasFile, fermiFile, swiftFile, corrTable,
veritasObsFile, args.source, args.binning)
| 2.0625
| 2
|
app/src/yolov3/models/layers/upsample_layer.py
|
customr/detection_demo
| 58
|
12783142
|
import tensorflow as tf
def upsample_layer(name, inputs):
"""
Takes the outputs of the previous convolutional layer and upsamples them by a factor of two
using the 'nearest neighbor' method.
Parameters
----------
name : string
The name of the tensor to be used in TensorBoard.
inputs : tensor
The output of the previous convolutional layer.
This tensor will have the shape of:
[batch_size, h, w, c]
Returns
-------
inputs : tensor
A tensor of shape:
[batch_size, 2 * h, 2 * w, c]
"""
with tf.variable_scope(name):
inputs = tf.image.resize_nearest_neighbor(inputs, (inputs.shape[1]*2, inputs.shape[2]*2))
return inputs
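# Minimal usage sketch (assumes the TensorFlow 1.x graph-mode API already used
# above; the scope name and feature-map size are made up): a [None, 13, 13, 256]
# input comes back as [None, 26, 26, 256].
if __name__ == "__main__":
    feature_map = tf.placeholder(tf.float32, shape=[None, 13, 13, 256])
    upsampled = upsample_layer("upsample_0", feature_map)
    print(upsampled.shape)  # (?, 26, 26, 256)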
| 3.65625
| 4
|
oldqa/qa/src/core_tests/Core_Tests.py
|
KDahlgren/pyLDFI
| 6
|
12783143
|
#!/usr/bin/env python
'''
Core_Tests.py
Defines unit tests for core.
'''
#############
# IMPORTS #
#############
# standard python packages
import inspect, os, sqlite3, sys, unittest
from StringIO import StringIO
# ------------------------------------------------------ #
# import sibling packages HERE!!!
sys.path.append( os.path.abspath( __file__ + "/../../../../src" ) )
from dedt import dedt, dedalusParser, clockRelation, dedalusRewriter
from utils import tools
# ------------------------------------------------------ #
testPath = os.path.abspath(__file__+"/../../../../qa")
################
# CORE TESTS #
################
class Core_Tests( unittest.TestCase ) :
##########################
# INSTANTIATE LDFICORE #
##########################
#
#############
# ATTRIBS #
#############
def test_LDFICoreAttribs_dedt( self ) :
return None
#########################
# THREAD OF EXECUTION #
#########################
# use this main if running this script exclusively.
if __name__ == "__main__" :
unittest.main( verbosity=2 )
#########
# EOF #
#########
| 2.203125
| 2
|
Coursera/Google_IT_Automation_with_Python/01_Crash_Course_on_Python/Week_3/wk3_mod1_ex3.py
|
ssolomon2020/Self_Study_Python_Training
| 0
|
12783144
|
<gh_stars>0
# Specialization: Google IT Automation with Python
# Course 01: Crash Course with Python
# Week 3 Module Part 1 Exercise 03
# Student: <NAME>
# Learning Platform: Coursera.org
# The following code causes an infinite loop. Can you figure out what’s missing and how to fix it?
# def print_range(start, end):
# # Loop through the numbers from start to end
# n = start
# while n <= end:
# print(n)
#
# print_range(1, 5) # Should print 1 2 3 4 5 (each number on its own line)
def print_range(start, end):
# Loop through the numbers from start to end
n = start
while n <= end:
print(n)
n += 1 #! <-- Counter was initialized but it did not increment beforehand.
print_range(1, 5) # Should print 1 2 3 4 5 (each number on its own line)
| 4.1875
| 4
|
setup.py
|
diamond-org/puppet-diamond
| 0
|
12783145
|
<reponame>diamond-org/puppet-diamond<gh_stars>0
# -*- coding: utf-8 -*-
# Puppet-Diamond (c) <NAME>
import os
import re
import codecs
from setuptools import setup
from setuptools import find_packages
def read(*rnames):
return codecs.open(os.path.join(os.path.dirname(__file__), *rnames), 'r', 'utf-8').read()
def grep(attrname):
pattern = r"{0}\W*=\W*'([^']+)'".format(attrname)
strval, = re.findall(pattern, read('puppet_diamond/__meta__.py'))
return strval
setup(
version=grep('__version__'),
name='Puppet-Diamond',
description="Puppet-Diamond can manage an IT Enterprise consisting of many Linux servers.",
scripts=[
"bin/pup",
"bin/generate_sshd_keys.sh",
"bin/get_submodules.sh",
],
long_description=read('Readme.rst'),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX",
"Operating System :: MacOS :: MacOS X",
"Programming Language :: Python :: 2.7",
"Topic :: System :: Clustering",
"Topic :: System :: Systems Administration",
],
packages=find_packages(),
include_package_data=True,
keywords='',
author=grep('__author__'),
author_email=grep('__email__'),
url=grep('__url__'),
install_requires=read('requirements.txt'),
license='MIT',
zip_safe=False,
)
| 1.890625
| 2
|
pipedrive/abstract.py
|
bindlock/pipedrivepy
| 0
|
12783146
|
from .chain import Chain
class AbstractClient:
ENDPOINT = 'https://{domain}.pipedrive.com/api/{version}/{path}'
def __init__(self, domain: str, token: str, version: str = 'v1'):
self.domain = domain
self.token = token
self.version = version
def __getattr__(self, name: str) -> Chain:
return Chain(self).__getattr__(name)
def get_endpoint_url(self, path: str) -> str:
return self.ENDPOINT.format(domain=self.domain, version=self.version, path=path)
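# Minimal usage sketch (placeholder domain and token; no request is sent here,
# since get_endpoint_url only formats the ENDPOINT template). Attribute access
# such as client.deals is delegated to Chain and depends on the rest of the
# package, so only the URL helper is shown.
if __name__ == "__main__":
    client = AbstractClient(domain="mycompany", token="api-token")
    print(client.get_endpoint_url("deals"))
    # -> https://mycompany.pipedrive.com/api/v1/deals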
| 2.765625
| 3
|
yt-pi.py
|
R2Boyo25/yt-pi
| 6
|
12783147
|
from flask import Flask, abort, render_template, redirect, url_for, request, session, send_from_directory, flash, jsonify
from markupsafe import escape
import random, os, database, json, requests
from werkzeug.utils import secure_filename
# create the application object
app = Flask(__name__)
app.config['DataFolder'] = "/".join(os.path.abspath(__file__).split("/")[:-1]) + "/" + "data"
app.secret_key = os.urandom(24)
def programExists(name):
"""Check whether `name` is on PATH and marked as executable."""
# from whichcraft import which
from shutil import which
return which(name) is not None
def eflash(error, back, title="Error!", backt=None, extra=None):
extra = extra if extra else ""
backt = backt if backt else ""
return render_template("error.html", e=error, url=back, error=title, urlt=backt, extra=extra)
def CoSo(version):
version = str(version)
return render_template("comingSoon.html", ver=version, background=database.Database("config.json").get('background'))
@app.route("/", methods=['GET'])
def homePage():
try:
popup = request.args['popup']
except Exception as e:
popup = None
return render_template('homePage.html', version=database.Database("config.json").get("version"), popup = popup, background=database.Database("config.json").get('background'))
@app.route('/data/<path:filename>/')
def returnData(filename):
return send_from_directory(app.config['DataFolder'],
filename)
@app.route('/videos/')
def videosList():
links=['<!--This Page Was Auto Generated-->\n<div align=\"center\">\n<br>\n<a href=/ ><img src=/data/home.png height=17px /></a> <input type="text" id="mySearch" onkeyup="myFunction()" placeholder="Search.." title="Type in a category">\n<br><br><ul id="myMenu">']
f = []
for (dirpath, dirnames, filenames) in os.walk(database.Database("config.json").get("videofolder")):
f.extend(dirnames)
break
for thing in f:
links.append("\n <li><a align='center' href='{}'><img src='{}' height=12% width=15% /><br><b>{}</b></a><br></li>".format('/videos/'+thing.replace("'", "%27"), '/videos/'+thing.replace("'", "%27")+'/thumb',thing))
links.append('</ul></div>')
return render_template('videos.html', links=''.join(links), background=database.Database("config.json").get('background'))
@app.route('/videos/<video>')
def videoPage(video):
for root, dirs, files in os.walk(database.Database("config.json").get("videofolder") + '/' + video):
for file in files:
if file.endswith('.description'):
with open(database.Database("config.json").get("videofolder") + '/' + video + '/' + file, 'r') as de:
desc = de.read()
try:
desc
except:
desc=''
break
for root, dirs, files in os.walk(database.Database("config.json").get("videofolder") + '/' + video):
for file in files:
if file.endswith('.mp4') or file.endswith('.webm'):
return render_template("video.html", path='/vidfile/' + video.replace("'", "%27") + "/" + file, description=desc.replace("\n", "\n<br>"), title=video, background=database.Database("config.json").get('background'))
break
@app.route('/videos/<video>/thumb')
def videoPageThumb(video):
for root, dirs, files in os.walk(database.Database("config.json").get("videofolder") + '/' + video):
print(files)
for file in files:
if file.endswith('.png') or file.endswith('.jpg') or file.endswith('.webp') or file.endswith('.jpeg'):
return send_from_directory(database.Database("config.json").get("videofolder") + "/" + video + "/",
file)
break
return send_from_directory("data", "eye.png")
@app.route("/vidfile/<folder>/<file>")
def videourlpagething(folder, file):
return send_from_directory(database.Database("config.json").get("videofolder") + "/" + folder + "/",
file)
@app.route('/credits/')
def creditsPage():
return render_template('credits.html', background=database.Database("config.json").get('background'))
@app.route('/add/')
def addVideoPage():
return render_template('addVideo.html', background=database.Database("config.json").get('background'))
@app.route('/add/yt/', methods=['GET', 'POST'])
def downloadYtVideo():
if not programExists("youtube-dl"):
return eflash('youtube-dl is not installed or is not on your PATH.', request.url)
if request.method == 'POST':
url = request.form['url']
if url != '':
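            # youtube-dl writes each download into its own folder
            # ("<videofolder>/<title>/<title>.<ext>") together with the thumbnail
            # and description file that the /videos/ routes scan for.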
os.system("python3 -m youtube_dl -f best -o \"" + database.Database("config.json").get("videofolder") + "/%(title)s/%(title)s.%(ext)s\"" + " --write-thumbnail --write-description " + url)
return redirect('/')
else:
return render_template('download.html', error='You must specify a URL!', background=database.Database("config.json").get('background'))
else:
return render_template("download.html", background=database.Database("config.json").get('background'))
@app.route('/add/mp4/', methods=['GET', 'POST'])
def downloadYtMP4():
if not programExists("youtube-dl"):
return eflash('youtube-dl is not installed or is not on your PATH.', request.url)
if request.method == 'POST':
url = request.form['url']
if url != '':
            if os.path.exists("download0.mp4"):
                os.remove("download0.mp4")
os.system("python3 -m youtube_dl -f best -o " + "download0.mp4 " + url)
return send_from_directory(".",
"download0.mp4", as_attachment=True)
else:
return render_template('download.html', error='You must specify a URL!', background=database.Database("config.json").get('background'))
else:
return render_template("download.html", background=database.Database("config.json").get('background'))
@app.route('/add/upload/', methods=['GET', 'POST'])
def uploadLocalVideo():
if request.method == 'POST':
if 'file' not in request.files:
return eflash('No selected file', request.url)
file = request.files['file']
if file.filename == '':
return eflash('No selected file', request.url)
elif request.form['title'] == '':
return eflash('Title is required', request.url)
else:
filename = secure_filename(file.filename)
os.mkdir(database.Database("config.json").get("videofolder") + "/" + request.form['title'])
file.save(os.path.join(database.Database("config.json").get("videofolder") + "/"+request.form['title'], filename))
with open(database.Database("config.json").get("videofolder") + "/"+request.form['title'] + '/' + request.form['title'] + ".description", 'w') as file1:
file1.write(request.form['nm'])
return redirect('/videos/{}'.format(request.form['title']))
return "Nothing happened???"
else:
return render_template("upload.html", background=database.Database("config.json").get('background'))
@app.route('/settings/', methods=['GET', 'POST'])
def settingsPage():
if request.method == 'POST':
config = database.Database("config.json")
for field in request.form:
config.set(field, request.form[field])
return redirect("/?popup=Settings%20Successfully%20Saved")
else:
config = database.Database("config.json")
return render_template("settings.html", config=config, background=database.Database("config.json").get('background'))
@app.errorhandler(404)
def page_not_found(e):
return eflash(e, url_for("homePage"), "404: Not Found", "Go Home", "Feature you want added? Submit a request at <a href=https://github.com/r2boyo25/yt-pi/issues/new/choose>my GitHub page. </a>")
#return render_template('404.html', error=e)
@app.errorhandler(400)
def bad_requesthandler(e):
    return eflash(e, url_for("homePage"), "400: Bad Request", "Go Home", "Submit a bug report at <a href=https://github.com/r2boyo25/yt-pi/issues/new/choose>my GitHub page. </a>")
#return render_template('400.html', error=e)
if __name__ == "__main__":
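    # Self-update check: compare the version published in the upstream config.json
    # with the local version and, if a newer one exists, delete the local clone and
    # re-clone it from GitHub. The path test below skips the update when this script
    # lives inside the configured video folder, since "rm -rf yt-pi" would wipe it.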
currentConfig = json.loads(requests.get("https://raw.githubusercontent.com/R2Boyo25/yt-pi/master/config.json").text)
if float(currentConfig["version"]) > float(database.Database("config.json").get('version')):
if not ("/" + ( '/'.join(os.path.abspath(database.Database("config.json").get("videofolder")).split("/")) ) in os.path.abspath("yt-pi.py")):
os.chdir("./..")
os.system("rm -rf yt-pi")
os.system("git clone https://github.com/r2boyo25/yt-pi")
os.chdir("yt-pi")
app.run(debug=True, host='0.0.0.0', port=database.Database("config.json").get("port"))
| 2.125
| 2
|
src/apps/backup/management/commands/run_backup.py
|
tuxis/BuckuPy
| 0
|
12783148
|
<filename>src/apps/backup/management/commands/run_backup.py<gh_stars>0
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
from apps.backup.functions import backup_process
bk_obj = backup_process()
bk_obj.run_backup()
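# Illustrative usage (assuming the apps.backup application is installed):
#   python manage.py run_backup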
| 1.617188
| 2
|
individual.py
|
gabrielbiasi/evolutionary-rubiks-cube
| 1
|
12783149
|
# -*- coding: utf-8 -*-
"""
Universidade Federal de Minas Gerais
Department of Computer Science
Graduate Program in Computer Science
Natural Computing
Practical Assignment 1
Written by <NAME>, 2016672212.
"""
#--------------------------------------------------------------#
#------------------------ CONSTANTS ---------------------------#
#--------------------------------------------------------------#
# G0, G1.1, G1.2, G2.1, G2.2, G3
PHASE_START = [0, 1, 1, 2, 2, 4]
PHASE_END = [7, 13, 13, 15, 15, 17]
MOVES_SET = [ #-#-#-#-
['L', 'R', 'F', 'B', 'U', 'D' ], # G0 #
['L2', 'R2', 'F', 'B', 'U', 'D' ], # G1.1 #
['L2', 'R2', 'F', 'B', 'U', 'D' ], # G1.2 #
['L2', 'R2', 'F2', 'B2', 'U', 'D' ], # G2.1 #
['L2', 'R2', 'F2', 'B2', 'U', 'D' ], # G2.2 #
    ['L2', 'R2', 'F2', 'B2', 'U2', 'D2'] # G3   #
] #-#-#-#-
OP = {'O':'R','R':'O','W':'Y','Y':'W','G':'B','B':'G'}
INVERSE = {'F': 'Fi','L': 'Li','R': 'Ri','B': 'Bi',
'U': 'Ui','D': 'Di','Fi': 'F','Li': 'L','Ri': 'R',
'Bi': 'B','Ui': 'U','Di': 'D','F2': 'F2','L2': 'L2',
'R2': 'R2','B2': 'B2','U2': 'U2','D2': 'D2'}
SIMPLE_180 = {'F F2': 'Fi','L L2': 'Li','R R2': 'Ri',
'B B2': 'Bi','U U2': 'Ui','D D2': 'Di','F2 F': 'Fi',
'L2 L': 'Li','R2 R': 'Ri','B2 B': 'Bi','U2 U': 'Ui',
'D2 D': 'Di','Fi F2': 'F','Li L2': 'L','Ri R2': 'R',
'Bi B2': 'B','Ui U2': 'U','Di D2': 'D','F2 Fi': 'F',
'L2 Li': 'L','R2 Ri': 'R','B2 Bi': 'B','U2 Ui': 'U',
'D2 Di': 'D'}
#--------------------------------------------------------------#
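# The tables above drive the genetic operators: PHASE_START/PHASE_END bound how many
# moves a mutation may append in each phase, MOVES_SET restricts which face turns are
# allowed per phase (the G0..G3 groups), OP maps each color to its opposite, and
# INVERSE and SIMPLE_180 feed the move simplifications in Individual.clean().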
import copy, random
from cube import Cube
class Color():
    '''
    Important!
    This class behaves like an 'Enum', to make it
    easy to swap the reference colors in case the
    input file is inverted in some way.
    '''
TOP = 'Y'
BOTTOM = 'W'
FRONT = 'O'
BACK = 'R'
LEFT = 'G'
RIGHT = 'B'
class Individual(object):
def __init__(self, ind):
        '''
        Individual constructor.
        If the argument is itself an Individual,
        a copy of it is made.
        '''
if isinstance(ind, Individual):
self.cube = copy.deepcopy(ind.cube)
self.genes = list(ind.genes)
self.fitness = ind.fitness
self.phase = ind.phase
self.size = ind.size
else:
self.cube = copy.deepcopy(ind)
self.genes = list()
self.fitness = -1
self.phase = 0
self.size = 0
def __repr__(self):
return self.__str__()
def __str__(self):
        '''
        Graphical representation of the individual.
        '''
self.cube.colored_printf()
return "{}[PH{}][L{}][F{}]".format(self.phase, self.genes, self.size, self.fitness)
def apply(self, new_moves):
        '''
        Applies the new moves produced by mutation
        to the cube owned by this individual.
        '''
for gene in new_moves:
            mode = 0 # Clockwise move
            if len(gene) == 2:
                mode = 1 if gene[1] == 'i' else 2 # Counter-clockwise or 180-degree move
if gene[0] == 'F':
self.cube.move_f(mode)
elif gene[0] == 'R':
self.cube.move_r(mode)
elif gene[0] == 'U':
self.cube.move_u(mode)
elif gene[0] == 'B':
self.cube.move_b(mode)
elif gene[0] == 'L':
self.cube.move_l(mode)
elif gene[0] == 'D':
self.cube.move_d(mode)
def mutation(self, phase):
        '''
        Creates a new move sequence to be applied to the cube.
        Depending on the current phase of the algorithm, the
        allowed moves and the sequence length are limited by the
        PHASE_START, PHASE_END and MOVES_SET tables. Once
        generated, the moves are "cleaned" and appended to the
        individual.
        '''
        # Update the phase and reset the fitness
self.phase = phase
self.fitness = -1
        # Randomly generate a move sequence
new_genes = list()
new_size = random.randint(PHASE_START[self.phase], PHASE_END[self.phase])
for i in range(new_size):
new_genes.append(random.choice(MOVES_SET[self.phase]))
        # Apply the move sequence to the cube
self.apply(new_genes)
        # Clean up the move list
self.genes += new_genes
self.size += new_size
self.clean()
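    # For example, in phase 0 a mutation appends between PHASE_START[0] = 0 and
    # PHASE_END[0] = 7 moves drawn from MOVES_SET[0] (clockwise quarter turns of
    # any face), applies them to the cube and then simplifies the whole genome
    # through clean().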
def clean(self):
        '''
        Takes the new moves obtained from mutation and
        performs a clean-up pass, looking for complementary
        moves or moves that have no net effect on the cube.
        '''
i = 0
removed = 0
new_list = list(self.genes)
while i < self.size - removed - 1:
x = new_list[i]
y = new_list[i+1]
            #-#-# Consecutive inverse genes are removed #-#-#
if x == INVERSE[y]:
del new_list[i]
del new_list[i]
removed += 2
if i > 0:
i -= 1
            #-#-# Consecutive identical genes are merged into a single 180-degree gene #-#-#
elif x == y:
del new_list[i]
new_list[i] = str(new_list[i][0]+'2')
removed += 1
if i > 0:
i -= 1
            #-# Simplify a 90-degree plus a 180-degree move into an inverted 90 #-#
elif str(x+' '+y) in SIMPLE_180:
del new_list[i]
new_list[i] = SIMPLE_180[str(x+' '+y)]
removed += 1
if i > 0:
i -= 1
else:
i += 1
#-#-#
self.genes = new_list
self.size -= removed
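    # Illustrative example of the simplification rules, assuming the genome
    # ['F', 'Fi', 'U', 'U', 'R', 'R2']: 'F Fi' cancels out, 'U U' collapses to
    # 'U2' and 'R R2' simplifies to 'Ri', leaving ['U2', 'Ri'] (size 2).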
def get_fitness(self, phase):
        '''
        Fitness computation.
        Receives the algorithm's current phase as a parameter
        so the computation is performed correctly.
        '''
from main import CONST_PHASES
self.phase = phase
c = self.size
if self.fitness == -1:
result = 0
if self.phase == 0:
                '''
                Fitness for G0 -> G1
                The edges must be correctly oriented, i.e. it must be
                possible to put them in place without using the
                L and R moves.
                '''
w = 0
                #-#-# Mapping of every edge piece of the cube #-#-#
                # This took some work :(
edge_pieces = [
(self.cube.matrix[0][1][0], self.cube.matrix[1][1][2]), # O->G
(self.cube.matrix[0][1][2], self.cube.matrix[2][1][0]), # O->B
(self.cube.matrix[0][0][1], self.cube.matrix[4][2][1]), # O->Y
(self.cube.matrix[0][2][1], self.cube.matrix[5][0][1]), # O->W
(self.cube.matrix[3][1][0], self.cube.matrix[2][1][2]), # R->B
(self.cube.matrix[3][1][2], self.cube.matrix[1][1][0]), # R->G
(self.cube.matrix[3][0][1], self.cube.matrix[4][0][1]), # R->Y
(self.cube.matrix[3][2][1], self.cube.matrix[5][2][1]), # R->W
(self.cube.matrix[1][0][1], self.cube.matrix[4][1][0]), # G->Y
(self.cube.matrix[1][2][1], self.cube.matrix[5][1][0]), # G->W
(self.cube.matrix[2][0][1], self.cube.matrix[4][1][2]), # B->Y
(self.cube.matrix[2][2][1], self.cube.matrix[5][1][2]) # B->W
]
                # Each misoriented edge adds one penalty point.
for piece in edge_pieces:
if piece[0] in [Color.TOP, Color.BOTTOM]:
w += 1
elif piece[0] in [Color.LEFT, Color.RIGHT] and \
piece[1] in [Color.FRONT, Color.BACK]:
w += 1
                # Multiplication parameter
                # Raises or lowers the selective pressure of this phase.
result = (CONST_PHASES[0] * w) + c
elif self.phase == 1:
                '''
                Fitness for G1 -> G2 (Part 1)
                In this first part only the edges are placed in the
                middle layer. This eases convergence towards the real
                G1 -> G2 fitness computed in part 2.
                '''
w = 0
                #-#-# Penalty for edges outside the middle layer. #-#-#
                f = self.cube.matrix[0][1] # Front face, middle layer.
w += 0 if f[0] == Color.FRONT or f[0] == Color.BACK else 1
w += 0 if f[2] == Color.FRONT or f[2] == Color.BACK else 1
                f = self.cube.matrix[3][1] # Back face, middle layer.
w += 0 if f[0] == Color.FRONT or f[0] == Color.BACK else 1
w += 0 if f[2] == Color.FRONT or f[2] == Color.BACK else 1
                f = self.cube.matrix[1][1] # Left face, middle layer.
w += 0 if f[0] == Color.LEFT or f[0] == Color.RIGHT else 1
w += 0 if f[2] == Color.LEFT or f[2] == Color.RIGHT else 1
                f = self.cube.matrix[2][1] # Right face, middle layer.
w += 0 if f[0] == Color.LEFT or f[0] == Color.RIGHT else 1
w += 0 if f[2] == Color.LEFT or f[2] == Color.RIGHT else 1
                # Multiplication parameter
                # Raises or lowers the selective pressure of this phase.
result = (CONST_PHASES[1] * w) + c
elif self.phase == 2:
                '''
                Fitness for G1 -> G2 (Part 2)
                Every FRONT and BACK color must lie on the
                FRONT and BACK faces.
                '''
                # Same code as phase 1 #
w = 0
                #-#-# Penalty for edges outside the middle layer. #-#-#
                f = self.cube.matrix[0][1] # Front face, middle layer.
w += 0 if f[0] == Color.FRONT or f[0] == Color.BACK else 1
w += 0 if f[2] == Color.FRONT or f[2] == Color.BACK else 1
                f = self.cube.matrix[3][1] # Back face, middle layer.
w += 0 if f[0] == Color.FRONT or f[0] == Color.BACK else 1
w += 0 if f[2] == Color.FRONT or f[2] == Color.BACK else 1
                f = self.cube.matrix[1][1] # Left face, middle layer.
w += 0 if f[0] == Color.LEFT or f[0] == Color.RIGHT else 1
w += 0 if f[2] == Color.LEFT or f[2] == Color.RIGHT else 1
                f = self.cube.matrix[2][1] # Right face, middle layer.
w += 0 if f[0] == Color.LEFT or f[0] == Color.RIGHT else 1
w += 0 if f[2] == Color.LEFT or f[2] == Color.RIGHT else 1
result = (CONST_PHASES[1] * w) + c
                # End of the code shared with phase 1 #
v = 0
                #-#-# Penalty for each misoriented corner. #-#-#
                f = self.cube.matrix[4] # Top face
v += 0 if f[0][0] == Color.TOP or f[0][0] == Color.BOTTOM else 1
v += 0 if f[0][2] == Color.TOP or f[0][2] == Color.BOTTOM else 1
v += 0 if f[2][0] == Color.TOP or f[2][0] == Color.BOTTOM else 1
v += 0 if f[2][2] == Color.TOP or f[2][2] == Color.BOTTOM else 1
                f = self.cube.matrix[5] # Bottom face
v += 0 if f[0][0] == Color.TOP or f[0][0] == Color.BOTTOM else 1
v += 0 if f[0][2] == Color.TOP or f[0][2] == Color.BOTTOM else 1
v += 0 if f[2][0] == Color.TOP or f[2][0] == Color.BOTTOM else 1
v += 0 if f[2][2] == Color.TOP or f[2][2] == Color.BOTTOM else 1
                # Multiplication parameter
                # Raises or lowers the selective pressure of this phase.
result = (CONST_PHASES[2] * v) + result
elif self.phase == 3:
                '''
                Fitness for G2 -> G3 (Part 1)
                Every face must show only its own color or its opposite
                color, and neighbouring corners must share the same
                "side" color, regardless of top/bottom.
                '''
y = 0
                #-#-# Mapping of every corner of the cube #-#-#
                # This also took some work :(
all_corners = [
(self.cube.matrix[0][0][0], self.cube.matrix[1][0][2]), #Y-O-G
(self.cube.matrix[0][2][0], self.cube.matrix[1][2][2]), #W-O-G
(self.cube.matrix[0][0][2], self.cube.matrix[2][0][0]), #Y-O-B
(self.cube.matrix[0][2][2], self.cube.matrix[2][2][0]), #W-O-B
(self.cube.matrix[3][0][0], self.cube.matrix[2][0][2]), #Y-R-B
(self.cube.matrix[3][2][0], self.cube.matrix[2][2][2]), #W-R-B
(self.cube.matrix[3][0][2], self.cube.matrix[1][0][0]), #Y-R-G
(self.cube.matrix[3][2][2], self.cube.matrix[1][2][0]), #W-R-G
]
                #-#-# Penalty for each corner of the top layer whose color does not
                # match the corresponding corner of the bottom layer (forming a "column"). #-#-#
for i in range(0, 8, 2):
if all_corners[i][0] != all_corners[i+1][0] or \
all_corners[i][1] != all_corners[i+1][1]:
y += 1
                # Multiplication parameter
                # Raises or lowers the selective pressure of this phase.
result = (CONST_PHASES[3] * y) + c
elif self.phase == 4:
x, y = 0, 0
                # Same code as phase 3 #
                #-#-# Mapping of every corner of the cube #-#-#
                # This also took some work :(
all_corners = [
(self.cube.matrix[0][0][0], self.cube.matrix[1][0][2]), #Y-O-G
(self.cube.matrix[0][2][0], self.cube.matrix[1][2][2]), #W-O-G
(self.cube.matrix[0][0][2], self.cube.matrix[2][0][0]), #Y-O-B
(self.cube.matrix[0][2][2], self.cube.matrix[2][2][0]), #W-O-B
(self.cube.matrix[3][0][0], self.cube.matrix[2][0][2]), #Y-R-B
(self.cube.matrix[3][2][0], self.cube.matrix[2][2][2]), #W-R-B
(self.cube.matrix[3][0][2], self.cube.matrix[1][0][0]), #Y-R-G
(self.cube.matrix[3][2][2], self.cube.matrix[1][2][0]), #W-R-G
]
                #-#-# Penalty for each corner of the top layer whose color does not
                # match the corresponding corner of the bottom layer (forming a "column"). #-#-#
for i in range(0, 8, 2):
if all_corners[i][0] != all_corners[i+1][0] or \
all_corners[i][1] != all_corners[i+1][1]:
y += 1
result = (CONST_PHASES[3] * y) + c
                # End of the code shared with phase 3 #
                #-#-# Penalty for every sticker whose color is neither the
                # face's own color nor the color of the opposite face. #-#-#
for face in self.cube.matrix:
center = face[1][1]
for i in range(3):
for j in range(3):
if face[i][j] != center and face[i][j] != OP[center]:
x += 1
                # Multiplication parameter
                # Raises or lowers the selective pressure of this phase.
result = (CONST_PHASES[4] * x) + result
elif self.phase == 5:
                '''
                Fitness for G3 -> G4 (solved)
                Now only 180-degree moves are allowed; the fitness
                function simply compares the color of every sticker
                against its face's center.
                '''
z = 0
                #-#-# Final phase: one penalty point for every sticker
                # whose color differs from its face's center. #-#-#
for face in self.cube.matrix:
center = face[1][1]
for i in range(3):
for j in range(3):
if face[i][j] != center:
z += 1
                # Multiplication parameter
                # Raises or lowers the selective pressure of this phase.
result = (CONST_PHASES[5] * z) + c
self.fitness = result
return self.fitness
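# Illustrative usage sketch (assumes a scrambled Cube instance and the CONST_PHASES
# weights defined in main.py):
#   ind = Individual(cube)      # wrap the cube in an individual
#   ind.mutation(0)             # append and simplify a random phase-0 move sequence
#   score = ind.get_fitness(0)  # weighted penalty plus genome length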
| 2.078125
| 2
|
Interview-Preparation/Amazon/treesgraphs-flood-fill.py
|
shoaibur/SWE
| 1
|
12783150
|
<filename>Interview-Preparation/Amazon/treesgraphs-flood-fill.py<gh_stars>1-10
from typing import List

class Solution:
def floodFill(self, image: List[List[int]], sr: int, sc: int, newColor: int) -> List[List[int]]:
if not image: return image
nrow = len(image)
ncol = len(image[0])
color = image[sr][sc]
if color == newColor: return image
def dfs(image, i, j):
if image[i][j] == color:
image[i][j] = newColor
if i > 0:
dfs(image, i-1, j)
if i < nrow-1:
dfs(image, i+1, j)
if j > 0:
dfs(image, i, j-1)
if j < ncol-1:
dfs(image, i, j+1)
dfs(image, sr, sc)
return image
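# Illustrative example (hypothetical call): Solution().floodFill([[1,1,1],[1,1,0],[1,0,1]], 1, 1, 2)
# returns [[2,2,2],[2,2,0],[2,0,1]] -- only the 4-connected region of 1s containing
# (1, 1) is recolored; the diagonal 1 at (2, 2) is left untouched.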
| 3.390625
| 3
|