hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
301985af44c06bf96e405cb532bf976c7a4bcc1a | 2,687 | py | Python | fastestimator/op/numpyop/univariate/random_fog.py | DwijayDS/fastestimator | 9b288cb2bd870f971ec4cee09d0b3205e1316a94 | [
"Apache-2.0"
] | 57 | 2019-05-21T21:29:26.000Z | 2022-02-23T05:55:21.000Z | fastestimator/op/numpyop/univariate/random_fog.py | vbvg2008/fastestimator | 6061a4fbbeb62a2194ef82ba8017f651710d0c65 | [
"Apache-2.0"
] | 93 | 2019-05-23T18:36:07.000Z | 2022-03-23T17:15:55.000Z | fastestimator/op/numpyop/univariate/random_fog.py | vbvg2008/fastestimator | 6061a4fbbeb62a2194ef82ba8017f651710d0c65 | [
"Apache-2.0"
] | 47 | 2019-05-09T15:41:37.000Z | 2022-03-26T17:00:08.000Z | # Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Iterable, Union
from albumentations.augmentations.transforms import RandomFog as RandomFogAlb
from fastestimator.op.numpyop.univariate.univariate import ImageOnlyAlbumentation
from fastestimator.util.traceability_util import traceable
@traceable()
class RandomFog(ImageOnlyAlbumentation):
"""Add fog to an image.
Args:
inputs: Key(s) of images to be modified.
outputs: Key(s) into which to write the modified images.
mode: What mode(s) to execute this Op in. For example, "train", "eval", "test", or "infer". To execute
regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument
like "!infer" or "!train".
ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. To execute in all
ds_ids except for a particular one, you can pass an argument like "!ds1".
fog_coef_lower: Lower limit for fog intensity coefficient. Should be in the range [0, 1].
fog_coef_upper: Upper limit for fog intensity coefficient. Should be in the range [0, 1].
alpha_coef: Transparency of the fog circles. Should be in the range [0, 1].
Image types:
uint8, float32
"""
| 45.542373 | 120 | 0.637514 | # Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Iterable, Union
from albumentations.augmentations.transforms import RandomFog as RandomFogAlb
from fastestimator.op.numpyop.univariate.univariate import ImageOnlyAlbumentation
from fastestimator.util.traceability_util import traceable
@traceable()
class RandomFog(ImageOnlyAlbumentation):
    """Overlay randomized fog onto an image.

    Args:
        inputs: Key(s) of images to be modified.
        outputs: Key(s) into which to write the modified images.
        mode: What mode(s) to execute this Op in. For example, "train", "eval", "test", or "infer". To execute
            regardless of mode, pass None. To execute in all modes except for a particular one, you can pass an argument
            like "!infer" or "!train".
        ds_id: What dataset id(s) to execute this Op in. To execute regardless of ds_id, pass None. To execute in all
            ds_ids except for a particular one, you can pass an argument like "!ds1".
        fog_coef_lower: Lower limit for fog intensity coefficient. Should be in the range [0, 1].
        fog_coef_upper: Upper limit for fog intensity coefficient. Should be in the range [0, 1].
        alpha_coef: Transparency of the fog circles. Should be in the range [0, 1].
    Image types:
        uint8, float32
    """
    def __init__(self,
                 inputs: Union[str, Iterable[str]],
                 outputs: Union[str, Iterable[str]],
                 mode: Union[None, str, Iterable[str]] = None,
                 ds_id: Union[None, str, Iterable[str]] = None,
                 fog_coef_lower: float = 0.3,
                 fog_coef_upper: float = 1.0,
                 alpha_coef: float = 0.08):
        # Build the wrapped albumentations transform once, then hand it to the base Op.
        fog_transform = RandomFogAlb(fog_coef_lower=fog_coef_lower,
                                     fog_coef_upper=fog_coef_upper,
                                     alpha_coef=alpha_coef,
                                     always_apply=True)
        super().__init__(fog_transform, inputs=inputs, outputs=outputs, mode=mode, ds_id=ds_id)
| 700 | 0 | 26 |
aa5b62bfd1e2843397168ea55c152c0530cbfc95 | 3,914 | py | Python | tests/traces/test_loaders.py | pedrofreitascampospro/locintel | eb9c56cdc308660c31d90abe9fe62bd3634ba273 | [
"MIT"
] | null | null | null | tests/traces/test_loaders.py | pedrofreitascampospro/locintel | eb9c56cdc308660c31d90abe9fe62bd3634ba273 | [
"MIT"
] | null | null | null | tests/traces/test_loaders.py | pedrofreitascampospro/locintel | eb9c56cdc308660c31d90abe9fe62bd3634ba273 | [
"MIT"
] | null | null | null | from locintel.traces.loaders import DruidLoader
import pytest
from unittest.mock import Mock, patch, call
@pytest.fixture
@pytest.fixture
| 35.581818 | 84 | 0.626469 | from locintel.traces.loaders import DruidLoader
import pytest
from unittest.mock import Mock, patch, call
@pytest.fixture
def fake_config():
    """Canned Druid connection settings: (url, endpoint, datasource, user, password)."""
    return (
        "https://fakeurl.com",
        "/endpoint/v1",
        "datasource",
        "user",
        "password",
    )
@pytest.fixture
def mock_connector():
    """Mock of a pydruid client pre-wired with canned time-boundary and select data.

    The canned probes/results are also stashed as attributes (probe1..probe3,
    result1/result2) so tests can assert against the exact objects consumed.
    """
    interval = "1970/1981"
    druid_connector_mock = Mock()
    druid_connector_mock.interval = interval
    druid_connector_mock.set_basic_auth_credentials = Mock()
    # time_boundary() mimics the pydruid response shape: a `result` list whose
    # single entry carries the min/max timestamps derived from `interval`.
    druid_connector_mock.time_boundary = Mock(
        return_value=Mock(
            result=[
                {
                    "result": {
                        "minTime": interval.split("/")[0],
                        "maxTime": interval.split("/")[1],
                    }
                }
            ]
        )
    )
    # Every probe proxies __getitem__ to the same payload, so probe["event"]
    # returns {"bookingid": 1} for each of them.
    event = {"event": {"bookingid": 1}}
    probe1 = Mock(__getitem__=lambda x, y: event.__getitem__(y))
    probe2 = Mock(__getitem__=lambda x, y: event.__getitem__(y))
    probe3 = Mock(__getitem__=lambda x, y: event.__getitem__(y))
    # select() returns two result batches, each holding three probe events.
    result1 = {"result": {"events": [probe1, probe2, probe3]}}
    result2 = {"result": {"events": [probe1, probe1, probe2]}}
    results = [result1, result2]
    druid_connector_mock.select = Mock(return_value=results)
    # Expose the canned objects for convenient access from tests.
    druid_connector_mock.result1 = result1
    druid_connector_mock.result2 = result2
    druid_connector_mock.probe1 = probe1
    druid_connector_mock.probe2 = probe2
    druid_connector_mock.probe3 = probe3
    return druid_connector_mock
class TestDruidLoader(object):
    """Unit tests for DruidLoader construction and trace loading."""
    @patch("locintel.traces.loaders.PyDruid")
    def test_druid_loader(self, druid_mock, mock_connector, fake_config):
        """Constructor wires up the client, credentials, datasource and interval."""
        url, endpoint, datasource, user, password = fake_config
        druid_mock.return_value = mock_connector
        loader = DruidLoader(
            url=url,
            endpoint=endpoint,
            datasource=datasource,
            username=user,
            password=password,
        )
        assert loader.url == url
        assert loader.endpoint == endpoint
        assert loader.datasource == datasource
        # Credentials are passed through to the connector, not stored on the loader.
        assert not getattr(loader, "username", None)
        assert not getattr(loader, "password", None)
        assert loader.connector == mock_connector
        assert loader.interval == mock_connector.interval
        assert loader.default_query["datasource"] == datasource
        assert loader.default_query["intervals"] == mock_connector.interval
        druid_mock.assert_called_with(url, endpoint)
        mock_connector.set_basic_auth_credentials.assert_called_with(user, password)
        mock_connector.time_boundary.assert_called_with(datasource=datasource)
    @patch("locintel.traces.loaders.PyDruid")
    @patch("locintel.traces.loaders.Probe")
    @patch("locintel.traces.loaders.Trace")
    def test_load(
        self, trace_mock, probe_mock, druid_mock, mock_connector, fake_config
    ):
        """load() turns each select() batch into one Trace built from Probes.

        NOTE: @patch decorators apply bottom-up, hence the reversed
        (trace_mock, probe_mock, druid_mock) parameter order.
        """
        url, endpoint, datasource, user, password = fake_config
        druid_mock.return_value = mock_connector
        probes = [Mock(), Mock(), Mock(), Mock(), Mock(), Mock()] # Called 6 times
        probe_mock.from_druid = Mock(side_effect=probes)
        traces = [Mock(), Mock()]
        trace_mock.side_effect = traces
        loader = DruidLoader(
            url=url,
            endpoint=endpoint,
            datasource=datasource,
            username=user,
            password=password,
        )
        result = list(loader.load())
        assert result == traces
        # Probes must be created from the fixture's events in batch order.
        probe_mock.from_druid.assert_has_calls(
            [
                call(mock_connector.probe1),
                call(mock_connector.probe2),
                call(mock_connector.probe3),
                call(mock_connector.probe1),
                call(mock_connector.probe1),
                call(mock_connector.probe2),
            ]
        )
        # One Trace per batch of three probes; both batches share bookingid 1.
        trace_mock.assert_has_calls(
            [call(probes[:3], identifier=1), call(probes[3:], identifier=1)]
        )
| 3,461 | 242 | 67 |
8894490eaa5ba3d12669c67bf53ad57c286b015b | 396 | py | Python | Competitive_Programming/HackerRank/game_of_thrones_Hackerrank.py | amitagarwalaa57/OneDayOneAlgo | 9336fc751ee9b0b84c3e4a1e8383cb14b178c936 | [
"MIT"
] | 32 | 2020-05-23T07:40:31.000Z | 2021-02-02T18:14:30.000Z | Competitive_Programming/HackerRank/game_of_thrones_Hackerrank.py | amitagarwalaa57/OneDayOneAlgo | 9336fc751ee9b0b84c3e4a1e8383cb14b178c936 | [
"MIT"
] | 45 | 2020-05-22T10:30:51.000Z | 2020-12-28T08:17:13.000Z | Competitive_Programming/HackerRank/game_of_thrones_Hackerrank.py | amitagarwalaa57/OneDayOneAlgo | 9336fc751ee9b0b84c3e4a1e8383cb14b178c936 | [
"MIT"
] | 31 | 2020-05-22T10:18:16.000Z | 2020-10-23T07:52:35.000Z | #if length of string is even,all characters should occure even times. If its odd,only one should occur odd times to occupy space in middle bcz rest even characters will be in pairs ,making string as palindrome
from collections import Counter
print(gameOfThrones(input())) | 44 | 210 | 0.724747 | #if length of string is even,all characters should occure even times. If its odd,only one should occur odd times to occupy space in middle bcz rest even characters will be in pairs ,making string as palindrome
from collections import Counter
def gameOfThrones(s):
    """Return "YES" if s can be rearranged into a palindrome, else "NO".

    A permutation of s is a palindrome iff at most one character occurs an
    odd number of times (that character, if any, occupies the middle).
    """
    odd = 0
    for count in Counter(s).values():
        odd += count % 2  # fix: was `add+ = x%2`, a syntax error
    return "NO" if odd > 1 else "YES"
print(gameOfThrones(input())) | 101 | 0 | 22 |
2d8622b5b5742eaffe685c5a53c9f895f601a053 | 2,768 | py | Python | ichnaea/api/locate/blue.py | mikiec84/ichnaea | ec223cefb788bb921c0e7f5f51bd3b20eae29edd | [
"Apache-2.0"
] | 348 | 2015-01-13T11:48:07.000Z | 2022-03-31T08:33:07.000Z | ichnaea/api/locate/blue.py | mikiec84/ichnaea | ec223cefb788bb921c0e7f5f51bd3b20eae29edd | [
"Apache-2.0"
] | 1,274 | 2015-01-02T18:15:56.000Z | 2022-03-23T15:29:08.000Z | ichnaea/api/locate/blue.py | mikiec84/ichnaea | ec223cefb788bb921c0e7f5f51bd3b20eae29edd | [
"Apache-2.0"
] | 149 | 2015-01-04T21:15:07.000Z | 2021-12-10T06:05:09.000Z | """Search implementation using a database of Bluetooth networks."""
from collections import defaultdict
from ichnaea.api.locate.constants import (
MAX_BLUE_CLUSTER_METERS,
MAX_BLUES_IN_CLUSTER,
BLUE_MAX_ACCURACY,
BLUE_MIN_ACCURACY,
)
from ichnaea.api.locate.mac import (
aggregate_cluster_position,
cluster_networks,
query_macs,
)
from ichnaea.api.locate.result import (
Position,
PositionResultList,
Region,
RegionResultList,
)
from ichnaea.api.locate.score import station_score
from ichnaea.geocode import GEOCODER
from ichnaea.models import BlueShard
from ichnaea.models.constants import MIN_BLUE_SIGNAL
from ichnaea import util
class BluePositionMixin(object):
"""
A BluePositionMixin implements a position search using
the Bluetooth models and a series of clustering algorithms.
"""
raven_client = None
result_list = PositionResultList
result_type = Position
class BlueRegionMixin(object):
"""
A BlueRegionMixin implements a region search using our Bluetooth data.
"""
raven_client = None
result_list = RegionResultList
result_type = Region
| 27.68 | 75 | 0.629335 | """Search implementation using a database of Bluetooth networks."""
from collections import defaultdict
from ichnaea.api.locate.constants import (
MAX_BLUE_CLUSTER_METERS,
MAX_BLUES_IN_CLUSTER,
BLUE_MAX_ACCURACY,
BLUE_MIN_ACCURACY,
)
from ichnaea.api.locate.mac import (
aggregate_cluster_position,
cluster_networks,
query_macs,
)
from ichnaea.api.locate.result import (
Position,
PositionResultList,
Region,
RegionResultList,
)
from ichnaea.api.locate.score import station_score
from ichnaea.geocode import GEOCODER
from ichnaea.models import BlueShard
from ichnaea.models.constants import MIN_BLUE_SIGNAL
from ichnaea import util
class BluePositionMixin(object):
    """Mixin adding position search backed by the Bluetooth models.

    Matching Bluetooth networks are grouped into clusters and every cluster
    is aggregated into one candidate position result.
    """

    raven_client = None
    result_list = PositionResultList
    result_type = Position

    def should_search_blue(self, query, results):
        # Only search when the query actually carries Bluetooth observations.
        return bool(query.blue)

    def search_blue(self, query):
        found = self.result_list()
        stations = query_macs(query, query.blue, self.raven_client, BlueShard)
        clusters = cluster_networks(
            stations,
            query.blue,
            min_radius=BLUE_MIN_ACCURACY,
            min_signal=MIN_BLUE_SIGNAL,
            max_distance=MAX_BLUE_CLUSTER_METERS,
        )
        for group in clusters:
            found.add(aggregate_cluster_position(
                group,
                self.result_type,
                "blue",
                max_networks=MAX_BLUES_IN_CLUSTER,
                min_accuracy=BLUE_MIN_ACCURACY,
                max_accuracy=BLUE_MAX_ACCURACY,
            ))
        return found
class BlueRegionMixin(object):
    """Mixin adding region search based on our Bluetooth data."""

    raven_client = None
    result_list = RegionResultList
    result_type = Region

    def should_search_blue(self, query, results):
        # Only search when the query actually carries Bluetooth observations.
        return bool(query.blue)

    def search_blue(self, query):
        found = self.result_list()
        timestamp = util.utcnow()
        # Accumulate a station score per region code across all matches.
        score_by_code = defaultdict(int)
        for station in query_macs(query, query.blue, self.raven_client, BlueShard):
            score_by_code[station.region] += station_score(station, timestamp)
        for code, score in score_by_code.items():
            info = GEOCODER.region_for_code(code)
            if not info:
                continue  # unknown code: no region metadata to report
            found.add(
                self.result_type(
                    region_code=code,
                    region_name=info.name,
                    accuracy=info.radius,
                    score=score,
                )
            )
        return found
| 1,510 | 0 | 108 |
3f5628a51b35e66f1edcdf38e4de6a7643888e33 | 2,794 | py | Python | data/model/__init__.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 2,027 | 2019-11-12T18:05:48.000Z | 2022-03-31T22:25:04.000Z | data/model/__init__.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 496 | 2019-11-12T18:13:37.000Z | 2022-03-31T10:43:45.000Z | data/model/__init__.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 249 | 2019-11-12T18:02:27.000Z | 2022-03-22T12:19:19.000Z | from data.database import db, db_transaction
config = Config()
# There MUST NOT be any circular dependencies between these subsections. If there are fix it by
# moving the minimal number of things to _basequery
from data.model import (
appspecifictoken,
blob,
build,
gc,
image,
label,
log,
message,
modelutil,
notification,
oauth,
organization,
permission,
repositoryactioncount,
repo_mirror,
release,
repo_mirror,
repository,
service_keys,
storage,
team,
token,
user,
)
| 18.381579 | 95 | 0.755548 | from data.database import db, db_transaction
class DataModelException(Exception):
    """Base class for all errors raised by the data-model layer."""
    pass
class InvalidLabelKeyException(DataModelException):
    """The given label key is invalid."""
    pass
class InvalidMediaTypeException(DataModelException):
    """The given media type is invalid."""
    pass
class BlobDoesNotExist(DataModelException):
    """The referenced blob could not be found."""
    pass
class InvalidBlobUpload(DataModelException):
    """The referenced blob upload is invalid."""
    pass
class InvalidEmailAddressException(DataModelException):
    """The given email address is invalid."""
    pass
class InvalidOrganizationException(DataModelException):
    """The referenced organization is invalid or unknown."""
    pass
class InvalidPasswordException(DataModelException):
    """The given password is invalid."""
    pass
class InvalidRobotException(DataModelException):
    """The referenced robot account is invalid or unknown."""
    pass
class InvalidUsernameException(DataModelException):
    """The given username is invalid."""
    pass
class InvalidRepositoryBuildException(DataModelException):
    """The referenced repository build is invalid or unknown."""
    pass
class InvalidBuildTriggerException(DataModelException):
    """The referenced build trigger is invalid or unknown."""
    pass
class InvalidTokenException(DataModelException):
    """The given token is invalid or unknown."""
    pass
class InvalidNotificationException(DataModelException):
    """The referenced notification is invalid or unknown."""
    pass
class InvalidImageException(DataModelException):
    """The referenced image is invalid or unknown."""
    pass
class UserAlreadyInTeam(DataModelException):
    """The user is already a member of the team."""
    pass
class InvalidTeamException(DataModelException):
    """The referenced team is invalid or unknown."""
    pass
class InvalidTeamMemberException(DataModelException):
    """The referenced team member is invalid."""
    pass
class InvalidManifestException(DataModelException):
    """The manifest is invalid."""
    pass
class ServiceKeyDoesNotExist(DataModelException):
    """The referenced service key could not be found."""
    pass
class ServiceKeyAlreadyApproved(DataModelException):
    """The service key has already been approved."""
    pass
class ServiceNameInvalid(DataModelException):
    """The given service name is invalid."""
    pass
class TagAlreadyCreatedException(DataModelException):
    """The tag has already been created."""
    pass
class StaleTagException(DataModelException):
    """The tag is stale."""
    pass
class TooManyLoginAttemptsException(Exception):
    """Raised when login attempts are being throttled.

    Attributes:
        retry_after: how long the caller should wait before retrying.
    """
    def __init__(self, message, retry_after):
        super().__init__(message)
        self.retry_after = retry_after
class Config(object):
    """Mutable holder for app-wide state and cleanup callback registries."""
    def __init__(self):
        self.app_config = None
        self.store = None
        self.image_cleanup_callbacks = []
        self.repo_cleanup_callbacks = []
    def register_image_cleanup_callback(self, callback):
        """Register an image-cleanup callback; returns a zero-arg unregister function."""
        registry = self.image_cleanup_callbacks
        registry.append(callback)
        def unregister():
            registry.remove(callback)
        return unregister
    def register_repo_cleanup_callback(self, callback):
        """Register a repo-cleanup callback; returns a zero-arg unregister function."""
        registry = self.repo_cleanup_callbacks
        registry.append(callback)
        def unregister():
            registry.remove(callback)
        return unregister
config = Config()
# There MUST NOT be any circular dependencies between these subsections. If there are fix it by
# moving the minimal number of things to _basequery
from data.model import (
appspecifictoken,
blob,
build,
gc,
image,
label,
log,
message,
modelutil,
notification,
oauth,
organization,
permission,
repositoryactioncount,
repo_mirror,
release,
repo_mirror,
repository,
service_keys,
storage,
team,
token,
user,
)
| 571 | 926 | 704 |
c16681299ca04540e40b740fabae95decce047bb | 865 | py | Python | omaha_server/omaha/migrations/0004_partialupdate.py | fiadm/omaha-server | 72bf5eb33ffc267ebe45a9461acdc79e31ed894b | [
"Apache-2.0"
] | 142 | 2015-02-10T05:46:28.000Z | 2020-03-21T13:18:31.000Z | omaha_server/omaha/migrations/0004_partialupdate.py | tuladhar/omaha-server | 6cfd86e4319e03af0eb319fae6c867691ffc2c36 | [
"Apache-2.0"
] | 272 | 2015-01-15T09:43:49.000Z | 2020-03-30T08:29:30.000Z | omaha_server/omaha/migrations/0004_partialupdate.py | tuladhar/omaha-server | 6cfd86e4319e03af0eb319fae6c867691ffc2c36 | [
"Apache-2.0"
] | 77 | 2015-01-29T19:13:39.000Z | 2020-03-21T06:45:35.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import omaha.fields
| 28.833333 | 114 | 0.554913 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import omaha.fields
class Migration(migrations.Migration):
    """Create the PartialUpdate model (one-to-one with omaha.Version)."""
    dependencies = [
        ('omaha', '0003_version_is_enabled'),
    ]
    operations = [
        migrations.CreateModel(
            name='PartialUpdate',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('is_enabled', models.BooleanField(default=True)),
                ('percent', omaha.fields.PercentField()),
                ('start_date', models.DateField()),
                ('end_date', models.DateField()),
                ('version', models.OneToOneField(to='omaha.Version')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
| 0 | 715 | 23 |
2374cd0a11dba901068bc67e8f2980ac5a3956c5 | 5,439 | py | Python | bin/search_irc.py | AlexFaraino/cve-search | 4be22d43c4cd3ed7b6b2db2393b3d27575f23d35 | [
"BSD-3-Clause"
] | 377 | 2018-05-02T06:55:15.000Z | 2022-03-29T06:13:10.000Z | bin/search_irc.py | AlexFaraino/cve-search | 4be22d43c4cd3ed7b6b2db2393b3d27575f23d35 | [
"BSD-3-Clause"
] | 52 | 2015-01-05T15:13:06.000Z | 2018-04-13T14:33:06.000Z | bin/search_irc.py | AlexFaraino/cve-search | 4be22d43c4cd3ed7b6b2db2393b3d27575f23d35 | [
"BSD-3-Clause"
] | 91 | 2018-05-03T02:05:15.000Z | 2022-03-29T15:45:58.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Simple IRC bot to query for the last entries in the CVE database
#
# current command supported is:
#
# last <max>
# cvetweet <max>
# browse
# search <vendor>\<product>
# get <cve>
#
# You need to connect the IRC bot to the IRC Server you want to access it from.
#
# Software is free software released under the "Modified BSD license"
#
# Copyright (c) 2015 Pieter-Jan Moreels - pieterjan.moreels@gmail.com
# Imports
import argparse
import irc.bot
import irc.strings
import json
import os
import signal
import ssl
import sys
runPath = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(runPath, ".."))
# BSON MongoDB include ugly stuff that needs to be processed for standard JSON
from bson import json_util
from web.api import API
argParser = argparse.ArgumentParser(description='IRC bot to query cve-search')
argParser.add_argument('-s', type=str, help='server ip', default='localhost')
argParser.add_argument('-p', type=int, help='server port)', default=6667)
argParser.add_argument('-n', type=str, help='nickname', default='cve-search')
argParser.add_argument('-w', type=str, help='password')
argParser.add_argument('-u', type=str, help='username', default='cve-search')
argParser.add_argument('-c', nargs="*", help='channel list', default=['cve-search'])
argParser.add_argument('-t', type=str, help='trigger prefix', default='.')
argParser.add_argument('-v', action='store_true', help='channel list', default=['cve-search'])
argParser.add_argument('-m', type=int, help='maximum query amount', default=20)
argParser.add_argument('--ssl', action='store_true', help='Use SSL')
args = argParser.parse_args()
# signal handlers
if __name__ == "__main__":
main()
| 33.574074 | 104 | 0.646442 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Simple IRC bot to query for the last entries in the CVE database
#
# current command supported is:
#
# last <max>
# cvetweet <max>
# browse
# search <vendor>\<product>
# get <cve>
#
# You need to connect the IRC bot to the IRC Server you want to access it from.
#
# Software is free software released under the "Modified BSD license"
#
# Copyright (c) 2015 Pieter-Jan Moreels - pieterjan.moreels@gmail.com
# Imports
import argparse
import irc.bot
import irc.strings
import json
import os
import signal
import ssl
import sys
runPath = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(runPath, ".."))
# BSON MongoDB include ugly stuff that needs to be processed for standard JSON
from bson import json_util
from web.api import API
argParser = argparse.ArgumentParser(description='IRC bot to query cve-search')
argParser.add_argument('-s', type=str, help='server ip', default='localhost')
argParser.add_argument('-p', type=int, help='server port)', default=6667)
argParser.add_argument('-n', type=str, help='nickname', default='cve-search')
argParser.add_argument('-w', type=str, help='password')
argParser.add_argument('-u', type=str, help='username', default='cve-search')
argParser.add_argument('-c', nargs="*", help='channel list', default=['cve-search'])
argParser.add_argument('-t', type=str, help='trigger prefix', default='.')
argParser.add_argument('-v', action='store_true', help='channel list', default=['cve-search'])
argParser.add_argument('-m', type=int, help='maximum query amount', default=20)
argParser.add_argument('--ssl', action='store_true', help='Use SSL')
args = argParser.parse_args()
class IRCBot(irc.bot.SingleServerIRCBot):
    """IRC bot answering cve-search queries (last/get/browse/search/cvetweet).

    Commands arrive as private messages, or as channel messages prefixed with
    the trigger character (args.t).
    """
    def __init__(self, channel, nickname, server, port, password=None, username=None, **kwargs):
        if not username:
            username = nickname
        irc.bot.SingleServerIRCBot.__init__(self, [(server, port)], nickname, username, **kwargs)
        self.channel = channel  # channels to join once welcomed
        self.api = API()  # cve-search query backend
    def on_nicknameinuse(self, c, e):
        # Nick collision: retry with a trailing underscore appended.
        c.nick(c.get_nickname() + "_")
    def on_welcome(self, c, e):
        """Join every configured channel once the server accepts us."""
        if args.v:
            print("Server welcomed us")
        for chan in self.channel:
            if not chan.startswith('#'):
                chan = ("#%s" % chan)
            if args.v:
                print("joining %s" % chan)
            c.join(chan)
    def on_privmsg(self, c, e):
        # Private messages need no trigger prefix.
        self.do_command(e, e.arguments[0])
    def on_pubmsg(self, c, e):
        # Channel messages are only handled when prefixed with the trigger.
        line = e.arguments[0]
        if line.startswith(args.t):
            self.do_command(e, line[len(args.t):])
        return
    def reply(self, e, reply):
        """Send a reply, JSON-encoding dicts/lists and chunking to the IRC limit."""
        if type(reply) in [dict, list]:
            reply = json.dumps(reply, sort_keys=True, ensure_ascii=True, default=json_util.default)
        else:
            reply = str(reply)
        # Answer privately when we were messaged directly, otherwise in-channel.
        if e.target == self.connection.nickname:
            target = e.source.nick
        else:
            target = e.target
        _list = reply.split('\n')
        chunk_size = 512 - 12 - len(e.target) # 512 - len("PRIVMSG") - len(" :") - CR/LF - target
        _list = [[x[i:i+chunk_size] for i in range(0, len(x), chunk_size)] for x in _list]
        _list = [item for sublist in _list for item in sublist] # flatten list
        for r in _list[:4]:  # cap the flood at 4 lines per reply
            self.connection.privmsg(target, r)
    def do_command(self, e, cmd):
        """Parse and dispatch one command line (already stripped of the trigger)."""
        def last(option):
            # Fix: previously replied directly and returned None, which made
            # callers echo "None" and crashed cvetweet's iteration; it also
            # kept going after an out-of-range limit. Now returns the data,
            # or an error string, and lets the caller reply.
            limit = int(option) if option else 10
            if limit > args.m or limit < 1:
                return "Request not in range 0-%d" % args.m
            return self.api.api_last(limit)
        def cve(option):
            if option is None:
                return "A cve-id must be specified"
            return self.api.api_cve(option)
        if not cmd:
            return  # empty command: nothing to do (was a no-op `pass`)
        parts = cmd.split(' ', 1)
        cmd = parts[0]
        option = parts[1] if len(parts) == 2 else None
        if cmd == "die":
            self.die()
        elif cmd in ["last", "recent"]:
            self.reply(e, last(option))
        elif cmd in ["get", "cve"]:
            self.reply(e, cve(option))
        elif cmd in ["browse", "vendor"]:
            self.reply(e, self.api.api_browse(option))
        elif cmd in ["search", "product"]:
            # Fix: option may be None (bare "search"), which used to crash.
            parts = option.split() if option else []
            if len(parts) < 2:
                return self.reply(e, "Usage: search <vendor> <product>")
            return self.reply(e, self.api.api_search(parts[0], parts[1]))
        elif cmd in ["cvetweet", "tweet"]:
            text = ""
            # Fix: option may be None (bare "cvetweet"), which used to crash
            # on option.lower().
            if option and option.lower().startswith("cve-"):
                cves = [cve(option)]
            else:
                cves = last(option)
            if isinstance(cves, str):
                # last() signalled an out-of-range request
                return self.reply(e, cves)
            for t in cves:
                text += str(t['id']) + " , " + str(t['summary']) + " " + " , ".join(t['references']) + "\n"
            return self.reply(e, text)
        else:
            self.reply(e, "Not understood: " + cmd)
# signal handlers
def sig_handler(sig, frame):
    """Handle SIGTERM/SIGINT: announce the signal and stop the global bot."""
    print('Caught signal: %s\nShutting down' % sig)
    bot.die()
def main():
    """Build the IRC bot from the parsed CLI arguments and run it until stopped."""
    server = args.s
    port = args.p
    nick = args.n
    password = args.w
    user = args.u
    chans = args.c
    # `bot` is global so sig_handler() can reach it when a signal arrives.
    global bot
    if args.ssl:
        print("using ssl")
        # NOTE(review): irc.connection is not imported explicitly above; this
        # relies on `import irc.bot` exposing the submodule — verify.
        ssl_factory = irc.connection.Factory(wrapper=ssl.wrap_socket)
        bot=IRCBot(chans, nick, server, port, password=password,username=user, connect_factory=ssl_factory)
    else:
        bot=IRCBot(chans, nick, server, port, password=password,username=user)
    signal.signal(signal.SIGTERM, sig_handler)
    signal.signal(signal.SIGINT, sig_handler)
    if args.v:
        print("Connecting to server")
    bot.start()  # blocks: enters the IRC event loop
if __name__ == "__main__":
main()
| 3,404 | 20 | 242 |
b6f0deef92403d4f622da80bbc5308c42215e2e3 | 1,878 | py | Python | examples/basic_examples.py | yoramzarai/TpTnOsc | f1fdd1a0ebc7d64177f4281bf3be5f2665fdcf90 | [
"MIT"
] | null | null | null | examples/basic_examples.py | yoramzarai/TpTnOsc | f1fdd1a0ebc7d64177f4281bf3be5f2665fdcf90 | [
"MIT"
] | null | null | null | examples/basic_examples.py | yoramzarai/TpTnOsc | f1fdd1a0ebc7d64177f4281bf3be5f2665fdcf90 | [
"MIT"
] | null | null | null | '''
Few basic examples of the tools in TpTnOsc.utils.
See also osc_exp_examples.ipynb notebook for examples of tools in TpTnOsc.osc_exp.
Execute: python basic_examples.py
'''
import numpy as np
import TpTnOsc.utils as ut
# computing the p'th order multiplicative compound matrix
A = np.array([[1,6,0,0], [2,13,4,20], [2,13,5,25], [0,0,3,16]])
p = 2
mc, lp = ut.compute_MC_matrix( A, p )
print(f"mc=\n{mc}\nlp=\n{lp}")
print(f"\nA is I-TN: {ut.is_ITN(A)}")
# SEB factorization
Lmat, Dmat, Umat, Um, valsL, valsU = ut.EB_factorization_ITN(A)
print(f"\nvalsL={valsL}, valsU={valsU}")
# generate an oscillatory matrix
valsL = np.array([1,0,1,2,1,0])
valsU = np.array([1,3,2,3,0,0])
valsD = np.array([2,1,4,2])
B = ut.compute_matrix_from_EB_factorization( valsL, valsD, valsU )
print(f"\nB=\n{B}\nB is OSC={ut.is_OSC(B, tol=10*np.finfo(np.float).eps)}")
print(f"\nSEB factorization = {ut.show_EB_config(valsL, valsU, valsD, True)}")
# format matrix in latex form
print("\nB in Latex form:")
ut.show_mat_latex_format(B)
# sign variations
v = np.array([-1,2,-3,0])
print(f"\ns-(v)={ut.s_minus(v)}\ns+(v)={ut.s_plus(v)}")
print(f"\nsc-(v)={ut.sc_minus(v)}\nsc+(v)={ut.sc_plus(v)}")
# draw SEB factorization
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(12, 6))
ut.draw_EB_factorization_ITN( valsL, valsD, valsU, ax)
plt.show()
# computing the exponent of an oscillatory matrix
import TpTnOsc.osc_exp as oscp
print("\n\nComputing families of vertex-disjoing paths and exponent (r()) of B:\n")
osc_cls = oscp.OSC_exp(B)
osc_cls.run()
print("lower-left and upper-right corner minors families of vertex-disjoint paths:")
osc_cls.display_results()
for k in osc_cls.G.keys():
print(f'Triangle graph of {k}:')
_, ax = plt.subplots(figsize=(9,6))
osc_cls.G[k].draw_me(ax, nd_offset=[0.3, 0.4])
ax.margins(.1, .1)
plt.show()
plt.close()
| 28.892308 | 84 | 0.690096 | '''
Few basic examples of the tools in TpTnOsc.utils.
See also osc_exp_examples.ipynb notebook for examples of tools in TpTnOsc.osc_exp.
Execute: python basic_examples.py
'''
import numpy as np
import TpTnOsc.utils as ut
# computing the p'th order multiplicative compound matrix
A = np.array([[1,6,0,0], [2,13,4,20], [2,13,5,25], [0,0,3,16]])
p = 2
mc, lp = ut.compute_MC_matrix( A, p )
print(f"mc=\n{mc}\nlp=\n{lp}")
print(f"\nA is I-TN: {ut.is_ITN(A)}")
# SEB factorization
Lmat, Dmat, Umat, Um, valsL, valsU = ut.EB_factorization_ITN(A)
print(f"\nvalsL={valsL}, valsU={valsU}")
# generate an oscillatory matrix
valsL = np.array([1,0,1,2,1,0])
valsU = np.array([1,3,2,3,0,0])
valsD = np.array([2,1,4,2])
B = ut.compute_matrix_from_EB_factorization( valsL, valsD, valsU )
print(f"\nB=\n{B}\nB is OSC={ut.is_OSC(B, tol=10*np.finfo(np.float).eps)}")
print(f"\nSEB factorization = {ut.show_EB_config(valsL, valsU, valsD, True)}")
# format matrix in latex form
print("\nB in Latex form:")
ut.show_mat_latex_format(B)
# sign variations
v = np.array([-1,2,-3,0])
print(f"\ns-(v)={ut.s_minus(v)}\ns+(v)={ut.s_plus(v)}")
print(f"\nsc-(v)={ut.sc_minus(v)}\nsc+(v)={ut.sc_plus(v)}")
# draw SEB factorization
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(12, 6))
ut.draw_EB_factorization_ITN( valsL, valsD, valsU, ax)
plt.show()
# computing the exponent of an oscillatory matrix
import TpTnOsc.osc_exp as oscp
print("\n\nComputing families of vertex-disjoing paths and exponent (r()) of B:\n")
osc_cls = oscp.OSC_exp(B)
osc_cls.run()
print("lower-left and upper-right corner minors families of vertex-disjoint paths:")
osc_cls.display_results()
for k in osc_cls.G.keys():
print(f'Triangle graph of {k}:')
_, ax = plt.subplots(figsize=(9,6))
osc_cls.G[k].draw_me(ax, nd_offset=[0.3, 0.4])
ax.margins(.1, .1)
plt.show()
plt.close()
| 0 | 0 | 0 |
60139b5004c5cb7c26974dc0ad0792715143538c | 328 | py | Python | lib/handlers/base.py | WXSD-Sales/APMBot | 8fb61d28447048a026ab2c16312a6ac4cc473f3b | [
"MIT"
] | null | null | null | lib/handlers/base.py | WXSD-Sales/APMBot | 8fb61d28447048a026ab2c16312a6ac4cc473f3b | [
"MIT"
] | null | null | null | lib/handlers/base.py | WXSD-Sales/APMBot | 8fb61d28447048a026ab2c16312a6ac4cc473f3b | [
"MIT"
] | 1 | 2021-06-25T13:15:18.000Z | 2021-06-25T13:15:18.000Z | import json
import tornado.web
| 29.818182 | 83 | 0.661585 | import json
import tornado.web
class BaseHandler(tornado.web.RequestHandler):
    """Shared request handler providing cookie-based session lookup."""

    def get_current_user(self):
        """Return the session dict decoded from the signed "sessionId" cookie.

        Returns None when the cookie is absent, tampered with, or expired
        (max_age_days=1), which Tornado treats as "not authenticated".
        """
        cookie = self.get_secure_cookie("sessionId", max_age_days=1, min_version=1)
        # `is None` rather than `!= None`: identity is the idiomatic check
        # for a missing cookie.
        if cookie is None:
            return None
        return json.loads(cookie.decode('utf-8'))
| 223 | 25 | 49 |
1168d40803149df71eeb31f4b14e3345e88d0278 | 1,652 | py | Python | geo/data/f.py | liucsny/Processing-Data | 41eb12ecb53869771d266e1d9afccd39d4ca8234 | [
"MIT"
] | null | null | null | geo/data/f.py | liucsny/Processing-Data | 41eb12ecb53869771d266e1d9afccd39d4ca8234 | [
"MIT"
] | null | null | null | geo/data/f.py | liucsny/Processing-Data | 41eb12ecb53869771d266e1d9afccd39d4ca8234 | [
"MIT"
] | null | null | null | import pandas as pd
import pprint as pp
# Histogram of male rows per (amount bin, clamped age).
# Columns (presumably): 性别 = gender, 金额 = amount, 年龄 = age -- TODO confirm.
# NOTE(review): this region duplicates the block further down this file and
# calls constrain_age, which is only defined below -- appears to be a
# truncated duplicate; confirm before running.
# Amount-bin upper thresholds.
r=[1000,25000,100000,500000]
df=pd.read_csv("f.csv");
# df=df.sort(["金额"],ascending=False)
print(df)
# df.to_csv("f.csv",index=False,encoding="utf-8")
# NOTE(review): the "25000~50000"/"50000~500000" labels do not match the
# thresholds in r (25000~100000 / 100000~500000) -- confirm intended labels.
df_sum={"0~1000":{},"1000~25000":{},"25000~50000":{},"50000~500000":{},">500000":{}}
for index,rows in df.iterrows():
    # Only male ("男") rows are counted.
    if rows.性别 == "男":
        # NOTE(review): all comparisons below are strict, so amounts exactly
        # equal to a threshold in r fall into no bin and are dropped.
        if rows.金额<r[0]:
            if constrain_age(rows.年龄) in df_sum["0~1000"]:
                df_sum["0~1000"][constrain_age(rows.年龄)]+=1#rows.金额
            else:
                df_sum["0~1000"][constrain_age(rows.年龄)]=1#rows.金额
        elif (rows.金额>r[0])&(rows.金额<r[1]):
            if constrain_age(rows.年龄) in df_sum["1000~25000"]:
                df_sum["1000~25000"][constrain_age(rows.年龄)]+=1#rows.金额
            else:
                df_sum["1000~25000"][constrain_age(rows.年龄)]=1#rows.金额
        elif (rows.金额>r[1])&(rows.金额<r[2]):
            if constrain_age(rows.年龄) in df_sum["25000~50000"]:
                df_sum["25000~50000"][constrain_age(rows.年龄)]+=1#rows.金额
            else:
                df_sum["25000~50000"][constrain_age(rows.年龄)]=1#rows.金额
        elif (rows.金额>r[2])&(rows.金额<r[3]):
            if constrain_age(rows.年龄) in df_sum["50000~500000"]:
                df_sum["50000~500000"][constrain_age(rows.年龄)]+=1#rows.金额
            else:
                df_sum["50000~500000"][constrain_age(rows.年龄)]=1#rows.金额
        elif rows.金额>r[3]:
            # Double constrain_age call is redundant (the function is idempotent).
            if constrain_age(constrain_age(rows.年龄)) in df_sum[">500000"]:
                # print("in")
                df_sum[">500000"][constrain_age(rows.年龄)]+=1#rows.金额
            else:
                df_sum[">500000"][constrain_age(rows.年龄)]=1#rows.金额
# Rows = bin labels, columns = ages after transpose.
s=pd.DataFrame(df_sum)
s=s.T
print(s)
s.to_csv("f_people_nan.csv",index=True,encoding="utf-8")
# pp.pprint(df_sum)
| 26.222222 | 84 | 0.654358 | import pandas as pd
import pprint as pp
# Amount-bin upper thresholds used by the binning loop below.
r=[1000,25000,100000,500000]
df=pd.read_csv("f.csv");
# df=df.sort(["金额"],ascending=False)
def constrain_age(age):
    """Clamp an age value for histogram keys.

    Returns 17.0 for missing (NaN) ages and for ages below 17;
    otherwise returns the age unchanged.
    """
    # NaN compares unequal to itself, so `age != age` detects it.
    # The original `age == float('NaN')` test was always False (NaN != NaN),
    # letting missing ages leak through as NaN dictionary keys.
    if age != age:
        return 17.0
    elif age < 17:
        return 17.0
    else:
        return age
print(df)
# df.to_csv("f.csv",index=False,encoding="utf-8")
# Histogram of male rows per (amount bin, clamped age).
# NOTE(review): the "25000~50000"/"50000~500000" labels do not match the
# thresholds in r (25000~100000 / 100000~500000) -- confirm intended labels.
df_sum={"0~1000":{},"1000~25000":{},"25000~50000":{},"50000~500000":{},">500000":{}}
bin_labels = ["0~1000", "1000~25000", "25000~50000", "50000~500000"]
for index, rows in df.iterrows():
    # Only male ("男") rows are counted.
    if rows.性别 != "男":
        continue
    amount = rows.金额
    # Skip NaN amounts (the original strict comparisons excluded them too).
    if amount != amount:
        continue
    age = constrain_age(rows.年龄)
    # Half-open bins [lower, upper). The original used strict </> on both
    # sides, so rows whose amount exactly equalled a threshold in r were
    # silently dropped; boundary values now land in the upper bin.
    label = ">500000"
    for upper, name in zip(r, bin_labels):
        if amount < upper:
            label = name
            break
    df_sum[label][age] = df_sum[label].get(age, 0) + 1
# Rows = bin labels, columns = ages after transpose.
s=pd.DataFrame(df_sum)
s=s.T
print(s)
s.to_csv("f_people_nan.csv",index=True,encoding="utf-8")
# pp.pprint(df_sum)
| 87 | 0 | 22 |
2e9c716ce199b66cae4a4b9941304d98830da5d6 | 294 | py | Python | student/models.py | yutanguyen25/weather-api | 1615fab62e5d7118535db86bb5315bf46a8a9a5f | [
"MIT"
] | null | null | null | student/models.py | yutanguyen25/weather-api | 1615fab62e5d7118535db86bb5315bf46a8a9a5f | [
"MIT"
] | null | null | null | student/models.py | yutanguyen25/weather-api | 1615fab62e5d7118535db86bb5315bf46a8a9a5f | [
"MIT"
] | null | null | null | from django.db import models
| 29.4 | 54 | 0.758503 | from django.db import models
class Student(models.Model):
    """A registered student and their contact details."""

    # External registration number; unique per student.
    student_reg_number = models.TextField(unique=True)
    student_name = models.TextField()
    student_email = models.TextField()
    # Optional: column allows NULL.
    student_mobile = models.TextField(null=True)
    # NOTE(review): auto_now=True refreshes this timestamp on *every* save;
    # for a creation time, auto_now_add=True is the usual intent -- confirm.
    created_at = models.DateTimeField(auto_now=True)
| 0 | 241 | 23 |
15b36164ed23efe8438cfbafbe8c7d696787e514 | 18,108 | py | Python | chemevolve/ReactionFunctions.py | colemathis/chemevolvePrivate | 53e09a3bccf6d0e46a93ed1c2378869706dac7a0 | [
"MIT"
] | null | null | null | chemevolve/ReactionFunctions.py | colemathis/chemevolvePrivate | 53e09a3bccf6d0e46a93ed1c2378869706dac7a0 | [
"MIT"
] | null | null | null | chemevolve/ReactionFunctions.py | colemathis/chemevolvePrivate | 53e09a3bccf6d0e46a93ed1c2378869706dac7a0 | [
"MIT"
] | null | null | null | import numpy as np
import PropensityFunctions as Propensity
import CoreClasses as Core
import random
import math
import OutputFunctions as Out
import InitializeFunctions as Init
####################################################
### Load C library
####################################################
from ctypes import cdll
from ctypes import byref, c_int, c_ulong, c_double, POINTER
def get_libpath():
	"""
	Return the absolute path of the platform-specific SSA shared library.

	The binary is expected in the 'clibs' directory next to this module.

	Raises:
		RuntimeError: if the host platform is not Linux, Darwin or Windows.
	"""
	import os
	from os.path import dirname, abspath, realpath
	from platform import system
	# Resolve symlinks so the path is correct even when the package is linked.
	root = dirname(abspath(realpath(__file__)))
	if system() == 'Linux':
		library = 'Linux-SSA.so'
	elif system() == 'Darwin':
		library = 'OSX-SSA.so'
	elif system() == 'Windows':
		library = "Win-SSA.so"
	else:
		raise RuntimeError("unsupported platform - \"{}\"".format(system()))
	# Dropped the unused `import re` and unused `join` import of the original.
	return os.path.join(root, 'clibs', library)
# Bind the platform-specific shared library once at import time.
_SSA_LIB = cdll.LoadLibrary(get_libpath())
# C signature: current_t, next_t, r_seed, max_x, max_y, num_m, num_r, concentrations, constants, propensity_ints, reaction_arr, catalyst_arr
_SSA_LIB.SSA_update.argtypes = (c_double, c_double, c_int, c_int, c_int, c_int, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_int), POINTER(c_int), POINTER(c_double))
_SSA_LIB.SSA_update.restype = c_double
SSA_update = _SSA_LIB.SSA_update ### Renaming function for convenience
####################################################
####################################################
def pick_reaction(dice_roll, CRS, concentrations, **kwargs):
	''' Picks a reaction to occur stochastically
	Walks the cumulative propensity ("checkpoint") over CRS.reactions and
	returns the reaction whose cumulative sum first reaches dice_roll.
	Arguements:
		- dice_roll: float which should be a number between zero and the total propensity of reactions
		- CRS: the CRS object which contains all possible reactions and molecules
		- concentrations: the list of concentrations indexed by molecule ID
		- propensity_function: which propensity function to use, default: standard
		- kwargs: must supply 'mu' (per-base mutation rate) for 'RM*' reactions
	Return:
		- rxn: a Reaction object'''
	checkpoint = 0.0
	for rxn in CRS.reactions:
		reactant_concentrations = [concentrations[i] for i in rxn.reactants]
		catalyst_concentrations = [concentrations[i] for i in rxn.catalysts]
		reactant_coeff = rxn.reactant_coeff
		catalyzed_constants = rxn.catalyzed_constants
		#print rxn.catalysts
		# Dispatch on the reaction's propensity tag.
		if rxn.prop == 'STD':
			# print "Reactant concentrations: ", reactant_concentrations
			# print 'Product ID numbers: ',rxn.products
			checkpoint += Propensity.standard_propensity(rxn, CRS, concentrations)
			#print "dice_roll: ", dice_roll, ' checkpoint: ', checkpoint
			if checkpoint >= dice_roll:
				break
		elif rxn.prop == 'RM8':
			mu = kwargs['mu']
			checkpoint += Propensity.replicator_composition_propensity_envMutation8(rxn, CRS, concentrations, mu = mu)
			if checkpoint >= dice_roll:
				# Re-roll within this reaction's propensity mass to pick the mutant copy.
				mutation_dice = random.random()*Propensity.replicator_composition_propensity_envMutation8(rxn, CRS, concentrations, mu = mu)
				rxn = pick_replicator8(mutation_dice, rxn,CRS, concentrations, mu)
				#print CRS.molecule_list[rxn.products[0]]
				break
		elif rxn.prop == 'RM2':
			mu = kwargs['mu']
			checkpoint += Propensity.replicator_composition_propensity_envMutation2(rxn, CRS, concentrations, mu = mu)
			if checkpoint >= dice_roll:
				mutation_dice = random.random()*Propensity.replicator_composition_propensity_envMutation2(rxn, CRS, concentrations, mu = mu)
				rxn = pick_replicator2(mutation_dice, rxn,CRS, concentrations, mu)
				#print CRS.molecule_list[rxn.products[0]]
				break
		elif rxn.prop == 'RM1':
			# NOTE(review): 'RM1' reuses the RM2 propensity and pick_replicator2;
			# no *1 variant is called here -- confirm this is intentional.
			mu = kwargs['mu']
			checkpoint += Propensity.replicator_composition_propensity_envMutation2(rxn, CRS, concentrations, mu = mu)
			if checkpoint >= dice_roll:
				mutation_dice = random.random()*Propensity.replicator_composition_propensity_envMutation2(rxn, CRS, concentrations, mu = mu)
				rxn = pick_replicator2(mutation_dice, rxn,CRS, concentrations, mu)
				#print CRS.molecule_list[rxn.products[0]]
				break
		elif rxn.prop[:2] == 'MM':
			# Michaelis-Menten: the digit after 'MM' is the base-10 exponent of kcat.
			expon = int(rxn.prop[2])
			kcat = 10**expon
			checkpoint += Propensity.MM_kinetics(rxn, CRS, concentrations, kcat)
			if checkpoint >= dice_roll:
				break
	#raw_input("Enter")
	return rxn
####################################################
def execute_rxn(rxn, CRS, concentrations):
	'''Apply one firing of a reaction to the concentration vector.

	Arguements:
		- rxn: Reaction object whose stoichiometry is applied
		- CRS: CRS object containing the entire system (not consulted here)
		- concentrations: list of molecule concentrations indexed by ID
	Return:
		- concentrations: the same sequence, updated in place'''
	# Consume each reactant by its stoichiometric coefficient.
	for idx, molecule_id in enumerate(rxn.reactants):
		concentrations[molecule_id] -= rxn.reactant_coeff[idx]
	# Create each product by its stoichiometric coefficient.
	for idx, molecule_id in enumerate(rxn.products):
		concentrations[molecule_id] += rxn.product_coeff[idx]
	return concentrations
####################################################
####################################################
def pick_replicator8(dice_roll, rxn, CRS, concentrations, mu = 0.001):
	'''Given a dice_roll and a replication reaction, determine the mutation outcome, return rxn object
	Arguements:
		- dice_roll: random number between 0 and total mutation propensity
		- rxn: original replication reaction
		- CRS: CRS object
		- concentrations: concentration array containing all replicators and monomer concentrations
		- mu: per-base mutation rate
	Return:
		- picked_rxn: a Reaction object containing the new sequence to be produced and monomers to be consumed
			If not enough resources present to replicate, a null Reaction object is returned
	'''
	checkpoint = 0.0
	seq_found = False
	# Template sequence being copied (string of 'A'/'B' residues).
	seq = CRS.molecule_list[rxn.products[0]]
	# print "Copying seqeunce: ", seq
	# print "Dice Roll: ", dice_roll
	R_L = len(seq)
	reactant_concentrations = concentrations[rxn.reactants]
	replicator_concentration = concentrations[rxn.products]
	reactant_coeff = rxn.reactant_coeff
	#catalyzed_constants = rxn.catalyzed_constants
	#Calculate Propensity
	Ap = rxn.constant
	nA = reactant_coeff[0] # If you're reading this you should confirm that 'A' is stored at index 0
	nB = reactant_coeff[1] # If you're reading this you should confirm that 'B' is stored at index 1
	binomialA = 0 #Used for calculating the contribution from copying A-residues
	binomialB = 0 #Used for calculating the intermediate of contribution from copying A-residues and B-residues
	q_error = 0.0
	# Cumulative-sum (inverse-CDF) sampling over error patterns (eA, eB):
	# accumulate each pattern's propensity into checkpoint until it reaches dice_roll.
	for eA in range(0, nA + 1):
		#Here eA is the number of errors in copying A-residues
		if seq_found == True:
			break
		binomialA = (math.factorial(nA)/(math.factorial(nA - eA)*math.factorial(eA)))*pow(reactant_concentrations[0], nA - eA)*pow(reactant_concentrations[1], eA) #calculates number of sequences with eA errors in copying A and the resource contribution to these sequences
		for eB in range(0, nB + 1):
			# Here eB is the number of errors in copying B-residues
			if eA == 0 and eB == 0:
				# Keeps perfect copying probability seperate from copies made with errors
				q_p = pow(1 - mu, R_L)*pow(reactant_concentrations[0], nA)*pow(reactant_concentrations[1], nB)
				checkpoint += rxn.constant*q_p*replicator_concentration
			else:
				binomialB = (math.factorial(nB)/(math.factorial(nB - eB)*math.factorial(eB)))*pow(reactant_concentrations[1], nB - eB)*pow(reactant_concentrations[0], eB) #adds number of mutants with eB B-errors
				q_error = pow(mu, eA + eB)*pow(1 - mu, R_L - eA - eB)*binomialA*binomialB
				checkpoint += rxn.constant*q_error*replicator_concentration
			if checkpoint >= dice_roll:
				A_errors = eA
				B_errors = eB
				seq_found = True
				break
	# print "eA, eB: ", eA, eB
	# print "checkpoint: ", checkpoint
	# NOTE(review): if dice_roll exceeds the total accumulated propensity,
	# A_errors/B_errors are never bound and the next line raises NameError.
	Astring = 'B'*A_errors + 'A'*(nA - A_errors)
	Bstring = 'A'*B_errors + 'B'*(nB - B_errors)
	Alist = list(Astring)
	Blist = list(Bstring)
	random.shuffle(Alist)
	random.shuffle(Blist)
	# Rebuild the copy: each template position draws a (possibly mutated)
	# residue of the matching type from the shuffled pools.
	new_seq = ''
	for i in range(R_L):
		if seq[i] == 'A':
			new_seq += Alist.pop()
		elif seq[i] == 'B':
			new_seq += Blist.pop()
	Acount = new_seq.count('A')
	Bcount = new_seq.count('B')
	# print "Mutated Seq: ", new_seq
	# Not enough free monomers: return a null reaction instead of the mutant.
	if (Acount != 0 and Acount > reactant_concentrations[0]) or (Bcount != 0 and Bcount > reactant_concentrations[1]):
		print 'New check: Not enough food to replicate'
		picked_rxn = Core.Reaction(-1,products = [0,1], product_coeff = [0,0], reactants =[0, 1], reactant_coeff = [0, 0], prop = 'RCM')
	else:
		new_seq_ID = CRS.molecule_dict[new_seq]
		picked_rxn = Core.Reaction(-1,products = [new_seq_ID], product_coeff = [1], reactants =[0, 1], reactant_coeff = [Acount, Bcount], prop = 'RCM')
	#raw_input("Enter")
	return picked_rxn
def pick_replicator2(dice_roll, rxn, CRS, concentrations, mu = 0.001):
	'''Given a dice_roll and a replication reaction, determine the mutation outcome, return rxn object
	Arguements:
		- dice_roll: random number between 0 and total mutation propensity
		- rxn: original replication reaction
		- CRS: CRS object
		- concentrations: concentration array containing all replicators and monomer concentrations
		- mu: per-base mutation rate
	Return:
		- picked_rxn: a Reaction object containing the new sequence to be produced and monomers to be consumed
			If not enough resources present to replicate, a null Reaction object is returned
	'''
	checkpoint = 0.0
	seq_found = False
	# Template sequence being copied (string of 'A'/'B' residues).
	seq = CRS.molecule_list[rxn.products[0]]
	# print "Copying seqeunce: ", seq
	# print "Dice Roll: ", dice_roll
	R_L = len(seq)
	reactant_concentrations = concentrations[rxn.reactants]
	replicator_concentration = concentrations[rxn.products]
	reactant_coeff = rxn.reactant_coeff
	#catalyzed_constants = rxn.catalyzed_constants
	#Calculate Propensity
	Ap = rxn.constant
	nA = reactant_coeff[0] # If you're reading this you should confirm that 'A' is stored at index 0
	nB = reactant_coeff[1] # If you're reading this you should confirm that 'B' is stored at index 1
	binomialA = 0 #Used for calculating the contribution from copying A-residues
	binomialB = 0 #Used for calculating the intermediate of contribution from copying A-residues and B-residues
	q_error = 0.0
	# Cumulative-sum (inverse-CDF) sampling over error patterns (eA, eB);
	# unlike pick_replicator8, concentrations enter linearly, not as powers.
	for eA in range(0, nA + 1):
		if seq_found == True:
			break
		#Here eA is the number of errors in copying A-residues
		binomialA = (math.factorial(nA)/(math.factorial(nA - eA)*math.factorial(eA)))
		for eB in range(0, nB + 1):
			# Here eB is the number of errors in copying B-residues
			if eA == 0 and eB == 0:
				# Keeps perfect copying probability seperate from copies made with errors
				q_p = pow(1 - mu, R_L)*(reactant_concentrations[0]*nA)*(reactant_concentrations[1]*nB)
				checkpoint += rxn.constant*q_p*replicator_concentration
			else:
				binomialB = (math.factorial(nB)/(math.factorial(nB - eB)*math.factorial(eB))) #adds number of mutants with eB B-errors
				q_error = pow(mu, eA + eB)*pow(1 - mu, R_L - eA - eB)*binomialA*binomialB*( reactant_concentrations[0]*(nA - eA +eB)*reactant_concentrations[1]*(nB - eB + eA ) )
				checkpoint += rxn.constant*q_error*replicator_concentration
			if checkpoint >= dice_roll:
				A_errors = eA
				B_errors = eB
				seq_found = True
				break
	# print "eA, eB: ", eA, eB
	# print "checkpoint: ", checkpoint
	# NOTE(review): if dice_roll exceeds the total accumulated propensity,
	# A_errors/B_errors are never bound and the next line raises NameError.
	Astring = 'B'*A_errors + 'A'*(nA - A_errors)
	Bstring = 'A'*B_errors + 'B'*(nB - B_errors)
	Alist = list(Astring)
	Blist = list(Bstring)
	random.shuffle(Alist)
	random.shuffle(Blist)
	# Rebuild the copy: each template position draws a (possibly mutated)
	# residue of the matching type from the shuffled pools.
	new_seq = ''
	for i in range(R_L):
		if seq[i] == 'A':
			new_seq += Alist.pop()
		elif seq[i] == 'B':
			new_seq += Blist.pop()
	Acount = new_seq.count('A')
	Bcount = new_seq.count('B')
	# print "Mutated Seq: ", new_seq
	# Not enough free monomers: return a null reaction instead of the mutant.
	if (Acount != 0 and Acount > reactant_concentrations[0]) or (Bcount != 0 and Bcount > reactant_concentrations[1]):
		print 'New check: Not enough food to replicate'
		picked_rxn = Core.Reaction(-1,products = [0,1], product_coeff = [0,0], reactants =[0, 1], reactant_coeff = [0, 0], prop = 'RCM')
	else:
		new_seq_ID = CRS.molecule_dict[new_seq]
		picked_rxn = Core.Reaction(-1,products = [new_seq_ID], product_coeff = [1], reactants =[0, 1], reactant_coeff = [Acount, Bcount], prop = 'RCM')
	#raw_input("Enter")
	return picked_rxn
####################################################
def pick_replicator1(dice_roll, rxn, CRS, concentrations, mu = 0.001):
	'''Given a dice_roll and a replication reaction, determine the mutation outcome, return rxn object
	Arguements:
		- dice_roll: random number between 0 and total mutation propensity
		- rxn: original replication reaction
		- CRS: CRS object
		- concentrations: concentration array containing all replicators and monomer concentrations
		- mu: per-base mutation rate
	Return:
		- picked_rxn: a Reaction object containing the new sequence to be produced and monomers to be consumed
			If not enough resources present to replicate, a null Reaction object is returned
	'''
	checkpoint = 0.0
	seq_found = False
	# Template sequence being copied (string of 'A'/'B' residues).
	seq = CRS.molecule_list[rxn.products[0]]
	# print "Copying seqeunce: ", seq
	# print "Dice Roll: ", dice_roll
	R_L = len(seq)
	reactant_concentrations = concentrations[rxn.reactants]
	replicator_concentration = concentrations[rxn.products]
	reactant_coeff = rxn.reactant_coeff
	#catalyzed_constants = rxn.catalyzed_constants
	#Calculate Propensity
	Ap = rxn.constant
	nA = reactant_coeff[0] # If you're reading this you should confirm that 'A' is stored at index 0
	nB = reactant_coeff[1] # If you're reading this you should confirm that 'B' is stored at index 1
	binomialA = 0 #Used for calculating the contribution from copying A-residues
	binomialB = 0 #Used for calculating the intermediate of contribution from copying A-residues and B-residues
	q_error = 0.0
	# Cumulative-sum (inverse-CDF) sampling over error patterns (eA, eB).
	# NOTE(review): exponents like (nA - eA)/R_L use Python 2 integer
	# division, truncating to 0 whenever the numerator is < R_L -- compare
	# pick_replicator8, which uses the raw counts; confirm this is intended.
	for eA in range(0, nA + 1):
		#Here eA is the number of errors in copying A-residues
		if seq_found == True:
			break
		binomialA = (math.factorial(nA)/(math.factorial(nA - eA)*math.factorial(eA)))*pow(reactant_concentrations[0], (nA - eA)/R_L )*pow(reactant_concentrations[1], eA/R_L) #calculates number of sequences with eA errors in copying A and the resource contribution to these sequences
		for eB in range(0, nB + 1):
			# Here eB is the number of errors in copying B-residues
			if eA == 0 and eB == 0:
				# Keeps perfect copying probability seperate from copies made with errors
				q_p = pow(1 - mu, R_L)*pow(reactant_concentrations[0], nA/R_L)*pow(reactant_concentrations[1], nB/R_L)
				checkpoint += rxn.constant*q_p*replicator_concentration
			else:
				binomialB = (math.factorial(nB)/(math.factorial(nB - eB)*math.factorial(eB)))*pow(reactant_concentrations[1], (nB - eB)/R_L)*pow(reactant_concentrations[0], eB/R_L) #adds number of mutants with eB B-errors
				q_error = pow(mu, eA + eB)*pow(1 - mu, R_L - eA - eB)*binomialA*binomialB
				checkpoint += rxn.constant*q_error*replicator_concentration
			if checkpoint >= dice_roll:
				A_errors = eA
				B_errors = eB
				seq_found = True
				break
	# print "eA, eB: ", eA, eB
	# print "checkpoint: ", checkpoint
	# NOTE(review): if dice_roll exceeds the total accumulated propensity,
	# A_errors/B_errors are never bound and the next line raises NameError.
	Astring = 'B'*A_errors + 'A'*(nA - A_errors)
	Bstring = 'A'*B_errors + 'B'*(nB - B_errors)
	Alist = list(Astring)
	Blist = list(Bstring)
	random.shuffle(Alist)
	random.shuffle(Blist)
	# Rebuild the copy: each template position draws a (possibly mutated)
	# residue of the matching type from the shuffled pools.
	new_seq = ''
	for i in range(R_L):
		if seq[i] == 'A':
			new_seq += Alist.pop()
		elif seq[i] == 'B':
			new_seq += Blist.pop()
	Acount = new_seq.count('A')
	Bcount = new_seq.count('B')
	# print "Mutated Seq: ", new_seq
	# Not enough free monomers: return a null reaction instead of the mutant.
	if (Acount != 0 and Acount > reactant_concentrations[0]) or (Bcount != 0 and Bcount > reactant_concentrations[1]):
		print 'New check: Not enough food to replicate'
		picked_rxn = Core.Reaction(-1,products = [0,1], product_coeff = [0,0], reactants =[0, 1], reactant_coeff = [0, 0], prop = 'RCM')
	else:
		new_seq_ID = CRS.molecule_dict[new_seq]
		picked_rxn = Core.Reaction(-1,products = [new_seq_ID], product_coeff = [1], reactants =[0, 1], reactant_coeff = [Acount, Bcount], prop = 'RCM')
	#raw_input("Enter")
	return picked_rxn
| 40.150776 | 278 | 0.684394 | import numpy as np
import PropensityFunctions as Propensity
import CoreClasses as Core
import random
import math
import OutputFunctions as Out
import InitializeFunctions as Init
####################################################
### Load C library
####################################################
from ctypes import cdll
from ctypes import byref, c_int, c_ulong, c_double, POINTER
def get_libpath():
	"""
	Return the absolute path of the platform-specific SSA shared library.

	The binary is expected in the 'clibs' directory next to this module.

	Raises:
		RuntimeError: if the host platform is not Linux, Darwin or Windows.
	"""
	import os
	from os.path import dirname, abspath, realpath
	from platform import system
	# Resolve symlinks so the path is correct even when the package is linked.
	root = dirname(abspath(realpath(__file__)))
	if system() == 'Linux':
		library = 'Linux-SSA.so'
	elif system() == 'Darwin':
		library = 'OSX-SSA.so'
	elif system() == 'Windows':
		library = "Win-SSA.so"
	else:
		raise RuntimeError("unsupported platform - \"{}\"".format(system()))
	# Dropped the unused `import re` and unused `join` import of the original.
	return os.path.join(root, 'clibs', library)
# Bind the platform-specific shared library once at import time.
_SSA_LIB = cdll.LoadLibrary(get_libpath())
# C signature: current_t, next_t, r_seed, max_x, max_y, num_m, num_r, concentrations, constants, propensity_ints, reaction_arr, catalyst_arr
_SSA_LIB.SSA_update.argtypes = (c_double, c_double, c_int, c_int, c_int, c_int, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_int), POINTER(c_int), POINTER(c_double))
_SSA_LIB.SSA_update.restype = c_double
SSA_update = _SSA_LIB.SSA_update ### Renaming function for convenience
####################################################
####################################################
def pick_reaction(dice_roll, CRS, concentrations, **kwargs):
	''' Picks a reaction to occur stochastically
	Walks the cumulative propensity ("checkpoint") over CRS.reactions and
	returns the reaction whose cumulative sum first reaches dice_roll.
	Arguements:
		- dice_roll: float which should be a number between zero and the total propensity of reactions
		- CRS: the CRS object which contains all possible reactions and molecules
		- concentrations: the list of concentrations indexed by molecule ID
		- propensity_function: which propensity function to use, default: standard
		- kwargs: must supply 'mu' (per-base mutation rate) for 'RM*' reactions
	Return:
		- rxn: a Reaction object'''
	checkpoint = 0.0
	for rxn in CRS.reactions:
		reactant_concentrations = [concentrations[i] for i in rxn.reactants]
		catalyst_concentrations = [concentrations[i] for i in rxn.catalysts]
		reactant_coeff = rxn.reactant_coeff
		catalyzed_constants = rxn.catalyzed_constants
		#print rxn.catalysts
		# Dispatch on the reaction's propensity tag.
		if rxn.prop == 'STD':
			# print "Reactant concentrations: ", reactant_concentrations
			# print 'Product ID numbers: ',rxn.products
			checkpoint += Propensity.standard_propensity(rxn, CRS, concentrations)
			#print "dice_roll: ", dice_roll, ' checkpoint: ', checkpoint
			if checkpoint >= dice_roll:
				break
		elif rxn.prop == 'RM8':
			mu = kwargs['mu']
			checkpoint += Propensity.replicator_composition_propensity_envMutation8(rxn, CRS, concentrations, mu = mu)
			if checkpoint >= dice_roll:
				# Re-roll within this reaction's propensity mass to pick the mutant copy.
				mutation_dice = random.random()*Propensity.replicator_composition_propensity_envMutation8(rxn, CRS, concentrations, mu = mu)
				rxn = pick_replicator8(mutation_dice, rxn,CRS, concentrations, mu)
				#print CRS.molecule_list[rxn.products[0]]
				break
		elif rxn.prop == 'RM2':
			mu = kwargs['mu']
			checkpoint += Propensity.replicator_composition_propensity_envMutation2(rxn, CRS, concentrations, mu = mu)
			if checkpoint >= dice_roll:
				mutation_dice = random.random()*Propensity.replicator_composition_propensity_envMutation2(rxn, CRS, concentrations, mu = mu)
				rxn = pick_replicator2(mutation_dice, rxn,CRS, concentrations, mu)
				#print CRS.molecule_list[rxn.products[0]]
				break
		elif rxn.prop == 'RM1':
			# NOTE(review): 'RM1' reuses the RM2 propensity and pick_replicator2;
			# no *1 variant is called here -- confirm this is intentional.
			mu = kwargs['mu']
			checkpoint += Propensity.replicator_composition_propensity_envMutation2(rxn, CRS, concentrations, mu = mu)
			if checkpoint >= dice_roll:
				mutation_dice = random.random()*Propensity.replicator_composition_propensity_envMutation2(rxn, CRS, concentrations, mu = mu)
				rxn = pick_replicator2(mutation_dice, rxn,CRS, concentrations, mu)
				#print CRS.molecule_list[rxn.products[0]]
				break
		elif rxn.prop[:2] == 'MM':
			# Michaelis-Menten: the digit after 'MM' is the base-10 exponent of kcat.
			expon = int(rxn.prop[2])
			kcat = 10**expon
			checkpoint += Propensity.MM_kinetics(rxn, CRS, concentrations, kcat)
			if checkpoint >= dice_roll:
				break
	#raw_input("Enter")
	return rxn
####################################################
def execute_rxn(rxn, CRS, concentrations):
	'''Apply one firing of a reaction to the concentration vector.

	Arguements:
		- rxn: Reaction object whose stoichiometry is applied
		- CRS: CRS object containing the entire system (not consulted here)
		- concentrations: list of molecule concentrations indexed by ID
	Return:
		- concentrations: the same sequence, updated in place'''
	# Consume each reactant by its stoichiometric coefficient.
	for idx, molecule_id in enumerate(rxn.reactants):
		concentrations[molecule_id] -= rxn.reactant_coeff[idx]
	# Create each product by its stoichiometric coefficient.
	for idx, molecule_id in enumerate(rxn.products):
		concentrations[molecule_id] += rxn.product_coeff[idx]
	return concentrations
####################################################
def SSA_evolve(tau, tau_max, concentrations, CRS, random_seed, output_prefix= None, t_out= None):
	'''Run the C-accelerated stochastic simulation from time tau to tau_max.

	Arguements:
		- tau: current simulation time (start)
		- tau_max: time at which the simulation stops
		- concentrations: concentration array, updated in place by the C routine
		- CRS: CRS object describing all molecules and reactions
		- random_seed: seed for the Python RNG that generates per-step C seeds
		- output_prefix: file prefix for time-series output (must accompany t_out)
		- t_out: output time frequency (must accompany output_prefix)
	Return:
		- concentrations: final concentration array
	NOTE(review): the output calls below hard-code the prefix 'tutorial_data'
	instead of using output_prefix -- confirm intent.
	NOTE(review): Python 2 only -- relies on `print tau`, sys.maxint and
	random.jumpahead, all removed in Python 3.
	'''
	if (output_prefix != None and t_out == None):
		raise ValueError('Output file prefix specified but no output frequency given, please provide an output time frequency')
	elif (output_prefix == None and type(t_out) == float):
		raise ValueError('Output frequency provided but output file prefix was not provided, please provide a file prefix name')
	import sys
	import random
	from ctypes import c_int, c_double, POINTER
	# Flatten the CRS into C-compatible arrays and grab raw pointers once.
	constants, propensity_ints, reaction_arr, catalyst_arr = Init.convert_CRS_to_npArrays(CRS)
	concentrations_ptr, constants_ptr, propensity_ints_ptr, reaction_arr_ptr, catalyst_arr_ptr= Init.get_c_pointers(concentrations, constants, propensity_ints, reaction_arr, catalyst_arr)
	freq_counter = 0.0
	random.seed(random_seed)
	while tau < tau_max:
		# Get seed
		r_seed = random.randint(0, sys.maxint)
		# Update concentrations in place using C function
		c_tau = SSA_update(c_double(tau), c_double(freq_counter),r_seed, c_int(1),c_int(1), c_int(len(CRS.molecule_list)), c_int(len(constants)), concentrations_ptr, constants_ptr, propensity_ints_ptr, reaction_arr_ptr, catalyst_arr_ptr )
		# Update Time
		tau = c_tau
		# Update random seed
		random.jumpahead(tau-freq_counter)
		print tau
		# Output data
		Out.output_concentrations(concentrations, 'tutorial_data',time = freq_counter)
		freq_counter += t_out
	Out.tidy_timeseries(CRS.molecule_list, 'tutorial_data', delete_dat = True)
	return concentrations
####################################################
def pick_replicator8(dice_roll, rxn, CRS, concentrations, mu = 0.001):
	'''Given a dice_roll and a replication reaction, determine the mutation outcome, return rxn object
	Arguements:
		- dice_roll: random number between 0 and total mutation propensity
		- rxn: original replication reaction
		- CRS: CRS object
		- concentrations: concentration array containing all replicators and monomer concentrations
		- mu: per-base mutation rate
	Return:
		- picked_rxn: a Reaction object containing the new sequence to be produced and monomers to be consumed
			If not enough resources present to replicate, a null Reaction object is returned
	'''
	checkpoint = 0.0
	seq_found = False
	# Template sequence being copied (string of 'A'/'B' residues).
	seq = CRS.molecule_list[rxn.products[0]]
	# print "Copying seqeunce: ", seq
	# print "Dice Roll: ", dice_roll
	R_L = len(seq)
	reactant_concentrations = concentrations[rxn.reactants]
	replicator_concentration = concentrations[rxn.products]
	reactant_coeff = rxn.reactant_coeff
	#catalyzed_constants = rxn.catalyzed_constants
	#Calculate Propensity
	Ap = rxn.constant
	nA = reactant_coeff[0] # If you're reading this you should confirm that 'A' is stored at index 0
	nB = reactant_coeff[1] # If you're reading this you should confirm that 'B' is stored at index 1
	binomialA = 0 #Used for calculating the contribution from copying A-residues
	binomialB = 0 #Used for calculating the intermediate of contribution from copying A-residues and B-residues
	q_error = 0.0
	# Cumulative-sum (inverse-CDF) sampling over error patterns (eA, eB):
	# accumulate each pattern's propensity into checkpoint until it reaches dice_roll.
	for eA in range(0, nA + 1):
		#Here eA is the number of errors in copying A-residues
		if seq_found == True:
			break
		binomialA = (math.factorial(nA)/(math.factorial(nA - eA)*math.factorial(eA)))*pow(reactant_concentrations[0], nA - eA)*pow(reactant_concentrations[1], eA) #calculates number of sequences with eA errors in copying A and the resource contribution to these sequences
		for eB in range(0, nB + 1):
			# Here eB is the number of errors in copying B-residues
			if eA == 0 and eB == 0:
				# Keeps perfect copying probability seperate from copies made with errors
				q_p = pow(1 - mu, R_L)*pow(reactant_concentrations[0], nA)*pow(reactant_concentrations[1], nB)
				checkpoint += rxn.constant*q_p*replicator_concentration
			else:
				binomialB = (math.factorial(nB)/(math.factorial(nB - eB)*math.factorial(eB)))*pow(reactant_concentrations[1], nB - eB)*pow(reactant_concentrations[0], eB) #adds number of mutants with eB B-errors
				q_error = pow(mu, eA + eB)*pow(1 - mu, R_L - eA - eB)*binomialA*binomialB
				checkpoint += rxn.constant*q_error*replicator_concentration
			if checkpoint >= dice_roll:
				A_errors = eA
				B_errors = eB
				seq_found = True
				break
	# print "eA, eB: ", eA, eB
	# print "checkpoint: ", checkpoint
	# NOTE(review): if dice_roll exceeds the total accumulated propensity,
	# A_errors/B_errors are never bound and the next line raises NameError.
	Astring = 'B'*A_errors + 'A'*(nA - A_errors)
	Bstring = 'A'*B_errors + 'B'*(nB - B_errors)
	Alist = list(Astring)
	Blist = list(Bstring)
	random.shuffle(Alist)
	random.shuffle(Blist)
	# Rebuild the copy: each template position draws a (possibly mutated)
	# residue of the matching type from the shuffled pools.
	new_seq = ''
	for i in range(R_L):
		if seq[i] == 'A':
			new_seq += Alist.pop()
		elif seq[i] == 'B':
			new_seq += Blist.pop()
	Acount = new_seq.count('A')
	Bcount = new_seq.count('B')
	# print "Mutated Seq: ", new_seq
	# Not enough free monomers: return a null reaction instead of the mutant.
	if (Acount != 0 and Acount > reactant_concentrations[0]) or (Bcount != 0 and Bcount > reactant_concentrations[1]):
		print 'New check: Not enough food to replicate'
		picked_rxn = Core.Reaction(-1,products = [0,1], product_coeff = [0,0], reactants =[0, 1], reactant_coeff = [0, 0], prop = 'RCM')
	else:
		new_seq_ID = CRS.molecule_dict[new_seq]
		picked_rxn = Core.Reaction(-1,products = [new_seq_ID], product_coeff = [1], reactants =[0, 1], reactant_coeff = [Acount, Bcount], prop = 'RCM')
	#raw_input("Enter")
	return picked_rxn
def pick_replicator2(dice_roll, rxn, CRS, concentrations, mu = 0.001):
	'''Given a dice_roll and a replication reaction, determine the mutation outcome, return rxn object
	Arguements:
		- dice_roll: random number between 0 and total mutation propensity
		- rxn: original replication reaction
		- CRS: CRS object
		- concentrations: concentration array containing all replicators and monomer concentrations
		- mu: per-base mutation rate
	Return:
		- picked_rxn: a Reaction object containing the new sequence to be produced and monomers to be consumed
			If not enough resources present to replicate, a null Reaction object is returned
	'''
	checkpoint = 0.0
	seq_found = False
	# Template sequence being copied (string of 'A'/'B' residues).
	seq = CRS.molecule_list[rxn.products[0]]
	# print "Copying seqeunce: ", seq
	# print "Dice Roll: ", dice_roll
	R_L = len(seq)
	reactant_concentrations = concentrations[rxn.reactants]
	replicator_concentration = concentrations[rxn.products]
	reactant_coeff = rxn.reactant_coeff
	#catalyzed_constants = rxn.catalyzed_constants
	#Calculate Propensity
	Ap = rxn.constant
	nA = reactant_coeff[0] # If you're reading this you should confirm that 'A' is stored at index 0
	nB = reactant_coeff[1] # If you're reading this you should confirm that 'B' is stored at index 1
	binomialA = 0 #Used for calculating the contribution from copying A-residues
	binomialB = 0 #Used for calculating the intermediate of contribution from copying A-residues and B-residues
	q_error = 0.0
	# Cumulative-sum (inverse-CDF) sampling over error patterns (eA, eB);
	# unlike pick_replicator8, concentrations enter linearly, not as powers.
	for eA in range(0, nA + 1):
		if seq_found == True:
			break
		#Here eA is the number of errors in copying A-residues
		binomialA = (math.factorial(nA)/(math.factorial(nA - eA)*math.factorial(eA)))
		for eB in range(0, nB + 1):
			# Here eB is the number of errors in copying B-residues
			if eA == 0 and eB == 0:
				# Keeps perfect copying probability seperate from copies made with errors
				q_p = pow(1 - mu, R_L)*(reactant_concentrations[0]*nA)*(reactant_concentrations[1]*nB)
				checkpoint += rxn.constant*q_p*replicator_concentration
			else:
				binomialB = (math.factorial(nB)/(math.factorial(nB - eB)*math.factorial(eB))) #adds number of mutants with eB B-errors
				q_error = pow(mu, eA + eB)*pow(1 - mu, R_L - eA - eB)*binomialA*binomialB*( reactant_concentrations[0]*(nA - eA +eB)*reactant_concentrations[1]*(nB - eB + eA ) )
				checkpoint += rxn.constant*q_error*replicator_concentration
			if checkpoint >= dice_roll:
				A_errors = eA
				B_errors = eB
				seq_found = True
				break
	# print "eA, eB: ", eA, eB
	# print "checkpoint: ", checkpoint
	# NOTE(review): if dice_roll exceeds the total accumulated propensity,
	# A_errors/B_errors are never bound and the next line raises NameError.
	Astring = 'B'*A_errors + 'A'*(nA - A_errors)
	Bstring = 'A'*B_errors + 'B'*(nB - B_errors)
	Alist = list(Astring)
	Blist = list(Bstring)
	random.shuffle(Alist)
	random.shuffle(Blist)
	# Rebuild the copy: each template position draws a (possibly mutated)
	# residue of the matching type from the shuffled pools.
	new_seq = ''
	for i in range(R_L):
		if seq[i] == 'A':
			new_seq += Alist.pop()
		elif seq[i] == 'B':
			new_seq += Blist.pop()
	Acount = new_seq.count('A')
	Bcount = new_seq.count('B')
	# print "Mutated Seq: ", new_seq
	# Not enough free monomers: return a null reaction instead of the mutant.
	if (Acount != 0 and Acount > reactant_concentrations[0]) or (Bcount != 0 and Bcount > reactant_concentrations[1]):
		print 'New check: Not enough food to replicate'
		picked_rxn = Core.Reaction(-1,products = [0,1], product_coeff = [0,0], reactants =[0, 1], reactant_coeff = [0, 0], prop = 'RCM')
	else:
		new_seq_ID = CRS.molecule_dict[new_seq]
		picked_rxn = Core.Reaction(-1,products = [new_seq_ID], product_coeff = [1], reactants =[0, 1], reactant_coeff = [Acount, Bcount], prop = 'RCM')
	#raw_input("Enter")
	return picked_rxn
####################################################
def pick_replicator1(dice_roll, rxn, CRS, concentrations, mu = 0.001):
    '''Given a dice_roll and a replication reaction, determine the mutation outcome.

    Variant of the replicator picker in which the monomer availability
    enters the propensity as fractional powers of the concentrations
    (exponents proportional to the residue counts over sequence length)
    rather than as linear factors.

    Arguments:
        - dice_roll: random number between 0 and the total mutation propensity
        - rxn: original replication reaction
        - CRS: CRS object (provides molecule_list / molecule_dict lookups)
        - concentrations: concentration array containing all replicators and
          monomer concentrations
        - mu: per-base mutation rate
    Return:
        - picked_rxn: a Reaction object containing the new sequence to be
          produced and the monomers to be consumed.
          If not enough resources are present to replicate, a null Reaction
          object is returned.
    '''
    checkpoint = 0.0  # cumulative propensity, walked until it passes dice_roll
    seq_found = False
    seq = CRS.molecule_list[rxn.products[0]]  # template sequence being copied
    # print "Copying seqeunce: ", seq
    # print "Dice Roll: ", dice_roll
    R_L = len(seq)  # replicator (template) length
    reactant_concentrations = concentrations[rxn.reactants]
    replicator_concentration = concentrations[rxn.products]
    reactant_coeff = rxn.reactant_coeff
    #catalyzed_constants = rxn.catalyzed_constants
    #Calculate Propensity
    Ap = rxn.constant
    nA = reactant_coeff[0] # If you're reading this you should confirm that 'A' is stored at index 0
    nB = reactant_coeff[1] # If you're reading this you should confirm that 'B' is stored at index 1
    binomialA = 0 #Used for calculating the contribution from copying A-residues
    binomialB = 0 #Used for calculating the intermediate of contribution from copying A-residues and B-residues
    q_error = 0.0
    # NOTE(review): this module runs under Python 2 (print statements), so
    # the exponents below -- (nA - eA)/R_L, eA/R_L, nA/R_L, ... -- use
    # integer (floor) division and collapse to 0 for any mixed sequence.
    # This looks like it was intended to be float division; confirm.
    for eA in range(0, nA + 1):
        #Here eA is the number of errors in copying A-residues
        if seq_found == True:
            break
        binomialA = (math.factorial(nA)/(math.factorial(nA - eA)*math.factorial(eA)))*pow(reactant_concentrations[0], (nA - eA)/R_L )*pow(reactant_concentrations[1], eA/R_L) #calculates number of sequences with eA errors in copying A and the resource contribution to these sequences
        for eB in range(0, nB + 1):
            # Here eB is the number of errors in copying B-residues
            if eA == 0 and eB == 0:
                # Keeps perfect copying probability seperate from copies made with errors
                q_p = pow(1 - mu, R_L)*pow(reactant_concentrations[0], nA/R_L)*pow(reactant_concentrations[1], nB/R_L)
                checkpoint += rxn.constant*q_p*replicator_concentration
            else:
                binomialB = (math.factorial(nB)/(math.factorial(nB - eB)*math.factorial(eB)))*pow(reactant_concentrations[1], (nB - eB)/R_L)*pow(reactant_concentrations[0], eB/R_L) #adds number of mutants with eB B-errors
                # Weight of this error class: eA+eB mutated bases and the
                # rest copied faithfully.
                q_error = pow(mu, eA + eB)*pow(1 - mu, R_L - eA - eB)*binomialA*binomialB
                checkpoint += rxn.constant*q_error*replicator_concentration
            if checkpoint >= dice_roll:
                A_errors = eA
                B_errors = eB
                seq_found = True
                break
    # print "eA, eB: ", eA, eB
    # print "checkpoint: ", checkpoint
    # NOTE(review): if the loops finish without checkpoint ever reaching
    # dice_roll, A_errors/B_errors are unbound and the next line raises a
    # NameError -- callers must guarantee dice_roll <= total propensity.
    Astring = 'B'*A_errors + 'A'*(nA - A_errors)
    Bstring = 'A'*B_errors + 'B'*(nB - B_errors)
    Alist = list(Astring)
    Blist = list(Bstring)
    random.shuffle(Alist)
    random.shuffle(Blist)
    # Rebuild the daughter sequence: every position that was 'A' in the
    # template draws from the shuffled A-copy pool, likewise for 'B'.
    new_seq = ''
    for i in range(R_L):
        if seq[i] == 'A':
            new_seq += Alist.pop()
        elif seq[i] == 'B':
            new_seq += Blist.pop()
    Acount = new_seq.count('A')
    Bcount = new_seq.count('B')
    # print "Mutated Seq: ", new_seq
    # Reject the replication when the mutant needs more free monomers of
    # either kind than are currently available.
    if (Acount != 0 and Acount > reactant_concentrations[0]) or (Bcount != 0 and Bcount > reactant_concentrations[1]):
        print 'New check: Not enough food to replicate'
        picked_rxn = Core.Reaction(-1,products = [0,1], product_coeff = [0,0], reactants =[0, 1], reactant_coeff = [0, 0], prop = 'RCM')
    else:
        new_seq_ID = CRS.molecule_dict[new_seq]
        picked_rxn = Core.Reaction(-1,products = [new_seq_ID], product_coeff = [1], reactants =[0, 1], reactant_coeff = [Acount, Bcount], prop = 'RCM')
    #raw_input("Enter")
    return picked_rxn
| 1,542 | 0 | 23 |
ea3e82a696109ee05f01667dac00553b894ab925 | 1,895 | py | Python | siamreppoints/models/model_builder.py | zhangximing213/RPT_tracker | abbecf94974dbd320dff175838a923bd2113135a | [
"MIT"
] | 80 | 2020-09-04T06:18:47.000Z | 2022-03-14T11:23:53.000Z | siamreppoints/models/model_builder.py | songheony/RPT | ff4992e8e7d265d6df9db5a86e4e0868647dc34a | [
"MIT"
] | 20 | 2020-11-05T11:05:21.000Z | 2022-03-22T09:38:42.000Z | siamreppoints/models/model_builder.py | songheony/RPT | ff4992e8e7d265d6df9db5a86e4e0868647dc34a | [
"MIT"
] | 17 | 2020-09-28T06:55:28.000Z | 2022-03-24T02:59:55.000Z | # Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
from siamreppoints.core.config import cfg
from siamreppoints.models.backbone import get_backbone
from siamreppoints.models.head import get_rpn_head
from siamreppoints.models.neck import get_neck
| 29.609375 | 89 | 0.576253 | # Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
from siamreppoints.core.config import cfg
from siamreppoints.models.backbone import get_backbone
from siamreppoints.models.head import get_rpn_head
from siamreppoints.models.neck import get_neck
class ModelBuilder(nn.Module):
    """Siamese RepPoints tracking model.

    Wires together a shared backbone, an optional channel-adjust neck and
    the RepPoints RPN head. ``template()`` caches the target embedding,
    ``track()`` scores a search region against it.
    """
    def __init__(self):
        super(ModelBuilder, self).__init__()
        # Shared feature extractor for template and search branches.
        self.backbone = get_backbone(cfg.BACKBONE.TYPE,
                                     **cfg.BACKBONE.KWARGS)
        # Optional neck adapting backbone channels for the head.
        if cfg.ADJUST.ADJUST:
            self.neck = get_neck(cfg.ADJUST.TYPE,
                                 **cfg.ADJUST.KWARGS)
        # Head producing classification scores and point predictions.
        self.rpn_head = get_rpn_head(cfg.RPN.TYPE,
                                     **cfg.RPN.KWARGS)

    def instance(self, x):
        """Extract and cache correlation features for a search crop."""
        feats = self.backbone(x)
        if cfg.ADJUST.ADJUST:
            feats = self.neck(feats)
        #self.cf = feats[cfg.ADJUST.LAYER-1]
        self.cf = torch.cat([feats[2], feats[1]], dim=1)

    def template(self, z):
        """Embed the target template and keep it for later track() calls."""
        feats = self.backbone(z)
        if cfg.ADJUST.ADJUST:
            feats = self.neck(feats)
        self.zf = feats

    def track(self, x, instance_size):
        """Score one search region against the stored template.

        Returns a dict with the sigmoid classification map ('score') and
        the refined point predictions ('bbox').
        """
        feats = self.backbone(x)
        if cfg.ADJUST.ADJUST:
            feats = self.neck(feats)
        score_map, init_pts, refined_pts = self.rpn_head(self.zf, feats, instance_size)
        # Flatten the score map to [batch, anchors, 1] and squash to (0, 1).
        score_map = score_map.permute(0, 2, 3, 1)
        score_map = score_map.reshape(score_map.shape[0], -1, 1)
        score_map = torch.sigmoid(score_map)
        #self.cf = feats[cfg.ADJUST.LAYER-1]
        self.cf = torch.cat([feats[2], feats[1]], dim=1)
        return {
            'score': score_map,
            'bbox': refined_pts,
        }
| 1,309 | 9 | 142 |
6465776eed91773a22f3fe8b2c38f5d43dc3a1c0 | 7,548 | py | Python | strf_cmd.py | dkratzert/StructureFinder | e0be67cb47ad589b87c7175a02c908734e415ee8 | [
"MIT"
] | 12 | 2017-11-23T08:45:17.000Z | 2022-02-16T18:02:35.000Z | strf_cmd.py | dkratzert/StructureFinder | e0be67cb47ad589b87c7175a02c908734e415ee8 | [
"MIT"
] | 4 | 2019-12-12T15:28:50.000Z | 2022-02-22T06:28:48.000Z | strf_cmd.py | dkratzert/StructureFinder | e0be67cb47ad589b87c7175a02c908734e415ee8 | [
"MIT"
] | null | null | null | import argparse
import sys
import time
from pathlib import Path
from sqlite3 import DatabaseError
from misc import update_check
from misc.version import VERSION
from pymatgen.core.lattice import Lattice
from searcher.database_handler import DatabaseRequest, StructureTable
from searcher.filecrawler import put_files_in_db
from searcher.misc import vol_unitcell
parser = argparse.ArgumentParser(description='Command line version of StructureFinder to collect .cif/.res files to a '
'database.\n'
'StructureFinder will search for cif files in the given directory(s) '
'recursively. (Either -c, -r or both options must be active!)')
parser.add_argument("-d",
dest="dir",
metavar='"directory"',
type=str,
action='append',
help='Directory(s) where cif files are located.')
parser.add_argument("-e",
dest="ex",
metavar='"directory"',
type=str,
action='append',
help='Directory names to be excluded from the file search. Default is:\n'
'"ROOT", ".OLEX", "TMP", "TEMP", "Papierkorb", "Recycle.Bin" '
'Modifying -e option discards the default.')
parser.add_argument("-o",
dest="outfile",
metavar='"file name"',
type=str,
help='Name of the output database file. Default: "structuredb.sqlite"')
parser.add_argument("-c",
dest="fillcif",
default=False,
action='store_true',
help='Add .cif files (crystallographic information file) to the database.')
parser.add_argument("-r",
dest="fillres",
default=False,
action='store_true',
help='Add SHELX .res files to the database.')
parser.add_argument("--delete",
dest="delete",
default=False,
action='store_true',
help="Delete and do not append to previous database.")
parser.add_argument("-f",
dest='cell',
#nargs=6,
type=lambda s: [float(item) for item in s.split()],
help='Search for the specified unit cell.'
)
def find_cell(cell: list):
    """
    Searches the structure database for unit cells similar to the given one.

    Candidates are pre-filtered by cell volume and then verified with a
    pymatgen lattice mapping. Matching structures are printed as a table;
    the process is terminated via sys.exit() when nothing matches.
    Reads the module-level ``args`` namespace for the database file name.

    :param cell: the six cell parameters [a, b, c, alpha, beta, gamma]
    """
    cell = [float(x) for x in cell]
    no_result = '\nNo similar unit cell found.'
    if args.outfile:
        dbfilename = args.outfile
    else:
        dbfilename = 'structuredb.sqlite'
    db, structures = get_database(dbfilename)
    # if args.more_results:
    #     # more results:
    #     print('more results on')
    #     vol_threshold = 0.04
    #     ltol = 0.08
    #     atol = 1.8
    # else:
    # regular:
    # Tolerances: relative volume window, lattice length and angle tolerance.
    vol_threshold = 0.02
    ltol = 0.03
    atol = 1.0
    volume = vol_unitcell(*cell)
    # the fist number in the result is the structureid:
    cells = structures.find_by_volume(volume, vol_threshold)
    idlist = []
    if not cells:
        print(no_result)
        sys.exit()
    lattice1 = Lattice.from_parameters(*cell)
    for num, curr_cell in enumerate(cells):
        try:
            lattice2 = Lattice.from_parameters(*curr_cell[1:7])
        except ValueError:
            # Skip database rows with degenerate/invalid cell parameters.
            continue
        mapping = lattice1.find_mapping(lattice2, ltol, atol, skip_rotation_matrix=True)
        if mapping:
            idlist.append(curr_cell[0])
    if not idlist:
        print(no_result)
        sys.exit()
    else:
        print('\n{} Structures found:'.format(len(idlist)))
        searchresult = structures.get_all_structure_names(idlist)
        print('ID | path | filename | data ')
        print('-' * 130)
        for res in searchresult:
            Id = res[0]
            # NOTE(review): assumes exactly three byte-string fields (path,
            # filename, dataname) per row -- confirm against the DB schema.
            path, filename, dataname = [x.decode('utf-8') for x in res if isinstance(x, bytes)]
            print('{:} | {:70s} | {:<25s} | {:s}'.format(Id, path, filename, dataname, ))
if __name__ == '__main__':
    args = parser.parse_args()
    if args.cell:
        # Cell-search mode: -f "a b c alpha beta gamma"
        find_cell(args.cell)
    else:
        try:
            # Without any -d directory, show usage instead of indexing.
            if not args.dir:
                parser.print_help()
                check_update()
                sys.exit()
        except IndexError:
            print("No valid search directory given.\n")
            print("Please run this as 'python3 stdb_cmd.py -d [directory]'\n")
            print("stdb_cmd will search for .cif files in [directory] recoursively.")
        run_index(args)
        # find_cell('10.5086 20.9035 20.5072 90.000 94.130 90.000'.split())
| 37.182266 | 127 | 0.536301 | import argparse
import sys
import time
from pathlib import Path
from sqlite3 import DatabaseError
from misc import update_check
from misc.version import VERSION
from pymatgen.core.lattice import Lattice
from searcher.database_handler import DatabaseRequest, StructureTable
from searcher.filecrawler import put_files_in_db
from searcher.misc import vol_unitcell
parser = argparse.ArgumentParser(description='Command line version of StructureFinder to collect .cif/.res files to a '
'database.\n'
'StructureFinder will search for cif files in the given directory(s) '
'recursively. (Either -c, -r or both options must be active!)')
parser.add_argument("-d",
dest="dir",
metavar='"directory"',
type=str,
action='append',
help='Directory(s) where cif files are located.')
parser.add_argument("-e",
dest="ex",
metavar='"directory"',
type=str,
action='append',
help='Directory names to be excluded from the file search. Default is:\n'
'"ROOT", ".OLEX", "TMP", "TEMP", "Papierkorb", "Recycle.Bin" '
'Modifying -e option discards the default.')
parser.add_argument("-o",
dest="outfile",
metavar='"file name"',
type=str,
help='Name of the output database file. Default: "structuredb.sqlite"')
parser.add_argument("-c",
dest="fillcif",
default=False,
action='store_true',
help='Add .cif files (crystallographic information file) to the database.')
parser.add_argument("-r",
dest="fillres",
default=False,
action='store_true',
help='Add SHELX .res files to the database.')
parser.add_argument("--delete",
dest="delete",
default=False,
action='store_true',
help="Delete and do not append to previous database.")
parser.add_argument("-f",
dest='cell',
#nargs=6,
type=lambda s: [float(item) for item in s.split()],
help='Search for the specified unit cell.'
)
def check_update():
    """Print a download hint when a newer StructureFinder release exists."""
    if not update_check.is_update_needed(VERSION=VERSION):
        return
    print('A new Version of StructureFinder is available at '
          'https://dkratzert.de/structurefinder.html')
def find_cell(cell: list):
    """
    Searches the structure database for unit cells similar to the given one.

    Candidates are pre-filtered by cell volume and then verified with a
    pymatgen lattice mapping. Matching structures are printed as a table;
    the process is terminated via sys.exit() when nothing matches.
    Reads the module-level ``args`` namespace for the database file name.

    :param cell: the six cell parameters [a, b, c, alpha, beta, gamma]
    """
    cell = [float(x) for x in cell]
    no_result = '\nNo similar unit cell found.'
    if args.outfile:
        dbfilename = args.outfile
    else:
        dbfilename = 'structuredb.sqlite'
    db, structures = get_database(dbfilename)
    # if args.more_results:
    #     # more results:
    #     print('more results on')
    #     vol_threshold = 0.04
    #     ltol = 0.08
    #     atol = 1.8
    # else:
    # regular:
    # Tolerances: relative volume window, lattice length and angle tolerance.
    vol_threshold = 0.02
    ltol = 0.03
    atol = 1.0
    volume = vol_unitcell(*cell)
    # the fist number in the result is the structureid:
    cells = structures.find_by_volume(volume, vol_threshold)
    idlist = []
    if not cells:
        print(no_result)
        sys.exit()
    lattice1 = Lattice.from_parameters(*cell)
    for num, curr_cell in enumerate(cells):
        try:
            lattice2 = Lattice.from_parameters(*curr_cell[1:7])
        except ValueError:
            # Skip database rows with degenerate/invalid cell parameters.
            continue
        mapping = lattice1.find_mapping(lattice2, ltol, atol, skip_rotation_matrix=True)
        if mapping:
            idlist.append(curr_cell[0])
    if not idlist:
        print(no_result)
        sys.exit()
    else:
        print('\n{} Structures found:'.format(len(idlist)))
        searchresult = structures.get_all_structure_names(idlist)
        print('ID | path | filename | data ')
        print('-' * 130)
        for res in searchresult:
            Id = res[0]
            # NOTE(review): assumes exactly three byte-string fields (path,
            # filename, dataname) per row -- confirm against the DB schema.
            path, filename, dataname = [x.decode('utf-8') for x in res if isinstance(x, bytes)]
            print('{:} | {:70s} | {:<25s} | {:s}'.format(Id, path, filename, dataname, ))
def run_index(args=None):
    """Index all .cif/.res files found below the given directories.

    Walks every directory from ``args.dir`` recursively, stores the found
    files in the SQLite database and finally builds the full-text-search
    tables and indexes. Prints a summary with the total file count and the
    elapsed time.

    :param args: parsed argparse namespace (dir, ex, outfile, delete,
                 fillcif, fillres). With no args, only a blank line is
                 printed before the update check.
    """
    ncifs = 0
    if not args:
        print('')
    else:
        if not any([args.fillres, args.fillcif]):
            print("Error: You need to give either option -c, -r or both.")
            sys.exit()
        if args.outfile:
            dbfilename = args.outfile
        else:
            dbfilename = 'structuredb.sqlite'
        if args.delete:
            # Start from scratch: remove any previous database file.
            try:
                dbf = Path(dbfilename)
                dbf.unlink()
            except FileNotFoundError:
                pass
            except PermissionError:
                print('Could not acess database file "{}". Is it used elsewhere?'.format(dbfilename))
                print('Giving up...')
                sys.exit()
        db, structures = get_database(dbfilename)
        time1 = time.perf_counter()
        for p in args.dir:
            # Continue the id sequence after whatever is already stored.
            lastid = db.get_lastrowid()
            if not lastid:
                lastid = 1
            else:
                lastid += 1
            try:
                # BUG FIX: accumulate over all search paths -- previously
                # ncifs was overwritten each iteration, so the final report
                # only counted files from the last -d directory.
                ncifs += put_files_in_db(searchpath=p, excludes=args.ex,
                                         structures=structures, lastid=lastid,
                                         fillres=args.fillres, fillcif=args.fillcif)
            except OSError as e:
                print("Unable to collect files:")
                print(e)
            except KeyboardInterrupt:
                sys.exit()
        print("---------------------")
        try:
            if db and structures:
                db.init_textsearch()
                structures.populate_fulltext_search_table()
                structures.make_indexes()
        except TypeError:
            print('No valid files found. They might be in excluded subdirectories.')
        time2 = time.perf_counter()
        diff = time2 - time1
        m, s = divmod(diff, 60)
        h, m = divmod(m, 60)
        tmessage = "\nTotal {3} cif/res files in '{4}'. Duration: {0:>2d} h, {1:>2d} m, {2:>3.2f} s"
        print(tmessage.format(int(h), int(m), s, ncifs, dbfilename))
        check_update()
def get_database(dbfilename):
    """Open (creating if necessary) the structure database.

    Terminates the program when the file exists but is not a readable
    SQLite database.

    :param dbfilename: path of the SQLite database file
    :return: a (DatabaseRequest, StructureTable) pair for that file
    """
    request = DatabaseRequest(dbfilename)
    try:
        request.initialize_db()
    except DatabaseError:
        print('Database is corrupt! Delete the file first.')
        sys.exit()
    table = StructureTable(dbfilename)
    table.set_database_version(0)  # not an APEX db
    return request, table
if __name__ == '__main__':
    args = parser.parse_args()
    if args.cell:
        # Cell-search mode: -f "a b c alpha beta gamma"
        find_cell(args.cell)
    else:
        try:
            # Without any -d directory, show usage instead of indexing.
            if not args.dir:
                parser.print_help()
                check_update()
                sys.exit()
        except IndexError:
            print("No valid search directory given.\n")
            print("Please run this as 'python3 stdb_cmd.py -d [directory]'\n")
            print("stdb_cmd will search for .cif files in [directory] recoursively.")
        run_index(args)
        # find_cell('10.5086 20.9035 20.5072 90.000 94.130 90.000'.split())
| 2,572 | 0 | 69 |
5d8971a22575495824e1bcbc6b9426c9dff1d963 | 2,318 | py | Python | python/updateServer_lanyue.py | houko/FrequentlyShell | a12cb0a96def2662453f66e0224957da126bccac | [
"MIT"
] | 5 | 2017-05-04T08:50:12.000Z | 2019-04-23T06:51:08.000Z | python/updateServer_lanyue.py | houko/FrequentlyShell | a12cb0a96def2662453f66e0224957da126bccac | [
"MIT"
] | 1 | 2017-05-04T09:07:28.000Z | 2017-05-04T09:13:13.000Z | python/updateServer_lanyue.py | houko/FrequentlyShell | a12cb0a96def2662453f66e0224957da126bccac | [
"MIT"
] | 1 | 2019-06-15T03:12:43.000Z | 2019-06-15T03:12:43.000Z | #! /usr/bin/python3
# coding=utf-8
"""
把今天最好的表现当作明天最新的起点..~
いま 最高の表現 として 明日最新の始発..~
Today the best performance as tomorrow newest starter!
author: xiaomo
github: https://github.com/syoubaku
email: xiaomo@xiamoo.info
QQ_NO: 83387856
Date: 17/5/31 14:55
Description: 更新服务器
Copyright(©) 2017 by xiaomo.
"""
import os
import shutil
import sys
import time
if len(sys.argv) < 2 or len(sys.argv) > 3:
    print("Usage: python updateServer_lanyue.py version")
    exit(1)
# Directory holding the server start/stop shell scripts.
shell_path = "/data/game/server/s1/bin/"
# Root of the deployed game-server installation.
target_base_url = "/data/game/server/s1/"
# Where the deployed server jar files live.
target_jar_path = target_base_url + "core/"
# Root directory of the game config/data tables.
target_data_base_path = target_base_url + "data/"
# Version number to deploy (first command line argument).
version = sys.argv[1]
# Stop the server
# Copy the new build files into place
# Update the recorded version number
if __name__ == '__main__':
    main()
| 22.950495 | 85 | 0.560828 | #! /usr/bin/python3
# coding=utf-8
"""
把今天最好的表现当作明天最新的起点..~
いま 最高の表現 として 明日最新の始発..~
Today the best performance as tomorrow newest starter!
author: xiaomo
github: https://github.com/syoubaku
email: xiaomo@xiamoo.info
QQ_NO: 83387856
Date: 17/5/31 14:55
Description: 更新服务器
Copyright(©) 2017 by xiaomo.
"""
import os
import shutil
import sys
import time
if len(sys.argv) < 2 or len(sys.argv) > 3:
    print("Usage: python updateServer_lanyue.py version")
    exit(1)
# Directory holding the server start/stop shell scripts.
shell_path = "/data/game/server/s1/bin/"
# Root of the deployed game-server installation.
target_base_url = "/data/game/server/s1/"
# Where the deployed server jar files live.
target_jar_path = target_base_url + "core/"
# Root directory of the game config/data tables.
target_data_base_path = target_base_url + "data/"
# Version number to deploy (first command line argument).
version = sys.argv[1]
def svn_up():
    """Pull the latest release artifacts from SVN into /root/version."""
    os.chdir("/root/version")
    os.system("svn up")
# Stop the server
def close_server():
    """Stop all running game-server processes via the ops shell script."""
    os.chdir(shell_path)
    os.system("sh serverOptions.sh all stop")
# Copy the new build files into place
def update_code():
    """Deploy the new build: replace the server jars and the data tables.

    Reads from /root/version/<version>/server/ (kept current by svn_up)
    and writes into the live server directories. Uses os/shutil instead of
    shelling out to ``rm -rf`` so the operations do not silently depend on
    a shell and path-handling stays in Python.
    """
    # Swap in the new jar files.
    os.chdir(target_jar_path)
    for old_jar in os.listdir(target_jar_path):
        if old_jar.endswith(".jar"):
            os.remove(old_jar)
    os.chdir("/root/version/" + version + "/server/core/")
    files = os.listdir(os.getcwd())
    for file in files:
        if file.endswith(".jar"):
            shutil.copy(file, target_jar_path)
    # Replace the config-table directory wholesale.
    os.chdir(target_base_url)
    shutil.rmtree(target_data_base_path, ignore_errors=True)
    source_data_url = "/root/version/" + version + "/server/data"
    shutil.copytree(source_data_url, target_data_base_path)
    # Drop version-control metadata copied along with the data.
    os.chdir(target_data_base_path)
    shutil.rmtree(".git", ignore_errors=True)
# Update the recorded version number
def change_version():
    """Record the deployed version number in the server's 'version' file."""
    os.chdir(target_base_url)
    # 'w' (instead of 'w+') is sufficient: the file is only written, and the
    # mode still truncates any longer previous version string.
    with open('version', 'w', encoding='utf-8') as f:
        f.write(version)
def start_server():
    """Start all game-server processes via the ops shell script."""
    os.chdir(shell_path)
    os.system("sh serverOptions.sh all start")
def main():
    """Run the whole deployment pipeline: svn update, stop the server,
    deploy jars and data, record the version, then start the server again.
    """
    print("---------------------------------更新代码---------------------------------")
    svn_up()
    print("---------------------------------关闭服务器---------------------------------")
    close_server()
    time.sleep(3)
    print("---------------------------------更新jar包---------------------------------")
    update_code()
    time.sleep(3)
    print("---------------------------------修改版本号---------------------------------")
    change_version()
    print("---------------------------------开启服务器---------------------------------")
    start_server()
    print("\n脚本执行完毕")
if __name__ == '__main__':
main()
| 1,462 | 0 | 135 |
f7f92b3d5ade59f2c089c5df9977c0f9a575a74e | 13,383 | py | Python | athena/models/speech_transformer.py | hyx100e/athena-1 | 2668de7acdd51a6d12a2768a57351b666b4edbf3 | [
"Apache-2.0"
] | 1 | 2019-12-25T06:38:03.000Z | 2019-12-25T06:38:03.000Z | athena/models/speech_transformer.py | cookingbear/athena | 2ff02d5f54070563c6a600199ae9e8d3ca3c66dd | [
"Apache-2.0"
] | null | null | null | athena/models/speech_transformer.py | cookingbear/athena | 2ff02d5f54070563c6a600199ae9e8d3ca3c66dd | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright (C) 2019 ATHENA AUTHORS; Xiangang Li; Dongwei Jiang; Xiaoning Lei
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Only support eager mode
# pylint: disable=no-member, invalid-name, relative-beyond-top-level
# pylint: disable=too-many-locals, too-many-statements, too-many-arguments, too-many-instance-attributes
""" speech transformer implementation"""
from absl import logging
import tensorflow as tf
from .base import BaseModel
from ..loss import Seq2SeqSparseCategoricalCrossentropy
from ..metrics import Seq2SeqSparseCategoricalAccuracy
from ..utils.misc import generate_square_subsequent_mask, insert_sos_in_labels
from ..layers.commons import PositionalEncoding
from ..layers.transformer import Transformer
from ..utils.hparam import register_and_parse_hparams
from ..tools.beam_search import BeamSearchDecoder
from ..tools.lm_scorer import NGramScorer, RNNScorer
class SpeechTransformer(BaseModel):
""" Standard implementation of a SpeechTransformer. Model mainly consists of three parts:
the x_net for input preparation, the y_net for output preparation and the transformer itself
"""
default_config = {
"return_encoder_output": False,
"num_filters": 512,
"d_model": 512,
"num_heads": 8,
"num_encoder_layers": 12,
"num_decoder_layers": 6,
"dff": 1280,
"rate": 0.1,
"schedual_sampling_rate": 0.9,
"label_smoothing_rate": 0.0
}
    @staticmethod
    def _create_masks(x, input_length, y):
        r""" Generate attention masks for encoder input and decoder target.
        Masked positions are filled with float(1.0); unmasked positions are
        filled with float(0.0).
        """
        input_mask, output_mask = None, None
        if x is not None:
            # Padding mask over the encoder frames: 1.0 marks time steps
            # beyond each sample's true length.
            input_mask = 1.0 - tf.sequence_mask(
                input_length, tf.shape(x)[1], dtype=tf.float32
            )
            # Broadcastable shape [batch, 1, 1, time] for multi-head attention.
            input_mask = input_mask[:, tf.newaxis, tf.newaxis, :]
            input_mask.set_shape([None, None, None, None])
        if y is not None:
            # Padding positions in the label sequence (token id 0) ...
            output_mask = tf.cast(tf.math.equal(y, 0), tf.float32)
            output_mask = output_mask[:, tf.newaxis, tf.newaxis, :]
            # ... combined with a causal (look-ahead) mask so each target
            # position only attends to itself and earlier positions.
            look_ahead_mask = generate_square_subsequent_mask(tf.shape(y)[1])
            output_mask = tf.maximum(output_mask, look_ahead_mask)
            output_mask.set_shape([None, None, None, None])
        return input_mask, output_mask
def compute_logit_length(self, samples):
""" used for get logit length """
input_length = tf.cast(samples["input_length"], tf.float32)
logit_length = tf.math.ceil(input_length / 2)
logit_length = tf.math.ceil(logit_length / 2)
logit_length = tf.cast(logit_length, tf.int32)
return logit_length
    def time_propagate(self, history_logits, history_predictions, step, enc_outputs):
        """ Run the transformer decoder for one step during beam search.

        Args:
            history_logits: TensorArray collecting the logits of all steps
                decoded so far.
            history_predictions: TensorArray of token ids decoded so far;
                stacked shape is [time_steps, beam_size].
            step: decoding step completed before this call (incremented here).
            enc_outputs: tuple of (encoder_output, memory_mask).
        Returns:
            the logits of the newly decoded position, the updated
            history_logits and the incremented step.
        """
        # merge
        (encoder_output, memory_mask) = enc_outputs
        step = step + 1
        output_mask = generate_square_subsequent_mask(step)
        # propagate 1 step
        logits = self.y_net(tf.transpose(history_predictions.stack()), training=False)
        logits = self.transformer.decoder(
            logits,
            encoder_output,
            tgt_mask=output_mask,
            memory_mask=memory_mask,
            training=False,
        )
        logits = self.final_layer(logits)
        # Keep only the distribution over the newest position.
        logits = logits[:, -1, :]
        history_logits = history_logits.write(step - 1, logits)
        return logits, history_logits, step
    def decode(self, samples, hparams, lm_model=None, return_encoder=False):
        """ Beam search decoding over the encoder output.

        Args:
            samples: batch dict with at least "input" and "input_length".
            hparams: decoding options (beam_search, beam_size, lm_weight,
                lm_path, lm_type).
            lm_model: RNN language model, used when lm_type == "rnn".
            return_encoder: if True, return (encoder_output, input_mask)
                instead of running the beam search.
        Returns:
            the predicted token sequences from the beam search decoder.
        """
        x0 = samples["input"]
        batch = tf.shape(x0)[0]
        x = self.x_net(x0, training=False)
        input_length = self.compute_logit_length(samples)
        input_mask, _ = self._create_masks(x, input_length, None)
        encoder_output = self.transformer.encoder(x, input_mask, training=False)
        if return_encoder:
            return encoder_output, input_mask
        # init op: every beam starts with a single <sos> token.
        last_predictions = tf.ones([batch], dtype=tf.int32) * self.sos
        history_predictions = tf.TensorArray(
            tf.int32, size=1, dynamic_size=True, clear_after_read=False
        )
        step = 0
        history_predictions.write(0, last_predictions)
        history_predictions = history_predictions.stack()
        init_cand_states = [history_predictions]
        beam_size = 1 if not hparams.beam_search else hparams.beam_size
        beam_search_decoder = BeamSearchDecoder(
            self.num_class, self.sos, self.eos, beam_size=beam_size
        )
        # time_propagate performs the per-step decoder forward pass.
        beam_search_decoder.build(self.time_propagate)
        if hparams.lm_weight != 0:
            if hparams.lm_path is None:
                raise ValueError("lm path should not be none")
            # NOTE(review): if lm_type is neither "ngram" nor "rnn",
            # lm_scorer is unbound and add_scorer raises a NameError.
            if hparams.lm_type == "ngram":
                lm_scorer = NGramScorer(
                    hparams.lm_path,
                    self.sos,
                    self.eos,
                    self.num_class,
                    lm_weight=hparams.lm_weight,
                )
            elif hparams.lm_type == "rnn":
                lm_scorer = RNNScorer(
                    lm_model,
                    lm_weight=hparams.lm_weight)
            beam_search_decoder.add_scorer(lm_scorer)
        predictions = beam_search_decoder(
            history_predictions, init_cand_states, step, (encoder_output, input_mask)
        )
        return predictions
class SpeechTransformer2(SpeechTransformer):
    """ SpeechTransformer variant whose decoder supports two-pass scheduled sampling."""
    def mix_target_sequence(self, gold_token, predicted_token, training, top_k=5):
        """ Mix gold tokens and first-pass predictions for scheduled sampling.

        For each target position, with probability (1 - schedual_sampling_rate)
        the embedding is the mean of the top-k predicted token embeddings;
        otherwise the gold token embedding is used.

        :param gold_token: true labels, shape [batch, seq_len]
        :param predicted_token: logits from the first pass,
            shape [batch, seq_len, num_class]
        :param training: forwarded to the embedding/dropout layers
        :param top_k: number of top predictions averaged per position
        :return: mixed embedding sequence fed to the second decoding pass
        """
        mix_result = tf.TensorArray(
            tf.float32, size=1, dynamic_size=True, clear_after_read=False
        )
        for i in tf.range(tf.shape(gold_token)[-1]):
            if self.random_num([1]) > self.hparams.schedual_sampling_rate:# do scheduled sampling
                # Use the first-pass prediction: average the embeddings of
                # the k most likely tokens at this position.
                # y_net layer order is Input/Embedding/PositionalEncoding/
                # Dropout (see SpeechTransformer.__init__), so layers[1] is
                # the Embedding layer.
                selected_input = predicted_token[:, i, :]
                selected_idx = tf.nn.top_k(selected_input, top_k).indices
                embedding_input = self.y_net.layers[1](selected_idx, training=training)
                embedding_input = tf.reduce_mean(embedding_input, axis=1)
                mix_result = mix_result.write(i, embedding_input)
            else:
                # Use the gold label embedding at this position.
                selected_input = tf.reshape(gold_token[:, i], [-1, 1])
                embedding_input = self.y_net.layers[1](selected_input, training=training)
                mix_result = mix_result.write(i, embedding_input[:, 0, :])
        # layers[2] adds positional encoding; layers[3] applies dropout.
        final_input = self.y_net.layers[2](tf.transpose(mix_result.stack(), [1, 0, 2]),
                                           training=training)
        final_input = self.y_net.layers[3](final_input, training=training)
        return final_input
| 41.305556 | 104 | 0.62826 | # coding=utf-8
# Copyright (C) 2019 ATHENA AUTHORS; Xiangang Li; Dongwei Jiang; Xiaoning Lei
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Only support eager mode
# pylint: disable=no-member, invalid-name, relative-beyond-top-level
# pylint: disable=too-many-locals, too-many-statements, too-many-arguments, too-many-instance-attributes
""" speech transformer implementation"""
from absl import logging
import tensorflow as tf
from .base import BaseModel
from ..loss import Seq2SeqSparseCategoricalCrossentropy
from ..metrics import Seq2SeqSparseCategoricalAccuracy
from ..utils.misc import generate_square_subsequent_mask, insert_sos_in_labels
from ..layers.commons import PositionalEncoding
from ..layers.transformer import Transformer
from ..utils.hparam import register_and_parse_hparams
from ..tools.beam_search import BeamSearchDecoder
from ..tools.lm_scorer import NGramScorer, RNNScorer
class SpeechTransformer(BaseModel):
""" Standard implementation of a SpeechTransformer. Model mainly consists of three parts:
the x_net for input preparation, the y_net for output preparation and the transformer itself
"""
default_config = {
"return_encoder_output": False,
"num_filters": 512,
"d_model": 512,
"num_heads": 8,
"num_encoder_layers": 12,
"num_decoder_layers": 6,
"dff": 1280,
"rate": 0.1,
"schedual_sampling_rate": 0.9,
"label_smoothing_rate": 0.0
}
def __init__(self, data_descriptions, config=None):
super().__init__()
self.hparams = register_and_parse_hparams(self.default_config, config, cls=self.__class__)
self.num_class = data_descriptions.num_class + 1
self.sos = self.num_class - 1
self.eos = self.num_class - 1
ls_rate = self.hparams.label_smoothing_rate
self.loss_function = Seq2SeqSparseCategoricalCrossentropy(
num_classes=self.num_class, eos=self.eos, label_smoothing=ls_rate
)
self.metric = Seq2SeqSparseCategoricalAccuracy(eos=self.eos, name="Accuracy")
# for the x_net
num_filters = self.hparams.num_filters
d_model = self.hparams.d_model
layers = tf.keras.layers
input_features = layers.Input(shape=data_descriptions.sample_shape["input"], dtype=tf.float32)
inner = layers.Conv2D(
filters=num_filters,
kernel_size=(3, 3),
strides=(2, 2),
padding="same",
use_bias=False,
data_format="channels_last",
)(input_features)
inner = layers.BatchNormalization()(inner)
inner = tf.nn.relu6(inner)
inner = layers.Conv2D(
filters=num_filters,
kernel_size=(3, 3),
strides=(2, 2),
padding="same",
use_bias=False,
data_format="channels_last",
)(inner)
inner = layers.BatchNormalization()(inner)
inner = tf.nn.relu6(inner)
_, _, dim, channels = inner.get_shape().as_list()
output_dim = dim * channels
inner = layers.Reshape((-1, output_dim))(inner)
inner = layers.Dense(d_model, activation=tf.nn.relu6)(inner)
inner = PositionalEncoding(d_model, scale=False)(inner)
inner = layers.Dropout(self.hparams.rate)(inner) # self.hparams.rate
self.x_net = tf.keras.Model(inputs=input_features, outputs=inner, name="x_net")
print(self.x_net.summary())
# y_net for target
input_labels = layers.Input(shape=data_descriptions.sample_shape["output"], dtype=tf.int32)
inner = layers.Embedding(self.num_class, d_model)(input_labels)
inner = PositionalEncoding(d_model, scale=True)(inner)
inner = layers.Dropout(self.hparams.rate)(inner)
self.y_net = tf.keras.Model(inputs=input_labels, outputs=inner, name="y_net")
print(self.y_net.summary())
# transformer layer
self.transformer = Transformer(
self.hparams.d_model,
self.hparams.num_heads,
self.hparams.num_encoder_layers,
self.hparams.num_decoder_layers,
self.hparams.dff,
self.hparams.rate,
)
# last layer for output
self.final_layer = layers.Dense(self.num_class, input_shape=(d_model,))
# some temp function
self.random_num = tf.random_uniform_initializer(0, 1)
def call(self, samples, training: bool = None):
x0 = samples["input"]
y0 = insert_sos_in_labels(samples["output"], self.sos)
x = self.x_net(x0, training=training)
y = self.y_net(y0, training=training)
input_length = self.compute_logit_length(samples)
input_mask, output_mask = self._create_masks(x, input_length, y0)
y, encoder_output = self.transformer(
x,
y,
input_mask,
output_mask,
input_mask,
training=training,
return_encoder_output=True,
)
y = self.final_layer(y)
if self.hparams.return_encoder_output:
return y, encoder_output
return y
    @staticmethod
    def _create_masks(x, input_length, y):
        r""" Generate a square mask for the sequence. The masked positions are
        filled with float(1.0). Unmasked positions are filled with float(0.0).

        Args:
            x: encoder input, or None to skip building the input mask.
            input_length: per-sample number of valid (non-padding) frames.
            y: target token ids (0 = padding), or None to skip the output mask.

        Returns:
            (input_mask, output_mask); each is either None or a float tensor
            broadcastable to (batch, num_heads, q_len, k_len).
        """
        input_mask, output_mask = None, None
        if x is not None:
            # sequence_mask marks valid frames with 1; invert so padding is 1.0.
            input_mask = 1.0 - tf.sequence_mask(
                input_length, tf.shape(x)[1], dtype=tf.float32
            )
            # Add head and query-position axes for attention broadcasting.
            input_mask = input_mask[:, tf.newaxis, tf.newaxis, :]
            input_mask.set_shape([None, None, None, None])
        if y is not None:
            # Mask target padding (token id 0).
            output_mask = tf.cast(tf.math.equal(y, 0), tf.float32)
            output_mask = output_mask[:, tf.newaxis, tf.newaxis, :]
            # Combine with the causal mask so position t cannot attend to > t.
            look_ahead_mask = generate_square_subsequent_mask(tf.shape(y)[1])
            output_mask = tf.maximum(output_mask, look_ahead_mask)
            output_mask.set_shape([None, None, None, None])
        return input_mask, output_mask
def compute_logit_length(self, samples):
""" used for get logit length """
input_length = tf.cast(samples["input_length"], tf.float32)
logit_length = tf.math.ceil(input_length / 2)
logit_length = tf.math.ceil(logit_length / 2)
logit_length = tf.cast(logit_length, tf.int32)
return logit_length
    def time_propagate(self, history_logits, history_predictions, step, enc_outputs):
        """Advance the decoder by one step during beam search.

        Args:
            history_logits: TensorArray of per-step logits written so far.
            history_predictions: TensorArray of predictions from step 0 to the
                current step; stacks to [time_steps, beam_size].
            step: current decoding step (incremented here).
            enc_outputs: tuple (encoder_output, memory_mask) produced once by
                the encoder and reused at every step.

        Returns:
            (logits, history_logits, step): logits for the newly decoded
            position, the updated TensorArray, and the incremented step.
        """
        # merge
        (encoder_output, memory_mask) = enc_outputs
        step = step + 1
        # Causal mask sized to the current prefix length.
        output_mask = generate_square_subsequent_mask(step)
        # propagate 1 step
        # transpose: TensorArray stacks time-major; y_net expects batch-major.
        logits = self.y_net(tf.transpose(history_predictions.stack()), training=False)
        logits = self.transformer.decoder(
            logits,
            encoder_output,
            tgt_mask=output_mask,
            memory_mask=memory_mask,
            training=False,
        )
        logits = self.final_layer(logits)
        # Keep only the distribution for the newest position.
        logits = logits[:, -1, :]
        history_logits = history_logits.write(step - 1, logits)
        return logits, history_logits, step
def decode(self, samples, hparams, lm_model=None, return_encoder=False):
""" beam search decoding """
x0 = samples["input"]
batch = tf.shape(x0)[0]
x = self.x_net(x0, training=False)
input_length = self.compute_logit_length(samples)
input_mask, _ = self._create_masks(x, input_length, None)
encoder_output = self.transformer.encoder(x, input_mask, training=False)
if return_encoder:
return encoder_output, input_mask
# init op
last_predictions = tf.ones([batch], dtype=tf.int32) * self.sos
history_predictions = tf.TensorArray(
tf.int32, size=1, dynamic_size=True, clear_after_read=False
)
step = 0
history_predictions.write(0, last_predictions)
history_predictions = history_predictions.stack()
init_cand_states = [history_predictions]
beam_size = 1 if not hparams.beam_search else hparams.beam_size
beam_search_decoder = BeamSearchDecoder(
self.num_class, self.sos, self.eos, beam_size=beam_size
)
beam_search_decoder.build(self.time_propagate)
if hparams.lm_weight != 0:
if hparams.lm_path is None:
raise ValueError("lm path should not be none")
if hparams.lm_type == "ngram":
lm_scorer = NGramScorer(
hparams.lm_path,
self.sos,
self.eos,
self.num_class,
lm_weight=hparams.lm_weight,
)
elif hparams.lm_type == "rnn":
lm_scorer = RNNScorer(
lm_model,
lm_weight=hparams.lm_weight)
beam_search_decoder.add_scorer(lm_scorer)
predictions = beam_search_decoder(
history_predictions, init_cand_states, step, (encoder_output, input_mask)
)
return predictions
def restore_from_pretrained_model(self, pretrained_model, model_type=""):
if model_type == "":
return
if model_type == "mpc":
logging.info("loading from pretrained mpc model")
self.x_net = pretrained_model.x_net
self.transformer.encoder = pretrained_model.encoder
elif model_type == "SpeechTransformer":
logging.info("loading from pretrained SpeechTransformer model")
self.x_net = pretrained_model.x_net
self.y_net = pretrained_model.y_net
self.transformer = pretrained_model.transformer
self.final_layer = pretrained_model.final_layer
else:
raise ValueError("NOT SUPPORTED")
class SpeechTransformer2(SpeechTransformer):
    """SpeechTransformer variant with two-pass scheduled-sampling decoding.

    The decoder runs twice: the first pass produces provisional logits; the
    second pass re-decodes from a mixture of gold tokens and first-pass
    predictions (see mix_target_sequence).
    """

    def call(self, samples, training: bool = None):
        x0 = samples["input"]
        y0 = insert_sos_in_labels(samples["output"], self.sos)
        x = self.x_net(x0, training=training)
        y = self.y_net(y0, training=training)
        input_length = self.compute_logit_length(samples)
        input_mask, output_mask = self._create_masks(x, input_length, y0)
        # first pass: ordinary teacher-forced decoding
        y, encoder_output = self.transformer(
            x,
            y,
            input_mask,
            output_mask,
            input_mask,
            training=training,
            return_encoder_output=True,
        )
        y_pre = self.final_layer(y)
        # second pass: decode again from gold/predicted token mixture
        y = self.mix_target_sequence(y0, y_pre, training)
        y, encoder_output = self.transformer(
            x,
            y,
            input_mask,
            output_mask,
            input_mask,
            training=training,
            return_encoder_output=True,
        )
        y = self.final_layer(y)
        if self.hparams.return_encoder_output:
            return y, encoder_output
        return y

    def mix_target_sequence(self, gold_token, predicted_token, training, top_k=5):
        """Mix gold tokens with first-pass predictions (scheduled sampling).

        Args:
            gold_token: true label ids, shape [batch, time].
            predicted_token: first-pass logits, shape [batch, time, num_class].
            training: Keras training flag forwarded to the embedding layers.
            top_k: number of top predictions whose embeddings are averaged
                when the predicted branch is taken.

        Returns:
            Embedded (and positionally encoded) decoder input mixing both
            sources, shape [batch, time, d_model].
        """
        mix_result = tf.TensorArray(
            tf.float32, size=1, dynamic_size=True, clear_after_read=False
        )
        for i in tf.range(tf.shape(gold_token)[-1]):
            if self.random_num([1]) > self.hparams.schedual_sampling_rate:  # use prediction (scheduled sampling)
                selected_input = predicted_token[:, i, :]
                # Average the embeddings of the top-k predicted tokens.
                selected_idx = tf.nn.top_k(selected_input, top_k).indices
                embedding_input = self.y_net.layers[1](selected_idx, training=training)
                embedding_input = tf.reduce_mean(embedding_input, axis=1)
                mix_result = mix_result.write(i, embedding_input)
            else:
                # Use the gold token's embedding directly.
                selected_input = tf.reshape(gold_token[:, i], [-1, 1])
                embedding_input = self.y_net.layers[1](selected_input, training=training)
                mix_result = mix_result.write(i, embedding_input[:, 0, :])
        # Re-apply positional encoding (layers[2]) and dropout (layers[3])
        # from y_net; transpose back to batch-major first.
        final_input = self.y_net.layers[2](tf.transpose(mix_result.stack(), [1, 0, 2]),
                                           training=training)
        final_input = self.y_net.layers[3](final_input, training=training)
        return final_input
| 5,406 | 0 | 108 |
77321d71dd125f8674c51d0e785252ce6c808d90 | 13,160 | py | Python | tests/test_rest.py | KarrLab/bcforms | 053765e83b89430f288539e0a211012d23bf2e6f | [
"MIT"
] | null | null | null | tests/test_rest.py | KarrLab/bcforms | 053765e83b89430f288539e0a211012d23bf2e6f | [
"MIT"
] | null | null | null | tests/test_rest.py | KarrLab/bcforms | 053765e83b89430f288539e0a211012d23bf2e6f | [
"MIT"
] | null | null | null | """ Test of bcforms.rest
:Author: Mike Zheng <xzheng20@colby.edu>
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2019-7-4
:Copyright: 2019, Karr Lab
:License: MIT
"""
import bcforms
from bcforms import core
from bcforms import rest
import unittest
| 28.301075 | 238 | 0.423252 | """ Test of bcforms.rest
:Author: Mike Zheng <xzheng20@colby.edu>
:Author: Jonathan Karr <karr@mssm.edu>
:Date: 2019-7-4
:Copyright: 2019, Karr Lab
:License: MIT
"""
import bcforms
from bcforms import core
from bcforms import rest
import unittest
class RestTestCase(unittest.TestCase):
    """Integration tests for the BcForms REST API (bcforms.rest)."""

    def test_PrefixMiddleware(self):
        """The WSGI prefix middleware must be constructible and callable."""
        rest.PrefixMiddleware(rest.app).__call__({'PATH_INFO': 'x'}, lambda x, y: None)

    def test_get_bcform_properties(self):
        """POST /api/bcform/ validates forms and computes whichever of
        structure/formula/mol_wt/charge the supplied subunit data allows."""
        client = rest.app.test_client()
        # test validate
        rv = client.post('/api/bcform/', json=dict({
            "form": "abc_a + abc_b"
        }))
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv.get_json(), {
            'form': '1 * abc_a + 1 * abc_b'
        })
        # test when all structure is known (protein)
        rv = client.post('/api/bcform/', json={
            "form": "abc_a + abc_b",
            "subunits": [
                {
                    "name": "abc_a",
                    "encoding": "bpforms.ProteinForm",
                    "structure": "A"
                },
                {
                    "name": "abc_b",
                    "encoding": "bpforms.ProteinForm",
                    "structure": "M"
                }
            ]
        })
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv.get_json(), {
            "form": "1 * abc_a + 1 * abc_b",
            "structure": "C[C@H]([NH3+])C(=O)O.CSCC[C@H]([NH3+])C(=O)O",
            "formula": "C8H20N2O4S",
            "mol_wt": 240.318,
            "charge": 2
        })
        # test when all structure is known (dna)
        rv = client.post('/api/bcform/', json={
            "form": "abc_a + abc_b",
            "subunits": [
                {
                    "name": "abc_a",
                    "encoding": "bpforms.DnaForm",
                    "structure": "A"
                },
                {
                    "name": "abc_b",
                    "encoding": "bpforms.DnaForm",
                    "structure": "T"
                }
            ]
        })
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv.get_json(), {
            "form": "1 * abc_a + 1 * abc_b",
            "structure": "OC1CC(OC1COP(=O)([O-])[O-])n1cnc2c1ncnc2N.OC1CC(OC1COP(=O)([O-])[O-])n1cc(C)c(=O)[nH]c1=O",
            "formula": "C20H25N7O14P2",
            "mol_wt": 649.402523996,
            "charge": -4
        })
        # test when all structure is known (rna)
        rv = client.post('/api/bcform/', json={
            "form": "abc_a + abc_b",
            "subunits": [
                {
                    "name": "abc_a",
                    "encoding": "bpforms.RnaForm",
                    "structure": "A"
                },
                {
                    "name": "abc_b",
                    "encoding": "bpforms.RnaForm",
                    "structure": "U"
                }
            ]
        })
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv.get_json(), {
            "form": "1 * abc_a + 1 * abc_b",
            "structure": "OC1C(O)C(OC1n1cnc2c1ncnc2N)COP(=O)([O-])[O-].OC1C(O)C(OC1n1ccc(=O)[nH]c1=O)COP(=O)([O-])[O-]",
            "formula": "C19H23N7O16P2",
            "mol_wt": 667.3735239959999,
            "charge": -4
        })
        # test when combination of bpform and smiles
        rv = client.post('/api/bcform/', json={
            "form": "abc_a + abc_b",
            "subunits": [
                {
                    "name": "abc_a",
                    "encoding": "bpforms.ProteinForm",
                    "structure": "A"
                },
                {
                    "name": "abc_b",
                    "encoding": "smiles",
                    "structure": "[Zn+2]"
                }
            ]
        })
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv.get_json(), {
            "form": "1 * abc_a + 1 * abc_b",
            "structure": "C[C@H]([NH3+])C(=O)O.[Zn+2]",
            "formula": "C3H8NO2Zn",
            "mol_wt": 155.482,
            "charge": 3
        })
        # when no structure is known, and some properties are known
        # all formula known -> formula + mol_wt
        rv = client.post('/api/bcform/', json={
            "form": "abc_a + abc_b",
            "subunits": [
                {
                    "name": "abc_a",
                    "formula": "CH4"
                },
                {
                    "name": "abc_b",
                    "formula": "H2O"
                }
            ]
        })
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv.get_json(), {
            "form": "1 * abc_a + 1 * abc_b",
            "formula": "CH6O",
            "mol_wt": 34.058
        })
        # all mol_wt known -> mol_wt
        rv = client.post('/api/bcform/', json={
            "form": "abc_a + abc_b",
            "subunits": [
                {
                    "name": "abc_a",
                    "mol_wt": 16
                },
                {
                    "name": "abc_b",
                    "mol_wt": 18
                }
            ]
        })
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv.get_json(), {
            "form": "1 * abc_a + 1 * abc_b",
            "mol_wt": 34.0
        })
        # all charge known -> charge
        rv = client.post('/api/bcform/', json={
            "form": "abc_a + abc_b",
            "subunits": [
                {
                    "name": "abc_a",
                    "charge": 1
                },
                {
                    "name": "abc_b",
                    "charge": -1
                }
            ]
        })
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv.get_json(), {
            "form": "1 * abc_a + 1 * abc_b",
            "charge": 0
        })
        # when mix-and-match known information
        # some known structure + some known formula -> formula, mol_wt
        rv = client.post('/api/bcform/', json={
            "form": "abc_a + abc_b",
            "subunits": [
                {
                    "name": "abc_a",
                    "encoding": "bpforms.ProteinForm",
                    "structure": "A"
                },
                {
                    "name": "abc_b",
                    "formula": "CH4"
                }
            ]
        })
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv.get_json(), {
            "form": "1 * abc_a + 1 * abc_b",
            "formula": "C4H12NO2",
            "mol_wt": 106.14500000000001
        })
        # some known structure + some known mol_wt -> mol_wt
        rv = client.post('/api/bcform/', json={
            "form": "abc_a + abc_b",
            "subunits": [
                {
                    "name": "abc_a",
                    "encoding": "bpforms.ProteinForm",
                    "structure": "A"
                },
                {
                    "name": "abc_b",
                    "mol_wt": 16
                }
            ]
        })
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv.get_json(), {
            "form": "1 * abc_a + 1 * abc_b",
            "mol_wt": 106.102
        })
        # some known formula + some known mol_wt -> mol_wt
        rv = client.post('/api/bcform/', json={
            "form": "abc_a + abc_b",
            "subunits": [
                {
                    "name": "abc_a",
                    "formula": "CH4"
                },
                {
                    "name": "abc_b",
                    "mol_wt": 16
                }
            ]
        })
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv.get_json(), {
            "form": "1 * abc_a + 1 * abc_b",
            "mol_wt": 32.043
        })
        # some known formula + some known charge -> nothing
        rv = client.post('/api/bcform/', json={
            "form": "abc_a + abc_b",
            "subunits": [
                {
                    "name": "abc_a",
                    "formula": "CH4"
                },
                {
                    "name": "abc_b",
                    "charge": 1
                }
            ]
        })
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv.get_json(), {
            "form": "1 * abc_a + 1 * abc_b"
        })
        # some known structure + some known nothing -> nothing
        # (abc_b appears in the form but has no subunit data supplied)
        rv = client.post('/api/bcform/', json={
            "form": "abc_a + abc_b",
            "subunits": [
                {
                    "name": "abc_a",
                    "encoding": "bpforms.ProteinForm",
                    "structure": "A"
                }
            ]
        })
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv.get_json(), {
            "form": "1 * abc_a + 1 * abc_b"
        })
        # test too large biocomplex
        rv = client.post('/api/bcform/', json={
            "form": "55 * abc_a",
            "subunits": [
                {
                    "name": "abc_a",
                    "encoding": "bpforms.ProteinForm",
                    "structure": "A"
                }
            ]
        })
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(rv.get_json(), {
            "form": "55 * abc_a",
            "structure": None,
            "formula": "C165H440N55O110",
            "mol_wt": 4955.610000000001,
            "charge": 55,
            "warnings": "The sum of length of bpforms-encoded subunits is 55, which exceeds the max length limit 50."
        })

    def test_get_bcform_properties_errors(self):
        """POST /api/bcform/ must reject malformed forms and subunit data
        with HTTP 400."""
        client = rest.app.test_client()
        # invalid form
        rv = client.post('/api/bcform/', json={
            "form": "HELLO"
        })
        self.assertEqual(rv.status_code, 400)
        # crosslink referencing undefined subunits
        rv = client.post('/api/bcform/', json=dict(form='abc_a + abc_b | x-link: [l-bond-atom: abc_c(1)-2O1 | l-displaced-atom: abc_d(1)-2H1 | r-bond-atom: abc_b(1)-3C1 | r-displaced-atom: abc_b(1)-3H1 | r-displaced-atom: abc_b(1)-3O1]'))
        self.assertEqual(rv.status_code, 400)
        # bad protein form
        rv = client.post('/api/bcform/', json={
            "form": "2 * abc_b",
            "subunits": [
                {
                    "name": "abc_b",
                    "encoding": "bpforms.ProteinForm",
                    "structure": "B"
                }
            ]
        })
        self.assertEqual(rv.status_code, 400)
        # bad dna form
        rv = client.post('/api/bcform/', json={
            "form": "2 * abc_b",
            "subunits": [
                {
                    "name": "abc_b",
                    "encoding": "bpforms.DnaForm",
                    "structure": "D"
                }
            ]
        })
        self.assertEqual(rv.status_code, 400)
        # bad rna form
        rv = client.post('/api/bcform/', json={
            "form": "2 * abc_b",
            "subunits": [
                {
                    "name": "abc_b",
                    "encoding": "bpforms.RnaForm",
                    "structure": "D"
                }
            ]
        })
        self.assertEqual(rv.status_code, 400)
        # bad smiles form
        rv = client.post('/api/bcform/', json={
            "form": "2 * abc_b",
            "subunits": [
                {
                    "name": "abc_b",
                    "encoding": "SMILES",
                    "structure": "CH3"
                }
            ]
        })
        self.assertEqual(rv.status_code, 400)
        # either encoding or structure, not both
        rv = client.post('/api/bcform/', json={
            "form": "2 * abc_b",
            "subunits": [
                {
                    "name": "abc_b",
                    "encoding": "bpforms.ProteinForm",
                }
            ]
        })
        self.assertEqual(rv.status_code, 400)
        rv = client.post('/api/bcform/', json={
            "form": "2 * abc_b",
            "subunits": [
                {
                    "name": "abc_b",
                    "structure": "AAA",
                }
            ]
        })
        self.assertEqual(rv.status_code, 400)
        # subunit name not present
        # (abc_c is supplied but does not appear in the form)
        rv = client.post('/api/bcform/', json={
            "form": "abc_a + abc_b",
            "subunits": [
                {
                    "name": "abc_a",
                    "encoding": "bpforms.ProteinForm",
                    "structure": "A"
                },
                {
                    "name": "abc_c",
                    "encoding": "bpforms.ProteinForm",
                    "structure": "M"
                }
            ]
        })
        self.assertEqual(rv.status_code, 400)
        # bad formula
        rv = client.post('/api/bcform/', json={
            "form": "2 * abc_b",
            "subunits": [
                {
                    "name": "abc_b",
                    "formula": "hello"
                }
            ]
        })
        self.assertEqual(rv.status_code, 400)
        # bad mol_wt
        rv = client.post('/api/bcform/', json={
            "form": "2 * abc_b",
            "subunits": [
                {
                    "name": "abc_b",
                    "mol_wt": -5
                }
            ]
        })
        self.assertEqual(rv.status_code, 400)
        # bad charge
        rv = client.post('/api/bcform/', json={
            "form": "2 * abc_b",
            "subunits": [
                {
                    "name": "abc_b",
                    "charge": 0.5
                }
            ]
        })
        self.assertEqual(rv.status_code, 400)

    def test_get_crosslinks(self):
        """GET /api/crosslink/ should return the crosslink catalogue."""
        client = rest.app.test_client()
        rv = client.get('/api/crosslink/')
        self.assertEqual(rv.status_code, 200)
| 12,762 | 17 | 131 |
d2a6c1f581ed633f9dad9ab649ec2b380b41883a | 5,366 | py | Python | gunnery/account/models.py | timgates42/gunnery | 733a261cae6243a11883a40e18b14f57cf6e47b2 | [
"Apache-2.0"
] | 314 | 2015-01-01T06:17:34.000Z | 2022-03-10T03:34:02.000Z | gunnery/account/models.py | timgates42/gunnery | 733a261cae6243a11883a40e18b14f57cf6e47b2 | [
"Apache-2.0"
] | 20 | 2015-04-03T13:34:59.000Z | 2021-06-10T20:37:25.000Z | gunnery/account/models.py | pkucmus/gunnery | 30fce7f3fd74947621da6e91c1e872f383fc1e71 | [
"Apache-2.0"
] | 57 | 2015-01-07T05:41:34.000Z | 2021-10-31T19:56:50.000Z | from django.conf import settings
from django.db.models.signals import post_save
from guardian.shortcuts import assign_perm
from timezone_field import TimeZoneField
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.core.mail import send_mail
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
from task.models import Task
# NOTE(review): dataset-filtered duplicate of CustomUser (methods stripped);
# it references CustomUserManager, which is not defined in this copy.
class CustomUser(AbstractBaseUser, PermissionsMixin):
    """
    A fully featured User model with admin-compliant permissions that uses
    a full-length email field as the username.
    Email and password are required. Other fields are optional.
    """
    email = models.EmailField(_('email address'), max_length=254, unique=True)
    name = models.CharField(_('first name'), max_length=30, blank=True)
    is_active = models.BooleanField(_('active'), default=True,
                                    help_text=_('Designates whether this user should be treated as '
                                                'active. Unselect this instead of deleting accounts.'))
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
    objects = CustomUserManager()
    # Per-user preferred timezone.
    timezone = TimeZoneField(default='UTC')
    # Authenticate by email; no extra required fields beyond email/password.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []
from django.contrib.auth.models import Group
from core.models import Department, Application, Environment
# Provision default permission groups whenever these models are created.
post_save.connect(DepartmentGroup.on_create_department, sender=Department)
post_save.connect(DepartmentGroup.on_create_application, sender=Application)
post_save.connect(DepartmentGroup.on_create_environment, sender=Environment)
post_save.connect(DepartmentGroup.on_create_task, sender=Task) | 39.455882 | 115 | 0.681513 | from django.conf import settings
from django.db.models.signals import post_save
from guardian.shortcuts import assign_perm
from timezone_field import TimeZoneField
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.core.mail import send_mail
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
from task.models import Task
class CustomUserManager(BaseUserManager):
    # Manager that creates accounts keyed by email instead of a username.

    def _create_user(self, email, password,
                     is_superuser, **extra_fields):
        """
        Creates and saves a User with the given email and password.
        """
        now = timezone.now()
        if not email:
            raise ValueError('The given email must be set')
        # Lower-case the domain part so lookups are case-insensitive.
        email = self.normalize_email(email)
        user = self.model(email=email, is_active=True,
                          is_superuser=is_superuser, last_login=now,
                          date_joined=now, **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, email, password=None, **extra_fields):
        """Create and save a regular (non-superuser) account."""
        return self._create_user(email, password, False, **extra_fields)

    def create_superuser(self, email, password, **extra_fields):
        """Create and save a superuser account."""
        return self._create_user(email, password, True, **extra_fields)
class CustomUser(AbstractBaseUser, PermissionsMixin):
    """
    A fully featured User model with admin-compliant permissions that uses
    a full-length email field as the username.
    Email and password are required. Other fields are optional.
    """
    email = models.EmailField(_('email address'), max_length=254, unique=True)
    # NOTE(review): verbose label says 'first name', but get_full_name()
    # treats this as the whole display name.
    name = models.CharField(_('first name'), max_length=30, blank=True)
    is_active = models.BooleanField(_('active'), default=True,
                                    help_text=_('Designates whether this user should be treated as '
                                                'active. Unselect this instead of deleting accounts.'))
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
    objects = CustomUserManager()
    # Per-user preferred timezone.
    timezone = TimeZoneField(default='UTC')
    # Authenticate by email; no extra required fields beyond email/password.
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []

    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')

    def get_absolute_url(self):
        """URL of this user's profile page."""
        return "/account/profile/%d/" % self.id

    def get_full_name(self):
        """Return the display name, falling back to the email address."""
        if self.name:
            return self.name
        else:
            return self.email

    def get_short_name(self):
        return self.name

    def email_user(self, subject, message, from_email=None):
        """Send an email to this user."""
        send_mail(subject, message, from_email, [self.email])
from django.contrib.auth.models import Group
from core.models import Department, Application, Environment
class DepartmentGroup(Group):
    """Auth group scoped to a single department.

    Group.name is kept globally unique by prefixing the department id (see
    save()); local_name is the department-local display name and system_name
    a short machine key (e.g. 'admin', 'user' — see
    settings.DEFAULT_DEPARTMENT_GROUPS).
    """
    department = models.ForeignKey(Department, related_name="groups")
    local_name = models.CharField(max_length=124)
    system_name = models.CharField(max_length=12)

    class Meta:
        ordering = ['name']

    def save(self, *args, **kwargs):
        # Derive the (globally unique) Group.name from department id +
        # department-local name before persisting.
        self.name = "%s_%s" % (self.department_id, self.local_name)
        super(DepartmentGroup, self).save(*args, **kwargs)

    def assign_department_perms(self, department):
        # Grant this group read access to its department.
        assign_perm('core.view_department', self, department)

    @staticmethod
    def on_create_department(sender, instance, created, **kwargs):
        """post_save handler: provision default groups for a new department."""
        if created:
            for system_name, group_name in settings.DEFAULT_DEPARTMENT_GROUPS.items():
                group = DepartmentGroup(department=instance, local_name=group_name, system_name=system_name)
                group.save()
                DepartmentGroup.assign_department_perms(group, instance)
                # Only the admin group may modify the department itself.
                if system_name == 'admin':
                    assign_perm('core.change_department', group, instance)

    @staticmethod
    def on_create_application(sender, instance, created, **kwargs):
        """post_save handler: grant default perms on a new application."""
        if created:
            DepartmentGroup._assign_default_perms('core', 'application', instance.department, instance)

    @staticmethod
    def on_create_environment(sender, instance, created, **kwargs):
        """post_save handler: grant default perms on a new environment."""
        if created:
            DepartmentGroup._assign_default_perms('core', 'environment', instance.application.department, instance)

    @staticmethod
    def on_create_task(sender, instance, created, **kwargs):
        """post_save handler: grant default perms on a new task."""
        if created:
            DepartmentGroup._assign_default_perms('task', 'task', instance.application.department, instance)

    @staticmethod
    def _assign_default_perms(app, model, department, instance):
        # 'user' and 'admin' groups get view/execute; 'admin' also gets change.
        groups = DepartmentGroup.objects.filter(department=department, system_name__in=['user', 'admin'])
        for group in groups:
            for action in ['view', 'execute']:
                assign_perm('%s.%s_%s' % (app, action, model), group, instance)
            if group.system_name == 'admin':
                assign_perm('%s.%s_%s' % (app, 'change', model), group, instance)

    def __str__(self):
        return self.local_name
# Provision default permission groups whenever these models are created
# (handlers defined on DepartmentGroup above).
post_save.connect(DepartmentGroup.on_create_department, sender=Department)
post_save.connect(DepartmentGroup.on_create_application, sender=Application)
post_save.connect(DepartmentGroup.on_create_environment, sender=Environment)
post_save.connect(DepartmentGroup.on_create_task, sender=Task)
3666d77a16a91dbdde97afeb1cd27f402d5134ed | 1,701 | py | Python | landshark/iteration.py | basaks/landshark | 87ec1fada74addd58f37bdaf3b1adbc10b1544b2 | [
"Apache-2.0"
] | null | null | null | landshark/iteration.py | basaks/landshark | 87ec1fada74addd58f37bdaf3b1adbc10b1544b2 | [
"Apache-2.0"
] | null | null | null | landshark/iteration.py | basaks/landshark | 87ec1fada74addd58f37bdaf3b1adbc10b1544b2 | [
"Apache-2.0"
] | null | null | null | """Utilities to support iteration."""
# Copyright 2019 CSIRO (Data61)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import Iterator, List, Tuple, TypeVar
import numpy as np
from landshark.basetypes import FixedSlice
T = TypeVar("T")
def batch(it: Iterator[T], batchsize: int) -> Iterator[List[T]]:
    """Yield consecutive lists of up to `batchsize` items drawn from `it`
    (the final batch may be shorter)."""
    def take() -> List[T]:
        return list(itertools.islice(it, batchsize))
    # iter(callable, sentinel) keeps calling take() until it returns the
    # empty-list sentinel, i.e. exactly when `it` is exhausted.
    yield from iter(take, [])
def batch_slices(batchsize: int, total_size: int) -> Iterator[FixedSlice]:
    """Group range indices into slices of a given batchsize.

    Yields FixedSlice(start, stop) pairs covering [0, total_size); the final
    slice is shorter when batchsize does not divide total_size.
    """
    # Single pass over range() instead of materialising an intermediate
    # list of (start, stop) tuples; yields the identical sequence.
    for start in range(0, total_size, batchsize):
        yield FixedSlice(start, min(start + batchsize, total_size))
def with_slices(it: Iterator[np.ndarray]) -> Iterator[Tuple[FixedSlice, np.ndarray]]:
    """Pair each array from `it` with its row-slice within the vstacked whole."""
    offset = 0
    for arr in it:
        upper = offset + arr.shape[0]
        yield FixedSlice(offset, upper), arr
        offset = upper
| 31.5 | 85 | 0.687243 | """Utilities to support iteration."""
# Copyright 2019 CSIRO (Data61)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import Iterator, List, Tuple, TypeVar
import numpy as np
from landshark.basetypes import FixedSlice
T = TypeVar("T")
def batch(it: Iterator[T], batchsize: int) -> Iterator[List[T]]:
    """Yield consecutive lists of up to `batchsize` items drawn from `it`
    (the final batch may be shorter)."""
    def take() -> List[T]:
        return list(itertools.islice(it, batchsize))
    # iter(callable, sentinel) keeps calling take() until it returns the
    # empty-list sentinel, i.e. exactly when `it` is exhausted.
    yield from iter(take, [])
def batch_slices(batchsize: int, total_size: int) -> Iterator[FixedSlice]:
    """Group range indices into slices of a given batchsize.

    Yields FixedSlice(start, stop) pairs covering [0, total_size); the final
    slice is shorter when batchsize does not divide total_size.
    """
    # Single pass over range() instead of materialising an intermediate
    # list of (start, stop) tuples; yields the identical sequence.
    for start in range(0, total_size, batchsize):
        yield FixedSlice(start, min(start + batchsize, total_size))
def with_slices(it: Iterator[np.ndarray]) -> Iterator[Tuple[FixedSlice, np.ndarray]]:
    """Pair each array from `it` with its row-slice within the vstacked whole."""
    offset = 0
    for arr in it:
        upper = offset + arr.shape[0]
        yield FixedSlice(offset, upper), arr
        offset = upper
| 0 | 0 | 0 |
d9125919f74656e88ab4503f939ade9eda4adc73 | 1,149 | py | Python | app/log_auth/geodata.py | carverdo/scrap | d431095cadc1bc3f60aa0a3f473e726ecace9078 | [
"MIT"
] | null | null | null | app/log_auth/geodata.py | carverdo/scrap | d431095cadc1bc3f60aa0a3f473e726ecace9078 | [
"MIT"
] | null | null | null | app/log_auth/geodata.py | carverdo/scrap | d431095cadc1bc3f60aa0a3f473e726ecace9078 | [
"MIT"
] | null | null | null | """
HAVE NOT TESTED FOR SPEED (but second one seems better)
Decent link here about ip spoofing -
http://esd.io/blog/flask-apps-heroku-real-ip-spoofing.html
I think it has already been incorporated
and my code works on his committed change.
"""
__author__ = 'donal'
__project__ = 'ribcage'
from json import loads
from urllib2 import urlopen
from flask import request
from flask.ext.login import current_user
from config_vars import VALID_IP
# ========================
# PRIMARY CALL
# ========================
| 31.916667 | 74 | 0.70235 | """
HAVE NOT TESTED FOR SPEED (but second one seems better)
Decent link here about ip spoofing -
http://esd.io/blog/flask-apps-heroku-real-ip-spoofing.html
I think it has already been incorporated
and my code works on his committed change.
"""
__author__ = 'donal'
__project__ = 'ribcage'
from json import loads
from urllib2 import urlopen
from flask import request
from flask.ext.login import current_user
from config_vars import VALID_IP
# ========================
# PRIMARY CALL
# ========================
def get_clientdata():
    """Collect the requesting client's IP, browser string and — when a user
    is logged in — their member id.

    Raises:
        ValueError: when the remote address is not a valid IPv4 string.
    """
    # REMOTE_ADDR from the WSGI environ, falling back to Flask's
    # remote_addr; see the ip-spoofing discussion in the module docstring.
    ip = request.environ.get('REMOTE_ADDR', request.remote_addr)
    if not VALID_IP.match(ip):
        raise ValueError('Invalid IPv4 format')
    client_data = {
        'ip_address': ip,
        'browser': request.headers.get("User-Agent"),
    }
    if current_user.is_active:
        client_data['member_id'] = current_user.get_id()
    return client_data
| 614 | 0 | 22 |
420619b55f9f04db78fb5c5dee172744ada95023 | 115 | py | Python | IntroProPython/aula9-Arquivos/listagem09-18.py | SweydAbdul/estudos-python | b052708d0566a0afb9a1c04d035467d45f820879 | [
"MIT"
] | null | null | null | IntroProPython/aula9-Arquivos/listagem09-18.py | SweydAbdul/estudos-python | b052708d0566a0afb9a1c04d035467d45f820879 | [
"MIT"
] | null | null | null | IntroProPython/aula9-Arquivos/listagem09-18.py | SweydAbdul/estudos-python | b052708d0566a0afb9a1c04d035467d45f820879 | [
"MIT"
] | null | null | null | import os
# List the current directory, then two nested sub-directories.
print(os.listdir('.'))
print(os.listdir('avo'))
print(os.listdir('avo/pai'))
print(os.listdir('avo/mae')) | 23 | 28 | 0.686957 | import os
# List the current directory, then two nested sub-directories.
print(os.listdir('.'))
print(os.listdir('avo'))
print(os.listdir('avo/pai'))
print(os.listdir('avo/mae')) | 0 | 0 | 0 |
b404db9394afec7677247fb0e1b9290302380d38 | 379 | py | Python | allure-pytest/test/clean/no_clean_test.py | vdsbenoit/allure-python | 7b56b031c42369dd73844105382e9ceb9a88d6cd | [
"Apache-2.0"
] | 1 | 2021-02-19T21:00:11.000Z | 2021-02-19T21:00:11.000Z | allure-pytest/test/clean/no_clean_test.py | vdsbenoit/allure-python | 7b56b031c42369dd73844105382e9ceb9a88d6cd | [
"Apache-2.0"
] | null | null | null | allure-pytest/test/clean/no_clean_test.py | vdsbenoit/allure-python | 7b56b031c42369dd73844105382e9ceb9a88d6cd | [
"Apache-2.0"
] | 1 | 2020-08-05T05:40:44.000Z | 2020-08-05T05:40:44.000Z | def test_two_runs_no_clean():
"""
>>> report_fixture = getfixture('allure_report_with_params')
>>> allure_report_first_run = report_fixture(cache=False)
>>> allure_report_second_run = report_fixture(cache=False)
>>> assert_that(allure_report_second_run,
... has_only_n_test_cases('test_two_runs_no_clean', 2)
... )
"""
assert True
| 34.454545 | 70 | 0.680739 | def test_two_runs_no_clean():
"""
>>> report_fixture = getfixture('allure_report_with_params')
>>> allure_report_first_run = report_fixture(cache=False)
>>> allure_report_second_run = report_fixture(cache=False)
>>> assert_that(allure_report_second_run,
... has_only_n_test_cases('test_two_runs_no_clean', 2)
... )
"""
assert True
| 0 | 0 | 0 |
2f5e8394385156696972b52a4a20a9f3de7e6a4c | 2,524 | py | Python | 2019-presentations/11-november/regex.py | OuhscBbmc/StatisticalComputing | c2b4d5b12c906754abed0f5e3cfe4decbaa10948 | [
"MIT"
] | 7 | 2017-09-01T20:02:52.000Z | 2021-07-15T16:28:27.000Z | 2016-presentations/05-may/regex.py | OuhscBbmc/StatisticalComputing | c2b4d5b12c906754abed0f5e3cfe4decbaa10948 | [
"MIT"
] | 9 | 2015-02-02T21:33:21.000Z | 2019-11-06T18:31:07.000Z | 2016-presentations/05-may/regex.py | OuhscBbmc/StatisticalComputing | c2b4d5b12c906754abed0f5e3cfe4decbaa10948 | [
"MIT"
] | 3 | 2015-02-02T20:09:56.000Z | 2016-03-21T21:57:58.000Z | import re
import pandas as pd  # NOTE(review): imported but unused in this demo script

# Positive lookbehind: match 'def' only when it directly follows 'abc'.
m = re.search('(?<=abc)def', 'abcdef')
print(m.group(0))
print("###### Example 1 ###########")
example_1 = """
1916-1918 subscales for a subject
1998-1914 subscales for a subject
subscales for a subject 1998-1920
"""
# Rewrite each 4-digit run: replace the first two digits with '20',
# keeping the last two (capture group 2).
r = re.sub('(\d{2})(\d{2})', '20\\2', example_1)
print(r)
print("###### Example 2 ###########")
example_2 = """
1234
23
14a
1a3
234
1.39
"""
# '\\d' matches one digit; 'abcdef' has none, so this prints None.
m = re.search('\\d', 'abcdef')
print(m)
# Examples 3-6 below are defined as raw material for live demonstration;
# no regex operations are applied to them in this script.
print("###### Example 3 ###########")
example_3 = """
"CL_ID" = "ClientID"
, "RMSEQ" = "RemovedSequence"
, "RMVL_BEGDT" = "RemovalBeginDate"
, "RMVL_ENDDT" = "RemovalEndDate"
, "END_TYP_CDE" = "EndTypeID"
, "REMOVED_FROM_TYP_CDE" = "RemovedFromTypeID"
, "CURR_RMVL_TYP_CDE" = "RemovalTypeCurrentID"
, "ORIG_RMVL_TYP_CDE" = "RemovalTypeOriginalID"
, "FMLY_STRUCTURE_TYP_CDE" = "FamilyStructureTypeID"
, "ADDRESS" = "Address"
, "CITY" = "City"
, "STATE" = "StateID"
, "ZIP" = "ZipFull"
, "COUNTY_TYP_CDE" = "CountyOfficeID"
, "REFER_THAT_CAUSED_RMVL" = "ReferralCausedRemoval"
, "REFERRAL_DT" = "ReferralDate"
, "CARE_TAKER1_ID" = "CareTaker1ID"
, "CARE_TAKER2_ID" = "CareTaker2ID"
"""
print("###### Example 4 ###########")
example_4 = """
requireNamespace("dplyr", quietly=T) #hadley/dplyr
requireNamespace("lubridate")
requireNamespace("OuhscMunge", quietly=TRUE) #OuhscBbmc/OuhscMunge
"""
print("###### Example 5 ###########")
example_5 = """
9
4
34
3
62
43
1
"""
print("###### Example 6 ###########")
example_6 = """
Time,Gender,Genetype,Treatment,MouseID,OR-Recognition Index,FC-t-F %,FC-b-F %,FC-a-F %
4M,Male,WILD,Control,c9-1,0.32,11.9,0,25.7
4M,Male,WILD,Control,c13-2,0.47,23.7,0,11.
4M,Male,WILD,Prozac,c10-2,0.62,40.7,11.4,51.4
4M,Male,WILD,Prozac,c14-3,0.63,10.2,0,28.6
4M,Male,YFP,Control,c9-2,0.42,42.4,11.4,22.9
4M,Male,YFP,Control,c13-1,0.5,15.3,0,54.1
4M,Male,YFP,Control,c13-nm,1,27.1,0,31.4
4M,Male,YFP,Prozac,c10-1,0.65,20.3,17.1,54.3
4M,Male,YFP,Prozac,c10-4,0.43,44.1,5.7,40
4M,Male,YFP,Prozac,c10-nm,0.5,15.3,5.7,34.3
4M,Male,YFP,Prozac,c14-1,0.47,8.5,0,60
4M,Male,YFP,Prozac,c14-2,0.65,16.9,0,8.6
4M,Male,YFP,Prozac,c14-3,1,30.5,5.7,20
"""
| 28.681818 | 90 | 0.541601 | import re
import pandas as pd
m = re.search('(?<=abc)def', 'abcdef')
print(m.group(0))
print("###### Example 1 ###########")
example_1 = """
1916-1918 subscales for a subject
1998-1914 subscales for a subject
subscales for a subject 1998-1920
"""
r = re.sub('(\d{2})(\d{2})', '20\\2', example_1)
print(r)
print("###### Example 2 ###########")
example_2 = """
1234
23
14a
1a3
234
1.39
"""
m = re.search('\\d', 'abcdef')
print(m)
print("###### Example 3 ###########")
example_3 = """
"CL_ID" = "ClientID"
, "RMSEQ" = "RemovedSequence"
, "RMVL_BEGDT" = "RemovalBeginDate"
, "RMVL_ENDDT" = "RemovalEndDate"
, "END_TYP_CDE" = "EndTypeID"
, "REMOVED_FROM_TYP_CDE" = "RemovedFromTypeID"
, "CURR_RMVL_TYP_CDE" = "RemovalTypeCurrentID"
, "ORIG_RMVL_TYP_CDE" = "RemovalTypeOriginalID"
, "FMLY_STRUCTURE_TYP_CDE" = "FamilyStructureTypeID"
, "ADDRESS" = "Address"
, "CITY" = "City"
, "STATE" = "StateID"
, "ZIP" = "ZipFull"
, "COUNTY_TYP_CDE" = "CountyOfficeID"
, "REFER_THAT_CAUSED_RMVL" = "ReferralCausedRemoval"
, "REFERRAL_DT" = "ReferralDate"
, "CARE_TAKER1_ID" = "CareTaker1ID"
, "CARE_TAKER2_ID" = "CareTaker2ID"
"""
print("###### Example 4 ###########")
example_4 = """
requireNamespace("dplyr", quietly=T) #hadley/dplyr
requireNamespace("lubridate")
requireNamespace("OuhscMunge", quietly=TRUE) #OuhscBbmc/OuhscMunge
"""
print("###### Example 5 ###########")
example_5 = """
9
4
34
3
62
43
1
"""
print("###### Example 6 ###########")
example_6 = """
Time,Gender,Genetype,Treatment,MouseID,OR-Recognition Index,FC-t-F %,FC-b-F %,FC-a-F %
4M,Male,WILD,Control,c9-1,0.32,11.9,0,25.7
4M,Male,WILD,Control,c13-2,0.47,23.7,0,11.
4M,Male,WILD,Prozac,c10-2,0.62,40.7,11.4,51.4
4M,Male,WILD,Prozac,c14-3,0.63,10.2,0,28.6
4M,Male,YFP,Control,c9-2,0.42,42.4,11.4,22.9
4M,Male,YFP,Control,c13-1,0.5,15.3,0,54.1
4M,Male,YFP,Control,c13-nm,1,27.1,0,31.4
4M,Male,YFP,Prozac,c10-1,0.65,20.3,17.1,54.3
4M,Male,YFP,Prozac,c10-4,0.43,44.1,5.7,40
4M,Male,YFP,Prozac,c10-nm,0.5,15.3,5.7,34.3
4M,Male,YFP,Prozac,c14-1,0.47,8.5,0,60
4M,Male,YFP,Prozac,c14-2,0.65,16.9,0,8.6
4M,Male,YFP,Prozac,c14-3,1,30.5,5.7,20
"""
| 0 | 0 | 0 |
f0b29ce98138387ee64d4ee75f69200e75c7f7a7 | 284 | py | Python | ARClass1/ComputerVision/testFast.py | lucho1/AugmentedReality | 62c35040ff3cfe3e1bff9d95659c55c1a7a2aa1f | [
"MIT"
] | null | null | null | ARClass1/ComputerVision/testFast.py | lucho1/AugmentedReality | 62c35040ff3cfe3e1bff9d95659c55c1a7a2aa1f | [
"MIT"
] | null | null | null | ARClass1/ComputerVision/testFast.py | lucho1/AugmentedReality | 62c35040ff3cfe3e1bff9d95659c55c1a7a2aa1f | [
"MIT"
] | null | null | null | import cv2
target_imgPath = input("Introduce Path for Target Image: ") #ex_img/img1.png
input_img = cv2.imread(target_imgPath, cv2.IMREAD_COLOR)
winName = 'Input Image'
cv2.namedWindow(winName, cv2.WINDOW_NORMAL)
cv2.imshow(winName, input_img)
cv2.waitKey(0)
cv2.destroyAllWindows() | 28.4 | 76 | 0.792254 | import cv2
target_imgPath = input("Introduce Path for Target Image: ") #ex_img/img1.png
input_img = cv2.imread(target_imgPath, cv2.IMREAD_COLOR)
winName = 'Input Image'
cv2.namedWindow(winName, cv2.WINDOW_NORMAL)
cv2.imshow(winName, input_img)
cv2.waitKey(0)
cv2.destroyAllWindows() | 0 | 0 | 0 |
b76ccdca69d9db06b57637e63d45ce8ec654b786 | 1,077 | py | Python | employee_integration/app/migrations/0002_auto_20210917_1204.py | misha143/employee_integration | 0e4be49d3ba93700fd48547847664ffbe28318ea | [
"Apache-2.0"
] | null | null | null | employee_integration/app/migrations/0002_auto_20210917_1204.py | misha143/employee_integration | 0e4be49d3ba93700fd48547847664ffbe28318ea | [
"Apache-2.0"
] | null | null | null | employee_integration/app/migrations/0002_auto_20210917_1204.py | misha143/employee_integration | 0e4be49d3ba93700fd48547847664ffbe28318ea | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.2 on 2021-09-17 07:04
from django.db import migrations, models
| 37.137931 | 243 | 0.64624 | # Generated by Django 3.2.2 on 2021-09-17 07:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter help texts / verbose names on three Question fields.

    Only field metadata changes (help_text, verbose_name); no schema
    change beyond what AlterField records. The user-facing strings are
    in Russian by design and must not be translated.
    """

    dependencies = [
        ('app', '0001_initial'),
    ]
    operations = [
        # Image link field: help text explains uploading via imgur.
        migrations.AlterField(
            model_name='question',
            name='image_url',
            field=models.CharField(help_text="Загрузите на imgur.com и правым кликом по фото нажмите 'Копировать ссылку на изображение'. Вставьте ссылку. Пример: https://i.imgur.com/uNjl5Rf.jpg", max_length=200, verbose_name='Ссылка на фото'),
        ),
        # Quest deadline.
        migrations.AlterField(
            model_name='question',
            name='valid_until',
            field=models.DateTimeField(help_text='До какого времени квест можно проходить?', verbose_name='Время окончания теста'),
        ),
        # YouTube video id (not a full URL).
        migrations.AlterField(
            model_name='question',
            name='video_yt_url',
            field=models.CharField(help_text='Вставьте идентификатор видео с YouTube. Пример: vX3fXef2F4M', max_length=200, verbose_name='Ссылка на видео'),
        ),
    ]
| 0 | 1,160 | 23 |
970fbf34da6224bb1ed9eda700a7e79159025150 | 4,155 | py | Python | cogs/utility.py | exunious/Noella-Bot | ef119cf1ffc102188954962d54f07d895f4a7a94 | [
"MIT"
] | null | null | null | cogs/utility.py | exunious/Noella-Bot | ef119cf1ffc102188954962d54f07d895f4a7a94 | [
"MIT"
] | 1 | 2019-01-08T21:53:38.000Z | 2019-01-08T21:53:38.000Z | cogs/utility.py | exunious/Noella-Bot | ef119cf1ffc102188954962d54f07d895f4a7a94 | [
"MIT"
] | 3 | 2018-01-05T02:58:01.000Z | 2018-06-14T20:56:51.000Z | ##########################################
############ Utility Commands ############
##########################################
import discord
import random
from discord.ext import commands
from .musicutils.paginator import Pages
from config import *
| 48.313953 | 365 | 0.683032 | ##########################################
############ Utility Commands ############
##########################################
import discord
import random
from discord.ext import commands
from .musicutils.paginator import Pages
from config import *
class Utility():
    """Cog with general-purpose utility commands (latency, server info, server list)."""

    def __init__(self, bot):
        self.bot = bot

    ### Ping/Latency Command ###
    @commands.guild_only()
    @commands.command(aliases = ['ping'])
    async def latency(self, ctx):
        """Report the bot's gateway latency in seconds and milliseconds."""
        pingms = "{}".format(int(self.bot.latency * 1000))
        pings = "{}".format(int(self.bot.latency * 1))  # whole seconds; usually 0
        message = await ctx.send("Ping - Calculating some shit in the background... beep beep...")
        # NOTE(review): `asyncio` is not imported in this module; presumably
        # `from config import *` re-exports it — confirm.
        await asyncio.sleep(3)
        await message.edit(content = f"Pong! - My latency is **{pings}**s | **{pingms}**ms")

    ### Server Information Command ###
    @commands.guild_only()
    @commands.command(aliases=['si'])
    async def serverinfo(self, ctx):
        """Send an embed summarizing the current guild (members, channels, roles, etc.)."""
        vchannels = ctx.guild.voice_channels
        tchannels = ctx.guild.text_channels
        tmembers = ctx.guild.member_count
        omembers = sum(m.status is discord.Status.online for m in ctx.guild.members)
        # Keep only the date part of the creation timestamp.
        time = str(ctx.guild.created_at).split(' ')[0]
        roles = [x.name for x in ctx.guild.role_hierarchy]
        role_length = len(roles)
        if str(ctx.guild.verification_level) == "none":
            verification_text = "Protection: **None**\n*No further protection!*"
        elif str(ctx.guild.verification_level) == "low":
            verification_text = "Protection: **Low**\n*Verified Email*"
        elif str(ctx.guild.verification_level) == "medium":
            verification_text = "Protection: **Medium**\n*Registered for 5 Minutes*"
        elif str(ctx.guild.verification_level) == "high":
            verification_text = "Protection: **High**\n*Member for 10 Minutes*"
        elif str(ctx.guild.verification_level) == "extreme":
            verification_text = "Protection: **Extreme**\n*Verified Phone Number*"
        else:
            verification_text = "Protection: **N/A**\n*Cant find any protection*"
        embed = discord.Embed(colour = embed_color)
        if ctx.guild.icon_url:
            embed.set_thumbnail(url = ctx.guild.icon_url)
        else:
            # Fall back to Discord's default avatar when the guild has no icon.
            embed.set_thumbnail(url = "https://cdn.discordapp.com/embed/avatars/0.png")
        embed.set_author(name = "Server Information", icon_url = "http://icons.iconarchive.com/icons/graphicloads/100-flat/128/information-icon.png")
        embed.add_field(name="Server Name:", value = str(ctx.guild), inline=True)
        embed.add_field(name="Server ID:", value = str(ctx.guild.id), inline=True)
        embed.add_field(name="Server Owner:", value = str(ctx.guild.owner), inline=True)
        embed.add_field(name="Server Owner ID:", value = ctx.guild.owner.id, inline=True)
        embed.add_field(name="Member Count:", value = f'Members Online: **{omembers}**\nMembers Total: **{tmembers}**', inline=True)
        embed.add_field(name="Channels Count:", value = "Text Channels: **"+ str(len(tchannels)) +"** \nVoice Channels: **"+ str(len(vchannels)) +"**", inline=True)
        embed.add_field(name="Verification Level:", value = f"{verification_text}", inline=True)
        embed.add_field(name="AFK Channel & Time:", value = f"Channel: **{ctx.guild.afk_channel}**\n" "Time: **{} minutes**".format(int(ctx.guild.afk_timeout / 60)), inline=True)
        embed.add_field(name="Server Region:", value = '%s'%str(ctx.guild.region), inline=True)
        embed.add_field(name="Server Roles:", value = '%s'%str(role_length), inline=True)
        embed.set_footer(text ='Server Created: %s'%time)
        await ctx.send(embed = embed)

    ### List Servers Command ###
    @commands.guild_only()
    @commands.command(aliases = ['ls'])
    async def listservers(self, ctx):
        """Paginate a summary of every guild the bot is a member of."""
        # Build the summary list exactly once. The previous version rebuilt
        # this whole comprehension inside a redundant outer `for guild in
        # self.bot.guilds` loop (O(n^2) and shadowed loop variable) with the
        # same final result.
        guilds = [f"**{guild.name}** \nServer Owner: **{guild.owner.name}#{guild.owner.discriminator}**\nOnline Members: **{sum(m.status is discord.Status.online for m in guild.members)}** - Total Members: **{guild.member_count}**\nText Channels: **{str(len(guild.text_channels))}** - Voice Channels: **{str(len(guild.voice_channels))}**\n" for guild in self.bot.guilds]
        try:
            p = Pages(ctx, entries=guilds, per_page=5)
            p.embed.colour = embed_color
            await p.paginate()
        except Exception as e:
            # Best-effort: surface pagination errors to the channel.
            await ctx.send(e)
def setup(bot):
    """discord.py extension entry point: register the Utility cog on the bot."""
    bot.add_cog(Utility(bot))
| 3,490 | 366 | 46 |
72c27d084bf7d3b92c5b63ec0ad31223f7e1962e | 6,041 | py | Python | src/compress_dataset.py | yutake27/HMDM | a16c6e77cae9509ccf49140171797680068709aa | [
"MIT"
] | 2 | 2021-12-09T00:12:35.000Z | 2022-01-10T13:05:22.000Z | src/compress_dataset.py | yutake27/HMDM | a16c6e77cae9509ccf49140171797680068709aa | [
"MIT"
] | null | null | null | src/compress_dataset.py | yutake27/HMDM | a16c6e77cae9509ccf49140171797680068709aa | [
"MIT"
] | null | null | null | import argparse
import tarfile
from pathlib import Path
import tqdm
import pandas as pd
route_path = Path('..')
"""
# Compress necessary files of the dataset
Source
* pdb / dataset_name / target / sampling
* fasta / dataset_name
* native_pdb / dataset_name
* score
Output
* README.md
write about dataset details
* pdb
* native_pdb
* fasta
* score
"""
rename_columns_dict = {'model': 'Model', 'target': 'Target', 'template': 'Template', 'seq_len': 'SeqLength'}
label_columns = [
'Model', 'Target', 'Template', 'GDT_TS', 'GDT_HA',
'SeqLength', 'identity', 'positive', 'coverage',
'identity(-misres)', 'positive(-misres)', 'coverage(-misres)', 'num_misres'
]
score_columns = [
'Model', 'Target', 'identity(%)', 'positive(%)', 'coverage(%)',
'identity(-misres)(%)', 'positive(-misres)(%)', 'coverage(-misres)(%)',
'DOPE', 'SOAP', 'SBROD', 'ProQ2D', 'ProQRosCenD', 'ProQRosFAD', 'ProQ3D',
'P3CMQA', 'DeepAccNet', 'DeepAccNet-Bert'
]
def make_scop_score(dataset_name: str, output_dir: Path) -> (str, str, str):
"""load scop final score and split it into target, label, and mqa score.
Args:
dataset_name (str): Created dataset name
output_dir (Path): Output directory path
Return:
(str): path to target.csv
(str): path to label.csv
(str): path to score.csv
"""
csv_path = route_path / 'score' / dataset_name / (dataset_name + '_final_all_score.csv')
df = pd.read_csv(csv_path, index_col=0)
output_score_dir = output_dir / 'score'
output_score_dir.mkdir(exist_ok=True)
# Rename columns
df = df.rename(rename_columns_dict, axis=1)
# Drop columns
label_df = df[label_columns]
label_output_path = output_score_dir / 'label.csv'
label_df.to_csv(label_output_path)
if dataset_name[: 4] == 'scop':
target_df = df[[
'Target', 'SeqLength', 'SF-DOMID', 'SF', 'len_SF',
'FA-DOMID', 'FA-PDBID', 'FA-PDBREG', 'FA-UNIID', 'FA-UNIREG', 'SF-PDBID',
'SF-PDBREG', 'SF-UNIID', 'SF-UNIREG', 'TP', 'CL', 'CF', 'FA', 'Class'
]]
elif dataset_name[: 6] == 'pisces':
target_df = df[[
'Target', 'SeqLength', 'IDs', 'Exptl.', 'resolution', 'R-factor',
'FreeRvalue', 'PDB_ID', 'Chain', 'Domain_num'
]]
target_df = target_df.rename({'Domain_num': 'DomainNum'}, axis=1)
else:
raise ValueError()
target_df = target_df.groupby('Target').head(1).reset_index(drop=True)
target_output_path = output_score_dir / 'target.csv'
target_df.to_csv(target_output_path)
score_df = df[score_columns]
score_output_path = output_score_dir / 'mqa_score.csv'
score_df.to_csv(score_output_path)
return target_output_path, label_output_path, score_output_path
if __name__ == '__main__':
main()
| 35.745562 | 108 | 0.65221 | import argparse
import tarfile
from pathlib import Path
import tqdm
import pandas as pd
route_path = Path('..')
"""
# Compress necessary files of the dataset
Source
* pdb / dataset_name / target / sampling
* fasta / dataset_name
* native_pdb / dataset_name
* score
Output
* README.md
write about dataset details
* pdb
* native_pdb
* fasta
* score
"""
def compress_pdb(dataset_name: str, output_dir: Path) -> Path:
print('Compress pdb')
output_tar_pdb_path = output_dir / 'pdb.tar.gz'
if output_tar_pdb_path.exists():
return output_tar_pdb_path
pdb_dir = route_path / 'pdb' / dataset_name
with tarfile.open(output_tar_pdb_path, 'w:gz') as f:
for target in tqdm.tqdm(pdb_dir.glob('*'), total=100):
for pdb in (target / 'sampling').glob('*.pdb'):
f.add(str(pdb), 'pdb/' + target.stem + '/' + pdb.name)
return output_tar_pdb_path
def compress_native_pdb(tf: tarfile.TarFile, dataset_name: str, arcdir: str) -> None:
    """Add native-structure PDBs to *tf*, skipping targets that have no model dir."""
    print('Compress native pdb')
    native_dir = route_path / 'native_pdb' / dataset_name
    model_root = route_path / 'pdb' / dataset_name
    for native in native_dir.glob('*.pdb'):
        # Only include natives whose target actually has sampled models.
        if not (model_root / native.stem).exists():
            continue
        tf.add(str(native), arcname=f'{arcdir}/native_pdb/{native.name}')
def compress_fasta(tf: tarfile.TarFile, dataset_name: str, arcdir: str) -> None:
    """Add FASTA files to *tf*, skipping targets that have no model dir."""
    print('Compress fasta')
    fasta_root = route_path / 'fasta' / dataset_name
    model_root = route_path / 'pdb' / dataset_name
    for fasta_file in fasta_root.glob('*.fasta'):
        # Only include sequences whose target actually has sampled models.
        if not (model_root / fasta_file.stem).exists():
            continue
        tf.add(str(fasta_file), arcname=f'{arcdir}/fasta/{fasta_file.name}')
rename_columns_dict = {'model': 'Model', 'target': 'Target', 'template': 'Template', 'seq_len': 'SeqLength'}
label_columns = [
'Model', 'Target', 'Template', 'GDT_TS', 'GDT_HA',
'SeqLength', 'identity', 'positive', 'coverage',
'identity(-misres)', 'positive(-misres)', 'coverage(-misres)', 'num_misres'
]
score_columns = [
'Model', 'Target', 'identity(%)', 'positive(%)', 'coverage(%)',
'identity(-misres)(%)', 'positive(-misres)(%)', 'coverage(-misres)(%)',
'DOPE', 'SOAP', 'SBROD', 'ProQ2D', 'ProQRosCenD', 'ProQRosFAD', 'ProQ3D',
'P3CMQA', 'DeepAccNet', 'DeepAccNet-Bert'
]
def make_scop_score(dataset_name: str, output_dir: Path) -> "Tuple[Path, Path, Path]":
    """Load the dataset's final score CSV and split it into target/label/MQA CSVs.

    Args:
        dataset_name (str): Created dataset name (``scop*`` or ``pisces*``).
        output_dir (Path): Output directory; CSVs go under ``output_dir/score``.

    Returns:
        Tuple of paths (despite the historical docstring saying str):
        ``target.csv``, ``label.csv``, ``mqa_score.csv``.

    Raises:
        ValueError: If the dataset name is neither ``scop*`` nor ``pisces*``.
    """
    csv_path = route_path / 'score' / dataset_name / (dataset_name + '_final_all_score.csv')
    df = pd.read_csv(csv_path, index_col=0)
    output_score_dir = output_dir / 'score'
    output_score_dir.mkdir(exist_ok=True)
    # Rename columns
    df = df.rename(rename_columns_dict, axis=1)
    # Drop columns
    label_df = df[label_columns]
    label_output_path = output_score_dir / 'label.csv'
    label_df.to_csv(label_output_path)
    # Target metadata columns depend on the dataset family.
    if dataset_name[: 4] == 'scop':
        target_df = df[[
            'Target', 'SeqLength', 'SF-DOMID', 'SF', 'len_SF',
            'FA-DOMID', 'FA-PDBID', 'FA-PDBREG', 'FA-UNIID', 'FA-UNIREG', 'SF-PDBID',
            'SF-PDBREG', 'SF-UNIID', 'SF-UNIREG', 'TP', 'CL', 'CF', 'FA', 'Class'
        ]]
    elif dataset_name[: 6] == 'pisces':
        target_df = df[[
            'Target', 'SeqLength', 'IDs', 'Exptl.', 'resolution', 'R-factor',
            'FreeRvalue', 'PDB_ID', 'Chain', 'Domain_num'
        ]]
        target_df = target_df.rename({'Domain_num': 'DomainNum'}, axis=1)
    else:
        raise ValueError()
    # One metadata row per target (the score CSV has one row per model).
    target_df = target_df.groupby('Target').head(1).reset_index(drop=True)
    target_output_path = output_score_dir / 'target.csv'
    target_df.to_csv(target_output_path)
    score_df = df[score_columns]
    score_output_path = output_score_dir / 'mqa_score.csv'
    score_df.to_csv(score_output_path)
    return target_output_path, label_output_path, score_output_path
def compress_score(tf: tarfile.TarFile, dataset_name: str, arcdir: str, output_dir: Path) -> None:
    """Build the score CSVs and add them to *tf* under ``<arcdir>/data/``."""
    print('Compress score file')
    # make_scop_score returns (target, label, mqa_score) paths in this order.
    csv_paths = make_scop_score(dataset_name, output_dir)
    for src, filename in zip(csv_paths, ('target.csv', 'label.csv', 'mqa_score.csv')):
        tf.add(src, arcdir + '/data/' + filename)
def main():
    """CLI entry point: bundle one dataset (pdb, natives, fasta, scores, README)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset_pdb_path', type=str)
    parser.add_argument('output_dataset_name', type=str, help='[Single-domain, Multi-domain]')
    args = parser.parse_args()
    dataset_name = Path(args.dataset_pdb_path).stem
    # Only the two supported dataset families are accepted.
    assert dataset_name[:4] == 'scop' or dataset_name[:6] == 'pisces'
    output_dataset_name = args.output_dataset_name
    output_dir = route_path / 'dataset' / output_dataset_name
    output_dir.mkdir(parents=True, exist_ok=True)
    # Compress pdb
    output_tar_pdb_path = compress_pdb(dataset_name, output_dir)
    # Compress other files
    output_tar_path = (output_dir / output_dataset_name).with_suffix('.tar.gz')
    with tarfile.open(output_tar_path, 'w:gz') as tf:
        arcdir: str = output_dataset_name
        # pdb (the pre-built inner tarball is nested inside the outer archive)
        tf.add(output_tar_pdb_path, arcname=arcdir + '/pdb.tar.gz')
        # native pdb
        compress_native_pdb(tf, dataset_name, arcdir)
        # fasta
        compress_fasta(tf, dataset_name, arcdir)
        # score
        compress_score(tf, dataset_name, arcdir, output_dir)
        # README: pick the variant matching the dataset family.
        dataset_dir = route_path / 'dataset'
        if dataset_name[:4] == 'scop':
            readme_file = 'README-single.md'
        else: # dataset_name[: 6] == 'pisces'
            readme_file = 'README-multi.md'
        readme_path = dataset_dir / readme_file
        tf.add(readme_path, arcname=arcdir + '/README.md')
if __name__ == '__main__':
main()
| 3,088 | 0 | 115 |
fe281cc3b3ab67d09de7540069e231882145860e | 9,038 | py | Python | tests/conftest.py | beatMeDev/beatMeBackend | 82270d16835c970e42530133668e2a8ab047af31 | [
"MIT"
] | null | null | null | tests/conftest.py | beatMeDev/beatMeBackend | 82270d16835c970e42530133668e2a8ab047af31 | [
"MIT"
] | 1 | 2020-08-02T05:17:28.000Z | 2020-08-02T11:04:39.000Z | tests/conftest.py | beatMeDev/beatMeBackend | 82270d16835c970e42530133668e2a8ab047af31 | [
"MIT"
] | null | null | null | """Test pre running stuff"""
import warnings
from datetime import datetime
from datetime import timedelta
from typing import Any
from typing import AsyncGenerator
from typing import Dict
from typing import Optional
from uuid import UUID
from uuid import uuid4
import pytest
from asyncpg import ObjectInUseError
from fastapi import FastAPI
from tortoise import Tortoise
from tortoise.exceptions import DBConnectionError
from app.models.db import AuthAccount
from app.models.db import Challenge
from app.models.db import Playlist
from app.models.db import Submission
from app.models.db import Text
from app.models.db import Track
from app.models.db import User
from app.models.db import Vote
from app.models.db.user import AuthProvider
from app.services.auth.base import bearer_auth
from app.settings import APP_MODELS
from app.settings import TORTOISE_TEST_DB
from tests.test_services.test_auth.test_base import USER_UUID
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import imp # pylint: disable=unused-import
@pytest.fixture(scope="function", autouse=True)
@pytest.mark.asyncio
async def test_db() -> AsyncGenerator: # type: ignore
    """Initialize db connection before run test.

    Autouse, per-test: creates the test database on first use, builds the
    schema, and drops the database again on teardown.
    """
    # First attempt assumes the test database exists; on a cold run Tortoise
    # raises DBConnectionError and we retry with _create_db=True.
    try:
        await Tortoise.init(db_url=TORTOISE_TEST_DB, modules={"models": APP_MODELS})
    except DBConnectionError:
        await Tortoise.init(
            db_url=TORTOISE_TEST_DB, modules={"models": APP_MODELS}, _create_db=True,
        )
    await Tortoise.generate_schemas()
    yield
    # Teardown: drop the test database. ObjectInUseError is swallowed because
    # another still-open connection may hold the database.
    try:
        await Tortoise._drop_databases() # pylint: disable=protected-access
    except ObjectInUseError:
        pass
    await Tortoise.close_connections()
POPULATE_TRACK_ID: str = str(uuid4())
async def bearer_auth_mock() -> str:
    """Auth dependency override: always authenticate as the default test user."""
    return str(USER_UUID)
def mock_auth(application: FastAPI) -> FastAPI:
    """Mock auth dependency and token middleware.

    Returns the same application instance, mutated in place.
    """
    application.dependency_overrides[bearer_auth] = bearer_auth_mock
    # Drop all HTTP middleware (the token middleware lives there) and rebuild
    # the stack so the removal actually takes effect.
    application.user_middleware = []
    application.middleware_stack = application.build_middleware_stack()
    return application
@pytest.fixture()
@pytest.mark.asyncio
async def populate_texts() -> Text:
"""Populate text for utils routes tests."""
text, _ = await Text.get_or_create(content="test")
return text
test_track_info: Dict[str, Any] = {
"id": POPULATE_TRACK_ID,
"name": "test",
"author_name": "test",
"cover_url": "test",
"preview_url": "test",
"youtube_id": "test",
"spotify_id": "test",
"recommended": True,
"meta": {},
}
async def populate_track() -> Track:
"""Populate track for routes tests."""
track, _ = await Track.get_or_create(**test_track_info)
return track
@pytest.fixture()
@pytest.mark.asyncio
async def track_fixture() -> Track:
"""Populate track for utils routes tests."""
return await populate_track()
async def populate_playlist(track: Optional[Track] = None) -> Playlist:
    """Populate playlist with track for routes tests.

    When *track* is None, the shared test track is created/attached instead.
    """
    playlist, _ = await Playlist.get_or_create(
        name="test",
        url="test",
        spotify_id="test",
        recommended=True,
    )
    if not track:
        track = await populate_track()
    await playlist.tracks.add(track)
    return playlist
@pytest.fixture()
@pytest.mark.asyncio
async def playlist_fixture() -> Playlist:
"""Populate playlist with track for routes testing."""
return await populate_playlist()
async def populate_user(user_id: Optional[UUID] = USER_UUID) -> User:
    """Create (or reuse) a user with one default-provider auth account.

    Args:
        user_id: Explicit user id; a falsy value (``None``) means
            "random user", mirroring ``populate_challenge``.

    Returns:
        The user with its ``auth_accounts`` relation prefetched.
    """
    # The signature advertises Optional[UUID]: honour None by generating a
    # fresh id instead of creating a user with id=None.
    if not user_id:
        user_id = uuid4()
    user, _ = await User.get_or_create(id=user_id)
    await AuthAccount.get_or_create(
        _id="test",
        name="test",
        image="test",
        url="test",
        provider=AuthProvider.DEFAULT,
        access_token="test",
        refresh_token="test",
        expires=0,
        user=user,
    )
    await user.fetch_related("auth_accounts")
    return user
@pytest.fixture()
@pytest.mark.asyncio
async def user_fixture() -> User:
"""Default user tests fixture."""
return await populate_user()
POPULATE_CHALLENGE_ID = uuid4()
POPULATE_CHALLENGE_SECRET = Challenge(id=POPULATE_CHALLENGE_ID).secret_key()
POPULATE_CHALLENGE_FOREIGN_ID = uuid4()
POPULATE_CHALLENGE_FOREIGN_SECRET = Challenge(
id=POPULATE_CHALLENGE_FOREIGN_ID, is_public=False,
).secret_key()
async def populate_challenge(
    challenge_status: str = "process",
    is_public: bool = True,
    user_id: Optional[UUID] = USER_UUID,
    challenge_id: UUID = POPULATE_CHALLENGE_ID,
) -> Challenge:
    """Create a challenge (plus owner, track and playlist) for route tests.

    Args:
        challenge_status: "process", "vote" or "end" — positions the
            challenge/vote deadlines relative to now.
        is_public: Whether the challenge is open.
        user_id: Owner id; ``None`` means a random owner.
        challenge_id: Id for the created challenge.
    """
    if not user_id:
        user_id = uuid4()
    user: User = await populate_user(user_id=user_id)
    # FIX: the dict must be expanded into kwargs (as populate_track does).
    # Passing it positionally fills Tortoise's `defaults` parameter with no
    # filter kwargs, matching an arbitrary existing Track row.
    track, _ = await Track.get_or_create(**test_track_info)
    await populate_playlist()
    # Default: challenge still running, voting still open.
    challenge_end = datetime.utcnow() + timedelta(days=1)
    vote_end = datetime.utcnow() + timedelta(days=2)
    if challenge_status == "vote":
        # Submissions closed, voting window open.
        challenge_end = datetime.utcnow() - timedelta(days=1)
        vote_end = datetime.utcnow() + timedelta(days=2)
    if challenge_status == "end":
        # Both windows in the past.
        challenge_end = datetime.utcnow() - timedelta(days=2)
        vote_end = datetime.utcnow() - timedelta(days=1)
    challenge, _ = await Challenge.get_or_create(
        id=challenge_id,
        name="test",
        challenge_end=challenge_end,
        vote_end=vote_end,
        is_public=is_public,
        owner=user,
        track=track,
    )
    await challenge.participants.add(user)
    return challenge
@pytest.fixture()
@pytest.mark.asyncio
async def challenge_process_fixture() -> Challenge:
"""
Populate challenge with:
- Default user
- Is open
- Challenge in process
"""
return await populate_challenge()
@pytest.fixture()
@pytest.mark.asyncio
async def challenge_vote_fixture() -> Challenge:
"""
Populate challenge with:
- Default user
- Is open
- Challenge in voting
"""
return await populate_challenge(challenge_status="vote")
@pytest.fixture()
@pytest.mark.asyncio
async def challenge_end_fixture() -> Challenge:
"""
Populate challenge with:
- Default user
- Is open
- Challenge ended
"""
return await populate_challenge(challenge_status="end")
@pytest.fixture()
@pytest.mark.asyncio
async def challenge_private_fixture() -> Challenge:
"""
Populate challenge with:
- Default user
- Is private
- Challenge is open
"""
return await populate_challenge(is_public=False)
@pytest.fixture()
@pytest.mark.asyncio
async def challenge_foreign_fixture() -> Challenge:
"""
Populate challenge with:
- Random user
- Is private
- Challenge is open
"""
return await populate_challenge(
is_public=False,
user_id=None,
challenge_id=POPULATE_CHALLENGE_FOREIGN_ID,
)
POPULATE_SUBMISSION_ID = uuid4()
async def populate_submission(
    challenge: Challenge,
    submission_id: Optional[UUID] = POPULATE_SUBMISSION_ID,
) -> Submission:
    """Populate submission for routes testing.

    The submission is authored by the challenge owner; ``submission_id=None``
    generates a random id.
    """
    if not submission_id:
        submission_id = uuid4()
    submission, _ = await Submission.get_or_create(
        id=submission_id,
        url="test",
        challenge=challenge,
        user=challenge.owner,
    )
    return submission
@pytest.fixture()
@pytest.mark.asyncio
async def submission_fixture() -> Submission:
"""
Populate submission with:
- Default user
- Challenge in process
- Challenge is open
"""
challenge: Challenge = await populate_challenge()
return await populate_submission(challenge=challenge)
@pytest.fixture()
@pytest.mark.asyncio
async def submission_vote_fixture() -> Submission:
"""
Populate submission with:
- Default user
- Challenge is voting
- Challenge is open
"""
challenge: Challenge = await populate_challenge(challenge_status="vote")
return await populate_submission(challenge=challenge)
@pytest.fixture()
@pytest.mark.asyncio
async def submission_ended_fixture() -> Submission:
"""
Populate submission with:
- Default user
- Challenge is ended
- Challenge is open
"""
challenge: Challenge = await populate_challenge(challenge_status="end")
return await populate_submission(challenge=challenge)
async def populate_vote(submission: Submission) -> Vote:
    """Populate vote for routes testing.

    The voter is the owner of the submission's challenge.
    """
    vote, _ = await Vote.get_or_create(
        submission=submission,
        user=submission.challenge.owner, # type: ignore
    )
    return vote
@pytest.fixture()
@pytest.mark.asyncio
async def vote_fixture() -> Vote:
"""Vote fixture with challenge on voting."""
challenge: Challenge = await populate_challenge(challenge_status="vote")
submission: Submission = await populate_submission(challenge=challenge)
return await populate_vote(submission=submission)
| 25.24581 | 85 | 0.694955 | """Test pre running stuff"""
import warnings
from datetime import datetime
from datetime import timedelta
from typing import Any
from typing import AsyncGenerator
from typing import Dict
from typing import Optional
from uuid import UUID
from uuid import uuid4
import pytest
from asyncpg import ObjectInUseError
from fastapi import FastAPI
from tortoise import Tortoise
from tortoise.exceptions import DBConnectionError
from app.models.db import AuthAccount
from app.models.db import Challenge
from app.models.db import Playlist
from app.models.db import Submission
from app.models.db import Text
from app.models.db import Track
from app.models.db import User
from app.models.db import Vote
from app.models.db.user import AuthProvider
from app.services.auth.base import bearer_auth
from app.settings import APP_MODELS
from app.settings import TORTOISE_TEST_DB
from tests.test_services.test_auth.test_base import USER_UUID
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import imp # pylint: disable=unused-import
@pytest.fixture(scope="function", autouse=True)
@pytest.mark.asyncio
async def test_db() -> AsyncGenerator: # type: ignore
"""Initialize db connection before run test."""
try:
await Tortoise.init(db_url=TORTOISE_TEST_DB, modules={"models": APP_MODELS})
except DBConnectionError:
await Tortoise.init(
db_url=TORTOISE_TEST_DB, modules={"models": APP_MODELS}, _create_db=True,
)
await Tortoise.generate_schemas()
yield
try:
await Tortoise._drop_databases() # pylint: disable=protected-access
except ObjectInUseError:
pass
await Tortoise.close_connections()
POPULATE_TRACK_ID: str = str(uuid4())
async def bearer_auth_mock() -> str:
"""Auth method mock."""
return str(USER_UUID)
def mock_auth(application: FastAPI) -> FastAPI:
"""Mock auth dependency and token middleware."""
application.dependency_overrides[bearer_auth] = bearer_auth_mock
application.user_middleware = []
application.middleware_stack = application.build_middleware_stack()
return application
@pytest.fixture()
@pytest.mark.asyncio
async def populate_texts() -> Text:
"""Populate text for utils routes tests."""
text, _ = await Text.get_or_create(content="test")
return text
test_track_info: Dict[str, Any] = {
"id": POPULATE_TRACK_ID,
"name": "test",
"author_name": "test",
"cover_url": "test",
"preview_url": "test",
"youtube_id": "test",
"spotify_id": "test",
"recommended": True,
"meta": {},
}
async def populate_track() -> Track:
"""Populate track for routes tests."""
track, _ = await Track.get_or_create(**test_track_info)
return track
@pytest.fixture()
@pytest.mark.asyncio
async def track_fixture() -> Track:
"""Populate track for utils routes tests."""
return await populate_track()
async def populate_playlist(track: Optional[Track] = None) -> Playlist:
"""Populate playlist with track for routes tests."""
playlist, _ = await Playlist.get_or_create(
name="test",
url="test",
spotify_id="test",
recommended=True,
)
if not track:
track = await populate_track()
await playlist.tracks.add(track)
return playlist
@pytest.fixture()
@pytest.mark.asyncio
async def playlist_fixture() -> Playlist:
"""Populate playlist with track for routes testing."""
return await populate_playlist()
async def populate_user(user_id: Optional[UUID] = USER_UUID) -> User:
"""Populate user for routes testing."""
# if not user_id:
# user_id = uuid4()
user, _ = await User.get_or_create(id=user_id)
await AuthAccount.get_or_create(
_id="test",
name="test",
image="test",
url="test",
provider=AuthProvider.DEFAULT,
access_token="test",
refresh_token="test",
expires=0,
user=user,
)
await user.fetch_related("auth_accounts")
return user
@pytest.fixture()
@pytest.mark.asyncio
async def user_fixture() -> User:
"""Default user tests fixture."""
return await populate_user()
POPULATE_CHALLENGE_ID = uuid4()
POPULATE_CHALLENGE_SECRET = Challenge(id=POPULATE_CHALLENGE_ID).secret_key()
POPULATE_CHALLENGE_FOREIGN_ID = uuid4()
POPULATE_CHALLENGE_FOREIGN_SECRET = Challenge(
id=POPULATE_CHALLENGE_FOREIGN_ID, is_public=False,
).secret_key()
async def populate_challenge(
challenge_status: str = "process",
is_public: bool = True,
user_id: Optional[UUID] = USER_UUID,
challenge_id: UUID = POPULATE_CHALLENGE_ID,
) -> Challenge:
"""Populate challenge for routes testings."""
if not user_id:
user_id = uuid4()
user: User = await populate_user(user_id=user_id)
track, _ = await Track.get_or_create(test_track_info)
await populate_playlist()
challenge_end = datetime.utcnow() + timedelta(days=1)
vote_end = datetime.utcnow() + timedelta(days=2)
if challenge_status == "vote":
challenge_end = datetime.utcnow() - timedelta(days=1)
vote_end = datetime.utcnow() + timedelta(days=2)
if challenge_status == "end":
challenge_end = datetime.utcnow() - timedelta(days=2)
vote_end = datetime.utcnow() - timedelta(days=1)
challenge, _ = await Challenge.get_or_create(
id=challenge_id,
name="test",
challenge_end=challenge_end,
vote_end=vote_end,
is_public=is_public,
owner=user,
track=track,
)
await challenge.participants.add(user)
return challenge
@pytest.fixture()
@pytest.mark.asyncio
async def challenge_process_fixture() -> Challenge:
"""
Populate challenge with:
- Default user
- Is open
- Challenge in process
"""
return await populate_challenge()
@pytest.fixture()
@pytest.mark.asyncio
async def challenge_vote_fixture() -> Challenge:
"""
Populate challenge with:
- Default user
- Is open
- Challenge in voting
"""
return await populate_challenge(challenge_status="vote")
@pytest.fixture()
@pytest.mark.asyncio
async def challenge_end_fixture() -> Challenge:
    """Open challenge owned by the default user, already finished."""
    challenge = await populate_challenge(challenge_status="end")
    return challenge
@pytest.fixture()
@pytest.mark.asyncio
async def challenge_private_fixture() -> Challenge:
    """Private challenge owned by the default user, still in progress."""
    challenge = await populate_challenge(is_public=False)
    return challenge
@pytest.fixture()
@pytest.mark.asyncio
async def challenge_foreign_fixture() -> Challenge:
    """Private, in-progress challenge owned by a random (foreign) user."""
    challenge = await populate_challenge(
        user_id=None,
        is_public=False,
        challenge_id=POPULATE_CHALLENGE_FOREIGN_ID,
    )
    return challenge
# Fixed identifier for the default submission shared by the fixtures below.
POPULATE_SUBMISSION_ID = uuid4()
async def populate_submission(
    challenge: Challenge,
    submission_id: Optional[UUID] = POPULATE_SUBMISSION_ID,
) -> Submission:
    """Create (or fetch) a submission by the challenge owner on *challenge*."""
    sub_id = submission_id or uuid4()
    submission, _ = await Submission.get_or_create(
        id=sub_id,
        url="test",
        challenge=challenge,
        user=challenge.owner,
    )
    return submission
@pytest.fixture()
@pytest.mark.asyncio
async def submission_fixture() -> Submission:
    """Default user's submission on an open, in-progress challenge."""
    challenge = await populate_challenge()
    submission = await populate_submission(challenge=challenge)
    return submission
@pytest.fixture()
@pytest.mark.asyncio
async def submission_vote_fixture() -> Submission:
    """Default user's submission on an open challenge that is in voting."""
    challenge = await populate_challenge(challenge_status="vote")
    submission = await populate_submission(challenge=challenge)
    return submission
@pytest.fixture()
@pytest.mark.asyncio
async def submission_ended_fixture() -> Submission:
    """Default user's submission on an open challenge that has ended."""
    challenge = await populate_challenge(challenge_status="end")
    submission = await populate_submission(challenge=challenge)
    return submission
async def populate_vote(submission: Submission) -> Vote:
    """Create (or fetch) the challenge owner's vote on *submission*."""
    owner = submission.challenge.owner  # type: ignore
    vote, _ = await Vote.get_or_create(
        submission=submission,
        user=owner,
    )
    return vote
@pytest.fixture()
@pytest.mark.asyncio
async def vote_fixture() -> Vote:
    """Owner's vote on the default submission of a voting-stage challenge."""
    challenge = await populate_challenge(challenge_status="vote")
    submission = await populate_submission(challenge=challenge)
    vote = await populate_vote(submission=submission)
    return vote
| 0 | 0 | 0 |
643121031961826d285600bddf8954ef245399d6 | 1,538 | py | Python | QRFactorization/QRFactors.py | drreynolds/Math4315-codes | b8be1c1254417a96d3bc23e48444731a75ed0d3b | [
"CC0-1.0"
] | 2 | 2021-01-26T19:13:57.000Z | 2021-03-05T05:43:59.000Z | QRFactorization/QRFactors.py | drreynolds/Math4315-codes | b8be1c1254417a96d3bc23e48444731a75ed0d3b | [
"CC0-1.0"
] | null | null | null | QRFactorization/QRFactors.py | drreynolds/Math4315-codes | b8be1c1254417a96d3bc23e48444731a75ed0d3b | [
"CC0-1.0"
] | 1 | 2021-02-26T22:12:24.000Z | 2021-02-26T22:12:24.000Z | # QRFactors.py
#
# Daniel R. Reynolds
# SMU Mathematics
# Math 4315
# imports
import numpy
def QRFactors(A):
    """
    usage: Q, R = QRFactors(A)

    Function to compute the QR factorization of a (possibly rank-deficient)
    'thin' matrix A (m x n, with m >= n) using Householder reflection
    matrices.

    Input:   A - thin matrix (any real dtype; A is copied, never modified)
    Outputs: Q - (m x m) orthogonal matrix
             R - (m x n) "upper triangular" matrix, i.e. R = [ Rhat ]
                                                             [  0   ]
                 with Rhat an (n x n) upper-triangular matrix, so A = Q @ R
    """
    # get dimensions of A
    m, n = numpy.shape(A)

    # initialize results; force R to floating point so the in-place updates
    # below remain valid even when A has an integer dtype (A.copy() would
    # keep the integer dtype and the subtraction of floats would fail)
    Q = numpy.identity(m)
    R = numpy.array(A, dtype=float)

    # iterate over columns
    for k in range(n):
        # extract subvector from diagonal down
        z = R[k:m, k]

        # Householder direction v = -(z + sign(z[0])*||z||*e1), using the
        # convention sign(0) == +1: numpy.sign(0) returns 0, which would
        # produce a reflector that fails to annihilate the subdiagonal
        # whenever the pivot entry is exactly zero
        sgn = 1.0 if z[0] >= 0.0 else -1.0
        v = -z
        v[0] = -sgn*numpy.linalg.norm(z) - z[0]
        vnorm = numpy.linalg.norm(v)

        # if subvector has norm zero, continue to next column
        if vnorm < numpy.finfo(float).eps:
            continue

        # unit reflector u = v/||v||; the Householder matrix is Qk = I - 2*u*u'
        u = v/vnorm

        # apply Qk to the trailing columns of R (rows k through m)
        R[k:m, k:n] -= numpy.outer(u, 2.0 * (u @ R[k:m, k:n]))

        # accumulate Qk into Q; Q holds the product Qn*...*Q1, so A = Q' R
        Q[k:m, :] -= numpy.outer(u, 2.0 * (u @ Q[k:m, :]))

    # transpose the accumulated product to obtain the orthogonal factor
    return [Q.T, R]
# end function
# end function
| 24.03125 | 78 | 0.513004 | # QRFactors.py
#
# Daniel R. Reynolds
# SMU Mathematics
# Math 4315
# imports
import numpy
def QRFactors(A):
    """
    usage: Q, R = QRFactors(A)
    Function to compute the QR factorization of a (possibly rank-deficient)
    'thin' matrix A (m x n, with m >=n) using Householder reflection matrices.
    Input: A - thin matrix
    Outputs: Q - orthogonal matrix
             R - "upper triangular" matrix, i.e. R = [ Rhat ]
                                                    [ 0 ]
    with Rhat an (n x n) upper-triangular matrix
    """
    # get dimensions of A
    m, n = numpy.shape(A)
    # initialize results
    # NOTE(review): R inherits A's dtype; the in-place float updates below
    # assume a floating-point input -- confirm callers never pass int arrays
    Q = numpy.identity(m)
    R = A.copy()
    # iterate over columns
    for k in range(n):
        # extract subvector from diagonal down and compute norm;
        # v = -(z + sign(z0)*||z||*e1) is the Householder direction
        # NOTE(review): numpy.sign(0) == 0, so a zero pivot entry (z[0] == 0)
        # yields a reflector that does not annihilate the subdiagonal --
        # confirm inputs never hit this case
        z = R[k:m,k]
        v = -z;
        v[0] = -numpy.sign(z[0])*numpy.linalg.norm(z) - z[0];
        vnorm = numpy.linalg.norm(v)
        # if subvector has norm zero, continue to next column
        if (vnorm < numpy.finfo(float).eps):
            continue
        # compute u = v/||v||;
        # the Householder matrix is then Qk = I-2*u*u'
        u = v/vnorm
        # update rows k through m of R (trailing columns only)
        for j in range(k,n):
            utR = 2 * u.T @ R[k:m, j]
            R[k:m, j] -= u*utR
        # update rows k through m of Q (accumulates Qk*...*Q1)
        for j in range(m):
            utQ = 2 * u.T @ Q[k:m, j]
            Q[k:m, j] -= u*utQ
    # transpose Q before return, so A = Q @ R
    Q = Q.T
    return [Q, R]
# end function
| 0 | 0 | 0 |
54787a4a9a904635a9d79399b77217323f7cf446 | 1,504 | py | Python | src/nexusformat/__init__.py | tschoonj/nexusformat | a521170ec56c9631980b65e264bd9afcdbc164e8 | [
"BSD-3-Clause-Clear"
] | null | null | null | src/nexusformat/__init__.py | tschoonj/nexusformat | a521170ec56c9631980b65e264bd9afcdbc164e8 | [
"BSD-3-Clause-Clear"
] | null | null | null | src/nexusformat/__init__.py | tschoonj/nexusformat | a521170ec56c9631980b65e264bd9afcdbc164e8 | [
"BSD-3-Clause-Clear"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2014, NeXpy Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING, distributed with this software.
#-----------------------------------------------------------------------------
# --- distribution metadata (consumed by the setup script and the docs) ---
__package_name__ = u'nexusformat'
# the version string is produced by versioneer's generated _version module
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions  # keep the helper out of the public module namespace
__documentation_author__ = u'Ray Osborn'
__documentation_copyright__ = u'2013-2016, Ray Osborn'
__license__ = u'BSD'
__author_name__ = u'NeXpy Development Team'
__author_email__ = u'nexpydev@gmail.com'
# combined "Name <email>" form used in packaging metadata
__author__ = __author_name__ + u' <' + __author_email__ + u'>'
__url__ = u'http://nexpy.github.io/nexpy/'
__download_url__ = u'https://github.com/nexpy/nexusformat/'
__description__ = u'nexusformat: Python API to access NeXus data'
__long_description__ = \
u"""
This package provides a Python API to open, create, and manipulate `NeXus data
<http://www.nexusformat.org/>`_ written in the HDF5 format. The 'nexusformat'
package provides the underlying API for `NeXpy
<http://nexpy.github.io/nexpy>`_, which provides a GUI interface. It also
contains a command-line script, `nxstack` for merging TIFF or CBF files into a
single HDF5 array.
The latest development version is always available from `NeXpy's GitHub
site <https://github.com/nexpy/nexusformat>`_.
"""
| 36.682927 | 79 | 0.675532 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2013-2014, NeXpy Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING, distributed with this software.
#-----------------------------------------------------------------------------
__package_name__ = u'nexusformat'
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
__documentation_author__ = u'Ray Osborn'
__documentation_copyright__ = u'2013-2016, Ray Osborn'
__license__ = u'BSD'
__author_name__ = u'NeXpy Development Team'
__author_email__ = u'nexpydev@gmail.com'
__author__ = __author_name__ + u' <' + __author_email__ + u'>'
__url__ = u'http://nexpy.github.io/nexpy/'
__download_url__ = u'https://github.com/nexpy/nexusformat/'
__description__ = u'nexusformat: Python API to access NeXus data'
__long_description__ = \
u"""
This package provides a Python API to open, create, and manipulate `NeXus data
<http://www.nexusformat.org/>`_ written in the HDF5 format. The 'nexusformat'
package provides the underlying API for `NeXpy
<http://nexpy.github.io/nexpy>`_, which provides a GUI interface. It also
contains a command-line script, `nxstack` for merging TIFF or CBF files into a
single HDF5 array.
The latest development version is always available from `NeXpy's GitHub
site <https://github.com/nexpy/nexusformat>`_.
"""
| 0 | 0 | 0 |
eb7334b45895e9273f1edaa206023449ca9e6da0 | 539 | py | Python | app.py | eskape/calculator | f9b106c1cf7323a137936b31a32c0d5e5b47325f | [
"MIT"
] | null | null | null | app.py | eskape/calculator | f9b106c1cf7323a137936b31a32c0d5e5b47325f | [
"MIT"
] | null | null | null | app.py | eskape/calculator | f9b106c1cf7323a137936b31a32c0d5e5b47325f | [
"MIT"
] | null | null | null | from flask import Flask
from flask import request
from flask_restful import Resource, Api, reqparse
from Calculator import Calculator
app = Flask(__name__)
api = Api(app)
api.add_resource(Add, '/add')
if __name__ == '__main__':
app.run(port='5002') | 21.56 | 53 | 0.662338 | from flask import Flask
from flask import request
from flask_restful import Resource, Api, reqparse
from Calculator import Calculator
app = Flask(__name__)
api = Api(app)
class Add(Resource):
    """REST resource that adds two integers posted as JSON."""

    def post(self):
        # Accept the body as JSON regardless of the Content-Type header.
        payload = request.get_json(force=True)
        augend = int(payload['val1'])
        addend = int(payload['val2'])
        total = Calculator().add(augend, addend)
        return {'Result': total}
api.add_resource(Add, '/add')
if __name__ == '__main__':
app.run(port='5002') | 233 | -1 | 50 |
04c03b7a6ff384fe54d34834cbf5910575157a29 | 1,027 | py | Python | syphon/tests/test_context.py | ethall/syphon | dd75fd33f3f9164653f24b33c875615dc1d04182 | [
"MIT"
] | null | null | null | syphon/tests/test_context.py | ethall/syphon | dd75fd33f3f9164653f24b33c875615dc1d04182 | [
"MIT"
] | 23 | 2018-01-06T17:59:58.000Z | 2019-02-27T15:52:20.000Z | syphon/tests/test_context.py | ethall/syphon | dd75fd33f3f9164653f24b33c875615dc1d04182 | [
"MIT"
] | 1 | 2019-02-20T17:17:40.000Z | 2019-02-20T17:17:40.000Z | """syphon.tests.test_context.py
Copyright (c) 2017-2018 Keithley Instruments, LLC.
Licensed under MIT (https://github.com/ehall/syphon/blob/master/LICENSE)
"""
from syphon import Context
| 23.883721 | 75 | 0.771178 | """syphon.tests.test_context.py
Copyright (c) 2017-2018 Keithley Instruments, LLC.
Licensed under MIT (https://github.com/ehall/syphon/blob/master/LICENSE)
"""
from syphon import Context
def test_context_archive_property_default():
    """A fresh Context has no archive configured."""
    assert Context().archive is None


def test_context_cache_property_default():
    """A fresh Context has no cache configured."""
    assert Context().cache is None


def test_context_data_property_default():
    """A fresh Context carries no data set."""
    assert Context().data is None


def test_context_meta_property_default():
    """A fresh Context carries no metadata."""
    assert Context().meta is None


def test_context_overwrite_property_default():
    """overwrite defaults to an actual boolean False."""
    assert Context().overwrite is False
    assert isinstance(Context().overwrite, bool)


def test_context_schema_property_default():
    """A fresh Context has no schema loaded."""
    assert Context().schema is None


def test_context_schema_file_property_default():
    """schema_file mirrors the private default and is a string."""
    # NOTE(review): `is` across two separate instances only holds if the
    # default is a shared object -- confirm _schema_file is class-level.
    assert Context().schema_file is Context()._schema_file
    assert isinstance(Context().schema_file, str)


def test_context_verbose_property_default():
    """verbose defaults to an actual boolean False."""
    assert Context().verbose is False
    assert isinstance(Context().verbose, bool)
| 640 | 0 | 184 |
9499c5ca6a6ec4df2cc72e31c2e030d298408c9b | 11,416 | py | Python | cs15211/RedundantConnectionII.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | 1 | 2021-07-05T01:53:30.000Z | 2021-07-05T01:53:30.000Z | cs15211/RedundantConnectionII.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | null | null | null | cs15211/RedundantConnectionII.py | JulyKikuAkita/PythonPrac | 0ba027d9b8bc7c80bc89ce2da3543ce7a49a403c | [
"Apache-2.0"
] | 1 | 2018-01-08T07:14:08.000Z | 2018-01-08T07:14:08.000Z | __source__ = 'https://leetcode.com/problems/redundant-connection-ii/'
# Time: O(V) numbert of vertices
# Space: O(V)
#
# Description: Leetcode # 685. Redundant Connection II
#
# In this problem, a rooted tree is a directed graph such that,
# there is exactly one node (the root) for which all other nodes are descendants of this node,
# plus every node has exactly one parent, except for the root node which has no parents.
#
# The given input is a directed graph that started as a rooted tree with N nodes (with distinct values 1, 2, ..., N),
# with one additional directed edge added. The added edge has two different vertices chosen from 1 to N,
# and was not an edge that already existed.
#
# The resulting graph is given as a 2D-array of edges.
# Each element of edges is a pair [u, v] that represents a directed edge connecting nodes u and v,
# where u is a parent of child v.
#
# Return an edge that can be removed so that the resulting graph is a rooted tree of N nodes.
# If there are multiple answers, return the answer that occurs last in the given 2D-array.
#
# Example 1:
# Input: [[1,2], [1,3], [2,3]]
# Output: [2,3]
# Explanation: The given directed graph will be like this:
# 1
# / \
# v v
# 2-->3
#
# Example 2:
# Input: [[1,2], [2,3], [3,4], [4,1], [1,5]]
# Output: [4,1]
# Explanation: The given directed graph will be like this:
# 5 <- 1 -> 2
# ^ |
# | v
# 4 <- 3
# Note:
# The size of the input 2D-array will be between 3 and 1000.
# Every integer represented in the 2D-array will be between 1 and N, where N is the size of the input array.
#
# Companies
# Google
# Related Topics
# Graph
# Similar Questions
# Redundant Connection
#
import collections
import unittest
# 40ms 24.16%
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/redundant-connection-ii/solution/
# https://leetcode.com/problems/redundant-connection-ii/discuss/108058/one-pass-disjoint-set-solution-with-explai
# https://leetcode.com/problems/redundant-connection-ii/discuss/218692/Swift-union-find-solution
# summary:
# 1) Check whether there is a node having two parents.
# If so, store them as candidates A and B, and set the second edge invalid.
# 2) Perform normal union find.
# If the tree is now valid
# simply return candidate B
# else if candidates not existing
# we find a circle, return current edge;
# else
# remove candidate A instead of B.
#
# In the following code,
# last == -1 means "no cycle found" which is scenario 1 or 2
# second != -1 && last != -1 means "one edge removed and the result tree has cycle" which is scenario 3
# second == -1 means "no edge skipped or removed" which is scenario 4
#
# Union Find
# 3ms 99.49%
class Solution {
public int[] findRedundantDirectedConnection(int[][] edges) {
int[] roots = new int[edges.length + 1], ds = new int[edges.length + 1];
Arrays.fill(roots, -1);
int first = -1, second = -1, last = -1;
for (int i = 0; i < edges.length; i++) {
int parent = edges[i][0];
int child = edges[i][1];
if (roots[child] != -1) {
first = roots[child];
second = i;
continue;
}
roots[child] = i;
int x = find(ds, parent);
if (x == child) last = i;
else ds[child] = x;
}
if (last == -1) return edges[second];
if (second == -1) return edges[last];
return edges[first];
}
private int find(int[] ds, int x){
return ds[x] == 0 ? x : (ds[x] = find(ds, ds[x]));
}
}
Approach #1: Depth-First Search [Accepted]
Complexity Analysis
Time Complexity: O(N) where N is the number of vertices (and also the number of edges) in the graph.
We perform a depth-first search.
Space Complexity: O(N), the size of the graph.
# 11ms 12.82%
class Solution {
public int[] findRedundantDirectedConnection(int[][] edges) {
int N = edges.length;
Map<Integer, Integer> parent = new HashMap();
List<int[]> candidates = new ArrayList();
for (int[] edge: edges) {
if (parent.containsKey(edge[1])) {
candidates.add(new int[]{parent.get(edge[1]), edge[1]});
candidates.add(edge);
} else {
parent.put(edge[1], edge[0]);
}
}
int root = orbit(1, parent).node;
if (candidates.isEmpty()) {
Set<Integer> cycle = orbit(root, parent).seen;
int[] ans = new int[]{0, 0};
for (int[] edge: edges) {
if (cycle.contains(edge[0]) && cycle.contains(edge[1])) {
ans = edge;
}
}
return ans;
}
Map<Integer, List<Integer>> children = new HashMap();
for (int v: parent.keySet()) {
int pv = parent.get(v);
if (!children.containsKey(pv))
children.put(pv, new ArrayList<Integer>());
children.get(pv).add(v);
}
boolean[] seen = new boolean[N+1];
seen[0] = true;
Stack<Integer> stack = new Stack();
stack.add(root);
while (!stack.isEmpty()) {
int node = stack.pop();
if (!seen[node]) {
seen[node] = true;
if (children.containsKey(node)) {
for (int c: children.get(node))
stack.push(c);
}
}
}
for (boolean b: seen) if (!b)
return candidates.get(0);
return candidates.get(1);
}
public OrbitResult orbit(int node, Map<Integer, Integer> parent) {
Set<Integer> seen = new HashSet();
while (parent.containsKey(node) && !seen.contains(node)) {
seen.add(node);
node = parent.get(node);
}
return new OrbitResult(node, seen);
}
}
class OrbitResult {
int node;
Set<Integer> seen;
OrbitResult(int n, Set<Integer> s) {
node = n;
seen = s;
}
}
This problem is limited to a graph with N nodes and N edges.
No node is singled out if a edge is removed.
For example, [[1,2],[2,4],[3,4]], 4 nodes 3 edges, is not applicable to this problem.
You cannot remove [3,4] to single out node 3.
There are 3 cases:
Case 1) No loop, but there is one node who has 2 parents.
Case 2) A loop, and there is one node who has 2 parents, that node must be inside the loop.
Case 3) A loop, and every node has only 1 parent.
Case 1: e.g. [[1,2],[1,3],[2,3]] ,node 3 has 2 parents ([1,3] and [2,3]).
Return the edge that occurs last that is, return [2,3].
Case 2: e.g. [[1,2],[2,3],[3,1],[4,1]] , {1->2->3->1} is a loop, node 1 has 2 parents ([4,1] and [3,1]).
Return the edge that is inside the loop, that is, return [3,1].
Case 3: e.g. [[1,2],[2,3],[3,1],[1,4]] , {1->2->3->1} is a loop, you can remove any edge in a loop,
the graph is still valid. Thus, return the one that occurs last, that is, return [3,1].
Also, [[2,1],[3,1],[4,2],[1,4]] is a good example
# Union Find
# 3ms 99.49%
class Solution {
public int[] findRedundantDirectedConnection(int[][] edges) {
int[] ancestor = new int[edges.length + 1];
int[][] res = new int[2][2];
for(int[]node : edges) {
if(node[1] != getAncestor(ancestor, node[1]))
res[0] = node;
else if(getAncestor(ancestor, node[0]) == getAncestor(ancestor, node[1]))
res[1] = node;
else
ancestor[node[1]] = ancestor[node[0]];
if(res[0][0] != 0 && res[1][0] != 0)
return find(edges, ancestor, res[0], res[1]);
}
return res[0][0] == 0 ? res[1] : res[0];
}
public int getAncestor(int[] ancestor, int node) {
if(node != ancestor[node])
ancestor[node] = ancestor[node] == 0 ? node : getAncestor(ancestor, ancestor[node]);
return ancestor[node];
}
public int[] find(int[][] edges, int[] ancestor, int[] removed0, int[] removed1) {
for(int[] res : edges)
if(res[1] == removed0[1] && getAncestor(ancestor, res[1]) == getAncestor(ancestor, removed1[1]))
return res;
return new int[2];
}
}
# Union Find
# 3ms 99.49%
class Solution {
public int[] findRedundantDirectedConnection(int[][] edges) {
int[] parent = new int[edges.length+1];
for (int i = 0; i < parent.length; i++) {
parent[i] = i;
}
int[] cycleEdge = null;
int[] mParent = null;
for (int[] edge : edges) {
int x = find(parent, edge[0]);
int y = find(parent, edge[1]);
if (x == y)
cycleEdge = edge;
else {
if (y != edge[1])
mParent = edge;
else
parent[y] = x;
}
}
// means we only got the multiparent problem, and the edges we recorded using parent so far are good, so just return this one.
if (cycleEdge == null)
return mParent;
// means we only got the cycle problem, in this case we can remove any edge in the cycle, so just remove this one.
if (mParent == null)
return cycleEdge;
// now, it means we have both cycle and multi-parent problem.
// In my code, i didn't record an edge into parent if we think it's involved into the multi-parent problem,
// but we are still getting the cycle problem. Since in this problem we can only have edges point to the same
// node, so, current mParent edge is the wrong one, we need to remove the other one pointing to the same
// dest as mParent ex: [[2,1],[3,1],[4,2],[1,4]]
for (int[] edge : edges) {
if (edge[1] == mParent[1])
return edge;
}
return new int[2];
}
public int find(int[] parent, int x) {
if (parent[x] == x)
return x;
return find(parent, parent[x]);
}
}
'''
| 34.077612 | 134 | 0.556938 | __source__ = 'https://leetcode.com/problems/redundant-connection-ii/'
# Time: O(V) numbert of vertices
# Space: O(V)
#
# Description: Leetcode # 685. Redundant Connection II
#
# In this problem, a rooted tree is a directed graph such that,
# there is exactly one node (the root) for which all other nodes are descendants of this node,
# plus every node has exactly one parent, except for the root node which has no parents.
#
# The given input is a directed graph that started as a rooted tree with N nodes (with distinct values 1, 2, ..., N),
# with one additional directed edge added. The added edge has two different vertices chosen from 1 to N,
# and was not an edge that already existed.
#
# The resulting graph is given as a 2D-array of edges.
# Each element of edges is a pair [u, v] that represents a directed edge connecting nodes u and v,
# where u is a parent of child v.
#
# Return an edge that can be removed so that the resulting graph is a rooted tree of N nodes.
# If there are multiple answers, return the answer that occurs last in the given 2D-array.
#
# Example 1:
# Input: [[1,2], [1,3], [2,3]]
# Output: [2,3]
# Explanation: The given directed graph will be like this:
# 1
# / \
# v v
# 2-->3
#
# Example 2:
# Input: [[1,2], [2,3], [3,4], [4,1], [1,5]]
# Output: [4,1]
# Explanation: The given directed graph will be like this:
# 5 <- 1 -> 2
# ^ |
# | v
# 4 <- 3
# Note:
# The size of the input 2D-array will be between 3 and 1000.
# Every integer represented in the 2D-array will be between 1 and N, where N is the size of the input array.
#
# Companies
# Google
# Related Topics
# Graph
# Similar Questions
# Redundant Connection
#
import collections
import unittest
# 40ms 24.16%
class Solution(object):
    def findRedundantDirectedConnection(self, edges):
        """Return the edge whose removal leaves a valid rooted tree."""
        total = len(edges)

        # Map each child to its (first) parent; a child seen twice gives the
        # two candidate edges, earlier one first.
        parent = {}
        candidates = []
        for src, dst in edges:
            if dst in parent:
                candidates.append((parent[dst], dst))
                candidates.append((src, dst))
            else:
                parent[dst] = src

        def walk_up(start):
            # Follow parent links until a root or a repeated node is hit.
            visited = set()
            cur = start
            while cur in parent and cur not in visited:
                visited.add(cur)
                cur = parent[cur]
            return cur, visited

        root, _ = walk_up(1)

        if not candidates:
            # Every node has one parent, so the extra edge closed a cycle;
            # the answer is the last edge lying entirely inside that cycle.
            _, cycle = walk_up(root)
            answer = None
            for src, dst in edges:
                if src in cycle and dst in cycle:
                    answer = (src, dst)
            return answer

        # Otherwise check reachability from the root using the parent map
        # (which already skips the second edge into the duplicated child).
        children = collections.defaultdict(list)
        for child, par in parent.items():
            children[par].append(child)

        reached = [True] + [False] * total
        pending = [root]
        while pending:
            cur = pending.pop()
            if reached[cur]:
                continue
            reached[cur] = True
            pending.extend(children[cur])

        # All nodes reachable -> drop the later candidate, else the earlier.
        return candidates[all(reached)]
class TestMethods(unittest.TestCase):
    """Placeholder unittest container; holds only a trivial sanity check."""

    def test_Local(self):
        # trivial assertion so the runner has at least one passing test
        self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/redundant-connection-ii/solution/
# https://leetcode.com/problems/redundant-connection-ii/discuss/108058/one-pass-disjoint-set-solution-with-explai
# https://leetcode.com/problems/redundant-connection-ii/discuss/218692/Swift-union-find-solution
# summary:
# 1) Check whether there is a node having two parents.
# If so, store them as candidates A and B, and set the second edge invalid.
# 2) Perform normal union find.
# If the tree is now valid
# simply return candidate B
# else if candidates not existing
# we find a circle, return current edge;
# else
# remove candidate A instead of B.
#
# In the following code,
# last == -1 means "no cycle found" which is scenario 1 or 2
# second != -1 && last != -1 means "one edge removed and the result tree has cycle" which is scenario 3
# second == -1 means "no edge skipped or removed" which is scenario 4
#
# Union Find
# 3ms 99.49%
class Solution {
public int[] findRedundantDirectedConnection(int[][] edges) {
int[] roots = new int[edges.length + 1], ds = new int[edges.length + 1];
Arrays.fill(roots, -1);
int first = -1, second = -1, last = -1;
for (int i = 0; i < edges.length; i++) {
int parent = edges[i][0];
int child = edges[i][1];
if (roots[child] != -1) {
first = roots[child];
second = i;
continue;
}
roots[child] = i;
int x = find(ds, parent);
if (x == child) last = i;
else ds[child] = x;
}
if (last == -1) return edges[second];
if (second == -1) return edges[last];
return edges[first];
}
private int find(int[] ds, int x){
return ds[x] == 0 ? x : (ds[x] = find(ds, ds[x]));
}
}
Approach #1: Depth-First Search [Accepted]
Complexity Analysis
Time Complexity: O(N) where N is the number of vertices (and also the number of edges) in the graph.
We perform a depth-first search.
Space Complexity: O(N), the size of the graph.
# 11ms 12.82%
class Solution {
public int[] findRedundantDirectedConnection(int[][] edges) {
int N = edges.length;
Map<Integer, Integer> parent = new HashMap();
List<int[]> candidates = new ArrayList();
for (int[] edge: edges) {
if (parent.containsKey(edge[1])) {
candidates.add(new int[]{parent.get(edge[1]), edge[1]});
candidates.add(edge);
} else {
parent.put(edge[1], edge[0]);
}
}
int root = orbit(1, parent).node;
if (candidates.isEmpty()) {
Set<Integer> cycle = orbit(root, parent).seen;
int[] ans = new int[]{0, 0};
for (int[] edge: edges) {
if (cycle.contains(edge[0]) && cycle.contains(edge[1])) {
ans = edge;
}
}
return ans;
}
Map<Integer, List<Integer>> children = new HashMap();
for (int v: parent.keySet()) {
int pv = parent.get(v);
if (!children.containsKey(pv))
children.put(pv, new ArrayList<Integer>());
children.get(pv).add(v);
}
boolean[] seen = new boolean[N+1];
seen[0] = true;
Stack<Integer> stack = new Stack();
stack.add(root);
while (!stack.isEmpty()) {
int node = stack.pop();
if (!seen[node]) {
seen[node] = true;
if (children.containsKey(node)) {
for (int c: children.get(node))
stack.push(c);
}
}
}
for (boolean b: seen) if (!b)
return candidates.get(0);
return candidates.get(1);
}
public OrbitResult orbit(int node, Map<Integer, Integer> parent) {
Set<Integer> seen = new HashSet();
while (parent.containsKey(node) && !seen.contains(node)) {
seen.add(node);
node = parent.get(node);
}
return new OrbitResult(node, seen);
}
}
class OrbitResult {
int node;
Set<Integer> seen;
OrbitResult(int n, Set<Integer> s) {
node = n;
seen = s;
}
}
This problem is limited to a graph with N nodes and N edges.
No node is singled out if a edge is removed.
For example, [[1,2],[2,4],[3,4]], 4 nodes 3 edges, is not applicable to this problem.
You cannot remove [3,4] to single out node 3.
There are 3 cases:
Case 1) No loop, but there is one node who has 2 parents.
Case 2) A loop, and there is one node who has 2 parents, that node must be inside the loop.
Case 3) A loop, and every node has only 1 parent.
Case 1: e.g. [[1,2],[1,3],[2,3]] ,node 3 has 2 parents ([1,3] and [2,3]).
Return the edge that occurs last that is, return [2,3].
Case 2: e.g. [[1,2],[2,3],[3,1],[4,1]] , {1->2->3->1} is a loop, node 1 has 2 parents ([4,1] and [3,1]).
Return the edge that is inside the loop, that is, return [3,1].
Case 3: e.g. [[1,2],[2,3],[3,1],[1,4]] , {1->2->3->1} is a loop, you can remove any edge in a loop,
the graph is still valid. Thus, return the one that occurs last, that is, return [3,1].
Also, [[2,1],[3,1],[4,2],[1,4]] is a good example
# Union Find
# 3ms 99.49%
class Solution {
public int[] findRedundantDirectedConnection(int[][] edges) {
int[] ancestor = new int[edges.length + 1];
int[][] res = new int[2][2];
for(int[]node : edges) {
if(node[1] != getAncestor(ancestor, node[1]))
res[0] = node;
else if(getAncestor(ancestor, node[0]) == getAncestor(ancestor, node[1]))
res[1] = node;
else
ancestor[node[1]] = ancestor[node[0]];
if(res[0][0] != 0 && res[1][0] != 0)
return find(edges, ancestor, res[0], res[1]);
}
return res[0][0] == 0 ? res[1] : res[0];
}
public int getAncestor(int[] ancestor, int node) {
if(node != ancestor[node])
ancestor[node] = ancestor[node] == 0 ? node : getAncestor(ancestor, ancestor[node]);
return ancestor[node];
}
public int[] find(int[][] edges, int[] ancestor, int[] removed0, int[] removed1) {
for(int[] res : edges)
if(res[1] == removed0[1] && getAncestor(ancestor, res[1]) == getAncestor(ancestor, removed1[1]))
return res;
return new int[2];
}
}
# Union Find
# 3ms 99.49%
class Solution {
public int[] findRedundantDirectedConnection(int[][] edges) {
int[] parent = new int[edges.length+1];
for (int i = 0; i < parent.length; i++) {
parent[i] = i;
}
int[] cycleEdge = null;
int[] mParent = null;
for (int[] edge : edges) {
int x = find(parent, edge[0]);
int y = find(parent, edge[1]);
if (x == y)
cycleEdge = edge;
else {
if (y != edge[1])
mParent = edge;
else
parent[y] = x;
}
}
// means we only got the multiparent problem, and the edges we recorded using parent so far are good, so just return this one.
if (cycleEdge == null)
return mParent;
// means we only got the cycle problem, in this case we can remove any edge in the cycle, so just remove this one.
if (mParent == null)
return cycleEdge;
// now, it means we have both cycle and multi-parent problem.
// In my code, i didn't record an edge into parent if we think it's involved into the multi-parent problem,
// but we are still getting the cycle problem. Since in this problem we can only have edges point to the same
// node, so, current mParent edge is the wrong one, we need to remove the other one pointing to the same
// dest as mParent ex: [[2,1],[3,1],[4,2],[1,4]]
for (int[] edge : edges) {
if (edge[1] == mParent[1])
return edge;
}
return new int[2];
}
public int find(int[] parent, int x) {
if (parent[x] == x)
return x;
return find(parent, parent[x]);
}
}
'''
| 1,123 | 18 | 97 |
3690e31d3dc0579d742dbd37b89a379e1320107a | 85,990 | py | Python | src/harvesters/core.py | batrlatom/harvesters | 2c3cb52538afb8999275a81fe72cb7ee51a0e7af | [
"Apache-2.0"
] | null | null | null | src/harvesters/core.py | batrlatom/harvesters | 2c3cb52538afb8999275a81fe72cb7ee51a0e7af | [
"Apache-2.0"
] | null | null | null | src/harvesters/core.py | batrlatom/harvesters | 2c3cb52538afb8999275a81fe72cb7ee51a0e7af | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# ----------------------------------------------------------------------------
#
# Copyright 2018 EMVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ----------------------------------------------------------------------------
# Standard library imports
from datetime import datetime
import io
import os
import pathlib
import signal
import sys
from threading import Lock, Thread, Event
from threading import current_thread, main_thread
import time
from urllib.parse import unquote
import weakref
import zipfile
import tempfile
from zipfile import BadZipFile
# Related third party imports
import numpy as np
from genicam.genapi import NodeMap
from genicam.genapi import LogicalErrorException, RuntimeException
from genicam.genapi import ChunkAdapterGeneric, ChunkAdapterU3V, \
ChunkAdapterGEV
from genicam.gentl import TimeoutException, \
NotImplementedException, ParsingChunkDataException, NoDataException, \
ErrorException, InvalidBufferException, InvalidParameterException
from genicam.gentl import GenericException
from genicam.gentl import GenTLProducer, BufferToken, EventManagerNewBuffer
from genicam.gentl import DEVICE_ACCESS_FLAGS_LIST, EVENT_TYPE_LIST, \
ACQ_START_FLAGS_LIST, ACQ_STOP_FLAGS_LIST, ACQ_QUEUE_TYPE_LIST, \
PAYLOADTYPE_INFO_IDS
# Local application/library specific imports
from harvesters._private.core.port import ConcretePort
from harvesters._private.core.statistics import Statistics
from harvesters.util.logging import get_logger
from harvesters.util.pfnc import symbolics
from harvesters.util.pfnc import uint16_formats, uint32_formats, \
float32_formats, uint8_formats
from harvesters.util.pfnc import component_2d_formats
from harvesters.util.pfnc import lmn_444_location_formats, \
lmno_4444_location_formats, lmn_422_location_formats, \
lmn_411_location_formats, mono_location_formats, bayer_location_formats
_is_logging_buffer_manipulation = True if 'HARVESTERS_LOG_BUFFER_MANIPULATION' in os.environ else False
_sleep_duration_default = 0.000001 # s
class ThreadBase:
    """
    Abstract base for the worker-thread proxies used by Harvester.

    :class:`ImageAcquirer` drives image acquisition with Python's built-in
    :mod:`threading` module by default, but a client may prefer another
    threading facility such as PyQt's :class:`QThread`.  Subclassing this
    proxy is the supported way to plug such a facility in: a concrete
    subclass supplies :meth:`_start`, :meth:`stop`, :meth:`acquire`,
    :meth:`release`, and the :attr:`worker`, :attr:`mutex` and
    :attr:`id_` accessors.
    """
    def __init__(self, *, mutex=None, logger=None):
        """
        :param mutex: A mutex object guarding the worker.
        :param logger: A logger object; a module-level default is created
            when omitted.
        """
        super().__init__()
        self._logger = logger or get_logger(name=__name__)
        self._mutex = mutex
        self._is_running = False
    def start(self):
        """
        Marks the proxy as running, then launches the worker through the
        subclass-provided :meth:`_start` hook.
        :return: None.
        """
        self._is_running = True
        self._start()
        self._logger.debug(
            'Started thread {:0X}.'.format(self.id_)
        )
    def _start(self):
        """
        Hook that actually launches the worker; subclasses must override.
        :return: None.
        """
        raise NotImplementedError
    def stop(self):
        """
        Stops the running worker; subclasses must override.
        :return: None.
        """
        raise NotImplementedError
    def acquire(self):
        """
        Acquires the mutex; subclasses must override.
        :return: None.
        """
        raise NotImplementedError
    def release(self):
        """
        Releases the acquired mutex; subclasses must override.
        :return: None.
        """
        raise NotImplementedError
    def is_running(self):
        """
        :return: :const:`True` while the worker is running, otherwise
            :const:`False`.
        """
        return self._is_running
    @property
    def worker(self):
        """
        The callable the thread runs; subclasses must override.
        """
        raise NotImplementedError
    @worker.setter
    def worker(self, obj):
        """
        Installs the worker callable; subclasses must override.
        """
        raise NotImplementedError
    @property
    def mutex(self):
        """
        The mutex object in use; subclasses must override.
        """
        raise NotImplementedError
    @property
    def id_(self):
        """
        An identifier of the underlying thread; subclasses must override.
        """
        raise NotImplementedError
class ComponentBase:
    """
    Common behaviour shared by the concrete data-component classes that
    wrap a GenTL buffer (or buffer part).
    """
    def __init__(self, *, buffer=None):
        """
        :param buffer: The GenTL buffer backing this component; required.
        """
        assert buffer
        super().__init__()
        self._buffer = buffer
        # Concrete subclasses populate this with the decoded payload.
        self._data = None
    @property
    def data_format(self):
        """
        :return: The data type of the data component.
        """
        return self._buffer.data_format
    @property
    def data_format_namespace(self):
        """
        :return: The data type namespace of the data component.
        """
        # NOTE(review): this intentionally mirrors ``data_format`` —
        # presumably the buffer exposes no separate namespace accessor;
        # confirm against the GenTL producer.
        return self._buffer.data_format
    @property
    def source_id(self):
        """
        :return: The source ID of the data component.
        """
        return self._buffer.source_id
    @property
    def data(self):
        """
        :return: The component data, or ``None`` when not decoded.
        """
        return self._data
class ComponentUnknown(ComponentBase):
    """
    Represents a data component that is classified as
    :const:`PART_DATATYPE_UNKNOWN` by the GenTL Standard.
    """
    # No specialised behaviour: the accessors inherited from ComponentBase
    # are all that can be offered for an unknown part data type.
class Component2DImage(ComponentBase):
    """
    Represents a data component that is classified as
    :const:`PART_DATATYPE_2D_IMAGE` by the GenTL Standard.
    """
    def __init__(self, *, buffer=None, part=None, node_map=None, logger=None):
        """
        :param buffer: The GenTL buffer that delivered this image.
        :param part: The buffer "part" when the payload is multi-part;
            ``None`` for single-part payloads, in which case geometry is
            read from the buffer itself (or the node map as a fallback).
        :param node_map: The remote device's node map.
        :param logger: An optional logger; a module default is used when omitted.
        """
        #
        assert buffer
        assert node_map
        #
        super().__init__(buffer=buffer)
        self._logger = logger or get_logger(name=__name__)
        #
        self._part = part
        self._node_map = node_map
        self._data = None
        self._num_components_per_pixel = 0
        symbolic = self.data_format
        # Determine the data type:
        if self.x_padding > 0:
            # In this case, the client will have to trim the padding part.
            # so we create a NumPy array that consists of uint8 elements
            # first. The client will interpret the array in an appropriate
            # dtype in the end once he trimmed:
            dtype = 'uint8'
            bytes_per_pixel_data_component = 1
        else:
            if symbolic in uint16_formats:
                dtype = 'uint16'
                bytes_per_pixel_data_component = 2
            elif symbolic in uint32_formats:
                dtype = 'uint32'
                bytes_per_pixel_data_component = 4
            elif symbolic in float32_formats:
                dtype = 'float32'
                bytes_per_pixel_data_component = 4
            elif symbolic in uint8_formats:
                dtype = 'uint8'
                bytes_per_pixel_data_component = 1
            else:
                # Sorry, Harvester can't handle this:
                self._data = None
                return
        # Determine the number of components per pixel:
        if symbolic in lmn_444_location_formats:
            num_components_per_pixel = 3.
        elif symbolic in lmn_422_location_formats:
            num_components_per_pixel = 2.
        elif symbolic in lmn_411_location_formats:
            num_components_per_pixel = 1.5
        elif symbolic in lmno_4444_location_formats:
            num_components_per_pixel = 4.
        elif symbolic in mono_location_formats or \
                symbolic in bayer_location_formats:
            num_components_per_pixel = 1.
        else:
            # Sorry, Harvester can't handle this:
            self._data = None
            return
        self._num_components_per_pixel = num_components_per_pixel
        self._symbolic = symbolic
        #
        width = self.width
        height = self.height
        # Work out how many array elements to map and where they start:
        if self._part:
            # Multi-part: the part knows its own byte size and offset.
            count = self._part.data_size
            count //= bytes_per_pixel_data_component
            data_offset = self._part.data_offset
        else:
            # Single-part: derive the element count from the geometry.
            count = width * height
            count *= num_components_per_pixel
            count += self.y_padding
            data_offset = 0
        # Convert the Python's built-in bytes array to a Numpy array:
        if _is_logging_buffer_manipulation:
            self._logger.debug(
                'Component 2D image ('
                'len(raw_buffer): {0}, '
                'int(count): {1}, '
                'dtype: {2}, '
                'offset: {3}, '
                'pixel format: {4},'
                'x padding: {5},'
                'y padding: {6}'
                ')'.format(
                    len(self._buffer.raw_buffer),
                    int(count),
                    dtype,
                    data_offset,
                    symbolic,
                    self.x_padding,
                    self.y_padding,
                )
            )
        # np.frombuffer gives a zero-copy view onto the GenTL raw buffer.
        self._data = np.frombuffer(
            self._buffer.raw_buffer,
            count=int(count),
            dtype=dtype,
            offset=data_offset
        )
    def represent_pixel_location(self):
        """
        Returns a NumPy array that represents the 2D pixel location,
        which is defined by PFNC, of the original image data.
        You may use the returned NumPy array for a calculation to map the
        original image to another format.
        :return: A NumPy array that represents the 2D pixel location.
        """
        if self.data is None:
            return None
        # Reshape the flat view into (rows, row-stride-in-components):
        return self._data.reshape(
            self.height + self.y_padding,
            int(self.width * self._num_components_per_pixel + self.x_padding)
        )
    @property
    def num_components_per_pixel(self):
        """
        :return: The number of data components per pixel.
        """
        return self._num_components_per_pixel
    @property
    def width(self):
        """
        :return: The width of the data component in the buffer in number of pixels.
        """
        # Prefer the part, then the buffer; fall back to the node map when
        # the GenTL module cannot answer the inquiry.
        try:
            if self._part:
                value = self._part.width
            else:
                value = self._buffer.width
        except GenericException:
            value = self._node_map.Width.value
        return value
    @property
    def height(self):
        """
        :return: The height of the data component in the buffer in number of pixels.
        """
        try:
            if self._part:
                value = self._part.height
            else:
                value = self._buffer.height
        except GenericException:
            value = self._node_map.Height.value
        return value
    @property
    def data_format_value(self):
        """
        :return: The data type of the data component as integer value.
        """
        try:
            if self._part:
                value = self._part.data_format
            else:
                value = self._buffer.pixel_format
        except GenericException:
            value = self._node_map.PixelFormat.value
        return value
    @property
    def data_format(self):
        """
        :return: The data type of the data component as string.
        """
        return symbolics[self.data_format_value]
    @property
    def delivered_image_height(self):
        """
        :return: The image height of the data component.
        """
        try:
            if self._part:
                value = self._part.delivered_image_height
            else:
                value = self._buffer.delivered_image_height
        except GenericException:
            value = 0
        return value
    @property
    def x_offset(self):  # TODO: Check the naming convention.
        """
        :return: The X offset of the data in the buffer in number of pixels from the image origin to handle areas of interest.
        """
        try:
            if self._part:
                value = self._part.x_offset
            else:
                value = self._buffer.offset_x
        except GenericException:
            value = self._node_map.OffsetX.value
        return value
    @property
    def y_offset(self):
        """
        :return: The Y offset of the data in the buffer in number of pixels from the image origin to handle areas of interest.
        """
        try:
            if self._part:
                value = self._part.y_offset
            else:
                value = self._buffer.offset_y
        except GenericException:
            value = self._node_map.OffsetY.value
        return value
    @property
    def x_padding(self):
        """
        :return: The X padding of the data component in the buffer in number of pixels.
        """
        try:
            if self._part:
                value = self._part.x_padding
            else:
                value = self._buffer.padding_x
        except GenericException:
            value = 0
        return value
    @property
    def y_padding(self):
        """
        :return: The Y padding of the data component in the buffer in number of pixels.
        """
        try:
            if self._part:
                value = self._part.y_padding
            else:
                value = self._buffer.padding_y
        except GenericException:
            value = 0
        return value
class Buffer:
    """
    Is provided by an :class:`ImageAcquire` object when you call its
    :meth:`~harvesters.core.ImageAcquirer.fetch_buffer` method. It provides
    you a way to access acquired data and its relevant information.
    Note that it will never be necessary to create this object by yourself
    in general.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: The raw GenTL buffer to wrap; required.
        :param node_map: The remote device's node map; required.
        :param logger: An optional logger; a module default is used when omitted.
        """
        #
        assert buffer
        assert node_map
        #
        self._logger = logger or get_logger(name=__name__)
        #
        super().__init__()
        #
        self._buffer = buffer
        self._node_map = node_map
        # Eagerly build the payload wrapper that exposes the buffer's data
        # components (delegates to the _build_payload factory).
        self._payload = self._build_payload(
            buffer=buffer,
            node_map=node_map,
            logger=self._logger
        )
    @property
    def timestamp_ns(self):
        """
        :return: The timestamp in nano-second.
        """
        return self._buffer.timestamp_ns
    @property
    def timestamp(self):
        """
        :return: The timestamp in the TL specific unit.
        """
        # Prefer the nanosecond timestamp; fall back to the TL-specific
        # tick counter only when a tick frequency is actually available.
        timestamp = 0
        try:
            timestamp = self._buffer.timestamp_ns
        except GenericException:
            try:
                _ = self.timestamp_frequency
            except GenericException:
                pass
            else:
                try:
                    timestamp = self._buffer.timestamp
                except GenericException:
                    timestamp = 0
        return timestamp
    @property
    def timestamp_frequency(self):
        """
        :return: The timestamp frequency which is used to represent a timestamp.
        """
        # If the buffer reports nanoseconds the frequency is 1 GHz by
        # definition; otherwise ask the device, then the GEV node map.
        frequency = 1000000000  # Hz
        try:
            _ = self._buffer.timestamp_ns
        except GenericException:
            try:
                frequency = self._buffer.parent.parent.timestamp_frequency
            except GenericException:
                try:
                    frequency = self._node_map.GevTimestampTickFrequency.value
                except GenericException:
                    pass
        return frequency
    @property
    def payload_type(self):
        """
        :return: The payload type that the :class:`Buffer` object contains.
        """
        return self._buffer.payload_type
    @property
    def payload(self):
        """
        :return: A containing object which derives from :class:`PayloadBase` class.
        """
        return self._payload
    def queue(self):
        """
        Queues the buffer to prepare for the upcoming image acquisition. Once
        the buffer is queued, the :class:`Buffer` object will be obsolete.
        You'll have nothing to do with it.
        Note that you have to return the ownership of the fetched buffers to
        the :class:`ImageAcquirer` object before stopping image acquisition
        calling this method because the :class:`ImageAcquirer` object tries
        to clear the self-allocated buffers when it stops image acquisition.
        """
        #
        if _is_logging_buffer_manipulation:
            self._logger.debug(
                'Queued Buffer module #{0}'
                ' containing frame #{1}'
                ' to DataStream module {2}'
                ' of Device module {3}'
                '.'.format(
                    self._buffer.context,
                    self._buffer.frame_id,
                    self._buffer.parent.id_,
                    self._buffer.parent.parent.id_
                )
            )
        # Hand ownership back to the data stream:
        self._buffer.parent.queue_buffer(self._buffer)
@staticmethod
class PayloadBase:
    """
    Common base for the payload wrapper classes; the concrete payload
    types themselves are defined by the GenTL Standard.
    """
    def __init__(self, *, buffer=None, logger=None):
        """
        :param buffer: The GenTL buffer backing the payload; required.
        :param logger: A logger object; a module-level default is created
            when omitted.
        """
        assert buffer
        self._logger = logger if logger else get_logger(name=__name__)
        super().__init__()
        self._buffer = buffer
        # Subclasses append ComponentBase-derived objects here.
        self._components = []
    @property
    def payload_type(self):
        """
        :return: The GenTL payload type of the underlying buffer.
        """
        return self._buffer.payload_type
    @property
    def components(self):
        """
        :return: A :class:`list` of objects deriving from :class:`ComponentBase`.
        """
        return self._components
class PayloadUnknown(PayloadBase):
    """
    Wraps a payload classified as :const:`PAYLOAD_TYPE_UNKNOWN` by the
    GenTL Standard.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: The GenTL buffer backing the payload; required.
        :param node_map: The remote device's node map; required by the
            common payload constructor signature.
        :param logger: An optional logger.
        """
        assert buffer
        assert node_map
        self._logger = logger if logger else get_logger(name=__name__)
        super().__init__(buffer=buffer, logger=self._logger)
class PayloadImage(PayloadBase):
    """
    Wraps a payload classified as :const:`PAYLOAD_TYPE_IMAGE` by the
    GenTL Standard; it exposes a single 2D image component.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: The GenTL buffer backing the payload; required.
        :param node_map: The remote device's node map; required.
        :param logger: An optional logger.
        """
        assert buffer
        assert node_map
        self._logger = logger if logger else get_logger(name=__name__)
        super().__init__(buffer=buffer, logger=self._logger)
        # A plain image payload carries exactly one data component.
        component = self._build_component(buffer=buffer, node_map=node_map)
        self._components.append(component)
class PayloadRawData(PayloadBase):
    """
    Wraps a payload classified as :const:`PAYLOAD_TYPE_RAW_DATA` by the
    GenTL Standard.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: The GenTL buffer backing the payload; required.
        :param node_map: The remote device's node map; required by the
            common payload constructor signature.
        :param logger: An optional logger.
        """
        assert buffer
        assert node_map
        self._logger = logger if logger else get_logger(name=__name__)
        super().__init__(buffer=buffer, logger=self._logger)
class PayloadFile(PayloadBase):
    """
    Wraps a payload classified as :const:`PAYLOAD_TYPE_FILE` by the
    GenTL Standard.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: The GenTL buffer backing the payload; required.
        :param node_map: The remote device's node map; required by the
            common payload constructor signature.
        :param logger: An optional logger.
        """
        assert buffer
        assert node_map
        self._logger = logger if logger else get_logger(name=__name__)
        super().__init__(buffer=buffer, logger=self._logger)
class PayloadJPEG(PayloadBase):
    """
    Wraps a payload classified as :const:`PAYLOAD_TYPE_JPEG` by the
    GenTL Standard.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: The GenTL buffer backing the payload; required.
        :param node_map: The remote device's node map; required by the
            common payload constructor signature.
        :param logger: An optional logger.
        """
        assert buffer
        assert node_map
        self._logger = logger if logger else get_logger(name=__name__)
        super().__init__(buffer=buffer, logger=self._logger)
class PayloadJPEG2000(PayloadBase):
    """
    Wraps a payload classified as :const:`PAYLOAD_TYPE_JPEG2000` by the
    GenTL Standard.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: The GenTL buffer backing the payload; required.
        :param node_map: The remote device's node map; required by the
            common payload constructor signature.
        :param logger: An optional logger.
        """
        assert buffer
        assert node_map
        self._logger = logger if logger else get_logger(name=__name__)
        super().__init__(buffer=buffer, logger=self._logger)
class PayloadH264(PayloadBase):
    """
    Wraps a payload classified as :const:`PAYLOAD_TYPE_H264` by the
    GenTL Standard.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: The GenTL buffer backing the payload; required.
        :param node_map: The remote device's node map; required by the
            common payload constructor signature.
        :param logger: An optional logger.
        """
        assert buffer
        assert node_map
        self._logger = logger if logger else get_logger(name=__name__)
        super().__init__(buffer=buffer, logger=self._logger)
class PayloadChunkOnly(PayloadBase):
    """
    Represents a payload that is classified as
    :const:`PAYLOAD_TYPE_CHUNK_ONLY` by the GenTL Standard.
    """
    # Chunk-only payloads carry no image component, so the base-class
    # behaviour (an empty component list) is sufficient as-is.
class PayloadMultiPart(PayloadBase):
    """
    Represents a payload that is classified as
    :const:`PAYLOAD_TYPE_MULTI_PART` by the GenTL Standard.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: The GenTL buffer holding the multi-part payload; required.
        :param node_map: The remote device's node map; required.
        :param logger: An optional logger; a module default is used when omitted.
        """
        #
        assert buffer
        assert node_map
        #
        self._logger = logger or get_logger(name=__name__)
        #
        super().__init__(buffer=buffer, logger=self._logger)
        # Build one data component per GenTL "part".  The index from the
        # original enumerate() was never used, so iterate directly.
        for part in self._buffer.parts:
            self._components.append(
                self._build_component(
                    buffer=buffer, part=part, node_map=node_map
                )
            )
class ImageAcquirer:
    """
    Manages everything you need to acquire images from the connecting device.
    """
    # Class-wide event shared with the SIGINT handler so that Ctrl-C can
    # signal every acquirer's worker threads.
    _event = Event()
    # TL types that get specialised chunk handling — presumably selects
    # ChunkAdapterU3V/ChunkAdapterGEV; confirm in _get_chunk_adapter.
    _specialized_tl_type = ['U3V', 'GEV']
    def __init__(
            self, *, parent=None, device=None,
            profiler=None, logger=None,
            sleep_duration=_sleep_duration_default,
            file_path=None
    ):
        """
        :param parent: The :class:`Harvester` object that created this acquirer.
        :param device: An opened GenTL Device module.
        :param profiler: (Optional) A profiler object.
        :param logger: (Optional) A logger; a module default is used when omitted.
        :param sleep_duration: (Optional) Sleep duration in seconds inserted after each run of the image acquisition worker.
        :param file_path: (Optional) Set a path to camera description file which you want to load on the target node map instead of the one which the device declares.
        """
        #
        self._logger = logger or get_logger(name=__name__)
        #
        assert parent
        assert device
        #
        super().__init__()
        #
        self._parent = parent
        #
        interface = device.parent
        system = interface.parent
        # Allow the camera-description (XML) lookup directory to be
        # overridden through the environment:
        env_var = 'HARVESTERS_XML_FILE_DIR'
        if env_var in os.environ:
            self._xml_dir = os.getenv(env_var)
        else:
            self._xml_dir = None
        # Build proxies for the whole GenTL module chain
        # (System -> Interface -> Device -> RemoteDevice), each with a node
        # map connected to the module's port; failures are logged and the
        # corresponding attribute is simply left unset:
        try:
            node_map = _get_port_connected_node_map(
                port=system.port, logger=self._logger,
                xml_dir=self._xml_dir
            )
        except GenericException as e:
            self._logger.error(e, exc_info=True)
        else:
            self._system = System(module=system, node_map=node_map)
        #
        try:
            node_map = _get_port_connected_node_map(
                port=interface.port, logger=self._logger,
                xml_dir=self._xml_dir
            )
        except GenericException as e:
            self._logger.error(e, exc_info=True)
        else:
            self._interface = Interface(
                module=interface, node_map=node_map, parent=self._system
            )
        #
        try:
            node_map = _get_port_connected_node_map(
                port=device.local_port, logger=self._logger,
                xml_dir=self._xml_dir
            )  # Local device's node map
        except GenericException as e:
            self._logger.error(e, exc_info=True)
        else:
            self._device = Device(
                module=device, node_map=node_map, parent=self._interface
            )
        #
        try:
            node_map = _get_port_connected_node_map(
                port=device.remote_port, logger=self._logger,
                file_path=file_path, xml_dir=self._xml_dir
            )  # Remote device's node map
        except GenericException as e:
            self._logger.error(e, exc_info=True)
        else:
            self._remote_device = RemoteDevice(
                module=self._device, node_map=node_map, parent=self._device
            )
        #
        self._data_streams = []
        self._event_new_buffer_managers = []
        self._create_ds_at_connection = True
        if self._create_ds_at_connection:
            self._setup_data_streams()
        #
        self._profiler = profiler
        #
        self._mutex = Lock()
        self._thread_image_acquisition = _BuiltInThread(
            mutex=self._mutex,
            worker=self._worker_image_acquisition,
            logger=self._logger,
            sleep_duration=sleep_duration
        )
        # Prepare handling the SIGINT event:
        self._threads = []
        self._threads.append(self._thread_image_acquisition)
        # Create a signal handler if it's being run in the main thread:
        self._sigint_handler = None
        if current_thread() is main_thread():
            self._sigint_handler = _SignalHandler(
                event=self._event, threads=self._threads, logger=self._logger
            )
            signal.signal(signal.SIGINT, self._sigint_handler)
            self._logger.info('Created a signal handler for SIGINT.')
        #
        self._num_filled_buffers_to_hold = 1
        #
        self._num_images_to_acquire = -1
        #
        self._timeout_for_image_acquisition = 1  # ms
        #
        self._statistics = Statistics()
        #
        self._announced_buffers = []
        self._holding_filled_buffers = []
        #
        self._has_acquired_1st_image = False
        self._is_acquiring_images = False
        self._keep_latest = True
        # Determine the default value:
        num_buffers_default = 16
        try:
            self._min_num_buffers = self._data_streams[0].buffer_announce_min
        except InvalidParameterException as e:
            # In general, a GenTL Producer should not raise the
            # InvalidParameterException to the inquiry for
            # STREAM_INFO_BUF_ANNOUNCE_MIN because it is totally legal
            # but we have observed a fact that there is at least one on
            # the market. As a workaround we involve this try-except block:
            self._logger.debug(e, exc_info=True)
            self._min_num_buffers = num_buffers_default
            self._num_buffers = num_buffers_default
        else:
            self._num_buffers = max(
                num_buffers_default, self._min_num_buffers
            )
        #
        self._signal_stop_image_acquisition = None
        #
        self._logger.info(
            'Instantiated an ImageAcquirer object for {0}.'.format(
                self._device.id_
            )
        )
        #
        self._chunk_adapter = self._get_chunk_adapter(
            device=self.device, node_map=self.remote_device.node_map
        )
        # A callback method when it's called when a new buffer is delivered:
        self._on_new_buffer_arrival = None
        # Ensure teardown runs even when the client forgets to destroy us:
        self._finalizer = weakref.finalize(self, self._destroy)
@staticmethod
@property
@on_new_buffer_arrival.setter
@property
@keep_latest.setter
@property
@num_buffers.setter
@property
@property
@num_filled_buffers_to_hold.setter
@property
@property
@property
def remote_device(self):
"""
:return: The remote device.
"""
return self._remote_device
@property
def device(self):
"""
:return: The proxy :class:`Device` module object of the connecting remote device.
"""
return self._device
@property
def interface(self):
"""
:return: The parent :class:`Interface` module object of the connecting remote device.
"""
return self._interface
@property
def system(self):
"""
:return: The parent :class:`System` module object of the connecting remote device.
"""
return self._system
def is_acquiring_images(self):
"""
:return: :const:`True` if it's acquiring images. Otherwise :const:`False`.
"""
return self._is_acquiring_images
@property
@timeout_for_image_acquisition.setter
@property
@thread_image_acquisition.setter
@property
@signal_stop_image_acquisition.setter
@property
@keep_latest.setter
    def start_image_acquisition(self):
        """
        Starts image acquisition.
        :return: None.
        """
        if not self._create_ds_at_connection:
            self._setup_data_streams()
        # Announce and queue buffers on every data stream; each stream gets
        # at least the producer's minimum, never fewer than configured:
        num_required_buffers = self._num_buffers
        for data_stream in self._data_streams:
            try:
                num_buffers = data_stream.buffer_announce_min
                if num_buffers < num_required_buffers:
                    num_buffers = num_required_buffers
            except GenericException as e:
                num_buffers = num_required_buffers
                self._logger.debug(e, exc_info=True)
            if data_stream.defines_payload_size():
                buffer_size = data_stream.payload_size
            else:
                # Fall back to the device-reported payload size:
                buffer_size = self.remote_device.node_map.PayloadSize.value
            raw_buffers = self._create_raw_buffers(
                num_buffers, buffer_size
            )
            buffer_tokens = self._create_buffer_tokens(
                raw_buffers
            )
            self._announced_buffers = self._announce_buffers(
                data_stream=data_stream, _buffer_tokens=buffer_tokens
            )
            self._queue_announced_buffers(
                data_stream=data_stream, buffers=self._announced_buffers
            )
        # Reset the number of images to acquire.
        try:
            acq_mode = self.remote_device.node_map.AcquisitionMode.value
            if acq_mode == 'Continuous':
                num_images_to_acquire = -1
            elif acq_mode == 'SingleFrame':
                num_images_to_acquire = 1
            elif acq_mode == 'MultiFrame':
                num_images_to_acquire = self.remote_device.node_map.AcquisitionFrameCount.value
            else:
                num_images_to_acquire = -1
        except GenericException as e:
            # The node doesn't exist.
            num_images_to_acquire = -1
            self._logger.debug(e, exc_info=True)
        self._num_images_to_acquire = num_images_to_acquire
        try:
            # We're ready to start image acquisition. Lock the device's
            # transport layer related features:
            self.remote_device.node_map.TLParamsLocked.value = 1
        except GenericException:
            # SFNC < 2.0
            pass
        # Start image acquisition.
        self._is_acquiring_images = True
        for data_stream in self._data_streams:
            data_stream.start_acquisition(
                ACQ_START_FLAGS_LIST.ACQ_START_FLAGS_DEFAULT,
                self._num_images_to_acquire
            )
        #
        if self.thread_image_acquisition:
            self.thread_image_acquisition.start()
        # Finally tell the remote device to start streaming:
        self.remote_device.node_map.AcquisitionStart.execute()
        self._logger.info(
            '{0} started image acquisition.'.format(self._device.id_)
        )
        if self._profiler:
            self._profiler.print_diff()
def fetch_buffer(self, *, timeout=0, is_raw=False):
"""
Fetches the latest :class:`Buffer` object and returns it.
:param timeout: Set timeout value in second.
:param is_raw: Set :const:`True` if you need a raw GenTL Buffer module.
:return: A :class:`Buffer` object.
"""
if not self.is_acquiring_images():
raise TimeoutException
watch_timeout = True if timeout > 0 else False
buffer = None
base = time.time()
while buffer is None:
if watch_timeout and (time.time() - base) > timeout:
raise TimeoutException
else:
with MutexLocker(self.thread_image_acquisition):
if len(self._holding_filled_buffers) > 0:
if is_raw:
buffer = self._holding_filled_buffers.pop(0)
else:
# Update the chunk data:
_buffer = self._holding_filled_buffers.pop(0)
self._update_chunk_data(buffer=_buffer)
#
buffer = Buffer(
buffer=_buffer,
node_map=self.remote_device.node_map,
logger=self._logger
)
if _is_logging_buffer_manipulation:
self._logger.debug(
'Fetched Buffer module #{0}'
' containing frame #{1}'
' of DataStream module {2}'
' of Device module {2}'
'.'.format(
buffer._buffer.context,
buffer._buffer.frame_id,
buffer._buffer.parent.id_,
buffer._buffer.parent.parent.id_
)
)
return buffer
@staticmethod
@staticmethod
    def stop_image_acquisition(self):
        """
        Stops image acquisition.
        :return: None.
        """
        if self.is_acquiring_images():
            #
            self._is_acquiring_images = False
            #
            if self.thread_image_acquisition.is_running():  # TODO
                self.thread_image_acquisition.stop()
            with MutexLocker(self.thread_image_acquisition):
                # Tell the remote device to stop streaming first:
                self.remote_device.node_map.AcquisitionStop.execute()
                try:
                    # Unlock TLParamsLocked in order to allow full device
                    # configuration:
                    self.remote_device.node_map.TLParamsLocked.value = 0
                except GenericException:
                    # SFNC < 2.0
                    pass
                for data_stream in self._data_streams:
                    # Stop image acquisition.
                    try:
                        data_stream.stop_acquisition(
                            ACQ_STOP_FLAGS_LIST.ACQ_STOP_FLAGS_KILL
                        )
                    except GenericException as e:
                        self._logger.error(e, exc_info=True)
                    # Flash the queue for image acquisition process.
                    data_stream.flush_buffer_queue(
                        ACQ_QUEUE_TYPE_LIST.ACQ_QUEUE_ALL_DISCARD
                    )
                for event_manager in self._event_new_buffer_managers:
                    event_manager.flush_event_queue()
                # Release buffers (or the streams themselves, depending on
                # when the data streams were created):
                if self._create_ds_at_connection:
                    self._release_buffers()
                else:
                    self._release_data_streams()
            #
            self._has_acquired_1st_image = False
            #
            self._chunk_adapter.detach_buffer()
            #
            self._logger.info(
                '{0} stopped image acquisition.'.format(self._device.id_)
            )
        if self._profiler:
            self._profiler.print_diff()
def _destroy(self):
"""
Destroys the :class:`ImageAcquirer` object. Once you called this
method, all allocated resources, including buffers and the remote
device, are released.
:return: None.
"""
# Ask its parent to destroy it:
if self._device:
self._parent._destroy_image_acquirer(self)
class Harvester:
"""
Is the class that works for you as Harvester Core. Everything begins with
this class.
"""
#
def __init__(self, *, profile=False, logger=None):
"""
:param profile:
:param logger:
"""
#
self._logger = logger or get_logger(name=__name__)
#
super().__init__()
#
self._cti_files = []
self._producers = []
self._systems = []
self._interfaces = []
self._device_info_list = []
self._ias = []
#
self._has_revised_device_list = False
self._timeout_for_update = 1000 # ms
#
if profile:
from harvesters._private.core.helper.profiler import Profiler
self._profiler = Profiler()
else:
self._profiler = None
if self._profiler:
self._profiler.print_diff()
#
self._finalizer = weakref.finalize(self, self._reset)
@property
def cti_files(self):
"""
:return: A :class:`list` object containing :class:`str` objects.
"""
return self._cti_files
@property
def device_info_list(self):
"""
:return: A :class:`list` object containing :class:`DeviceInfo` objects
"""
return self._device_info_list
@property
@timeout_for_update.setter
@property
@has_revised_device_info_list.setter
def create_image_acquirer(
self, list_index=None, *, id_=None,
vendor=None, model=None, tl_type=None, user_defined_name=None,
serial_number=None, version=None,
sleep_duration=_sleep_duration_default, file_path=None,
privilege='exclusive'
):
"""
Creates an image acquirer for the specified remote device and return
the created :class:`ImageAcquirer` object.
:param list_index: (Optional) Set an item index of the list of :class:`DeviceInfo` objects.
:param id_: (Optional) Set an index of the device information list.
:param vendor: (Optional) Set a vendor name of the target device.
:param model: (Optional) Set a model name of the target device.
:param tl_type: (Optional) Set a transport layer type of the target device.
:param user_defined_name: (Optional) Set a user defined name string of the target device.
:param serial_number: (Optional) Set a serial number string of the target device.
:param version: (Optional) Set a version number string of the target device.
:param sleep_duration: (Optional) Set a sleep duration in second that is inserted after the image acquisition worker is executed.
:param file_path: (Optional) Set a path to camera description file which you want to load on the target node map instead of the one which the device declares.
:param privilege: (Optional) Set an access privilege. `exclusive`, `contorl`, and `read_only` are supported. The default is `exclusive`.
:return: An :class:`ImageAcquirer` object that associates with the specified device.
Note that you have to close it when you are ready to release the
device that you have been controlled. As long as you hold it, the
controlled device will be not available from other clients.
"""
#
if self.device_info_list is None:
# TODO: Throw an exception to tell clients that there's no
# device to connect.
return
# Instantiate a GenTL Device module.
if list_index is not None:
device = self.device_info_list[list_index].create_device()
else:
keys = [
'id_', 'vendor', 'model', 'tl_type',
'user_defined_name', 'serial_number', 'version',
]
# Create a copy of the list. Do not use the original list:
candidates = self.device_info_list.copy()
for key in keys:
key_value = eval(key)
if key_value:
items_to_be_removed = []
# Find out the times to be removed from the candidates.
for item in candidates:
try:
if key_value != eval('item.' + key):
items_to_be_removed.append(item)
except GenericException as e:
# The candidate doesn't support the information.
self._logger.warn(e, exc_info=True)
pass
# Remove irrelevant items from the candidates.
for item in items_to_be_removed:
candidates.remove(item)
num_candidates = len(candidates)
if num_candidates > 1:
raise ValueError(
'You have two or more candidates. '
'You have to pass one or more keys so that '
'a single candidate is specified.'
)
elif num_candidates == 0:
raise ValueError(
'You have no candidate. '
'You have to pass one or more keys so that '
'a single candidate is specified.'
)
else:
device = candidates[0].create_device()
# Then open it.
try:
#
if privilege == 'exclusive':
_privilege = DEVICE_ACCESS_FLAGS_LIST.DEVICE_ACCESS_EXCLUSIVE
elif privilege == 'control':
_privilege = DEVICE_ACCESS_FLAGS_LIST.DEVICE_ACCESS_CONTROL
elif privilege == 'read_only':
_privilege = DEVICE_ACCESS_FLAGS_LIST.DEVICE_ACCESS_READONLY
else:
raise NotImplementedError(
'{0} is not supported.'.format(privilege)
)
#
device.open(_privilege)
except GenericException as e:
self._logger.debug(e, exc_info=True)
# Just re-throw the exception. The decision should be made by
# the client but not Harvester:
raise
else:
self._logger.info(
'Opened Device module, {0}.'.format(device.id_)
)
# Create an :class:`ImageAcquirer` object and return it.
ia = ImageAcquirer(
parent=self, device=device, profiler=self._profiler,
logger=self._logger, sleep_duration=sleep_duration,
file_path=file_path
)
self._ias.append(ia)
if self._profiler:
self._profiler.print_diff()
return ia
    def add_cti_file(self, file_path: str):
        """
        Adds a CTI file to work with to the CTI file list.

        :param file_path: Set a file path to the target CTI file.

        :return: None.
        """
        if not os.path.exists(file_path):
            self._logger.warning(
                'Attempted to add {0} which does not exist.'.format(file_path)
            )
        # NOTE(review): a non-existent path is still appended below after the
        # warning above — confirm this is intentional (e.g. the producer file
        # may appear on disk later) rather than an early-return that was lost.
        if file_path not in self._cti_files:
            self._cti_files.append(file_path)
            self._logger.info(
                'Added {0} to the CTI file list.'.format(file_path)
            )
def remove_cti_file(self, file_path: str):
"""
Removes the specified CTI file from the CTI file list.
:param file_path: Set a file path to the target CTI file.
:return: None.
"""
if file_path in self._cti_files:
self._cti_files.remove(file_path)
self._logger.info(
'Removed {0} from the CTI file list.'.format(file_path)
)
def remove_cti_files(self):
"""
Removes all CTI files in the CTI file list.
:return: None.
"""
self._cti_files.clear()
#
self._logger.info('Removed the all CTI file from the list.')
    def _reset(self):
        """
        Initializes the :class:`Harvester` object. Once you reset the
        :class:`Harvester` object, all allocated resources, including buffers
        and remote device, will be released.

        :return: None.
        """
        # Destroy every ImageAcquirer first: each owns data streams and a
        # device that must be released before the producers are closed below.
        for ia in self._ias:
            ia._destroy()
        self._ias.clear()
        #
        self._logger.info('Started resetting the Harvester object.')
        self.remove_cti_files()
        self._release_gentl_producers()

        if self._profiler:
            self._profiler.print_diff()

        #
        self._logger.info('Completed resetting the Harvester object.')
    def update_device_info_list(self):
        """
        Updates the device information list. You'll have to call this method
        every time you added CTI files or plugged/unplugged devices.

        Walks the GenTL hierarchy (producer -> system -> interface -> device)
        and rebuilds :attr:`device_info_list`. On any GenTL failure the list
        is left as-is and ``_has_revised_device_list`` is set to ``False``.

        :return: None.
        """
        # Start from a clean state: close previously opened producers.
        self._release_gentl_producers()
        try:
            self._open_gentl_producers()
            self._open_systems()
            #
            for system in self._systems:
                # Refresh the interfaces the system currently knows about.
                system.update_interface_info_list(self.timeout_for_update)
                #
                for i_info in system.interface_info_list:
                    iface = i_info.create_interface()
                    try:
                        iface.open()
                    except GenericException as e:
                        # An interface that cannot be opened is skipped,
                        # not fatal for the whole enumeration.
                        self._logger.debug(e, exc_info=True)
                    else:
                        self._logger.info(
                            'Opened Interface module, {0}.'.format(iface.id_)
                        )
                        iface.update_device_info_list(self.timeout_for_update)
                        self._interfaces.append(iface)
                        for d_info in iface.device_info_list:
                            self.device_info_list.append(
                                DeviceInfo(device_info=d_info)
                            )

        except GenericException as e:
            self._logger.error(e, exc_info=True)
            self._has_revised_device_list = False
        else:
            self._has_revised_device_list = True
        #
        self._logger.info('Updated the device information list.')
    def _destroy_image_acquirer(self, ia):
        """
        Releases all external resources including the controlling device.

        Teardown order matters: acquisition is stopped first, data streams
        are released, the chunk adapter is detached, the node map is
        disconnected, and only then is the Device module closed.

        :param ia: The :class:`ImageAcquirer` to tear down; it is removed
            from the internal list at the end.
        """
        id_ = None
        if ia.device:
            # Make sure no worker is delivering buffers any more.
            ia.stop_image_acquisition()
            #
            ia._release_data_streams()
            # Remember the device ID for logging after the device is gone.
            id_ = ia._device.id_
            #
            if ia.remote_device.node_map:
                # Detach any buffer still attached to the chunk adapter
                # before disconnecting the port.
                if ia._chunk_adapter:
                    ia._chunk_adapter.detach_buffer()
                    ia._chunk_adapter = None
                    self._logger.info(
                        'Detached a buffer from the chunk adapter of {0}.'.format(
                            id_
                        )
                    )
                ia.device.node_map.disconnect()
                self._logger.info(
                    'Disconnected the port from the NodeMap of {0}.'.format(
                        id_
                    )
                )
            #
            if ia._device.is_open():
                ia._device.close()
                self._logger.info(
                    'Closed Device module, {0}.'.format(id_)
                )
            ia._device = None
        #
        if id_:
            self._logger.info(
                'Destroyed the ImageAcquirer object which {0} '
                'had belonged to.'.format(id_)
            )
        else:
            self._logger.info(
                'Destroyed an ImageAcquirer.'
            )

        if self._profiler:
            self._profiler.print_diff()

        self._ias.remove(ia)
if __name__ == '__main__':
    # This module is a library; there is nothing to run directly.
    pass
| 29.258251 | 166 | 0.552971 | #!/usr/bin/env python3
# ----------------------------------------------------------------------------
#
# Copyright 2018 EMVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ----------------------------------------------------------------------------
# Standard library imports
from datetime import datetime
import io
import os
import pathlib
import signal
import sys
from threading import Lock, Thread, Event
from threading import current_thread, main_thread
import time
from urllib.parse import unquote
import weakref
import zipfile
import tempfile
from zipfile import BadZipFile
# Related third party imports
import numpy as np
from genicam.genapi import NodeMap
from genicam.genapi import LogicalErrorException, RuntimeException
from genicam.genapi import ChunkAdapterGeneric, ChunkAdapterU3V, \
ChunkAdapterGEV
from genicam.gentl import TimeoutException, \
NotImplementedException, ParsingChunkDataException, NoDataException, \
ErrorException, InvalidBufferException, InvalidParameterException
from genicam.gentl import GenericException
from genicam.gentl import GenTLProducer, BufferToken, EventManagerNewBuffer
from genicam.gentl import DEVICE_ACCESS_FLAGS_LIST, EVENT_TYPE_LIST, \
ACQ_START_FLAGS_LIST, ACQ_STOP_FLAGS_LIST, ACQ_QUEUE_TYPE_LIST, \
PAYLOADTYPE_INFO_IDS
# Local application/library specific imports
from harvesters._private.core.port import ConcretePort
from harvesters._private.core.statistics import Statistics
from harvesters.util.logging import get_logger
from harvesters.util.pfnc import symbolics
from harvesters.util.pfnc import uint16_formats, uint32_formats, \
float32_formats, uint8_formats
from harvesters.util.pfnc import component_2d_formats
from harvesters.util.pfnc import lmn_444_location_formats, \
lmno_4444_location_formats, lmn_422_location_formats, \
lmn_411_location_formats, mono_location_formats, bayer_location_formats
_is_logging_buffer_manipulation = True if 'HARVESTERS_LOG_BUFFER_MANIPULATION' in os.environ else False
_sleep_duration_default = 0.000001 # s
class Module:
    """
    Base wrapper pairing a raw GenTL module (System, Interface, Device or
    DataStream) with its GenApi node map and the wrapper of its parent
    module. All calls are delegated to the wrapped object.
    """
    def __init__(self, module=None, node_map=None, parent=None):
        self._module = module      # the raw GenTL module being wrapped
        self._node_map = node_map  # GenApi NodeMap bound to the module's port
        self._parent = parent      # wrapper of the parent GenTL module

    @property
    def node_map(self):
        # The GenApi NodeMap of the wrapped module.
        return self._node_map

    @property
    def parent(self):
        # The wrapper object of the parent GenTL module.
        return self._parent

    @property
    def port(self):
        # The raw port of the wrapped module.
        return self._module.port

    def register_event(self, event_type=None):
        # Delegates event registration to the wrapped GenTL module.
        return self._module.register_event(event_type)

    def announce_buffer(self, buffer_token=None):
        # Delegates buffer announcement to the wrapped GenTL module.
        return self._module.announce_buffer(buffer_token)
class DataStream(Module):
    """
    Wraps a GenTL DataStream module. Every method/property simply delegates
    to the raw data stream object supplied at construction time.
    """
    def __init__(self, module=None, node_map=None, parent=None):
        super().__init__(module=module, node_map=node_map, parent=parent)

    def open(self, data_stream_id=None):
        # Opens the underlying data stream by ID.
        self._module.open(data_stream_id)

    @property
    def id_(self):
        # The unique ID of the data stream.
        return self._module.id_

    @property
    def buffer_announce_min(self):
        # Minimum number of buffers the stream requires to be announced.
        return self._module.buffer_announce_min

    def defines_payload_size(self):
        # Whether the stream itself defines the payload size.
        return self._module.defines_payload_size()

    @property
    def payload_size(self):
        # Payload size (bytes) the stream expects per buffer.
        return self._module.payload_size

    def queue_buffer(self, announced_buffer=None):
        # Queues an announced buffer for acquisition.
        self._module.queue_buffer(announced_buffer)

    def start_acquisition(self, flags=None, num_images=None):
        # Starts acquisition on the underlying stream.
        self._module.start_acquisition(flags, num_images)

    def is_open(self):
        # Whether the underlying stream is currently open.
        return self._module.is_open()

    def stop_acquisition(self, flags=None):
        # Stops acquisition on the underlying stream.
        self._module.stop_acquisition(flags)

    def revoke_buffer(self, buffer=None):
        # Revokes (un-announces) a buffer from the stream.
        return self._module.revoke_buffer(buffer)

    def flush_buffer_queue(self, operation=None):
        # Flushes the stream's buffer queues per the given operation.
        self._module.flush_buffer_queue(operation)

    def close(self):
        # Closes the underlying data stream.
        self._module.close()
class RemoteDevice(Module):
    """
    Wraps the remote device (the physical camera) side of a GenTL Device.
    """
    def __init__(self, module=None, node_map=None, parent=None):
        super().__init__(module=module, node_map=node_map, parent=parent)

    @property
    def port(self):
        # NOTE(review): resolved via the parent wrapper's `remote_port`
        # attribute; confirm the parent object actually exposes it — the
        # `Device` wrapper in this file does not define `remote_port`.
        return self._parent.remote_port
class Device(Module):
    """
    Wraps a GenTL Device module (the local/host side of the device). Calls
    delegate to the raw device; note that :attr:`port` maps to the raw
    device's *local* port.
    """
    def __init__(self, module=None, node_map=None, parent=None):
        super().__init__(module=module, node_map=node_map, parent=parent)

    @property
    def data_stream_ids(self):
        # IDs of the data streams the device offers.
        return self._module.data_stream_ids

    def create_data_stream(self):
        # Creates a raw GenTL data stream object from the device.
        return self._module.create_data_stream()

    @property
    def id_(self):
        # The unique ID of the device.
        return self._module.id_

    @property
    def tl_type(self):
        # The transport layer type (e.g. 'U3V', 'GEV') of the device.
        return self._module.tl_type

    def is_open(self):
        # Whether the underlying device is currently open.
        return self._module.is_open()

    def close(self):
        # Closes the underlying device.
        self._module.close()

    @property
    def port(self):
        # The Device module's local port (not the remote device's port).
        return self._module.local_port
class Interface(Module):
    """
    Wraps a GenTL Interface module; behavior comes entirely from
    :class:`Module`.
    """
    def __init__(self, module=None, node_map=None, parent=None):
        super().__init__(module=module, node_map=node_map, parent=parent)
class System(Module):
    """
    Wraps a GenTL System module; behavior comes entirely from
    :class:`Module`.
    """
    def __init__(self, module=None, node_map=None, parent=None):
        super().__init__(module=module, node_map=node_map, parent=parent)
class DeviceInfo:
    """
    Wraps a GenTL device-information object and mirrors its identification
    properties (ID, vendor, model, TL type, user defined name, serial
    number, version).
    """
    # Property names mirrored from the raw object; also drives __repr__.
    _PROPERTY_NAMES = (
        'id_', 'vendor', 'model', 'tl_type',
        'user_defined_name', 'serial_number', 'version',
    )

    def __init__(self, device_info=None):
        """
        :param device_info: Set the raw GenTL device-information object.
        """
        self._device_info = device_info

    def create_device(self):
        """
        Creates and returns the GenTL Device module for this entry.
        """
        return self._device_info.create_device()

    def __repr__(self):
        # Collect each property value; producers that do not support a
        # property raise, which we render as None instead of propagating.
        # (Replaces the original eval()-based lookup, the dead
        # `if property is ''` literal-identity check, and the bare except.)
        results = []
        for name in self._PROPERTY_NAMES:
            try:
                result = getattr(self._device_info, name)
            except Exception:
                result = None
            results.append(result)

        info = '('
        delimiter = ', '
        for name, value in zip(self._PROPERTY_NAMES, results):
            # Falsy values (None, '') are rendered as None; others quoted.
            rendered = '\'{0}\''.format(value) if value else 'None'
            info += '{0}={1}'.format(name, rendered)
            info += delimiter
        info = info[:-len(delimiter)]
        info += ')'
        return info

    @property
    def id_(self):
        # The unique ID of the device.
        return self._device_info.id_

    @property
    def vendor(self):
        # The vendor name of the device.
        return self._device_info.vendor

    @property
    def model(self):
        # The model name of the device.
        return self._device_info.model

    @property
    def tl_type(self):
        # The transport layer type of the device.
        return self._device_info.tl_type

    @property
    def user_defined_name(self):
        # The user defined name of the device.
        return self._device_info.user_defined_name

    @property
    def serial_number(self):
        # The serial number of the device.
        return self._device_info.serial_number

    @property
    def version(self):
        # The version string of the device.
        return self._device_info.version
class _SignalHandler:
_event = None
_threads = None
def __init__(self, *, event=None, threads=None, logger=None):
#
self._logger = logger or get_logger(name=__name__)
#
super().__init__()
#
assert event
assert threads
#
self._event = event
self._threads = threads
def __call__(self, signum, frame):
"""
A registered Python signal modules will call this method.
"""
self._logger.debug(
'Going to terminate threads having triggered '
'by the event {0}.'.format(
self._event
)
)
# Set the Event:
self._event.set()
# Terminate the threads:
for thread in self._threads:
thread.stop()
self._logger.debug(
'Has terminated threads having triggered by '
'the event {0}.'.format(
self._event
)
)
class ThreadBase:
    """
    By default, :class:`ImageAcquirer` class internally uses Python's
    built-in :mod:`threading` module. However, you may want to use your
    preferred threading module such as :class:`QThread` of PyQt for some
    technical reasons. To allow us your preferred threading module, Harvester
    provides you a base proxy class to allow you implementing your threading
    functionality.
    """
    def __init__(self, *, mutex=None, logger=None):
        """
        :param mutex: Set the mutex object guarding the worker.
        :param logger: Set a logger; falls back to the module logger.
        """
        #
        self._logger = logger or get_logger(name=__name__)
        #
        super().__init__()
        #
        self._is_running = False  # toggled by start()/subclass stop()
        self._mutex = mutex

    def start(self):
        """
        :return: None.
        """
        # Mark running *before* launching so the worker loop condition holds.
        self._is_running = True
        self._start()
        self._logger.debug(
            'Started thread {:0X}.'.format(self.id_)
        )

    def _start(self):
        """
        This method is abstract and should be reimplemented in any sub-class.
        Starts its worker running.

        :return: None.
        """
        raise NotImplementedError

    def stop(self):
        """
        This method is abstract and should be reimplemented in any sub-class.
        Stops its worker running.

        :return: None.
        """
        raise NotImplementedError

    def acquire(self):
        """
        This method is abstract and should be reimplemented in any sub-class.
        Acquires a mutex.

        :return: None.
        """
        raise NotImplementedError

    def release(self):
        """
        This method is abstract and should be reimplemented in any sub-class.
        Releases the acquired mutex.

        :return: None.
        """
        raise NotImplementedError

    def is_running(self):
        """
        :return: :const:`True` if the worker is still running. Otherwise :const:`False`.
        """
        return self._is_running

    @property
    def worker(self):
        """
        This method is abstract and should be reimplemented in any sub-class.

        :return: None.
        """
        raise NotImplementedError

    @worker.setter
    def worker(self, obj):
        """
        This method is abstract and should be reimplemented in any sub-class.

        :return: None.
        """
        raise NotImplementedError

    @property
    def mutex(self):
        """
        This method is abstract and should be reimplemented in any sub-class.

        :return: None.
        """
        raise NotImplementedError

    @property
    def id_(self):
        """
        This method is abstract and should be reimplemented in any sub-class.

        :return: None.
        """
        raise NotImplementedError
class MutexLocker:
    """
    Context manager that acquires the given thread's mutex on entry and
    releases it on exit; yields whatever :meth:`acquire` returned.
    """
    def __init__(self, thread: ThreadBase=None):
        """
        :param thread: Set the :class:`ThreadBase` whose mutex is managed.
        """
        assert thread

        super().__init__()

        self._thread = thread
        self._locked_mutex = None

    def __enter__(self):
        thread = self._thread
        if thread is None:
            return None
        self._locked_mutex = thread.acquire()
        return self._locked_mutex

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self._thread is not None:
            self._thread.release()
class _BuiltInThread(ThreadBase):
    """
    :class:`ThreadBase` implementation backed by Python's built-in
    :mod:`threading` module. A fresh :class:`_ThreadImpl` is created on
    every :meth:`start` because Thread objects are not reusable.
    """
    def __init__(self, *, mutex=None, worker=None, logger=None,
                 sleep_duration=_sleep_duration_default):
        """
        :param mutex: Set the mutex guarding the worker.
        :param worker: Set the callable to run repeatedly in the thread.
        :param logger: Set a logger; falls back to the module logger.
        :param sleep_duration: Set the sleep (s) between worker invocations.
        """
        #
        super().__init__(mutex=mutex, logger=logger)
        #
        self._thread = None  # the current _ThreadImpl, created per start()
        self._worker = worker
        self._sleep_duration = sleep_duration

    def _start(self):
        # Create a Thread object. The object is not reusable.
        self._thread = _ThreadImpl(
            base=self, worker=self._worker,
            sleep_duration=self._sleep_duration
        )

        # Start running its worker method.
        self._thread.start()

    def stop(self):
        #
        if self._thread is None:
            return

        # Prepare to terminate the worker method.
        self._thread.stop()

        # Wait until the run methods is terminated.
        self._thread.join()

        self._logger.debug(
            'Stopped thread {:0X}.'.format(self._thread.id_)
        )

    def acquire(self):
        #
        if self._thread is None:
            return None
        #
        return self._thread.acquire()

    def release(self):
        #
        if self._thread is None:
            return
        #
        self._thread.release()

    @property
    def worker(self):
        #
        if self._thread is None:
            return None
        #
        return self._thread.worker

    @worker.setter
    def worker(self, obj):
        #
        if self._thread is None:
            return
        #
        self._thread.worker = obj

    @property
    def mutex(self):
        return self._mutex

    @property
    def id_(self):
        return self._thread.id_
class _ThreadImpl(Thread):
    """
    The actual :class:`threading.Thread` subclass used by
    :class:`_BuiltInThread`: repeatedly calls the worker while the owning
    base object's ``_is_running`` flag stays True.
    """
    def __init__(self, base=None, worker=None,
                 sleep_duration=_sleep_duration_default):
        """
        :param base: Set the owning :class:`_BuiltInThread`.
        :param worker: Set the callable invoked on every loop iteration.
        :param sleep_duration: Set the sleep (s) between iterations.
        """
        #
        assert base
        #
        # Daemonize when running in an interactive shell so the interpreter
        # can exit without joining this thread.
        super().__init__(daemon=self._is_interactive())

        #
        self._worker = worker
        self._base = base
        self._sleep_duration = sleep_duration

    @staticmethod
    def _is_interactive():
        # Plain interactive interpreter (python -i or REPL):
        if bool(getattr(sys, 'ps1', sys.flags.interactive)):
            return True
        # IPython/Jupyter-style applications:
        try:
            from traitlets.config.application import Application as App
            return App.initialized() and App.instance().interact
        except (ImportError, AttributeError):
            return False

    def stop(self):
        # Flip the shared flag under the mutex; run() observes it.
        with self._base.mutex:
            self._base._is_running = False

    def run(self):
        """
        Runs its worker method.

        This method will be terminated once its parent's is_running
        property turns False.
        """
        while self._base._is_running:
            if self._worker:
                self._worker()
                time.sleep(self._sleep_duration)

    def acquire(self):
        return self._base.mutex.acquire()

    def release(self):
        self._base.mutex.release()

    @property
    def worker(self):
        return self._worker

    @worker.setter
    def worker(self, obj):
        self._worker = obj

    @property
    def id_(self):
        # The OS-level thread identifier assigned by threading.Thread.
        return self.ident
class ComponentBase:
    """
    Is a base class of various data component types.
    """
    def __init__(self, *, buffer=None):
        """
        :param buffer: Set the buffer object that delivered this component.
        """
        #
        assert buffer
        #
        super().__init__()

        #
        self._buffer = buffer
        self._data = None  # filled in by subclasses that can parse the data

    @property
    def data_format(self):
        """
        :return: The data type of the data component.
        """
        return self._buffer.data_format

    @property
    def data_format_namespace(self):
        """
        :return: The data type namespace of the data component.
        """
        # NOTE(review): this returns the buffer's data_format, same as
        # `data_format` above — confirm whether it should instead read a
        # dedicated namespace attribute of the buffer.
        return self._buffer.data_format

    @property
    def source_id(self):
        """
        :return: The source ID of the data component.
        """
        return self._buffer.source_id

    @property
    def data(self):
        """
        :return: The component data.
        """
        return self._data
class ComponentUnknown(ComponentBase):
    """
    Represents a data component that is classified as
    :const:`PART_DATATYPE_UNKNOWN` by the GenTL Standard.
    """
    def __init__(self, *, buffer=None):
        """
        :param buffer: Set the buffer object that delivered this component.
        """
        # Forward the buffer to the base class. The original called
        # super().__init__() with no arguments, which always tripped the
        # base class' keyword-only signature / `assert buffer`, making the
        # class unconstructible.
        super().__init__(buffer=buffer)
class Component2DImage(ComponentBase):
    """
    Represents a data component that is classified as
    :const:`PART_DATATYPE_2D_IMAGE` by the GenTL Standard.

    On construction it interprets the delivering buffer's raw bytes as a
    1-D NumPy array (:attr:`data`); :meth:`represent_pixel_location`
    reshapes that into the 2-D pixel layout defined by PFNC.
    """
    def __init__(self, *, buffer=None, part=None, node_map=None, logger=None):
        """
        :param buffer: Set the buffer object that delivered this component.
        :param part: Set the GenTL "part" for multi-part payloads; None for
            single-image payloads.
        :param node_map: Set the remote device's node map, used as fallback
            for width/height/format when the buffer cannot report them.
        """
        #
        assert buffer
        assert node_map
        #
        super().__init__(buffer=buffer)

        self._logger = logger or get_logger(name=__name__)
        #
        self._part = part
        self._node_map = node_map
        self._data = None
        self._num_components_per_pixel = 0

        symbolic = self.data_format

        # Determine the data type:
        if self.x_padding > 0:
            # In this case, the client will have to trim the padding part.
            # so we create a NumPy array that consists of uint8 elements
            # first. The client will interpret the array in an appropriate
            # dtype in the end once he trimmed:
            dtype = 'uint8'
            bytes_per_pixel_data_component = 1
        else:
            if symbolic in uint16_formats:
                dtype = 'uint16'
                bytes_per_pixel_data_component = 2
            elif symbolic in uint32_formats:
                dtype = 'uint32'
                bytes_per_pixel_data_component = 4
            elif symbolic in float32_formats:
                dtype = 'float32'
                bytes_per_pixel_data_component = 4
            elif symbolic in uint8_formats:
                dtype = 'uint8'
                bytes_per_pixel_data_component = 1
            else:
                # Sorry, Harvester can't handle this:
                self._data = None
                return

        # Determine the number of components per pixel:
        if symbolic in lmn_444_location_formats:
            num_components_per_pixel = 3.
        elif symbolic in lmn_422_location_formats:
            num_components_per_pixel = 2.
        elif symbolic in lmn_411_location_formats:
            # 4:1:1 sampling: 6 values per 4 pixels = 1.5 per pixel.
            num_components_per_pixel = 1.5
        elif symbolic in lmno_4444_location_formats:
            num_components_per_pixel = 4.
        elif symbolic in mono_location_formats or \
                symbolic in bayer_location_formats:
            num_components_per_pixel = 1.
        else:
            # Sorry, Harvester can't handle this:
            self._data = None
            return

        self._num_components_per_pixel = num_components_per_pixel
        self._symbolic = symbolic

        #
        width = self.width
        height = self.height

        # Element count: a part reports its own byte size; otherwise it is
        # derived from the image geometry plus the trailing Y padding.
        if self._part:
            count = self._part.data_size
            count //= bytes_per_pixel_data_component
            data_offset = self._part.data_offset
        else:
            count = width * height
            count *= num_components_per_pixel
            count += self.y_padding
            data_offset = 0

        # Convert the Python's built-in bytes array to a Numpy array:
        if _is_logging_buffer_manipulation:
            self._logger.debug(
                'Component 2D image ('
                'len(raw_buffer): {0}, '
                'int(count): {1}, '
                'dtype: {2}, '
                'offset: {3}, '
                'pixel format: {4},'
                'x padding: {5},'
                'y padding: {6}'
                ')'.format(
                    len(self._buffer.raw_buffer),
                    int(count),
                    dtype,
                    data_offset,
                    symbolic,
                    self.x_padding,
                    self.y_padding,
                )
            )
        # frombuffer gives a zero-copy view of the raw buffer.
        self._data = np.frombuffer(
            self._buffer.raw_buffer,
            count=int(count),
            dtype=dtype,
            offset=data_offset
        )

    def represent_pixel_location(self):
        """
        Returns a NumPy array that represents the 2D pixel location,
        which is defined by PFNC, of the original image data.

        You may use the returned NumPy array for a calculation to map the
        original image to another format.

        :return: A NumPy array that represents the 2D pixel location.
        """
        if self.data is None:
            return None

        #
        return self._data.reshape(
            self.height + self.y_padding,
            int(self.width * self._num_components_per_pixel + self.x_padding)
        )

    @property
    def num_components_per_pixel(self):
        """
        :return: The number of data components per pixel.
        """
        return self._num_components_per_pixel

    def __repr__(self):
        return '{0} x {1}, {2}, {3} elements,\n{4}'.format(
            self.width,
            self.height,
            self.data_format,
            self.data.size,
            self.data
        )

    @property
    def width(self):
        """
        :return: The width of the data component in the buffer in number of pixels.
        """
        # Prefer the part/buffer value; fall back to the node map when the
        # producer does not report it.
        try:
            if self._part:
                value = self._part.width
            else:
                value = self._buffer.width
        except GenericException:
            value = self._node_map.Width.value
        return value

    @property
    def height(self):
        """
        :return: The height of the data component in the buffer in number of pixels.
        """
        try:
            if self._part:
                value = self._part.height
            else:
                value = self._buffer.height
        except GenericException:
            value = self._node_map.Height.value
        return value

    @property
    def data_format_value(self):
        """
        :return: The data type of the data component as integer value.
        """
        try:
            if self._part:
                value = self._part.data_format
            else:
                value = self._buffer.pixel_format
        except GenericException:
            value = self._node_map.PixelFormat.value
        return value

    @property
    def data_format(self):
        """
        :return: The data type of the data component as string.
        """
        # PFNC integer value -> symbolic name lookup.
        return symbolics[self.data_format_value]

    @property
    def delivered_image_height(self):
        """
        :return: The image height of the data component.
        """
        try:
            if self._part:
                value = self._part.delivered_image_height
            else:
                value = self._buffer.delivered_image_height
        except GenericException:
            value = 0
        return value

    @property
    def x_offset(self):  # TODO: Check the naming convention.
        """
        :return: The X offset of the data in the buffer in number of pixels from the image origin to handle areas of interest.
        """
        try:
            if self._part:
                value = self._part.x_offset
            else:
                value = self._buffer.offset_x
        except GenericException:
            value = self._node_map.OffsetX.value
        return value

    @property
    def y_offset(self):
        """
        :return: The Y offset of the data in the buffer in number of pixels from the image origin to handle areas of interest.
        """
        try:
            if self._part:
                value = self._part.y_offset
            else:
                value = self._buffer.offset_y
        except GenericException:
            value = self._node_map.OffsetY.value
        return value

    @property
    def x_padding(self):
        """
        :return: The X padding of the data component in the buffer in number of pixels.
        """
        try:
            if self._part:
                value = self._part.x_padding
            else:
                value = self._buffer.padding_x
        except GenericException:
            value = 0
        return value

    @property
    def y_padding(self):
        """
        :return: The Y padding of the data component in the buffer in number of pixels.
        """
        try:
            if self._part:
                value = self._part.y_padding
            else:
                value = self._buffer.padding_y
        except GenericException:
            value = 0
        return value
class Buffer:
    """
    Is provided by an :class:`ImageAcquire` object when you call its
    :meth:`~harvesters.core.ImageAcquirer.fetch_buffer` method. It provides
    you a way to access acquired data and its relevant information.

    Note that it will never be necessary to create this object by yourself
    in general.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: Set the raw GenTL buffer to wrap.
        :param node_map: Set the remote device's node map.
        :param logger: Set a logger; falls back to the module logger.
        """
        #
        assert buffer
        assert node_map
        #
        self._logger = logger or get_logger(name=__name__)
        #
        super().__init__()
        #
        self._buffer = buffer
        self._node_map = node_map
        # Parse the raw buffer into a typed payload object right away.
        self._payload = self._build_payload(
            buffer=buffer,
            node_map=node_map,
            logger=self._logger
        )

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Using the buffer as a context manager re-queues it on exit.
        self.queue()

    def __repr__(self):
        return '{0}'.format(self.payload.__repr__())

    @property
    def timestamp_ns(self):
        """
        :return: The timestamp in nano-second.
        """
        return self._buffer.timestamp_ns

    @property
    def timestamp(self):
        """
        :return: The timestamp in the TL specific unit.
        """
        # Prefer the nanosecond timestamp; if the producer does not support
        # it, fall back to the TL-unit timestamp (but only when a frequency
        # is obtainable to interpret it).
        timestamp = 0
        try:
            timestamp = self._buffer.timestamp_ns
        except GenericException:
            try:
                _ = self.timestamp_frequency
            except GenericException:
                pass
            else:
                try:
                    timestamp = self._buffer.timestamp
                except GenericException:
                    timestamp = 0

        return timestamp

    @property
    def timestamp_frequency(self):
        """
        :return: The timestamp frequency which is used to represent a timestamp.
        """
        # If timestamp_ns is supported the unit is nanoseconds (1 GHz);
        # otherwise query the device, then the GEV-specific node.
        frequency = 1000000000  # Hz

        try:
            _ = self._buffer.timestamp_ns
        except GenericException:
            try:
                frequency = self._buffer.parent.parent.timestamp_frequency
            except GenericException:
                try:
                    frequency = self._node_map.GevTimestampTickFrequency.value
                except GenericException:
                    pass

        return frequency

    @property
    def payload_type(self):
        """
        :return: The payload type that the :class:`Buffer` object contains.
        """
        return self._buffer.payload_type

    @property
    def payload(self):
        """
        :return: A containing object which derives from :class:`PayloadBase` class.
        """
        return self._payload

    def queue(self):
        """
        Queues the buffer to prepare for the upcoming image acquisition. Once
        the buffer is queued, the :class:`Buffer` object will be obsolete.
        You'll have nothing to do with it.

        Note that you have to return the ownership of the fetched buffers to
        the :class:`ImageAcquirer` object before stopping image acquisition
        calling this method because the :class:`ImageAcquirer` object tries
        to clear the self-allocated buffers when it stops image acquisition.
        """
        #
        if _is_logging_buffer_manipulation:
            self._logger.debug(
                'Queued Buffer module #{0}'
                ' containing frame #{1}'
                ' to DataStream module {2}'
                ' of Device module {3}'
                '.'.format(
                    self._buffer.context,
                    self._buffer.frame_id,
                    self._buffer.parent.id_,
                    self._buffer.parent.parent.id_
                )
            )

        # The buffer's parent is the owning data stream.
        self._buffer.parent.queue_buffer(self._buffer)

    @staticmethod
    def _build_payload(*, buffer=None, node_map=None, logger=None):
        # Map the GenTL payload type ID to the matching Payload* wrapper;
        # unknown IDs yield None.
        assert buffer
        assert node_map
        #
        p_type = buffer.payload_type
        if p_type == PAYLOADTYPE_INFO_IDS.PAYLOAD_TYPE_UNKNOWN:
            payload = PayloadUnknown(
                buffer=buffer, node_map=node_map, logger=logger
            )
        elif p_type == PAYLOADTYPE_INFO_IDS.PAYLOAD_TYPE_IMAGE or \
                buffer.payload_type == PAYLOADTYPE_INFO_IDS.PAYLOAD_TYPE_CHUNK_DATA:
            # Chunk-data payloads are treated as images here.
            payload = PayloadImage(
                buffer=buffer, node_map=node_map, logger=logger
            )
        elif p_type == PAYLOADTYPE_INFO_IDS.PAYLOAD_TYPE_RAW_DATA:
            payload = PayloadRawData(
                buffer=buffer, node_map=node_map, logger=logger
            )
        elif p_type == PAYLOADTYPE_INFO_IDS.PAYLOAD_TYPE_FILE:
            payload = PayloadFile(
                buffer=buffer, node_map=node_map, logger=logger
            )
        elif p_type == PAYLOADTYPE_INFO_IDS.PAYLOAD_TYPE_JPEG:
            payload = PayloadJPEG(
                buffer=buffer, node_map=node_map, logger=logger
            )
        elif p_type == PAYLOADTYPE_INFO_IDS.PAYLOAD_TYPE_JPEG2000:
            payload = PayloadJPEG2000(
                buffer=buffer, node_map=node_map, logger=logger
            )
        elif p_type == PAYLOADTYPE_INFO_IDS.PAYLOAD_TYPE_H264:
            payload = PayloadH264(
                buffer=buffer, node_map=node_map, logger=logger
            )
        elif p_type == PAYLOADTYPE_INFO_IDS.PAYLOAD_TYPE_CHUNK_ONLY:
            payload = PayloadChunkOnly(
                buffer=buffer, node_map=node_map, logger=logger
            )
        elif p_type == PAYLOADTYPE_INFO_IDS.PAYLOAD_TYPE_MULTI_PART:
            payload = PayloadMultiPart(
                buffer=buffer, node_map=node_map, logger=logger
            )
        else:
            payload = None

        return payload
class PayloadBase:
    """
    Is a base class of various payload types. The types are defined by the
    GenTL Standard.
    """
    def __init__(self, *, buffer=None, logger=None):
        """
        :param buffer: Set the raw GenTL buffer delivering the payload.
        :param logger: Set a logger; falls back to the module logger.
        """
        #
        assert buffer
        #
        self._logger = logger or get_logger(name=__name__)
        #
        super().__init__()
        self._buffer = buffer
        self._components = []  # populated by subclasses via _build_component

    @property
    def payload_type(self):
        """
        :return: The payload type ID of the underlying buffer.
        """
        return self._buffer.payload_type

    def _build_component(self, buffer=None, part=None, node_map=None):
        # Look up the symbolic pixel format of the buffer (or of the given
        # part, for multi-part payloads) and build the matching component;
        # formats outside component_2d_formats are unsupported -> None.
        if part:
            data_format = part.data_format
        else:
            data_format = buffer.pixel_format
        #
        symbolic = symbolics[data_format]
        if symbolic in component_2d_formats:
            return Component2DImage(
                buffer=buffer, part=part, node_map=node_map,
                logger=self._logger
            )

        return None

    @property
    def components(self):
        """
        :return: A :class:`list` containing objects that derive from :const:`ComponentBase` class.
        """
        return self._components
class PayloadUnknown(PayloadBase):
    """
    Represents a payload that is classified as :const:`PAYLOAD_TYPE_UNKNOWN`
    by the GenTL Standard. Carries no parsed components.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: Set the raw GenTL buffer delivering the payload.
        :param node_map: Set the remote device's node map (unused here but
            kept for interface symmetry with the other payload types).
        :param logger: Set a logger; falls back to the module logger.
        """
        #
        assert buffer
        assert node_map
        #
        self._logger = logger or get_logger(name=__name__)
        #
        super().__init__(buffer=buffer, logger=self._logger)
class PayloadImage(PayloadBase):
    """
    Represents a payload that is classified as :const:`PAYLOAD_TYPE_IMAGE` by
    the GenTL Standard. Builds a single 2D image component from the buffer.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: Set the raw GenTL buffer delivering the payload.
        :param node_map: Set the remote device's node map.
        :param logger: Set a logger; falls back to the module logger.
        """
        #
        assert buffer
        assert node_map
        #
        self._logger = logger or get_logger(name=__name__)
        #
        super().__init__(buffer=buffer, logger=self._logger)

        # Build data components.
        self._components.append(
            self._build_component(
                buffer=buffer, node_map=node_map
            )
        )

    def __repr__(self):
        return '{0}'.format(self.components[0].__repr__())
class PayloadRawData(PayloadBase):
    """
    Represents a payload that is classified as :const:`PAYLOAD_TYPE_RAW_DATA`
    by the GenTL Standard. Carries no parsed components.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: Set the raw GenTL buffer delivering the payload.
        :param node_map: Set the remote device's node map (unused here but
            kept for interface symmetry with the other payload types).
        :param logger: Set a logger; falls back to the module logger.
        """
        #
        assert buffer
        assert node_map
        #
        self._logger = logger or get_logger(name=__name__)
        #
        super().__init__(buffer=buffer, logger=self._logger)
class PayloadFile(PayloadBase):
    """
    Represents a payload that is classified as :const:`PAYLOAD_TYPE_FILE` by
    the GenTL Standard. Carries no parsed components.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: Set the raw GenTL buffer delivering the payload.
        :param node_map: Set the remote device's node map (unused here but
            kept for interface symmetry with the other payload types).
        :param logger: Set a logger; falls back to the module logger.
        """
        #
        assert buffer
        assert node_map
        #
        self._logger = logger or get_logger(name=__name__)
        #
        super().__init__(buffer=buffer, logger=self._logger)
class PayloadJPEG(PayloadBase):
    """
    Represents a payload that is classified as :const:`PAYLOAD_TYPE_JPEG` by
    the GenTL Standard. Carries no parsed components.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: Set the raw GenTL buffer delivering the payload.
        :param node_map: Set the remote device's node map (unused here but
            kept for interface symmetry with the other payload types).
        :param logger: Set a logger; falls back to the module logger.
        """
        #
        assert buffer
        assert node_map
        #
        self._logger = logger or get_logger(name=__name__)
        #
        super().__init__(buffer=buffer, logger=self._logger)
class PayloadJPEG2000(PayloadBase):
    """
    Represents a payload that is classified as :const:`PAYLOAD_TYPE_JPEG2000`
    by the GenTL Standard. Carries no parsed components.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: Set the raw GenTL buffer delivering the payload.
        :param node_map: Set the remote device's node map (unused here but
            kept for interface symmetry with the other payload types).
        :param logger: Set a logger; falls back to the module logger.
        """
        #
        assert buffer
        assert node_map
        #
        self._logger = logger or get_logger(name=__name__)
        #
        super().__init__(buffer=buffer, logger=self._logger)
class PayloadH264(PayloadBase):
    """
    Represents a payload that is classified as :const:`PAYLOAD_TYPE_H264` by
    the GenTL Standard. Carries no parsed components.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: Set the raw GenTL buffer delivering the payload.
        :param node_map: Set the remote device's node map (unused here but
            kept for interface symmetry with the other payload types).
        :param logger: Set a logger; falls back to the module logger.
        """
        #
        assert buffer
        assert node_map
        #
        self._logger = logger or get_logger(name=__name__)
        #
        super().__init__(buffer=buffer, logger=self._logger)
class PayloadChunkOnly(PayloadBase):
    """
    Represents a payload that is classified as
    :const:`PAYLOAD_TYPE_CHUNK_ONLY` by the GenTL Standard. Carries no
    parsed components.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: Set the raw GenTL buffer delivering the payload.
        :param node_map: Set the remote device's node map (unused here but
            kept for interface symmetry with the other payload types).
        :param logger: Set a logger; falls back to the module logger.
        """
        #
        assert buffer
        assert node_map
        #
        self._logger = logger or get_logger(name=__name__)
        #
        super().__init__(buffer=buffer, logger=self._logger)
class PayloadMultiPart(PayloadBase):
    """
    Represents a payload that is classified as
    :const:`PAYLOAD_TYPE_MULTI_PART` by the GenTL Standard. Builds one data
    component per GenTL "part" contained in the buffer.
    """
    def __init__(self, *, buffer=None, node_map=None, logger=None):
        """
        :param buffer: Set the raw GenTL buffer delivering the payload.
        :param node_map: Set the remote device's node map.
        :param logger: Set a logger; falls back to the module logger.
        """
        #
        assert buffer
        assert node_map
        #
        self._logger = logger or get_logger(name=__name__)
        #
        super().__init__(buffer=buffer, logger=self._logger)
        #
        # Build a data component per "part" defined by the GenTL standard.
        # (The original used enumerate() but never used the index.)
        for part in self._buffer.parts:
            self._components.append(
                self._build_component(
                    buffer=buffer, part=part, node_map=node_map
                )
            )

    def __repr__(self):
        ret = ''
        for i, c in enumerate(self.components):
            ret += 'Component #{0}: {1}\n'.format(i, c.__repr__())
        # Drop the trailing newline added by the loop above.
        ret = ret[:-1]
        return ret
class ImageAcquirer:
    """
    Manages everything you need to acquire images from the connecting device.
    """
    # Shared event used to propagate SIGINT to the worker threads:
    _event = Event()
    # Transport layer types that have a specialized chunk adapter:
    _specialized_tl_type = ['U3V', 'GEV']

    def __init__(
            self, *, parent=None, device=None,
            profiler=None, logger=None,
            sleep_duration=_sleep_duration_default,
            file_path=None
    ):
        """
        :param parent: Set the owning :class:`Harvester` object.
        :param device: Set the GenTL Device module to control.
        :param profiler: (Optional) Set a profiler object.
        :param logger: (Optional) Set a logger; falls back to the module logger.
        :param sleep_duration: Set a sleep duration in second that is inserted after the image acquisition worker is executed.
        :param file_path: (Optional) Set a path to camera description file which you want to load on the target node map instead of the one which the device declares.
        """
        #
        self._logger = logger or get_logger(name=__name__)
        #
        assert parent
        assert device
        #
        super().__init__()
        #
        self._parent = parent
        # Walk up the GenTL module hierarchy: Device -> Interface -> System.
        interface = device.parent
        system = interface.parent
        # Optional host-side cache directory for description files:
        env_var = 'HARVESTERS_XML_FILE_DIR'
        if env_var in os.environ:
            self._xml_dir = os.getenv(env_var)
        else:
            self._xml_dir = None
        # Connect a node map to each module's port. A failure is logged and
        # leaves the corresponding proxy attribute unset.
        try:
            node_map = _get_port_connected_node_map(
                port=system.port, logger=self._logger,
                xml_dir=self._xml_dir
            )
        except GenericException as e:
            self._logger.error(e, exc_info=True)
        else:
            self._system = System(module=system, node_map=node_map)
        #
        try:
            node_map = _get_port_connected_node_map(
                port=interface.port, logger=self._logger,
                xml_dir=self._xml_dir
            )
        except GenericException as e:
            self._logger.error(e, exc_info=True)
        else:
            self._interface = Interface(
                module=interface, node_map=node_map, parent=self._system
            )
        #
        try:
            node_map = _get_port_connected_node_map(
                port=device.local_port, logger=self._logger,
                xml_dir=self._xml_dir
            )  # Local device's node map
        except GenericException as e:
            self._logger.error(e, exc_info=True)
        else:
            self._device = Device(
                module=device, node_map=node_map, parent=self._interface
            )
        # The remote device is where the camera features live; an explicit
        # file_path overrides the device-declared description file.
        try:
            node_map = _get_port_connected_node_map(
                port=device.remote_port, logger=self._logger,
                file_path=file_path, xml_dir=self._xml_dir
            )  # Remote device's node map
        except GenericException as e:
            self._logger.error(e, exc_info=True)
        else:
            self._remote_device = RemoteDevice(
                module=self._device, node_map=node_map, parent=self._device
            )
        #
        self._data_streams = []
        self._event_new_buffer_managers = []
        self._create_ds_at_connection = True
        if self._create_ds_at_connection:
            self._setup_data_streams()
        #
        self._profiler = profiler
        # Worker thread that drains the new-buffer event queue:
        self._mutex = Lock()
        self._thread_image_acquisition = _BuiltInThread(
            mutex=self._mutex,
            worker=self._worker_image_acquisition,
            logger=self._logger,
            sleep_duration=sleep_duration
        )
        # Prepare handling the SIGINT event:
        self._threads = []
        self._threads.append(self._thread_image_acquisition)
        # Create a signal handler if it's being run in the main thread:
        self._sigint_handler = None
        if current_thread() is main_thread():
            self._sigint_handler = _SignalHandler(
                event=self._event, threads=self._threads, logger=self._logger
            )
            signal.signal(signal.SIGINT, self._sigint_handler)
            self._logger.info('Created a signal handler for SIGINT.')
        #
        self._num_filled_buffers_to_hold = 1
        # -1 means "continuous" (no fixed image count):
        self._num_images_to_acquire = -1
        #
        self._timeout_for_image_acquisition = 1  # ms
        #
        self._statistics = Statistics()
        #
        self._announced_buffers = []
        self._holding_filled_buffers = []
        #
        self._has_acquired_1st_image = False
        self._is_acquiring_images = False
        self._keep_latest = True
        # Determine the default number of buffers to announce:
        num_buffers_default = 16
        try:
            self._min_num_buffers = self._data_streams[0].buffer_announce_min
        except InvalidParameterException as e:
            # In general, a GenTL Producer should not raise the
            # InvalidParameterException to the inquiry for
            # STREAM_INFO_BUF_ANNOUNCE_MIN because it is totally legal
            # but we have observed a fact that there is at least one on
            # the market. As a workaround we involve this try-except block:
            self._logger.debug(e, exc_info=True)
            self._min_num_buffers = num_buffers_default
            self._num_buffers = num_buffers_default
        else:
            self._num_buffers = max(
                num_buffers_default, self._min_num_buffers
            )
        #
        self._signal_stop_image_acquisition = None
        #
        self._logger.info(
            'Instantiated an ImageAcquirer object for {0}.'.format(
                self._device.id_
            )
        )
        #
        self._chunk_adapter = self._get_chunk_adapter(
            device=self.device, node_map=self.remote_device.node_map
        )
        # A callback method that is called when a new buffer is delivered:
        self._on_new_buffer_arrival = None
        # Release resources even when the client forgets to destroy us:
        self._finalizer = weakref.finalize(self, self._destroy)

    @staticmethod
    def _get_chunk_adapter(*, device=None, node_map=None):
        # Pick the chunk adapter that matches the transport layer type;
        # fall back to the generic one for unknown TL types.
        if device.tl_type == 'U3V':
            return ChunkAdapterU3V(node_map)
        elif device.tl_type == 'GEV':
            return ChunkAdapterGEV(node_map)
        else:
            return ChunkAdapterGeneric(node_map)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._finalizer()

    def destroy(self):
        self._finalizer()

    @property
    def on_new_buffer_arrival(self):
        return self._on_new_buffer_arrival

    @on_new_buffer_arrival.setter
    def on_new_buffer_arrival(self, value):
        self._on_new_buffer_arrival = value

    # NOTE: the original defined this setter twice (an identical duplicate
    # appeared after the statistics property); the redundant definition has
    # been removed.
    @property
    def keep_latest(self):
        return self._keep_latest

    @keep_latest.setter
    def keep_latest(self, value):
        self._keep_latest = value

    @property
    def num_buffers(self):
        return self._num_buffers

    @num_buffers.setter
    def num_buffers(self, value):
        # The producer dictates a lower bound for announced buffers:
        if value >= self._min_num_buffers:
            self._num_buffers = value
        else:
            raise ValueError(
                'The number of buffers must be '
                'greater than or equal to {0}'.format(
                    self._min_num_buffers
                )
            )

    @property
    def min_num_buffers(self):
        return self._min_num_buffers

    @property
    def num_filled_buffers_to_hold(self):
        return self._num_filled_buffers_to_hold

    @num_filled_buffers_to_hold.setter
    def num_filled_buffers_to_hold(self, value):
        if 0 < value <= self._num_buffers:
            self._num_filled_buffers_to_hold = value
        else:
            raise ValueError(
                'The number of filled buffers to hold must be '
                'greater than zero and '
                'smaller than or equal to {0}'.format(
                    self._num_buffers
                )
            )

    @property
    def num_holding_filled_buffers(self):
        return len(self._holding_filled_buffers)

    @property
    def data_streams(self):
        return self._data_streams

    @property
    def remote_device(self):
        """
        :return: The remote device.
        """
        return self._remote_device

    @property
    def device(self):
        """
        :return: The proxy :class:`Device` module object of the connecting remote device.
        """
        return self._device

    @property
    def interface(self):
        """
        :return: The parent :class:`Interface` module object of the connecting remote device.
        """
        return self._interface

    @property
    def system(self):
        """
        :return: The parent :class:`System` module object of the connecting remote device.
        """
        return self._system

    def is_acquiring_images(self):
        """
        :return: :const:`True` if it's acquiring images. Otherwise :const:`False`.
        """
        return self._is_acquiring_images

    @property
    def timeout_for_image_acquisition(self):
        return self._timeout_for_image_acquisition

    @timeout_for_image_acquisition.setter
    def timeout_for_image_acquisition(self, ms):
        # NOTE(review): other code paths guard shared state with
        # MutexLocker(self.thread_image_acquisition); here the thread object
        # is used directly as a context manager. Preserved as-is — confirm
        # _BuiltInThread implements the context-manager protocol.
        with self.thread_image_acquisition:
            self._timeout_for_image_acquisition = ms

    @property
    def thread_image_acquisition(self):
        return self._thread_image_acquisition

    @thread_image_acquisition.setter
    def thread_image_acquisition(self, obj):
        # A client may inject its own thread implementation; rebind the
        # worker so the replacement runs our acquisition loop.
        self._thread_image_acquisition = obj
        self._thread_image_acquisition.worker = self._worker_image_acquisition

    @property
    def signal_stop_image_acquisition(self):
        return self._signal_stop_image_acquisition

    @signal_stop_image_acquisition.setter
    def signal_stop_image_acquisition(self, obj):
        self._signal_stop_image_acquisition = obj

    @property
    def statistics(self):
        return self._statistics

    def _setup_data_streams(self):
        # Open every data stream the device exposes and attach a node map
        # and a new-buffer event manager to each.
        for i, stream_id in enumerate(self._device.data_stream_ids):
            #
            _data_stream = self._device.create_data_stream()
            try:
                _data_stream.open(stream_id)
            except GenericException as e:
                self._logger.debug(e, exc_info=True)
            else:
                self._logger.info(
                    'Opened DataStream module {0} of {1}.'.format(
                        _data_stream.id_, _data_stream.parent.id_
                    )
                )
            try:
                node_map = _get_port_connected_node_map(
                    port=_data_stream.port, logger=self._logger
                )
            except GenericException as e:
                self._logger.error(e, exc_info=True)
            else:
                self._data_streams.append(
                    DataStream(
                        module=_data_stream, node_map=node_map,
                        parent=self._device
                    )
                )
                # Create an Event Manager object for image acquisition.
                event_token = self._data_streams[i].register_event(
                    EVENT_TYPE_LIST.EVENT_NEW_BUFFER
                )
                self._event_new_buffer_managers.append(
                    EventManagerNewBuffer(event_token)
                )

    def start_image_acquisition(self):
        """
        Starts image acquisition.

        :return: None.
        """
        if not self._create_ds_at_connection:
            self._setup_data_streams()
        # Announce and queue buffers on every data stream:
        num_required_buffers = self._num_buffers
        for data_stream in self._data_streams:
            try:
                num_buffers = data_stream.buffer_announce_min
                if num_buffers < num_required_buffers:
                    num_buffers = num_required_buffers
            except GenericException as e:
                num_buffers = num_required_buffers
                self._logger.debug(e, exc_info=True)
            if data_stream.defines_payload_size():
                buffer_size = data_stream.payload_size
            else:
                buffer_size = self.remote_device.node_map.PayloadSize.value
            raw_buffers = self._create_raw_buffers(
                num_buffers, buffer_size
            )
            buffer_tokens = self._create_buffer_tokens(
                raw_buffers
            )
            self._announced_buffers = self._announce_buffers(
                data_stream=data_stream, _buffer_tokens=buffer_tokens
            )
            self._queue_announced_buffers(
                data_stream=data_stream, buffers=self._announced_buffers
            )
        # Reset the number of images to acquire.
        try:
            acq_mode = self.remote_device.node_map.AcquisitionMode.value
            if acq_mode == 'Continuous':
                num_images_to_acquire = -1
            elif acq_mode == 'SingleFrame':
                num_images_to_acquire = 1
            elif acq_mode == 'MultiFrame':
                num_images_to_acquire = self.remote_device.node_map.AcquisitionFrameCount.value
            else:
                num_images_to_acquire = -1
        except GenericException as e:
            # The node doesn't exist.
            num_images_to_acquire = -1
            self._logger.debug(e, exc_info=True)
        self._num_images_to_acquire = num_images_to_acquire
        try:
            # We're ready to start image acquisition. Lock the device's
            # transport layer related features:
            self.remote_device.node_map.TLParamsLocked.value = 1
        except GenericException:
            # SFNC < 2.0
            pass
        # Start image acquisition.
        self._is_acquiring_images = True
        for data_stream in self._data_streams:
            data_stream.start_acquisition(
                ACQ_START_FLAGS_LIST.ACQ_START_FLAGS_DEFAULT,
                self._num_images_to_acquire
            )
        #
        if self.thread_image_acquisition:
            self.thread_image_acquisition.start()
        # Finally tell the remote device to start streaming:
        self.remote_device.node_map.AcquisitionStart.execute()
        self._logger.info(
            '{0} started image acquisition.'.format(self._device.id_)
        )
        if self._profiler:
            self._profiler.print_diff()

    def _worker_image_acquisition(self):
        # One pass over every registered new-buffer event manager; this is
        # run repeatedly by the acquisition thread.
        for event_manager in self._event_new_buffer_managers:
            try:
                if self.is_acquiring_images():
                    event_manager.update_event_data(
                        self._timeout_for_image_acquisition
                    )
                else:
                    return
            except TimeoutException:
                # No buffer arrived within the timeout; try the next stream.
                continue
            else:
                # Check if the delivered buffer is complete:
                if event_manager.buffer.is_complete():
                    #
                    if _is_logging_buffer_manipulation:
                        self._logger.debug(
                            'Acquired Buffer module #{0}'
                            ' containing frame #{1}'
                            ' from DataStream module {2}'
                            ' of Device module {3}'
                            '.'.format(
                                event_manager.buffer.context,
                                event_manager.buffer.frame_id,
                                event_manager.parent.id_,
                                event_manager.parent.parent.id_
                            )
                        )
                    if self.keep_latest:
                        # We want to keep the latest ones:
                        with MutexLocker(self.thread_image_acquisition):
                            if not self._is_acquiring_images:
                                return
                            if len(self._holding_filled_buffers) >= self._num_filled_buffers_to_hold:
                                # Pick up the oldest one:
                                buffer = self._holding_filled_buffers.pop(0)
                                if _is_logging_buffer_manipulation:
                                    self._logger.debug(
                                        'Queued Buffer module #{0}'
                                        ' containing frame #{1}'
                                        ' to DataStream module {2}'
                                        ' of Device module {3}'
                                        '.'.format(
                                            buffer.context,
                                            buffer.frame_id,
                                            buffer.parent.id_,
                                            buffer.parent.parent.id_
                                        )
                                    )
                                # Then discard/queue it:
                                buffer.parent.queue_buffer(buffer)
                            # Get the latest buffer:
                            buffer = event_manager.buffer
                            # Then append it to the list which the user fetches later:
                            self._holding_filled_buffers.append(buffer)
                            # Then update the statistics using the buffer:
                            self._update_statistics(buffer)
                    else:
                        # Get the latest buffer:
                        buffer = event_manager.buffer
                        # Then update the statistics using the buffer:
                        self._update_statistics(buffer)
                        # We want to keep the oldest ones:
                        with MutexLocker(self.thread_image_acquisition):
                            if not self._is_acquiring_images:
                                return
                            if len(self._holding_filled_buffers) >= self._num_filled_buffers_to_hold:
                                # We have no space to keep the latest one.
                                # Discard/queue the latest buffer:
                                buffer.parent.queue_buffer(buffer)
                            else:
                                # Just append it to the list:
                                self._holding_filled_buffers.append(buffer)
                    # Count down when a fixed number of images was requested:
                    if self._num_images_to_acquire >= 1:
                        self._num_images_to_acquire -= 1
                    if self._on_new_buffer_arrival:
                        self._on_new_buffer_arrival()
                    if self._num_images_to_acquire == 0:
                        # All requested images have arrived:
                        if self.signal_stop_image_acquisition:
                            self.signal_stop_image_acquisition.emit()
                else:
                    # The buffer is incomplete:
                    self._logger.debug(
                        'Acquired buffer is complete: {0}'.format(
                            event_manager.buffer.is_complete()
                        )
                    )
                    # Queue the incomplete buffer; we have nothing to do
                    # with it:
                    data_stream = event_manager.buffer.parent
                    data_stream.queue_buffer(event_manager.buffer)
        # Bail out quickly if acquisition was stopped while we worked:
        with MutexLocker(self.thread_image_acquisition):
            if not self._is_acquiring_images:
                return

    def _update_chunk_data(self, buffer=None):
        # Parse the chunk data in the buffer, if any, and update the remote
        # device's node map through the chunk adapter. Best-effort: chunk
        # inquiry failures are tolerated.
        try:
            if buffer.num_chunks == 0:
                # The buffer does not contain any chunk data.
                return
        except (ParsingChunkDataException, ErrorException):
            # Tolerated; treat as "no usable chunk data".
            pass
        except (
            NotImplementedException, NoDataException,
            InvalidBufferException
        ):
            # The producer does not support the inquiry; same treatment.
            pass
        else:
            # The buffer contains chunk data. Generic TL types need the
            # chunk layout supplied explicitly:
            is_generic = False
            if buffer.tl_type not in self._specialized_tl_type:
                is_generic = True
            try:
                if is_generic:
                    self._chunk_adapter.attach_buffer(
                        buffer.raw_buffer, buffer.chunk_data_info_list
                    )
                else:
                    self._chunk_adapter.attach_buffer(buffer.raw_buffer)
            except GenericException as e:
                # Failed to parse the chunk data. Something must be wrong.
                self._logger.error(e, exc_info=True)

    def fetch_buffer(self, *, timeout=0, is_raw=False):
        """
        Fetches the latest :class:`Buffer` object and returns it.

        :param timeout: Set timeout value in second.
        :param is_raw: Set :const:`True` if you need a raw GenTL Buffer module.

        :return: A :class:`Buffer` object.
        """
        if not self.is_acquiring_images():
            raise TimeoutException
        watch_timeout = True if timeout > 0 else False
        buffer = None
        base = time.time()
        # Poll until a filled buffer is available or the timeout elapses:
        while buffer is None:
            if watch_timeout and (time.time() - base) > timeout:
                raise TimeoutException
            else:
                with MutexLocker(self.thread_image_acquisition):
                    if len(self._holding_filled_buffers) > 0:
                        if is_raw:
                            buffer = self._holding_filled_buffers.pop(0)
                        else:
                            # Update the chunk data:
                            _buffer = self._holding_filled_buffers.pop(0)
                            self._update_chunk_data(buffer=_buffer)
                            #
                            buffer = Buffer(
                                buffer=_buffer,
                                node_map=self.remote_device.node_map,
                                logger=self._logger
                            )
        if _is_logging_buffer_manipulation:
            # BUGFIX: the original used placeholder {2} twice although four
            # arguments are passed; the device module id is argument {3}.
            self._logger.debug(
                'Fetched Buffer module #{0}'
                ' containing frame #{1}'
                ' of DataStream module {2}'
                ' of Device module {3}'
                '.'.format(
                    buffer._buffer.context,
                    buffer._buffer.frame_id,
                    buffer._buffer.parent.id_,
                    buffer._buffer.parent.parent.id_
                )
            )
        return buffer

    def _update_statistics(self, buffer):
        #
        assert buffer
        # Count the image and refresh the timestamp-based rate measurement:
        self._statistics.increment_num_images()
        self._statistics.update_timestamp(buffer)

    @staticmethod
    def _create_raw_buffers(num_buffers, size):
        #
        assert num_buffers >= 0
        assert size >= 0
        # Instantiate a list object.
        raw_buffers = []
        # Append bytes objects to the list.
        # The number is specified by num_buffer and the buffer size is
        # specified by size.
        for _ in range(num_buffers):
            raw_buffers.append(bytes(size))
        # Then return the list.
        return raw_buffers

    @staticmethod
    def _create_buffer_tokens(raw_buffers):
        #
        assert raw_buffers
        # Instantiate a list object.
        _buffer_tokens = []
        # Append Buffer Token object to the list.
        for i, buffer in enumerate(raw_buffers):
            _buffer_tokens.append(
                BufferToken(buffer, i)
            )
        # Then returns the list.
        return _buffer_tokens

    def _announce_buffers(self, data_stream=None, _buffer_tokens=None):
        #
        assert data_stream
        #
        announced_buffers = []
        # Iterate announcing buffers in the Buffer Tokens.
        for token in _buffer_tokens:
            # Get an announced buffer.
            announced_buffer = data_stream.announce_buffer(token)
            # And append it to the list.
            announced_buffers.append(announced_buffer)
            # (Typo 'DataStraem' in the original message fixed.)
            self._logger.debug(
                'Announced Buffer #{0} to DataStream {1}.'.format(
                    announced_buffer.context,
                    data_stream.id_
                )
            )
        # Then return the list of announced Buffer objects.
        return announced_buffers

    def _queue_announced_buffers(self, data_stream=None, buffers=None):
        #
        assert data_stream
        # Hand every announced buffer over to the producer's input queue:
        for buffer in buffers:
            data_stream.queue_buffer(buffer)
            self._logger.debug(
                'Queued Buffer module #{0}'
                ' to DataStream module {1}'
                ' of Device module {2}'
                '.'.format(
                    buffer.context,
                    data_stream.id_,
                    data_stream.parent.id_
                )
            )

    def stop_image_acquisition(self):
        """
        Stops image acquisition.

        :return: None.
        """
        if self.is_acquiring_images():
            #
            self._is_acquiring_images = False
            #
            if self.thread_image_acquisition.is_running():  # TODO
                self.thread_image_acquisition.stop()
            with MutexLocker(self.thread_image_acquisition):
                # Tell the remote device to stop streaming:
                self.remote_device.node_map.AcquisitionStop.execute()
                try:
                    # Unlock TLParamsLocked in order to allow full device
                    # configuration:
                    self.remote_device.node_map.TLParamsLocked.value = 0
                except GenericException:
                    # SFNC < 2.0
                    pass
                for data_stream in self._data_streams:
                    # Stop image acquisition.
                    try:
                        data_stream.stop_acquisition(
                            ACQ_STOP_FLAGS_LIST.ACQ_STOP_FLAGS_KILL
                        )
                    except GenericException as e:
                        self._logger.error(e, exc_info=True)
                    # Flush the queue for image acquisition process.
                    data_stream.flush_buffer_queue(
                        ACQ_QUEUE_TYPE_LIST.ACQ_QUEUE_ALL_DISCARD
                    )
                for event_manager in self._event_new_buffer_managers:
                    event_manager.flush_event_queue()
            if self._create_ds_at_connection:
                self._release_buffers()
            else:
                self._release_data_streams()
            #
            self._has_acquired_1st_image = False
            # Detach so the chunk adapter no longer references a buffer:
            self._chunk_adapter.detach_buffer()
            #
            self._logger.info(
                '{0} stopped image acquisition.'.format(self._device.id_)
            )
        if self._profiler:
            self._profiler.print_diff()

    def _destroy(self):
        """
        Destroys the :class:`ImageAcquirer` object. Once you called this
        method, all allocated resources, including buffers and the remote
        device, are released.

        :return: None.
        """
        # Ask its parent to destroy it:
        if self._device:
            self._parent._destroy_image_acquirer(self)

    def _release_data_streams(self):
        # Buffers must be released before their owning streams are closed:
        self._release_buffers()
        #
        for data_stream in self._data_streams:
            if data_stream and data_stream.is_open():
                name_ds = data_stream.id_
                name_dev = data_stream.parent.id_
                data_stream.close()
                self._logger.info(
                    'Closed DataStream module {0} of {1}.'.format(
                        name_ds, name_dev
                    )
                )
        #
        self._data_streams.clear()
        self._event_new_buffer_managers.clear()

    def _release_buffers(self):
        # Revoke every announced buffer from each open stream and drop our
        # references to held and announced buffers.
        for data_stream in self._data_streams:
            if data_stream.is_open():
                #
                for buffer in self._announced_buffers:
                    self._logger.debug(
                        'Revoked Buffer module #{0}.'.format(
                            buffer.context
                        )
                    )
                    _ = data_stream.revoke_buffer(buffer)
        self._holding_filled_buffers.clear()
        self._announced_buffers.clear()
def _retrieve_file_path(*, port=None, url=None, file_path=None, logger=None, xml_dir=None):
#
_logger = logger or get_logger(name=__name__)
#
if file_path:
# A file that is specified by the client will be used:
if not os.path.exists(file_path):
raise LogicalErrorException(
'{0} does not exist.'.format(file_path)
)
else:
if url is None:
# Inquire its URL information.
if len(port.url_info_list) > 0:
url = port.url_info_list[0].url
else:
raise LogicalErrorException(
'The target port does not hold any URL.'
)
_logger.info('URL: {0}'.format(url))
# And parse the URL.
location, others = url.split(':', 1)
location = location.lower()
if location == 'local':
file_name, address, size = others.split(';')
address = int(address, 16)
# It may specify the schema version.
delimiter = '?'
if delimiter in size:
size, _ = size.split(delimiter)
size = int(size, 16) # From Hex to Dec
# Now we get the file content.
size, binary_data = port.read(address, size)
# Store the XML file on the host side; it may be a Zipped XML
# file or a plain XML file:
file_path = _save_file(
file_dir=xml_dir, file_name=file_name,
binary_data=binary_data
)
elif location == 'file':
# '///c|/program%20files/foo.xml' ->
# '///', 'c|/program%20files/foo.xml'
_, _file_path = others.split('///')
# 'c|/program%20files/foo.xml' -> 'c|/program files/foo.xml'
_file_path = unquote(_file_path)
# 'c|/program files/foo.xml' -> 'c:/program files/foo.xml')
_file_path.replace('|', ':')
# Now we get a file path that we daily use:
file_path = _file_path
elif location == 'http' or location == 'https':
raise NotImplementedError(
'Failed to parse URL {0}: Harvester has not supported '
'downloading a device description file from vendor '
'web site. If you must rely on the current condition,'
'just try to make a request to the Harvester '
'maintainer.'.format(url)
)
else:
raise LogicalErrorException(
'Failed to parse URL {0}: Unknown format.'.format(url)
)
return file_path
def _save_file(*, file_dir=None, file_name=None, binary_data=None):
#
assert binary_data
assert file_name
#
bytes_io = io.BytesIO(binary_data)
if file_dir is not None:
# Create the directory if it didn't exist:
if not os.path.exists(file_dir):
os.makedirs(file_dir)
else:
file_dir = tempfile.mkdtemp(
prefix=datetime.now().strftime('%Y%m%d%H%M%S_'),
)
import sys
os_system = sys.platform
if os_system in ['win32']:
file_name = "." + file_name
file_path = os.path.join(file_dir, file_name)
#
mode = 'w+'
data_to_write = bytes_content = bytes_io.getvalue()
if pathlib.Path(file_path).suffix.lower() == '.zip':
mode += 'b'
else:
data_to_write = bytes_content.decode()
pos = data_to_write.find('\x00')
data_to_write = data_to_write[:pos]
#
with open(file_path, mode) as f:
f.write(data_to_write)
return file_path
def _get_port_connected_node_map(*, port=None, logger=None, file_path=None, xml_dir=None):
    """
    Build a GenICam node map for the given GenTL port and connect the two.

    :param port: GenTL port whose description file defines the node map.
    :param logger: (Optional) Logger; falls back to the module logger.
    :param file_path: (Optional) Explicit description file to load instead of the one the port declares.
    :param xml_dir: (Optional) Host-side directory for downloaded description files.

    :return: A :class:`NodeMap` object; connected to the port when a valid description file was loaded.
    """
    assert port

    _logger = logger or get_logger(name=__name__)

    # Instantiate a GenICam node map object:
    node_map = NodeMap()

    # Locate the (possibly zipped) XML description file:
    file_path = _retrieve_file_path(
        port=port, file_path=file_path, logger=logger, xml_dir=xml_dir
    )
    if file_path is None:
        # Nothing to load; hand back the bare node map.
        return node_map

    # Try the zipped-XML reader first, then fall back to plain XML; a file
    # may be flagged as a Zip archive yet fail to open as one.
    loaded = True
    try:
        node_map.load_xml_from_zip_file(file_path)
    except RuntimeException:
        try:
            node_map.load_xml_from_file(file_path)
        except RuntimeException as e:
            _logger.error(e, exc_info=True)
            loaded = False

    if loaded:
        # Wrap the remote device's port in a concrete port and connect it so
        # GenICam register access reaches the device:
        node_map.connect(ConcretePort(port), port.name)

    # Then return the node map:
    return node_map
class Harvester:
"""
Is the class that works for you as Harvester Core. Everything begins with
this class.
"""
#
def __init__(self, *, profile=False, logger=None):
"""
:param profile:
:param logger:
"""
#
self._logger = logger or get_logger(name=__name__)
#
super().__init__()
#
self._cti_files = []
self._producers = []
self._systems = []
self._interfaces = []
self._device_info_list = []
self._ias = []
#
self._has_revised_device_list = False
self._timeout_for_update = 1000 # ms
#
if profile:
from harvesters._private.core.helper.profiler import Profiler
self._profiler = Profiler()
else:
self._profiler = None
if self._profiler:
self._profiler.print_diff()
#
self._finalizer = weakref.finalize(self, self._reset)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._finalizer()
def reset(self):
self._finalizer()
@property
def cti_files(self):
"""
:return: A :class:`list` object containing :class:`str` objects.
"""
return self._cti_files
@property
def device_info_list(self):
"""
:return: A :class:`list` object containing :class:`DeviceInfo` objects
"""
return self._device_info_list
@property
def timeout_for_update(self):
return self._timeout_for_update
@timeout_for_update.setter
def timeout_for_update(self, ms):
self._timeout_for_update = ms
@property
def has_revised_device_info_list(self):
return self._has_revised_device_list
@has_revised_device_info_list.setter
def has_revised_device_info_list(self, value):
self._has_revised_device_list = value
def create_image_acquirer(
self, list_index=None, *, id_=None,
vendor=None, model=None, tl_type=None, user_defined_name=None,
serial_number=None, version=None,
sleep_duration=_sleep_duration_default, file_path=None,
privilege='exclusive'
):
"""
Creates an image acquirer for the specified remote device and return
the created :class:`ImageAcquirer` object.
:param list_index: (Optional) Set an item index of the list of :class:`DeviceInfo` objects.
:param id_: (Optional) Set an index of the device information list.
:param vendor: (Optional) Set a vendor name of the target device.
:param model: (Optional) Set a model name of the target device.
:param tl_type: (Optional) Set a transport layer type of the target device.
:param user_defined_name: (Optional) Set a user defined name string of the target device.
:param serial_number: (Optional) Set a serial number string of the target device.
:param version: (Optional) Set a version number string of the target device.
:param sleep_duration: (Optional) Set a sleep duration in second that is inserted after the image acquisition worker is executed.
:param file_path: (Optional) Set a path to camera description file which you want to load on the target node map instead of the one which the device declares.
:param privilege: (Optional) Set an access privilege. `exclusive`, `contorl`, and `read_only` are supported. The default is `exclusive`.
:return: An :class:`ImageAcquirer` object that associates with the specified device.
Note that you have to close it when you are ready to release the
device that you have been controlled. As long as you hold it, the
controlled device will be not available from other clients.
"""
#
if self.device_info_list is None:
# TODO: Throw an exception to tell clients that there's no
# device to connect.
return
# Instantiate a GenTL Device module.
if list_index is not None:
device = self.device_info_list[list_index].create_device()
else:
keys = [
'id_', 'vendor', 'model', 'tl_type',
'user_defined_name', 'serial_number', 'version',
]
# Create a copy of the list. Do not use the original list:
candidates = self.device_info_list.copy()
for key in keys:
key_value = eval(key)
if key_value:
items_to_be_removed = []
# Find out the times to be removed from the candidates.
for item in candidates:
try:
if key_value != eval('item.' + key):
items_to_be_removed.append(item)
except GenericException as e:
# The candidate doesn't support the information.
self._logger.warn(e, exc_info=True)
pass
# Remove irrelevant items from the candidates.
for item in items_to_be_removed:
candidates.remove(item)
num_candidates = len(candidates)
if num_candidates > 1:
raise ValueError(
'You have two or more candidates. '
'You have to pass one or more keys so that '
'a single candidate is specified.'
)
elif num_candidates == 0:
raise ValueError(
'You have no candidate. '
'You have to pass one or more keys so that '
'a single candidate is specified.'
)
else:
device = candidates[0].create_device()
# Then open it.
try:
#
if privilege == 'exclusive':
_privilege = DEVICE_ACCESS_FLAGS_LIST.DEVICE_ACCESS_EXCLUSIVE
elif privilege == 'control':
_privilege = DEVICE_ACCESS_FLAGS_LIST.DEVICE_ACCESS_CONTROL
elif privilege == 'read_only':
_privilege = DEVICE_ACCESS_FLAGS_LIST.DEVICE_ACCESS_READONLY
else:
raise NotImplementedError(
'{0} is not supported.'.format(privilege)
)
#
device.open(_privilege)
except GenericException as e:
self._logger.debug(e, exc_info=True)
# Just re-throw the exception. The decision should be made by
# the client but not Harvester:
raise
else:
self._logger.info(
'Opened Device module, {0}.'.format(device.id_)
)
# Create an :class:`ImageAcquirer` object and return it.
ia = ImageAcquirer(
parent=self, device=device, profiler=self._profiler,
logger=self._logger, sleep_duration=sleep_duration,
file_path=file_path
)
self._ias.append(ia)
if self._profiler:
self._profiler.print_diff()
return ia
def add_cti_file(self, file_path: str):
"""
Adds a CTI file to work with to the CTI file list.
:param file_path: Set a file path to the target CTI file.
:return: None.
"""
if not os.path.exists(file_path):
self._logger.warning(
'Attempted to add {0} which does not exist.'.format(file_path)
)
if file_path not in self._cti_files:
self._cti_files.append(file_path)
self._logger.info(
'Added {0} to the CTI file list.'.format(file_path)
)
def remove_cti_file(self, file_path: str):
"""
Removes the specified CTI file from the CTI file list.
:param file_path: Set a file path to the target CTI file.
:return: None.
"""
if file_path in self._cti_files:
self._cti_files.remove(file_path)
self._logger.info(
'Removed {0} from the CTI file list.'.format(file_path)
)
def remove_cti_files(self):
"""
Removes all CTI files in the CTI file list.
:return: None.
"""
self._cti_files.clear()
#
self._logger.info('Removed the all CTI file from the list.')
def _open_gentl_producers(self):
#
for file_path in self._cti_files:
producer = GenTLProducer.create_producer()
try:
producer.open(file_path)
except GenericException as e:
self._logger.debug(e, exc_info=True)
else:
self._producers.append(producer)
self._logger.info(
'Initialized GenTL Producer, {0}.'.format(
producer.path_name
)
)
def _open_systems(self):
for producer in self._producers:
system = producer.create_system()
try:
system.open()
except GenericException as e:
self._logger.debug(e, exc_info=True)
else:
self._systems.append(system)
self._logger.info('Opened System module, {0}.'.format(
system.id_
)
)
def _reset(self):
"""
Initializes the :class:`Harvester` object. Once you reset the
:class:`Harvester` object, all allocated resources, including buffers
and remote device, will be released.
:return: None.
"""
#
for ia in self._ias:
ia._destroy()
self._ias.clear()
#
self._logger.info('Started resetting the Harvester object.')
self.remove_cti_files()
self._release_gentl_producers()
if self._profiler:
self._profiler.print_diff()
#
self._logger.info('Completed resetting the Harvester object.')
def _release_gentl_producers(self):
#
self._release_systems()
#
for producer in self._producers:
if producer and producer.is_open():
name = producer.path_name
producer.close()
self._logger.info('Closed {0}.'.format(name))
#
self._producers.clear()
def _release_systems(self):
#
self._release_interfaces()
#
for system in self._systems:
if system is not None and system.is_open():
name = system.id_
system.close()
self._logger.info('Closed System module, {0}.'.format(name))
#
self._systems.clear()
def _release_interfaces(self):
#
self._release_device_info_list()
#
if self._interfaces is not None:
for iface in self._interfaces:
if iface.is_open():
name = iface.id_
iface.close()
self._logger.info(
'Closed Interface module, {0}.'.format(name)
)
#
self._interfaces.clear()
def _release_device_info_list(self):
#
if self.device_info_list is not None:
self._device_info_list.clear()
#
self._logger.info('Discarded the device information list.')
def update_device_info_list(self):
"""
Updates the device information list. You'll have to call this method
every time you added CTI files or plugged/unplugged devices.
:return: None.
"""
#
self._release_gentl_producers()
try:
self._open_gentl_producers()
self._open_systems()
#
for system in self._systems:
#
system.update_interface_info_list(self.timeout_for_update)
#
for i_info in system.interface_info_list:
iface = i_info.create_interface()
try:
iface.open()
except GenericException as e:
self._logger.debug(e, exc_info=True)
else:
self._logger.info(
'Opened Interface module, {0}.'.format(iface.id_)
)
iface.update_device_info_list(self.timeout_for_update)
self._interfaces.append(iface)
for d_info in iface.device_info_list:
self.device_info_list.append(
DeviceInfo(device_info=d_info)
)
except GenericException as e:
self._logger.error(e, exc_info=True)
self._has_revised_device_list = False
else:
self._has_revised_device_list = True
#
self._logger.info('Updated the device information list.')
    def _destroy_image_acquirer(self, ia):
        """
        Releases all external resources including the controlling device.
        """
        id_ = None
        if ia.device:
            # Stop any running acquisition before tearing anything down.
            ia.stop_image_acquisition()
            #
            ia._release_data_streams()
            #
            id_ = ia._device.id_
            #
            if ia.remote_device.node_map:
                # Detach the chunk-data buffer first so the adapter holds no
                # reference into a stream buffer that is about to disappear.
                if ia._chunk_adapter:
                    ia._chunk_adapter.detach_buffer()
                    ia._chunk_adapter = None
                    self._logger.info(
                        'Detached a buffer from the chunk adapter of {0}.'.format(
                            id_
                        )
                    )
                # NOTE(review): the guard checks ``remote_device.node_map`` but
                # the port disconnected below belongs to ``device.node_map`` —
                # confirm this asymmetry is intentional.
                ia.device.node_map.disconnect()
                self._logger.info(
                    'Disconnected the port from the NodeMap of {0}.'.format(
                        id_
                    )
                )
            #
            if ia._device.is_open():
                ia._device.close()
                self._logger.info(
                    'Closed Device module, {0}.'.format(id_)
                )
            ia._device = None
        # ``id_`` stays None when the acquirer had no controlling device.
        if id_:
            self._logger.info(
                'Destroyed the ImageAcquirer object which {0} '
                'had belonged to.'.format(id_)
            )
        else:
            self._logger.info(
                'Destroyed an ImageAcquirer.'
            )
        if self._profiler:
            self._profiler.print_diff()
        self._ias.remove(ia)
if __name__ == '__main__':
    # This module is a library; nothing to do when executed directly.
    pass
| 29,838 | 4,006 | 1,857 |
035f90e4f530b151f9199f302d9fcf5d01edb3c7 | 2,954 | py | Python | experiments/do_process_generated.py | michael1788/TED | 247c37f168e178cae1237207c07d4a34679ce126 | [
"MIT"
] | 8 | 2021-11-02T09:45:14.000Z | 2022-03-11T23:15:05.000Z | experiments/do_process_generated.py | michael1788/TED | 247c37f168e178cae1237207c07d4a34679ce126 | [
"MIT"
] | null | null | null | experiments/do_process_generated.py | michael1788/TED | 247c37f168e178cae1237207c07d4a34679ce126 | [
"MIT"
] | null | null | null | import os, sys
import time
import configparser
import argparse
sys.path.append('../src/')
from python import helper as hp
from python import fixed_parameters as FP
parser = argparse.ArgumentParser(description='Process generated data for proba extraction')
parser.add_argument('-c','--configfile', type=str, help='path to config file', required=True)
parser.add_argument('-f','--name_data', type=str, help='Name of the ft file', required=True)
parser.add_argument('-e','--epoch', type=str, help='Which epoch to sample from', required=True)
parser.add_argument('-r','--repeat', type=int, help='Number of repeats', required=True)
if __name__ == '__main__':
start = time.time()
####################################
# get back parameters
args = vars(parser.parse_args())
verbose = True
configfile = args['configfile']
config = configparser.ConfigParser()
config.read(configfile)
name_data = args['name_data']
epoch = args['epoch']
if len(epoch)==1:
epoch = f'0{epoch}'
repeat = args['repeat']
mode = str(config['EXPERIMENTS']['mode'])
if verbose: print('\nSTART PROCESSING')
####################################
####################################
# paths to save data and to generated smi
dir_exp = str(config['EXPERIMENTS']['dir'])
exp_name = configfile.split('/')[-1].replace('.ini','')
if repeat>0:
savepath = f'{dir_exp}/{mode}/{exp_name}/{name_data}/generated_data_for_extraction/{repeat}/'
dir_gen = f'{dir_exp}/{mode}/{exp_name}/{name_data}/generated_data/{repeat}/'
else:
savepath = f'{dir_exp}/{mode}/{exp_name}/{name_data}/generated_data_for_extraction/'
dir_gen = f'{dir_exp}/{mode}/{exp_name}/{name_data}/generated_data/'
os.makedirs(savepath, exist_ok=True)
####################################
####################################
# start
min_len = int(config['PROCESSING']['min_len'])
max_len = int(config['PROCESSING']['max_len'])
vocab = list(FP.CST_PRIOR.keys())
temp = float(config['SAMPLING']['temp'])
top_k = int(config['SAMPLING']['top_k'])
top_p = float(config['SAMPLING']['top_p'])
namefile = f'{epoch}_{temp}_{top_k}_{top_p}'
generated_smi = hp.load_obj(f'{dir_gen}{namefile}.pkl')
smis_for_extraction = {}
for i,smi in enumerate(generated_smi):
smi = smi.replace('G', '')
smi = smi.replace('E', '')
try:
tokenized_s, _ = hp.process_smi(smi, min_len, max_len, vocab)
if tokenized_s:
smis_for_extraction[i] = tokenized_s
except:
pass
hp.save_obj(smis_for_extraction, f'{savepath}{namefile}_for_extraction.pkl')
end = time.time()
if verbose: print(f'PROCESSING DONE in {end-start:.2f} seconds')
####################################
| 33.191011 | 101 | 0.576506 | import os, sys
import time
import configparser
import argparse

sys.path.append('../src/')
from python import helper as hp
from python import fixed_parameters as FP

parser = argparse.ArgumentParser(description='Process generated data for proba extraction')
parser.add_argument('-c','--configfile', type=str, help='path to config file', required=True)
parser.add_argument('-f','--name_data', type=str, help='Name of the ft file', required=True)
parser.add_argument('-e','--epoch', type=str, help='Which epoch to sample from', required=True)
parser.add_argument('-r','--repeat', type=int, help='Number of repeats', required=True)

if __name__ == '__main__':
    start = time.time()
    ####################################
    # get back parameters
    args = vars(parser.parse_args())
    verbose = True
    configfile = args['configfile']
    config = configparser.ConfigParser()
    config.read(configfile)
    name_data = args['name_data']
    epoch = args['epoch']
    if len(epoch)==1:
        # Zero-pad so the epoch matches checkpoint file naming (e.g. '05').
        epoch = f'0{epoch}'
    repeat = args['repeat']
    mode = str(config['EXPERIMENTS']['mode'])
    if verbose: print('\nSTART PROCESSING')
    ####################################
    ####################################
    # paths to save data and to generated smi
    dir_exp = str(config['EXPERIMENTS']['dir'])
    exp_name = configfile.split('/')[-1].replace('.ini','')
    if repeat>0:
        savepath = f'{dir_exp}/{mode}/{exp_name}/{name_data}/generated_data_for_extraction/{repeat}/'
        dir_gen = f'{dir_exp}/{mode}/{exp_name}/{name_data}/generated_data/{repeat}/'
    else:
        savepath = f'{dir_exp}/{mode}/{exp_name}/{name_data}/generated_data_for_extraction/'
        dir_gen = f'{dir_exp}/{mode}/{exp_name}/{name_data}/generated_data/'
    os.makedirs(savepath, exist_ok=True)
    ####################################
    ####################################
    # start
    min_len = int(config['PROCESSING']['min_len'])
    max_len = int(config['PROCESSING']['max_len'])
    vocab = list(FP.CST_PRIOR.keys())
    temp = float(config['SAMPLING']['temp'])
    top_k = int(config['SAMPLING']['top_k'])
    top_p = float(config['SAMPLING']['top_p'])
    namefile = f'{epoch}_{temp}_{top_k}_{top_p}'
    generated_smi = hp.load_obj(f'{dir_gen}{namefile}.pkl')
    smis_for_extraction = {}
    for i,smi in enumerate(generated_smi):
        # 'G'/'E' are presumably the generator's start/end tokens — confirm
        # against fixed_parameters.
        smi = smi.replace('G', '')
        smi = smi.replace('E', '')
        try:
            tokenized_s, _ = hp.process_smi(smi, min_len, max_len, vocab)
            if tokenized_s:
                smis_for_extraction[i] = tokenized_s
        except Exception:
            # Invalid SMILES are skipped on purpose. A bare ``except:`` would
            # also swallow KeyboardInterrupt/SystemExit, so catch Exception.
            pass
    hp.save_obj(smis_for_extraction, f'{savepath}{namefile}_for_extraction.pkl')
    end = time.time()
    if verbose: print(f'PROCESSING DONE in {end-start:.2f} seconds')
    ####################################
| 0 | 0 | 0 |
4547f62f51cd39690acb4f9c75fb2d2ec0385cf4 | 791 | py | Python | tab_transformer/config.py | hoopoes/tab-transformer | d5e590ccce0de4c70121e9ae4178fad1892bc46d | [
"MIT"
] | 1 | 2022-03-31T14:42:18.000Z | 2022-03-31T14:42:18.000Z | tab_transformer/config.py | hoopoes/tab-transformer | d5e590ccce0de4c70121e9ae4178fad1892bc46d | [
"MIT"
] | null | null | null | tab_transformer/config.py | hoopoes/tab-transformer | d5e590ccce0de4c70121e9ae4178fad1892bc46d | [
"MIT"
] | null | null | null | from yacs.config import CfgNode as CN
_C = CN()
# directories
_C.ADDRESS = CN()
_C.ADDRESS.DATA = 'data/'          # input data root
_C.ADDRESS.CHECK = 'checkpoints/'  # checkpoint output root
# data
_C.DATA = CN()
_C.DATA.NUM_CONT_FEATURES = 7      # continuous features (per the name) — confirm against the dataset
# model
_C.MODEL = CN()
_C.MODEL.NAME = 'base_tab'
_C.MODEL.HIDDEN_SIZE = 32
_C.MODEL.NUM_LAYERS = 6            # presumably transformer layers — confirm in model code
_C.MODEL.NUM_HEADS = 8
_C.MODEL.ATTN_DROP_RATE = 0.1
_C.MODEL.FF_DROP_RATE = 0.1
# train
_C.TRAIN = CN()
_C.TRAIN.RUN_NAME = 'v1'
_C.TRAIN.BATCH_SIZE = 64
_C.TRAIN.EPOCHS = 5
_C.TRAIN.PATIENCE = 2              # presumably early-stopping patience (epochs)
_C.TRAIN.SCHEDULER = 'cos'         # cosine-style schedule — the cycle/LR knobs below belong to it
_C.TRAIN.FIRST_CYCLE_STEPS = 100
_C.TRAIN.CYCLE_MULT = 1.0
_C.TRAIN.MAX_LR = 0.1
_C.TRAIN.MIN_LR = 0.001
_C.TRAIN.WARMUP_STEPS = 0
_C.TRAIN.GAMMA = 1.0
def get_cfg_defaults():
    """Return a fresh clone of the default configuration node.

    Cloning keeps the module-level defaults immutable from the caller's
    point of view.
    """
    defaults = _C.clone()
    return defaults
| 17.195652 | 49 | 0.692794 | from yacs.config import CfgNode as CN
_C = CN()
# directories
_C.ADDRESS = CN()
_C.ADDRESS.DATA = 'data/'          # input data root
_C.ADDRESS.CHECK = 'checkpoints/'  # checkpoint output root
# data
_C.DATA = CN()
_C.DATA.NUM_CONT_FEATURES = 7      # continuous features (per the name) — confirm against the dataset
# model
_C.MODEL = CN()
_C.MODEL.NAME = 'base_tab'
_C.MODEL.HIDDEN_SIZE = 32
_C.MODEL.NUM_LAYERS = 6            # presumably transformer layers — confirm in model code
_C.MODEL.NUM_HEADS = 8
_C.MODEL.ATTN_DROP_RATE = 0.1
_C.MODEL.FF_DROP_RATE = 0.1
# train
_C.TRAIN = CN()
_C.TRAIN.RUN_NAME = 'v1'
_C.TRAIN.BATCH_SIZE = 64
_C.TRAIN.EPOCHS = 5
_C.TRAIN.PATIENCE = 2              # presumably early-stopping patience (epochs)
_C.TRAIN.SCHEDULER = 'cos'         # cosine-style schedule — the cycle/LR knobs below belong to it
_C.TRAIN.FIRST_CYCLE_STEPS = 100
_C.TRAIN.CYCLE_MULT = 1.0
_C.TRAIN.MAX_LR = 0.1
_C.TRAIN.MIN_LR = 0.001
_C.TRAIN.WARMUP_STEPS = 0
_C.TRAIN.GAMMA = 1.0
def get_cfg_defaults():
    """Return a fresh clone of the default configuration node.

    Cloning keeps the module-level defaults immutable from the caller's
    point of view.
    """
    defaults = _C.clone()
    return defaults
| 0 | 0 | 0 |
3562785ebbb97d0ea79a702811d2fb790644030e | 939 | py | Python | exception.py | mitodl/release-script | 615fbabac46a7a3c6ffb62a1cefe20c6df6dbd7b | [
"BSD-3-Clause"
] | 15 | 2017-02-20T22:07:23.000Z | 2020-10-10T15:39:46.000Z | exception.py | mitodl/release-script | 615fbabac46a7a3c6ffb62a1cefe20c6df6dbd7b | [
"BSD-3-Clause"
] | 311 | 2016-02-11T17:09:33.000Z | 2022-01-20T19:07:54.000Z | exception.py | mitodl/release-script | 615fbabac46a7a3c6ffb62a1cefe20c6df6dbd7b | [
"BSD-3-Clause"
] | 7 | 2017-03-20T03:52:46.000Z | 2020-05-16T05:52:16.000Z | """Exceptions for release script"""
from subprocess import CalledProcessError
class InputException(Exception):
    """Exception raised for invalid input."""
class ReleaseException(Exception):
    """Exception raised for a command error due to some release status."""
class DependencyException(Exception):
    """Error if a required dependency is missing."""
class UpdateVersionException(Exception):
    """Error if the old version is invalid or cannot be found, or if there's a duplicate version."""
class VersionMismatchException(Exception):
    """Error if the version is unexpected."""
class StatusException(Exception):
    """Error if something happened when calculating the status."""
class AsyncCalledProcessError(CalledProcessError):
    """Extend CalledProcessError to print the stdout as well"""

    def __str__(self):
        """Append the captured stdout and stderr to the base error message.

        Without this override the class adds nothing over CalledProcessError,
        contradicting its docstring.
        """
        super_str = super().__str__()
        return f"{super_str}. stdout={self.stdout}, stderr={self.stderr}"
| 26.828571 | 99 | 0.734824 | """Exceptions for release script"""
from subprocess import CalledProcessError
class InputException(Exception):
    """Exception raised for invalid input."""
class ReleaseException(Exception):
    """Exception raised for a command error due to some release status."""
class DependencyException(Exception):
    """Error if a required dependency is missing."""
class UpdateVersionException(Exception):
    """Error if the old version is invalid or cannot be found, or if there's a duplicate version."""
class VersionMismatchException(Exception):
    """Error if the version is unexpected."""
class StatusException(Exception):
    """Error if something happened when calculating the status."""
class AsyncCalledProcessError(CalledProcessError):
    """CalledProcessError variant whose message also carries stdout/stderr."""

    def __str__(self):
        # Reuse the standard message and tack the captured streams onto it.
        base_message = super().__str__()
        return '{0}. stdout={1}, stderr={2}'.format(
            base_message, self.stdout, self.stderr
        )
| 109 | 0 | 27 |
d631702dab2ab11cee442a3fe1efcd253ba50134 | 1,190 | py | Python | app/error.py | chick0/upload | 5f63590706d9a5083cbb2a42a1e5e386e9590424 | [
"MIT"
] | 1 | 2021-07-27T07:43:20.000Z | 2021-07-27T07:43:20.000Z | app/error.py | chick0/upload | 5f63590706d9a5083cbb2a42a1e5e386e9590424 | [
"MIT"
] | null | null | null | app/error.py | chick0/upload | 5f63590706d9a5083cbb2a42a1e5e386e9590424 | [
"MIT"
] | null | null | null |
from flask import current_app
from flask import render_template
from app.custom_error import *
from app.template_filter import display_size
from app.models import Error
def forbidden(e):
    """403 handler: render the error page for insufficient permissions."""
    return render_template(
        "error.html",
        error=Error(
            title="403",
            subtitle="권한이 부족합니다."
        )
    ), 403


def page_not_found(e):
    """404 handler: render the error page for a missing file."""
    return render_template(
        "error.html",
        error=Error(
            title="404",
            subtitle="해당 파일을 찾을 수 없습니다."
        )
    ), 404


def file_is_too_big(e):
    """413 handler: the upload exceeded MAX_CONTENT_LENGTH."""
    max_size = display_size(current_app.config['MAX_CONTENT_LENGTH'])
    return render_template(
        "error.html",
        error=Error(
            title="파일 업로드 실패",
            subtitle=f"업로드 가능한 가장 큰 파일의 크기는 <b>{max_size}</b>입니다."
        )
    ), 413


def file_is_empty(e):
    """400 handler: no file was found in the upload request."""
    return render_template(
        "error.html",
        error=Error(
            title="파일 업로드 실패",
            subtitle="업로드 할 파일을 발견하지 못했습니다."
        )
    ), 400


# error map: the handlers above were missing from this module, so importing
# it raised NameError when error_map was built.
error_map = {
    403: forbidden,
    404: page_not_found,
    413: file_is_too_big,
    # custom error
    FileIsEmpty: file_is_empty,
    FileIsTooBig: file_is_too_big
}
| 19.508197 | 69 | 0.589916 |
from flask import current_app
from flask import render_template
from app.custom_error import *
from app.template_filter import display_size
from app.models import Error
def forbidden(e):
    """403 handler: render the error page for insufficient permissions."""
    return render_template(
        "error.html",
        error=Error(
            title="403",
            subtitle="권한이 부족합니다."
        )
    ), 403
def page_not_found(e):
    """404 handler: render the error page for a missing file."""
    return render_template(
        "error.html",
        error=Error(
            title="404",
            subtitle="해당 파일을 찾을 수 없습니다."
        )
    ), 404
def file_is_too_big(e):
    """413 handler: the upload exceeded MAX_CONTENT_LENGTH."""
    max_size = display_size(current_app.config['MAX_CONTENT_LENGTH'])
    return render_template(
        "error.html",
        error=Error(
            title="파일 업로드 실패",
            subtitle=f"업로드 가능한 가장 큰 파일의 크기는 <b>{max_size}</b>입니다."
        )
    ), 413
def file_is_empty(e):
    """400 handler: no file was found in the upload request."""
    return render_template(
        "error.html",
        error=Error(
            title="파일 업로드 실패",
            subtitle="업로드 할 파일을 발견하지 못했습니다."
        )
    ), 400
# error map: status code / custom exception -> handler function
error_map = {
    403: forbidden,
    404: page_not_found,
    413: file_is_too_big,
    # custom error
    FileIsEmpty: file_is_empty,
    FileIsTooBig: file_is_too_big
}
| 872 | 0 | 92 |
c61d8127d280cb39cda8d95a6861368a4aab867e | 1,492 | py | Python | tests/bool_support_test.py | gglin001/poptorch | 61f38ed2d8c6b672e023862eb698865fa7f4724e | [
"MIT"
] | 128 | 2020-12-08T22:22:46.000Z | 2022-03-23T10:54:26.000Z | tests/bool_support_test.py | gglin001/poptorch | 61f38ed2d8c6b672e023862eb698865fa7f4724e | [
"MIT"
] | 4 | 2021-06-22T14:26:28.000Z | 2022-02-15T11:25:05.000Z | tests/bool_support_test.py | gglin001/poptorch | 61f38ed2d8c6b672e023862eb698865fa7f4724e | [
"MIT"
] | 7 | 2020-12-09T20:32:56.000Z | 2022-01-18T16:12:24.000Z | #!/usr/bin/env python3
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import torch
import pytest
import poptorch
import helpers
# Not needed for mean or logsumexp
reduce_ops = [torch.sum, torch.prod]
# Values chosen so that pairwise equality gives mixed True/False masks.
test_tensors = [
    torch.tensor([1.0, 2.0, 3.1]),
    torch.tensor([1.1, 2.0, 3.0]),
    torch.tensor([0.0, 0.0, 0.0])
]
@pytest.mark.parametrize("op", reduce_ops)
@pytest.mark.parametrize("t_1", test_tensors)
@pytest.mark.parametrize("t_2", test_tensors)
def test_reduce_two_bool_types(op, t_1, t_2):
    """Reducing a bool (comparison) tensor: values must match between native
    torch and poptorch; native promotes to int64 while the IPU uses int32.

    The decorated function was missing here, leaving three dangling
    decorators (a syntax error); restored from the original test.
    """
    class Model(torch.nn.Module):
        def forward(self, x, y):
            return op(x == y)

    model = Model()
    poptorch_model = poptorch.inferenceModel(model)
    native_out = model(t_1, t_2)
    poptorch_out = poptorch_model(t_1, t_2)
    # expected = no dims (scalar)
    helpers.assert_allclose(actual=poptorch_out, expected=native_out)
    assert native_out.dtype == torch.int64
    assert poptorch_out.dtype == torch.int32
| 27.62963 | 78 | 0.658847 | #!/usr/bin/env python3
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
import torch
import pytest
import poptorch
import helpers
# Not needed for mean or logsumexp
reduce_ops = [torch.sum, torch.prod]
# Values chosen so that pairwise equality gives mixed True/False masks.
test_tensors = [
    torch.tensor([1.0, 2.0, 3.1]),
    torch.tensor([1.1, 2.0, 3.0]),
    torch.tensor([0.0, 0.0, 0.0])
]
@pytest.mark.parametrize("op", reduce_ops)
@pytest.mark.parametrize("t_1", test_tensors)
@pytest.mark.parametrize("t_2", test_tensors)
def test_reduce_two_bool_types(op, t_1, t_2):
    """Reducing a bool (comparison) tensor: values must match between native
    torch and poptorch; native promotes to int64 while the IPU uses int32."""
    class Model(torch.nn.Module):
        def forward(self, x, y):
            return op(x == y)
    model = Model()
    poptorch_model = poptorch.inferenceModel(model)
    native_out = model(t_1, t_2)
    poptorch_out = poptorch_model(t_1, t_2)
    #expected = no dims (scalar)
    helpers.assert_allclose(actual=poptorch_out, expected=native_out)
    assert native_out.dtype == torch.int64
    assert poptorch_out.dtype == torch.int32
def test_logits():
    """Accuracy computed from a bool (argmax == label) tensor must agree
    between native torch and poptorch."""
    class Model(torch.nn.Module):
        def forward(self, logits, y):
            acc = torch.sum(torch.argmax(logits, -1) == y) / float(y.size(0))
            return acc
    model = Model()
    # Each row's argmax equals the corresponding label, so accuracy is 1.0.
    logits = torch.tensor([[1.0, 2.0, 3.0], [3.0, 1.0, 2.0], [2.0, 3.0, 1.0]])
    y = torch.tensor([[0], [2], [1]])
    poptorch_model = poptorch.inferenceModel(model)
    native_out = model(logits, y)
    poptorch_out = poptorch_model(logits, y)
    helpers.assert_allclose(actual=poptorch_out, expected=native_out)
| 976 | 0 | 45 |
6a9e0ef97e4be21bcbfc3271ae76cee4bda25e3d | 1,575 | py | Python | configs/foveabox/fovea_r50_fpn_4x4_100e_minicoco500_lrsched2.py | kemaloksuz/Mask-aware-IoU | 4c07665d3fd1080066cb99897baeee159e9dce4e | [
"Apache-2.0"
] | 24 | 2021-10-19T19:47:23.000Z | 2022-03-29T08:06:27.000Z | configs/foveabox/fovea_r50_fpn_4x4_100e_minicoco500_lrsched2.py | kemaloksuz/Mask-aware-IoU | 4c07665d3fd1080066cb99897baeee159e9dce4e | [
"Apache-2.0"
] | null | null | null | configs/foveabox/fovea_r50_fpn_4x4_100e_minicoco500_lrsched2.py | kemaloksuz/Mask-aware-IoU | 4c07665d3fd1080066cb99897baeee159e9dce4e | [
"Apache-2.0"
] | 5 | 2021-11-16T02:14:11.000Z | 2022-03-28T08:55:40.000Z | _base_ = [
'../_base_/datasets/minicoco500_detection_augm.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings: FoveaBox detector, ResNet-50 backbone with FPN neck
model = dict(
    type='FOVEA',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        num_outs=5,
        add_extra_convs='on_input'),
    bbox_head=dict(
        type='FoveaHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        base_edge_list=[16, 32, 64, 128, 256],
        # one (min, max) object-scale range per FPN level
        scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)),
        sigma=0.4,
        with_deform=False,
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=1.50,  # non-default focal-loss settings — presumably tuned; confirm
            alpha=0.4,
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)))
# training and testing settings
train_cfg = dict()
test_cfg = dict(
    nms_pre=1000,
    score_thr=0.05,
    nms=dict(type='nms', iou_thr=0.5),
    max_per_img=100)
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
# learning policy: LR steps at epochs 75 and 95 of the 100-epoch schedule
lr_config = dict(step=[75, 95])
total_epochs = 100
| 27.631579 | 78 | 0.579683 | _base_ = [
'../_base_/datasets/minicoco500_detection_augm.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings: FoveaBox detector, ResNet-50 backbone with FPN neck
model = dict(
    type='FOVEA',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        num_outs=5,
        add_extra_convs='on_input'),
    bbox_head=dict(
        type='FoveaHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        base_edge_list=[16, 32, 64, 128, 256],
        # one (min, max) object-scale range per FPN level
        scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)),
        sigma=0.4,
        with_deform=False,
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=1.50,  # non-default focal-loss settings — presumably tuned; confirm
            alpha=0.4,
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)))
# training and testing settings
train_cfg = dict()
test_cfg = dict(
    nms_pre=1000,
    score_thr=0.05,
    nms=dict(type='nms', iou_thr=0.5),
    max_per_img=100)
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
# learning policy: LR steps at epochs 75 and 95 of the 100-epoch schedule
lr_config = dict(step=[75, 95])
total_epochs = 100
| 0 | 0 | 0 |
099dcfc5cb9b5d40663db12e855a208a72e8e311 | 10,820 | py | Python | fgspectra/frequency.py | JackLonergan97/fgspectra | ae450914bb12cbaa8d4b626e1a0842a6d8fa916d | [
"BSD-3-Clause"
] | null | null | null | fgspectra/frequency.py | JackLonergan97/fgspectra | ae450914bb12cbaa8d4b626e1a0842a6d8fa916d | [
"BSD-3-Clause"
] | null | null | null | fgspectra/frequency.py | JackLonergan97/fgspectra | ae450914bb12cbaa8d4b626e1a0842a6d8fa916d | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
r"""
Frequency-dependent foreground components.
This module implements the frequency-dependent component of common foreground
contaminants.
This package draws inspiration from FGBuster (Davide Poletti and Josquin Errard)
and BeFoRe (David Alonso and Ben Thorne).
"""
import inspect
import types
import numpy as np
from scipy import constants
from .model import Model
T_CMB = 2.72548  # CMB temperature [K]
H_OVER_KT_CMB = constants.h * 1e9 / constants.k / T_CMB  # h/(k_B T_CMB) per GHz
def _bandpass_integration():
    ''' Bandpass integrated version of the caller
    The caller should have
    if isinstance(nu, list):
        return _bandpass_integration()
    at the very beginning.
    This function
    * iterates over the ``nu`` argument of the caller
      (while keeping all the other arguments fixed)
    * splits each element of the iteration in ``nu_band, transmittance``
    * integrates the caller function over the bandpass.
      ``np.trapz(caller(nu_band) * transmittance, nu_band)``
      Note that no normalization nor unit conversion is done to the
      transmittance
    * stacks the output of the iteration (the frequency dimension is the last)
      and returns it
    '''
    # This piece of code is fairly complicated, we did because:
    # 1) We want to call eval on each element of the nu list (i.e. we iterate
    #    over the bandpasses) but we don't want to define a new eval_bandpass
    #    function for every class
    # 2) We don't want to use a decorator because it breaks the signature
    #    handling of eval and the modification of its defaults.
    # _bandpass_integration does from the insides of eval the same thing that
    # a decorator would do from the outside. This is achieved through the
    # following pretty ugly kludge
    # Simpler code that achieve the same result is welcome
    # You are here because this function was called inside eval before any other
    # variable was defined.
    # We now retrieve the keyword arguments that were passed to eval because we
    # have to use them for the evaluation of eval on each bandpass
    # It assumes that _bandpass_integration was called inside
    # f(self, **kw) -- f is typically the eval method.
    frame = inspect.currentframe().f_back
    kw = frame.f_locals
    self = kw['self']
    del kw['self']  # self was in the locals but is not a keyword argument
    # We create a copy of eval itself, we'll call it for each bandpass.
    # Rebuilding a plain function from the caller's code object lets us
    # re-enter eval with modified keyword arguments.
    f = types.FunctionType(frame.f_code, frame.f_globals)
    # Store the nu-transmittance list because the nu keyword argumnt has to be
    # modified with the frequencies of each bandpass
    nus_transmittances = kw['nu']
    # Get the shape of the output from the result of the first bandpass
    kw['nu'] = nus_transmittances[0][0]
    res = np.trapz(f(self, **kw) * nus_transmittances[0][1], kw['nu'])
    # Append the frequency dimension and put res in its first entry
    res = res[..., np.newaxis] * np.array([1.]+[0.]*(len(nus_transmittances)-1))
    # Fill the remaining entries by iterating over the rest of the bandpasses
    for i_band, (nu, transmittance) in enumerate(nus_transmittances[1:], 1):
        kw['nu'] = nu
        res[..., i_band] = np.trapz(f(self, **kw) * transmittance, nu)
    return res
def _rj2cmb(nu):
    """Conversion factor between K_RJ and K_CMB brightness at ``nu`` in GHz.

    This module-level helper is used by every SED's ``eval`` below; it was
    missing from this module, so calling any SED raised NameError.
    """
    x = H_OVER_KT_CMB * nu
    return (np.expm1(x) / x)**2 / np.exp(x)


class PowerLaw(Model):
    r""" Power Law

    .. math:: f(\nu) = (\nu / \nu_0)^{\beta}
    """
    def eval(self, nu=None, beta=None, nu_0=None):
        """ Evaluation of the SED

        Parameters
        ----------
        nu: float or array
            Frequency in the same units as `nu_0`. If array, the shape is
            ``(freq)``.
        beta: float or array
            Spectral index. If array, the shape is ``(...)``.
        nu_0: float or array
            Reference frequency in the same units as `nu`. If array, the shape
            is ``(...)``.

        Returns
        -------
        sed: ndarray
            If `nu` is an array, the shape is ``(..., freq)``.
            If `nu` is scalar, the shape is ``(..., 1)``.
            Note that the last dimension is guaranteed to be the frequency.

        Note
        ----
        The extra dimensions ``...`` in the output are the broadcast of the
        ``...`` in the input (which are required to be broadcast-compatible).

        Examples
        --------
        - T, E and B synchrotron SEDs with the same reference frequency but
          different spectral indices. `beta` is an array with shape ``(3)``,
          `nu_0` is a scalar.
        - SEDs of synchrotron and dust (approximated as power law). Both `beta`
          and `nu_0` are arrays with shape ``(2)``
        """
        if isinstance(nu, list):
            return _bandpass_integration()
        beta = np.array(beta)[..., np.newaxis]
        nu_0 = np.array(nu_0)[..., np.newaxis]
        # Rescale by the RJ->CMB conversion ratio (see _rj2cmb).
        return (nu / nu_0)**beta * (_rj2cmb(nu) / _rj2cmb(nu_0))
class Synchrotron(PowerLaw):
    """ Alias of :class:`PowerLaw`
    """
    pass
class ModifiedBlackBody(Model):
    r""" Modified black body in K_RJ
    .. math:: f(\nu) = (\nu / \nu_0)^{\beta + 1} / (e^x - 1)
    where :math:`x = h \nu / k_B T_d`
    """
    def eval(self, nu=None, nu_0=None, temp=None, beta=None):
        """ Evaluation of the SED
        Parameters
        ----------
        nu: float or array
            Frequency in GHz.
        beta: float or array
            Spectral index.
        temp: float or array
            Dust temperature.
        nu_0: float
            Reference frequency in GHz (it gets the same 1e9 scaling as
            ``nu`` below).
        Returns
        -------
        sed: ndarray
            The last dimension is the frequency dependence.
            The leading dimensions are the broadcast between the hypothetic
            dimensions of `beta` and `temp`.
        """
        if isinstance(nu, list):
            return _bandpass_integration()
        beta = np.array(beta)[..., np.newaxis]
        temp = np.array(temp)[..., np.newaxis]
        x = 1e+9 * constants.h * nu / (constants.k * temp)
        x_0 = 1e+9 * constants.h * nu_0 / (constants.k * temp)
        res = (nu / nu_0)**(beta + 1.0) * np.expm1(x_0) / np.expm1(x)
        # Rescale by the RJ->CMB conversion ratio (see _rj2cmb).
        return res * (_rj2cmb(nu) / _rj2cmb(nu_0))
class CIB(ModifiedBlackBody):
    """ Alias of :class:`ModifiedBlackBody`
    """
    pass
class ThermalSZ(Model):
    r""" Thermal Sunyaev-Zel'dovich in K_CMB

    This class implements the spectral distortion

    .. math:: f(\nu) = x \coth(x/2) - 4

    where :math:`x = h \nu / k_B T_CMB`
    """

    @staticmethod
    def f(nu):
        """tSZ frequency function; ``nu`` in GHz.

        This helper was missing from this module while ``eval`` calls
        ``ThermalSZ.f`` — restored.
        """
        # Dimensionless frequency x = h nu / (k_B T_CMB).
        x = constants.h * (nu * 1e9) / (constants.k * T_CMB)
        return (x / np.tanh(x / 2.0) - 4.0)

    def eval(self, nu=None, nu_0=None):
        """Compute the SED with the given frequency and parameters.

        nu : float
            Frequency in GHz.
        nu_0 : float
            Reference frequency in GHz; the result is f(nu) / f(nu_0).
        """
        # NOTE: eval must be a plain instance method — the stray
        # ``@staticmethod`` previously applied here broke calls made through
        # an instance and _bandpass_integration, which reads ``self`` from
        # the caller's frame.
        if isinstance(nu, list):
            return _bandpass_integration()
        return ThermalSZ.f(nu) / ThermalSZ.f(nu_0)
class FreeFree(Model):
    r""" Free-free
    .. math:: f(\nu) = EM * ( 1 + log( 1 + (\nu_{ff} / \nu)^{3/\pi} ) )
    .. math:: \nu_{ff} = 255.33e9 * (Te / 1000)^{3/2}
    """
    def eval(self, nu=None, EM=None, Te=None):
        """ Evaluation of the SED
        Parameters
        ----------
        nu: float or array
            Frequency in the same units as `nu_0`. If array, the shape is
            ``(freq)``.
        EM: float or array
            Emission measure in cm^-6 pc (usually around 300). If array, the shape is ``(...)``.
        Te: float or array
            Electron temperature (typically around 7000). If array, the shape is ``(...)``.
        Returns
        -------
        sed: ndarray
            If `nu` is an array, the shape is ``(..., freq)``.
            If `nu` is scalar, the shape is ``(..., 1)``.
            Note that the last dimension is guaranteed to be the frequency.
        Note
        ----
        The extra dimensions ``...`` in the output are the broadcast of the
        ``...`` in the input (which are required to be broadcast-compatible).
        Examples
        --------
        - Free-free emission in temperature.
        """
        if isinstance(nu, list):
            return _bandpass_integration()
        EM = np.array(EM)[..., np.newaxis]
        Te = np.array(Te)[..., np.newaxis]
        Teff = (Te / 1.e3)**(1.5)
        nuff = 255.33e9 * Teff  # turnover frequency per the class docstring
        gff = 1. + np.log(1. + (nuff / nu)**(np.sqrt(3) / np.pi))
        # NOTE(review): the author-left warning below flags that the units of
        # this expression are unverified; kept as-is on purpose.
        print("warning: I need to check the units on this")
        return EM * gff
class ConstantSED(Model):
    """SED component with no frequency dependence."""

    def eval(self, nu=None, amp=1.):
        """Evaluate the (flat) SED.

        Parameters
        ----------
        nu: float or array
            Only its shape matters: the output is constant in frequency.
        amp: float or array
            Amplitude (or set of amplitudes) of the constant SED.

        Returns
        -------
        sed: ndarray
            ``amp.shape + (freq,)`` if `nu` is an array, ``amp.shape + (1,)``
            if it is a scalar; the last axis is always the frequency.
        """
        if isinstance(nu, list):
            return _bandpass_integration()
        amplitude = np.array(amp)[..., np.newaxis]
        return amplitude * np.ones_like(np.array(nu))
class Join(Model):
    """ Join several SED models together
    """
    def __init__(self, *seds, **kwargs):
        """ Join several SED models together

        Parameters
        ----------
        *sed:
            Sequence of SED models to be joined together
        """
        self._seds = seds
        self.set_defaults(**kwargs)

    def eval(self, kwseq=None):
        """Compute the SED with the given frequency and parameters.

        kwseq: sequence of dict, optional
            The length of ``kwseq`` has to be equal to the number of SEDs
            joined. ``kwseq[i]`` is a dictionary containing the keyword
            arguments of the ``i``-th SED.
        """
        # NOTE: eval is a regular method. The stray ``@property`` previously
        # applied here was a bug: a property getter cannot take arguments, so
        # attribute access evaluated the SEDs immediately (with no parameters)
        # and ``join.eval(...)`` then tried to call the returned ndarray.
        if kwseq:
            seds = [sed(**kwargs) for sed, kwargs in zip(self._seds, kwseq)]
        else:  # Handles the case in which no parameter has to be passed
            seds = [sed() for sed in self._seds]
        # Stack the individual SEDs along a new leading axis.
        res = np.empty((len(seds),) + np.broadcast(*seds).shape)
        for i in range(len(seds)):
            res[i] = seds[i]
        return res
| 32.011834 | 96 | 0.581238 | # -*- coding: utf-8 -*-
r"""
Frequency-dependent foreground components.
This module implements the frequency-dependent component of common foreground
contaminants.
This package draws inspiration from FGBuster (Davide Poletti and Josquin Errard)
and BeFoRe (David Alonso and Ben Thorne).
"""
import inspect
import types
import numpy as np
from scipy import constants
from .model import Model
T_CMB = 2.72548  # CMB temperature [K]
H_OVER_KT_CMB = constants.h * 1e9 / constants.k / T_CMB  # h/(k_B T_CMB) per GHz
def _bandpass_integration():
    ''' Bandpass integrated version of the caller
    The caller should have
    if isinstance(nu, list):
        return _bandpass_integration()
    at the very beginning.
    This function
    * iterates over the ``nu`` argument of the caller
      (while keeping all the other arguments fixed)
    * splits each element of the iteration in ``nu_band, transmittance``
    * integrates the caller function over the bandpass.
      ``np.trapz(caller(nu_band) * transmittance, nu_band)``
      Note that no normalization nor unit conversion is done to the
      transmittance
    * stacks the output of the iteration (the frequency dimension is the last)
      and returns it
    '''
    # This piece of code is fairly complicated, we did because:
    # 1) We want to call eval on each element of the nu list (i.e. we iterate
    #    over the bandpasses) but we don't want to define a new eval_bandpass
    #    function for every class
    # 2) We don't want to use a decorator because it breaks the signature
    #    handling of eval and the modification of its defaults.
    # _bandpass_integration does from the insides of eval the same thing that
    # a decorator would do from the outside. This is achieved through the
    # following pretty ugly kludge
    # Simpler code that achieve the same result is welcome
    # You are here because this function was called inside eval before any other
    # variable was defined.
    # We now retrieve the keyword arguments that were passed to eval because we
    # have to use them for the evaluation of eval on each bandpass
    # It assumes that _bandpass_integration was called inside
    # f(self, **kw) -- f is typically the eval method.
    frame = inspect.currentframe().f_back
    kw = frame.f_locals
    self = kw['self']
    del kw['self']  # self was in the locals but is not a keyword argument
    # We create a copy of eval itself, we'll call it for each bandpass.
    # Rebuilding a plain function from the caller's code object lets us
    # re-enter eval with modified keyword arguments.
    f = types.FunctionType(frame.f_code, frame.f_globals)
    # Store the nu-transmittance list because the nu keyword argumnt has to be
    # modified with the frequencies of each bandpass
    nus_transmittances = kw['nu']
    # Get the shape of the output from the result of the first bandpass
    kw['nu'] = nus_transmittances[0][0]
    res = np.trapz(f(self, **kw) * nus_transmittances[0][1], kw['nu'])
    # Append the frequency dimension and put res in its first entry
    res = res[..., np.newaxis] * np.array([1.]+[0.]*(len(nus_transmittances)-1))
    # Fill the remaining entries by iterating over the rest of the bandpasses
    for i_band, (nu, transmittance) in enumerate(nus_transmittances[1:], 1):
        kw['nu'] = nu
        res[..., i_band] = np.trapz(f(self, **kw) * transmittance, nu)
    return res
def _rj2cmb(nu):
    """Conversion factor between K_RJ and K_CMB brightness at ``nu`` in GHz."""
    x = H_OVER_KT_CMB * nu
    return (np.expm1(x) / x)**2 / np.exp(x)
class PowerLaw(Model):
    r""" Power Law
    .. math:: f(\nu) = (\nu / \nu_0)^{\beta}
    """
    def eval(self, nu=None, beta=None, nu_0=None):
        """ Evaluation of the SED
        Parameters
        ----------
        nu: float or array
            Frequency in the same units as `nu_0`. If array, the shape is
            ``(freq)``.
        beta: float or array
            Spectral index. If array, the shape is ``(...)``.
        nu_0: float or array
            Reference frequency in the same units as `nu`. If array, the shape
            is ``(...)``.
        Returns
        -------
        sed: ndarray
            If `nu` is an array, the shape is ``(..., freq)``.
            If `nu` is scalar, the shape is ``(..., 1)``.
            Note that the last dimension is guaranteed to be the frequency.
        Note
        ----
        The extra dimensions ``...`` in the output are the broadcast of the
        ``...`` in the input (which are required to be broadcast-compatible).
        Examples
        --------
        - T, E and B synchrotron SEDs with the same reference frequency but
          different spectral indices. `beta` is an array with shape ``(3)``,
          `nu_0` is a scalar.
        - SEDs of synchrotron and dust (approximated as power law). Both `beta`
          and `nu_0` are arrays with shape ``(2)``
        """
        if isinstance(nu, list):
            return _bandpass_integration()
        beta = np.array(beta)[..., np.newaxis]
        nu_0 = np.array(nu_0)[..., np.newaxis]
        # Rescale by the RJ->CMB conversion ratio (see _rj2cmb).
        return (nu / nu_0)**beta * (_rj2cmb(nu) / _rj2cmb(nu_0))
class Synchrotron(PowerLaw):
""" Alias of :class:`PowerLaw`
"""
pass
class ModifiedBlackBody(Model):
    r""" Modified black body in K_RJ
    .. math:: f(\nu) = (\nu / \nu_0)^{\beta + 1} / (e^x - 1)
    where :math:`x = h \nu / k_B T_d`
    """
    def eval(self, nu=None, nu_0=None, temp=None, beta=None):
        """ Evaluation of the SED
        Parameters
        ----------
        nu: float or array
            Frequency in GHz.
        beta: float or array
            Spectral index.
        temp: float or array
            Dust temperature.
        nu_0: float
            Reference frequency in GHz (the code scales it by 1e9 exactly
            like `nu`).
        Returns
        -------
        sed: ndarray
            The last dimension is the frequency dependence.
            The leading dimensions are the broadcast between the hypothetic
            dimensions of `beta` and `temp`.
        """
        # Bandpass-integrated evaluation; see the note in PowerLaw.eval about
        # the zero-argument _bandpass_integration() call — do not rename the
        # locals here.
        if isinstance(nu, list):
            return _bandpass_integration()
        # Trailing frequency axis so the parameters broadcast against `nu`.
        beta = np.array(beta)[..., np.newaxis]
        temp = np.array(temp)[..., np.newaxis]
        # 1e+9 converts the GHz frequencies to Hz before forming
        # x = h nu / (k_B T_d).
        x = 1e+9 * constants.h * nu / (constants.k * temp)
        x_0 = 1e+9 * constants.h * nu_0 / (constants.k * temp)
        res = (nu / nu_0)**(beta + 1.0) * np.expm1(x_0) / np.expm1(x)
        # Convert the K_RJ result into K_CMB units.
        return res * (_rj2cmb(nu) / _rj2cmb(nu_0))
class CIB(ModifiedBlackBody):
    """ Alias of :class:`ModifiedBlackBody`
    """
    # Presumably the Cosmic Infrared Background component; behavior is
    # identical to ModifiedBlackBody, only the name differs.
    pass
class ThermalSZ(Model):
    r""" Thermal Sunyaev-Zel'dovich in K_CMB
    This class implements the
    .. math:: f(\nu) = x \coth(x/2) - 4
    where :math:`x = h \nu / k_B T_CMB`
    """
    @staticmethod
    def f(nu):
        """Frequency dependence x*coth(x/2) - 4, with ``nu`` in GHz."""
        # nu is converted from GHz to Hz; x / tanh(x/2) == x * coth(x/2).
        x = constants.h * (nu * 1e9) / (constants.k * T_CMB)
        return (x / np.tanh(x / 2.0) - 4.0)
    def eval(self, nu=None, nu_0=None):
        """Compute the SED with the given frequency and parameters.
        nu : float
            Frequency in GHz.
        nu_0 : float
            Reference frequency in GHz; the SED is normalized to 1 there.
        """
        # Bandpass-integrated evaluation (see note in PowerLaw.eval).
        if isinstance(nu, list):
            return _bandpass_integration()
        # Normalized so that the SED equals 1 at the reference frequency.
        return ThermalSZ.f(nu) / ThermalSZ.f(nu_0)
class FreeFree(Model):
    r""" Free-free
    .. math:: f(\nu) = EM * ( 1 + log( 1 + (\nu_{ff} / \nu)^{\sqrt{3}/\pi} ) )
    .. math:: \nu_{ff} = 255.33e9 * (Te / 1000)^{3/2}
    """
    def eval(self, nu=None, EM=None, Te=None):
        """ Evaluation of the SED
        Parameters
        ----------
        nu: float or array
            Frequency in the same units as `nu_0`. If array, the shape is
            ``(freq)``.
        EM: float or array
            Emission measure in cm^-6 pc (usually around 300). If array, the shape is ``(...)``.
        Te: float or array
            Electron temperature (typically around 7000). If array, the shape is ``(...)``.
        Returns
        -------
        sed: ndarray
            If `nu` is an array, the shape is ``(..., freq)``.
            If `nu` is scalar, the shape is ``(..., 1)``.
            Note that the last dimension is guaranteed to be the frequency.
        Note
        ----
        The extra dimensions ``...`` in the output are the broadcast of the
        ``...`` in the input (which are required to be broadcast-compatible).
        Examples
        --------
        - Free-free emission in temperature.
        """
        # Bandpass-integrated evaluation (see note in PowerLaw.eval).
        if isinstance(nu, list):
            return _bandpass_integration()
        # Trailing frequency axis so EM/Te broadcast against `nu`.
        EM = np.array(EM)[..., np.newaxis]
        Te = np.array(Te)[..., np.newaxis]
        Teff = (Te / 1.e3)**(1.5)
        nuff = 255.33e9 * Teff
        # Gaunt factor; note the exponent is sqrt(3)/pi — the class docstring
        # formula has been aligned with this code.
        gff = 1. + np.log(1. + (nuff / nu)**(np.sqrt(3) / np.pi))
        # NOTE(review): leftover runtime warning about unverified units; keep
        # until the unit analysis is confirmed, then remove.
        print("warning: I need to check the units on this")
        return EM * gff
class ConstantSED(Model):
    """Frequency-independent component."""
    def eval(self, nu=None, amp=1.):
        """ Evaluation of the SED
        Parameters
        ----------
        nu: float or array
            It just determines the shape of the output.
        amp: float or array
            Amplitude (or set of amplitudes) of the constant SED.
        Returns
        -------
        sed: ndarray
            If `nu` is an array, the shape is ``amp.shape + (freq)``.
            If `nu` is scalar, the shape is ``amp.shape + (1)``.
            Note that the last dimension is guaranteed to be the frequency.
        """
        # Bandpass-integrated evaluation (see note in PowerLaw.eval).
        if isinstance(nu, list):
            return _bandpass_integration()
        # Trailing axis so `amp` broadcasts against the frequency dimension.
        amp = np.array(amp)[..., np.newaxis]
        return amp * np.ones_like(np.array(nu))
class Join(Model):
    """Concatenate the outputs of several SED models along a new leading
    axis, exposing them as a single composite model."""
    def __init__(self, *seds, **kwargs):
        """ Join several SED models together
        Parameters
        ----------
        *sed:
            Sequence of SED models to be joined together
        """
        self._seds = seds
        self.set_defaults(**kwargs)
    def set_defaults(self, **kwargs):
        # Defaults are forwarded component-wise through the 'kwseq' sequence.
        if 'kwseq' in kwargs:
            for sed, sed_kwargs in zip(self._seds, kwargs['kwseq']):
                sed.set_defaults(**sed_kwargs)
    def _get_repr(self):
        inner_reprs = [sed._get_repr() for sed in self._seds]
        return {type(self).__name__: inner_reprs}
    @property
    def defaults(self):
        return {'kwseq': [sed.defaults for sed in self._seds]}
    def eval(self, kwseq=None):
        """Compute the SED with the given frequency and parameters.
        *kwseq
            The length of ``kwseq`` has to be equal to the number of SEDs
            joined. ``kwseq[i]`` is a dictionary containing the keyword
            arguments of the ``i``-th SED.
        """
        if kwseq:
            evaluated = [sed(**sed_kwargs)
                         for sed, sed_kwargs in zip(self._seds, kwseq)]
        else:
            # No parameters supplied: every SED falls back on its defaults.
            evaluated = [sed() for sed in self._seds]
        # Broadcast every component onto the common shape and stack them
        # along a new leading axis.
        stacked = np.empty((len(evaluated),) + np.broadcast(*evaluated).shape)
        for component_index, component in enumerate(evaluated):
            stacked[component_index] = component
        return stacked
| 456 | 0 | 129 |
d5babdded86478b84213756a9060ca39ad92cb11 | 11,292 | py | Python | tests/test_crawl.py | jesuslosada/scrapy | 8be28fe4ca8b1cd011d5f7e03661da8a6bb3217b | [
"BSD-3-Clause"
] | 3 | 2016-02-26T20:16:36.000Z | 2021-08-25T12:04:31.000Z | tests/test_crawl.py | jesuslosada/scrapy | 8be28fe4ca8b1cd011d5f7e03661da8a6bb3217b | [
"BSD-3-Clause"
] | 2 | 2021-09-20T19:54:39.000Z | 2022-03-22T22:52:10.000Z | tests/test_crawl.py | jesuslosada/scrapy | 8be28fe4ca8b1cd011d5f7e03661da8a6bb3217b | [
"BSD-3-Clause"
] | 6 | 2017-12-28T03:59:54.000Z | 2020-02-26T16:01:45.000Z | import json
import logging
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.trial.unittest import TestCase
from scrapy.http import Request
from scrapy.crawler import CrawlerRunner
from scrapy.utils.python import to_unicode
from tests.spiders import FollowAllSpider, DelaySpider, SimpleSpider, \
BrokenStartRequestsSpider, SingleRequestSpider, DuplicateStartRequestsSpider
from tests.mockserver import MockServer
| 40.328571 | 103 | 0.676585 | import json
import logging
from testfixtures import LogCapture
from twisted.internet import defer
from twisted.trial.unittest import TestCase
from scrapy.http import Request
from scrapy.crawler import CrawlerRunner
from scrapy.utils.python import to_unicode
from tests.spiders import FollowAllSpider, DelaySpider, SimpleSpider, \
BrokenStartRequestsSpider, SingleRequestSpider, DuplicateStartRequestsSpider
from tests.mockserver import MockServer
class CrawlTestCase(TestCase):
    """End-to-end crawl tests.

    Each test starts a local MockServer (see setUp) and drives real crawls
    through Twisted's inlineCallbacks, asserting on spider state and on the
    captured log output.
    """
    def setUp(self):
        # MockServer is a context manager; enter/exit are driven manually so
        # the server lives for exactly the duration of each test.
        self.mockserver = MockServer()
        self.mockserver.__enter__()
        self.runner = CrawlerRunner()
    def tearDown(self):
        self.mockserver.__exit__(None, None, None)
    @defer.inlineCallbacks
    def test_follow_all(self):
        crawler = self.runner.create_crawler(FollowAllSpider)
        yield crawler.crawl()
        self.assertEqual(len(crawler.spider.urls_visited), 11) # 10 + start_url
    @defer.inlineCallbacks
    def test_delay(self):
        # short to long delays
        yield self._test_delay(0.2, False)
        yield self._test_delay(1, False)
        # randoms
        yield self._test_delay(0.2, True)
        yield self._test_delay(1, True)
    @defer.inlineCallbacks
    def _test_delay(self, delay, randomize):
        """Crawl with DOWNLOAD_DELAY=delay and check the average interval
        between requests is not much smaller than the configured delay."""
        settings = {"DOWNLOAD_DELAY": delay, 'RANDOMIZE_DOWNLOAD_DELAY': randomize}
        crawler = CrawlerRunner(settings).create_crawler(FollowAllSpider)
        yield crawler.crawl(maxlatency=delay * 2)
        t = crawler.spider.times
        totaltime = t[-1] - t[0]
        avgd = totaltime / (len(t) - 1)
        # Randomized delays vary more, so allow a wider tolerance.
        tolerance = 0.6 if randomize else 0.2
        self.assertTrue(avgd > delay * (1 - tolerance),
                        "download delay too small: %s" % avgd)
    @defer.inlineCallbacks
    def test_timeout_success(self):
        crawler = self.runner.create_crawler(DelaySpider)
        yield crawler.crawl(n=0.5)
        self.assertTrue(crawler.spider.t1 > 0)
        self.assertTrue(crawler.spider.t2 > 0)
        self.assertTrue(crawler.spider.t2 > crawler.spider.t1)
    @defer.inlineCallbacks
    def test_timeout_failure(self):
        # The download timeout (0.35s) is shorter than the server delay
        # (0.5s): t2 must stay 0 and t2_err must record the failure time.
        crawler = CrawlerRunner({"DOWNLOAD_TIMEOUT": 0.35}).create_crawler(DelaySpider)
        yield crawler.crawl(n=0.5)
        self.assertTrue(crawler.spider.t1 > 0)
        self.assertTrue(crawler.spider.t2 == 0)
        self.assertTrue(crawler.spider.t2_err > 0)
        self.assertTrue(crawler.spider.t2_err > crawler.spider.t1)
        # server hangs after receiving response headers
        yield crawler.crawl(n=0.5, b=1)
        self.assertTrue(crawler.spider.t1 > 0)
        self.assertTrue(crawler.spider.t2 == 0)
        self.assertTrue(crawler.spider.t2_err > 0)
        self.assertTrue(crawler.spider.t2_err > crawler.spider.t1)
    @defer.inlineCallbacks
    def test_retry_503(self):
        crawler = self.runner.create_crawler(SimpleSpider)
        with LogCapture() as l:
            yield crawler.crawl("http://localhost:8998/status?n=503")
        self._assert_retried(l)
    @defer.inlineCallbacks
    def test_retry_conn_failed(self):
        # Port 65432 has no listener, so the TCP connection itself fails.
        crawler = self.runner.create_crawler(SimpleSpider)
        with LogCapture() as l:
            yield crawler.crawl("http://localhost:65432/status?n=503")
        self._assert_retried(l)
    @defer.inlineCallbacks
    def test_retry_dns_error(self):
        crawler = self.runner.create_crawler(SimpleSpider)
        with LogCapture() as l:
            # try to fetch the homepage of a non-existent domain
            yield crawler.crawl("http://dns.resolution.invalid./")
        self._assert_retried(l)
    @defer.inlineCallbacks
    def test_start_requests_bug_before_yield(self):
        # The spider raises ZeroDivisionError before yielding any request;
        # exactly one error record must reach the log.
        with LogCapture('scrapy', level=logging.ERROR) as l:
            crawler = self.runner.create_crawler(BrokenStartRequestsSpider)
            yield crawler.crawl(fail_before_yield=1)
        self.assertEqual(len(l.records), 1)
        record = l.records[0]
        self.assertIsNotNone(record.exc_info)
        self.assertIs(record.exc_info[0], ZeroDivisionError)
    @defer.inlineCallbacks
    def test_start_requests_bug_yielding(self):
        with LogCapture('scrapy', level=logging.ERROR) as l:
            crawler = self.runner.create_crawler(BrokenStartRequestsSpider)
            yield crawler.crawl(fail_yielding=1)
        self.assertEqual(len(l.records), 1)
        record = l.records[0]
        self.assertIsNotNone(record.exc_info)
        self.assertIs(record.exc_info[0], ZeroDivisionError)
    @defer.inlineCallbacks
    def test_start_requests_lazyness(self):
        settings = {"CONCURRENT_REQUESTS": 1}
        crawler = CrawlerRunner(settings).create_crawler(BrokenStartRequestsSpider)
        yield crawler.crawl()
        # NOTE(review): the actual laziness assertions are disabled; the test
        # currently only checks that the crawl completes.
        #self.assertTrue(False, crawler.spider.seedsseen)
        #self.assertTrue(crawler.spider.seedsseen.index(None) < crawler.spider.seedsseen.index(99),
        #                crawler.spider.seedsseen)
    @defer.inlineCallbacks
    def test_start_requests_dupes(self):
        settings = {"CONCURRENT_REQUESTS": 1}
        crawler = CrawlerRunner(settings).create_crawler(DuplicateStartRequestsSpider)
        # dont_filter=True: every duplicate is visited (2 urls * 3 dupes).
        yield crawler.crawl(dont_filter=True, distinct_urls=2, dupe_factor=3)
        self.assertEqual(crawler.spider.visited, 6)
        # dont_filter=False: duplicates are filtered, one visit per url.
        yield crawler.crawl(dont_filter=False, distinct_urls=3, dupe_factor=4)
        self.assertEqual(crawler.spider.visited, 3)
    @defer.inlineCallbacks
    def test_unbounded_response(self):
        # Completeness of responses without Content-Length or Transfer-Encoding
        # can not be determined, we treat them as valid but flagged as "partial"
        from six.moves.urllib.parse import urlencode
        query = urlencode({'raw': '''\
HTTP/1.1 200 OK
Server: Apache-Coyote/1.1
X-Powered-By: Servlet 2.4; JBoss-4.2.3.GA (build: SVNTag=JBoss_4_2_3_GA date=200807181417)/JBossWeb-2.0
Set-Cookie: JSESSIONID=08515F572832D0E659FD2B0D8031D75F; Path=/
Pragma: no-cache
Expires: Thu, 01 Jan 1970 00:00:00 GMT
Cache-Control: no-cache
Cache-Control: no-store
Content-Type: text/html;charset=UTF-8
Content-Language: en
Date: Tue, 27 Aug 2013 13:05:05 GMT
Connection: close
foo body
with multiples lines
'''})
        crawler = self.runner.create_crawler(SimpleSpider)
        with LogCapture() as l:
            yield crawler.crawl("http://localhost:8998/raw?{0}".format(query))
        self.assertEqual(str(l).count("Got response 200"), 1)
    @defer.inlineCallbacks
    def test_retry_conn_lost(self):
        # connection lost after receiving data
        crawler = self.runner.create_crawler(SimpleSpider)
        with LogCapture() as l:
            yield crawler.crawl("http://localhost:8998/drop?abort=0")
        self._assert_retried(l)
    @defer.inlineCallbacks
    def test_retry_conn_aborted(self):
        # connection lost before receiving data
        crawler = self.runner.create_crawler(SimpleSpider)
        with LogCapture() as l:
            yield crawler.crawl("http://localhost:8998/drop?abort=1")
        self._assert_retried(l)
    def _assert_retried(self, log):
        """Assert the captured log shows exactly two retry attempts followed
        by a final 'gave up' message."""
        self.assertEqual(str(log).count("Retrying"), 2)
        self.assertEqual(str(log).count("Gave up retrying"), 1)
    @defer.inlineCallbacks
    def test_referer_header(self):
        """Referer header is set by RefererMiddleware unless it is already set"""
        req0 = Request('http://localhost:8998/echo?headers=1&body=0', dont_filter=1)
        req1 = req0.replace()
        req2 = req0.replace(headers={'Referer': None})
        req3 = req0.replace(headers={'Referer': 'http://example.com'})
        # Requests are chained through meta['next'] (consumed by
        # SingleRequestSpider), so they are issued one after another.
        req0.meta['next'] = req1
        req1.meta['next'] = req2
        req2.meta['next'] = req3
        crawler = self.runner.create_crawler(SingleRequestSpider)
        yield crawler.crawl(seed=req0)
        # basic asserts in case of weird communication errors
        self.assertIn('responses', crawler.spider.meta)
        self.assertNotIn('failures', crawler.spider.meta)
        # start requests doesn't set Referer header
        # NOTE(review): this reads responses[2], not responses[0] — looks like
        # a typo in the original test; confirm which response req0 maps to.
        echo0 = json.loads(to_unicode(crawler.spider.meta['responses'][2].body))
        self.assertNotIn('Referer', echo0['headers'])
        # following request sets Referer to start request url
        echo1 = json.loads(to_unicode(crawler.spider.meta['responses'][1].body))
        self.assertEqual(echo1['headers'].get('Referer'), [req0.url])
        # next request avoids Referer header
        echo2 = json.loads(to_unicode(crawler.spider.meta['responses'][2].body))
        self.assertNotIn('Referer', echo2['headers'])
        # last request explicitly sets a Referer header
        echo3 = json.loads(to_unicode(crawler.spider.meta['responses'][3].body))
        self.assertEqual(echo3['headers'].get('Referer'), ['http://example.com'])
    @defer.inlineCallbacks
    def test_engine_status(self):
        from scrapy.utils.engine import get_engine_status
        est = []
        def cb(response):
            # Snapshot the engine status while the response is being scraped.
            est.append(get_engine_status(crawler.engine))
        crawler = self.runner.create_crawler(SingleRequestSpider)
        yield crawler.crawl(seed='http://localhost:8998/', callback_func=cb)
        self.assertEqual(len(est), 1, est)
        s = dict(est[0])
        self.assertEqual(s['engine.spider.name'], crawler.spider.name)
        self.assertEqual(s['len(engine.scraper.slot.active)'], 1)
    @defer.inlineCallbacks
    def test_graceful_crawl_error_handling(self):
        """
        Test whether errors happening anywhere in Crawler.crawl() are properly
        reported (and not somehow swallowed) after a graceful engine shutdown.
        The errors should not come from within Scrapy's core but from within
        spiders/middlewares/etc., e.g. raised in Spider.start_requests(),
        SpiderMiddleware.process_start_requests(), etc.
        """
        class TestError(Exception):
            pass
        class FaultySpider(SimpleSpider):
            def start_requests(self):
                raise TestError
        crawler = self.runner.create_crawler(FaultySpider)
        yield self.assertFailure(crawler.crawl(), TestError)
        self.assertFalse(crawler.crawling)
    @defer.inlineCallbacks
    def test_open_spider_error_on_faulty_pipeline(self):
        settings = {
            "ITEM_PIPELINES": {
                "tests.pipelines.ZeroDivisionErrorPipeline": 300,
            }
        }
        crawler = CrawlerRunner(settings).create_crawler(SimpleSpider)
        yield self.assertFailure(
            self.runner.crawl(crawler, "http://localhost:8998/status?n=200"),
            ZeroDivisionError)
        self.assertFalse(crawler.crawling)
    @defer.inlineCallbacks
    def test_crawlerrunner_accepts_crawler(self):
        crawler = self.runner.create_crawler(SimpleSpider)
        with LogCapture() as log:
            yield self.runner.crawl(crawler, "http://localhost:8998/status?n=200")
        self.assertIn("Got response 200", str(log))
    @defer.inlineCallbacks
    def test_crawl_multiple(self):
        self.runner.crawl(SimpleSpider, "http://localhost:8998/status?n=200")
        self.runner.crawl(SimpleSpider, "http://localhost:8998/status?n=503")
        with LogCapture() as log:
            yield self.runner.join()
        self._assert_retried(log)
        self.assertIn("Got response 200", str(log))
| 7,372 | 3,443 | 23 |
367806765cb3c2806102a720a2093e5e1fd95006 | 1,344 | py | Python | setup.py | vogoltsov/robotframework-docker | 3873c906e41065bb358c89a204ed454daae15a30 | [
"Apache-2.0"
] | 11 | 2019-07-09T02:08:06.000Z | 2022-01-17T00:50:28.000Z | setup.py | vogoltsov/robotframework-docker | 3873c906e41065bb358c89a204ed454daae15a30 | [
"Apache-2.0"
] | 11 | 2020-12-21T11:40:32.000Z | 2021-03-25T09:41:23.000Z | setup.py | vogoltsov/robotframework-docker | 3873c906e41065bb358c89a204ed454daae15a30 | [
"Apache-2.0"
] | 1 | 2021-07-22T11:41:46.000Z | 2021-07-22T11:41:46.000Z | """Setup module for Robot Framework Docker Library package."""
import os
from setuptools import setup
# Resolve the absolute directory containing this setup.py so the build works
# no matter where pip invokes it from.
here = os.path.abspath(os.path.dirname(__file__))
# The PyPI long description is everything after the literal
# 'long_description split' marker inside README.rst.
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
    long_description = readme_file.read().split('long_description split')[1].strip()
setup(
    name='robotframework-docker',
    version='1.3.0',
    description='A Robot Framework Docker Library',
    long_description=long_description,
    # NOTE(review): the text comes from README.rst (reStructuredText) but is
    # declared as markdown here — confirm which format PyPI should render.
    long_description_content_type='text/markdown',
    url='https://github.com/vogoltsov/robotframework-docker',
    author='Vitaly Ogoltsov',
    author_email='vitaly.ogoltsov@me.com',
    license='Apache License 2.0',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Testing',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Framework :: Robot Framework :: Library',
    ],
    keywords='testing testautomation robotframework docker docker-compose',
    # The single-module library lives under src/.
    package_dir={'': 'src'},
    py_modules=['DockerComposeLibrary'],
    install_requires=[
        'robotframework>=4,<5',
        'packaging',
    ],
)
| 33.6 | 84 | 0.674851 | """Setup module for Robot Framework Docker Library package."""
import os
from setuptools import setup
# Resolve the absolute directory containing this setup.py so the build works
# no matter where pip invokes it from.
here = os.path.abspath(os.path.dirname(__file__))
# The PyPI long description is everything after the literal
# 'long_description split' marker inside README.rst.
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
    long_description = readme_file.read().split('long_description split')[1].strip()
setup(
    name='robotframework-docker',
    version='1.3.0',
    description='A Robot Framework Docker Library',
    long_description=long_description,
    # NOTE(review): the text comes from README.rst (reStructuredText) but is
    # declared as markdown here — confirm which format PyPI should render.
    long_description_content_type='text/markdown',
    url='https://github.com/vogoltsov/robotframework-docker',
    author='Vitaly Ogoltsov',
    author_email='vitaly.ogoltsov@me.com',
    license='Apache License 2.0',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Testing',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Framework :: Robot Framework :: Library',
    ],
    keywords='testing testautomation robotframework docker docker-compose',
    # The single-module library lives under src/.
    package_dir={'': 'src'},
    py_modules=['DockerComposeLibrary'],
    install_requires=[
        'robotframework>=4,<5',
        'packaging',
    ],
)
| 0 | 0 | 0 |
e84d2d85a67f287abd2ce2feaa78c2794839d6cb | 179 | py | Python | plugins/rot_encoder.py | NotErh/capel-bot | e7b8c271486971e9a924ec39b96eab1d7ca83d1e | [
"MIT"
] | null | null | null | plugins/rot_encoder.py | NotErh/capel-bot | e7b8c271486971e9a924ec39b96eab1d7ca83d1e | [
"MIT"
] | null | null | null | plugins/rot_encoder.py | NotErh/capel-bot | e7b8c271486971e9a924ec39b96eab1d7ca83d1e | [
"MIT"
] | null | null | null | import codecs | 22.375 | 47 | 0.670391 | import codecs
class RotEncoder:
    """Apply the ROT13 substitution cipher to strings."""

    def __init__(self):
        # Look up the codec once; getencoder returns a callable producing an
        # (encoded_string, length_consumed) tuple.
        self.rot13 = codecs.getencoder('rot13')

    def encode_string(self, string):
        """Return *string* with every ASCII letter rotated by 13 places."""
        encoded, _consumed = self.rot13(string)
        return encoded
38ce50f012765ca046abfe5cff150e8e4ef7155d | 4,178 | py | Python | facade_project/data/facade_random_rot_dataset.py | gregunz/MasterSemesterProject | 085f36c58b1cac141b0318657876b796c4dc5101 | [
"MIT"
] | 5 | 2019-06-10T08:42:00.000Z | 2021-09-22T08:24:24.000Z | facade_project/data/facade_random_rot_dataset.py | gregunz/MasterSemesterProject | 085f36c58b1cac141b0318657876b796c4dc5101 | [
"MIT"
] | 1 | 2019-10-31T12:56:27.000Z | 2019-10-31T12:56:27.000Z | facade_project/data/facade_random_rot_dataset.py | gregunz/MasterSemesterProject | 085f36c58b1cac141b0318657876b796c4dc5101 | [
"MIT"
] | 2 | 2019-09-13T10:23:34.000Z | 2021-05-07T14:15:46.000Z | import os
import random
import torch
from torch.utils.data import Dataset
from tqdm.auto import tqdm
from facade_project import NUM_IMAGES, NUM_ROTATIONS, FACADE_ROT_IMAGES_TENSORS_DIR, FACADE_ROT_HEATMAPS_TENSORS_DIR
from facade_project.data.facade_heatmap_dataset import HEATMAP_INFOS_PER_ROT
from facade_project.geometry.heatmap import HeatmapsInfo
class FacadeRandomRotDataset(Dataset):
"""
Facade Random Rotations
A dataset which return rotated version of an image randomly,
useful to build batches for data augmentation
Items of the dataset are: tuple(image, mask) or tuple(image, dict) if add_aux_channels_fn is used
A demo can be found in "notebook/nb_demo_datasets.ipynb"
Note that this dataset cannot makes use the CachedDataset directly because it samples images within
the available rotations. Hence a caching is available directly and implemented here enable
sampling different rotations
"""
| 40.960784 | 116 | 0.668262 | import os
import random
import torch
from torch.utils.data import Dataset
from tqdm.auto import tqdm
from facade_project import NUM_IMAGES, NUM_ROTATIONS, FACADE_ROT_IMAGES_TENSORS_DIR, FACADE_ROT_HEATMAPS_TENSORS_DIR
from facade_project.data.facade_heatmap_dataset import HEATMAP_INFOS_PER_ROT
from facade_project.geometry.heatmap import HeatmapsInfo
def create_img_to_num_rot(num_img, num_rot_per_img):
    """Build the per-image rotation-count list: each of the ``num_img``
    images gets the same ``num_rot_per_img`` precomputed rotations."""
    return [num_rot_per_img] * num_img
class FacadeRandomRotDataset(Dataset):
    """
    Facade Random Rotations
    A dataset which return rotated version of an image randomly,
    useful to build batches for data augmentation
    Items of the dataset are: tuple(image, mask) or tuple(image, dict) if add_aux_channels_fn is used
    A demo can be found in "notebook/nb_demo_datasets.ipynb"
    Note that this dataset cannot makes use the CachedDataset directly because it samples images within
    the available rotations. Hence a caching is available directly and implemented here enable
    sampling different rotations
    """
    def __init__(self, img_dir=FACADE_ROT_IMAGES_TENSORS_DIR, add_targets_fn=None, img_to_num_rot=None,
                 caching=False, init_caching=False, device=None):
        # img_dir: directory holding the img_/lbl_ tensor files (see
        #     get_filename for the naming scheme).
        # add_targets_fn: optional fn(img_idx, rot_idx, device) -> dict of
        #     extra targets merged with the mask (e.g. add_heatmaps_target).
        # img_to_num_rot: rotations available per image; defaults to
        #     NUM_ROTATIONS for each of the NUM_IMAGES images.
        # caching: cache items lazily in RAM; init_caching: preload all.
        # device: optional map_location for torch.load; incompatible with
        #     caching (the assert below forbids caching GPU tensors in RAM).
        Dataset.__init__(self)
        self.dir_path = img_dir
        self.aux_targets_fn = add_targets_fn
        if img_to_num_rot is None:
            img_to_num_rot = create_img_to_num_rot(NUM_IMAGES, NUM_ROTATIONS)
        self.img_to_num_rot = img_to_num_rot
        self.cached_images = None
        self.device = device
        assert not (device is not None and (init_caching or caching)), 'cannot cache on GPU -> GPU_RAM'
        # checking all files exist (fail fast before training starts)
        for idx, num_rot in enumerate(self.img_to_num_rot):
            for rot_idx in range(num_rot):
                for is_img in [True, False]:
                    fname = self.get_filename(idx, rot_idx, is_img)
                    assert os.path.isfile(fname), 'file ({}) does not exist'.format(fname)
        if caching or init_caching:
            self.cached_images = dict()
            if init_caching:
                # Eagerly load every (image, rotation) pair into the cache.
                for img_idx in tqdm(list(range(FacadeRandomRotDataset.__len__(self)))):
                    for rot_idx in range(NUM_ROTATIONS):
                        img, lbl = self.get_rot_item(img_idx, rot_idx)
                        self.cached_images[(img_idx, rot_idx)] = (img, lbl)
    def __len__(self):
        # One dataset entry per image; the rotation is sampled in __getitem__.
        return len(self.img_to_num_rot)
    def __getitem__(self, idx):
        # Pick one of the precomputed rotations uniformly at random — this is
        # the data-augmentation step.
        rot_idx = random.randint(0, self.img_to_num_rot[idx] - 1)
        if self.cached_images is not None and (idx, rot_idx) in self.cached_images:
            img, lbl = self.cached_images[(idx, rot_idx)]
        else:
            if idx >= FacadeRandomRotDataset.__len__(self): raise IndexError
            img, lbl = self.get_rot_item(idx, rot_idx)
            if self.cached_images is not None:
                self.cached_images[(idx, rot_idx)] = (img, lbl)
        return img, lbl
    def get_rot_item(self, idx, rot_idx):
        # Load the image tensor and its label mask (cast to long, as expected
        # by the .long() call below — presumably for loss functions).
        img, lbl = torch.load(self.get_filename(idx, rot_idx, True), map_location=self.device), \
                   torch.load(self.get_filename(idx, rot_idx, False), map_location=self.device).long()
        targets = lbl
        if self.aux_targets_fn is not None:
            # With auxiliary targets the label becomes a dict keyed by name.
            targets = {
                'mask': lbl,
            }
            aux_targets = self.aux_targets_fn(idx, rot_idx, device=self.device)
            assert type(aux_targets) is dict
            targets.update(aux_targets)
        return img, targets
    def get_filename(self, img_idx, rot_idx, is_img):
        # Files are named img_III_RRR.torch / lbl_III_RRR.torch
        # (zero-padded image and rotation indices).
        name = 'img' if is_img else 'lbl'
        return '{}/{}_{:03d}_{:03d}.torch'.format(self.dir_path, name, img_idx, rot_idx)
def add_heatmaps_target(img_idx, rot_idx, device):
    """Load the precomputed door/window heatmap tensor for one
    (image, rotation) pair and wrap its metadata; usable as the
    ``add_targets_fn`` of :class:`FacadeRandomRotDataset`."""
    heatmap_path = '{}/heatmaps_door-window_{:03d}_{:03d}.torch'.format(
        FACADE_ROT_HEATMAPS_TENSORS_DIR, img_idx, rot_idx)
    return {
        'heatmaps': torch.load(heatmap_path, map_location=device),
        'heatmaps_info': HeatmapsInfo(HEATMAP_INFOS_PER_ROT[img_idx][rot_idx]),
    }
| 3,042 | 0 | 181 |
5d3f5cd4808c240d0431fdf210524eb4129fbddd | 1,246 | py | Python | ocean_drilling_db/create_database.py | rickdberg/ocean_drilling_rt_modeling_db | e9951fa933f41ea4740d1e66a02a6d89df10a8f9 | [
"MIT"
] | 4 | 2019-12-17T11:54:32.000Z | 2022-02-18T16:58:20.000Z | ocean_drilling_db/create_database.py | rickdberg/ocean_drilling_rt_modeling_db | e9951fa933f41ea4740d1e66a02a6d89df10a8f9 | [
"MIT"
] | 1 | 2020-11-28T12:38:34.000Z | 2020-11-28T12:38:34.000Z | ocean_drilling_db/create_database.py | rickdberg/ocean_drilling_db | e9951fa933f41ea4740d1e66a02a6d89df10a8f9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 24 15:01:56 2018
@author: rick
"""
from sqlalchemy import create_engine
# eof | 38.9375 | 110 | 0.716693 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 24 15:01:56 2018
@author: rick
"""
from sqlalchemy import create_engine
def create_db(username, password, host, db_name, hole_metadata, age_depth, interstitial_water_chem, mad, cns):
    """Create the MySQL database (if missing) and upload the five ocean
    drilling tables from their pandas DataFrames.

    The DataFrames are written (replacing any existing tables) as
    hole_metadata, age_depth, iw_chem, mad and cns.
    """
    server_url = 'mysql://{}:{}@{}'.format(username, password, host)
    # Connect at server level first so the target schema can be created.
    server_engine = create_engine(server_url)
    server_engine.execute("CREATE DATABASE IF NOT EXISTS {}".format(db_name))
    db_engine = create_engine('{}/{}'.format(server_url, db_name))
    # Upload every DataFrame under its table name, in the original order.
    uploads = (
        ('hole_metadata', hole_metadata),
        ('age_depth', age_depth),
        ('iw_chem', interstitial_water_chem),
        ('mad', mad),
        ('cns', cns),
    )
    for table_name, frame in uploads:
        frame.to_sql(name=table_name, con=db_engine, if_exists='replace', chunksize=3000, index=False)
# eof | 1,072 | 0 | 23 |
72713e4770785e80a43bd2c691071d956264799a | 239 | py | Python | top_k_dissect.py | jinmingteo/pytorch-image-models | 2aaa8b88ca2b4c8bc24dc0c80b06f03bb1f3d480 | [
"Apache-2.0"
] | null | null | null | top_k_dissect.py | jinmingteo/pytorch-image-models | 2aaa8b88ca2b4c8bc24dc0c80b06f03bb1f3d480 | [
"Apache-2.0"
] | null | null | null | top_k_dissect.py | jinmingteo/pytorch-image-models | 2aaa8b88ca2b4c8bc24dc0c80b06f03bb1f3d480 | [
"Apache-2.0"
] | null | null | null | import pandas as pd
# Class labels must be in sorted order so the integer ids stored in
# topk_ids.csv map back to the right label.
classes = ['A', 'B', 'C', 'D', 'E']
# BUG FIX: sorted() returns a new list; the original bare `sorted(classes)`
# discarded its result and never actually sorted the labels.
classes = sorted(classes)
# Column 1 of topk_ids.csv holds the top-1 predicted class id per sample.
df = pd.read_csv('topk_ids.csv', header=None)
# Frequency of each predicted id.
top1_ans = dict(df[1].value_counts())
# Translate numeric ids into human-readable class names.
new_dict = {classes[key]: val for key, val in top1_ans.items()}
print(new_dict)
| 19.916667 | 61 | 0.665272 | import pandas as pd
# Class labels must be in sorted order so the integer ids stored in
# topk_ids.csv map back to the right label.
classes = ['A', 'B', 'C', 'D', 'E']
# BUG FIX: sorted() returns a new list; the original bare `sorted(classes)`
# discarded its result and never actually sorted the labels.
classes = sorted(classes)
# Column 1 of topk_ids.csv holds the top-1 predicted class id per sample.
df = pd.read_csv('topk_ids.csv', header=None)
# Frequency of each predicted id.
top1_ans = dict(df[1].value_counts())
# Translate numeric ids into human-readable class names.
new_dict = {classes[key]: val for key, val in top1_ans.items()}
print(new_dict)
| 0 | 0 | 0 |
61d002f5030dd4f292dfc9b7878220462c8fb577 | 5,679 | py | Python | jmapp/routes/job.py | genwch/jmapp | 760fef05286a31b1f8216b4f5af9ed6941a35106 | [
"MIT"
] | null | null | null | jmapp/routes/job.py | genwch/jmapp | 760fef05286a31b1f8216b4f5af9ed6941a35106 | [
"MIT"
] | null | null | null | jmapp/routes/job.py | genwch/jmapp | 760fef05286a31b1f8216b4f5af9ed6941a35106 | [
"MIT"
] | null | null | null | import os
from flask import Blueprint
from jmapp.lib.auth import jwt_required
from model import job_mod, apply_mod, offer_mod
fname = os.path.basename(__file__).split(".")[0]
job = Blueprint(fname, __name__)
job_m = job_mod()
apply_m = apply_mod()
offer_m = offer_mod()
@job.route("/", methods=["GET"])
@jwt_required
@job.route("/view", methods=["GET"])
@job.route("/view", methods=["POST"])
@job.route("/apply", methods=["GET"])
@job.route("/jobcat", methods=["POST"])
@job.route("/offer", methods=["GET"])
| 33.60355 | 103 | 0.558021 | import os
from flask import Blueprint
from jmapp.lib.auth import jwt_required
from model import job_mod, apply_mod, offer_mod
# The blueprint is named after this module's file name ("job").
fname = os.path.basename(__file__).split(".")[0]
job = Blueprint(fname, __name__)
# Module-level model singletons shared by every request handler below.
job_m = job_mod()
apply_m = apply_mod()
offer_m = offer_mod()
@job.route("/", methods=["GET"])
@jwt_required
def job_get():
    """Render the job list page for the logged-in user."""
    from flask import render_template, session
    owner = session.get("usr_cde", None)
    # NOTE(review): app_tbl is never used below — confirm apply_m.table() has
    # no required side effects before deleting this call.
    app_tbl = apply_m.table(owner=owner)
    # Applications created by the current user, reduced to the columns joined
    # against the job table.
    app_dt, _ = apply_m.find({"cre_by": owner})
    cols = ["aid", "job_cde"]
    app_col = [c for c in apply_m.cols if c.get("name") in cols]
    app_dt = [{k: v for k, v in d.items() if k in cols} for d in app_dt]
    # Extra synthetic column — presumably filled by job_m.table with the
    # application count; TODO confirm.
    app_col.append({"type": "text", "name": "appcnt", "val": ""})
    join = [(app_col, app_dt)]
    tbl = job_m.table(owner=owner, join=join, title="Job list")
    opts = job_m.get_opts()
    return render_template("job.html.j2", obj=[{"type": "tbl", "obj": tbl, "opts": opts}], newbtn=True)
@job.route("/view", methods=["GET"])
def job_view_get():
    """Render a single job: its form plus, depending on the viewer, either
    the applications received (job owner) or the offer addressed to them
    (applicant).

    Query string: msg (flash text), jid (job id), edit ("1" enables
    editing), job_cde (job code, used when no jid is given).
    """
    from flask import render_template, request, session
    owner = session.get("usr_cde", None)
    paralst = ("msg", "jid", "edit", "job_cde")
    para = {p: request.args.get(p, None) for p in paralst}
    # Look the job up by code when no numeric id was supplied.
    # NOTE(review): `== None` / `!= None` should be `is None` / `is not None`.
    if para.get("jid", None)==None and para.get("job_cde", None)!=None:
        filt = {"job_cde": para.get("job_cde", None)}
        # NOTE(review): leftover debug print — consider removing.
        print(filt)
        form = job_m.form(filt=filt)
        print(form)
    else:
        filt = {"jid": para.get("jid", None)} if para.get(
            "jid", None) != None else {}
        form = job_m.form(filt=filt)
    # The form is editable only when edit=1 was requested.
    readonly = True if para.get("edit", "0") != "1" else False
    form.readonly = readonly
    obj = [{"type": "form", "obj": form}]
    # Extract the job code and creator from the populated form columns.
    jcde = None
    for j in [c.get("val") for c in form.cols if c.get("name") == "job_cde"]:
        jcde = j
        break
    creby = None
    for j in [c.get("val") for c in form.cols if c.get("name") == "cre_by"]:
        creby = j
        break
    if jcde != None:
        off_dt, _ = offer_m.find({"job_cde": jcde})
        if creby == owner:
            # Viewer owns the job: show the applications table joined with
            # any offers already made (oid/aid/rate plus an offer count).
            cols = ["oid", "aid", "rate"]
            off_col = [c for c in offer_m.cols if c.get("name") in cols]
            tdt = []
            for d in off_dt:
                dt = {k: v for k, v in d.items() if k in cols}
                dt.update({"offcnt": str(len(off_dt))})
                tdt.append(dt)
            off_dt = tdt
            # NOTE(review): leftover debug print — consider removing.
            print(off_dt)
            off_col.append(
                {"type": "text", "name": "offcnt", "val": str(len(off_dt))})
            join = [(off_col, off_dt)]
            app_tbl = apply_m.table(
                filt={"job_cde": jcde}, join=join, owner=owner)
            if app_tbl.data != []:
                obj.append({"type": "tbl", "obj": app_tbl})
        else:
            # Viewer is an applicant: show (read-only) the offer for this
            # job, but only when they actually applied to it.
            off_form = offer_m.form(filt={"job_cde": jcde})
            off_form.readonly = True
            if off_form.cols != []:
                aid = [c.get("val")
                       for c in off_form.cols if c.get("name") == "aid"]
                app_tbl = apply_m.table(filt={"aid": aid[0], "cre_by": owner})
                if app_tbl.data != []:
                    obj.append({"type": "form", "obj": off_form})
    return render_template("job.html.j2", obj=obj, msg=para.get("msg", None))
@job.route("/view", methods=["POST"])
def job_view_post():
    """Create or update a job from the submitted form.

    An optional ``jid`` query parameter selects an existing job to update;
    without it a new job is created.  On success the user is redirected to
    the job list, otherwise back to the form with an error message.
    """
    from flask import redirect, request, session
    # Direct key access (rather than .get) deliberately raises KeyError when
    # no user is logged in.
    # NOTE(review): the value itself is unused — confirm whether it should be
    # passed to job_m.add as the record owner.
    owner = session["usr_cde"]
    args = request.args.get("jid", None)
    urlpara = "&jid={}".format(args) if args is not None else ""
    filt = {"jid": args} if args is not None else {}
    form = job_m.form()
    # Keep only the posted fields that correspond to actual form columns
    # (the column-name list is computed once, not per posted field).
    allowed = [c.get("name") for c in form.cols]
    para = {k: v for k, v in request.form.items() if k in allowed}
    if filt != {}:
        para.update(filt)
    rtn = job_m.add(para)
    if rtn != []:
        return redirect("/job")
    return redirect("?msg={}{}".format("Invalid input", urlpara))
@job.route("/apply", methods=["GET"])
def job_apply_get():
    """Record an application to the job identified by ``job_cde``.

    Redirects to the job list on success, or back with an error message
    when the application could not be recorded.
    """
    from flask import redirect, request
    job_cde = request.args.get("job_cde", None)
    filt = {"job_cde": job_cde} if job_cde is not None else {}
    form = apply_m.form()
    # Keep only posted fields that match real form columns.
    # NOTE(review): this reads request.form on a GET route — confirm the
    # client actually submits form data here.
    allowed = [c.get("name") for c in form.cols]
    para = {k: v for k, v in request.form.items() if k in allowed}
    if filt != {}:
        para.update(filt)
    rtn = apply_m.add(para)
    if rtn != []:
        return redirect("/job")
    return redirect("/job?msg={}".format("Apply fail"))
def byte2dict(data) -> dict:
    """Decode a UTF-8 byte string containing a Python literal (typically a
    dict) and evaluate it safely with :func:`ast.literal_eval`."""
    import ast
    decoded = data.decode("UTF-8")
    return ast.literal_eval(decoded)
@job.route("/jobcat", methods=["POST"])
def job_cat_post():
    """Categorize a job description: forward the posted ``job_desc`` to the
    "jobcat" backend via lib.post_data_with_rtn and relay its reply, or an
    empty dict on failure."""
    from flask import request
    import jmapp.lib as lib
    paralst = ["job_desc"]
    # The request body is a byte string holding a Python dict literal.
    rtn=byte2dict(request.data)
    para = {p: rtn.get(p, None) for p in paralst}
    rtn, data = lib.post_data_with_rtn(type="jobcat", data=para)
    if rtn:
        return data
    return {}
@job.route("/offer", methods=["GET"])
def job_offer_get():
    """Record a job offer (job code, applicant id, rate) from query args.

    Fixes: removed a block of commented-out dead code and the unused
    `session` import; the `offer_m.add()` return value was never checked,
    so it is no longer bound to a throwaway name.
    """
    from flask import redirect, request
    paralst = ("job_cde", "aid", "rate")
    para = {p: request.args.get(p, None) for p in paralst}
    offer_m.add(para)
    return redirect("/job")
| 5,000 | 0 | 155 |
f621b801b0d5980457d7d945ef159d32f02dbdd4 | 1,801 | py | Python | setup.py | neinseg/pcb-tools-extension | 71c371ca680483aa9ef18d2998832460dd43abdf | [
"Apache-2.0"
] | 20 | 2019-04-16T05:26:53.000Z | 2022-02-27T17:21:21.000Z | setup.py | neinseg/pcb-tools-extension | 71c371ca680483aa9ef18d2998832460dd43abdf | [
"Apache-2.0"
] | 8 | 2019-07-27T12:51:05.000Z | 2021-04-20T15:48:12.000Z | setup.py | neinseg/pcb-tools-extension | 71c371ca680483aa9ef18d2998832460dd43abdf | [
"Apache-2.0"
] | 10 | 2019-07-23T16:15:37.000Z | 2022-02-27T17:21:27.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 Hiroshi Murayama <opiopan@gmail.com>
import os
METADATA = {
'name': 'pcb-tools-extension',
'version': "0.9.3",
'author': 'Hiroshi Murayama <opiopan@gmail.com>',
'author_email': "opiopan@gmail.com",
'description': ("Extension for pcb-tools package to panelize gerber files"),
'license': "Apache",
'keywords': "pcb gerber tools extension",
'url': "http://github.com/opiopan/pcb-tools-extension",
'packages': ['gerberex'],
'long_description': read('README.md'),
'long_description_content_type': 'text/markdown',
'classifiers': [
"Development Status :: 4 - Beta",
"Topic :: Utilities",
"License :: OSI Approved :: Apache Software License",
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
}
SETUPTOOLS_METADATA = {
'install_requires': ['pcb-tools', 'dxfgrabber'],
}
def install():
    """ Install using setuptools, fallback to distutils.

    setuptools-only metadata (install_requires) is merged in before
    calling setup(); the distutils fallback uses METADATA as-is.
    """
    try:
        from setuptools import setup
        METADATA.update(SETUPTOOLS_METADATA)
        setup(**METADATA)
    except ImportError:
        from sys import stderr
        # Fix: sys.stderr.write() does not append a newline, so the two
        # messages (and following output) ran together on one line.
        # Also fixed the "manualy" typo in the user-facing message.
        stderr.write('Could not import setuptools, using distutils\n')
        stderr.write('NOTE: You will need to install dependencies manually\n')
        from distutils.core import setup
        setup(**METADATA)
if __name__ == '__main__':
    install()
| 31.051724 | 80 | 0.624653 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 Hiroshi Murayama <opiopan@gmail.com>
import os
def read(fname):
    """Return the text of *fname* resolved relative to this setup script.

    Fix: the original left the file handle open (resource leak); a
    context manager closes it deterministically.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Package metadata shared by the setuptools and distutils code paths below.
METADATA = {
    'name': 'pcb-tools-extension',
    'version': "0.9.3",
    'author': 'Hiroshi Murayama <opiopan@gmail.com>',
    'author_email': "opiopan@gmail.com",
    'description': ("Extension for pcb-tools package to panelize gerber files"),
    'license': "Apache",
    'keywords': "pcb gerber tools extension",
    'url': "http://github.com/opiopan/pcb-tools-extension",
    'packages': ['gerberex'],
    # Long description is the README, rendered as Markdown on PyPI.
    'long_description': read('README.md'),
    'long_description_content_type': 'text/markdown',
    'classifiers': [
        "Development Status :: 4 - Beta",
        "Topic :: Utilities",
        "License :: OSI Approved :: Apache Software License",
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
}
# Keys understood only by setuptools; merged into METADATA when available.
SETUPTOOLS_METADATA = {
    'install_requires': ['pcb-tools', 'dxfgrabber'],
}
def install():
    """Install the package: prefer setuptools, fall back to distutils."""
    try:
        from setuptools import setup
        # Merge setuptools-only keys (install_requires) before installing.
        METADATA.update(SETUPTOOLS_METADATA)
        setup(**METADATA)
    except ImportError:
        # No setuptools available: warn on stderr and use plain distutils.
        from distutils.core import setup
        from sys import stderr
        stderr.write('Could not import setuptools, using distutils')
        stderr.write('NOTE: You will need to install dependencies manualy')
        setup(**METADATA)
if __name__ == '__main__':
    install()
| 66 | 0 | 23 |
ec8fb02287cad3ff9807d4f7b4a66b59e0088cb2 | 269 | py | Python | pub_data_visualization/outages/plot/__init__.py | cre-os/pub-data-visualization | e5ec45e6397258646290836fc1a3b39ad69bf266 | [
"MIT"
] | 10 | 2020-10-08T11:35:49.000Z | 2021-01-22T16:47:59.000Z | pub_data_visualization/outages/plot/__init__.py | l-leo/pub-data-visualization | 68eea00491424581b057495a7f0f69cf74e16e7d | [
"MIT"
] | 3 | 2021-03-15T14:26:43.000Z | 2021-12-02T15:27:49.000Z | pub_data_visualization/outages/plot/__init__.py | cre-dev/pub-data-visualization | 229bb7a543684be2cb06935299345ce3263da946 | [
"MIT"
] | 1 | 2021-01-22T16:47:10.000Z | 2021-01-22T16:47:10.000Z |
"""
Module to plot outages data.
"""
from .animated_availability import *
from .evolution_mean_availability import *
from .expected_program import *
from .incremental_programs import *
from .regression_delays import * | 22.416667 | 44 | 0.64684 |
"""
Module to plot outages data.
"""
from .animated_availability import *
from .evolution_mean_availability import *
from .expected_program import *
from .incremental_programs import *
from .regression_delays import * | 0 | 0 | 0 |
21d2d49a8c110f75cd1fd273a1509a324386447e | 613 | py | Python | IPAMpy/IPAM/RIPE.py | xk2600/ipampy | 5f46c382abc5515d1d1d156716e7d17ff8c699dd | [
"BSD-2-Clause"
] | null | null | null | IPAMpy/IPAM/RIPE.py | xk2600/ipampy | 5f46c382abc5515d1d1d156716e7d17ff8c699dd | [
"BSD-2-Clause"
] | null | null | null | IPAMpy/IPAM/RIPE.py | xk2600/ipampy | 5f46c382abc5515d1d1d156716e7d17ff8c699dd | [
"BSD-2-Clause"
] | null | null | null | import IPAM.CLASS
# NOTE: Blocks with a masklen set are supernets of various subnets
# within the originating parent supernet. Most class B and C
# addressing has been rolled up into supernets as /8s. This
# means in most cases, you use/allocate a NET-BLOCK allocation
# first with the correct subnetlen applied. Then when you
# assign segments out of this block, they'll be indexed as seen
# in modern whois implementations. The CLASSFUL allocations
# are purely here for the edge use-case that requires them.
# NOTE(review): `RIPE` is not defined in this view; presumably it is
# created elsewhere in the package before this assignment runs -- confirm.
RIPE.NET2 = IPAM.CLASS.A.subnet( index=2, subnetlen=12 )
| 47.153846 | 69 | 0.719413 | import IPAM.CLASS
# NOTE: Blocks with a masklen set are supernets of various subnets
# within the originating parent supernet. Most class B and C
# addressing has been rolled up into supernets as /8s. This
# means in most cases, you use/allocate a NET-BLOCK allocation
# first with the correct subnetlen applied. Then when you
# assign segments out of this block, they'll be indexed as seen
# in modern whois implementations. The CLASSFUL allocations
# are purely here for the edge use-case that requires them.
# NOTE(review): `RIPE` is not defined in this view; presumably it is
# created elsewhere in the package before this assignment runs -- confirm.
RIPE.NET2 = IPAM.CLASS.A.subnet( index=2, subnetlen=12 )
| 0 | 0 | 0 |
db4ddbe9efa1da57573aa38b0cb40048b1e674ea | 2,134 | py | Python | furnace/datasets/TestData.py | windyrobin/TorchSeg | 304871d578c8f1bb3eb2c896c26528b437001268 | [
"MIT"
] | 3 | 2019-06-03T06:05:35.000Z | 2019-08-06T01:26:24.000Z | furnace/datasets/TestData.py | windyrobin/TorchSeg | 304871d578c8f1bb3eb2c896c26528b437001268 | [
"MIT"
] | null | null | null | furnace/datasets/TestData.py | windyrobin/TorchSeg | 304871d578c8f1bb3eb2c896c26528b437001268 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# encoding: utf-8
# @Time : 2017/12/16 下午8:41
# @Author : yuchangqian
# @Contact : changqian_yu@163.com
# @File : BaseDataset.py
import os
import time
import cv2
import torch
import numpy as np
import torch.utils.data as data
if __name__ == "__main__":
data_setting = {'img_root': ''}
bd = TestData(data_setting, 'test/', None)
print(bd.get_class_names())
| 25.105882 | 65 | 0.615745 | #!/usr/bin/env python3
# encoding: utf-8
# @Time : 2017/12/16 下午8:41
# @Author : yuchangqian
# @Contact : changqian_yu@163.com
# @File : BaseDataset.py
import os
import time
import cv2
import torch
import numpy as np
import torch.utils.data as data
class TestData(data.Dataset):
    """Dataset yielding images read from a flat image directory.

    Each item is a dict with keys:
      data: H x W x C numpy image with the channel axis reversed from
            cv2's BGR order (i.e. RGB),
      fn:   file name without directory and extension,
      n:    total number of files in the dataset.
    """
    def __init__(self, img_path, preprocess=None,
                 file_length=None):
        # img_path: directory whose entries become the dataset items.
        # preprocess: stored but not applied in __getitem__.
        # file_length: accepted but unused -- NOTE(review): confirm intent.
        super(TestData, self).__init__()
        self._img_path = img_path
        self._file_names = self._get_file_names(img_path)
        self.preprocess = preprocess

    def __len__(self):
        return len(self._file_names)

    def __getitem__(self, index):
        name = self._file_names[index]
        img_path = os.path.join(self._img_path, name)
        # strip directory and extension for the output's 'fn' field
        item_name = name.split("/")[-1].split(".")[0]
        img = self._fetch_data(img_path)
        # cv2 loads BGR; reversing the channel axis yields RGB
        img = img[:, :, ::-1]
        output_dict = dict(data=img, fn=str(item_name),
                           n=len(self._file_names))
        return output_dict

    def _fetch_data(self, img_path, dtype=None):
        # Thin wrapper around _open_image; `dtype` is currently ignored.
        img = self._open_image(img_path)
        return img

    #TODO, list dir
    def _get_file_names(self, source):
        # Every directory entry is treated as an image file.
        file_names = os.listdir(source)
        #with open(source) as f:
        #    files = f.readlines()
        #for item in files:
        #    img_name = self._process_item_names(item)
        #    file_names.append(img_name)
        return file_names
        #return file_names[:1]

    @staticmethod
    def _process_item_names(item):
        # Trim surrounding whitespace (e.g. trailing newline of a
        # file-list entry).
        item = item.strip()
        return item

    def get_length(self):
        return self.__len__()

    @staticmethod
    def _open_image(filepath, mode=cv2.IMREAD_COLOR, dtype=None):
        # cv2: B G R
        # h w c
        img = np.array(cv2.imread(filepath, mode), dtype=dtype)
        return img

    @classmethod
    def get_class_colors(*args):
        # Abstract in spirit: subclasses must provide the class colors.
        raise NotImplementedError

    @classmethod
    def get_class_names(*args):
        # Abstract in spirit: subclasses must provide the class names.
        raise NotImplementedError
if __name__ == "__main__":
    # Smoke test. Bug fix: the old call passed a settings dict as
    # `img_path` (TestData takes (img_path, preprocess, file_length)),
    # so _get_file_names() would run os.listdir() on a dict. Pass the
    # directory path positionally instead.
    bd = TestData('test/')
    # get_class_names() raises NotImplementedError on this base class,
    # so report the dataset length instead.
    print(bd.get_length())
| 1,342 | 366 | 23 |
b7ab19aae83a46f97925498d52310bd9d844c53c | 1,231 | py | Python | config/urlshortener/models.py | PozziSan/Django-Url-Shortener | b27d5ad51af3177d7e98fbfb86ff7494a59e190f | [
"MIT"
] | 1 | 2021-07-04T20:00:27.000Z | 2021-07-04T20:00:27.000Z | config/urlshortener/models.py | PozziSan/Django-Url-Shortener | b27d5ad51af3177d7e98fbfb86ff7494a59e190f | [
"MIT"
] | null | null | null | config/urlshortener/models.py | PozziSan/Django-Url-Shortener | b27d5ad51af3177d7e98fbfb86ff7494a59e190f | [
"MIT"
] | null | null | null | '''
Url shortener model
'''
from django.db import models
from django.urls import reverse_lazy
from .utils import create_shortened_url
# Create your models here.
class Shortener(models.Model):
    '''
    Creates a short url based on the long one
    created -> Hour and date a shortener was created
    times_followed -> Times the shortened link has been followed
    long_url -> The original link
    short_url -> shortened link https://domain/(short_url)
    '''
    # Timestamp set once when the row is first inserted.
    created = models.DateTimeField(auto_now_add=True)
    # Redirect counter; starts at zero.
    times_followed = models.PositiveIntegerField(default=0)
    # The original (long) target URL.
    long_url = models.URLField()
    # Unique short code; blank=True means it may be left empty on input --
    # presumably auto-generated elsewhere (see the model's save logic);
    # NOTE(review): confirm.
    short_url = models.CharField(max_length=15, unique=True, blank=True)
| 24.137255 | 87 | 0.669374 | '''
Url shortener model
'''
from django.db import models
from django.urls import reverse_lazy
from .utils import create_shortened_url
# Create your models here.
class Shortener(models.Model):
    '''
    Creates a short url based on the long one
    created -> Hour and date a shortener was created
    times_followed -> Times the shortened link has been followed
    long_url -> The original link
    short_url -> shortened link https://domain/(short_url)
    '''
    created = models.DateTimeField(auto_now_add=True)
    times_followed = models.PositiveIntegerField(default=0)
    long_url = models.URLField()
    # blank=True: the code is auto-generated in save() when not provided.
    short_url = models.CharField(max_length=15, unique=True, blank=True)

    class Meta:
        # Newest shorteners first by default.
        ordering = ["-created"]

    def __str__(self):
        return f'{self.long_url} to {self.short_url}'

    def save(self, *args, **kwargs):
        # If the short url wasn't specified, generate one before writing.
        if not self.short_url:
            # We pass the model instance that is being saved
            self.short_url = create_shortened_url(self)
        super().save(*args, **kwargs)

    def get_absolute_url(self):
        # Resolve to the redirect view for this short code.
        return reverse_lazy("shortener:redirect", kwargs={"short_url": self.short_url})
3118f9bf8329642c4af770d0c020bb23c6d6c071 | 295 | py | Python | mqbench/nn/intrinsic/qat/modules/__init__.py | PannenetsF/MQBench | 4336493ded0bf2bb9f11377e9105b14ec6191c09 | [
"Apache-2.0"
] | 179 | 2021-09-22T08:44:51.000Z | 2022-03-31T08:09:43.000Z | mqbench/nn/intrinsic/qat/modules/__init__.py | PannenetsF/MQBench | 4336493ded0bf2bb9f11377e9105b14ec6191c09 | [
"Apache-2.0"
] | 46 | 2021-09-29T03:04:30.000Z | 2022-03-31T11:53:23.000Z | mqbench/nn/intrinsic/qat/modules/__init__.py | PannenetsF/MQBench | 4336493ded0bf2bb9f11377e9105b14ec6191c09 | [
"Apache-2.0"
] | 42 | 2021-09-24T16:08:26.000Z | 2022-03-30T10:21:34.000Z | from .linear_fused import LinearBn1d
from .deconv_fused import ConvTransposeBnReLU2d, ConvTransposeBn2d, ConvTransposeReLU2d
from .conv_fused import ConvBnReLU2d, ConvBn2d, ConvReLU2d
from .freezebn import ConvFreezebn2d, ConvFreezebnReLU2d, ConvTransposeFreezebn2d, ConvTransposeFreezebnReLU2d
| 59 | 110 | 0.884746 | from .linear_fused import LinearBn1d
from .deconv_fused import ConvTransposeBnReLU2d, ConvTransposeBn2d, ConvTransposeReLU2d
from .conv_fused import ConvBnReLU2d, ConvBn2d, ConvReLU2d
from .freezebn import ConvFreezebn2d, ConvFreezebnReLU2d, ConvTransposeFreezebn2d, ConvTransposeFreezebnReLU2d
| 0 | 0 | 0 |
eb9566a3902e49eb4d59ee45924e4049ded4a56d | 133 | py | Python | Baekjoon/Greedy/temp.py | Gonnnnn/Algorithm | f9cbbbc64e5d62ed75a5e3d5edb7b8cdae6e18e2 | [
"Apache-2.0"
] | null | null | null | Baekjoon/Greedy/temp.py | Gonnnnn/Algorithm | f9cbbbc64e5d62ed75a5e3d5edb7b8cdae6e18e2 | [
"Apache-2.0"
] | null | null | null | Baekjoon/Greedy/temp.py | Gonnnnn/Algorithm | f9cbbbc64e5d62ed75a5e3d5edb7b8cdae6e18e2 | [
"Apache-2.0"
] | null | null | null | # print(bin(10))
# Quick demo: frequency-count a list with collections.Counter.
from collections import Counter

a = [1] * 3 + [2, 3, 4, 5]  # same values: three 1s plus four singletons
t = Counter(a)
print(t)
# Quick demo: frequency-count a list with collections.Counter.
from collections import Counter

a = [1] * 3 + [2, 3, 4, 5]  # same values: three 1s plus four singletons
t = Counter(a)
print(t)
a4cb01eb329943b9161ef968ddfb3bf0a7149d4a | 259 | py | Python | applied_data_science3/framework/FitPredictOutput.py | rlowrance/python_lib | 222fb89308201ffe5fb77f60be1cce79aad7d055 | [
"Apache-2.0"
] | null | null | null | applied_data_science3/framework/FitPredictOutput.py | rlowrance/python_lib | 222fb89308201ffe5fb77f60be1cce79aad7d055 | [
"Apache-2.0"
] | null | null | null | applied_data_science3/framework/FitPredictOutput.py | rlowrance/python_lib | 222fb89308201ffe5fb77f60be1cce79aad7d055 | [
"Apache-2.0"
] | null | null | null | from abc import ABCMeta, abstractmethod
class FitPredictOutput(object):
    'content of output file for program fit_predict.py'
    # Python 2-style ABC declaration. NOTE(review): under Python 3 the
    # `__metaclass__` attribute is ignored, so @abstractmethod would not
    # be enforced -- confirm the target interpreter before porting.
    __metaclass__ = ABCMeta

    @abstractmethod
    def as_dict(self):
        'return a dict with all the fields'
        pass
| 21.583333 | 55 | 0.702703 | from abc import ABCMeta, abstractmethod
class FitPredictOutput(object):
    'content of output file for program fit_predict.py'
    # Python 2-style ABC declaration. NOTE(review): under Python 3 the
    # `__metaclass__` attribute is ignored, so @abstractmethod would not
    # be enforced -- confirm the target interpreter before porting.
    __metaclass__ = ABCMeta

    @abstractmethod
    def as_dict(self):
        'return a dict with all the fields'
        pass
| 0 | 0 | 0 |
91d4b970c1f9f25020a877ef06fdb3284a863fd8 | 950 | py | Python | cctbx_progs/get_cc_mtz_mtz.py | 7l2icj/kamo_clone | 5f4a5eed3cd9d91a021d805e46125c19cc2ed1b6 | [
"BSD-3-Clause"
] | 16 | 2016-05-20T11:19:40.000Z | 2021-01-01T19:44:23.000Z | cctbx_progs/get_cc_mtz_mtz.py | 7l2icj/kamo_clone | 5f4a5eed3cd9d91a021d805e46125c19cc2ed1b6 | [
"BSD-3-Clause"
] | 4 | 2017-03-10T00:51:11.000Z | 2021-02-07T17:18:46.000Z | cctbx_progs/get_cc_mtz_mtz.py | 7l2icj/kamo_clone | 5f4a5eed3cd9d91a021d805e46125c19cc2ed1b6 | [
"BSD-3-Clause"
] | 9 | 2016-12-15T16:00:06.000Z | 2021-09-10T08:34:14.000Z | import iotbx.mtz
from cctbx.array_family import flex
if __name__ == "__main__":
import sys
data = {}
for f in sys.argv[1:]:
data[f] = get_I(f)
for ix in xrange(len(data)-1):
for iy in xrange(ix+1, len(data)):
x, y = data.keys()[ix], data.keys()[iy]
xd, yd = data[x].common_sets(data[y], assert_is_similar_symmetry=False)
corr = flex.linear_correlation(xd.data(), yd.data())
assert corr.is_well_defined()
print x, "vs", y, " cc=", corr.coefficient()
| 31.666667 | 103 | 0.622105 | import iotbx.mtz
from cctbx.array_family import flex
def get_I(mtzin):
    """Return the first intensity array found in the MTZ file *mtzin*.

    Falls back to converting the first amplitude array to intensities;
    returns None when the file holds neither.
    Fixes: as_miller_arrays() was called twice (hoisted to one call);
    len() on a bare filter() object fails under Python 3, so the results
    are materialized with list() (a no-op cost under Python 2); the
    implicit None return is now explicit.
    """
    mtzobj = iotbx.mtz.object(file_name=mtzin)
    arrays = mtzobj.as_miller_arrays()
    I_arrays = list(filter(lambda s: s.info().type_hints_from_file == "intensity", arrays))
    F_arrays = list(filter(lambda s: s.info().type_hints_from_file == "amplitude", arrays))
    if len(I_arrays) > 0:
        return I_arrays[0]
    elif len(F_arrays) > 0:
        return F_arrays[0].as_intensity_array()
    return None
if __name__ == "__main__":
    # Python 2 script: compute pairwise CC between all MTZ files on argv.
    import sys
    data = {}
    for f in sys.argv[1:]:
        data[f] = get_I(f)
    # NOTE(review): data.keys()[ix] relies on Python 2's list-returning
    # keys(); pair order follows the arbitrary dict order.
    for ix in xrange(len(data)-1):
        for iy in xrange(ix+1, len(data)):
            x, y = data.keys()[ix], data.keys()[iy]
            # restrict both arrays to their common reflections
            xd, yd = data[x].common_sets(data[y], assert_is_similar_symmetry=False)
            corr = flex.linear_correlation(xd.data(), yd.data())
            assert corr.is_well_defined()
            print x, "vs", y, " cc=", corr.coefficient()
| 381 | 0 | 23 |
2bd291cd06e8febfb47b9ae9835889acf88da395 | 3,680 | py | Python | experiment/quic/graph.py | taimooralam/master-thesis | dcb988a2866e65e2faef67627428d1ffd1c0182e | [
"MIT"
] | null | null | null | experiment/quic/graph.py | taimooralam/master-thesis | dcb988a2866e65e2faef67627428d1ffd1c0182e | [
"MIT"
] | null | null | null | experiment/quic/graph.py | taimooralam/master-thesis | dcb988a2866e65e2faef67627428d1ffd1c0182e | [
"MIT"
] | null | null | null | #visualize the data that go produced
import os
import sys
import plotly
import plotly.offline as offline
import plotly.graph_objs as go
import helper as help
plotly.tools.set_credentials_file(username="<>", api_key="<>")
gprotocol = sys.argv[2] if len(sys.argv) > 2 and sys.argv[1] == "quic" else "quic"
latency = sys.argv[3] if len(sys.argv) > 3 else "30ms"
packet_loss = sys.argv[4] if len(sys.argv) > 4 else 1
save_file_name_lines = "../graphs/lines/" + gprotocol +"_"+ latency +"_"+ str(packet_loss) + ".html"
save_filename_histogram = "../graphs/histogram/" + gprotocol + "_" + latency + "_" + str(packet_loss) + ".html"
save_filename_histogram_real = "../graphs/histogram/" + gprotocol + "_" + latency + "_" + str(packet_loss) + "_hist.html"
#log_relative_file_path = "./data/creating_quic_graphs.txt"
data_file_name = sys.argv[1] if len(sys.argv) > 1 else "./data/Log.txt"
orig_stdout = sys.stdout
orig_stderr = sys.stderr
if 'log_relative_file_path' in locals():
new_path = help.give_complete_file_path(log_relative_file_path)
f = open(new_path, 'w')
sys.stdout = f
sys.stderr = f
try:
print "Opening the file ..."
# open the file from the command line
print help.give_complete_file_path(data_file_name)
datafile = open(help.give_complete_file_path(data_file_name), "r+")
print "Reading the lines ..."
# read the lines of the file
lines = datafile.readlines()
numberOfPackets = int(lines[0][:-1])
print "Number of packets: {0}".format(numberOfPackets)
final_deltas = [None] * numberOfPackets
print "Converting strings to integers..."
# get the number of the data as an int
print lines[1]
allAsStrings = lines[1].split(' ')[:-1]
for x in allAsStrings:
pair = x.split(":")
print pair
sequence_number = int(pair[0])
delta = float(pair[1])
final_deltas[sequence_number-1] = delta
print "Got the integers..."
#print the delta array
print final_deltas
print "Starting to make graphs"
trace1 = go.Scatter(
x=range(0, numberOfPackets),
y=final_deltas,
mode = 'lines',
name = 'latency3'
)
missed_sequences_data = map( give_dropped , final_deltas)
trace2 = go.Scatter(
x = range(0, numberOfPackets),
y = missed_sequences_data,
mode = 'markers',
name = 'dropped'
)
data = [trace1]
layout_lines = dict(
font=dict(size=20),
xaxis = dict(title="Packet number"),
yaxis = dict(title="Latency (seconds)")
)
fig_lines = dict(data=data, layout=layout_lines)
print help.give_complete_file_path(save_file_name_lines)
offline.plot(fig_lines, filename=help.give_complete_file_path(save_file_name_lines), auto_open=False)
trace3 = go.Box(
x=final_deltas
)
layout_histogram = dict(
font=dict(size=20),
xaxis = dict(title = "Latency(seconds)")
)
fig_box = dict(data = [trace3], layout=layout_histogram)
print help.give_complete_file_path(save_filename_histogram)
offline.plot(fig_box, filename=help.give_complete_file_path(save_filename_histogram), auto_open=False)
trace4 = go.Histogram(
x=final_deltas,
nbinsx=10
)
layout_histogram_real = dict(
font=dict(size=20),
xaxis = dict(title = "Latency(seconds)")
)
fig_hist = dict(data = [trace4], layout=layout_histogram_real)
print help.give_complete_file_path(save_filename_histogram_real)
offline.plot(fig_hist, filename=help.give_complete_file_path(save_filename_histogram_real), auto_open=False)
datafile.close()
#if log_relative_file_path in locals():
#f.close()
sys.stdout = orig_stdout
sys.stderr = orig_stderr
except IOError:
print "Could not open the file"
print sys.exc_info()
except:
print "An error occured\n"
print sys.exc_info()
| 28.091603 | 121 | 0.727446 | #visualize the data that go produced
import os
import sys
import plotly
import plotly.offline as offline
import plotly.graph_objs as go
import helper as help
plotly.tools.set_credentials_file(username="<>", api_key="<>")
gprotocol = sys.argv[2] if len(sys.argv) > 2 and sys.argv[1] == "quic" else "quic"
latency = sys.argv[3] if len(sys.argv) > 3 else "30ms"
packet_loss = sys.argv[4] if len(sys.argv) > 4 else 1
save_file_name_lines = "../graphs/lines/" + gprotocol +"_"+ latency +"_"+ str(packet_loss) + ".html"
save_filename_histogram = "../graphs/histogram/" + gprotocol + "_" + latency + "_" + str(packet_loss) + ".html"
save_filename_histogram_real = "../graphs/histogram/" + gprotocol + "_" + latency + "_" + str(packet_loss) + "_hist.html"
#log_relative_file_path = "./data/creating_quic_graphs.txt"
data_file_name = sys.argv[1] if len(sys.argv) > 1 else "./data/Log.txt"
orig_stdout = sys.stdout
orig_stderr = sys.stderr
if 'log_relative_file_path' in locals():
new_path = help.give_complete_file_path(log_relative_file_path)
f = open(new_path, 'w')
sys.stdout = f
sys.stderr = f
def give_dropped(val):
if val is None:
return 0
else:
return None
try:
print "Opening the file ..."
# open the file from the command line
print help.give_complete_file_path(data_file_name)
datafile = open(help.give_complete_file_path(data_file_name), "r+")
print "Reading the lines ..."
# read the lines of the file
lines = datafile.readlines()
numberOfPackets = int(lines[0][:-1])
print "Number of packets: {0}".format(numberOfPackets)
final_deltas = [None] * numberOfPackets
print "Converting strings to integers..."
# get the number of the data as an int
print lines[1]
allAsStrings = lines[1].split(' ')[:-1]
for x in allAsStrings:
pair = x.split(":")
print pair
sequence_number = int(pair[0])
delta = float(pair[1])
final_deltas[sequence_number-1] = delta
print "Got the integers..."
#print the delta array
print final_deltas
print "Starting to make graphs"
trace1 = go.Scatter(
x=range(0, numberOfPackets),
y=final_deltas,
mode = 'lines',
name = 'latency3'
)
missed_sequences_data = map( give_dropped , final_deltas)
trace2 = go.Scatter(
x = range(0, numberOfPackets),
y = missed_sequences_data,
mode = 'markers',
name = 'dropped'
)
data = [trace1]
layout_lines = dict(
font=dict(size=20),
xaxis = dict(title="Packet number"),
yaxis = dict(title="Latency (seconds)")
)
fig_lines = dict(data=data, layout=layout_lines)
print help.give_complete_file_path(save_file_name_lines)
offline.plot(fig_lines, filename=help.give_complete_file_path(save_file_name_lines), auto_open=False)
trace3 = go.Box(
x=final_deltas
)
layout_histogram = dict(
font=dict(size=20),
xaxis = dict(title = "Latency(seconds)")
)
fig_box = dict(data = [trace3], layout=layout_histogram)
print help.give_complete_file_path(save_filename_histogram)
offline.plot(fig_box, filename=help.give_complete_file_path(save_filename_histogram), auto_open=False)
trace4 = go.Histogram(
x=final_deltas,
nbinsx=10
)
layout_histogram_real = dict(
font=dict(size=20),
xaxis = dict(title = "Latency(seconds)")
)
fig_hist = dict(data = [trace4], layout=layout_histogram_real)
print help.give_complete_file_path(save_filename_histogram_real)
offline.plot(fig_hist, filename=help.give_complete_file_path(save_filename_histogram_real), auto_open=False)
datafile.close()
#if log_relative_file_path in locals():
#f.close()
sys.stdout = orig_stdout
sys.stderr = orig_stderr
except IOError:
print "Could not open the file"
print sys.exc_info()
except:
print "An error occured\n"
print sys.exc_info()
| 50 | 0 | 23 |
0f44aa3f0b484305b53fb972edf33e276c2a802a | 3,440 | py | Python | pydl/examples/spiral_fc_example.py | nash911/PyDL | b0b6f599184c0046f503b9ee1703dc3dfe9a89f2 | [
"MIT"
] | null | null | null | pydl/examples/spiral_fc_example.py | nash911/PyDL | b0b6f599184c0046f503b9ee1703dc3dfe9a89f2 | [
"MIT"
] | null | null | null | pydl/examples/spiral_fc_example.py | nash911/PyDL | b0b6f599184c0046f503b9ee1703dc3dfe9a89f2 | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------
# MIT License
#
# Copyright (c) [2021] [Avinash Ranganath]
#
# This code is part of the library PyDL <https://github.com/nash911/PyDL>
# This code is licensed under MIT license (see LICENSE.txt for details)
# ------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from pydl.nn.layers import FC
from pydl.nn.nn import NN
from pydl.training.sgd import SGD
from pydl.training.momentum import Momentum
from pydl.training.rmsprop import RMSprop
from pydl.training.adam import Adam
if __name__ == '__main__':
main()
| 36.210526 | 84 | 0.600581 | # ------------------------------------------------------------------------
# MIT License
#
# Copyright (c) [2021] [Avinash Ranganath]
#
# This code is part of the library PyDL <https://github.com/nash911/PyDL>
# This code is licensed under MIT license (see LICENSE.txt for details)
# ------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from pydl.nn.layers import FC
from pydl.nn.nn import NN
from pydl.training.sgd import SGD
from pydl.training.momentum import Momentum
from pydl.training.rmsprop import RMSprop
from pydl.training.adam import Adam
def main():
    """Train a 2-layer FC net on the 3-class spiral toy dataset with four
    optimizers (SGD, Momentum, RMSprop, Adam) and plot each run."""
    N = 100  # number of points per class
    D = 2  # dimensionality
    K = 3  # number of classes
    X = np.zeros((N * K, D))  # data matrix (each row = single example)
    y = np.zeros(N * K, dtype='uint8')  # class labels
    # Generate one interleaved, noisy spiral arm per class.
    for j in range(K):
        ix = range(N * j, N * (j + 1))
        r = np.linspace(0.0, 1, N)  # radius
        t = np.linspace(j * 4, (j + 1) * 4, N) + np.random.randn(N) * 0.2  # theta
        X[ix] = np.c_[r * np.sin(t), r * np.cos(t)]
        y[ix] = j
    # Data Stats
    print("Data Size: ", X.shape[0])
    print("Feature Size: ", X.shape[1])
    print("Data Min: ", np.min(X, axis=0))
    print("Data Max: ", np.max(X, axis=0))
    print("Data Range: ", np.max(X, axis=0) - np.min(X, axis=0))
    print("Data Mean: ", np.mean(X, axis=0))
    print("Data STD: ", np.std(X, axis=0))
    # Visualize Data (blocks until a key/button press):
    fig = plt.figure()
    plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
    plt.draw()
    plt.waitforbuttonpress(0)
    plt.close(fig)
    # SoftMax Cross Entropy - SGD
    l1_a = FC(X, num_neurons=int(100), bias=True, activation_fn='ReLU')
    l2_a = FC(l1_a, num_neurons=K, bias=True, activation_fn='SoftMax')
    layers_a = [l1_a, l2_a]
    nn_a = NN(X, layers_a)
    sgd = SGD(nn_a, step_size=1e-2, reg_lambda=1e-3)
    sgd.train(X, y, normalize='mean', batch_size=256, epochs=10000, y_onehot=False,
              log_freq=100, plot='SGD - Softmax')
    # Sigmoid Cross Entropy - Momentum
    l1_b = FC(X, num_neurons=int(100), bias=True, activation_fn='Tanh')
    l2_b = FC(l1_b, num_neurons=K, bias=True, activation_fn='Sigmoid')
    layers_b = [l1_b, l2_b]
    nn_b = NN(X, layers_b)
    momentum = Momentum(nn_b, step_size=1e-2, mu=0.5, reg_lambda=1e-3)
    momentum.train(X, y, batch_size=256, epochs=20000, y_onehot=False, log_freq=100,
                   plot='Momentum - Sigmoid')
    # Sigmoid Cross Entropy - RMSprop
    l1_c = FC(X, num_neurons=int(100), bias=True, activation_fn='ReLU')
    l2_c = FC(l1_c, num_neurons=K, bias=True, activation_fn='Sigmoid')
    layers_c = [l1_c, l2_c]
    nn_c = NN(X, layers_c)
    rms = RMSprop(nn_c, step_size=1e-2, beta=0.999, reg_lambda=1e-3)
    rms.train(X, y, batch_size=256, epochs=10000, y_onehot=False, log_freq=100,
              plot='RMSprop - Sigmoid')
    # Softmax Cross Entropy - Adam
    l1_d = FC(X, num_neurons=int(100), bias=True, activation_fn='Tanh')
    l2_d = FC(l1_d, num_neurons=K, bias=True, activation_fn='SoftMax')
    layers_d = [l1_d, l2_d]
    nn_d = NN(X, layers_d)
    adam = Adam(nn_d, beta_1=0.9, beta_2=0.999, step_size=1e-3, reg_lambda=1e-3)
    adam.train(X, y, batch_size=256, epochs=10000, y_onehot=False, log_freq=100,
               plot='Adam - SoftMax')
    # Keep the plot windows alive until the user confirms.
    input("Press Enter to continue...")
if __name__ == '__main__':
    main()
| 2,754 | 0 | 23 |
5bfb9c4ee3e113fed6cbd6d8c510e14cc3e301df | 6,612 | py | Python | tests/test_dsp_interpolate_spectrum.py | pyfar/pyfar | 984e61c9b90335f774f16699c9bcc18e422e7ecf | [
"MIT"
] | 23 | 2020-11-05T10:10:33.000Z | 2022-03-23T19:22:18.000Z | tests/test_dsp_interpolate_spectrum.py | pyfar/pyfar | 984e61c9b90335f774f16699c9bcc18e422e7ecf | [
"MIT"
] | 144 | 2020-11-05T16:39:27.000Z | 2022-03-24T18:59:39.000Z | tests/test_dsp_interpolate_spectrum.py | pyfar/pyfar | 984e61c9b90335f774f16699c9bcc18e422e7ecf | [
"MIT"
] | 4 | 2020-11-11T20:06:52.000Z | 2021-08-11T17:14:29.000Z | import pytest
from pytest import raises
import numpy as np
import numpy.testing as npt
import matplotlib.pyplot as plt
import pyfar as pf
from pyfar.dsp import InterpolateSpectrum
# TODO: Finish `test_interpolation()` for 'magnitude_minimum'
def test_init():
"""Test return objects"""
fd = pf.FrequencyData([1, .5], [100, 200])
# interpolation object
interpolator = InterpolateSpectrum(
fd, "complex", ("linear", "linear", "linear"))
assert isinstance(interpolator, InterpolateSpectrum)
# interpolation result
signal = interpolator(8, 44100)
assert isinstance(signal, pf.Signal)
def test_init_assertions():
"""Test if init raises assertions correctly"""
fd = pf.FrequencyData([1, .5], [100, 200])
# data (invalid type)
with raises(TypeError, match="data must be"):
InterpolateSpectrum(1, "complex", ("linear", "linear", "linear"))
# data (invalid FFT normalization)
with raises(ValueError, match="data.fft_norm is 'rms'"):
fd_rms = pf.FrequencyData([1, .5], [100, 200], 'rms')
InterpolateSpectrum(
fd_rms, "complex", ("linear", "linear", "linear"))
# data (not enough bins)
with raises(ValueError, match="data.n_bins must be at least 2"):
fd_short = pf.FrequencyData(1, 100)
InterpolateSpectrum(
fd_short, "complex", ("linear", "linear", "linear"))
# test invalid method
with raises(ValueError, match="method is 'invalid'"):
InterpolateSpectrum(fd, "invalid", ("linear", "linear", "linear"))
# test kind (invald type)
with raises(ValueError, match="kind must be a tuple of length 3"):
InterpolateSpectrum(fd, "complex", "linear")
# test kind (invalid length)
with raises(ValueError, match="kind must be a tuple of length 3"):
InterpolateSpectrum(fd, "complex", ("linear", "linear"))
# test kind (wrong entry)
with raises(ValueError, match="kind contains 'wrong'"):
InterpolateSpectrum(fd, "complex", ("linear", "linear", "wrong"))
# test fscale
with raises(ValueError, match="fscale is 'nice'"):
InterpolateSpectrum(
fd, "complex", ("linear", "linear", "linear"), fscale="nice")
# test clip (wrong value of bool)
with raises(ValueError, match="clip must be a tuple of length 2"):
InterpolateSpectrum(
fd, "complex", ("linear", "linear", "linear"), clip=True)
# test clip (invalid type)
with raises(ValueError, match="clip must be a tuple of length 2"):
InterpolateSpectrum(
fd, "complex", ("linear", "linear", "linear"), clip=1)
# test clip (invalid length)
with raises(ValueError, match="clip must be a tuple of length 2"):
InterpolateSpectrum(
fd, "complex", ("linear", "linear", "linear"), clip=(1, 2, 3))
@pytest.mark.parametrize(
"method, freq_in, frequencies, n_samples, sampling_rate, freq_out",
[
("complex", [1+2j, 2+1j], [1, 2], 12, 6,
[0+3j, 0.5+2.5j, 1+2j, 1.5+1.5j, 2+1j, 2.5+0.5j, 3+0j]),
("magnitude_phase",
# magnitude increases with 1 per Hz, phase with pi per Hz
[np.linspace(1, 2, 3) * np.exp(-1j * np.linspace(np.pi, np.pi*2, 3))],
[1, 1.5, 2], 24, 6,
# freq_out be means of magnitude and unwrapped phase response
[np.linspace(0, 3, 13), np.linspace(0, 3*np.pi, 13)]),
("magnitude", [1, 2], [1, 2], 12, 6,
[0, .5, 1, 1.5, 2, 2.5, 3])
])
def test_interpolation(
method, freq_in, frequencies, freq_out, n_samples, sampling_rate):
"""
Test the if the interpolated spectrum matches the reference across methods.
"""
# create test data
data = pf.FrequencyData(freq_in, frequencies)
interpolator = InterpolateSpectrum(
data, method, ("linear", "linear", "linear"))
signal = interpolator(n_samples, sampling_rate)
# check output depending on method
if method == "magnitude_phase":
# test magnitude and unwrapped phase response
npt.assert_allclose(np.abs(signal.freq), np.atleast_2d(freq_out[0]))
npt.assert_allclose(pf.dsp.phase(signal, unwrap=True),
np.atleast_2d(freq_out[1]))
else:
# test complex spectrum
npt.assert_allclose(signal.freq, np.atleast_2d(freq_out))
def test_clip():
    """Verify that the clip option bounds the interpolated magnitude."""
    data = pf.FrequencyData([1, 2], [1, 2])

    # reference: interpolation without clipping leaves the data range
    unclipped = InterpolateSpectrum(
        data, "magnitude", ("linear", "linear", "linear"))(6, 6)
    # interpolation with clipping must stay inside [1, 2]
    clipped = InterpolateSpectrum(
        data, "magnitude", ("linear", "linear", "linear"), clip=(1, 2))(6, 6)

    magnitude_no_clip = np.abs(unclipped.freq)
    magnitude_clip = np.abs(clipped.freq)
    assert np.any(magnitude_no_clip < 1) and np.any(magnitude_no_clip > 2)
    assert np.all(magnitude_clip >= 1) and np.all(magnitude_clip <= 2)
def test_fscale():
    """Check the internal frequency vectors for linear and log interpolation."""
    # input frequencies and the query grid derived from the FFT parameters
    f_in_lin = [0, 10, 20]
    f_in_log = np.log([10, 10, 20])
    n_samples = 10
    sampling_rate = 40
    f_query_lin = pf.dsp.fft.rfftfreq(n_samples, sampling_rate)
    # the log query grid replaces the 0 Hz bin before taking the logarithm
    f_query_log = f_query_lin.copy()
    f_query_log[0] = f_query_log[1]
    f_query_log = np.log(f_query_log)

    data = pf.FrequencyData([1, 1, 1], f_in_lin)

    # run one interpolator per frequency scale and keep it for inspection
    interpolators = {}
    for scale in ("linear", "log"):
        interpolators[scale] = InterpolateSpectrum(
            data, "magnitude", ("linear", "linear", "linear"), fscale=scale)
        _ = interpolators[scale](n_samples, sampling_rate)

    # compare the internally stored input and query frequency vectors
    npt.assert_allclose(interpolators["linear"]._f_in, f_in_lin)
    npt.assert_allclose(interpolators["linear"]._f_query, f_query_lin)
    npt.assert_allclose(interpolators["log"]._f_in, f_in_log)
    npt.assert_allclose(interpolators["log"]._f_query, f_query_log)
def test_show():
    """Smoke test for the inspection plot.

    Only checks that plotting finishes without raising. The figure content
    is deliberately not inspected to keep the test fast.
    """
    interpolator = InterpolateSpectrum(
        pf.FrequencyData([1, 2], [1, 2]),
        "magnitude", ("linear", "linear", "linear"))
    _ = interpolator(10, 10, show=True)
    plt.close()
| 36.530387 | 79 | 0.647005 | import pytest
from pytest import raises
import numpy as np
import numpy.testing as npt
import matplotlib.pyplot as plt
import pyfar as pf
from pyfar.dsp import InterpolateSpectrum
# TODO: Finish `test_interpolation()` for 'magnitude_minimum'
def test_init():
    """Check the types returned by the constructor and by evaluating it."""
    data = pf.FrequencyData([1, .5], [100, 200])

    # constructing must yield an InterpolateSpectrum object
    interpolator = InterpolateSpectrum(
        data, "complex", ("linear", "linear", "linear"))
    assert isinstance(interpolator, InterpolateSpectrum)

    # evaluating the interpolator must yield a pyfar Signal
    result = interpolator(8, 44100)
    assert isinstance(result, pf.Signal)
def test_init_assertions():
    """Ensure the constructor rejects invalid input with clear messages."""
    fd = pf.FrequencyData([1, .5], [100, 200])
    kinds = ("linear", "linear", "linear")

    # wrong data type
    with raises(TypeError, match="data must be"):
        InterpolateSpectrum(1, "complex", kinds)
    # wrong FFT normalization
    with raises(ValueError, match="data.fft_norm is 'rms'"):
        fd_rms = pf.FrequencyData([1, .5], [100, 200], 'rms')
        InterpolateSpectrum(fd_rms, "complex", kinds)
    # too few frequency bins
    with raises(ValueError, match="data.n_bins must be at least 2"):
        fd_short = pf.FrequencyData(1, 100)
        InterpolateSpectrum(fd_short, "complex", kinds)
    # unknown interpolation method
    with raises(ValueError, match="method is 'invalid'"):
        InterpolateSpectrum(fd, "invalid", kinds)

    # invalid kind arguments: wrong type, wrong length, wrong entry
    bad_kinds = [
        ("kind must be a tuple of length 3", "linear"),
        ("kind must be a tuple of length 3", ("linear", "linear")),
        ("kind contains 'wrong'", ("linear", "linear", "wrong")),
    ]
    for message, kind in bad_kinds:
        with raises(ValueError, match=message):
            InterpolateSpectrum(fd, "complex", kind)

    # invalid frequency scale
    with raises(ValueError, match="fscale is 'nice'"):
        InterpolateSpectrum(fd, "complex", kinds, fscale="nice")

    # invalid clip arguments: bare bool, wrong type, wrong length
    for clip in (True, 1, (1, 2, 3)):
        with raises(ValueError, match="clip must be a tuple of length 2"):
            InterpolateSpectrum(fd, "complex", kinds, clip=clip)
@pytest.mark.parametrize(
    "method, freq_in, frequencies, n_samples, sampling_rate, freq_out",
    [
        ("complex", [1+2j, 2+1j], [1, 2], 12, 6,
         [0+3j, 0.5+2.5j, 1+2j, 1.5+1.5j, 2+1j, 2.5+0.5j, 3+0j]),
        ("magnitude_phase",
         # magnitude grows by 1 per Hz, phase by pi per Hz
         [np.linspace(1, 2, 3) * np.exp(-1j * np.linspace(np.pi, np.pi*2, 3))],
         [1, 1.5, 2], 24, 6,
         # reference magnitude and unwrapped phase response
         [np.linspace(0, 3, 13), np.linspace(0, 3*np.pi, 13)]),
        ("magnitude", [1, 2], [1, 2], 12, 6,
         [0, .5, 1, 1.5, 2, 2.5, 3])
    ])
def test_interpolation(
        method, freq_in, frequencies, freq_out, n_samples, sampling_rate):
    """Interpolated spectra must match the reference for every method."""
    source = pf.FrequencyData(freq_in, frequencies)
    interpolate = InterpolateSpectrum(
        source, method, ("linear", "linear", "linear"))
    signal = interpolate(n_samples, sampling_rate)

    if method != "magnitude_phase":
        # complex/magnitude: compare the spectrum as a whole
        npt.assert_allclose(signal.freq, np.atleast_2d(freq_out))
    else:
        # magnitude_phase: compare magnitude and unwrapped phase separately
        npt.assert_allclose(np.abs(signal.freq), np.atleast_2d(freq_out[0]))
        npt.assert_allclose(
            pf.dsp.phase(signal, unwrap=True), np.atleast_2d(freq_out[1]))
def test_clip():
    """Test if clipping the magnitude data works."""
    # two-bin magnitude data; interpolating across the full FFT grid
    # produces values outside [1, 2] (asserted below for the no-clip case)
    data = pf.FrequencyData([1, 2], [1, 2])
    # interpolate with and without clipping
    interpolator = InterpolateSpectrum(
        data, "magnitude", ("linear", "linear", "linear"))
    signal_no_clip = interpolator(6, 6)
    interpolator = InterpolateSpectrum(
        data, "magnitude", ("linear", "linear", "linear"), clip=(1, 2))
    signal_clip = interpolator(6, 6)
    # without clip: values below 1 and above 2 must occur
    assert np.any(np.abs(signal_no_clip.freq) < 1) and \
        np.any(np.abs(signal_no_clip.freq) > 2)
    # with clip: all values must stay within [1, 2]
    assert np.all(np.abs(signal_clip.freq) >= 1) and \
        np.all(np.abs(signal_clip.freq) <= 2)
def test_fscale():
    """
    Test frequency vectors for linear and logarithmic frequency interpolation.
    """
    # test parameters and data
    f_in_lin = [0, 10, 20]
    f_in_log = np.log([10, 10, 20])
    n_samples = 10
    sampling_rate = 40
    f_query_lin = pf.dsp.fft.rfftfreq(n_samples, sampling_rate)
    # the log query grid replaces the 0 Hz bin by the first bin before
    # taking the logarithm (log(0) is undefined)
    f_query_log = f_query_lin.copy()
    f_query_log[0] = f_query_log[1]
    f_query_log = np.log(f_query_log)
    data = pf.FrequencyData([1, 1, 1], f_in_lin)
    # generate interpolator with linear frequency
    interpolator_lin = InterpolateSpectrum(
        data, "magnitude", ("linear", "linear", "linear"), fscale="linear")
    _ = interpolator_lin(n_samples, sampling_rate)
    # generate interpolator with logarithmic frequency
    interpolator_log = InterpolateSpectrum(
        data, "magnitude", ("linear", "linear", "linear"), fscale="log")
    _ = interpolator_log(n_samples, sampling_rate)
    # test frequency vectors (the private attributes hold the input and
    # query frequencies the interpolator actually used)
    npt.assert_allclose(interpolator_lin._f_in, f_in_lin)
    npt.assert_allclose(interpolator_lin._f_query, f_query_lin)
    npt.assert_allclose(interpolator_log._f_in, f_in_log)
    npt.assert_allclose(interpolator_log._f_query, f_query_log)
def test_show():
    """Test plotting the results.
    This only tests if the code finishes without errors. Because the plot is
    an informal plot for inspection, we don't test specifics of the figure and
    axes to speed up the testing."""
    data = pf.FrequencyData([1, 2], [1, 2])
    interpolator = InterpolateSpectrum(
        data, "magnitude", ("linear", "linear", "linear"))
    # show=True triggers the inspection plot during interpolation
    _ = interpolator(10, 10, show=True)
    # close the figure so it does not leak into other tests
    plt.close()
| 0 | 0 | 0 |
6481a21c660a997c6a55626782d2d8522c077632 | 3,810 | py | Python | tests/test_command_execution.py | welchbj/almanac | 91db5921a27f7d089b4ad8463ffb6e1453c5126a | [
"MIT"
] | 4 | 2020-08-04T10:59:10.000Z | 2021-08-23T13:42:03.000Z | tests/test_command_execution.py | welchbj/almanac | 91db5921a27f7d089b4ad8463ffb6e1453c5126a | [
"MIT"
] | null | null | null | tests/test_command_execution.py | welchbj/almanac | 91db5921a27f7d089b4ad8463ffb6e1453c5126a | [
"MIT"
] | 2 | 2021-07-20T04:49:22.000Z | 2021-08-23T13:42:23.000Z | """Tests for execution of various command lines."""
import pytest
from almanac import (
MissingArgumentsError,
NoSuchArgumentError,
TooManyPositionalArgumentsError
)
from .utils import get_test_app
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
| 29.765625 | 78 | 0.670866 | """Tests for execution of various command lines."""
import pytest
from almanac import (
MissingArgumentsError,
NoSuchArgumentError,
TooManyPositionalArgumentsError
)
from .utils import get_test_app
@pytest.mark.asyncio
async def test_simple_type_promotion():
    """A promoter registered for int converts parsed int arguments to bool."""
    app = get_test_app()
    app.add_promoter_for_type(int, bool)
    @app.cmd.register()
    async def cmd(arg: int):
        # the parsed value 1 must arrive promoted to the bool True
        assert type(arg) == bool
        assert arg is True
    await app.eval_line('cmd arg=1')
@pytest.mark.asyncio
async def test_var_args_type_promotions():
    """Promotions apply to *args and **kwargs annotations as well."""
    app = get_test_app()
    app.add_promoter_for_type(int, str)
    @app.cmd.register()
    async def cmd_var_pos_args(*args: int):
        # positional values 0..5 must arrive as their string representation
        for i, x in enumerate(args):
            assert type(x) == str
            assert x == str(i)
    await app.eval_line('cmd_var_pos_args 0 1 2 3 4 5')
    @app.cmd.register()
    async def cmd_var_kw_args(**kwargs: int):
        # every keyword value must arrive as the string '18'
        for key, val in kwargs.items():
            assert type(val) == str
            assert val == '18'
    await app.eval_line('cmd_var_kw_args one=18 two=18 three=18')
@pytest.mark.asyncio
async def test_missing_pos_args():
    """Omitting required positional arguments raises MissingArgumentsError."""
    app = get_test_app(propagate_runtime_exceptions=True)
    @app.cmd.register()
    async def some_command(arg1: int, arg2: int, arg3: int = 3, *, arg4: int):
        pass
    # arg2 omitted
    with pytest.raises(MissingArgumentsError) as ctx:
        await app.eval_line('some_command 1 arg3=3 arg4=4')
    assert ctx.value.missing_args == ('arg2',)
    # arg2 and the keyword-only arg4 omitted
    with pytest.raises(MissingArgumentsError) as ctx:
        await app.eval_line('some_command 1')
    assert ctx.value.missing_args == ('arg2', 'arg4',)
    # both required positionals omitted
    with pytest.raises(MissingArgumentsError) as ctx:
        await app.eval_line('some_command arg4=4')
    assert ctx.value.missing_args == ('arg1', 'arg2',)
@pytest.mark.asyncio
async def test_missing_kw_args():
    """Omitting required keyword-only arguments raises MissingArgumentsError."""
    app = get_test_app(propagate_runtime_exceptions=True)
    @app.cmd.register()
    async def some_command(arg1: int, arg2: int = 2, *, arg3: int, arg4: int):
        pass
    # arg4 omitted
    with pytest.raises(MissingArgumentsError) as ctx:
        await app.eval_line('some_command 1 arg3=3')
    assert ctx.value.missing_args == ('arg4',)
    # arg3 omitted
    with pytest.raises(MissingArgumentsError) as ctx:
        await app.eval_line('some_command 1 arg4=4')
    assert ctx.value.missing_args == ('arg3',)
    # both keyword-only arguments omitted
    with pytest.raises(MissingArgumentsError) as ctx:
        await app.eval_line('some_command 1 arg2=2')
    assert ctx.value.missing_args == ('arg3', 'arg4',)
@pytest.mark.asyncio
async def test_too_many_pos_args():
    """Surplus positional values raise TooManyPositionalArgumentsError."""
    app = get_test_app(propagate_runtime_exceptions=True)
    @app.cmd.register()
    async def some_command(arg1: int, arg2: int, arg3: int = 3, *, arg4: int):
        pass
    # one extra positional value (4)
    with pytest.raises(TooManyPositionalArgumentsError) as ctx:
        await app.eval_line('some_command 1 2 3 4 arg4=4')
    assert ctx.value.values == (4,)
    # two extra positional values (4 and 5)
    with pytest.raises(TooManyPositionalArgumentsError) as ctx:
        await app.eval_line('some_command 1 2 3 4 5 arg4=4')
    assert ctx.value.values == (4, 5,)
@pytest.mark.asyncio
async def test_extra_kw_args():
    """Unknown keyword names raise NoSuchArgumentError.

    The first parameter is renamed to 'A' via @app.arg.a, so passing the
    original name 'a' must be reported as unknown.
    """
    app = get_test_app(propagate_runtime_exceptions=True)
    @app.cmd.register()
    @app.arg.a(name='A')
    async def some_command(a: int, b: str, x: bool = False):
        pass
    with pytest.raises(NoSuchArgumentError) as ctx:
        await app.eval_line('some_command a=1 b="a string" x=False')
    assert ctx.value.names == ('a',)
    # 'a' stays unknown even when the renamed 'A' is supplied as well
    with pytest.raises(NoSuchArgumentError) as ctx:
        await app.eval_line('some_command A=1 a=1 b="a string" x=False')
    assert ctx.value.names == ('a',)
    # several unknown names are reported together
    with pytest.raises(NoSuchArgumentError) as ctx:
        await app.eval_line('some_command A=1 b=2 c=3 x=True y=4 z=[1,2,3]')
    assert ctx.value.names == ('c', 'y', 'z',)
| 3,327 | 0 | 132 |
6b947f35ee1b57ac96afc40e7c9c468c9397cdc0 | 718 | py | Python | meiduo_mall/meiduo_mall/apps/meiduo_admin/views/option.py | ZHD165/Django_- | f89c80a22c5065b46900a20bd505614b5bcb2e6e | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/meiduo_admin/views/option.py | ZHD165/Django_- | f89c80a22c5065b46900a20bd505614b5bcb2e6e | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/meiduo_admin/views/option.py | ZHD165/Django_- | f89c80a22c5065b46900a20bd505614b5bcb2e6e | [
"MIT"
] | null | null | null | from rest_framework.viewsets import ModelViewSet
from meiduo_admin.utils import PageNum
from meiduo_admin.serializers.option import OptionSerialzier
from goods.models import SpecificationOption
class OptionsView(ModelViewSet):
    """
    CRUD API (create/read/update/delete) for specification options.
    """
    # serializes SpecificationOption instances for the admin API
    serializer_class = OptionSerialzier
    queryset = SpecificationOption.objects.all()
    # paginate list responses with the project-wide page-number class
    pagination_class = PageNum
from rest_framework.generics import ListAPIView
from goods.models import GoodsSpecification
from meiduo_admin.serializers.option import OptionSpecificationSerializer
class OptionSimple(ListAPIView):
    """
    Read-only list endpoint that returns specification information.
    """
    serializer_class = OptionSpecificationSerializer
    # NOTE(review): despite the class name, this lists GoodsSpecification
    # rows, not SpecificationOption rows
    queryset = GoodsSpecification.objects.all()
| 32.636364 | 73 | 0.791086 | from rest_framework.viewsets import ModelViewSet
from meiduo_admin.utils import PageNum
from meiduo_admin.serializers.option import OptionSerialzier
from goods.models import SpecificationOption
class OptionsView(ModelViewSet):
    """
    CRUD API (create/read/update/delete) for specification options.
    """
    # serializes SpecificationOption instances for the admin API
    serializer_class = OptionSerialzier
    queryset = SpecificationOption.objects.all()
    # paginate list responses with the project-wide page-number class
    pagination_class = PageNum
from rest_framework.generics import ListAPIView
from goods.models import GoodsSpecification
from meiduo_admin.serializers.option import OptionSpecificationSerializer
class OptionSimple(ListAPIView):
    """
    Read-only list endpoint that returns specification information.
    """
    serializer_class = OptionSpecificationSerializer
    # NOTE(review): despite the class name, this lists GoodsSpecification
    # rows, not SpecificationOption rows
    queryset = GoodsSpecification.objects.all()
| 0 | 0 | 0 |
27b8833491ef3ccaa44ea26899c4ac0ffeff5c0a | 1,275 | py | Python | medium/79.py | pisskidney/leetcode | 08c19cbf3d7afc897908ea05db4ad11a5487f523 | [
"MIT"
] | null | null | null | medium/79.py | pisskidney/leetcode | 08c19cbf3d7afc897908ea05db4ad11a5487f523 | [
"MIT"
] | null | null | null | medium/79.py | pisskidney/leetcode | 08c19cbf3d7afc897908ea05db4ad11a5487f523 | [
"MIT"
] | null | null | null | """
79. Word Search
https://leetcode.com/problems/word-search/
"""
from typing import List
if __name__ == '__main__':
raise(SystemExit(main()))
| 26.020408 | 96 | 0.418824 | """
79. Word Search
https://leetcode.com/problems/word-search/
"""
from typing import List
class Solution:
    """Solution for LeetCode 79 ("Word Search")."""

    def exist(self, board: List[List[str]], word: str) -> bool:
        """Return True if ``word`` can be traced on ``board``.

        The word is built from sequentially adjacent (horizontal or
        vertical) cells, and each cell may be used at most once per path.
        A backtracking DFS is started from every cell; unlike the original
        implementation, the scan stops at the first successful start cell
        instead of collecting a result for every cell before calling any().
        """
        # guard degenerate inputs: an empty word always matches, while an
        # empty board can match nothing else (original crashed on board=[])
        if not word:
            return True
        if not board or not board[0]:
            return False

        n, m = len(board), len(board[0])

        def go(visited, i, j, remaining):
            # the whole word has been matched
            if not remaining:
                return True
            # out of bounds, already used on this path, or wrong letter
            if not (0 <= i < n and 0 <= j < m):
                return False
            if (i, j) in visited or board[i][j] != remaining[0]:
                return False
            # mark the cell, explore the four neighbours, then backtrack
            visited.add((i, j))
            rest = remaining[1:]
            found = (go(visited, i - 1, j, rest) or
                     go(visited, i + 1, j, rest) or
                     go(visited, i, j - 1, rest) or
                     go(visited, i, j + 1, rest))
            visited.remove((i, j))
            return found

        return any(go(set(), i, j, word)
                   for i in range(n) for j in range(m))
def main():
    # smoke test: "ABCCED" is a tracealbe path on the sample board from the
    # problem statement, so this prints True
    s = Solution()
    print(s.exist([["A", "B", "C", "E"], ["S", "F", "C", "S"], ["A", "D", "E", "E"]], "ABCCED"))
if __name__ == '__main__':
raise(SystemExit(main()))
| 1,056 | -6 | 72 |
5dcfe2f81de53539c653f934a73cb8fdbef0840d | 2,640 | py | Python | src/mem/ruby/system/RubySystem.py | jblab/gem5 | 2d23bb08697f8fa552c21d337090077137fa7fb3 | [
"BSD-3-Clause"
] | 10 | 2020-03-08T18:07:48.000Z | 2021-12-07T07:08:24.000Z | src/mem/ruby/system/RubySystem.py | jblab/gem5 | 2d23bb08697f8fa552c21d337090077137fa7fb3 | [
"BSD-3-Clause"
] | 2 | 2019-03-22T14:23:38.000Z | 2019-03-22T15:45:35.000Z | src/mem/ruby/system/RubySystem.py | jblab/gem5 | 2d23bb08697f8fa552c21d337090077137fa7fb3 | [
"BSD-3-Clause"
] | 6 | 2019-03-07T06:45:00.000Z | 2022-03-12T11:04:05.000Z | # Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Brad Beckmann
from m5.params import *
from ClockedObject import ClockedObject
from SimpleMemory import *
| 47.142857 | 78 | 0.755682 | # Copyright (c) 2009 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Brad Beckmann
from m5.params import *
from ClockedObject import ClockedObject
from SimpleMemory import *
class RubySystem(ClockedObject):
    """Python parameter wrapper for the C++ RubySystem SimObject."""
    # SimObject type name and the C++ header implementing it
    type = 'RubySystem'
    cxx_header = "mem/ruby/system/RubySystem.hh"
    randomization = Param.Bool(False,
        "insert random delays on message enqueue times (if True, all message \
        buffers are enforced to have randomization; otherwise, a message \
        buffer set its own flag to enable/disable randomization)");
    block_size_bytes = Param.UInt32(64,
        "default cache block size; must be a power of two");
    memory_size_bits = Param.UInt32(64,
        "number of bits that a memory address requires");
    # backing memory, used as the functional store when
    # access_backing_store is enabled (see the parameter below)
    phys_mem = Param.SimpleMemory(NULL, "")
    access_backing_store = Param.Bool(False, "Use phys_mem as the functional \
        store and only use ruby for timing.")
    # Profiler related configuration variables
    hot_lines = Param.Bool(False, "")
    all_instructions = Param.Bool(False, "")
    num_of_sequencers = Param.Int("")
    number_of_virtual_networks = Param.Unsigned("")
| 0 | 937 | 23 |
93e45774425b72f8de709ea6f0b005cfe33ce883 | 2,118 | py | Python | test/test_decorators_conf.py | dan-win/fairways_py | 771623c6f9ec40e8016b5cebb7951613d01e31f7 | [
"Apache-2.0"
] | null | null | null | test/test_decorators_conf.py | dan-win/fairways_py | 771623c6f9ec40e8016b5cebb7951613d01e31f7 | [
"Apache-2.0"
] | null | null | null | test/test_decorators_conf.py | dan-win/fairways_py | 771623c6f9ec40e8016b5cebb7951613d01e31f7 | [
"Apache-2.0"
] | null | null | null | import unittest
import json
import os
| 22.294737 | 102 | 0.572238 | import unittest
import json
import os
def setUpModule():
    # module-level setUp hook for unittest; no shared fixtures are needed
    pass
def tearDownModule():
    # module-level tearDown hook for unittest; nothing to clean up
    pass
class ConfigTestCase(unittest.TestCase):
    """Tests for the @use.config decorator before and after conf.load."""
    @classmethod
    def setUpClass(cls):
        # import lazily so the modules under test are loaded once per class
        from fairways.decorators import use
        from fairways import conf
        cls.use = use
        cls.conf = conf
        class TestSettings:
            # minimal stand-in for a settings object with two config keys
            def __init__(self, key1, key2):
                self.KEY1 = key1
                self.KEY2 = key2
                self.LOGGING = {}
                self.CONNECTIONS = {}
        cls.settings_factory = TestSettings
    @classmethod
    def tearDownClass(cls):
        pass
    def setUp(self):
        # Cleanup settings, simulate fresh load
        self.conf.settings = None
    def test_decorator_before_conf_load(self):
        """A handler registered before conf.load receives its key on load.
        """
        use = self.use
        conf = self.conf
        settings_factory = self.settings_factory
        settings = settings_factory("VALUE1", "VALUE2")
        result = {}
        # 1. Register decorator
        @use.config("KEY2")
        def set_conf(sub_conf):
            result.update({"config": sub_conf})
        # 2. Load conf
        conf.load(settings)
        self.assertDictEqual(result, {'config': 'VALUE2'})
    def test_decorator_after_conf_load(self):
        """A handler registered after conf.load still receives its key.
        """
        use = self.use
        conf = self.conf
        settings_factory = self.settings_factory
        settings = settings_factory("VALUE1", "VALUE2")
        result = {}
        # 1. Load conf
        conf.load(settings)
        # 2. Register decorator
        @use.config("KEY2")
        def set_conf(sub_conf):
            result.update({"config": sub_conf})
        self.assertDictEqual(result, {'config': 'VALUE2'})
    def test_decorated_direct_call(self):
        """Placeholder for direct invocation of a decorated handler.
        """
        pass
| 594 | 1,412 | 69 |
eeb7a3190a987218fce09baf9f34549ea26a24f9 | 1,894 | py | Python | day4/puzzle1.py | StvnWthrsp/advent-of-code-2021 | 940b018ea3e1bca090be7498e2f26203c1a45eac | [
"MIT"
] | null | null | null | day4/puzzle1.py | StvnWthrsp/advent-of-code-2021 | 940b018ea3e1bca090be7498e2f26203c1a45eac | [
"MIT"
] | null | null | null | day4/puzzle1.py | StvnWthrsp/advent-of-code-2021 | 940b018ea3e1bca090be7498e2f26203c1a45eac | [
"MIT"
] | null | null | null | with open('input') as input:
    lines = input.readlines()
# NOTE(review): 'input' (the opened file above) shadows the builtin input()
# first line of the puzzle input holds the comma-separated draw sequence
number_sequence = lines[0].split(',')
# flat list of all board cells; every board occupies 25 consecutive entries
board_numbers = []
# flat indexes into board_numbers that have been marked ("called") so far
called_indexes = []
# Flatten data structure for boards
for i, line in enumerate(lines):
    # skip the draw sequence and the blank lines separating the boards
    if i == 0:
        continue
    if line == '\n':
        continue
    # NOTE(review): stripped_line is unused; num_list splits the raw line
    stripped_line = line.strip('\n')
    num_list = line.split()
    for num in num_list:
        board_numbers.append(num)
# "Call" numbers and check for winner
winner = None
for num in number_sequence:
    # checkForWin is defined elsewhere in the original file — not visible
    # in this excerpt; it marks matches and returns the winning board index
    winner = checkForWin(board_numbers, called_indexes, num)
    if winner != None:
        # score = sum of the unmarked cells on the winning board * the
        # number that completed the win
        board_start = winner*25
        unmarked_sum = 0
        for i in range(board_start, board_start+25):
            if i not in called_indexes:
                unmarked_sum += int(board_numbers[i])
        print(f"SOLUTION = {unmarked_sum} * {num} = {int(unmarked_sum) * int(num)}")
        break
| 31.566667 | 84 | 0.56547 | with open('input') as input:
    lines = input.readlines()
# NOTE(review): 'input' (the opened file above) shadows the builtin input()
# first line of the puzzle input holds the comma-separated draw sequence
number_sequence = lines[0].split(',')
# flat list of all board cells; every board occupies 25 consecutive entries
board_numbers = []
# flat indexes into board_numbers that have been marked ("called") so far
called_indexes = []
# Flatten data structure for boards
for i, line in enumerate(lines):
    # skip the draw sequence and the blank lines separating the boards
    if i == 0:
        continue
    if line == '\n':
        continue
    # NOTE(review): stripped_line is unused; num_list splits the raw line
    stripped_line = line.strip('\n')
    num_list = line.split()
    for num in num_list:
        board_numbers.append(num)
def checkForWin(board_numbers, called_indexes, num):
    """Mark every occurrence of ``num`` and report a winning board.

    Appends the matching flat indexes to ``called_indexes`` (mutated in
    place) and returns the index of the first board that completes a full
    row or column, or None if no board has won yet. Boards are stored as
    consecutive 25-cell chunks of ``board_numbers``.
    """
    for idx, cell in enumerate(board_numbers):
        if cell != num:
            continue
        called_indexes.append(idx)
        # locate the cell within its 5x5 board
        board_index = idx // 25
        row_pos = idx % 5
        row_start = idx - row_pos
        col_start = idx - (idx % 25 - row_pos)
        # a row wins when all five of its cells have been called
        horizontal_win = all(
            j in called_indexes for j in range(row_start, row_start + 5))
        # a column wins when all five of its cells have been called
        vertical_win = all(
            j in called_indexes for j in range(col_start, col_start + 25, 5))
        if horizontal_win or vertical_win:
            print(f"Winner on board {board_index}")
            return board_index
# "Call" numbers and check for winner
winner = None
for num in number_sequence:
winner = checkForWin(board_numbers, called_indexes, num)
if winner != None:
board_start = winner*25
unmarked_sum = 0
for i in range(board_start, board_start+25):
if i not in called_indexes:
unmarked_sum += int(board_numbers[i])
print(f"SOLUTION = {unmarked_sum} * {num} = {int(unmarked_sum) * int(num)}")
break
| 1,002 | 0 | 23 |
c22ff0785a3bd0b57882b133aead848048854dc7 | 1,717 | py | Python | nidm/experiment/tools/rest.py | tvanerp/PyNIDM | 6a94875969c6bc5247b09d7d2793ed979b18ab3f | [
"Apache-2.0"
] | null | null | null | nidm/experiment/tools/rest.py | tvanerp/PyNIDM | 6a94875969c6bc5247b09d7d2793ed979b18ab3f | [
"Apache-2.0"
] | null | null | null | nidm/experiment/tools/rest.py | tvanerp/PyNIDM | 6a94875969c6bc5247b09d7d2793ed979b18ab3f | [
"Apache-2.0"
] | null | null | null | from nidm.experiment import Project, Session, AssessmentAcquisition, AssessmentObject, Acquisition, AcquisitionObject, Query
from nidm.core import Constants
import json
import re
from urllib import parse
import pprint
| 37.326087 | 124 | 0.648224 | from nidm.experiment import Project, Session, AssessmentAcquisition, AssessmentObject, Acquisition, AcquisitionObject, Query
from nidm.core import Constants
import json
import re
from urllib import parse
import pprint
def restParser (nidm_files, command, verbosity_level = 0):
    """Dispatch a REST-style path against the given NIDM files.

    Supported commands:
      /projects       -> list of project UUIDs (each passed through
                         Query.matchPrefix)
      /projects/<id>  -> computed metadata for that project
    Returns a list, or the project's metadata entry for the second form;
    an empty list when nothing matches.
    """
    restLog("parsing command "+ command, 1, verbosity_level)
    restLog("Files to read:" + str(nidm_files), 1, verbosity_level)
    result = []
    if re.match(r"^/?projects/?$", command):
        restLog("Returning all projects", 2, verbosity_level)
        projects = Query.GetProjectsUUID(nidm_files)
        for uuid in projects:
            result.append( Query.matchPrefix(str(uuid)))
    elif re.match(r"^/?projects/[^/]+$", command):
        restLog("Returing metadata ", 2, verbosity_level)
        match = re.match(r"^/?projects/([^/]+)$", command)
        # decode percent-escapes so ids with special characters compare equal
        # NOTE(review): 'id' shadows the builtin id()
        id = parse.unquote ( str( match.group(1) ) )
        restLog("computing metadata", 5, verbosity_level)
        projects = Query.GetProjectsComputedMetadata(nidm_files)
        for pid in projects['projects'].keys():
            restLog("comparng " + str(pid) + " with " + str(id), 5, verbosity_level)
            if pid == id:
                result = projects['projects'][pid]
    return result
def restLog(message, verbosity_of_message, verbosity_level):
    """Print ``message`` unless it is more verbose than the configured level."""
    suppressed = verbosity_of_message > verbosity_level
    if not suppressed:
        print(message)
def formatResults (result, format, stream):
    """Write ``result`` to ``stream`` as plain text or JSON.

    Args:
        result: the query result; a list, dict, or scalar value.
        format: 'text' for one-entry-per-line / pretty-printed output,
            anything else for indented JSON.
        stream: a writable file-like object.
    """
    pp = pprint.PrettyPrinter(stream=stream)
    if format == 'text':
        if isinstance(result, list):
            # one entry per line
            print(*result, sep='\n', file=stream)
        else:
            pp.pprint(result)
    else:
        # BUGFIX: the original passed separators=(',', ';'), which emits
        # '"key";value' pairs and therefore invalid, unparseable JSON; the
        # key separator must be ': '.
        print(json.dumps(result, indent=2, separators=(',', ': ')), file=stream)
| 1,429 | 0 | 69 |
d11bd872c3e7734f25fb0c80cda80c700c722180 | 2,943 | py | Python | python_submitty_utils/tests/test_glob.py | muarachmann/Submitty | 86cea3d3441419089b1a3058d01c663e6807294c | [
"BSD-3-Clause"
] | 3 | 2020-07-27T16:23:09.000Z | 2022-01-07T16:07:31.000Z | python_submitty_utils/tests/test_glob.py | muarachmann/Submitty | 86cea3d3441419089b1a3058d01c663e6807294c | [
"BSD-3-Clause"
] | null | null | null | python_submitty_utils/tests/test_glob.py | muarachmann/Submitty | 86cea3d3441419089b1a3058d01c663e6807294c | [
"BSD-3-Clause"
] | null | null | null | from os import path, mkdir
from pathlib import Path
import shutil
import tempfile
import types
import unittest
from submitty_utils import glob
if __name__ == '__main__':
unittest.main()
| 37.730769 | 111 | 0.606184 | from os import path, mkdir
from pathlib import Path
import shutil
import tempfile
import types
import unittest
from submitty_utils import glob
class TestGlob(unittest.TestCase):
    """Tests for submitty_utils.glob's glob/iglob wrappers.

    The fixture tree created in setUp is:
        <tmp>/a, <tmp>/b   (files)
        <tmp>/c/e          (file one directory deep)
        <tmp>/d/f/g        (file two directories deep)
    """
    def setUp(self):
        # build the temporary fixture tree described in the class docstring
        self.dir = tempfile.mkdtemp()
        Path(path.join(self.dir, 'a')).touch()
        Path(path.join(self.dir, 'b')).touch()
        mkdir(path.join(self.dir, 'c'))
        mkdir(path.join(self.dir, 'd'))
        Path(path.join(self.dir, 'c', 'e')).touch()
        mkdir(path.join(self.dir, 'd', 'f'))
        Path(path.join(self.dir, 'd', 'f', 'g')).touch()
    def test_glob(self):
        # without recursive=True, '**' behaves like '*' and returns a list
        expected = [path.join(self.dir, x) for x in ['a', 'b', 'c', 'd']]
        actual = glob.glob(path.join(self.dir, '**'))
        self.assertIsInstance(actual, list)
        self.assertCountEqual(expected, actual)
        actual2 = glob.glob(path.join(self.dir, '*'))
        self.assertIsInstance(actual2, list)
        self.assertEqual(actual, actual2)
    def test_iglob(self):
        # iglob returns a generator yielding the same entries as glob
        expected = [path.join(self.dir, x) for x in ['a', 'b', 'c', 'd']]
        actual = glob.iglob(path.join(self.dir, '**'))
        self.assertIsInstance(actual, types.GeneratorType)
        actual = list(actual)
        self.assertCountEqual(expected, actual)
        actual2 = glob.iglob(path.join(self.dir, '*'))
        self.assertIsInstance(actual2, types.GeneratorType)
        actual2 = list(actual2)
        self.assertEqual(actual, actual2)
    def test_glob_recursive_star(self):
        # recursive '*' still matches only the first level
        expected = ['a', 'b', 'c', 'd']
        expected = [path.join(self.dir, x) for x in expected]
        actual = glob.glob(path.join(self.dir, '*'), recursive=True)
        self.assertIsInstance(actual, list)
        self.assertCountEqual(expected, actual)
    def test_glob_recursive(self):
        # recursive '**' matches every file and directory, incl. the root ('')
        expected = ['', 'a', 'b', 'c', 'd', path.join('c', 'e'), path.join('d', 'f'), path.join('d', 'f', 'g')]
        expected = [path.join(self.dir, x) for x in expected]
        actual = glob.glob(path.join(self.dir, '**'), recursive=True)
        self.assertIsInstance(actual, list)
        self.assertCountEqual(expected, actual)
    def test_iglob_recursive_star(self):
        # generator variant of test_glob_recursive_star
        expected = ['a', 'b', 'c', 'd']
        expected = [path.join(self.dir, x) for x in expected]
        actual = glob.iglob(path.join(self.dir, '*'), recursive=True)
        self.assertIsInstance(actual, types.GeneratorType)
        self.assertCountEqual(expected, list(actual))
    def test_iglob_recursive(self):
        # generator variant of test_glob_recursive
        expected = ['', 'a', 'b', 'c', 'd', path.join('c', 'e'), path.join('d', 'f'), path.join('d', 'f', 'g')]
        expected = [path.join(self.dir, x) for x in expected]
        actual = glob.iglob(path.join(self.dir, '**'), recursive=True)
        self.assertIsInstance(actual, types.GeneratorType)
        self.assertCountEqual(expected, list(actual))
    def tearDown(self):
        # remove the fixture tree created in setUp
        shutil.rmtree(self.dir)
if __name__ == '__main__':
unittest.main()
| 2,498 | 13 | 238 |
8b730ac3e6e3192ba140431c976240bf6e0b617a | 7,854 | py | Python | training_ptr_gen/train.py | michaellin/cs224n-finalproj | a82e420f9ed3055ee0155133670ad82b4ca171d4 | [
"Apache-2.0"
] | null | null | null | training_ptr_gen/train.py | michaellin/cs224n-finalproj | a82e420f9ed3055ee0155133670ad82b4ca171d4 | [
"Apache-2.0"
] | null | null | null | training_ptr_gen/train.py | michaellin/cs224n-finalproj | a82e420f9ed3055ee0155133670ad82b4ca171d4 | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals, print_function, division
import math
import os
import time
import argparse
import tensorflow as tf
import torch
from model import Model
from torch.nn.utils import clip_grad_norm_
from torch.optim import Adagrad
from torch.autograd import Variable
from data_util import config
from data_util.batcher import Batcher
from data_util.data import Vocab
from data_util.utils import calc_running_avg_loss
from train_util import get_input_from_batch, get_output_from_batch
use_cuda = config.use_gpu and torch.cuda.is_available()
if __name__ == '__main__':
    # CLI entry point: `-m <checkpoint>` resumes training from a saved model.
    parser = argparse.ArgumentParser(description="Train script")
    parser.add_argument("-m",
                        dest="model_file_path",
                        required=False,
                        default=None,
                        help="Model file for retraining (default: None).")
    args = parser.parse_args()
    # Run for the configured total number of iterations.
    train_processor = Train()
    train_processor.trainIters(config.max_iterations, args.model_file_path)
| 42.918033 | 114 | 0.615355 | from __future__ import unicode_literals, print_function, division
import math
import os
import time
import argparse
import tensorflow as tf
import torch
from model import Model
from torch.nn.utils import clip_grad_norm_
from torch.optim import Adagrad
from torch.autograd import Variable
from data_util import config
from data_util.batcher import Batcher
from data_util.data import Vocab
from data_util.utils import calc_running_avg_loss
from train_util import get_input_from_batch, get_output_from_batch
use_cuda = config.use_gpu and torch.cuda.is_available()
class Train(object):
    """Training driver for the pointer-generator summarizer.

    Owns the vocab, the batcher, the model, the Adagrad optimizer, the TF
    summary writer, and checkpointing — including an automatic restart from
    the last good checkpoint when the running loss diverges to NaN.
    """
    def __init__(self):
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.batcher = Batcher(config.train_data_path, self.vocab, mode='train',
                               batch_size=config.batch_size, single_pass=False)
        # NOTE(review): presumably gives the batcher time to queue batches
        # before training starts — confirm.
        time.sleep(15)
        # Each run logs and checkpoints under a fresh timestamped directory.
        train_dir = os.path.join(config.log_root, 'train_%d' % (int(time.time())))
        if not os.path.exists(train_dir):
            os.mkdir(train_dir)
        self.model_dir = os.path.join(train_dir, 'model')
        if not os.path.exists(self.model_dir):
            os.mkdir(self.model_dir)
        self.summary_writer = tf.summary.FileWriter(train_dir)
        # Path of the newest checkpoint whose running loss was not NaN;
        # trainIters rolls back to it if the loss diverges.
        self.last_good_model_save_path = None
    def save_model(self, running_avg_loss, iter):
        """Write a timestamped checkpoint of model + optimizer state.

        Remembers the checkpoint path as "last good" only when the running
        average loss is a real number (not NaN).
        """
        state = {
            'iter': iter,
            'encoder_state_dict': self.model.encoder.state_dict(),
            'decoder_state_dict': self.model.decoder.state_dict(),
            'reduce_state_dict': self.model.reduce_state.state_dict(),
            'optimizer': self.optimizer.state_dict(),
            'current_loss': running_avg_loss
        }
        model_save_path = os.path.join(self.model_dir, 'model_%d_%d' % (iter, int(time.time())))
        # save the path to the last model that was not nan
        if (not math.isnan(running_avg_loss)):
            self.last_good_model_save_path = model_save_path
        torch.save(state, model_save_path)
    def setup_train(self, model_file_path=None):
        """Build the model and Adagrad optimizer, optionally restoring state.

        Returns (start_iter, start_loss): zeros for a fresh run, or the
        values stored in the checkpoint when resuming.
        """
        self.model = Model(model_file_path)
        self.last_good_model_save_path = model_file_path
        params = list(self.model.encoder.parameters()) + list(self.model.decoder.parameters()) + \
                 list(self.model.reduce_state.parameters())
        initial_lr = config.lr_coverage if config.is_coverage else config.lr
        self.optimizer = Adagrad(params, lr=initial_lr, initial_accumulator_value=config.adagrad_init_acc)
        start_iter, start_loss = 0, 0
        if model_file_path is not None:
            # Load to CPU first; tensors are moved to GPU below when needed.
            state = torch.load(model_file_path, map_location= lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']
            # Optimizer state is restored only when coverage is off —
            # presumably because coverage training uses a different
            # learning rate (see initial_lr above); confirm.
            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if use_cuda:
                    # NOTE(review): `state` is rebound here, shadowing the
                    # checkpoint dict loaded above.
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()
        return start_iter, start_loss
    def train_one_batch(self, batch):
        """Run one teacher-forced training step and return the scalar loss.

        Per decoder step the loss is the negative log-likelihood of the
        gold token, plus (optionally) a coverage penalty, plus a weighted
        copy loss; steps are masked by the decoder padding mask and
        averaged over the true decoder lengths.
        """
        enc_batch, enc_padding_mask, enc_lens, enc_batch_extend_vocab, extra_zeros, c_t_1, coverage = \
            get_input_from_batch(batch, use_cuda)
        dec_batch, dec_padding_mask, max_dec_len, dec_lens_var, target_batch = \
            get_output_from_batch(batch, use_cuda)
        self.optimizer.zero_grad()
        encoder_outputs, encoder_feature, encoder_hidden = self.model.encoder(enc_batch, enc_lens)
        s_t_1 = self.model.reduce_state(encoder_hidden)
        step_losses = []
        for di in range(min(max_dec_len, config.max_dec_steps)):
            y_t_1 = dec_batch[:, di]  # Teacher forcing
            final_dist, s_t_1, c_t_1, attn_dist, p_gen, next_coverage = self.model.decoder(y_t_1, s_t_1,
                                                        encoder_outputs, encoder_feature, enc_padding_mask, c_t_1,
                                                        extra_zeros, enc_batch_extend_vocab,
                                                        coverage, di)
            target = target_batch[:, di]
            # NLL of the gold token under the final (mixed) distribution.
            gold_probs = torch.gather(final_dist, 1, target.unsqueeze(1)).squeeze()
            step_loss = -torch.log(gold_probs + config.eps)
            if config.is_coverage:
                # Coverage penalty: overlap between current attention and
                # accumulated coverage discourages attending twice.
                step_coverage_loss = torch.sum(torch.min(attn_dist, coverage), 1)
                step_loss = step_loss + config.cov_loss_wt * step_coverage_loss
                coverage = next_coverage
            # calculate copy loss
            vocab_zero = Variable(torch.zeros(self.model.decoder.vocab_dist_.shape, dtype=torch.float))
            if use_cuda:
                vocab_zero = vocab_zero.cuda()
            if extra_zeros is not None:
                vocab_zero = torch.cat([vocab_zero, extra_zeros], 1)
            # Copy-mode probability mass, scattered onto the extended vocab.
            attn_dist_ = (1 - p_gen) * attn_dist
            attn_expanded = vocab_zero.scatter_add(1, enc_batch_extend_vocab, attn_dist_)
            vocab_zero[:, self.vocab.word2id('[UNK]')] = 1.0
            # Not sure whether we want to add loss for the extra vocab indices
            #vocab_zero[:, config.vocab_size:] = 1.0
            y_unk_neg = 1.0 - vocab_zero
            # Sums the scattered copy mass at every index except [UNK] —
            # NOTE(review): verify this matches the intended copy-loss
            # definition.
            copyloss=torch.bmm(y_unk_neg.unsqueeze(1), attn_expanded.unsqueeze(2))
            # add copy loss with lambda 2 weight
            step_loss = step_loss + config.copy_loss_wt * copyloss
            step_mask = dec_padding_mask[:, di]
            step_loss = step_loss * step_mask
            step_losses.append(step_loss)
        sum_losses = torch.sum(torch.stack(step_losses, 1), 1)
        batch_avg_loss = sum_losses/dec_lens_var
        loss = torch.mean(batch_avg_loss)
        loss.backward()
        # Clip gradients of all three sub-modules before stepping.
        self.norm = clip_grad_norm_(self.model.encoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.decoder.parameters(), config.max_grad_norm)
        clip_grad_norm_(self.model.reduce_state.parameters(), config.max_grad_norm)
        self.optimizer.step()
        return loss.item()
    def trainIters(self, n_iters, model_file_path=None):
        """Train up to `n_iters` iterations with periodic logging,
        checkpointing every 1000 steps, and automatic restart from the
        last good checkpoint when the running loss becomes NaN.

        Note: `iter` shadows the builtin within this method.
        """
        iter, running_avg_loss = self.setup_train(model_file_path)
        start = time.time()
        while iter < n_iters:
            batch = self.batcher.next_batch()
            loss = self.train_one_batch(batch)
            running_avg_loss = calc_running_avg_loss(loss, running_avg_loss, self.summary_writer, iter)
            iter += 1
            if (math.isnan(running_avg_loss)):
                # Diverged: roll back to the last checkpoint that had a
                # finite loss and continue from there.
                print('Found a nan loss return. Restarting the training at {}' \
                    .format(self.last_good_model_save_path))
                iter, running_avg_loss = self.setup_train(self.last_good_model_save_path)
                start = time.time()
            if iter % 100 == 0:
                self.summary_writer.flush()
            print_interval = 1000
            if iter % print_interval == 0:
                print('steps %d, seconds for %d batch: %.2f , loss: %f' % (iter, print_interval,
                                                                           time.time() - start, loss))
                start = time.time()
            if iter % 1000 == 0:
                self.save_model(running_avg_loss, iter)
if __name__ == '__main__':
    # CLI entry point: `-m <checkpoint>` resumes training from a saved model.
    parser = argparse.ArgumentParser(description="Train script")
    parser.add_argument("-m",
                        dest="model_file_path",
                        required=False,
                        default=None,
                        help="Model file for retraining (default: None).")
    args = parser.parse_args()
    # Run for the configured total number of iterations.
    train_processor = Train()
    train_processor.trainIters(config.max_iterations, args.model_file_path)
| 6,664 | -1 | 158 |
9f2c8f00943e0bcce75113f16bdcf14f865572af | 2,398 | py | Python | alembic/versions/49b367d3d25f_remove_siway_add_golab.py | morelab/labmanager | b44f97f26f224e4a94c7981e5cc84f3b5c8cc440 | [
"BSD-2-Clause"
] | 2 | 2015-11-05T01:43:19.000Z | 2017-10-19T15:28:53.000Z | alembic/versions/49b367d3d25f_remove_siway_add_golab.py | morelab/labmanager | b44f97f26f224e4a94c7981e5cc84f3b5c8cc440 | [
"BSD-2-Clause"
] | 3 | 2021-03-22T17:12:12.000Z | 2021-12-13T19:39:20.000Z | alembic/versions/49b367d3d25f_remove_siway_add_golab.py | morelab/labmanager | b44f97f26f224e4a94c7981e5cc84f3b5c8cc440 | [
"BSD-2-Clause"
] | 6 | 2016-03-08T09:32:16.000Z | 2022-01-06T09:53:37.000Z | """Remove SiWay, add GoLab
Revision ID: 49b367d3d25f
Revises: 13f9fd64f85b
Create Date: 2017-04-07 01:07:29.653200
"""
# revision identifiers, used by Alembic.
revision = '49b367d3d25f'
down_revision = '13f9fd64f85b'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
| 42.070175 | 131 | 0.725188 | """Remove SiWay, add GoLab
Revision ID: 49b367d3d25f
Revises: 13f9fd64f85b
Create Date: 2017-04-07 01:07:29.653200
"""
# revision identifiers, used by Alembic.
revision = '49b367d3d25f'
down_revision = '13f9fd64f85b'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Create the GoLabOAuthUsers table, detach EmbedApplications from
    siway_user, and drop the legacy siway_user table (best-effort)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('GoLabOAuthUsers',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('display_name', sa.Unicode(length=255), nullable=False),
    sa.Column('email', sa.Unicode(length=255), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(u'ix_GoLabOAuthUsers_display_name', 'GoLabOAuthUsers', ['display_name'], unique=False)
    # unique=True: one account per email address.
    op.create_index(u'ix_GoLabOAuthUsers_email', 'GoLabOAuthUsers', ['email'], unique=True)
    # Drop the FK before the column it references can be removed.
    op.drop_constraint(u'EmbedApplications_ibfk_1', 'EmbedApplications', type_='foreignkey')
    op.drop_column('EmbedApplications', u'owner_id')
    # Best-effort drop: the table may not exist on every deployment.
    # NOTE(review): the bare `except` also hides unrelated errors — confirm.
    try:
        op.drop_table(u'siway_user')
    except:
        pass
    ### end Alembic commands ###
def downgrade():
    """Reverse the migration: restore owner_id/FK and siway_user, drop
    GoLabOAuthUsers and its indexes."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('EmbedApplications', sa.Column(u'owner_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))
    # NOTE(review): this FK targets siway_user, which is only created two
    # statements below — the ordering looks wrong; confirm before running.
    op.create_foreign_key(u'EmbedApplications_ibfk_1', u'EmbedApplications', u'siway_user', [u'owner_id'], [u'id'])
    op.create_table(u'siway_user',
    sa.Column(u'id', mysql.INTEGER(display_width=11), nullable=False),
    sa.Column(u'email', mysql.VARCHAR(length=255), nullable=False),
    sa.Column(u'uid', mysql.INTEGER(display_width=11), autoincrement=False, nullable=False),
    sa.Column(u'employee_type', mysql.VARCHAR(length=255), nullable=False),
    sa.Column(u'full_name', mysql.VARCHAR(length=255), nullable=False),
    sa.Column(u'short_name', mysql.VARCHAR(length=255), nullable=False),
    sa.Column(u'school_name', mysql.VARCHAR(length=255), nullable=False),
    sa.Column(u'group', mysql.VARCHAR(length=255), nullable=False),
    sa.PrimaryKeyConstraint(u'id'),
    mysql_default_charset=u'latin1',
    mysql_engine=u'InnoDB'
    )
    op.drop_index(u'ix_GoLabOAuthUsers_email', table_name='GoLabOAuthUsers')
    op.drop_index(u'ix_GoLabOAuthUsers_display_name', table_name='GoLabOAuthUsers')
    op.drop_table('GoLabOAuthUsers')
    ### end Alembic commands ###
| 2,045 | 0 | 46 |
b4364657ba1f9bb78cceec1053292ab0219939f9 | 1,361 | py | Python | sd2wiki/techs.py | ajul/stardrive2wikiscripts | df329b6e80a3c50a36fd4fdb48f286a24e4c802e | [
"MIT"
] | null | null | null | sd2wiki/techs.py | ajul/stardrive2wikiscripts | df329b6e80a3c50a36fd4fdb48f286a24e4c802e | [
"MIT"
] | null | null | null | sd2wiki/techs.py | ajul/stardrive2wikiscripts | df329b6e80a3c50a36fd4fdb48f286a24e4c802e | [
"MIT"
] | null | null | null | from sd2wiki.config import *
from sd2wiki.core import *
from sd2wiki.loc import getLoc
from sd2wiki.buildings import buildings
import csv, re, os
# Load the tab-separated tech tree into `techs`, keyed by tech uid.
techs = {}
techFile = open(os.path.join(basedir, 'Techs', 'TechTree.txt'))
# Clear every building's tech link before (re)reading the tree.
for building in buildings.values():
    building.tech = None
for line in csv.reader(techFile, csv.excel_tab):
    uid = line[0].strip()
    # Skip blank/padding rows.
    if uid == '': continue
    techs[uid] = Tech(*line)
techFile.close()
| 29.586957 | 92 | 0.619398 | from sd2wiki.config import *
from sd2wiki.core import *
from sd2wiki.loc import getLoc
from sd2wiki.buildings import buildings
import csv, re, os
# Global registry of Tech objects, keyed by tech uid (filled below).
techs = {}
class TechOption():
    """A single unlockable option granted by a Tech (e.g. a building)."""
    def __init__(self, type, uid, tech):
        self.uid = uid
        self.tech = tech  # back-reference to the Tech granting this option
        self.name = getLoc(type, uid, 'name')  # localized display name
        self.reference = None # todo: link to the unlocked object itself
class Tech():
    """One node of the tech tree, built from a TechTree.txt row.

    The uid encodes category and level as its last two '_'-separated
    tokens (see the split() calls below).
    """
    def __init__(self, uid, cost,
                 advanceOptions, buildingOptions, moduleOptions, skillOptions,
                 skills, advance1Instruction, advance1Value, advance2Instruction, advance2Value):
        uid = uid.strip()
        self.uid = uid
        self.cost = int(cost)
        self.options = []
        # Fall back to the uid's leading token when no localization exists.
        self.name = getLoc(uid) or uid.split('_')[0]
        self.level = uid.split('_')[-1]
        self.category = uid.split('_')[-2]
        # Link each building named in the comma-separated buildingOptions
        # column back to this tech.
        for buildingUID in re.split(',', buildingOptions):
            buildingUID = buildingUID.strip()
            if buildingUID == '': continue
            techOption = TechOption('Building', buildingUID, self)
            # NOTE(review): techOption is created but never stored (e.g. in
            # self.options) — looks like truncated or dead code; confirm.
            buildings[buildingUID].tech = self
# Parse the tab-separated tech tree; constructing each Tech also links
# buildings to their unlocking tech.
techFile = open(os.path.join(basedir, 'Techs', 'TechTree.txt'))
for building in buildings.values():
    building.tech = None  # clear stale links before re-reading
for line in csv.reader(techFile, csv.excel_tab):
    uid = line[0].strip()
    if uid == '': continue  # skip blank rows
    techs[uid] = Tech(*line)
techFile.close()
| 833 | -10 | 98 |
7efc83e707e9042dd9fa839614829d16aaf3dec5 | 1,617 | py | Python | dodger.py | Elephant34/dodger | a7dcf19a2a4e9eca6731e2d1a5e9a16d0047e851 | [
"MIT"
] | null | null | null | dodger.py | Elephant34/dodger | a7dcf19a2a4e9eca6731e2d1a5e9a16d0047e851 | [
"MIT"
] | null | null | null | dodger.py | Elephant34/dodger | a7dcf19a2a4e9eca6731e2d1a5e9a16d0047e851 | [
"MIT"
] | null | null | null | '''
A game- player dodge the falling blocks
'''
import random
import math
import pyglet
from pyglet.window import key
from assets.entities import player, timer, block
class Window(pyglet.window.Window):
    """Game window that renders every entity from the shared batch."""

    def __init__(self, width, height, **kwargs):
        """Create the window, forwarding any extra pyglet options."""
        super().__init__(width, height, **kwargs)

    def on_draw(self):
        """Redraw one frame: wipe the buffer, then draw the shared batch."""
        self.clear()
        # `main_batch` is the module-level batch all entities register with.
        main_batch.draw()
def update(dt):
    """Advance the game one tick: maybe spawn a block, then move every entity.

    Spawn odds follow a decay curve — the dice upper bound starts at 72
    (timer at 0) and approaches 2, so blocks get more frequent over time.
    """
    elapsed = float(timer.text)
    upper_bound = round(70 * (math.e/2)**((-1/20)*elapsed)+2)
    if random.randint(1, upper_bound) == 1:
        spawned = block.Block(batch=main_batch)
        # Fall speed also ramps up exponentially with elapsed time.
        spawned.speed = random.uniform(30, 70+(math.e**(elapsed*9/100)))
        enitity_list.append(spawned)
    for entity in enitity_list:
        entity.update(dt)
# --- module-level game state ---
window = Window(600, 600)
window.set_caption("Dodger")
main_batch = pyglet.graphics.Batch()  # single batch drawn by Window.on_draw
pressed = key.KeyStateHandler()  # tracks currently-held keys
window.push_handlers(pressed)
enitity_list = []  # everything updated each tick (player, timer, blocks)
player = player.Player(pressed, batch=main_batch)  # rebinds the `player` module name
enitity_list.append(player)
timer = timer.Timer(batch=main_batch)  # rebinds the `timer` module name
enitity_list.append(timer)
enemy_list = []  # NOTE(review): never used in this file — confirm it is vestigial
if __name__ == "__main__":
    # Tick the game at 120 Hz.
    pyglet.clock.schedule_interval(update, 1/120.0)
pyglet.app.run() | 23.1 | 63 | 0.633271 | '''
A game- player dodge the falling blocks
'''
import random
import math
import pyglet
from pyglet.window import key
from assets.entities import player, timer, block
class Window(pyglet.window.Window):
    '''
    The window class with custom draw function
    '''
    def __init__(self, width, height, **kawgs):
        '''
        Sets up the main window
        '''
        super().__init__(width, height, **kawgs)
    def on_draw(self):
        '''
        Overwrites the main draw function
        '''
        self.clear()
        # Draws all the other needed items (the module-level batch every
        # entity registers with).
        main_batch.draw()
def update(dt):
    '''
    Updates all the entities so they can move
    '''
    # Spawn chance follows a decay curve: the dice upper bound starts at 72
    # (timer at 0) and approaches 2, so odds rise from ~1/72 toward 1/2.
    chance = random.randint(
        1,
        round(70 * (math.e/2)**((-1/20)*float(timer.text))+2)
    )
    if chance == 1:
        falling = block.Block(batch=main_batch)
        # Fall speed also ramps up exponentially with elapsed time.
        falling.speed = random.uniform(
            30,
            70+(math.e**(float(timer.text)*9/100))
        )
        enitity_list.append(falling)
    for enitity in enitity_list:
        enitity.update(dt)
# --- module-level game state ---
window = Window(600, 600)
window.set_caption("Dodger")
main_batch = pyglet.graphics.Batch()  # single batch drawn by Window.on_draw
pressed = key.KeyStateHandler()  # tracks currently-held keys
window.push_handlers(pressed)
enitity_list = []  # everything updated each tick (player, timer, blocks)
player = player.Player(pressed, batch=main_batch)  # rebinds the `player` module name
enitity_list.append(player)
timer = timer.Timer(batch=main_batch)  # rebinds the `timer` module name
enitity_list.append(timer)
enemy_list = []  # NOTE(review): never used in this file — confirm it is vestigial
if __name__ == "__main__":
    # Tick the game at 120 Hz.
    pyglet.clock.schedule_interval(update, 1/120.0)
pyglet.app.run() | 0 | 0 | 0 |
78bb26e8d89a54b5c030d69bdf6a09b1037e4697 | 609 | py | Python | python/advanced/modules-packages-methods/3.built-in-modules.py | arkhoss/scripting | 6e780b93f94d7a39c44c88787e783e20ee89fab9 | [
"MIT"
] | null | null | null | python/advanced/modules-packages-methods/3.built-in-modules.py | arkhoss/scripting | 6e780b93f94d7a39c44c88787e783e20ee89fab9 | [
"MIT"
] | null | null | null | python/advanced/modules-packages-methods/3.built-in-modules.py | arkhoss/scripting | 6e780b93f94d7a39c44c88787e783e20ee89fab9 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
#Usage: lambda.py
#Author: David Caballero <d@dcaballero.net>
#Version: 1.0
import copy
# deepcopy makes a fully independent copy: mutating the original afterwards
# does not affect the copy.
my_dictionary = {'Key':'Value', ('K','E','Y'):5}
my_dictionary1 = copy.deepcopy(my_dictionary)
my_dictionary[1] = 1
print(my_dictionary)
print(my_dictionary1)
# math: real-valued functions and constants.
import math as m
print( m.cos(m.pi))
print( m.exp(m.pi))
print( m.ceil(m.pi))
# cmath: the same toolbox for complex numbers.
import cmath as cm
print(dir(cm))
print(cm.sqrt(4))
print(cm.polar(complex(0,1)))
# random: sampling and pseudo-random numbers (output varies run to run).
import random as ran
print(dir(ran))
print(ran.sample([1,2,3,4,5] ,3))
print(ran.random())
print(ran.randint(5,100))
# sys: interpreter version and module search path.
import sys
print(sys.version)
print(sys.path)
| 12.957447 | 48 | 0.689655 | #!/usr/bin/python3
#Usage: lambda.py
#Author: David Caballero <d@dcaballero.net>
#Version: 1.0
import copy
# deepcopy makes a fully independent copy: mutating the original afterwards
# does not affect the copy.
my_dictionary = {'Key':'Value', ('K','E','Y'):5}
my_dictionary1 = copy.deepcopy(my_dictionary)
my_dictionary[1] = 1
print(my_dictionary)
print(my_dictionary1)
# math: real-valued functions and constants.
import math as m
print( m.cos(m.pi))
print( m.exp(m.pi))
print( m.ceil(m.pi))
# cmath: the same toolbox for complex numbers.
import cmath as cm
print(dir(cm))
print(cm.sqrt(4))
print(cm.polar(complex(0,1)))
# random: sampling and pseudo-random numbers (output varies run to run).
import random as ran
print(dir(ran))
print(ran.sample([1,2,3,4,5] ,3))
print(ran.random())
print(ran.randint(5,100))
# sys: interpreter version and module search path.
import sys
print(sys.version)
print(sys.path)
| 0 | 0 | 0 |
94d435d6b288d87939172e8a4c1ad435ee658241 | 1,628 | py | Python | matplotlib/gallery_python/axes_grid1/demo_colorbar_with_inset_locator.py | gottaegbert/penter | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | [
"MIT"
] | 13 | 2020-01-04T07:37:38.000Z | 2021-08-31T05:19:58.000Z | matplotlib/gallery_python/axes_grid1/demo_colorbar_with_inset_locator.py | gottaegbert/penter | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | [
"MIT"
] | 3 | 2020-06-05T22:42:53.000Z | 2020-08-24T07:18:54.000Z | matplotlib/gallery_python/axes_grid1/demo_colorbar_with_inset_locator.py | gottaegbert/penter | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | [
"MIT"
] | 9 | 2020-10-19T04:53:06.000Z | 2021-08-31T05:20:01.000Z | """
==============================================================
Controlling the position and size of colorbars with Inset Axes
==============================================================
This example shows how to control the position, height, and width of
colorbars using `~mpl_toolkits.axes_grid1.inset_locator.inset_axes`.
Controlling the placement of the inset axes is done similarly as that of the
legend: either by providing a location option ("upper right", "best", ...), or
by providing a locator with respect to the parent bbox.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[6, 3])
axins1 = inset_axes(ax1,
width="50%", # width = 50% of parent_bbox width
height="5%", # height : 5%
loc='upper right')
im1 = ax1.imshow([[1, 2], [2, 3]])
fig.colorbar(im1, cax=axins1, orientation="horizontal", ticks=[1, 2, 3])
axins1.xaxis.set_ticks_position("bottom")
axins = inset_axes(ax2,
width="5%", # width = 5% of parent_bbox width
height="50%", # height : 50%
loc='lower left',
bbox_to_anchor=(1.05, 0., 1, 1),
bbox_transform=ax2.transAxes,
borderpad=0,
)
# Controlling the placement of the inset axes is basically same as that
# of the legend. you may want to play with the borderpad value and
# the bbox_to_anchor coordinate.
im = ax2.imshow([[1, 2], [2, 3]])
fig.colorbar(im, cax=axins, ticks=[1, 2, 3])
plt.show()
| 35.391304 | 78 | 0.578624 | """
==============================================================
Controlling the position and size of colorbars with Inset Axes
==============================================================
This example shows how to control the position, height, and width of
colorbars using `~mpl_toolkits.axes_grid1.inset_locator.inset_axes`.
Controlling the placement of the inset axes is done similarly as that of the
legend: either by providing a location option ("upper right", "best", ...), or
by providing a locator with respect to the parent bbox.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=[6, 3])
# Inset colorbar axes placed by location string inside ax1.
axins1 = inset_axes(ax1,
                    width="50%",  # width = 50% of parent_bbox width
                    height="5%",  # height : 5%
                    loc='upper right')
im1 = ax1.imshow([[1, 2], [2, 3]])
fig.colorbar(im1, cax=axins1, orientation="horizontal", ticks=[1, 2, 3])
# Ticks below the horizontal colorbar keep them clear of the image.
axins1.xaxis.set_ticks_position("bottom")
# Inset placed via bbox_to_anchor, hanging just outside ax2's right edge.
axins = inset_axes(ax2,
                   width="5%",  # width = 5% of parent_bbox width
                   height="50%",  # height : 50%
                   loc='lower left',
                   bbox_to_anchor=(1.05, 0., 1, 1),
                   bbox_transform=ax2.transAxes,
                   borderpad=0,
                   )
# Controlling the placement of the inset axes is basically same as that
# of the legend. you may want to play with the borderpad value and
# the bbox_to_anchor coordinate.
im = ax2.imshow([[1, 2], [2, 3]])
fig.colorbar(im, cax=axins, ticks=[1, 2, 3])
plt.show()
| 0 | 0 | 0 |
f423b12cc917b18b18a2be2f75f0089c5bbf447d | 2,122 | py | Python | maillogger/parser.py | Natureshadow/maillogger | b147e81411befd42a94f2252aae42def6d891732 | [
"MIT"
] | 2 | 2020-08-17T05:08:47.000Z | 2020-12-16T19:47:15.000Z | maillogger/parser.py | Natureshadow/maillogger | b147e81411befd42a94f2252aae42def6d891732 | [
"MIT"
] | null | null | null | maillogger/parser.py | Natureshadow/maillogger | b147e81411befd42a94f2252aae42def6d891732 | [
"MIT"
] | 1 | 2022-03-31T10:10:28.000Z | 2022-03-31T10:10:28.000Z | import re
from dataclasses import InitVar, asdict, dataclass, field
from datetime import datetime
from typing import Dict, Optional
# Matches one postfix delivery line; named groups capture the timestamp
# pieces (month/day/time), queue id (mail_id), recipient, relay, delay
# stats, DSN code, delivery status (sent/deferred/bounced) and the
# trailing parenthesised description.
REGEX = r'(?P<month>[A-Z][a-z]{2}) (?P<day>[0-9]{,2}) ' \
    + r'(?P<time>[0-9]{2}:[0-9]{2}:[0-9]{2}) mail postfix/[a-z]+\[[0-9]+\]: ' \
    + r'(?P<mail_id>[A-Z0-9]+): to=<(?P<to_address>.*@.*)>, ' \
    + r'relay=(?P<relay>.*), delay=(?P<delay>[0-9.]+), ' \
    + r'delays=(?P<delays>[0-9][0-9/.]+), dsn=(?P<dsn>[0-9].[0-9].[0-9]), ' \
    + r'status=(?P<status>(sent|deferred|bounced)) \((?P<description>.*)\)'
PATTERN = re.compile(REGEX)
# Shape of a successful parse: {group name: captured string}.
ParseResultType = Dict[str, str]
def parse(target: str) -> Optional[ParseResultType]:
    """Parse one postfix maillog delivery line.

    Args:
        target: a single maillog line.

    Returns:
        None when the line does not match the expected format; otherwise a
        plain dict of the captured fields (mail_id, to_address, relay,
        delay, delays, dsn, status, description) plus a derived `datetime`
        string, produced via ParseResult.
    """
    match = PATTERN.search(target)
    if not match:
        return None
    return ParseResult(**match.groupdict()).to_dict()
@dataclass
| 27.205128 | 79 | 0.557493 | import re
from dataclasses import InitVar, asdict, dataclass, field
from datetime import datetime
from typing import Dict, Optional
# Matches one postfix delivery line; named groups capture the timestamp
# pieces (month/day/time), queue id (mail_id), recipient, relay, delay
# stats, DSN code, delivery status (sent/deferred/bounced) and the
# trailing parenthesised description.
REGEX = r'(?P<month>[A-Z][a-z]{2}) (?P<day>[0-9]{,2}) ' \
    + r'(?P<time>[0-9]{2}:[0-9]{2}:[0-9]{2}) mail postfix/[a-z]+\[[0-9]+\]: ' \
    + r'(?P<mail_id>[A-Z0-9]+): to=<(?P<to_address>.*@.*)>, ' \
    + r'relay=(?P<relay>.*), delay=(?P<delay>[0-9.]+), ' \
    + r'delays=(?P<delays>[0-9][0-9/.]+), dsn=(?P<dsn>[0-9].[0-9].[0-9]), ' \
    + r'status=(?P<status>(sent|deferred|bounced)) \((?P<description>.*)\)'
PATTERN = re.compile(REGEX)
# Shape of a successful parse: {group name: captured string}.
ParseResultType = Dict[str, str]
def parse(target: str) -> Optional[ParseResultType]:
    """Parse postfix maillog including send status
    Args:
        target (str): maillog
    Returns:
        Optional[ParseResultType]: return the following dict if match
        {
            'month': 'Aug',
            'day': '1',
            'time': '10:00:00',
            'mail_id': '677RGS0',
            'to_address': 'dummy@gmail.com',
            'relay': 'local',
            'delay': '0.06',
            'delays': '0.06/0.01/0/0',
            'dsn': '2.0.0',
            'status': 'sent',
            'description': 'delivered to maildir'
        }
    """
    match_obj = re.search(PATTERN, target)
    if match_obj is None:
        # Not a delivery-status line (or the format drifted).
        return None
    result = match_obj.groupdict()
    # ParseResult folds month/day/time into a single `datetime` string.
    return ParseResult(**result).to_dict()
@dataclass
class ParseResult:
month: InitVar[str]
day: InitVar[str]
time: InitVar[str]
mail_id: str
to_address: str
relay: str
delay: str
delays: str
dsn: str
status: str
description: str
datetime: str = field(init=False)
def __post_init__(self, month: str, day: str, time: str) -> None:
self.datetime = self.convert2dateime(month, day, time)
def to_dict(self) -> ParseResultType:
return asdict(self)
@staticmethod
def convert2dateime(month: str, day: str, time: str) -> str:
tmp = datetime.strptime(f'{month}{day}{time}', '%b%d%H:%M:%S')
return tmp.replace(year=datetime.now().year).strftime('%Y%m%d%H%M%S')
| 339 | 338 | 22 |
7cf375472392576f6008428cec7b6cbba41b78fe | 1,060 | py | Python | frontend/auth_providers/jlu.py | MetLee/hackergame | 571b5407e0644169a2f9b3907a0a1d93138ba436 | [
"MIT"
] | 48 | 2018-09-30T11:07:52.000Z | 2021-12-07T03:32:59.000Z | frontend/auth_providers/jlu.py | MetLee/hackergame | 571b5407e0644169a2f9b3907a0a1d93138ba436 | [
"MIT"
] | 100 | 2018-10-13T18:37:25.000Z | 2021-11-11T12:14:45.000Z | frontend/auth_providers/jlu.py | MetLee/hackergame | 571b5407e0644169a2f9b3907a0a1d93138ba436 | [
"MIT"
] | 11 | 2018-10-08T14:59:33.000Z | 2022-03-02T03:21:09.000Z | from datetime import timedelta
from django.core.mail import EmailMessage
from django.shortcuts import redirect
from django.urls import path
from .base import BaseLoginView, BaseGetCodeView, DomainEmailValidator
# Routes for the JLU provider: login form and code-request endpoint.
urlpatterns = [
    path('jlu/login/', LoginView.as_view()),
    path('jlu/get_code/', GetCodeView.as_view()),
]
| 25.238095 | 70 | 0.669811 | from datetime import timedelta
from django.core.mail import EmailMessage
from django.shortcuts import redirect
from django.urls import path
from .base import BaseLoginView, BaseGetCodeView, DomainEmailValidator
class LoginView(BaseLoginView):
    """Email-verification-code login for JLU (吉林大学) accounts."""
    template_name = 'login_email.html'
    template_context = {'provider_name': '吉林大学'}
    provider = 'jlu'
    group = 'jlu'
    def post(self, request):
        """Check the submitted code; on success log in by email, then go to hub."""
        if self.check_code():
            self.login(email=self.identity)
        return redirect('hub')
    def normalize_identity(self):
        """Case-fold the address — email identities are case-insensitive."""
        return self.identity.casefold()
class GetCodeView(BaseGetCodeView):
    """Emails a one-hour login code to @mails.jlu.edu.cn addresses."""
    provider = 'jlu'
    duration = timedelta(hours=1)  # validity window of an issued code
    validate_identity = DomainEmailValidator('mails.jlu.edu.cn')
    def send(self, identity, code):
        """Deliver the verification code to *identity* by email."""
        EmailMessage(
            subject=f'Hackergame 登录校验码:{code}',
            body=f'{code}\n请使用该校验码登录 Hackergame\n',
            to=[identity],
        ).send()
# Routes for the JLU provider: login form and code-request endpoint.
urlpatterns = [
    path('jlu/login/', LoginView.as_view()),
    path('jlu/get_code/', GetCodeView.as_view()),
]
| 362 | 360 | 46 |
f8f72419bea8be608ca055380f01026d347bca8f | 5,104 | py | Python | chromeperf/src/chromeperf/pinpoint/models/exploration.py | xswz8015/infra | f956b78ce4c39cc76acdda47601b86794ae0c1ba | [
"BSD-3-Clause"
] | null | null | null | chromeperf/src/chromeperf/pinpoint/models/exploration.py | xswz8015/infra | f956b78ce4c39cc76acdda47601b86794ae0c1ba | [
"BSD-3-Clause"
] | 7 | 2022-02-15T01:11:37.000Z | 2022-03-02T12:46:13.000Z | chromeperf/src/chromeperf/pinpoint/models/exploration.py | NDevTK/chromium-infra | d38e088e158d81f7f2065a38aa1ea1894f735ec4 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Change Exploration Module
In this module we expose the speculate function, which generates a list of
potential Change instances for exploration in the effort to find candidate
Changes to identify culprits. We use a binary search within a range of changes
to identify potential culprits and expose the list of changes in between
revisions in a commit range.
If we think about the range of changes as a list of revisions, each represented
as a subscripted 'c' in the list below:
[c[0], c[1], c[2], ..., c[N]]
We can treat a binary search through the range c[0]..c[N] to be a binary tree of
subscripts in the range 0..N as shown below:
N1/2
N1/4 N3/4
N1/8 N3/8 N5/8 N7/8
....
This is useful when attempting to bisect a potentially large range of revisions
quickly when finding one or more culprit changes for performance regressions.
Being able to speculate levels ahead of the bisection range gives us a means for
trading machine resources to reduce overall time-to-culprits when bisecting
large revision ranges.
"""
import functools
__all__ = ['speculate']
def speculate(changes, change_detected, on_unknown, midpoint, levels=2):
    """Speculate on a range of changes.

    This function yields a list of tuples with the following form:

        (insertion index, change)

    Where `insertion index` refers to an index in the `changes` argument. The
    returned list is in insertion order with the property that if applied in
    the given order to the `changes` list that the resulting `changes` list is
    in a valid relative ordering of revisions to explore.

    Arguments:
    - changes: a list of Change instances.
    - change_detected: a predicate returning True whether we can detect a
      change between two Change instances, None if the result is inconclusive.
    - on_unknown: a callable invoked when change_detected returns None
      (or is inconclusive) taking both changes.
    - midpoint: a callable invoked returning the midpoint between two changes,
      returning an object of the same type as the arguments or None;
      midpoint(a, b) -> m|None where type(m) == type(a) && type(m) == type(b).
    - levels: the depth of the binary search to explore for speculation;
      default is 2.
    """
    # Fix: the original body called `speculator`, which was undefined in this
    # module (NameError for any non-empty `changes`). The reducer and the
    # binary infix traversal it needs are now defined as nested helpers so
    # this function is self-contained.
    if not changes:
        return []

    additional_changes = []

    def _infix_midpoints(change_a, change_b, depth, visit):
        # In-order (left, node, right) binary expansion of the midpoints
        # between change_a and change_b, at most `depth` levels deep. Stops
        # on subranges with no midpoint (midpoint() -> None).
        if depth == 0:
            return
        mid = midpoint(change_a, change_b)
        if mid is None:
            return
        _infix_midpoints(change_a, mid, depth - 1, visit)
        visit(mid)
        _infix_midpoints(mid, change_b, depth - 1, visit)

    def _speculator(change_a_index, change_b_index):
        # Reducer over adjacent (index, change) pairs from enumerate(changes).
        _, change_a = change_a_index
        index_b, change_b = change_b_index
        result = change_detected(change_a, change_b)
        if result is None:
            # Inconclusive comparison: let the caller decide what to do.
            on_unknown(change_a, change_b)
        elif result:
            accumulated = []

            def _insert(change):
                # Record each new change once, tagged with the insertion
                # index of the right endpoint; re-inserting at that index
                # later restores the traversal order.
                if change not in accumulated and change not in changes:
                    accumulated.append(change)
                    additional_changes.append((index_b, change))

            # Explore the space with a binary infix traversal so changes
            # are already produced in relative order.
            _infix_midpoints(change_a, change_b, levels, _insert)
        # Pass the right-hand pair along so the next reduction step always
        # compares adjacent elements of `changes`.
        return change_b_index

    # We apply the speculator on each adjacent pair of Change elements in the
    # changes we're provided.
    functools.reduce(_speculator, enumerate(changes))
    # At this point in the function, we have the additional changes in infix
    # traversal order (left, node, right), but we want to return the results
    # in stable insertion order, so we reverse this list. Elements that share
    # an insertion index are then inserted one after another, which restores
    # the traversal order in the final list. For example, inserting
    # [(0, c2), (0, c1), (0, c0)] into an empty list index-by-index yields
    # [c0, c1, c2].
    return reversed(additional_changes)
| 38.089552 | 80 | 0.675353 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Change Exploration Module
In this module we expose the speculate function, which generates a list of
potential Change instances for exploration in the effort to find candidate
Changes to identify culprits. We use a binary search within a range of changes
to identify potential culprits and expose the list of changes in between
revisions in a commit range.
If we think about the range of changes as a list of revisions, each represented
as a subscripted 'c' in the list below:
[c[0], c[1], c[2], ..., c[N]]
We can treat a binary search through the range c[0]..c[N] to be a binary tree of
subscripts in the range 0..N as shown below:
N1/2
N1/4 N3/4
N1/8 N3/8 N5/8 N7/8
....
This is useful when attempting to bisect a potentially large range of revisions
quickly when finding one or more culprit changes for performance regressions.
Being able to speculate levels ahead of the bisection range gives us a means for
trading machine resources to reduce overall time-to-culprits when bisecting
large revision ranges.
"""
import functools
__all__ = ['speculate']
def _binary_infix_traversal(change_a, change_b, levels, midpoint, callback):
    """Visit midpoints between change_a and change_b in in-order (left,
    node, right) sequence, recursing at most `levels` deep.

    Recursion stops early on any subrange whose midpoint() is None.
    """
    if levels == 0:
        return
    node = midpoint(change_a, change_b)
    if node is None:
        return
    # Left subtree, then this midpoint, then the right subtree.
    _binary_infix_traversal(change_a, node, levels - 1, midpoint, callback)
    callback(node)
    _binary_infix_traversal(node, change_b, levels - 1, midpoint, callback)
def speculate(changes, change_detected, on_unknown, midpoint, levels=2):
  """Speculate on a range of changes.

  This function yields a list of tuples with the following form:

    (insertion index, change)

  Where `insertion index` refers to an index in the `changes` argument. The
  returned list is in insertion order with the property that if applied in the
  given order to the `changes` list that the resulting `changes` list is in a
  valid relative ordering of revisions to explore.

  Arguments:
  - changes: a list of Change instances.
  - change_detected: a predicate returning True whether we can detect a change
    between two Change instances, None if the result is inconclusive.
  - on_unknown: a callable invoked when change_detected returns None
    (or is inconclusive) taking both changes.
  - midpoint: a callable invoked returning the midpoint between two changes,
    returning an object of the same type as the arguments or None;
    midpoint(a, b) -> m|None where type(m) == type(a) && type(m) == type(b).
  - levels: the depth of the binary search to explore for speculation; default
    is 2.
  """
  # Nothing to speculate on for an empty range.
  if not changes:
    return []

  additional_changes = []

  # Reducer applied over adjacent (index, change) pairs: when a change is
  # detected between the pair, speculatively expand the revisions in between.
  def speculator(change_a_index, change_b_index):
    _, change_a = change_a_index
    index_b, change_b = change_b_index
    result = change_detected(change_a, change_b)
    if result is None:
      # Inconclusive comparison; hand both endpoints to the caller's handler.
      on_unknown(change_a, change_b)
    elif result:
      accumulated_changes = []

      # This inserter is used to capture the change and the index in
      # `changes` to which the found change is to be inserted.
      def Inserter(change):
        # We only add changes that we've not encountered yet in this
        # traversal.
        if change not in accumulated_changes and change not in changes:
          accumulated_changes.append(change)
          additional_changes.append(tuple([index_b, change]))

      # We explore the space with a binary infix traversal, so that we can
      # get the changes already in insertion order.
      _binary_infix_traversal(change_a, change_b, levels, midpoint,
                              Inserter)

    # We return the change_b_index so that the next invocation of this
    # reducer function will always get the second argument provided to this
    # call as the first argument.
    return change_b_index

  # We apply the speculator on each adjacent pair of Change elements in the
  # changes we're provided.
  functools.reduce(speculator, enumerate(changes))

  # At this point in the function, we have the additional changes in infix
  # traversal order (left, node, right), but we want to return the results in
  # stable insertion order so we reverse this list. This way, we have the
  # elements that will fall in the same insertion index to be inserted at the
  # same index one after another, which will restore the traversal order in
  # the final list.
  #
  # For example:
  #
  #   c = [(0, change2), (0, change1), (0, change0)]
  #
  # When inserted to an empty list by insertion index:
  #
  #   a = []
  #   for index, change in c:
  #     a.insert(index, change)
  #
  # We end up with:
  #
  #   a = [change0, change1, change2]
  return reversed(additional_changes)
| 1,588 | 0 | 50 |
113cc65652e6b0675b74d28efec8e5632845e4d4 | 1,255 | py | Python | Chatbot.py | nirajp786/chatbot | e26ab7fb53e675ae23d6636735c48be3fb8c5174 | [
"MIT"
] | null | null | null | Chatbot.py | nirajp786/chatbot | e26ab7fb53e675ae23d6636735c48be3fb8c5174 | [
"MIT"
] | null | null | null | Chatbot.py | nirajp786/chatbot | e26ab7fb53e675ae23d6636735c48be3fb8c5174 | [
"MIT"
] | null | null | null | # This program is by: Nidhi Patel
# It uses: Python & Tkinter
# It answers user's question using an api system.
from tkinter import *
import wolframalpha
# This program is for the main background of the function
root = Tk()
root.title("Chatbot")
root.geometry('400x400')
# tells the user to enter the question
theLabel = Label(root, text=" Enter your question here:")
theLabel.grid(row=1, column =1)
theLabel.config(font=("Times", 17))
entry = Entry(root, bg='light grey', font=35)
entry.place(x = 10, y= 50, height= 40, width = 290)
# NOTE(review): answer() is not defined in this excerpt — confirm it exists
# elsewhere in the module before the button is clicked.
button = Button(root, text ="Enter", width = 8, font=20, height= 1, command=lambda:answer())
button.place(x=310,y=50)
# the output system of the code.
output = Text(bg='light grey')
output.config(state=DISABLED)  # read-only until a reply is written
output.place(x=10, y= 100, height = 290, width= 360)
root.mainloop()
| 26.145833 | 92 | 0.675697 | # This program is by: Nidhi Patel
# It uses: Python & Tkinter
# It answers user's question using an api system.
from tkinter import *
import wolframalpha
def answer():
    """Read the user's question, query Wolfram|Alpha, and display the reply."""
    output.config(state=NORMAL)  # temporarily make the output widget writable
    question = entry.get()
    entry.delete(0, END)  # clear the input box after reading it
    print (question)
    output.insert(END, ("User's Question: " + question + '\n'))
    # NOTE(review): API credential is hard-coded in source; consider loading
    # it from configuration instead.
    app_id = "ELVQP7-XW8G556KRV"
    client = wolframalpha.Client(app_id)
    res = client.query(question)
    # NOTE(review): this local name shadows the enclosing function `answer`.
    answer = next(res.results).text
    output.insert(END, ("Chatbot: " + answer + '\n'))
    output.config(font=("Times", 15))
# This program is for the main background of the function
root = Tk()
root.title("Chatbot")
root.geometry('400x400')
# tells the user to enter the question
theLabel = Label(root, text=" Enter your question here:")
theLabel.grid(row=1, column =1)
theLabel.config(font=("Times", 17))
entry = Entry(root, bg='light grey', font=35)
entry.place(x = 10, y= 50, height= 40, width = 290)
# "Enter" button triggers the answer() handler defined above.
button = Button(root, text ="Enter", width = 8, font=20, height= 1, command=lambda:answer())
button.place(x=310,y=50)
# the output system of the code.
output = Text(bg='light grey')
output.config(state=DISABLED)  # read-only until answer() writes a reply
output.place(x=10, y= 100, height = 290, width= 360)
root.mainloop()
| 404 | 0 | 23 |
c74db7e7cedba74d55da00210fe82022f537abcb | 3,108 | py | Python | lib/netty/protobuf/python/google/protobuf/internal/proto_builder_test.py | meghana0507/grpc-java-poll | b35805a7265e5d6d9468ab17bc33b92ed00ecd97 | [
"BSD-3-Clause"
] | 1 | 2017-08-16T15:00:29.000Z | 2017-08-16T15:00:29.000Z | lib/netty/protobuf/python/google/protobuf/internal/proto_builder_test.py | meghana0507/grpc-java-poll | b35805a7265e5d6d9468ab17bc33b92ed00ecd97 | [
"BSD-3-Clause"
] | null | null | null | lib/netty/protobuf/python/google/protobuf/internal/proto_builder_test.py | meghana0507/grpc-java-poll | b35805a7265e5d6d9468ab17bc33b92ed00ecd97 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.proto_builder."""
from google.apputils import basetest
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor_pool
from google.protobuf import proto_builder
from google.protobuf import text_format
if __name__ == '__main__':
basetest.main()
| 39.846154 | 72 | 0.754826 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.proto_builder."""
from google.apputils import basetest
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor_pool
from google.protobuf import proto_builder
from google.protobuf import text_format
class ProtoBuilderTest(basetest.TestCase):
  """Tests for proto_builder.MakeSimpleProtoClass."""

  def setUp(self):
    # Shared field map for the tests: field name -> proto field type.
    self._fields = {
        'foo': descriptor_pb2.FieldDescriptorProto.TYPE_INT64,
        'bar': descriptor_pb2.FieldDescriptorProto.TYPE_STRING,
        }

  def testMakeSimpleProtoClass(self):
    """Test that we can create a proto class."""
    proto_cls = proto_builder.MakeSimpleProtoClass(
        self._fields,
        full_name='net.proto2.python.public.proto_builder_test.Test')
    proto = proto_cls()
    proto.foo = 12345
    proto.bar = 'asdf'
    # Text format emits fields sorted by name, hence 'bar' before 'foo'.
    self.assertMultiLineEqual(
        'bar: "asdf"\nfoo: 12345\n', text_format.MessageToString(proto))

  def testMakeSameProtoClassTwice(self):
    """Test that the DescriptorPool is used."""
    pool = descriptor_pool.DescriptorPool()
    proto_cls1 = proto_builder.MakeSimpleProtoClass(
        self._fields,
        full_name='net.proto2.python.public.proto_builder_test.Test',
        pool=pool)
    proto_cls2 = proto_builder.MakeSimpleProtoClass(
        self._fields,
        full_name='net.proto2.python.public.proto_builder_test.Test',
        pool=pool)
    # Same pool + same full name must yield the same cached descriptor.
    self.assertIs(proto_cls1.DESCRIPTOR, proto_cls2.DESCRIPTOR)
if __name__ == '__main__':
basetest.main()
| 153 | 977 | 23 |
4c6d1afb698d25a1b01a3e5382c71317362b9025 | 2,563 | py | Python | home/templatetags/gentlecoffee_tags.py | taedori81/gentlecoffee | 62de8ff17c934afdfde188ecc6b9dbfb400d0682 | [
"BSD-3-Clause"
] | null | null | null | home/templatetags/gentlecoffee_tags.py | taedori81/gentlecoffee | 62de8ff17c934afdfde188ecc6b9dbfb400d0682 | [
"BSD-3-Clause"
] | null | null | null | home/templatetags/gentlecoffee_tags.py | taedori81/gentlecoffee | 62de8ff17c934afdfde188ecc6b9dbfb400d0682 | [
"BSD-3-Clause"
] | null | null | null | from django import template
from ..models import Area
register = template.Library()
@register.assignment_tag(takes_context=True)
@register.inclusion_tag("home/navbar/navbar.html", takes_context=True)
@register.inclusion_tag('home/navbar/navbar_dropdown.html', takes_context=True)
@register.inclusion_tag('home/include/side_menu_area.html', takes_context=True)
@register.filter
def url_param_dict_to_list(url_items_dict):
    """Join (key, value) pairs into a trailing URL query string, dropping "page"."""
    fragments = ["&%s=%s" % (name, val)
                 for name, val in url_items_dict
                 if name != "page"]
    return "".join(fragments)
@register.filter
@register.inclusion_tag('home/include/blog_item.html', takes_context=True)
| 26.978947 | 96 | 0.657433 | from django import template
from ..models import Area
register = template.Library()
@register.assignment_tag(takes_context=True)
def get_site_root(context):
    """Return the root page of the site that served the current request."""
    current_request = context['request']
    return current_request.site.root_page
@register.inclusion_tag("home/navbar/navbar.html", takes_context=True)
def display_navbar(context):
    """Build the template context for the top navigation bar.

    Marks each top-level menu item with:
      - show_dropdown: whether it has live child pages that appear in menus.
      - active: whether the page currently being rendered lives under it.
    """
    parent = get_site_root(context)
    # `self` is only present when a real page is being rendered.
    # Fix: dict.has_key() was removed in Python 3; the `in` operator is
    # equivalent and works on both Python 2 and 3.
    if 'self' in context:
        calling_page = context['self']
    else:
        calling_page = None
    menuitems = parent.get_children().live().in_menu()
    for menuitem in menuitems:
        menuitem.show_dropdown = menuitem.get_children().live().in_menu().exists()
        menuitem.active = (calling_page.url.startswith(menuitem.url) if calling_page else False)
    return {
        "calling_page": calling_page,
        "menuitems": menuitems,
        "request": context['request']
    }
@register.inclusion_tag('home/navbar/navbar_dropdown.html', takes_context=True)
def display_navbar_dropdown(context, parent):
    """Build the template context for one navbar dropdown menu."""
    children = parent.get_children()
    visible_children = children.live().in_menu()
    return {
        "parent": parent,
        "menuitems_children": visible_children,
        "request": context['request'],
    }
@register.inclusion_tag('home/include/side_menu_area.html', takes_context=True)
def display_side_menu_area(context):
    """Build the template context for the area side menu."""
    request = context['request']
    areas = Area.objects.all()
    # TODO Need to build href for filter the page
    area_items = []
    for area in areas:
        label = area.area_name
        area_items.append({"name": label, "href": '?area=' + label})
    return {
        "request": request,
        "areas": areas,
        "area_items": area_items
    }
@register.filter
def url_param_dict_to_list(url_items_dict):
    """Serialize (key, value) pairs as URL parameters, skipping the "page" key."""
    rendered = ""
    for name, val in url_items_dict:
        if name == "page":
            continue
        rendered += "&%s=%s" % (name, val)
    return rendered
@register.filter
def get_item(dictionary, key):
    """Template filter: look up `key` in `dictionary`, returning None if absent."""
    return dictionary.get(key, None)
@register.inclusion_tag('home/include/blog_item.html', takes_context=True)
def display_blog_list(context, blog_list):
    """Attach heading/photo shortcuts to each blog and build render context."""
    annotated = []
    for entry in blog_list:
        # Inspect each piece of the blog body; the last heading/photo wins.
        for piece in entry.body:
            if piece.block_type == 'heading':
                entry.heading = piece.value
            if piece.block_type == 'photo':
                entry.photo = piece.value
        annotated.append(entry)
    return {
        "request": context['request'],
        "blogs": annotated,
    }
| 1,683 | 0 | 132 |
6ba8bf8ca0064bf23aef3ec53101017ab6a070f1 | 4,198 | py | Python | patent/patent_crawber.py | RogerJTX/kbpPipeline_ExpertSystem | 53f998ccb04ec9e932363818e037c3c5b225f05b | [
"MIT"
] | 3 | 2021-01-29T17:09:55.000Z | 2021-07-12T11:37:29.000Z | patent/patent_crawber.py | RogerJTX/kbpPipeline_ExpertSystem | 53f998ccb04ec9e932363818e037c3c5b225f05b | [
"MIT"
] | null | null | null | patent/patent_crawber.py | RogerJTX/kbpPipeline_ExpertSystem | 53f998ccb04ec9e932363818e037c3c5b225f05b | [
"MIT"
] | 1 | 2022-02-11T11:09:03.000Z | 2022-02-11T11:09:03.000Z | #!/home/apollo/anaconda3/bin/python3
#-*- coding: utf-8 -*-
#******************************************************************************
# Author : jtx
# Last modified: 2020-04-13 15:34
# Filename : patent_crawber.py
# Description : res_kb_patent专利信息生成,目前是转移企业相关的专利信息,实际这一步是用爬虫替换
#******************************************************************************
import configparser
import sys
from pymongo import MongoClient
from pymongo import errors
import pymysql
from dateutil import parser
from datetime import datetime, date, timedelta
import json
import logging
import re
import copy
import os
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
# Paths are resolved relative to this file; config.ini lives one level up.
dir_path = os.path.dirname(__file__)
kbp_path = os.path.dirname(dir_path)
config_path = os.path.join(kbp_path,"config.ini")
if __name__=="__main__":
    # NOTE(review): PatentCrawber is not defined in this excerpt — confirm it
    # is defined elsewhere in the module.
    pc = PatentCrawber()
    pc.process()
| 31.80303 | 126 | 0.610052 | #!/home/apollo/anaconda3/bin/python3
#-*- coding: utf-8 -*-
#******************************************************************************
# Author : jtx
# Last modified: 2020-04-13 15:34
# Filename : patent_crawber.py
# Description : res_kb_patent专利信息生成,目前是转移企业相关的专利信息,实际这一步是用爬虫替换
#******************************************************************************
import configparser
import sys
from pymongo import MongoClient
from pymongo import errors
import pymysql
from dateutil import parser
from datetime import datetime, date, timedelta
import json
import logging
import re
import copy
import os
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')
logger = logging.getLogger(__name__)
dir_path = os.path.dirname(__file__)
kbp_path = os.path.dirname(dir_path)
config_path = os.path.join(kbp_path,"config.ini")
class PatentCrawber(object):
    """Copies company-related patent records from the crawler patent
    collection into the res_kb_patent knowledge-base collection.

    Per the file header, this transfer step stands in for a real crawler.
    """

    def __init__(self):
        # MongoDB connection settings come from config.ini next to the package.
        self.config = configparser.ConfigParser()
        self.config.read(config_path)
        self.mongo_con = MongoClient(self.config.get("mongo","mongo_url"))
        # self.company_ai = self.mongo_con[self.config.get("mongo","info_db")][self.config.get("mongo","company_ai")]
        self.crawber_patent = self.mongo_con[self.config.get("mongo","crawber_db")][self.config.get("mongo","crawber_patent")]
        self.res_kb_patent = self.mongo_con[self.config.get("mongo","res_kb_db")][self.config.get("mongo","res_kb_patent")]
        # Running total of patent documents copied by process().
        self.transfer_count = 0

    def company_from_ai(self):
        '''
        Switch the data source to all AI companies; returns sorted names.

        NOTE(review): self.company_ai is commented out in __init__, so
        calling this method as-is raises AttributeError — confirm before
        re-enabling this data source.
        '''
        process_companys = set()
        query = {
            "sector_ai":{
                "$ne":[]
            }
        }
        company_res = self.company_ai.find(query)
        for company in company_res:
            process_companys.add(company["company_name"])
        sorted_process_companys = list(process_companys)
        sorted_process_companys.sort()
        return sorted_process_companys

    def company_from_file(self):
        '''Read the candidate company names (one per line) from necar_companys.txt.'''
        with open(os.path.join(kbp_path,"necar_companys.txt"), "r") as fr:
            companys_read = fr.readlines()
        companys = list(map(lambda x:x.strip(), companys_read))
        return companys

    def close_connection(self):
        # Close the shared MongoDB client, if one was created.
        if self.mongo_con:
            self.mongo_con.close()

    def process_company_patent(self, company:str):
        '''Collect this company's patents from the crawler collection,
        skipping companies already present in res_kb_patent.'''
        process_res = []
        ##### When migrating by industry, first check whether the knowledge
        ##### base already has this company; if so, do not process it again.
        exist = self.res_kb_patent.find_one({"search_key":company})
        if not exist:
            zn_company = company.replace("(","(").replace(")",")")
            exist = self.res_kb_patent.find_one({"search_key":zn_company})
        if exist:
            return process_res
        #### Dedup check complete.
        patents_res = self.crawber_patent.find({"search_key":company})
        if not patents_res:
            zn_company = company.replace("(","(").replace(")",")") # retry with full-width (Chinese) parentheses
            patents_res = self.crawber_patent.find({"search_key":zn_company})
        logger.info("企业[{}]查询到目前专利库中有[{}]个专利信息".format(company,patents_res.count()))
        for patent in patents_res:
            process_res.append(patent)
        return process_res

    def process(self):
        # Obtain the company names required by the business side.
        # process_companys = self.company_from_ai()
        process_companys = self.company_from_file()
        logger.info("企业名单获取完成,共[{}]个".format(len(process_companys)))
        # TODO: obtain companies from other channels as well.
        # Add knowledge-base records for the recommended AI companies.
        for i, company in enumerate(process_companys):
            if( i%100 == 0):
                logger.info("开始处理第{}个企业".format(i))
            logger.info("查询推荐企业[{}]".format(company))
            company_patents = self.process_company_patent(company)
            if company_patents:
                insert_res = self.res_kb_patent.insert_many(company_patents)
                self.transfer_count += len(insert_res.inserted_ids)
        self.close_connection()
        logger.info("企业{}个,迁移专利信息[{}]个".format(len(process_companys),self.transfer_count))
if __name__=="__main__":
    # Entry point: run the full patent transfer once.
    pc = PatentCrawber()
    pc.process()
| 2,605 | 916 | 23 |
2f547e94de102f660ba77d8f971a6178ac8f9a78 | 319 | py | Python | 1 - Beginner/1142.py | andrematte/uri-submissions | 796e7fee56650d9e882880318d6e7734038be2dc | [
"MIT"
] | 1 | 2020-09-09T12:48:09.000Z | 2020-09-09T12:48:09.000Z | 1 - Beginner/1142.py | andrematte/uri-submissions | 796e7fee56650d9e882880318d6e7734038be2dc | [
"MIT"
] | null | null | null | 1 - Beginner/1142.py | andrematte/uri-submissions | 796e7fee56650d9e882880318d6e7734038be2dc | [
"MIT"
] | null | null | null | # URI Online Judge 1142
# Read the number of verses to print.
X = int(input())
count = 1
for x in range(1,X+1):
    String = ''
    # Each verse is three increasing numbers followed by "PUM"; the counter
    # also advances on the PUM beat, so verses continue from 1, 5, 9, ...
    for i in range(4):
        if i == 0:
            String += "{}".format(count)
        elif i == 3:
            String += " PUM"
        else:
            String += " {}".format(count)
        count += 1
    print(String)
| 17.722222 | 41 | 0.435737 | # URI Online Judge 1142
# Read the number of verses to print.
X = int(input())
count = 1
for x in range(1,X+1):
    String = ''
    # Each verse is three increasing numbers followed by "PUM"; the counter
    # also advances on the PUM beat, so verses continue from 1, 5, 9, ...
    for i in range(4):
        if i == 0:
            String += "{}".format(count)
        elif i == 3:
            String += " PUM"
        else:
            String += " {}".format(count)
        count += 1
    print(String)
| 0 | 0 | 0 |
5fc8be04bd155c30474bbde4186211cf949d9697 | 1,533 | py | Python | application/user.py | RB387/GorbInDocks | e7a76eeae5e2bb781f5465fcbb67ed99dba7bafe | [
"Unlicense"
] | null | null | null | application/user.py | RB387/GorbInDocks | e7a76eeae5e2bb781f5465fcbb67ed99dba7bafe | [
"Unlicense"
] | 1 | 2019-10-29T17:24:53.000Z | 2019-10-29T17:25:52.000Z | application/user.py | RB387/GorbInDocks | e7a76eeae5e2bb781f5465fcbb67ed99dba7bafe | [
"Unlicense"
] | 1 | 2019-10-19T12:33:01.000Z | 2019-10-19T12:33:01.000Z | from application import app
from application import decorators
from flask import request, session, redirect, url_for, Blueprint, render_template
from run import gt
import gorbin_tools2
page = Blueprint('user', __name__,
template_folder='templates')
@page.route('/user', methods = ['GET', 'POST'])
@page.route('/user/<user_id>', methods = ['GET', 'POST'])
@decorators.login_required
@decorators.check_session | 36.5 | 90 | 0.714286 | from application import app
from application import decorators
from flask import request, session, redirect, url_for, Blueprint, render_template
from run import gt
import gorbin_tools2
page = Blueprint('user', __name__,
template_folder='templates')
@page.route('/user', methods = ['GET', 'POST'])
@page.route('/user/<user_id>', methods = ['GET', 'POST'])
@decorators.login_required
@decorators.check_session
def user():
    """Account settings page: lets the signed-in user change email/password.

    GET renders the page; POST handles either the 'change_mail' or the
    'change_pass' form action and reports the outcome via error_message.
    """
    error_message = None
    if request.method == "POST":
        action = request.form.get('action')
        if action == 'change_mail':
            new_email = request.form.get('change_email_val')
            if new_email:
                # gt.check_email is truthy when the address is already taken.
                if not gt.check_email(new_email):
                    gt.update_user_mail(login = session['login'], email = new_email)
                    # Fix: user-facing typo 'succesfully' -> 'successfully'.
                    error_message = 'Email was changed successfully!'
                else:
                    error_message = 'This email is already taken!'
        elif action == 'change_pass':
            old_password = request.form.get('password_val_old')
            new_password = request.form.get('change_password_val')
            if old_password and new_password:
                # Verify the old password before storing the new hash.
                if gt.get_user(login = session['login'], pas = gorbin_tools2.hash(old_password)):
                    gt.update_user_pass(login = session['login'], pas = gorbin_tools2.hash(new_password))
                    session['current_password'] = gorbin_tools2.hash(new_password)
                    error_message = 'Password was changed successfully!'
                else:
                    error_message = 'Wrong password!'
    return render_template("user.html",
                            current_mail = gt.get_user_data(session['login'])['email'],
                            error_message = error_message)
8b080e47687254e9dc5696d54053f9d2b09bcdd2 | 3,708 | py | Python | getconfig.py | sondercoder/hass-gelight | 0002a582b9cf5d2d973f07cb46cefae392bafb54 | [
"MIT"
] | 25 | 2020-11-07T03:42:07.000Z | 2022-03-04T19:52:00.000Z | getconfig.py | sondercoder/hass-gelight | 0002a582b9cf5d2d973f07cb46cefae392bafb54 | [
"MIT"
] | 6 | 2020-11-25T18:49:03.000Z | 2022-02-15T05:57:02.000Z | getconfig.py | sondercoder/hass-gelight | 0002a582b9cf5d2d973f07cb46cefae392bafb54 | [
"MIT"
] | 16 | 2020-11-07T03:43:33.000Z | 2022-03-04T20:13:26.000Z | #!/usr/bin/env python3
# Adoptd from https://github.com/google/python-laurel/blob/master/laurel/__init__.py
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import getpass
import json
import random
API_TIMEOUT = 5
# https://github.com/unixpickle/cbyge/blob/main/login.go
# https://github.com/juanboro/cync2mqtt/blob/main/src/acync/__init__.py
def authenticate():
    """Authenticate with the API and get a token.

    Requests an emailed two-factor code for the module-level `username`,
    prompts the operator for it, then exchanges username/password/code for
    credentials.

    Returns:
        (access_token, user_id) tuple on success.
    """
    # Step 1: ask the service to email a verification code.
    API_AUTH = "https://api.gelighting.com/v2/two_factor/email/verifycode"
    auth_data = {'corp_id': "1007d2ad150c4000", 'email': username,"local_lang": "en-us"}
    r = requests.post(API_AUTH, json=auth_data, timeout=API_TIMEOUT)
    code=input("Enter emailed code:")
    # Step 2: trade username/password/code for an access token.
    API_AUTH = "https://api.gelighting.com/v2/user_auth/two_factor"
    auth_data = {'corp_id': "1007d2ad150c4000", 'email': username, 'password': password, "two_factor": code, "resource": randomLoginResource()}
    r = requests.post(API_AUTH, json=auth_data, timeout=API_TIMEOUT)
    try:
        return (r.json()['access_token'], r.json()['user_id'])
    except KeyError:
        # NOTE(review): LaurelException is not defined or imported in this
        # file (it comes from the upstream laurel project), so this line
        # raises NameError instead — confirm and substitute a defined
        # exception type.
        raise(LaurelException('API authentication failed'))
def get_devices(auth_token, user):
    """Get a list of devices for a particular user.

    Returns the decoded JSON response from the subscription-devices endpoint.
    """
    API_DEVICES = "https://api2.xlink.cn/v2/user/{user}/subscribe/devices"
    headers = {'Access-Token': auth_token}
    r = requests.get(API_DEVICES.format(user=user), headers=headers,
                     timeout=API_TIMEOUT)
    return r.json()
def get_properties(auth_token, product_id, device_id):
    """Get properties for a single device.

    Returns the decoded JSON property document for the given product/device.
    """
    API_DEVICE_INFO = "https://api2.xlink.cn/v2/product/{product_id}/device/{device_id}/property"
    headers = {'Access-Token': auth_token}
    r = requests.get(API_DEVICE_INFO.format(product_id=product_id, device_id=device_id), headers=headers, timeout=API_TIMEOUT)
    return r.json()
# Script body: prompt for credentials, then print a YAML-style "light:"
# platform block for every discovered device and bulb.
username = input("Cync Username/Email:")
password=getpass.getpass()
access_token, user_id = authenticate()
print("light:")
devices = get_devices(access_token, user_id)
errormsg = ""
for device in devices:
    product_id = device['product_id']
    device_id = device['id']
    # NOTE(review): this rebinds the module-level `username` used by
    # authenticate(); harmless only because authentication already happened.
    username = device['mac']
    access_key = device['access_key']
    print(" - platform: gelight")
    print(" password: {}".format(access_key))
    print(" username: {}".format(username))
    print(" lights:")
    device_info = get_properties(access_token, product_id, device_id)
    try:
        for bulb in device_info['bulbsArray']:
            # NOTE(review): `id` shadows the builtin within this loop.
            id = int(bulb['deviceID']) % 1000
            # Split the 12-hex-digit MAC into octets, reverse, join with colons.
            mac = [bulb['mac'][i:i+2] for i in range(0, 12, 2)]
            mac = "%s:%s:%s:%s:%s:%s" % (mac[5], mac[4], mac[3], mac[2], mac[1], mac[0])
            name = bulb['displayName']
            device_type = bulb['deviceType']
            print(" - id: {}".format(id))
            print(" mac: {}".format(mac))
            print(" name: {}".format(name))
            print(" type: {}".format(device_type))
    except KeyError:
        errormsg+="Warning: Missing bulb info.\n"
print(errormsg)
| 43.116279 | 143 | 0.670712 | #!/usr/bin/env python3
# Adoptd from https://github.com/google/python-laurel/blob/master/laurel/__init__.py
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
import getpass
import json
import random
API_TIMEOUT = 5
# https://github.com/unixpickle/cbyge/blob/main/login.go
# https://github.com/juanboro/cync2mqtt/blob/main/src/acync/__init__.py
def randomLoginResource():
    """Return a random 16-character lowercase ASCII "resource" identifier.

    Fixes an off-by-one: random.randint is inclusive on both ends, so the
    previous randint(0, 26) could produce chr(ord('a') + 26) == '{', a
    non-alphabetic character.
    """
    return ''.join(chr(ord('a') + random.randint(0, 25)) for _ in range(16))
def authenticate():
    """Authenticate with the API and get a token.

    Requests an emailed two-factor code for the module-level `username`,
    prompts the operator for it, then exchanges username/password/code for
    credentials.

    Returns:
        (access_token, user_id) tuple on success.
    """
    # Step 1: ask the service to email a verification code.
    API_AUTH = "https://api.gelighting.com/v2/two_factor/email/verifycode"
    auth_data = {'corp_id': "1007d2ad150c4000", 'email': username,"local_lang": "en-us"}
    r = requests.post(API_AUTH, json=auth_data, timeout=API_TIMEOUT)
    code=input("Enter emailed code:")
    # Step 2: trade username/password/code for an access token.
    API_AUTH = "https://api.gelighting.com/v2/user_auth/two_factor"
    auth_data = {'corp_id': "1007d2ad150c4000", 'email': username, 'password': password, "two_factor": code, "resource": randomLoginResource()}
    r = requests.post(API_AUTH, json=auth_data, timeout=API_TIMEOUT)
    try:
        return (r.json()['access_token'], r.json()['user_id'])
    except KeyError:
        # NOTE(review): LaurelException is not defined or imported in this
        # file (it comes from the upstream laurel project), so this line
        # raises NameError instead — confirm and substitute a defined
        # exception type.
        raise(LaurelException('API authentication failed'))
def get_devices(auth_token, user):
    """Get a list of devices for a particular user.

    Returns the decoded JSON response from the subscription-devices endpoint.
    """
    API_DEVICES = "https://api2.xlink.cn/v2/user/{user}/subscribe/devices"
    headers = {'Access-Token': auth_token}
    r = requests.get(API_DEVICES.format(user=user), headers=headers,
                     timeout=API_TIMEOUT)
    return r.json()
def get_properties(auth_token, product_id, device_id):
    """Get properties for a single device.

    Returns the decoded JSON property document for the given product/device.
    """
    API_DEVICE_INFO = "https://api2.xlink.cn/v2/product/{product_id}/device/{device_id}/property"
    headers = {'Access-Token': auth_token}
    r = requests.get(API_DEVICE_INFO.format(product_id=product_id, device_id=device_id), headers=headers, timeout=API_TIMEOUT)
    return r.json()
# Script body: prompt for credentials, then print a YAML-style "light:"
# platform block for every discovered device and bulb.
username = input("Cync Username/Email:")
password=getpass.getpass()
access_token, user_id = authenticate()
print("light:")
devices = get_devices(access_token, user_id)
errormsg = ""
for device in devices:
    product_id = device['product_id']
    device_id = device['id']
    # NOTE(review): this rebinds the module-level `username` used by
    # authenticate(); harmless only because authentication already happened.
    username = device['mac']
    access_key = device['access_key']
    print(" - platform: gelight")
    print(" password: {}".format(access_key))
    print(" username: {}".format(username))
    print(" lights:")
    device_info = get_properties(access_token, product_id, device_id)
    try:
        for bulb in device_info['bulbsArray']:
            # NOTE(review): `id` shadows the builtin within this loop.
            id = int(bulb['deviceID']) % 1000
            # Split the 12-hex-digit MAC into octets, reverse, join with colons.
            mac = [bulb['mac'][i:i+2] for i in range(0, 12, 2)]
            mac = "%s:%s:%s:%s:%s:%s" % (mac[5], mac[4], mac[3], mac[2], mac[1], mac[0])
            name = bulb['displayName']
            device_type = bulb['deviceType']
            print(" - id: {}".format(id))
            print(" mac: {}".format(mac))
            print(" name: {}".format(name))
            print(" type: {}".format(device_type))
    except KeyError:
        errormsg+="Warning: Missing bulb info.\n"
print(errormsg)
| 83 | 0 | 23 |
379ca47a756b660342850b92e101bf7e49b3260d | 903 | py | Python | flask-backend/app.py | Jroc561/Model-Tester | 839c6ccd50eddd34255c0993a33e23a8ec2b2783 | [
"MIT"
] | null | null | null | flask-backend/app.py | Jroc561/Model-Tester | 839c6ccd50eddd34255c0993a33e23a8ec2b2783 | [
"MIT"
] | null | null | null | flask-backend/app.py | Jroc561/Model-Tester | 839c6ccd50eddd34255c0993a33e23a8ec2b2783 | [
"MIT"
] | null | null | null | from os import getenv
from flask import Flask, render_template, request
from dotenv import load_dotenv
from .models import to_list
| 29.129032 | 97 | 0.58804 | from os import getenv
from flask import Flask, render_template, request
from dotenv import load_dotenv
from .models import to_list
def create_app():
app = Flask(__name__)
@app.route('/')
def root():
return render_template('base.html', title="Home")
@app.route("/input", methods = ["GET", "POST"])
def input():
"""
Input user's NFT.
Return reccomended value.
"""
if request.method == "GET":
NFT_id = to_list(df)
return render_template('input.html', data=NFT_id)
if request.method == "POST":
input = request.form.get("input")
index = df.loc[df.isin([input]).any(axis=1)].index.tolist()
index = index[0]
model = song_model(index)
return render_template('output_song.html', output_song=input, recommended_song=model)
return app | 749 | 0 | 23 |
7fa35ddcfbead9c6b14953320793dda89e4a14cf | 436 | py | Python | my_back_solve.py | Michaellianeris/NSODE-Algorithms | 40788cc7b4fc889a2b0dfe72e88b0e417cafa001 | [
"MIT"
] | null | null | null | my_back_solve.py | Michaellianeris/NSODE-Algorithms | 40788cc7b4fc889a2b0dfe72e88b0e417cafa001 | [
"MIT"
] | null | null | null | my_back_solve.py | Michaellianeris/NSODE-Algorithms | 40788cc7b4fc889a2b0dfe72e88b0e417cafa001 | [
"MIT"
] | null | null | null | import numpy as np
A=np.array([[10,2,1],[0,4,2],[1,2,2]])
b=np.array([3,2,1])
x=my_back_solve(A,b)
print('x=',x)
print('A x=', np.dot(A,x))
| 16.769231 | 42 | 0.431193 | import numpy as np
A=np.array([[10,2,1],[0,4,2],[1,2,2]])
b=np.array([3,2,1])
def my_back_solve(A,b) :
n=len(A)
x=np.zeros(n)
x[n-1] = b[n-1] / A[n-1, n-1]
for k in range(n-2, -1, -1):
sums = b[k]
for j in range(k+1, n):
sums = sums - (A[k,j] * x[j])
x[k] = sums / A[k,k]
return x
x=my_back_solve(A,b)
print('x=',x)
print('A x=', np.dot(A,x))
| 246 | 0 | 25 |
f17ea5bcdcc9ea331cdec247e73b977e21c475b6 | 10,180 | py | Python | src/extract4tcn.py | knmac/LCDC_release | f977ca1cda972983cac7e33b324f07f2e1463a19 | [
"MIT"
] | 24 | 2019-09-18T09:22:08.000Z | 2022-03-08T06:47:33.000Z | src/extract4tcn.py | knmac/LCDC_release | f977ca1cda972983cac7e33b324f07f2e1463a19 | [
"MIT"
] | 6 | 2019-09-18T09:21:02.000Z | 2022-02-09T23:31:48.000Z | src/extract4tcn.py | knmac/LCDC_release | f977ca1cda972983cac7e33b324f07f2e1463a19 | [
"MIT"
] | 4 | 2020-08-06T02:05:36.000Z | 2021-12-12T07:19:17.000Z | """Extract features and save as .mat files for ED-TCN. Only used for
spatial-temporal or appearance stream (in the case of 2 stream). Do NOT use
for motion stream.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'src')))
import numpy as np
import tensorflow as tf
import skimage.io as sio
import scipy.io
from skimage.transform import resize
from progressbar import ProgressBar
from data_utils import dataset_factory
from networks import networks_factory, networks_utils
from tensorflow.contrib.framework import get_variables_to_restore
flags = tf.app.flags
FLAGS = flags.FLAGS
# paths and directories
flags.DEFINE_string('segmented_dir', None,
'segmented frames, used for reference')
flags.DEFINE_string('pretrained_model', None,
'path to the pretrained model')
flags.DEFINE_string('lbl_dict_pth', None,
'path to label dictionary')
flags.DEFINE_string('outputdir', None,
'output directory')
flags.DEFINE_string('featname', None,
'name of the layer to extract features')
# other parameters
flags.DEFINE_string('datasetname', '50salads', 'name of the dataset')
flags.DEFINE_integer('frameskip', 5, 'frame skip for downsampling')
flags.DEFINE_integer('stride', 2, 'stride after downsampling (this is testing '
'stride, not training stride)')
flags.DEFINE_string('netname', None, 'Resnet50 without offsets')
flags.DEFINE_string('bg_lbl', 'background', 'name of the background class')
flags.DEFINE_string('ext', 'png', 'extension of frame file names')
flags.DEFINE_integer('snippet_len', 1, 'extract features frame by frame')
flags.DEFINE_integer('target_height', 224, 'target image height')
flags.DEFINE_integer('target_width', 224, 'target image width')
flags.DEFINE_integer('batch_size', 1, 'number of images to feed at a time')
flags.DEFINE_integer('max_time_gap', 1, 'maximum time gap for motion loss')
flags.DEFINE_boolean('usemotionloss', False, 'no need to use motion loss')
flags.DEFINE_boolean('has_bg_lbl', True, 'has background class or not. If'
'True, the number of classes will be'
'increased by 1 from the content of'
'`labels_fname`')
flags.DEFINE_boolean('use_single_mid', False, 'use a single middle frame. Used for vanilla')
# set up mean image
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
im_mean = np.array([_R_MEAN, _G_MEAN, _B_MEAN], dtype=np.float32)
def read_n_process(im_batch):
"""Read images from given path then preprocess them
Args:
im_batch: a list of image file names
Returns:
images: preprocessed images (central crop and mean removal)
"""
# allocate memory
target_shape = [FLAGS.target_height, FLAGS.target_width, 3]
images = np.zeros([len(im_batch)] + target_shape, dtype=np.float32)
# load each image
for i in range(len(im_batch)):
# load images from filenames
img = sio.imread(im_batch[i])
# resize image
img = resize(img, (FLAGS.target_height, FLAGS.target_width, 3),
mode='constant', preserve_range=True)
# mean removal
img -= im_mean
# append
images[i] = img
return images
def make_mat_file(output_fname, all_feat, lbl_lst, expected_length=None):
"""Create mat files from given feature and label list to match Lea's
file format
Args:
all_feat: all extracted feature, ndarray (N, feat_dim)
lbl_lst: list of all labels, length of N
"""
# Expand or reduce the feature array if needed
if expected_length is not None:
N = all_feat.shape[0]
if expected_length < N:
all_feat = all_feat[:expected_length]
lbl_lst = lbl_lst[:expected_length]
elif expected_length > N:
diff = expected_length - N
left = np.ceil(diff / 2.0).astype(np.int)
right = diff - left
# Expand features
left_feat = np.expand_dims(all_feat[0], axis=0)
left_pad = np.repeat(left_feat, left, axis=0)
right_feat = np.expand_dims(all_feat[-1], axis=0)
right_pad = np.repeat(right_feat, right, axis=0)
all_feat = np.concatenate([left_pad, all_feat, right_pad], axis=0)
# Expand labels
left_lbl = np.repeat(lbl_lst[0], left)
right_lbl = np.repeat(lbl_lst[-1], right)
lbl_lst = np.concatenate([left_lbl, lbl_lst, right_lbl])
assert len(all_feat) == len(lbl_lst), \
'features and labels list must have the same length'
# Save as matlab *mat file
mdict = {'A': all_feat,
'Y': np.expand_dims(lbl_lst, axis=1)}
scipy.io.savemat(os.path.join(FLAGS.outputdir, output_fname), mdict)
pass
def main(_):
"""Main function"""
if not os.path.exists(FLAGS.outputdir):
os.makedirs(FLAGS.outputdir)
# load video list
vid_lst = os.listdir(FLAGS.segmented_dir)
vid_lst.sort()
# load label dictionary
lbl_list = open(FLAGS.lbl_dict_pth).read().splitlines()
n_classes = len(lbl_list)
if FLAGS.has_bg_lbl:
n_classes += 1
# lbl_dict = {'background': 0}
# for i in range(len(lbl_list)):
# lbl_dict[lbl_list[i]] = i + 1
# lbl_dict[lbl_list[i]] = i
# use the load_snippet_pths_test in data writer to get frames and labels
dataset_writer = dataset_factory.get_writer(FLAGS.datasetname)
writer = dataset_writer()
# set default graph
with tf.Graph().as_default():
# build network
if FLAGS.use_single_mid:
real_snippet_len = 1
else:
real_snippet_len = FLAGS.snippet_len
net = networks_factory.build_net(
FLAGS.netname, n_classes, real_snippet_len,
FLAGS.target_height, FLAGS.target_width,
max_time_gap=FLAGS.max_time_gap,
trainable=False)
# extract features
feat = net.get_output(FLAGS.featname)
# load pretrained weights
if '.pkl' in FLAGS.pretrained_model:
assign_ops = networks_utils.load_pretrained(
FLAGS.pretrained_model, ignore_missing=True,
extension='pkl',
initoffset=FLAGS.usemotionloss)
else:
variables_to_restore = get_variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
# create session
with tf.Session() as sess:
# initialization
sess.run([tf.global_variables_initializer(),
tf.local_variables_initializer()])
if '.pkl' in FLAGS.pretrained_model:
sess.run(assign_ops)
else:
init_fn(sess)
# for each video in video list
n_vids = len(vid_lst)
for vid_id in range(n_vids):
# skip existing feature files
output_fname = '{}.avi.mat'.format(vid_lst[vid_id])
if os.path.exists(os.path.join(FLAGS.outputdir, output_fname)):
print('{} already exists'.format(output_fname))
continue
# load all file names and labels
vid = vid_lst[vid_id]
print('\nExtracting features for ' + vid)
fname_lst, lbl_lst = writer.load_snippet_pths_test(
FLAGS.segmented_dir, [vid], FLAGS.lbl_dict_pth,
FLAGS.bg_lbl, FLAGS.ext, FLAGS.frameskip)
fname_lst = [x[0] for x in fname_lst]
# prefetch all frames of a video
frames_all = read_n_process(fname_lst)
# prepare indices
n_frames = len(lbl_lst)
left = FLAGS.snippet_len // 2
right = FLAGS.snippet_len - left
# go through the video frames in acausal fashion
frame_id = left
feats_per_vid = []
groundtruths_per_vid = []
pbar = ProgressBar(max_value=n_frames)
while frame_id < n_frames-right+1:
# produce inputs
snippet_batch = []
lbl_batch = []
for _ in range(FLAGS.batch_size):
if frame_id+right > n_frames:
break
if FLAGS.use_single_mid:
snippet = np.expand_dims(frames_all[frame_id], axis=0)
else:
snippet = frames_all[frame_id-left:frame_id+right]
lbl = lbl_lst[frame_id]
snippet_batch.append(snippet)
lbl_batch.append(lbl)
frame_id += FLAGS.stride
feed_dict = {net.data_raw: snippet_batch,
net.labels_raw: lbl_batch}
# extract features
feat_ = sess.run(feat, feed_dict=feed_dict)
# append data
for i in range(feat_.shape[0]):
feats_per_vid.append(feat_[i])
groundtruths_per_vid.append(lbl_batch[i])
pbar.update(frame_id)
# produce mat file for a video
feats_per_vid = np.array(feats_per_vid, dtype=np.float32)
groundtruths_per_vid = np.array(groundtruths_per_vid)
make_mat_file(output_fname, feats_per_vid,
groundtruths_per_vid,
expected_length=n_frames//FLAGS.stride)
pass
pass
pass
if __name__ == '__main__':
tf.app.run()
| 37.153285 | 92 | 0.596267 | """Extract features and save as .mat files for ED-TCN. Only used for
spatial-temporal or appearance stream (in the case of 2 stream). Do NOT use
for motion stream.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import os
import sys
sys.path.insert(
0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'src')))
import numpy as np
import tensorflow as tf
import skimage.io as sio
import scipy.io
from skimage.transform import resize
from progressbar import ProgressBar
from data_utils import dataset_factory
from networks import networks_factory, networks_utils
from tensorflow.contrib.framework import get_variables_to_restore
flags = tf.app.flags
FLAGS = flags.FLAGS
# paths and directories
flags.DEFINE_string('segmented_dir', None,
'segmented frames, used for reference')
flags.DEFINE_string('pretrained_model', None,
'path to the pretrained model')
flags.DEFINE_string('lbl_dict_pth', None,
'path to label dictionary')
flags.DEFINE_string('outputdir', None,
'output directory')
flags.DEFINE_string('featname', None,
'name of the layer to extract features')
# other parameters
flags.DEFINE_string('datasetname', '50salads', 'name of the dataset')
flags.DEFINE_integer('frameskip', 5, 'frame skip for downsampling')
flags.DEFINE_integer('stride', 2, 'stride after downsampling (this is testing '
'stride, not training stride)')
flags.DEFINE_string('netname', None, 'Resnet50 without offsets')
flags.DEFINE_string('bg_lbl', 'background', 'name of the background class')
flags.DEFINE_string('ext', 'png', 'extension of frame file names')
flags.DEFINE_integer('snippet_len', 1, 'extract features frame by frame')
flags.DEFINE_integer('target_height', 224, 'target image height')
flags.DEFINE_integer('target_width', 224, 'target image width')
flags.DEFINE_integer('batch_size', 1, 'number of images to feed at a time')
flags.DEFINE_integer('max_time_gap', 1, 'maximum time gap for motion loss')
flags.DEFINE_boolean('usemotionloss', False, 'no need to use motion loss')
flags.DEFINE_boolean('has_bg_lbl', True, 'has background class or not. If'
'True, the number of classes will be'
'increased by 1 from the content of'
'`labels_fname`')
flags.DEFINE_boolean('use_single_mid', False, 'use a single middle frame. Used for vanilla')
# set up mean image
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
im_mean = np.array([_R_MEAN, _G_MEAN, _B_MEAN], dtype=np.float32)
def read_n_process(im_batch):
"""Read images from given path then preprocess them
Args:
im_batch: a list of image file names
Returns:
images: preprocessed images (central crop and mean removal)
"""
# allocate memory
target_shape = [FLAGS.target_height, FLAGS.target_width, 3]
images = np.zeros([len(im_batch)] + target_shape, dtype=np.float32)
# load each image
for i in range(len(im_batch)):
# load images from filenames
img = sio.imread(im_batch[i])
# resize image
img = resize(img, (FLAGS.target_height, FLAGS.target_width, 3),
mode='constant', preserve_range=True)
# mean removal
img -= im_mean
# append
images[i] = img
return images
def make_mat_file(output_fname, all_feat, lbl_lst, expected_length=None):
"""Create mat files from given feature and label list to match Lea's
file format
Args:
all_feat: all extracted feature, ndarray (N, feat_dim)
lbl_lst: list of all labels, length of N
"""
# Expand or reduce the feature array if needed
if expected_length is not None:
N = all_feat.shape[0]
if expected_length < N:
all_feat = all_feat[:expected_length]
lbl_lst = lbl_lst[:expected_length]
elif expected_length > N:
diff = expected_length - N
left = np.ceil(diff / 2.0).astype(np.int)
right = diff - left
# Expand features
left_feat = np.expand_dims(all_feat[0], axis=0)
left_pad = np.repeat(left_feat, left, axis=0)
right_feat = np.expand_dims(all_feat[-1], axis=0)
right_pad = np.repeat(right_feat, right, axis=0)
all_feat = np.concatenate([left_pad, all_feat, right_pad], axis=0)
# Expand labels
left_lbl = np.repeat(lbl_lst[0], left)
right_lbl = np.repeat(lbl_lst[-1], right)
lbl_lst = np.concatenate([left_lbl, lbl_lst, right_lbl])
assert len(all_feat) == len(lbl_lst), \
'features and labels list must have the same length'
# Save as matlab *mat file
mdict = {'A': all_feat,
'Y': np.expand_dims(lbl_lst, axis=1)}
scipy.io.savemat(os.path.join(FLAGS.outputdir, output_fname), mdict)
pass
def main(_):
"""Main function"""
if not os.path.exists(FLAGS.outputdir):
os.makedirs(FLAGS.outputdir)
# load video list
vid_lst = os.listdir(FLAGS.segmented_dir)
vid_lst.sort()
# load label dictionary
lbl_list = open(FLAGS.lbl_dict_pth).read().splitlines()
n_classes = len(lbl_list)
if FLAGS.has_bg_lbl:
n_classes += 1
# lbl_dict = {'background': 0}
# for i in range(len(lbl_list)):
# lbl_dict[lbl_list[i]] = i + 1
# lbl_dict[lbl_list[i]] = i
# use the load_snippet_pths_test in data writer to get frames and labels
dataset_writer = dataset_factory.get_writer(FLAGS.datasetname)
writer = dataset_writer()
# set default graph
with tf.Graph().as_default():
# build network
if FLAGS.use_single_mid:
real_snippet_len = 1
else:
real_snippet_len = FLAGS.snippet_len
net = networks_factory.build_net(
FLAGS.netname, n_classes, real_snippet_len,
FLAGS.target_height, FLAGS.target_width,
max_time_gap=FLAGS.max_time_gap,
trainable=False)
# extract features
feat = net.get_output(FLAGS.featname)
# load pretrained weights
if '.pkl' in FLAGS.pretrained_model:
assign_ops = networks_utils.load_pretrained(
FLAGS.pretrained_model, ignore_missing=True,
extension='pkl',
initoffset=FLAGS.usemotionloss)
else:
variables_to_restore = get_variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
def init_fn(sess):
tf.logging.info('Restoring checkpoint...')
return saver.restore(sess, FLAGS.pretrained_model)
# create session
with tf.Session() as sess:
# initialization
sess.run([tf.global_variables_initializer(),
tf.local_variables_initializer()])
if '.pkl' in FLAGS.pretrained_model:
sess.run(assign_ops)
else:
init_fn(sess)
# for each video in video list
n_vids = len(vid_lst)
for vid_id in range(n_vids):
# skip existing feature files
output_fname = '{}.avi.mat'.format(vid_lst[vid_id])
if os.path.exists(os.path.join(FLAGS.outputdir, output_fname)):
print('{} already exists'.format(output_fname))
continue
# load all file names and labels
vid = vid_lst[vid_id]
print('\nExtracting features for ' + vid)
fname_lst, lbl_lst = writer.load_snippet_pths_test(
FLAGS.segmented_dir, [vid], FLAGS.lbl_dict_pth,
FLAGS.bg_lbl, FLAGS.ext, FLAGS.frameskip)
fname_lst = [x[0] for x in fname_lst]
# prefetch all frames of a video
frames_all = read_n_process(fname_lst)
# prepare indices
n_frames = len(lbl_lst)
left = FLAGS.snippet_len // 2
right = FLAGS.snippet_len - left
# go through the video frames in acausal fashion
frame_id = left
feats_per_vid = []
groundtruths_per_vid = []
pbar = ProgressBar(max_value=n_frames)
while frame_id < n_frames-right+1:
# produce inputs
snippet_batch = []
lbl_batch = []
for _ in range(FLAGS.batch_size):
if frame_id+right > n_frames:
break
if FLAGS.use_single_mid:
snippet = np.expand_dims(frames_all[frame_id], axis=0)
else:
snippet = frames_all[frame_id-left:frame_id+right]
lbl = lbl_lst[frame_id]
snippet_batch.append(snippet)
lbl_batch.append(lbl)
frame_id += FLAGS.stride
feed_dict = {net.data_raw: snippet_batch,
net.labels_raw: lbl_batch}
# extract features
feat_ = sess.run(feat, feed_dict=feed_dict)
# append data
for i in range(feat_.shape[0]):
feats_per_vid.append(feat_[i])
groundtruths_per_vid.append(lbl_batch[i])
pbar.update(frame_id)
# produce mat file for a video
feats_per_vid = np.array(feats_per_vid, dtype=np.float32)
groundtruths_per_vid = np.array(groundtruths_per_vid)
make_mat_file(output_fname, feats_per_vid,
groundtruths_per_vid,
expected_length=n_frames//FLAGS.stride)
pass
pass
pass
if __name__ == '__main__':
tf.app.run()
| 123 | 0 | 35 |
ea7f522b33f809dfdf3238edbf7c341d8f038a52 | 7,166 | py | Python | Assignment3/acts/acts_app.py | Sharath-N/SelfieLessActs | bfe163f9a98d51d566df4353fa4c9461e12d7bc8 | [
"MIT"
] | null | null | null | Assignment3/acts/acts_app.py | Sharath-N/SelfieLessActs | bfe163f9a98d51d566df4353fa4c9461e12d7bc8 | [
"MIT"
] | null | null | null | Assignment3/acts/acts_app.py | Sharath-N/SelfieLessActs | bfe163f9a98d51d566df4353fa4c9461e12d7bc8 | [
"MIT"
] | null | null | null | from flask import Flask,jsonify,request,make_response
#from flask_httpauth import HTTPBasicAuth
import sqlite3 as sql
import requests
#auth = HTTPBasicAuth()
import datetime
import base64
import binascii
from flask_cors import CORS,cross_origin
app = Flask(__name__)
CORS(app)
#3
@app.route('/api/v1/categories',methods=['GET'])
#4
@app.route('/api/v1/categories',methods=['POST'])
#5
@app.route('/api/v1/categories/<categoryName>',methods=['DELETE'])
#6 or #8
@app.route('/api/v1/categories/<categoryName>/acts',methods=['GET'])
#7
@app.route('/api/v1/categories/<categoryName>/acts/size',methods=['GET'])
#9
@app.route('/api/v1/acts/upvote',methods=['POST'])
#10
@app.route('/api/v1/acts/<actId>',methods=['DELETE'])
#11
@app.route('/api/v1/acts',methods=['POST'])
# Get total number of acts
@app.route('/api/v1/count',methods=['GET'])
@app.errorhandler(405)
'''
@auth.error_handler
def unauthorized():
return make_response(jsonify({'error': 'Unauthorized access'}), 403)
'''
if __name__=='__main__':
app.run(debug=True,host="0.0.0.0",port=80)#port=8000)
| 26.639405 | 118 | 0.671783 | from flask import Flask,jsonify,request,make_response
#from flask_httpauth import HTTPBasicAuth
import sqlite3 as sql
import requests
#auth = HTTPBasicAuth()
import datetime
import base64
import binascii
from flask_cors import CORS,cross_origin
app = Flask(__name__)
CORS(app)
#3
@app.route('/api/v1/categories',methods=['GET'])
def ret_categories():
con = sql.connect("categories.db")
con.row_factory = sql.Row
cur = con.cursor()
cur.execute("select categoryname,posts from category_data")
rows = cur.fetchall()
if(not(rows)):
return jsonify(),204
ret_cat = {}
for i in rows:
ret_cat[i['categoryname']] = i['posts']
return jsonify(ret_cat),200
#4
@app.route('/api/v1/categories',methods=['POST'])
def add_category():
if(not(request.json)):
return jsonify(),400
try:
with sql.connect("categories.db") as con:
cur = con.cursor()
cur.execute("SELECT * FROM category_data WHERE categoryname=?",(request.json[0],))
out1 = cur.fetchall()
if(len(out1)):
return jsonify(),400
cur.execute("INSERT INTO category_data (categoryname,username,posts) VALUES (?,?,?)",(request.json[0],'Rohit B',0))
con.commit()
return jsonify(),201
except:
con.rollback()
return jsonify(),400
#5
@app.route('/api/v1/categories/<categoryName>',methods=['DELETE'])
def remove_category(categoryName):
with sql.connect("categories.db") as con:
cur = con.cursor()
cur.execute("SELECT categoryname FROM category_data WHERE categoryname=?",(categoryName,))
cat_name = cur.fetchall()
if(not(cat_name)):
return jsonify(),400
cur.execute("DELETE FROM category_data WHERE categoryname=?",(categoryName,))
con.commit()
return jsonify(),200
#6 or #8
@app.route('/api/v1/categories/<categoryName>/acts',methods=['GET'])
def list_of_acts(categoryName):
if(request.args.get('start')==None):
return acts_of_category(categoryName)
else:
return list_acts_in_range(categoryName,request.args.get('start'),request.args.get('end'))
def acts_of_category(categoryName):
with sql.connect("categories.db") as con:
cur = con.cursor()
cur.execute("SELECT categoryname FROM category_data WHERE categoryname=?",(categoryName,))
cat_name = cur.fetchall()
if(not(cat_name)):
return jsonify(),400
with sql.connect("acts.db") as con:
cur = con.cursor()
cur.execute("SELECT * FROM acts_data")
total = cur.fetchall()
if(len(total)>100):
return jsonify(),413
cur.execute("SELECT * FROM acts_data WHERE categoryname=?",(categoryName,))
acts = cur.fetchall()
if(not(acts)):
return jsonify(),204
out = []
for i in range(len(acts)):
ret_acts = {
'act_id': acts[i][0],
'username': acts[i][1],
'time_stamp': acts[i][2],
'caption': acts[i][3],
'category name': acts[i][4],
'upvotes': acts[i][5],
'imgB64': acts[i][6]
}
out.append(ret_acts)
return jsonify(out),200
def list_acts_in_range(categoryName,startRange,endRange):
with sql.connect("categories.db") as con:
cur = con.cursor()
cur.execute("SELECT categoryname FROM category_data WHERE categoryname=?",(categoryName,))
out = cur.fetchall()
if(len(out)==0):
return jsonify(),400
with sql.connect("acts.db") as con:
cur = con.cursor()
cur.execute("SELECT * FROM acts_data WHERE categoryname=? ORDER BY time_stamp DESC",(categoryName,))
acts = cur.fetchall()
try:
new_acts = acts[int(startRange)-1:int(endRange)]
except:
return jsonify(),413
if(len(new_acts)>100):
return jsonify(),413
if(len(new_acts)==0):
return jsonify(),204
out = []
for i in range(len(new_acts)):
ret_acts = {
'act_id': new_acts[i][0],
'username': new_acts[i][1],
'time_stamp': new_acts[i][2],
'caption': new_acts[i][3],
'category name': new_acts[i][4],
'upvotes': new_acts[i][5],
'imgB64': new_acts[i][6]
}
out.append(ret_acts)
return jsonify(out),200
#7
@app.route('/api/v1/categories/<categoryName>/acts/size',methods=['GET'])
def size_of_category(categoryName):
with sql.connect("categories.db") as con:
cur = con.cursor()
cur.execute("SELECT categoryname,posts FROM category_data WHERE categoryname=?",(categoryName,))
out = cur.fetchall()
if(len(out)==0):
return jsonify(),400
return jsonify([out[0][1]]),200
'''
if(out[0][1]):
return jsonify([out[0][1]]),200
else:
return jsonify([out[0][1]]),204
'''
#9
@app.route('/api/v1/acts/upvote',methods=['POST'])
def upvote():
if(not(request.json)):
return jsonify(),400
with sql.connect("acts.db") as con:
cur = con.cursor()
cur.execute("SELECT * FROM acts_data WHERE act_id=?",(request.json[0],))
out = cur.fetchall()
if(len(out)==0):
return jsonify(),400
cur.execute("UPDATE acts_data SET upvote=upvote+1 WHERE act_id=?",(request.json[0],))
con.commit()
return jsonify(),200
#10
@app.route('/api/v1/acts/<actId>',methods=['DELETE'])
def remove_act(actId):
with sql.connect("acts.db") as con:
cur = con.cursor()
cur.execute("SELECT * FROM acts_data WHERE act_id=?",(actId,))
out = cur.fetchall()
if(len(out)==0):
return jsonify(),400
cur.execute("DELETE FROM acts_data WHERE act_id=?",(actId,))
con.commit()
return jsonify(),200
#11
@app.route('/api/v1/acts',methods=['POST'])
def upload_act():
if(not(request.json)):
return jsonify(),400
a_id = request.json['actId']
user = request.json['username']
ts = request.json['timestamp']
cap = request.json['caption']
cat_name = request.json['categoryName']
ib6 = request.json['imgB64']
with sql.connect("categories.db") as con:
cur = con.cursor()
cur.execute("SELECT categoryname FROM category_data WHERE categoryname=?",(cat_name,))
out = cur.fetchall()
if(len(out)==0):
return jsonify(),400
with sql.connect("acts.db") as con:
cur = con.cursor()
cur.execute("SELECT act_id FROM acts_data WHERE act_id=?",(a_id,))
out = cur.fetchall()
if(len(out)):
return jsonify(),400
try:
datetime.datetime.strptime(ts,'%d-%m-%Y:%S-%M-%H')
except:
return jsonify(),400
#3
#req = requests.get(url="http://100.24.77.155:8080/api/v1/users")
req = requests.get(url="http://127.0.0.1:8080/api/v1/users")
prev_user = req.json()
if(user not in prev_user):
return jsonify(prev_user),400
#5
if('upvote' in request.json):
return jsonify(),400
cur.execute("INSERT INTO acts_data VALUES (?,?,?,?,?,?,?)",(a_id,user,ts,cap,cat_name,0,ib6,))
con.commit()
with sql.connect("categories.db") as con1:
cur1 = con1.cursor()
cur1.execute("UPDATE category_data SET posts=posts+1 WHERE categoryname=?",(cat_name,))
con1.commit()
return jsonify(),201
# Get total number of acts
@app.route('/api/v1/count',methods=['GET'])
def acts_count():
with sql.connect("acts.db") as con:
cur = con.cursor()
cur.execute("SELECT COUNT(act_id) FROM acts_data")
tot_acts = cur.fetchall()
return jsonify(tot_acts[0]),200
@app.errorhandler(405)
def method_not_found(e):
return jsonify(),405
'''
@auth.error_handler
def unauthorized():
return make_response(jsonify({'error': 'Unauthorized access'}), 403)
'''
if __name__=='__main__':
app.run(debug=True,host="0.0.0.0",port=80)#port=8000)
| 5,807 | 0 | 268 |
d096fb257ced16d2f5502aab4e6d41f75ff11d68 | 28,245 | py | Python | parseGlobals/test_EsoParseGlobals.py | Kriskras99/uesp-esoapps | 53d3fc2caaf8a58f8063f75a6e843a6f591fc3b8 | [
"MIT"
] | 9 | 2021-01-25T05:48:53.000Z | 2021-08-30T14:21:29.000Z | parseGlobals/test_EsoParseGlobals.py | Kriskras99/uesp-esoapps | 53d3fc2caaf8a58f8063f75a6e843a6f591fc3b8 | [
"MIT"
] | 1 | 2021-11-17T03:49:12.000Z | 2021-11-17T03:49:12.000Z | parseGlobals/test_EsoParseGlobals.py | Kriskras99/uesp-esoapps | 53d3fc2caaf8a58f8063f75a6e843a6f591fc3b8 | [
"MIT"
] | 5 | 2021-02-28T02:26:34.000Z | 2021-08-01T11:58:22.000Z | import csv
import collections
import os.path
import re
import operator
import sys
import datetime
import shutil
import ntpath
import EsoLuaFile
#from skipdict import SkipDict
OUTPUT_PATH = "d:\\temp\\esoglobals\\"
INPUT_FILENAME = "d:\\esoexport\\goodimages10\\globals_6b.txt"
LUA_ROOT_PATH = "d:\\esoexport\\gamemnf10\\esoui\\"
#LUA_ROOT_PATH = "d:\\esoexport\\gamemnf10\\esoui\\pregame\\console\\"
#INPUT_FILENAME = "d:\\src\\uesp\\eso\\parseGlobals\\globals_6b.txt"
#LUA_ROOT_PATH = "d:\\src\\esoui\\"
functionCalls = { }
luaFunctions = { }
GlobalData_Time = ""
GlobalData_Date = ""
GlobalData_Version = ""
totalLuaFunctions = 0
totalLuaDuplicates = 0
totalLuaCalls = 0
totalIgnoredLuaFiles = 0
#matchFunctions = re.compile("((?:local\s+)?function\s+.*)\s*\n")
matchFunctions = re.compile("((?:local\s+)?function\s+.*)")
#matchFunctions = re.compile("((?:local\s+)?function\s+.*\))\s*\n")
#matchFunctions = re.compile("((?:local\s+)?function\s+.*)\n")
matchFunctionName = re.compile("(local)?\s*function\s+([A-Za-z0-9_]+)?([:.])?([A-Za-z0-9_]+)\s*\(\s*(.*)\s*\)")
matchFunctionParams = re.compile("([A-Za-z0-9_]+)\s*,?")
matchFunctionCall = re.compile("(?:([A-Za-z_\[][A-Za-z0-9_,.\[\]\t ]*)\s*=\s*)?([A-Za-z_][A-Za-z0-9_.:\[\]]*)\s*\((.*)\)")
# function name()
# function name(var)
# function name(var1, var2)
# x, y, z = func()
callFuncs = matchFunctionCall.findall("x = y()")
callFuncs = matchFunctionCall.findall("x[0], y.z = self:zy(abc[1].t, 123)")
print callFuncs
luaFunctions = FindLuaFunctions(LUA_ROOT_PATH)
DumpLuaFunctionCalls(OUTPUT_PATH + "funccalls.txt")
#sys.exit()
parsedGlobalLog = ParseGlobalLogFile(INPUT_FILENAME)
print "Loaded " + str(len(parsedGlobalLog)) + " rows from " + INPUT_FILENAME
globalData = ParseGlobalData(parsedGlobalLog)
print "Parsed into " + str(len(globalData)) + " root global objects"
DumpGlobalData(globalData, OUTPUT_PATH + "test.txt")
CreateFunctionCallHTML(OUTPUT_PATH + "functioncalls/")
CreateGlobalHTML(globalData, OUTPUT_PATH + "all.html")
CreateGlobalHTML(globalData, OUTPUT_PATH + "func.html", [ "function" ])
CreateGlobalHTML(globalData, OUTPUT_PATH + "var.html", [ "number", "string", "boolean" ] )
CreateGlobalHTML(globalData, OUTPUT_PATH + "data.html", [ "userdata", "table" ])
CreateLuaSource(LUA_ROOT_PATH, OUTPUT_PATH + "src/")
| 35.528302 | 348 | 0.597876 | import csv
import collections
import os.path
import re
import operator
import sys
import datetime
import shutil
import ntpath
import EsoLuaFile
#from skipdict import SkipDict
OUTPUT_PATH = "d:\\temp\\esoglobals\\"
INPUT_FILENAME = "d:\\esoexport\\goodimages10\\globals_6b.txt"
LUA_ROOT_PATH = "d:\\esoexport\\gamemnf10\\esoui\\"
#LUA_ROOT_PATH = "d:\\esoexport\\gamemnf10\\esoui\\pregame\\console\\"
#INPUT_FILENAME = "d:\\src\\uesp\\eso\\parseGlobals\\globals_6b.txt"
#LUA_ROOT_PATH = "d:\\src\\esoui\\"
class CInstanceInfo:
def __init__(self):
self.type = ""
self.access = ""
self.name = ""
self.value = ""
self.meta = ""
self.index = ""
self.string = ""
self.firstTable = False
self.firstIndex = False
self.firstMeta = False
self.children = { }
class CFunctionInfo:
def __init__(self):
self.fullName = ""
self.fullString = ""
self.filename = ""
self.namespace = ""
self.namespaceType = ""
self.local = ""
self.name = ""
self.line = ""
self.allParams = ""
self.params = [ ]
class CFunctionCallInfo:
def __init__(self):
self.filename = ""
self.line = ""
self.fullString = ""
self.vars = ""
self.name = ""
self.params = ""
functionCalls = { }
luaFunctions = { }
GlobalData_Time = ""
GlobalData_Date = ""
GlobalData_Version = ""
def ParseGlobalLogFile(filename):
with open (filename, "r") as myfile:
GlobalDataFile = myfile.read()
matchLogLines = re.compile("\w*\[\d+\] = \"(.*)\",\w*")
logLines = matchLogLines.findall(GlobalDataFile)
parseLogLine = re.compile("([a-zA-Z]+){(.*?)} ")
parsedLogLines = []
for line in logLines:
parsedLine = parseLogLine.findall(line)
parsedLineDict = { }
for parsedLine in parsedLine:
varName = parsedLine[0]
varValue = parsedLine[1]
parsedLineDict[varName] = varValue
parsedLogLines.append(parsedLineDict)
return parsedLogLines
def CreateGlobalInstance(globalData, parsedName):
currentParent = globalData
lastParent = None
currentInstance = None
for name in parsedName:
lastParent = currentParent
if (name in currentParent):
currentInstance = currentParent[name]
currentParent = currentParent[name].children
else:
currentParent[name] = CInstanceInfo()
currentInstance = currentParent[name]
currentParent[name].name = name
currentParent = currentParent[name].children
return currentInstance
def ParseGlobalData_Start(log):
global GlobalData_Date
global GlobalData_Time
global GlobalData_Version
#[1] = "event{Global::Start} niceDate{20141114} niceTime{15:21:47} apiVersion{100010} timeStamp{4743725927807057920} gameTime{647585} lang{en} ",
#
fullDate = int(log.get('niceDate', '0'))
if (fullDate > 0):
GlobalData_Date = str(fullDate/10000 % 10000) + "-" + str(fullDate/100 % 100) + "-" + str(fullDate % 100)
GlobalData_Time = log.get('niceTime', '')
GlobalData_Version = log.get('apiVersion', '')
return
def ParseGlobalData(globalLog):
globalData = { }
parseName = re.compile("([a-zA-Z0-9_()]+)\.?")
for log in globalLog:
event = log.get('event', '')
name = log.get('name', '')
parsedName = parseName.findall(name)
if event == "Global::Start":
ParseGlobalData_Start(log)
continue
elif event == "Global::End":
continue
elif event != "Global":
continue
instance = CreateGlobalInstance(globalData, parsedName)
instance.type = log.get('type', '')
instance.access = log.get('label', '')
instance.value = log.get('value', '')
instance.meta = log.get('meta', '')
instance.index = log.get('index', '')
instance.string = log.get('string', '')
instance.firstTable = log.get('firstTable', '') == "1"
instance.firstMeta = log.get('firstMeta', '') == "1"
instance.firstIndex = log.get('firstIndex', '') == "1"
return globalData
def DumpGlobalData_Record(root, header, outFile, types):
sortedKeys = sorted(root.keys())
for key in sortedKeys:
thisObject = root[key]
if (types != None and not thisObject.type in types):
continue
outFile.write(header)
outFile.write(key)
outFile.write(" = ")
if (thisObject.type == "table" or thisObject.type == "function" or
thisObject.type == "userdata"):
outFile.write(thisObject.type)
outFile.write(":")
outFile.write(thisObject.value)
if (thisObject.type == "number" and thisObject.string != ""):
outFile.write(" = \"")
outFile.write(thisObject.string)
outFile.write("\"")
if (thisObject.access == "Private"):
outFile.write("function: Private")
if (thisObject.meta != ""):
outFile.write(" (meta " + thisObject.meta + "}")
if (thisObject.firstTable):
outFile.write(" firstTable")
if (thisObject.firstMeta):
outFile.write(" firstMeta")
if (thisObject.firstIndex):
outFile.write(" firstIndex")
outFile.write("\n")
DumpGlobalData_Record(thisObject.children, header + "\t", outFile, None)
return
def DumpGlobalData(globalData, filename, types = None):
    """Write the plain-text dump of globalData to filename.

    types, when given, restricts the top-level entries to those type names.
    """
    outFile = open(filename, "w")
    try:
        DumpGlobalData_Record(globalData, "", outFile, types)
    finally:
        outFile.close()
    return
def CreateGlobalHTML_Header(globalData, outFile, types):
    # Write the HTML preamble for a global-data page, including a short
    # description of which value types the page contains ("all" when types is
    # None) and when the data was exported.  GlobalData_Date/Time/Version are
    # module globals filled in while parsing the "Global::Start" log record.
    outFile.write("<html>\n")
    outFile.write("\t<head>\n")
    outFile.write("\t\t<title>UESP: ESO Global Data</title>\n")
    outFile.write("\t\t<link rel=\"stylesheet\" href=\"esoglobaldata.css\" type=\"text/css\" />\n")
    outFile.write("\t</head>\n")
    outFile.write("<body>\n")
    typesString = ""
    if (types == None):
        typesString = "all"
    else:
        typesString = ", ".join(types)
    outFile.write("The following is all global LUA data ({0}) found in Elder Scrolls Online as generated by the <a href=\"http://www.uesp.net\">UESP</a>. \n".format(typesString))
    outFile.write("Data was exported from ESO on {0} {1}, API version {2}.\n".format(GlobalData_Date, GlobalData_Time, GlobalData_Version))
    outFile.write("See the <a href=\"#endoffile\">end of file</a> for notes on this data. \n")
    outFile.write("<br /><br />\n")
    return
def CreateGlobalHTML_Record(root, lineHeader, level, parentName, outFile, types):
    # Recursively emit one level of the global-data tree as nested HTML
    # sections.  Only the first occurrence of a shared table/meta/index is
    # expanded inline (and gets an <a name> anchor); later occurrences link
    # back to that anchor.  `types` filters the current level only --
    # children are always emitted (None is passed down).
    sortedKeys = sorted(root.keys())
    for key in sortedKeys:
        thisObject = root[key]
        if (types != None and not thisObject.type in types):
            continue
        # Full dotted path of this node, used for the hover tooltip.
        if (parentName == ""):
            completeName = thisObject.name
        else:
            completeName = parentName + "." + thisObject.name
        metaName = ""
        tableName = ""
        indexName = ""
        tableLink = ""
        metaLink = ""
        indexLink = ""
        accessClass = ""
        # First table occurrence: drop an anchor; otherwise link to it.
        if (thisObject.firstTable):
            tableName = "table_" + thisObject.value
            outFile.write("<a name=\"{0}\" />\n".format(tableName))
        else:
            tableLink = " <a class=\"esog_table\" href=\"#table_{0}\">table:{0}</a>".format(thisObject.value)
        # Same first-occurrence/anchor scheme for the metatable ...
        if (thisObject.firstMeta and thisObject.meta != ""):
            metaLink = " <div class=\"esog_meta\">meta:" + thisObject.meta + "</div>"
            metaName = "meta_" + thisObject.meta
            outFile.write("<a name=\"{0}\" />\n".format(metaName))
        elif (thisObject.meta != ""):
            metaLink = " <a class=\"esog_meta\" href=\"#meta_{0}\">meta:{0}</a>".format(thisObject.meta)
        # ... and for the __index table.
        if (thisObject.firstIndex and thisObject.index != ""):
            indexLink = " <div class=\"esog_index\">index:" + thisObject.index + "</div>"
            indexName = "index_" + thisObject.index
            outFile.write("<a name=\"{0}\" />\n".format(indexName))
        elif (thisObject.index != ""):
            indexLink = " <a class=\"esog_index\" href=\"#index_{0}\">index:{0}</a>".format(thisObject.index)
        if (thisObject.access == "Private"):
            accessClass = " esog_private"
        outFile.write(lineHeader + "<div class=\"esog_section{0}\" title=\"{1}\">\n".format(level, completeName))
        # Build the one-line title shown for this node, per value type.
        thisTitle = thisObject.name
        if (thisObject.type == "table"):
            if (tableLink == ""):
                thisTitle += " = " + thisObject.type + ":" + thisObject.value + tableLink
            else:
                thisTitle += " = " + tableLink
        elif (thisObject.type == "function"):
            # Functions link to their generated per-function call page.
            thisTitle = "<a href=\"{1}\">{0}</a>".format(thisObject.name, GetFunctionLinkName(completeName))
            thisTitle += " = " + thisObject.type + ":" + thisObject.value + metaLink
        elif (thisObject.type == "userdata"):
            thisTitle += " = " + thisObject.type + ":" + thisObject.value + metaLink + indexLink
        elif (thisObject.type == "number" and thisObject.name.startswith("SI_")):
            # SI_ string constants show both the numeric id and the text.
            thisTitle += " (" + thisObject.value + ") = \"" + thisObject.string + "\""
        elif (thisObject.access == "Private"):
            thisTitle += " = Private"
        else:
            thisTitle += " = " + thisObject.value
        outFile.write(lineHeader + "\t<div class=\"esog_title{1}\">{0}</div>".format(thisTitle, accessClass))
        outFile.write("\n")
        outFile.write(lineHeader + "\t<div class=\"esog_children\">\n")
        CreateGlobalHTML_Record(thisObject.children, lineHeader + "\t", level+1, completeName, outFile, None)
        outFile.write(lineHeader + "\t</div>\n")
        outFile.write("</div>\n")
    return
def CreateGlobalHTML_Footer(globalData, outFile, types):
    # Close a global-data HTML page with the standard notes block and a
    # generation/export timestamp line.
    currentDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    typesString = ""
    if (types == None):
        typesString = "all"
    else:
        typesString = ", ".join(types)
    outFile.write("<hr />\n")
    outFile.write("<div class=\"esog_footer\">\n")
    outFile.write("<a name=\"endoffile\" />\n")
    outFile.write("<b>Data Notes:</b>\n")
    outFile.write("<ul>\n")
    outFile.write("<li>The hex numbers for tables, functions, userdata, and meta/index will change each time the UI is reloaded.</li>\n")
    outFile.write("<li>Only one of each unique table/userdata/index/meta object is listed to save space. Use the links to jump to the expanded definition of an object.</li>\n")
    outFile.write("<li>Hover over an object to get its complete name.</li>\n")
    outFile.write("</ul>\n")
    outFile.write("This file was generated automatically on {3}. Data ({4}) was exported from ESO on {0} {1}, API version {2}.<br />\n".format(GlobalData_Date, GlobalData_Time, GlobalData_Version, currentDate, typesString))
    outFile.write("</div>\n</body>\n")
    outFile.write("</html>\n")
    return
def CreateGlobalHTML(globalData, filename, types = None):
    """Emit the HTML view of globalData to filename, copying the CSS next to it."""
    targetDir = ntpath.dirname(filename)
    shutil.copyfile("esoglobaldata.css", targetDir + "/esoglobaldata.css")
    outFile = open(filename, "w")
    try:
        CreateGlobalHTML_Header(globalData, outFile, types)
        CreateGlobalHTML_Record(globalData, "", 1, "", outFile, types)
        CreateGlobalHTML_Footer(globalData, outFile, types)
    finally:
        outFile.close()
    return
# Running totals updated while scanning the exported LUA sources.
totalLuaFunctions = 0
totalLuaDuplicates = 0
totalLuaCalls = 0
totalIgnoredLuaFiles = 0
# FIX: the patterns below were plain strings relying on Python passing
# unknown escapes (\s, \(, ...) through unchanged, which raises
# DeprecationWarning/SyntaxWarning on modern interpreters.  Raw strings
# produce byte-identical patterns and are always safe.
#matchFunctions = re.compile(r"((?:local\s+)?function\s+.*)\s*\n")
matchFunctions = re.compile(r"((?:local\s+)?function\s+.*)")
#matchFunctions = re.compile(r"((?:local\s+)?function\s+.*\))\s*\n")
#matchFunctions = re.compile(r"((?:local\s+)?function\s+.*)\n")
# Groups: 1='local' keyword (optional), 2=namespace, 3=':' or '.',
# 4=function name, 5=raw parameter list.
matchFunctionName = re.compile(r"(local)?\s*function\s+([A-Za-z0-9_]+)?([:.])?([A-Za-z0-9_]+)\s*\(\s*(.*)\s*\)")
matchFunctionParams = re.compile(r"([A-Za-z0-9_]+)\s*,?")
# Groups: 1=assignment targets (optional), 2=called name, 3=argument text.
matchFunctionCall = re.compile(r"(?:([A-Za-z_\[][A-Za-z0-9_,.\[\]\t ]*)\s*=\s*)?([A-Za-z_][A-Za-z0-9_.:\[\]]*)\s*\((.*)\)")
# Shapes recognized, e.g.:
# function name()
# function name(var)
# function name(var1, var2)
# x, y, z = func()
def FindLuaFunctions_ParseFunction(filename, function, lineNumber, luaFunctions):
    # Parse one matched "function ..." line into a CFunctionInfo record and,
    # for global (non-local) functions, register it in luaFunctions keyed by
    # the normalized name.  Returns the record, or None when the line does
    # not parse or when it duplicates an already-registered global function.
    global totalLuaDuplicates
    # 0=local, 1=Namespace, 2=:|., 3=Function, 4=Params
    funcData = matchFunctionName.findall(function)
    if (len(funcData) <= 0):
        return None
    #print "\t", funcData[0]
    # NOTE(review): funcParams is computed but never used afterwards --
    # presumably leftover from an earlier version.
    funcParams = ""
    if (funcData[0][4] != ""):
        funcParams = matchFunctionParams.findall(funcData[0][4])
    newFunc = CFunctionInfo()
    newFunc.fullString = function
    newFunc.local = funcData[0][0]
    newFunc.namespace = funcData[0][1]
    newFunc.namespaceType = funcData[0][2]
    newFunc.name = funcData[0][3]
    newFunc.line = str(lineNumber)
    newFunc.allParams = funcData[0][4]
    newFunc.fullName = newFunc.namespace + newFunc.namespaceType + newFunc.name
    newFunc.filename = filename
    # Normalized key: ':' method separators become '.'.
    niceName = CreateNiceFunctionName(newFunc.fullName)
    #print "\t\t", newFunc.fullName
    # Local functions are returned but intentionally not registered globally.
    if (newFunc.local != ""):
        return newFunc
    if (niceName in luaFunctions):
        totalLuaDuplicates += 1
        print "\tDuplicate function " + niceName + " found!"
        print "\t\tExisting Found in " + luaFunctions[niceName].filename + " Line " + luaFunctions[niceName].line
        print "\t\t New Found in " + newFunc.filename + " Line " + newFunc.line
        print "\t\tExisting Def: " + luaFunctions[niceName].fullString
        print "\t\t New Def: " + newFunc.fullString
        return None
    luaFunctions[niceName] = newFunc
    return newFunc
def FindLuaFunctions_ParseFile(filename, luaFileContents, luaFunctions):
    # Scan one LUA file's text: collect every function definition (with its
    # 1-based line number) and, on non-definition lines, every call site
    # into the global functionCalls dict.
    global totalLuaFunctions
    global totalLuaCalls
    global functionCalls
    fileLines = luaFileContents.split("\n")
    functions = [ ]
    lineNumbers = [ ]
    for i, line in enumerate(fileLines):
        lineFuncs = matchFunctions.findall(line)
        functions.extend(lineFuncs)
        # Record the line number once per match found on this line.
        lineNumbers.extend([i+1] * len(lineFuncs))
        # Definition lines are not scanned for calls.
        if (len(lineFuncs) > 0):
            continue
        callFuncs = matchFunctionCall.findall(line)
        #functionCalls.extend(callFuncs)
        paramCallFuncs = [ ]
        # Extending callFuncs while iterating it deliberately recurses into
        # calls nested inside another call's argument list.
        for call in callFuncs:
            paramCallFuncs = matchFunctionCall.findall(call[2])
            callFuncs.extend(paramCallFuncs)
        # call is (assignment targets, called name, argument text).
        for call in callFuncs:
            newCallInfo = CFunctionCallInfo()
            newCallInfo.filename = filename
            newCallInfo.line = str(i+1)
            newCallInfo.vars = call[0].strip()
            newCallInfo.name = call[1].strip()
            newCallInfo.params = call[2].strip()
            # Drop a leading "local" from the assignment-target text.
            if (newCallInfo.vars.startswith("local")):
                newCallInfo.vars = newCallInfo.vars[5:].strip()
            if (newCallInfo.vars == ""):
                newCallInfo.fullString = newCallInfo.name + "(" + newCallInfo.params + ")"
            else:
                newCallInfo.fullString = newCallInfo.vars + " = " + newCallInfo.name + "(" + newCallInfo.params + ")"
            if (not newCallInfo.name in functionCalls):
                functionCalls[newCallInfo.name] = [ ]
            functionCalls[newCallInfo.name].append(newCallInfo)
    #if (len(callFuncs) > 0):
        #print callFuncs
    # print "\tFound " + str(len(functions)) + " functions and " + str(len(functionCalls)) + " calls in " + str(len(lineNumbers)) + " lines"
    for i, func in enumerate(functions):
        FindLuaFunctions_ParseFunction(filename, func, lineNumbers[i], luaFunctions)
    totalLuaFunctions += len(functions)
    # NOTE(review): this adds the *cumulative* size of the global
    # functionCalls dict after every file, so totalLuaCalls overcounts --
    # confirm whether a per-file count was intended.
    totalLuaCalls += len(functionCalls)
    return
def FindLuaFunctions_CheckFile(filename, luaFunctions):
    """Parse filename for LUA functions when it is a .lua file.

    Returns True when the file was parsed, False for non-LUA files.
    """
    if not filename.endswith(".lua"):
        return False
    inFile = open(filename, "r")
    try:
        fileContents = inFile.read()
    finally:
        inFile.close()
    FindLuaFunctions_ParseFile(filename, fileContents, luaFunctions)
    return True
def FindLuaFunctions(searchPath):
    # Walk searchPath for .lua sources, skipping the gamepad/pregame UI
    # trees, parse each file, and return the dict of discovered global
    # functions.  Prints summary statistics when done.
    global totalIgnoredLuaFiles
    luaFunctions = { }
    totalFiles = 0
    for subdir, dirs, files in os.walk(searchPath):
        # Normalize to forward slashes with a trailing '/'.
        subPath = subdir.replace("\\", "/") + "/"
        if ("/gamepad/" in subPath or
            "/pregame/" in subPath or
            "/pregamelocalization/" in subPath):
            print "\tSkipping " + subPath + "..."
            totalIgnoredLuaFiles += 1
            continue
        for filename in files:
            fullFilename = subPath + filename
            fullFilename = fullFilename.replace("\\", "/")
            if (FindLuaFunctions_CheckFile(fullFilename, luaFunctions)):
                totalFiles += 1
    print "Found " + str(totalFiles) + " LUA files"
    print "Ignored " + str(totalIgnoredLuaFiles) + " LUA files"
    print "Found " + str(totalLuaFunctions) + " LUA functions"
    print "Found " + str(totalLuaCalls) + " LUA function calls for " + str(len(functionCalls)) + " different functions"
    print "Found " + str(totalLuaDuplicates) + " duplicate function definitions"
    return luaFunctions
def DumpLuaFunctionCall(outFile, funcName, funcCalls):
    """Write one function's call sites to outFile as an indented text block."""
    pieces = [funcName, "()\n"]
    for call in funcCalls:
        # Show the call plus its source location relative to the LUA root.
        relFile = os.path.relpath(call.filename, LUA_ROOT_PATH).replace("\\", "/")
        pieces.append("\t" + call.fullString + " -- " + relFile + ":" + call.line + "\n")
    pieces.append("\n")
    outFile.write("".join(pieces))
    return
def DumpLuaFunctionCalls(filename):
    """Dump every recorded call site to filename, sorted by function name."""
    outFile = open(filename, "w")
    try:
        for funcName in sorted(functionCalls.keys()):
            DumpLuaFunctionCall(outFile, funcName, functionCalls[funcName])
    finally:
        outFile.close()
    return
def CreateFunctionCallHTML_Header(outFile, funcName):
    # Write the page preamble for one function's call listing, linking back
    # to the function's definition in the rendered source when it was found
    # by the LUA scan (funcData is None otherwise).
    funcData = luaFunctions.get(funcName, None)
    outFile.write("<html>\n")
    outFile.write("\t<head>\n")
    outFile.write("\t\t<title>UESP: ESO Function Call {0}</title>\n".format(funcName))
    outFile.write("\t\t<link rel=\"stylesheet\" href=\"esofunccall.css\" type=\"text/css\" />\n")
    outFile.write("\t</head>\n")
    outFile.write("<body>\n")
    outFile.write("<h1 class=\"esofc_title\">Function Calls for {0}():</h1>\n".format(funcName))
    outFile.write("The following are all the LUA function calls for the {0}() function found in Elder Scrolls Online as generated by the <a href=\"http://www.uesp.net\">UESP</a>. \n".format(funcName))
    #outFile.write("Data was exported from ESO on {0} {1}, API version {2}.\n".format(GlobalData_Date, GlobalData_Time, GlobalData_Version))
    outFile.write("<br /><br />\n")
    if (funcData == None):
        outFile.write("No function definition found!\n")
    else:
        # "../" because the call pages live one directory below the src/ tree.
        outFile.write("Function definition found in {0}\n".format(CreateLuaFileLink(funcData.filename, funcData.line, "../")))
    #outFile.write("<br /><br />\n")
    return
def CreateFunctionCallHTML_Call(outFile, funcName, funcCall):
    # Write a complete call-listing page for funcName: header, one <li> per
    # recorded call site (source link plus the call text), then the footer.
    CreateFunctionCallHTML_Header(outFile, funcName)
    outFile.write("<ul>\n")
    for call in funcCall:
        outFile.write("\t<li>\n")
        #outFile.write("<div class=\"esofc_filename\">{0}:{1}</div>\n".format(call.filename, call.line))
        outFile.write("<div class=\"esofc_filename\">{0}</div>\n".format(CreateLuaFileLink(call.filename, call.line, "../")))
        outFile.write(" -- <div class=\"esofc_record\">{0}</div>\n".format(call.fullString))
        outFile.write("\t</li>\n")
    outFile.write("</ul>\n")
    CreateFunctionCallHTML_Footer(outFile)
    return
def CreateFunctionCallHTML_Footer(outFile):
    # Close a call-listing page with the standard generation/export note.
    currentDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    outFile.write("<hr />\n")
    outFile.write("<div class=\"esofc_footer\">\n")
    outFile.write("<a name=\"endoffile\" />\n")
    outFile.write("This file was generated automatically on {3}. Function data was exported from ESO on {0} {1}, API version {2}.<br />\n".format(GlobalData_Date, GlobalData_Time, GlobalData_Version, currentDate))
    outFile.write("</div>\n</body>\n")
    outFile.write("</html>\n")
    return
def CreateNiceFunctionName(funcName):
    """Normalize a LUA function name: method-call ':' separators become '.'."""
    return ".".join(funcName.split(":"))
def CreateFunctionFilename(funcName):
    """Return the per-function HTML file name for funcName."""
    niceName = CreateNiceFunctionName(funcName)
    return "{0}.html".format(niceName)
def GetFunctionLinkName(funcName):
    """Return the relative link target for funcName's call-listing page.

    A trailing "()" pair (if present in the input name) is stripped before
    the ".html" suffix is appended.
    """
    linkName = "functioncalls/" + CreateNiceFunctionName(funcName)
    if linkName.endswith("()"):
        linkName = linkName[:-2]
    return linkName + ".html"
def CreateLuaFileLink(filename, line, relPath = ""):
    """Build an <a> tag linking to the rendered source page for filename:line."""
    baseFilename = os.path.relpath(filename, LUA_ROOT_PATH).replace("\\", "/")
    # The rendered pages live under <relPath>src/ and anchor on line numbers.
    target = relPath + "src/" + baseFilename + ".html#" + str(line)
    label = baseFilename + ":" + str(line)
    return "<a href=\"{0}\">{1}</a>".format(target, label)
def CreateFunctionCallHTML(path):
    """Write one HTML call-listing page per recorded function under path."""
    if not os.path.exists(path):
        os.makedirs(path)
    shutil.copyfile("esofunccall.css", path + "esofunccall.css")
    for funcName in functionCalls:
        outFile = open(path + CreateFunctionFilename(funcName), "w")
        try:
            CreateFunctionCallHTML_Call(outFile, funcName, functionCalls[funcName])
        finally:
            outFile.close()
    return
def CreateLuaSource_Header(outFile, filename, relPath):
    # Write the page preamble for a rendered LUA source file; relPath points
    # back to the output root where the SyntaxHighlighter/jQuery assets were
    # copied.
    # NOTE(review): no opening "<html>" tag is written here even though the
    # footer writes "</html>" -- confirm whether that is intentional.
    outFile.write("\t<head>\n")
    outFile.write("\t\t<title>UESP:ESO Data -- {0}</title>\n".format(filename))
    outFile.write("\t\t<link rel=\"stylesheet\" href=\"{0}/esoluafile.css\" type=\"text/css\" />\n".format(relPath))
    outFile.write("\t\t<link rel=\"stylesheet\" href=\"{0}/shCore.css\" type=\"text/css\" />\n".format(relPath))
    outFile.write("\t\t<link rel=\"stylesheet\" href=\"{0}/shCoreDefault.css\" type=\"text/css\" />\n".format(relPath))
    outFile.write("\t\t<script type=\"text/javascript\" src=\"{0}/shCore.js\"></script>\n".format(relPath))
    outFile.write("\t\t<script type=\"text/javascript\" src=\"{0}/shBrushLua.js\"></script>\n".format(relPath))
    outFile.write("\t\t<script type=\"text/javascript\" src=\"{0}/jquery.js\"></script>\n".format(relPath))
    outFile.write("\t</head>\n")
    outFile.write("<body>\n")
    outFile.write("<h1 class=\"esofc_title\">ESO LUA File: {0}</h1>\n".format(filename))
    return
def CreateLuaSource_LUAData(outFile, luaFile):
    """Write the LUA source into a SyntaxHighlighter <pre> block.

    The source is HTML-escaped so '<' and '>' inside the LUA code cannot
    terminate the <pre> element or open stray tags.
    """
    outFile.write("<pre class=\"brush: lua;\">")
    # BUG FIX: the previous replace("<", "<") / replace(">", ">") calls were
    # no-ops (the "&lt;"/"&gt;" entities had been lost), so LUA comparison
    # operators produced broken HTML.  Restore the real entity escaping.
    convertFile = luaFile.replace("<", "&lt;").replace(">", "&gt;")
    outFile.write(convertFile)
    outFile.write("</pre>\n")
    return
def CreateLuaSource_Footer(outFile):
    # Close a rendered LUA source page: boot SyntaxHighlighter with the
    # URL-hash line pre-highlighted, give each gutter line an id so
    # "#<line>" anchors can scroll to it, then write the generation note
    # and the copyright disclaimer.
    currentDate = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    outFile.write("<script type=\"text/javascript\">\n")
    outFile.write("var initialHash = window.location.hash.substring(1);\n")
    outFile.write("SyntaxHighlighter.defaults['highlight'] = initialHash;\n")
    outFile.write("SyntaxHighlighter.all();\n")
    #outFile.write("window.location.hash = '';\n")
    # Delayed so the highlighter has finished building the gutter DOM.
    outFile.write("setTimeout(function() { $('.gutter .line').each(function(i) {\n")
    outFile.write("\t $(this).attr('id', $(this).text()); \n")
    outFile.write("}); if (initialHash) $('html,body').animate({scrollTop: $('#'+initialHash).offset().top},'fast'); }, 500);\n")
    outFile.write("</script>\n")
    outFile.write("<hr />\n")
    outFile.write("<i>")
    outFile.write("This file was generated automatically on {3}. File data was exported from ESO on {0} {1}, API version {2}.<br />\n".format(GlobalData_Date, GlobalData_Time, GlobalData_Version, currentDate))
    outFile.write("File content is Copyright (c) 2014 Zenimax Online Studios. All trademarks and registered trademarks present in the file are proprietary to ZeniMax Online Studios, the inclusion of which implies no affiliation with the UESP. The use of this file data is believed to fall under the fair dealing clause of Canadian copyright law.")
    outFile.write("</i>\n")
    outFile.write("</body>\n")
    outFile.write("</html>")
    return
def CreateLuaSource_LUA(filename, relFilename, relPath):
    """Render one copied LUA file as '<filename>.html' beside it."""
    inFile = open(filename, "r")
    try:
        luaFile = inFile.read()
    finally:
        inFile.close()
    outFile = open(filename + ".html", "w")
    try:
        CreateLuaSource_Header(outFile, relFilename, relPath)
        CreateLuaSource_LUAData(outFile, luaFile)
        CreateLuaSource_Footer(outFile)
    finally:
        outFile.close()
    return
def CreateLuaSource(inputPath, outputPath):
    # Mirror the LUA source tree under outputPath, copying every file and
    # rendering each .lua file as a highlighted HTML page next to its copy.
    # NOTE(review): the viewer assets are copied to the global OUTPUT_PATH,
    # not to the outputPath parameter -- confirm callers always pass
    # OUTPUT_PATH + "src/" so the relPath links below resolve.
    shutil.copyfile("jquery.js", OUTPUT_PATH + "jquery.js")
    shutil.copyfile("shCore.js", OUTPUT_PATH + "shCore.js")
    shutil.copyfile("shBrushLua.js", OUTPUT_PATH + "shBrushLua.js")
    shutil.copyfile("shCore.css", OUTPUT_PATH + "shCore.css")
    shutil.copyfile("shCoreDefault.css", OUTPUT_PATH + "shCoreDefault.css")
    shutil.copyfile("esoluafile.css", OUTPUT_PATH + "esoluafile.css")
    for root, dirs, files in os.walk(inputPath):
        subPath = root.replace("\\", "/") + "/"
        subDir = os.path.relpath(subPath, inputPath)
        outputSubDir = os.path.join(outputPath, subDir)
        if not os.path.exists(outputSubDir):
            os.makedirs(outputSubDir)
        for filename in files:
            relFilename = os.path.relpath(subPath + filename, inputPath)
            outputFilename = os.path.join(outputSubDir, filename)
            shutil.copyfile(subPath + filename, outputFilename)
            # Relative path from the rendered page back to the asset root.
            relPath = os.path.join("../", os.path.relpath(outputPath, os.path.dirname(outputFilename)))
            relPath = relPath.replace("\\", "/")
            relFilename = relFilename.replace("\\", "/")
            outputFilename = outputFilename.replace("\\", "/")
            if (filename.endswith(".lua")):
                CreateLuaSource_LUA(outputFilename, relFilename, relPath)
    return
# Smoke-test the call regex on two representative lines before scanning.
callFuncs = matchFunctionCall.findall("x = y()")
callFuncs = matchFunctionCall.findall("x[0], y.z = self:zy(abc[1].t, 123)")
print callFuncs
# Scan the exported LUA sources and dump every discovered call site.
luaFunctions = FindLuaFunctions(LUA_ROOT_PATH)
DumpLuaFunctionCalls(OUTPUT_PATH + "funccalls.txt")
#sys.exit()
# Parse the in-game global log and build the global-data tree.
parsedGlobalLog = ParseGlobalLogFile(INPUT_FILENAME)
print "Loaded " + str(len(parsedGlobalLog)) + " rows from " + INPUT_FILENAME
globalData = ParseGlobalData(parsedGlobalLog)
print "Parsed into " + str(len(globalData)) + " root global objects"
DumpGlobalData(globalData, OUTPUT_PATH + "test.txt")
# Emit the HTML views: per-function call pages plus filtered overview pages.
CreateFunctionCallHTML(OUTPUT_PATH + "functioncalls/")
CreateGlobalHTML(globalData, OUTPUT_PATH + "all.html")
CreateGlobalHTML(globalData, OUTPUT_PATH + "func.html", [ "function" ])
CreateGlobalHTML(globalData, OUTPUT_PATH + "var.html", [ "number", "string", "boolean" ] )
CreateGlobalHTML(globalData, OUTPUT_PATH + "data.html", [ "userdata", "table" ])
# Finally mirror the LUA tree as highlighted HTML source pages.
CreateLuaSource(LUA_ROOT_PATH, OUTPUT_PATH + "src/")
| 24,879 | 1 | 881 |
44c55ac0ecb64bcd26d82ed69fcb8c40f1a57479 | 3,430 | py | Python | scout/utils/requests.py | CHRUdeLille/scout | 0f70bec32e078d1825ebf20237f4a4979585dffb | [
"BSD-3-Clause"
] | null | null | null | scout/utils/requests.py | CHRUdeLille/scout | 0f70bec32e078d1825ebf20237f4a4979585dffb | [
"BSD-3-Clause"
] | null | null | null | scout/utils/requests.py | CHRUdeLille/scout | 0f70bec32e078d1825ebf20237f4a4979585dffb | [
"BSD-3-Clause"
] | null | null | null | import logging
import urllib.request
from urllib.error import (HTTPError, URLError)
# Module-level logger for this helper module.
LOG = logging.getLogger(__name__)
# Jenkins artifact URL template for the monthly HPO annotation builds;
# format with the wanted annotation file name.
HPO_URL = ("http://compbio.charite.de/jenkins/job/hpo.annotations.monthly/"
           "lastStableBuild/artifact/annotation/{0}")
def fetch_resource(url, file_name=None):
    """Fetch a resource and return the resulting lines in a list

    Send file_name to get more clean log messages

    Args:
        url(str)
        file_name(str)

    Returns:
        lines(list(str))
    """
    try:
        LOG.info("Requesting %s", (file_name or url))
        # FIX: close the response explicitly -- the previous version never
        # closed the object returned by urlopen(), leaking the connection.
        with urllib.request.urlopen(url) as response:
            data = response.read()  # a `bytes` object
        lines = data.decode('utf-8').split('\n')
    except HTTPError as err:
        LOG.warning("Something went wrong, perhaps the api key is not valid?")
        raise err
    except URLError as err:
        LOG.warning("Something went wrong, are you connected to internet?")
        raise err
    return lines
def fetch_mim_files(api_key, mim2genes=False, mimtitles=False, morbidmap=False, genemap2=False):
    """Fetch the necessary mim files using a api key

    Args:
        api_key(str): A api key necessary to fetch mim data

    Returns:
        mim_files(dict): A dictionary with the neccesary files
    """
    LOG.info("Fetching OMIM files from https://omim.org/")
    # Collect only the URLs that were actually requested.
    mim_urls = {}
    if mim2genes is True:
        mim_urls['mim2genes'] = 'https://omim.org/static/omim/data/mim2gene.txt'
    if mimtitles is True:
        mim_urls['mimtitles'] = 'https://data.omim.org/downloads/{0}/mimTitles.txt'.format(api_key)
    if morbidmap is True:
        mim_urls['morbidmap'] = 'https://data.omim.org/downloads/{0}/morbidmap.txt'.format(api_key)
    if genemap2 is True:
        mim_urls['genemap2'] = 'https://data.omim.org/downloads/{0}/genemap2.txt'.format(api_key)
    mim_files = {}
    for file_name, url in mim_urls.items():
        mim_files[file_name] = fetch_resource(url, file_name)
    return mim_files
def fetch_hpo_terms():
    """Fetch the latest version of the hpo terms in .obo format

    Returns:
        res(list(str)): A list with the lines
    """
    hp_obo_url = "http://purl.obolibrary.org/obo/hp.obo"
    return fetch_resource(hp_obo_url)
def fetch_hpo_to_genes():
    """Fetch the latest version of the map from phenotypes to genes

    Returns:
        res(list(str)): A list with the lines
    """
    file_name = "ALL_SOURCES_ALL_FREQUENCIES_phenotype_to_genes.txt"
    return fetch_resource(HPO_URL.format(file_name), file_name)
def fetch_hpo_genes():
    """Fetch the latest version of the map from genes to hpo terms

    Returns:
        res(list(str)): A list with the lines
    """
    file_name = "ALL_SOURCES_ALL_FREQUENCIES_genes_to_phenotype.txt"
    return fetch_resource(HPO_URL.format(file_name), file_name)
def fetch_hpo_phenotype_to_terms():
    """Fetch the latest version of the map from phenotype to terms

    Returns:
        res(list(str)): A list with the lines
    """
    file_name = "ALL_SOURCES_ALL_FREQUENCIES_diseases_to_genes_to_phenotypes.txt"
    return fetch_resource(HPO_URL.format(file_name), file_name)
| 30.087719 | 96 | 0.66035 | import logging
import urllib.request
from urllib.error import (HTTPError, URLError)
# Module-level logger for this helper module.
LOG = logging.getLogger(__name__)
# Jenkins artifact URL template for the monthly HPO annotation builds;
# format with the wanted annotation file name.
HPO_URL = ("http://compbio.charite.de/jenkins/job/hpo.annotations.monthly/"
           "lastStableBuild/artifact/annotation/{0}")
def fetch_resource(url, file_name=None):
    """Fetch a resource and return the resulting lines in a list

    Send file_name to get more clean log messages

    Args:
        url(str)
        file_name(str)

    Returns:
        lines(list(str))
    """
    try:
        LOG.info("Requesting %s", (file_name or url))
        # FIX: close the response explicitly -- the previous version never
        # closed the object returned by urlopen(), leaking the connection.
        with urllib.request.urlopen(url) as response:
            data = response.read()  # a `bytes` object
        lines = data.decode('utf-8').split('\n')
    except HTTPError as err:
        LOG.warning("Something went wrong, perhaps the api key is not valid?")
        raise err
    except URLError as err:
        LOG.warning("Something went wrong, are you connected to internet?")
        raise err
    return lines
def fetch_mim_files(api_key, mim2genes=False, mimtitles=False, morbidmap=False, genemap2=False):
    """Fetch the necessary mim files using a api key

    Args:
        api_key(str): A api key necessary to fetch mim data

    Returns:
        mim_files(dict): A dictionary with the neccesary files
    """
    LOG.info("Fetching OMIM files from https://omim.org/")
    # Collect only the URLs that were actually requested.
    mim_urls = {}
    if mim2genes is True:
        mim_urls['mim2genes'] = 'https://omim.org/static/omim/data/mim2gene.txt'
    if mimtitles is True:
        mim_urls['mimtitles'] = 'https://data.omim.org/downloads/{0}/mimTitles.txt'.format(api_key)
    if morbidmap is True:
        mim_urls['morbidmap'] = 'https://data.omim.org/downloads/{0}/morbidmap.txt'.format(api_key)
    if genemap2 is True:
        mim_urls['genemap2'] = 'https://data.omim.org/downloads/{0}/genemap2.txt'.format(api_key)
    mim_files = {}
    for file_name, url in mim_urls.items():
        mim_files[file_name] = fetch_resource(url, file_name)
    return mim_files
def fetch_hpo_terms():
    """Fetch the latest version of the hpo terms in .obo format

    Returns:
        res(list(str)): A list with the lines
    """
    hp_obo_url = "http://purl.obolibrary.org/obo/hp.obo"
    return fetch_resource(hp_obo_url)
def fetch_hpo_to_genes():
    """Fetch the latest version of the map from phenotypes to genes

    Returns:
        res(list(str)): A list with the lines
    """
    file_name = "ALL_SOURCES_ALL_FREQUENCIES_phenotype_to_genes.txt"
    return fetch_resource(HPO_URL.format(file_name), file_name)
def fetch_hpo_genes():
    """Fetch the latest version of the map from genes to hpo terms

    Returns:
        res(list(str)): A list with the lines
    """
    file_name = "ALL_SOURCES_ALL_FREQUENCIES_genes_to_phenotype.txt"
    return fetch_resource(HPO_URL.format(file_name), file_name)
def fetch_hpo_phenotype_to_terms():
    """Fetch the latest version of the map from phenotype to terms

    Returns:
        res(list(str)): A list with the lines
    """
    file_name = "ALL_SOURCES_ALL_FREQUENCIES_diseases_to_genes_to_phenotypes.txt"
    return fetch_resource(HPO_URL.format(file_name), file_name)
| 0 | 0 | 0 |
129d222f89d99051922fb72971b161556910305d | 1,242 | py | Python | 1301-1400/1362-Substring with Concatenation of All Words/1362-Substring with Concatenation of All Words.py | jiadaizhao/LintCode | a8aecc65c47a944e9debad1971a7bc6b8776e48b | [
"MIT"
] | 77 | 2017-12-30T13:33:37.000Z | 2022-01-16T23:47:08.000Z | 1301-1400/1362-Substring with Concatenation of All Words/1362-Substring with Concatenation of All Words.py | jxhangithub/LintCode-1 | a8aecc65c47a944e9debad1971a7bc6b8776e48b | [
"MIT"
] | 1 | 2018-05-14T14:15:40.000Z | 2018-05-14T14:15:40.000Z | 1301-1400/1362-Substring with Concatenation of All Words/1362-Substring with Concatenation of All Words.py | jxhangithub/LintCode-1 | a8aecc65c47a944e9debad1971a7bc6b8776e48b | [
"MIT"
] | 39 | 2017-12-07T14:36:25.000Z | 2022-03-10T23:05:37.000Z | class Solution:
"""
@param s: a string
@param words: a list of words
@return: all starting indices of substring(s)
"""
class Solution:
    """
    @param s: a string
    @param words: a list of words
    @return: all starting indices of substring(s)
    """
    def findSubstring(self, s, words):
        # Sliding-window scan.  All words have equal length, so for each of
        # the word_len possible alignment offsets we slide a window of whole
        # words across s, keeping a running count of the words seen and
        # shrinking the window from the left whenever a word over-counts.
        starts = []
        if not s or not words:
            return starts
        text_len = len(s)
        needed = len(words)
        word_len = len(words[0])
        required = collections.Counter(words)
        for offset in range(word_len):
            window = collections.Counter()
            matched = 0
            for pos in range(offset, text_len - word_len + 1, word_len):
                token = s[pos: pos + word_len]
                if token not in required:
                    # Unknown word: no window can span it, so restart.
                    matched = 0
                    window.clear()
                else:
                    matched += 1
                    window[token] += 1
                    # Drop words from the left until `token` fits its quota.
                    while window[token] > required[token]:
                        dropped = s[pos - (matched - 1)*word_len: pos - (matched - 2)*word_len]
                        window[dropped] -= 1
                        matched -= 1
                    if matched == needed:
                        starts.append(pos - (matched - 1)*word_len)
                        # Slide past the first word to look for more matches.
                        first = s[pos - (matched - 1)*word_len: pos - (matched - 2)*word_len]
                        window[first] -= 1
                        matched -= 1
        return starts
| 1,077 | 0 | 26 |
e180d03a706826e43e20966983fa7894b83d5a30 | 1,665 | py | Python | pt_sbi/cnf_theta_cp0k.py | muntazirabidi/boss-sbi | fae016eb10b64153391499276d238ccdf660df88 | [
"MIT"
] | 1 | 2022-03-15T18:13:02.000Z | 2022-03-15T18:13:02.000Z | pt_sbi/cnf_theta_cp0k.py | muntazirabidi/boss-sbi | fae016eb10b64153391499276d238ccdf660df88 | [
"MIT"
] | 11 | 2020-12-16T18:26:31.000Z | 2021-04-02T14:58:37.000Z | pt_sbi/cnf_theta_cp0k.py | muntazirabidi/boss-sbi | fae016eb10b64153391499276d238ccdf660df88 | [
"MIT"
] | 2 | 2021-03-29T17:33:54.000Z | 2021-04-01T16:07:07.000Z | '''
script to train conditional normalizing flow to estimate p( theta | compressed
p0k ).
'''
import torch
import os, sys
import numpy as np
import pt_sbi as PTsbi
# Number of simulations to use, taken from the command line.
Nsims = int(sys.argv[1])
#############################################################################
# setup training and validation set
#############################################################################
dat_dir = PTsbi.dat_dir()
thetas = PTsbi.theta_samples()
wcp0ks = PTsbi.wcP0_samples()
# Truncate both arrays to the first Nsims simulations; float32 for torch.
thetas = thetas.astype(np.float32)[:Nsims]
condit = wcp0ks.astype(np.float32)[:Nsims]
#############################################################################
# train CNF
#############################################################################
# 85/15 train/validation split.
Ntrain = int(np.floor(0.85 * thetas.shape[0]))
# Checkpoint / last-model / best-model file names, keyed by Nsims.
fcheck = os.path.join(dat_dir, 'cnf.theta_cp0k.%i.check.pt' % Nsims)
fmodel = os.path.join(dat_dir, 'cnf.theta_cp0k.%i.pt' % Nsims)
fbest = os.path.join(dat_dir, 'cnf.theta_cp0k.%i.best.pt' % Nsims)
best_model, device = PTsbi.train_CNF(
    thetas, condit,
    Ntrain,
    num_blocks=10,
    fbest=fbest,
    fcheck=fcheck,
    fmodel=fmodel)
#############################################################################
# deploy on fiducial cP0k
#############################################################################
theta_fid = PTsbi.theta_fiducial()
wcp0k_fid = PTsbi.wcP0_fiducial().astype(np.float32)
condit = torch.from_numpy(wcp0k_fid)
# Draw 100000 posterior samples conditioned on the fiducial compressed P0(k)
# and store them beside the best-model checkpoint.
best_model.eval()
with torch.no_grad():
    post = np.array(best_model.sample(100000, cond_inputs=condit).detach().cpu())
fpost = fbest.replace('best.pt', 'posterior.npy')
np.save(fpost, post)
| 29.732143 | 81 | 0.499099 | '''
script to train conditional normalizing flow to estimate p( theta | compressed
p0k ).
'''
import torch
import os, sys
import numpy as np
import pt_sbi as PTsbi
# Number of simulations to use, taken from the command line.
Nsims = int(sys.argv[1])
#############################################################################
# setup training and validation set
#############################################################################
dat_dir = PTsbi.dat_dir()
thetas = PTsbi.theta_samples()
wcp0ks = PTsbi.wcP0_samples()
# Truncate both arrays to the first Nsims simulations; float32 for torch.
thetas = thetas.astype(np.float32)[:Nsims]
condit = wcp0ks.astype(np.float32)[:Nsims]
#############################################################################
# train CNF
#############################################################################
# 85/15 train/validation split.
Ntrain = int(np.floor(0.85 * thetas.shape[0]))
# Checkpoint / last-model / best-model file names, keyed by Nsims.
fcheck = os.path.join(dat_dir, 'cnf.theta_cp0k.%i.check.pt' % Nsims)
fmodel = os.path.join(dat_dir, 'cnf.theta_cp0k.%i.pt' % Nsims)
fbest = os.path.join(dat_dir, 'cnf.theta_cp0k.%i.best.pt' % Nsims)
best_model, device = PTsbi.train_CNF(
    thetas, condit,
    Ntrain,
    num_blocks=10,
    fbest=fbest,
    fcheck=fcheck,
    fmodel=fmodel)
#############################################################################
# deploy on fiducial cP0k
#############################################################################
theta_fid = PTsbi.theta_fiducial()
wcp0k_fid = PTsbi.wcP0_fiducial().astype(np.float32)
condit = torch.from_numpy(wcp0k_fid)
# Draw 100000 posterior samples conditioned on the fiducial compressed P0(k)
# and store them beside the best-model checkpoint.
best_model.eval()
with torch.no_grad():
    post = np.array(best_model.sample(100000, cond_inputs=condit).detach().cpu())
fpost = fbest.replace('best.pt', 'posterior.npy')
np.save(fpost, post)
| 0 | 0 | 0 |
c11a339f18373af6a02cd69a65c5607889fe1ade | 860 | py | Python | Week-2/Day-7.py | abusamrah2005/Python | b601a9daf8a5245bbcc1466d629adda43ed7c6ca | [
"Unlicense"
] | 4 | 2019-09-21T22:47:53.000Z | 2020-04-17T03:32:21.000Z | Week-2/Day-7.py | abusamrah2005/Python | b601a9daf8a5245bbcc1466d629adda43ed7c6ca | [
"Unlicense"
] | null | null | null | Week-2/Day-7.py | abusamrah2005/Python | b601a9daf8a5245bbcc1466d629adda43ed7c6ca | [
"Unlicense"
] | 2 | 2019-09-21T22:47:59.000Z | 2020-04-17T03:32:14.000Z | """
السلاسل النصية / النصوص في لغة البايثون
"""
print("السلاسل النصية / النصوص في لغة البايثون")

print('Hello World')  # single quotes
print("Hello World")  # double quotes
print("--------------------------")

# Storing a string in a variable.
print("تعريف متغير يخزّن فيه نص ")
text = "Welcom To Python Lessons"
print(text)
print("--------------------------")

# Storing a multi-line string (triple-quoted) in a variable.
print(" تعريف متغير يخزّن فيه نص متعدد الاسطر")
install = """
Python source code and installers
are available for download for all
versions!
"""
print(install)
print("--------------------------")

# Strings are indexable sequences (like arrays of characters).
MyText = "Python, Week 2, lesson"
print(MyText)         # whole string
print(MyText[0:6])    # characters 0-5: "Python"
print(MyText[7:14])   # characters 7-13: " Week 2"
print(MyText[15:22])  # characters 15-21: " lesson"
| 19.111111 | 48 | 0.605814 | """
السلاسل النصية / النصوص في لغة البايثون
"""
print("السلاسل النصية / النصوص في لغة البايثون")

print('Hello World')  # single quotes
print("Hello World")  # double quotes
print("--------------------------")

# Storing a string in a variable.
print("تعريف متغير يخزّن فيه نص ")
text = "Welcom To Python Lessons"
print(text)
print("--------------------------")

# Storing a multi-line string (triple-quoted) in a variable.
print(" تعريف متغير يخزّن فيه نص متعدد الاسطر")
install = """
Python source code and installers
are available for download for all
versions!
"""
print(install)
print("--------------------------")

# Strings are indexable sequences (like arrays of characters).
MyText = "Python, Week 2, lesson"
print(MyText)         # whole string
print(MyText[0:6])    # characters 0-5: "Python"
print(MyText[7:14])   # characters 7-13: " Week 2"
print(MyText[15:22])  # characters 15-21: " lesson"
| 0 | 0 | 0 |
828505803235434fcad35452330a78c506f74752 | 118 | py | Python | web_scraping/ec2files/ec2file143.py | nikibhatt/Groa | fc2d4ae87cb825e6d54a0831c72be16541eebe61 | [
"MIT"
] | 1 | 2020-04-08T19:44:30.000Z | 2020-04-08T19:44:30.000Z | web_scraping/ec2files/ec2file143.py | cmgospod/Groa | 31b3624bfe61e772b55f8175b4e95d63c9e67966 | [
"MIT"
] | null | null | null | web_scraping/ec2files/ec2file143.py | cmgospod/Groa | 31b3624bfe61e772b55f8175b4e95d63c9e67966 | [
"MIT"
] | 1 | 2020-09-12T07:07:41.000Z | 2020-09-12T07:07:41.000Z | from scraper import *
# Worker instance #143 covering records 254826-256607; presumably max_iter
# bounds the per-batch work -- confirm against the Scraper implementation.
s = Scraper(start=254826, end=256607, max_iter=30, scraper_instance=143)
s.scrape_letterboxd() | 39.333333 | 73 | 0.779661 | from scraper import *
# Worker instance #143 covering records 254826-256607; presumably max_iter
# bounds the per-batch work -- confirm against the Scraper implementation.
s = Scraper(start=254826, end=256607, max_iter=30, scraper_instance=143)
s.scrape_letterboxd() | 0 | 0 | 0 |
a1bfac324061c90d8817a6cc066d3a3ae6dae7db | 1,925 | py | Python | TrainHelper/keras_utils/keras_functions.py | MaLiN2223/TrainHelper | 0119185c1f260d03c3c364dfeda6271a92e812ef | [
"MIT"
] | 1 | 2018-04-01T21:47:36.000Z | 2018-04-01T21:47:36.000Z | TrainHelper/keras_utils/keras_functions.py | MaLiN2223/TrainHelper | 0119185c1f260d03c3c364dfeda6271a92e812ef | [
"MIT"
] | null | null | null | TrainHelper/keras_utils/keras_functions.py | MaLiN2223/TrainHelper | 0119185c1f260d03c3c364dfeda6271a92e812ef | [
"MIT"
] | null | null | null | import keras
import logging
| 43.75 | 116 | 0.742857 | import keras
import logging
def __get_early_stopping(config):
    """Create a Keras EarlyStopping callback from `config`.

    Unset (None) config entries fall back to a patience of 10 epochs
    and to monitoring 'val_acc'.
    """
    patience = config.EarlyStopping.EarlyStopping
    if patience is None:
        patience = 10
    watched_metric = config.EarlyStopping.Monitor
    if watched_metric is None:
        watched_metric = 'val_acc'
    logging.info('Model will early stop after {}'.format(patience))
    return keras.callbacks.EarlyStopping(monitor=watched_metric, patience=patience, mode='auto')
def __get_model_checkpoint(base_name, monitor, save_best_only):
    """Create a ModelCheckpoint writing weights to '<base_name>_best.h5'.

    With save_best_only=True only the epoch that improves `monitor` is
    kept; otherwise a checkpoint is written after every epoch.
    """
    if save_best_only:
        comment = "for best epoch"
    else:
        comment = "every epoch"
    logging.info('Model will save checkpoint {}'.format(comment))
    return keras.callbacks.ModelCheckpoint(
        base_name + '_best.h5',
        monitor=monitor,
        save_best_only=save_best_only,
        save_weights_only=True,
        mode='auto',
        period=1)
def __get_tensor_board(base_name, batch_size):
    """Create a TensorBoard callback logging under the `base_name` directory."""
    return keras.callbacks.TensorBoard(
        log_dir=base_name,
        histogram_freq=0,
        batch_size=batch_size,
        write_graph=True)
def get_keras_callbacks(config):
    """Assemble the list of Keras callbacks enabled in `config`.

    Checkpointing (best / every epoch), early stopping, TensorBoard,
    CSV logging and the project's Repeater callback are each added only
    when the corresponding config switch is set.
    """
    result = []
    run_prefix = config.General.ResultsDir + '/' + config.General.Name
    if config.ModelCheckpoint.Best:
        result.append(__get_model_checkpoint(run_prefix, config.ModelCheckpoint.Monitor, True))
    if config.ModelCheckpoint.EachEpoch:
        result.append(__get_model_checkpoint(run_prefix, config.ModelCheckpoint.Monitor, False))
    if config.EarlyStopping:
        result.append(__get_early_stopping(config))
    if config.TensorBoard:
        result.append(__get_tensor_board(run_prefix, config.Training.BatchSize))
    if config.CSV:
        result.append(keras.callbacks.CSVLogger(run_prefix + '.log'))
    if config.Repeater:
        # Imported lazily so that merely importing this module does not pull in TensorFlow.
        from TrainHelper.callbacks import Repeater
        result.append(Repeater(config, config.queuer))
    return result
| 1,801 | 0 | 92 |
51b780fd6299877de96835d29da1d4abc39f8620 | 1,518 | py | Python | 2019/03-pragyan/cry-decode/solve.py | wani-hackase/wani-writeup | dd4ad0607d2f2193ad94c1ce65359294aa591681 | [
"MIT"
] | 25 | 2019-03-06T11:55:56.000Z | 2021-05-21T22:07:14.000Z | 2019/03-pragyan/cry-decode/solve.py | wani-hackase/wani-writeup | dd4ad0607d2f2193ad94c1ce65359294aa591681 | [
"MIT"
] | 1 | 2020-06-25T07:27:15.000Z | 2020-06-25T07:27:15.000Z | 2019/03-pragyan/cry-decode/solve.py | wani-hackase/wani-writeup | dd4ad0607d2f2193ad94c1ce65359294aa591681 | [
"MIT"
] | 1 | 2019-02-14T00:42:28.000Z | 2019-02-14T00:42:28.000Z | cipher_txt = open("ciphertext.txt").read()
cipher_num = []
# Convert each ciphertext character to a number in 0..25 ('a' -> 0).
for i in range(72):
    cipher_num.append(ord(cipher_txt[i]) - 97)
# Numeric values of the letters "p", "c", "t", "f" -- presumably a known
# plaintext crib (e.g. a flag prefix like "pctf"); TODO confirm.
p_num = ord("p") - 97
c_num = ord("c") - 97
t_num = ord("t") - 97
f_num = ord("f") - 97
# Brute-force every 2x2 Hill-cipher key matrix [[a, b], [c, d]] mod 26.
for a in range(26):
    for b in range(26):
        for c in range(26):
            for d in range(26):
                # Slide the 4-letter crib over consecutive digram positions.
                for i in range(34):
                    # Encrypt the crib digrams ("p","c") and ("t","f") with
                    # the candidate key (values do not depend on i) and
                    # compare against the ciphertext digrams at position i.
                    res1 = (a * p_num + b * c_num) % 26
                    res2 = (c * p_num + d * c_num) % 26
                    res3 = (a * t_num + b * f_num) % 26
                    res4 = (c * t_num + d * f_num) % 26
                    if (
                        (cipher_num[i * 2] == res1)
                        and (cipher_num[i * 2 + 1] == res2)
                        and (cipher_num[i * 2 + 2] == res3)
                        and (cipher_num[i * 2 + 3] == res4)
                    ):
                        print("\nkey is", a, b, c, d)
                        # Decrypt the full 72-letter ciphertext: for each of
                        # the 36 digrams, search the plaintext pair (x, y)
                        # that this key encrypts to it.
                        for item in range(36):
                            for x in range(26):
                                for y in range(26):
                                    if (
                                        (cipher_num[item * 2] == (a * x + b * y) % 26)
                                        and (cipher_num[item * 2 + 1] == (c * x + d * y) % 26)
                                    ):
                                        print(chr(x + 97), chr(y + 97), end=" ")
| 38.923077 | 94 | 0.344532 | cipher_txt = open("ciphertext.txt").read()
cipher_num = []
# Letters -> integers: 'a'..'z' maps to 0..25 (cipher_txt is read above).
for i in range(72):
    cipher_num.append(ord(cipher_txt[i]) - 97)
# Known-plaintext letters as numbers; "p c t f" looks like a crib for the
# expected flag prefix -- verify against the challenge description.
p_num = ord("p") - 97
c_num = ord("c") - 97
t_num = ord("t") - 97
f_num = ord("f") - 97
# Exhaustively try all 26^4 candidate 2x2 key matrices [[a, b], [c, d]].
for a in range(26):
    for b in range(26):
        for c in range(26):
            for d in range(26):
                # Try every even offset where the crib might sit.
                for i in range(34):
                    # Hill encryption of the two crib digrams under the
                    # candidate key (loop-invariant w.r.t. i).
                    res1 = (a * p_num + b * c_num) % 26
                    res2 = (c * p_num + d * c_num) % 26
                    res3 = (a * t_num + b * f_num) % 26
                    res4 = (c * t_num + d * f_num) % 26
                    if (
                        (cipher_num[i * 2] == res1)
                        and (cipher_num[i * 2 + 1] == res2)
                        and (cipher_num[i * 2 + 2] == res3)
                        and (cipher_num[i * 2 + 3] == res4)
                    ):
                        print("\nkey is", a, b, c, d)
                        # Recover the plaintext digram-by-digram by inverse
                        # search (avoids computing the matrix inverse mod 26).
                        for item in range(36):
                            for x in range(26):
                                for y in range(26):
                                    if (
                                        (cipher_num[item * 2] == (a * x + b * y) % 26)
                                        and (cipher_num[item * 2 + 1] == (c * x + d * y) % 26)
                                    ):
                                        print(chr(x + 97), chr(y + 97), end=" ")
| 0 | 0 | 0 |
7997c7107676971fe011f85b2a3d5321505232c4 | 3,447 | py | Python | python/ray/rllib/RL/BRL/ktdq_exe.py | christopher-hsu/ray | abe84b596253411607a91b3a44c135f5e9ac6ac7 | [
"Apache-2.0"
] | 1 | 2019-07-08T15:29:25.000Z | 2019-07-08T15:29:25.000Z | python/ray/rllib/RL/BRL/ktdq_exe.py | christopher-hsu/ray | abe84b596253411607a91b3a44c135f5e9ac6ac7 | [
"Apache-2.0"
] | null | null | null | python/ray/rllib/RL/BRL/ktdq_exe.py | christopher-hsu/ray | abe84b596253411607a91b3a44c135f5e9ac6ac7 | [
"Apache-2.0"
] | null | null | null |
from ktd_q import *
import brl
import models
import matplotlib.pyplot as plt
import seeding
import numpy as np
import gym
import argparse
# Command-line options for the KTD-Q experiments (parsed at import time).
parser = argparse.ArgumentParser()
parser.add_argument('--kappa', type=int, default=10, help='kappa')
parser.add_argument('--epsilon', type=float, default=0.0, help= 'epsilon for covariance')
# NOTE(review): type=bool on argparse makes any non-empty string truthy
# (e.g. "--gym False" still yields True) -- confirm intended usage.
parser.add_argument('--gym', type=bool, default=False)
parser.add_argument('--scene', type=str, default='')
parser.add_argument('--iter', type=int, default=100, help='number of trials')
args = parser.parse_args()
if __name__ == "__main__":
main_brl() | 26.929688 | 110 | 0.668697 |
from ktd_q import *
import brl
import models
import matplotlib.pyplot as plt
import seeding
import numpy as np
import gym
import argparse
# CLI flags used by main_brl() below; parsed once at module import.
parser = argparse.ArgumentParser()
parser.add_argument('--kappa', type=int, default=10, help='kappa')
parser.add_argument('--epsilon', type=float, default=0.0, help= 'epsilon for covariance')
# NOTE(review): argparse type=bool treats any non-empty argument as True.
parser.add_argument('--gym', type=bool, default=False)
parser.add_argument('--scene', type=str, default='')
parser.add_argument('--iter', type=int, default=100, help='number of trials')
args = parser.parse_args()
def main():
    """Train KTD-Q on the inverted pendulum with a random behaviour policy.

    Runs 1000 episodes (each capped at 3000 steps), evaluates the greedy
    policy every 50th episode via test(), then plots the evaluation lengths.
    """
    np_random,_ = seeding.np_random(None)
    np.random.seed()
    # KTD-Q hyper-parameters (theta_noise=None -> no parameter evolution noise).
    eta = 0.0
    reward_noise = 1.0
    P_init = 10.
    theta_noise = None
    env = models.Inv_Pendulum()
    test_env = models.Inv_Pendulum()
    # 30-dimensional linear value function; features phi come from the env model.
    x = KTD_Q(phi=env.phi[0], gamma=0.95, P_init=P_init, theta0 = np.zeros(30,),theta_noise=theta_noise, eta=eta,
        reward_noise=reward_noise, anum = 3, kappa = 10.0)
    performance = []
    episode = 0
    step = 0
    state = env.reset(np_random)
    while(episode < 1000):
        # Behaviour policy: uniformly random over the 3 actions.
        action = np.random.choice(3,)
        reward, n_state, done = env.observe(state, action)
        x.update_V(state, action, n_state, reward)
        state = n_state
        step +=1
        if done or (step > 3000):
            if episode%50 == 0:
                # Periodic greedy evaluation on a separate env instance.
                performance.append(test(x, test_env))
                print("After %d steps, Episode %d: %d"%(step, episode, performance[-1]))
            episode +=1
            step = 0
            state = env.reset(np_random)
    plt.plot(performance)
    plt.show()
def main_brl():
    """Repeat the brl.ktd_Q cart-pole experiment args.iter times and plot results.

    Uses the module-level `args`: --gym switches to the OpenAI Gym variant,
    --kappa / --epsilon are forwarded to the learner.
    """
    perf = []
    np.random.seed()
    for i in range(args.iter):
        print("Iteration %d"%i)
        x = brl.ktd_Q('inv_pendulum', 0.95)
        if args.gym:
            x.learning_cartpole_gym(args.kappa, epsilon = args.epsilon)
        else:
            x.learning_cartpole(args.kappa, epsilon = args.epsilon, obs_noise=1.0)
        # Drop into the debugger on suspiciously long test episodes.
        # NOTE(review): relies on `pdb` being exported via `from ktd_q import *`;
        # there is no direct `import pdb` in this file -- confirm.
        if np.mean(x.test_counts) > 2000.0:
            pdb.set_trace()
        perf.append(x.test_counts)
    # Average the test-episode counts across trials.
    plt.plot(np.mean(perf, axis=0))
    plt.show()
    # Plot the 30 tracked parameter means of the last trial.
    means = np.array(x.avgmeans)
    [plt.plot(means[:,i]) for i in range(30)]; plt.show()
    pdb.set_trace()
def main_gym():
    """Train KTD-Q on OpenAI Gym CartPole-v0 with a random behaviour policy.

    Mirrors main() but uses gym environments, RBF features over the last two
    observation components, and 2 actions.
    """
    eta = 0.0
    reward_noise = 1.0
    P_init = 10.
    theta_noise = None
    env = gym.make('CartPole-v0')
    test_env = gym.make('CartPole-v0')
    # 20-dim linear value function; `rbf` presumably comes from the
    # `from ktd_q import *` star import -- confirm.
    x = KTD_Q(phi=rbf(10,2), gamma=1., P_init=P_init, theta0 = np.zeros(2*10,),theta_noise=theta_noise, eta=eta,
        reward_noise=reward_noise, anum = 2)
    performance = []
    episode = 0
    step = 0
    while(episode < 1000):
        # NOTE(review): env.reset() is inside the loop, so the episode is
        # re-initialised on every iteration (unlike main(), which resets only
        # at episode boundaries). Looks unintentional -- verify.
        state = env.reset()
        action = np.random.choice(2,)
        n_state, reward, done, _ = env.step(action)
        # Only the last two observation components are fed to the features.
        x.update_V(state[-2:], action, n_state[-2:], reward)
        state = n_state
        step+=1
        if done or (step > 3000):
            step = 0
            if episode%50 == 0:
                performance.append(test_gym(x, test_env))
                print("Episode %d: %d"%(episode, performance[-1]))
            episode +=1
            state = env.reset()
    plt.plot(performance)
    plt.show()
def test(x, env):
    """Run one greedy evaluation episode; return its length (capped at 3000)."""
    rng, _ = seeding.np_random(None)
    state = env.reset(rng)
    steps = 0
    done = False
    while not done and steps < 3000:
        # Greedy action w.r.t. the learned linear estimate theta . phi(s, a).
        q_values = [np.dot(x.theta, x.phi(state, a)) for a in range(x.anum)]
        action = np.argmax(q_values)
        reward, state, done = env.observe(state, action)
        steps += 1
    return steps
def test_gym(x, env):
    """Render one greedy episode on a gym env; return the steps survived (max 3000)."""
    state = env.reset()
    steps = 0
    done = False
    while not done and steps < 3000:
        # Greedy over the learned Q estimate, using only the last two
        # observation components (matches training in main_gym).
        q_values = [np.dot(x.theta, x.phi(state[-2:], a)) for a in range(x.anum)]
        action = np.argmax(q_values)
        print(action)
        env.render()
        state, reward, done, _ = env.step(action)
        steps += 1
    return steps
if __name__ == "__main__":
main_brl() | 2,746 | 0 | 115 |
db0d870ea15925fad92d89b9571a9051b1e41722 | 13,735 | py | Python | seek/migrations/0001_initial.py | BMCBCC/NExtSEEK | 7aca407bbc74efc5beb4a98227c6864444b11f61 | [
"MIT"
] | null | null | null | seek/migrations/0001_initial.py | BMCBCC/NExtSEEK | 7aca407bbc74efc5beb4a98227c6864444b11f61 | [
"MIT"
] | null | null | null | seek/migrations/0001_initial.py | BMCBCC/NExtSEEK | 7aca407bbc74efc5beb4a98227c6864444b11f61 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2020-08-04 01:45
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 50.87037 | 122 | 0.558355 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2020-08-04 01:45
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema migration (auto-generated by Django 1.10.7).

    Declares Django models mapped onto the pre-existing SEEK database
    tables (explicit ``db_table`` names such as ``assay_assets``,
    ``samples``, ``users`` ...), plus the local ``User_profile`` model
    tied to the Django auth user.
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Assay_assets',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('assay_id', models.IntegerField(default=None)),
                ('asset_id', models.IntegerField(default=None)),
                ('version', models.IntegerField(default=1)),
                ('created_at', models.DateTimeField(default=None)),
                ('updated_at', models.DateTimeField(default=None)),
                ('relationship_type_id', models.IntegerField(default=None)),
                ('asset_type', models.CharField(default=None, max_length=255)),
                ('direction', models.IntegerField(default=None)),
            ],
            options={
                'db_table': 'assay_assets',
            },
        ),
        migrations.CreateModel(
            name='Content_blobs',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('md5sum', models.CharField(default=None, max_length=255)),
                ('url', models.TextField(default=None)),
                ('uuid', models.CharField(default=None, max_length=255)),
                ('original_filename', models.CharField(default=None, max_length=255)),
                ('content_type', models.CharField(default=None, max_length=255)),
                ('asset_id', models.IntegerField(default=None)),
                ('asset_type', models.CharField(default=None, max_length=255)),
                ('asset_version', models.IntegerField(default=1)),
                ('is_webpage', models.BooleanField(default=0)),
                ('external_link', models.BooleanField(default=None)),
                ('sha1sum', models.CharField(default=None, max_length=255)),
                ('file_size', models.BigIntegerField(default=None)),
                ('created_at', models.DateTimeField(default=None)),
                ('updated_at', models.DateTimeField(default=None)),
            ],
            options={
                'db_table': 'content_blobs',
            },
        ),
        migrations.CreateModel(
            name='Data_files',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('contributor_id', models.IntegerField(default=None)),
                ('title', models.CharField(default=None, max_length=255)),
                ('description', models.TextField(default=None)),
                ('template_id', models.IntegerField(default=None)),
                ('last_used_at', models.DateTimeField()),
                ('created_at', models.DateTimeField()),
                ('updated_at', models.DateTimeField()),
                ('version', models.IntegerField(default=1)),
                ('first_letter', models.CharField(default=None, max_length=1)),
                ('other_creators', models.TextField(default=None)),
                ('uuid', models.CharField(default=None, max_length=255)),
                ('policy_id', models.IntegerField(default=None)),
                ('doi', models.CharField(default=None, max_length=255)),
                ('license', models.CharField(default=None, max_length=255)),
                ('simulation_data', models.BooleanField(default=0)),
                ('deleted_contributor', models.CharField(default=None, max_length=255)),
            ],
            options={
                'db_table': 'data_files',
            },
        ),
        migrations.CreateModel(
            name='Documents',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(default=None, max_length=255)),
                ('description', models.TextField(default=None)),
                ('contributor_id', models.IntegerField(default=None)),
                ('version', models.IntegerField(default=1)),
                ('first_letter', models.CharField(default=None, max_length=1)),
                ('uuid', models.CharField(default=None, max_length=255)),
                ('policy_id', models.IntegerField(default=None)),
                ('doi', models.CharField(default=None, max_length=255)),
                ('license', models.CharField(default=None, max_length=255)),
                ('last_used_at', models.DateTimeField()),
                ('created_at', models.DateTimeField()),
                ('updated_at', models.DateTimeField()),
                ('other_creators', models.TextField(default=None)),
                ('deleted_contributor', models.CharField(default=None, max_length=255)),
            ],
            options={
                'db_table': 'documents',
            },
        ),
        migrations.CreateModel(
            name='Permissions',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('contributor_type', models.CharField(default=None, max_length=255)),
                ('contributor_id', models.IntegerField(default=None)),
                ('policy_id', models.IntegerField(default=None)),
                ('access_type', models.IntegerField(default=0)),
                ('created_at', models.DateTimeField()),
                ('updated_at', models.DateTimeField()),
            ],
            options={
                'db_table': 'permissions',
            },
        ),
        migrations.CreateModel(
            name='Policies',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default=None, max_length=255)),
                ('sharing_scope', models.IntegerField(default=None)),
                ('access_type', models.IntegerField(default=0)),
                ('use_whitelist', models.BooleanField(default=None)),
                ('use_blacklist', models.BooleanField(default=None)),
                ('created_at', models.DateTimeField()),
                ('updated_at', models.DateTimeField()),
            ],
            options={
                'db_table': 'policies',
            },
        ),
        # NOTE(review): two fields are marked primary_key=True here; Django
        # does not support composite primary keys, and the AlterUniqueTogether
        # at the bottom already enforces pairwise uniqueness -- verify intent.
        migrations.CreateModel(
            name='Projects_samples',
            fields=[
                ('project_id', models.IntegerField(default=None, primary_key=True)),
                ('sample_id', models.IntegerField(default=None, primary_key=True, serialize=False)),
            ],
            options={
                'db_table': 'projects_samples',
            },
        ),
        migrations.CreateModel(
            name='Sample_attributes',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(default=None, max_length=255)),
                ('sample_attribute_type_id', models.IntegerField(default=None)),
                ('required', models.BooleanField(default=0)),
                ('created_at', models.DateTimeField()),
                ('updated_at', models.DateTimeField()),
                ('pos', models.IntegerField(default=None)),
                ('sample_type_id', models.IntegerField(default=None)),
                ('unit_id', models.IntegerField(default=None)),
                ('is_title', models.BooleanField(default=0)),
                ('template_column_index', models.IntegerField(default=None)),
                ('accessor_name', models.CharField(default=None, max_length=255)),
                ('sample_controlled_vocab_id', models.IntegerField(default=None)),
                ('linked_sample_type_id', models.IntegerField(default=None)),
            ],
            options={
                'db_table': 'sample_attributes',
            },
        ),
        migrations.CreateModel(
            name='Sample_types',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(default=None, max_length=255)),
                ('uuid', models.CharField(default=None, max_length=255)),
                ('created_at', models.DateTimeField()),
                ('updated_at', models.DateTimeField()),
                ('first_letter', models.CharField(default=None, max_length=1)),
                ('description', models.TextField(default=None)),
                ('uploaded_template', models.BooleanField(default=0)),
                ('contributor_id', models.IntegerField(default=None)),
                ('deleted_contributor', models.CharField(default=None, max_length=255)),
            ],
            options={
                'db_table': 'sample_types',
            },
        ),
        migrations.CreateModel(
            name='Samples',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(default=None, max_length=255)),
                ('sample_type_id', models.IntegerField(default=None)),
                ('json_metadata', models.TextField(default=None)),
                ('uuid', models.CharField(default=None, max_length=255)),
                ('contributor_id', models.IntegerField(default=None)),
                ('policy_id', models.IntegerField(default=None)),
                ('created_at', models.DateTimeField()),
                ('updated_at', models.DateTimeField()),
                ('first_letter', models.CharField(default=None, max_length=1)),
                ('other_creators', models.TextField(default=None)),
                ('originating_data_file_id', models.IntegerField(default=None)),
                ('deleted_contributor', models.CharField(default=None, max_length=255)),
            ],
            options={
                'db_table': 'samples',
            },
        ),
        migrations.CreateModel(
            name='Sops',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('contributor_id', models.IntegerField(default=None)),
                ('title', models.CharField(default=None, max_length=255)),
                ('description', models.TextField(default=None)),
                ('created_at', models.DateTimeField()),
                ('updated_at', models.DateTimeField()),
                ('last_used_at', models.DateTimeField()),
                ('version', models.IntegerField(default=1)),
                ('first_letter', models.CharField(default=None, max_length=1)),
                ('other_creators', models.TextField(default=None)),
                ('uuid', models.CharField(default=None, max_length=255)),
                ('policy_id', models.IntegerField(default=None)),
                ('doi', models.CharField(default=None, max_length=255)),
                ('license', models.CharField(default=None, max_length=255)),
                ('deleted_contributor', models.CharField(default=None, max_length=255)),
            ],
            options={
                'db_table': 'sops',
            },
        ),
        # Local (non-SEEK) model: extra profile data attached to auth users.
        migrations.CreateModel(
            name='User_profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('project', models.CharField(choices=[(0, 'Undefined'), (1, 'IMPAcTb'), (2, 'MIT_SRP')], max_length=255)),
                ('institute', models.TextField()),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Users',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('login', models.CharField(default=None, max_length=255)),
                ('crypted_password', models.CharField(default=None, max_length=255)),
                ('salt', models.CharField(default=None, max_length=255)),
                ('created_at', models.DateTimeField()),
                ('updated_at', models.DateTimeField()),
                ('remember_token', models.CharField(default=None, max_length=255)),
                ('remember_token_expires_at', models.DateTimeField()),
                ('activation_code', models.CharField(default=None, max_length=255)),
                ('activated_at', models.DateTimeField()),
                ('person_id', models.IntegerField(default=None)),
                ('reset_password_code', models.CharField(default=None, max_length=255)),
                ('reset_password_code_until', models.DateTimeField()),
                ('posts_count', models.IntegerField(default=None)),
                ('last_seen_at', models.DateTimeField()),
                ('uuid', models.CharField(default=None, max_length=255)),
            ],
            options={
                'db_table': 'users',
            },
        ),
        migrations.AlterUniqueTogether(
            name='projects_samples',
            unique_together=set([('project_id', 'sample_id')]),
        ),
    ]
| 0 | 13,490 | 23 |
d6bcbbbfafdf821ac53550174ac49f1c78ed82a9 | 830 | py | Python | setup.py | Free-tek/Worldwide-Newspaper-Scraping-Script | df7eca801fcfa314b6078126256712b53b7a2344 | [
"MIT"
] | 2 | 2020-01-04T00:08:19.000Z | 2021-07-12T18:55:11.000Z | setup.py | Free-tek/Worldwide-Newspaper-Scraping-Script | df7eca801fcfa314b6078126256712b53b7a2344 | [
"MIT"
] | null | null | null | setup.py | Free-tek/Worldwide-Newspaper-Scraping-Script | df7eca801fcfa314b6078126256712b53b7a2344 | [
"MIT"
] | null | null | null | import setuptools
# Long description for PyPI is taken verbatim from the README.
# Fix: specify the encoding explicitly -- the platform default may not be
# UTF-8, which would break reading the README's non-ASCII characters.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
setuptools.setup(
    name="anjie",
    version="1.0.0",
    author="Babatunde Adewole",
    author_email="adewole63@gmail.com",
    description="This python library provides corpus in English and various local african languages e.g(Youruba, Hausa, Pidgin), it also does sentiment analysis on brands",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Free-tek/Anjie_local_language_corpus_generator",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
| 36.086957 | 172 | 0.701205 | import setuptools
# Read the README as the PyPI long description.
# NOTE(review): no explicit encoding -- relies on the platform default being
# UTF-8-compatible; consider open("README.md", "r", encoding="utf-8").
with open("README.md", "r") as fh:
    long_description = fh.read()
setuptools.setup(
    name="anjie", # PyPI distribution name
    version="1.0.0",
    author="Babatunde Adewole",
    author_email="adewole63@gmail.com",
    description="This python library provides corpus in English and various local african languages e.g(Youruba, Hausa, Pidgin), it also does sentiment analysis on brands",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Free-tek/Anjie_local_language_corpus_generator",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
| 0 | 0 | 0 |
075d2b07fd5f27148bcd7043dbb0aaf8cae8e8a2 | 1,473 | py | Python | gazonoVehicle/OpenCV/follower_opencv.py | huckl3b3rry87/Chrono_Gazebo | 5261cd3b4eabe9188ddd0c8991bd6a8d69acf06b | [
"BSD-3-Clause"
] | 3 | 2018-11-11T11:44:25.000Z | 2022-01-25T14:39:14.000Z | gazonoVehicle/OpenCV/follower_opencv.py | huckl3b3rry87/Chrono_Gazebo | 5261cd3b4eabe9188ddd0c8991bd6a8d69acf06b | [
"BSD-3-Clause"
] | null | null | null | gazonoVehicle/OpenCV/follower_opencv.py | huckl3b3rry87/Chrono_Gazebo | 5261cd3b4eabe9188ddd0c8991bd6a8d69acf06b | [
"BSD-3-Clause"
] | 2 | 2018-01-29T18:26:33.000Z | 2021-10-04T13:45:35.000Z | import rospy, cv2, cv_bridge, numpy, math
from sensor_msgs.msg import Image
from std_msgs.msg import Float64
# Start the ROS node: constructing Follower wires up the subscriber and
# publisher, then spin() hands control to the ROS callback loop.
rospy.init_node('follower')
follower = Follower()
rospy.spin()
| 30.6875 | 80 | 0.610319 | import rospy, cv2, cv_bridge, numpy, math
from sensor_msgs.msg import Image
from std_msgs.msg import Float64
class Follower:
    """ROS node that tracks a coloured target in camera images.

    Subscribes to 'image_raw', thresholds each frame in HSV space within a
    narrow horizontal band, and publishes the target's normalised horizontal
    offset from the image centre (range -1..1) on the 'track_point' topic.
    """
    def __init__(self):
        self.bridge = cv_bridge.CvBridge()
        cv2.namedWindow("window", 1)
        self.image_sub = rospy.Subscriber('image_raw', Image, self.image_callback)
        self.pub = rospy.Publisher('track_point', Float64, queue_size=10)
    def image_callback(self, msg):
        # Process one camera frame; publish an offset only when the mask
        # contains any matching pixels.
        image = self.bridge.imgmsg_to_cv2(msg)
        hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        # HSV threshold bounds for the tracked colour (hand-tuned values;
        # presumably the track line in the simulated world -- confirm).
        lower_yellow = numpy.array([ 50, 240, 100])
        upper_yellow = numpy.array([255, 255, 190])
        # Pre-tuning limits kept for reference:
        #lower_yellow = numpy.array([ 50, 50, 170])
        #upper_yellow = numpy.array([255, 255, 190])
        mask = cv2.inRange(hsv, lower_yellow, upper_yellow)
        h, w, d = image.shape
        # Keep only a 10-pixel-high band of the mask below the image centre;
        # the row comes from a camera-geometry formula (asin term) --
        # TODO confirm the constants against the camera mounting.
        search = int(h/2.0*(math.asin(0.5/3.0) / .875) + h/2.0)
        search_top = search - 10
        search_bot = search_top + 10
        mask[0:search_top, 0:w] = 0
        mask[search_bot:h, 0:w] = 0
        # Centroid of the surviving mask pixels via image moments.
        M = cv2.moments(mask)
        if M['m00'] > 0:
            cx = int(M['m10']/M['m00'])
            cy = int(M['m01']/M['m00'])
            # Visual debug: mark the centroid and show the frame.
            cv2.circle(image, (cx, cy), 20, (0,0,255), -1)
            cv2.imshow("window", image)
            cv2.waitKey(3)
            # Normalised offset: +1 = target at far left, -1 = far right.
            y = (w/2 - cx)/float(w/2)
            self.pub.publish(y)
# Node entry point: register with the ROS master, instantiate the tracker
# (subscriptions are created in __init__), and block in the event loop.
rospy.init_node('follower')
follower = Follower()
rospy.spin()
| 1,231 | -6 | 72 |