| Column | Type | Min | Max | Nullable |
|---|---|---|---|---|
| hexsha | string | 40 | 40 | |
| size | int64 | 5 | 2.06M | |
| ext | string (10 classes) | | | |
| lang | string (1 value) | | | |
| max_stars_repo_path | string | 3 | 248 | |
| max_stars_repo_name | string | 5 | 125 | |
| max_stars_repo_head_hexsha | string | 40 | 78 | |
| max_stars_repo_licenses | list | 1 | 10 | |
| max_stars_count | int64 | 1 | 191k | ⌀ |
| max_stars_repo_stars_event_min_datetime | string | 24 | 24 | ⌀ |
| max_stars_repo_stars_event_max_datetime | string | 24 | 24 | ⌀ |
| max_issues_repo_path | string | 3 | 248 | |
| max_issues_repo_name | string | 5 | 125 | |
| max_issues_repo_head_hexsha | string | 40 | 78 | |
| max_issues_repo_licenses | list | 1 | 10 | |
| max_issues_count | int64 | 1 | 67k | ⌀ |
| max_issues_repo_issues_event_min_datetime | string | 24 | 24 | ⌀ |
| max_issues_repo_issues_event_max_datetime | string | 24 | 24 | ⌀ |
| max_forks_repo_path | string | 3 | 248 | |
| max_forks_repo_name | string | 5 | 125 | |
| max_forks_repo_head_hexsha | string | 40 | 78 | |
| max_forks_repo_licenses | list | 1 | 10 | |
| max_forks_count | int64 | 1 | 105k | ⌀ |
| max_forks_repo_forks_event_min_datetime | string | 24 | 24 | ⌀ |
| max_forks_repo_forks_event_max_datetime | string | 24 | 24 | ⌀ |
| content | string | 5 | 2.06M | |
| avg_line_length | float64 | 1 | 1.02M | |
| max_line_length | int64 | 3 | 1.03M | |
| alphanum_fraction | float64 | 0 | 1 | |
| count_classes | int64 | 0 | 1.6M | |
| score_classes | float64 | 0 | 1 | |
| count_generators | int64 | 0 | 651k | |
| score_generators | float64 | 0 | 1 | |
| count_decorators | int64 | 0 | 990k | |
| score_decorators | float64 | 0 | 1 | |
| count_async_functions | int64 | 0 | 235k | |
| score_async_functions | float64 | 0 | 1 | |
| count_documentation | int64 | 0 | 1.04M | |
| score_documentation | float64 | 0 | 1 | |

Min/Max are value ranges for numeric columns and length ranges for string and list columns; ⌀ marks columns that may be null.
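The sketch below is only an illustration of how these columns relate to each other; it assumes the rows have been exported to a JSON Lines file (the name `records.jsonl` is hypothetical) and is not tied to any particular loader.

```python
import json

# Illustrative only: assumes each dataset row was exported as one JSON object
# per line, using the column names from the table above.
with open("records.jsonl") as fp:
    records = [json.loads(line) for line in fp]

# Keep small, well-documented Python files that define at least one class.
selected = [
    r for r in records
    if r["size"] < 100_000
    and r["score_documentation"] > 0.3
    and r["count_classes"] > 0
]
for r in selected:
    print(r["max_stars_repo_name"], r["max_stars_repo_path"], r["max_stars_count"])
```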
0c34fc08f42c8637a1d795a07180d44a6de5c252 | 4,946 | py | Python | components/server/src/data_model/sources/jenkins.py | m-zakeri/quality-time | 531931f0d8d4f5d262ea98445868158e41d268da | ["Apache-2.0"] | null | null | null | components/server/src/data_model/sources/jenkins.py | m-zakeri/quality-time | 531931f0d8d4f5d262ea98445868158e41d268da | ["Apache-2.0"] | null | null | null | components/server/src/data_model/sources/jenkins.py | m-zakeri/quality-time | 531931f0d8d4f5d262ea98445868158e41d268da | ["Apache-2.0"] | null | null | null |
"""Jenkins source."""
from ..meta.entity import Color, EntityAttributeType
from ..parameters import (
access_parameters,
Days,
FailureType,
MultipleChoiceParameter,
MultipleChoiceWithAdditionParameter,
TestResult,
)
from ..meta.source import Source
def jenkins_access_parameters(*args, **kwargs):
"""Create Jenkins specific access parameters."""
kwargs["include"] = dict(private_token=False, landing_url=False)
if "name" not in kwargs.setdefault("kwargs", {}).setdefault("url", {}):
kwargs["kwargs"]["url"]["name"] = "URL to Jenkins job"
kwargs["kwargs"]["password"] = dict(
name="Password or API token for basic authentication",
help_url="https://wiki.jenkins.io/display/JENKINS/Authenticating+scripted+clients",
)
return access_parameters(*args, **kwargs)
ALL_JENKINS_METRICS = ["failed_jobs", "source_up_to_dateness", "source_version", "unused_jobs"]
JOB_ENTITY = dict(
name="job",
attributes=[
dict(name="Job name", key="name", url="url"),
dict(
name="Status of most recent build",
key="build_status",
color=dict(Success=Color.POSITIVE, Failure=Color.NEGATIVE, Aborted=Color.ACTIVE, Unstable=Color.WARNING),
),
dict(name="Date of most recent build", key="build_date", type=EntityAttributeType.DATE),
],
)
JENKINS = Source(
name="Jenkins",
description="Jenkins is an open source continuous integration/continuous deployment server.",
url="https://jenkins.io/",
parameters=dict(
inactive_days=Days(
name="Number of days without builds after which to consider CI-jobs unused.",
short_name="number of days without builds",
default_value="90",
metrics=["unused_jobs"],
),
jobs_to_include=MultipleChoiceWithAdditionParameter(
name="Jobs to include (regular expressions or job names)",
short_name="jobs to include",
help="Jobs to include can be specified by job name or by regular expression. "
"Use {parent job name}/{child job name} for the names of nested jobs.",
placeholder="all",
metrics=["failed_jobs", "source_up_to_dateness", "unused_jobs"],
),
jobs_to_ignore=MultipleChoiceWithAdditionParameter(
name="Jobs to ignore (regular expressions or job names)",
short_name="jobs to ignore",
help="Jobs to ignore can be specified by job name or by regular expression. "
"Use {parent job name}/{child job name} for the names of nested jobs.",
metrics=["failed_jobs", "source_up_to_dateness", "unused_jobs"],
),
result_type=MultipleChoiceParameter(
name="Build result types",
short_name="result types",
help="Limit which build result types to include.",
placeholder="all result types",
values=["Aborted", "Failure", "Not built", "Success", "Unstable"],
metrics=["source_up_to_dateness"],
),
failure_type=FailureType(values=["Aborted", "Failure", "Not built", "Unstable"]),
**jenkins_access_parameters(
ALL_JENKINS_METRICS,
kwargs=dict(
url=dict(
name="URL",
help="URL of the Jenkins instance, with port if necessary, but without path. For example, "
"'https://jenkins.example.org'.",
)
),
)
),
entities=dict(
failed_jobs=JOB_ENTITY,
source_up_to_dateness=JOB_ENTITY,
unused_jobs=JOB_ENTITY,
),
)
ALL_JENKINS_TEST_REPORT_METRICS = ["source_up_to_dateness", "tests"]
JENKINS_TEST_REPORT = Source(
name="Jenkins test report",
description="A Jenkins job with test results.",
url="https://plugins.jenkins.io/junit",
parameters=dict(
test_result=TestResult(values=["failed", "passed", "skipped"]),
**jenkins_access_parameters(
ALL_JENKINS_TEST_REPORT_METRICS,
kwargs=dict(
url=dict(
help="URL to a Jenkins job with a test report generated by the JUnit plugin. For example, "
"'https://jenkins.example.org/job/test' or https://jenkins.example.org/job/test/job/master' "
"in case of a pipeline job."
)
),
)
),
entities=dict(
tests=dict(
name="test",
attributes=[
dict(name="Class name"),
dict(name="Test case", key="name"),
dict(
name="Test result", color=dict(failed=Color.NEGATIVE, passed=Color.POSITIVE, skipped=Color.WARNING)
),
dict(name="Number of builds the test has been failing", key="age", type=EntityAttributeType.INTEGER),
],
)
),
)
| 38.640625 | 119 | 0.603114 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,978 | 0.399919 |
0c3529b1f848cd4aef129f241e7149c8c46fd8c6 | 3,155 | py | Python | tests/test_transducer.py | dandersonw/myouji-kenchi | 6a373d8626995bf5d4383dcac4fc8e6372135640 | ["MIT"] | 7 | 2019-10-22T10:09:12.000Z | 2022-01-31T07:49:07.000Z | tests/test_transducer.py | dandersonw/myouji-kenchi | 6a373d8626995bf5d4383dcac4fc8e6372135640 | ["MIT"] | null | null | null | tests/test_transducer.py | dandersonw/myouji-kenchi | 6a373d8626995bf5d4383dcac4fc8e6372135640 | ["MIT"] | 1 | 2021-07-09T18:10:17.000Z | 2021-07-09T18:10:17.000Z |
import myouji_kenchi
# Given that the output depends on what goes into the attested myouji file I'm
# hesitant to write too many tests in the blast radius of changes to that file
class TestTransducer():
    nbt = myouji_kenchi.MyoujiBackTransliteration()
    def assert_transliteration(self, romaji, *expected_results):
        results = self.nbt.back_transliterate(romaji)
        strings = set(r[0] for r in results)
        assert strings == set(expected_results)
    def test_assorted(self):
        self.assert_transliteration('sa', 'サ')
        self.assert_transliteration('SA', 'サ')
        self.assert_transliteration('se', 'セ')
        self.assert_transliteration('shō', 'ショウ')  # composed
        self.assert_transliteration('shō', 'ショウ')  # decomposed
        self.assert_transliteration('sho', 'ショウ')
        self.assert_transliteration('syo', 'ショウ')
        self.assert_transliteration('ho', 'ホ', 'ホウ', 'ホオ')
        self.assert_transliteration('teppou', 'テッポウ')
        self.assert_transliteration('shibukawa', 'シブカワ')
        self.assert_transliteration('watamura', 'ワタムラ')
        self.assert_transliteration('Matsumoto', 'マツモト')
        self.assert_transliteration('Matumoto', 'マツモト')
        self.assert_transliteration('Tusima', 'ツシマ')
        self.assert_transliteration('IMAZU', 'イマヅ', 'イマズ')
        self.assert_transliteration('SATO', 'サトウ', 'サトオ', 'サト')
        self.assert_transliteration('Uchino', 'ウチノ', 'ウチノウ')
        self.assert_transliteration('Utino', 'ウチノ', 'ウチノウ')
        self.assert_transliteration('Chano', 'チャノ')
        self.assert_transliteration('Tyano', 'チャノ')
        self.assert_transliteration('Kojima', 'コジマ', 'コヂマ', 'コウジマ')
        self.assert_transliteration('Kozima', 'コジマ', 'コヂマ', 'コウジマ')
        self.assert_transliteration('Inuduka', 'イヌヅカ')
        self.assert_transliteration('Inuzuka', 'イヌヅカ', 'イヌズカ')
        self.assert_transliteration('Inudzuka', 'イヌヅカ')
        self.assert_transliteration('Betchaku', 'ベッチャク')
        self.assert_transliteration('Becchaku', 'ベッチャク')
        self.assert_transliteration('Uwozaki', 'ウヲザキ')
        self.assert_transliteration('Uozaki', 'ウヲザキ', 'ウオザキ')
        self.assert_transliteration('Nyoya', 'ニョウヤ')
        self.assert_transliteration('Nitta', 'ニッタ')
    def test_oh(self):
        self.assert_transliteration('Ohnishi', 'オオニシ', 'オウニシ')
    def test_leading_m(self):
        self.assert_transliteration('Sampei', 'サンペイ')
        self.assert_transliteration('Sanpei', 'サンペイ')
    def test_glottal_stop(self):
        self.assert_transliteration('Shinyagaito', 'シンヤガイト')
        self.assert_transliteration('Sinyagaito', 'シンヤガイト')
        self.assert_transliteration('Shin\'yagaito', 'シンヤガイト')
        self.assert_transliteration('Shin-yagaito', 'シンヤガイト')
    def test_double_i(self):
        self.assert_transliteration('Ishii', 'イシイ')
        # To be clear, 'Ishî' is almost certainly a wrong transliteration
        # Nevertheless, the below is the expected behavior
        self.assert_transliteration('Ishî', 'イシイ')
        self.assert_transliteration('Isî', 'イシイ')
    def test_bad_characters(self):
        self.assert_transliteration('@')
| 44.43662 | 78 | 0.66878 | 3,362 | 0.948111 | 0 | 0 | 0 | 0 | 0 | 0 | 1,317 | 0.371404 |
0c36ba1ececdebf56f9ae6696bc5d261578450ca | 1,668 | py | Python | algorithms/named/mergesort.py | thundergolfer/uni | e604d1edd8e5085f0ae1c0211015db38c07fc926 | ["MIT"] | 1 | 2022-01-06T04:50:09.000Z | 2022-01-06T04:50:09.000Z | algorithms/named/mergesort.py | thundergolfer/uni | e604d1edd8e5085f0ae1c0211015db38c07fc926 | ["MIT"] | 1 | 2022-01-23T06:09:21.000Z | 2022-01-23T06:14:17.000Z | algorithms/named/mergesort.py | thundergolfer/uni | e604d1edd8e5085f0ae1c0211015db38c07fc926 | ["MIT"] | null | null | null |
import unittest
from typing import List, Optional
# Consider making generic with:
# https://stackoverflow.com/a/47970232/4885590
def merge_sort(items: List[int], lo: int = 0, hi: Optional[int] = None) -> None:
    if hi is None:
        hi = len(items) - 1
    if lo < hi:
        mid = (lo + hi) // 2
        # Ranges passed are *inclusive*.
        merge_sort(items, lo=lo, hi=mid)
        merge_sort(items, lo=mid + 1, hi=hi)
        merge_partitions(items, lo, mid, hi)
def merge_partitions(items: List[int], lo: int, mid: int, hi: int) -> None:
    left_part = items[lo : mid + 1]
    right_part = items[mid + 1 : hi + 1]
    i = lo
    left_ptr = 0
    right_ptr = 0
    while left_ptr < len(left_part) and right_ptr < len(right_part):
        if left_part[left_ptr] < right_part[right_ptr]:
            items[i] = left_part[left_ptr]
            left_ptr += 1
        else:
            items[i] = right_part[right_ptr]
            right_ptr += 1
        i += 1
    # Only 1 of these two for-loops should ever run anything.
    for item in left_part[left_ptr:]:
        items[i] = item
        i += 1
    for item in right_part[right_ptr:]:
        items[i] = item
        i += 1
    # Idx tracking sorted insertions has reached beyond hi, so
    # partitions are fully merged.
    assert i == (hi + 1)
class TestMergeSort(unittest.TestCase):
    def test(self):
        actual = [10, 5, 9, 10, 3]
        expected = [10, 5, 9, 10, 3]
        expected.sort()
        merge_sort(actual)
        self.assertEqual(actual, expected)
if __name__ == "__main__":
    x = [10, 5, 9, 10, 3]
    print(x)
    merge_sort(x)
    print(x)
    unittest.main()
| 24.895522 | 80 | 0.576739 | 225 | 0.134892 | 0 | 0 | 0 | 0 | 0 | 0 | 264 | 0.158273 |
0c380570b168add317dd67b7037f3b6ec7e93c2b | 392 | py | Python | pages/main_page.py | thaidem/selenium-training-page-objects | 1f37a2b5287a502295bb57050c95455d68c2d3eb | ["Apache-2.0"] | null | null | null | pages/main_page.py | thaidem/selenium-training-page-objects | 1f37a2b5287a502295bb57050c95455d68c2d3eb | ["Apache-2.0"] | null | null | null | pages/main_page.py | thaidem/selenium-training-page-objects | 1f37a2b5287a502295bb57050c95455d68c2d3eb | ["Apache-2.0"] | null | null | null |
from selenium.webdriver.support.wait import WebDriverWait
class MainPage:
    def __init__(self, driver):
        self.driver = driver
        self.wait = WebDriverWait(driver, 10)
    def open(self):
        self.driver.get("https://litecart.stqa.ru/en/")
        return self
    @property
    def product_item(self):
        return self.driver.find_element_by_css_selector(".product")
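# Usage sketch (illustrative only; assumes a test runner that supplies a
# Selenium `driver`, which is not part of this file):
#
#     def test_open_first_product(driver):
#         page = MainPage(driver).open()
#         page.product_item.click()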
| 23.058824 | 67 | 0.670918 | 331 | 0.844388 | 0 | 0 | 105 | 0.267857 | 0 | 0 | 40 | 0.102041 |
0c39dce08e2639d2b8e9721a52545154b1694858 | 196 | py | Python | src/frr/tests/lib/test_nexthop_iter.py | zhouhaifeng/vpe | 9c644ffd561988e5740021ed26e0f7739844353d | ["Apache-2.0"] | null | null | null | src/frr/tests/lib/test_nexthop_iter.py | zhouhaifeng/vpe | 9c644ffd561988e5740021ed26e0f7739844353d | ["Apache-2.0"] | null | null | null | src/frr/tests/lib/test_nexthop_iter.py | zhouhaifeng/vpe | 9c644ffd561988e5740021ed26e0f7739844353d | ["Apache-2.0"] | null | null | null |
import frrtest
class TestNexthopIter(frrtest.TestMultiOut):
program = "./test_nexthop_iter"
TestNexthopIter.onesimple("Simple test passed.")
TestNexthopIter.onesimple("PRNG test passed.")
| 19.6 | 48 | 0.785714 | 80 | 0.408163 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.311224 |
0c3b154bc332d251c3e35e98a56001cf94c27a53 | 1,319 | py | Python | setup.py | themis-project/themis-finals-checker-app-py | 12e70102bcca3d6e4082d96e676e364176c0da67 | ["MIT"] | null | null | null | setup.py | themis-project/themis-finals-checker-app-py | 12e70102bcca3d6e4082d96e676e364176c0da67 | ["MIT"] | null | null | null | setup.py | themis-project/themis-finals-checker-app-py | 12e70102bcca3d6e4082d96e676e364176c0da67 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import io
import os
about = {}
about_filename = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    'themis', 'finals', 'checker', 'app', '__about__.py')
with io.open(about_filename, 'rb') as fp:
    exec(fp.read(), about)
setup(
    name='themis.finals.checker.app',
    version=about['__version__'],
    description='Themis Finals checker application',
    author='Alexander Pyatkin',
    author_email='aspyatkin@gmail.com',
    url='https://github.com/themis-project/themis-finals-checker-app-py',
    license='MIT',
    packages=find_packages('.'),
    install_requires=[
        'setuptools>=0.8',
        'Flask>=0.11.1,<0.12',
        'redis>=2.10.5,<2.11',
        'hiredis>=0.2.0,<0.3',
        'rq>=0.7.1,<0.8.0',
        'requests>=2.11.0',
        'python-dateutil>=2.5.3,<2.6',
        'themis.finals.checker.result==1.1.0',
        'raven>=5.26.0,<5.27.0',
        'PyJWT>=1.5.0,<1.6.0',
        'cryptography>=1.8.1,<1.9.0',
        'PyYAML>=3.11'
    ],
    namespace_packages=[
        'themis',
        'themis.finals',
        'themis.finals.checker'
    ],
    entry_points=dict(
        console_scripts=[
            'themis-finals-checker-app-worker = themis.finals.checker.app:start_worker'
        ]
    )
)
| 26.918367 | 87 | 0.581501 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 647 | 0.490523 |
0c3b1affbabd1c858deb93d0a0302a8d675091d1 | 8,090 | py | Python | tools/xenserver/cleanup_sm_locks.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | ["Apache-2.0"] | null | null | null | tools/xenserver/cleanup_sm_locks.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | ["Apache-2.0"] | null | null | null | tools/xenserver/cleanup_sm_locks.py | bopopescu/nova-token | ec98f69dea7b3e2b9013b27fd55a2c1a1ac6bfb2 | ["Apache-2.0"] | 2 | 2017-07-20T17:31:34.000Z | 2020-07-24T02:42:19.000Z |
#!/usr/bin/env python

# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Script to cleanup old XenServer /var/lock/sm locks.

XenServer 5.6 and 6.0 do not appear to always cleanup locks when using a
FileSR. ext3 has a limit of 32K inode links, so when we have 32K-2 (31998)
locks laying around, builds will begin to fail because we can't create any
additional locks. This cleanup script is something we can run periodically as
a stop-gap measure until this is fixed upstream.

This script should be run on the dom0 of the affected machine.
"""
import errno
import optparse
import os
import sys
import time

BASE = '/var/lock/sm'


def _get_age_days(secs):
    return float(time.time() - secs) / 86400


def _parse_args():
    parser = optparse.OptionParser()
    parser.add_option("-d", "--dry-run",
                      action="store_true", dest="dry_run", default=False,
                      help="don't actually remove locks")
    parser.add_option("-l", "--limit",
                      action="store", type='int', dest="limit",
                      default=sys.maxint,
                      help="max number of locks to delete (default: no limit)")
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default=False,
                      help="don't print status messages to stdout")

    options, args = parser.parse_args()

    try:
        days_old = int(args[0])
    except (IndexError, ValueError):
        parser.print_help()
        sys.exit(1)

    return options, days_old


def main():
    options, days_old = _parse_args()

    if not os.path.exists(BASE):
        print >> sys.stderr, "error: '%s' doesn't exist. Make sure you're" \
                             " running this on the dom0." % BASE
        sys.exit(1)

    lockpaths_removed = 0
    nspaths_removed = 0

    for nsname in os.listdir(BASE)[:options.limit]:
        nspath = os.path.join(BASE, nsname)

        if not os.path.isdir(nspath):
            continue

        # Remove old lockfiles
        removed = 0
        locknames = os.listdir(nspath)
        for lockname in locknames:
            lockpath = os.path.join(nspath, lockname)
            lock_age_days = _get_age_days(os.path.getmtime(lockpath))
            if lock_age_days > days_old:
                lockpaths_removed += 1
                removed += 1

                if options.verbose:
                    print 'Removing old lock: %03d %s' % (lock_age_days,
                                                          lockpath)

                if not options.dry_run:
                    os.unlink(lockpath)

        # Remove empty namespace paths
        if len(locknames) == removed:
            nspaths_removed += 1

            if options.verbose:
                print 'Removing empty namespace: %s' % nspath

            if not options.dry_run:
                try:
                    os.rmdir(nspath)
                except OSError, e:
                    if e.errno == errno.ENOTEMPTY:
                        print >> sys.stderr, "warning: directory '%s'" \
                                             " not empty" % nspath
                    else:
                        raise

    if options.dry_run:
        print "** Dry Run **"

    print "Total locks removed: ", lockpaths_removed
    print "Total namespaces removed: ", nspaths_removed


if __name__ == '__main__':
    main()
| 13.758503 | 495 | 0.591595 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,563 | 0.56403 |
0c3c5abd70c1f21c01879c6ec3f584ca3464ae2e | 13,324 | py | Python | clifun.py | tdimiduk/clifun | d7e5acae0a76506d9440ae86a15341b6cc1cf25e | ["MIT"] | 1 | 2022-01-04T17:58:19.000Z | 2022-01-04T17:58:19.000Z | clifun.py | tdimiduk/clifun | d7e5acae0a76506d9440ae86a15341b6cc1cf25e | ["MIT"] | 4 | 2022-01-04T17:17:33.000Z | 2022-01-04T17:26:12.000Z | clifun.py | tdimiduk/clifun | d7e5acae0a76506d9440ae86a15341b6cc1cf25e | ["MIT"] | null | null | null |
import datetime as dt
import importlib.util
import inspect
import itertools
import json
import os
import pathlib
import sys
import types
import typing
from typing import (
    Any,
    Callable,
    Dict,
    Generic,
    Iterable,
    Iterator,
    List,
    Optional,
    Set,
    Tuple,
    Type,
    TypeVar,
    Union,
)
S = TypeVar("S")
T = TypeVar("T")
O = TypeVar("O", Any, None)
StringInterpreters = Dict[Type[T], Callable[[str], T]]
def call(
    c: Callable[..., T],
    args: Optional[List[str]] = None,
    string_interpreters: Optional[StringInterpreters] = None,
) -> T:
    """
    Call a function from the command line
    Assembles the inputs to a function from command line arguments, environment variables, and config files and call it.
    """
    argv = sys.argv if args is None else args
    interpreters = (
        string_interpreters
        if string_interpreters is not None
        else default_string_interpreters()
    )
    annotated = annotate_callable(c, interpreters, [])
    provided_inputs = assemble_input_sources(argv)
    if provided_inputs.args.help:
        print_usage(annotated, header=True)
        sys.exit(0)
    needed_inputs = all_needed_inputs(annotated)
    unknown = invalid_args(provided_inputs.args.keyword.keys(), needed_inputs)
    if unknown:
        print(f"Unknown arguments: {unknown}")
        print_usage(annotated)
        sys.exit(1)
    resolved_inputs, missing_inputs = resolve_inputs(needed_inputs, provided_inputs)
    if missing_inputs:
        print(f"Missing arguments: {missing_inputs}")
        print_usage(annotated)
        sys.exit(1)
    return annotated(resolved_inputs)
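# Usage sketch (illustrative only; `greet` is a hypothetical caller-defined
# function, not part of clifun):
#
#     def greet(name: str, excited: bool = False) -> str:
#         return f"Hello {name}{'!' if excited else ''}"
#
#     if __name__ == "__main__":
#         # e.g. `python my_script.py --name world --excited true`; values are
#         # resolved from argv, environment variables, or config files.
#         print(call(greet))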
################################################################################
# Interpreting strings into python types
################################################################################
def default_string_interpreters() -> StringInterpreters:
    return {
        int: int,
        float: float,
        str: str,
        bool: interpret_bool,
        dt.datetime: interpret_datetime,
        dt.date: interpret_date,
    }
class InterpretationError(ValueError):
    def __init__(self, s: str, t: T):
        self.s = s
        self.t = t
    def __str__(self):
        return f"Could not interpret '{self.s}' as {self.t}"
def interpret_bool(s: str) -> bool:
    """
    Slightly more intuitive bool interpretation
    Raw python's `bool("false")==True` since it is a non-empty string
    """
    if s.lower() in {"t", "true", "yes", "y"}:
        return True
    elif s.lower() in {"f", "false", "no", "n"}:
        return False
    else:
        raise InterpretationError(s, bool)
def interpret_datetime(s: str) -> dt.datetime:
    """
    Date and time in isoformat
    """
    if hasattr(dt.datetime, "fromisoformat"):
        return dt.datetime.fromisoformat(s)
    else:
        # for python 3.6 where `fromisoformat` doesn't exist
        import isodate  # type: ignore
        return isodate.parse_datetime(s)
def interpret_date(s: str) -> dt.date:
    """
    Dates in YYYY-MM-DD format
    """
    return dt.date(*[int(i) for i in s.split("-")])
def interpret_string_as_type(
    s: str, t: Type[T], type_converters: StringInterpreters
) -> T:
    try:
        return (
            type_converters[unwrap_optional(t)](s)
            if is_optional(t)
            else type_converters[t](s)
        )
    except KeyError:
        raise InterpretationError(s, t)
################################################################################
# Data classes
#
# these should really be dataclasses, and will be converted when clifun drops compatability
# with python 3.6
################################################################################
class Arguments:
    def __init__(
        self, positional: List[str], keyword: Dict[str, str], help: bool = False
    ):
        self.positional = positional
        self.keyword = keyword
        self.help = help
class ConfigFiles:
    def __init__(self, configs: List[Dict[str, str]]):
        self.configs = configs
    def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
        for config in self.configs:
            if key in config:
                return config[key]
        return default
Annotated = Union["AnnotatedParameter", "AnnotatedCallable"]
class AnnotatedCallable(Generic[T]):
    def __init__(
        self, callable: Callable[..., T], name: str, needed_inputs: List[Annotated]
    ):
        self.callable = callable
        self.name = name
        self.needed_inputs = needed_inputs
    def __call__(self, inputs: Dict[str, str]):
        def collect(needed: Annotated):
            if isinstance(needed, AnnotatedParameter):
                value = inputs[needed.prefixed_name]
                if value is None:
                    if is_optional(needed.t):
                        return None
                    raise ValueError(
                        f"Somehow got None for non optional parameter {needed}"
                    )
                return needed(value)
            return needed(inputs)
        collected_inputs = {
            needed.name: collect(needed) for needed in self.needed_inputs
        }
        return self.callable(**collected_inputs)
    def __str__(self) -> str:
        return f"<callable: {self.name} {[str(i) for i in self.needed_inputs]}>"
class AnnotatedParameter(Generic[T]):
    def __init__(
        self, parameter: inspect.Parameter, from_string: Callable[[str], T], prefix
    ):
        self.parameter = parameter
        self.from_string = from_string
        self.prefix = prefix
    @property
    def name(self):
        return self.parameter.name
    @property
    def prefixed_name(self):
        return ".".join(self.prefix + [self.name])
    @property
    def t(self):
        return self.parameter.annotation
    @property
    def default(self):
        return self.parameter.default
    def __call__(self, input: Optional[str]) -> T:
        return self.from_string(input)
    def __str__(self) -> str:
        return f"<parameter: {self.name}: {self.t}>"
class InputSources:
    def __init__(self, args: Arguments, config_files: ConfigFiles):
        self.args = args
        self.config_files = config_files
    def get(self, key: str, default: Optional[T] = None) -> Union[str, T, None]:
        env_value = os.environ.get(key.upper(), default)
        return self.args.keyword.get(key, self.config_files.get(key, env_value))
    def get_value(self, value: AnnotatedParameter) -> Union[str, T, None]:
        return self.get(value.prefixed_name, value.default)
################################################################################
# Assemble inputs from the "outside world"
################################################################################
def assemble_input_sources(args: List[str]) -> InputSources:
    args_object = interpret_arguments(args)
    return InputSources(args_object, load_config_files(args_object.positional))
def interpret_arguments(args: Optional[List[str]] = None) -> Arguments:
    if args is None:
        args = sys.argv
    i = 1
    keyword = {}
    positional = []
    while i < len(args):
        arg = args[i]
        key = arg[2:]
        if arg in {"-h", "--help"}:
            return Arguments([], {}, True)
        if arg[:2] == "--":
            if len(args) < i + 2:
                raise ValueError(f"Missing value for argument: {key}")
            keyword[key] = args[i + 1]
            i += 2
        else:
            positional.append(arg)
            i += 1
    return Arguments(positional, keyword, not (keyword or positional))
def load_config_files(filenames: List[str]) -> ConfigFiles:
    # reverse the order so that later config files override earlier ones
    def load(name):
        if not pathlib.Path(name).exists():
            raise ValueError(f"Could not find config file {name}")
        return json.load(open(name))
    return ConfigFiles([load(name) for name in filenames[::-1]])
NOT_SPECIFIED = inspect._empty
def resolve_inputs(
    needed_inputs: List[AnnotatedParameter], provided_inputs: InputSources
) -> Tuple[Dict[str, Optional[str]], Set[str]]:
    missing = set()
    def resolve(v):
        s = provided_inputs.get_value(v)
        if s is None:
            if is_optional(v.t):
                return None
            else:
                missing.add(v.prefixed_name)
        if s == NOT_SPECIFIED:
            missing.add(v.prefixed_name)
        return s
    collected = {value.prefixed_name: resolve(value) for value in needed_inputs}
    return collected, missing
################################################################################
# Input validation and help
################################################################################
def check_usage(provided_inputs, needed_inputs) -> None:
    check_help(provided_inputs, needed_inputs)
    check_invalid_args(provided_inputs, needed_inputs)
def valid_args(values: List[AnnotatedParameter]) -> Set[str]:
    return {v.prefixed_name for v in values}
def invalid_args(args, allowed_args):
    return set(args) - valid_args(allowed_args)
def print_usage(annotated: AnnotatedCallable, header: bool = False) -> None:
    needed_inputs = all_needed_inputs(annotated)
    if header:
        print(f"{annotated.name}\n")
        doc = inspect.getdoc(annotated.callable)
        if doc:
            print(f"{doc}\n")
    print(f"Usage: {sys.argv[0]} [config_file] [--key: value]")
    print("\n".join(describe_needed(needed_inputs)))
def describe_needed(needed_inputs: List[AnnotatedParameter]) -> List[str]:
    def desc(v):
        base = f" --{v.prefixed_name}: {type_to_string(v.t)}"
        if v.default != NOT_SPECIFIED:
            default = f'"{v.default}"' if isinstance(v.default, str) else v.default
            return f"{base} (default: {default})"
        return base
    return [desc(v) for v in needed_inputs]
################################################################################
# Determine what inputs a function needs
################################################################################
def all_needed_inputs(c: AnnotatedCallable) -> List[AnnotatedParameter]:
    def inner():
        for needed in c.needed_inputs:
            if isinstance(needed, AnnotatedParameter):
                yield needed
            else:
                yield from all_needed_inputs(needed)
    return list(inner())
def inspect_parameters(t: Type[T]) -> Iterable[inspect.Parameter]:
    return inspect.signature(t).parameters.values()
def is_optional(t: Type[T]) -> bool:
    return Union[t, None] == t
def unwrap_optional(t: Optional[Type[T]]) -> Type[T]:
    if hasattr(typing, "get_args"):
        args = typing.get_args(t)
        if len(args) == 0:
            return t
        else:
            return args[0]
    # fallback for python < 3.8. May be brittle since it depends on an `_`'d interface
    # this should use typing.get_args, but that is not available until python 3.8
    if type(t) != typing._GenericAlias:
        return t
    for s in t.__args__:  # type: ignore
        if s != type(None):
            return s
def type_to_string(t: Type[O]) -> str:
    if is_optional(t):
        return f"Optional[{unwrap_optional(t).__name__}]"
    return t.__name__
def annotate_parameter(
    parameter: inspect.Parameter, interpreter: StringInterpreters, prefix: List[str]
) -> Union[AnnotatedParameter, AnnotatedCallable]:
    if parameter.annotation == NOT_SPECIFIED:
        raise Exception(f"Missing type annotation for {parameter}")
    t = unwrap_optional(parameter.annotation)
    if t in interpreter:
        # We have found a "basic" value we know how to interpret
        return AnnotatedParameter(parameter, from_string=interpreter[t], prefix=prefix)
    # This is some kind of composite
    prefix = prefix + [parameter.name]
    return annotate_callable(parameter.annotation, interpreter, prefix, parameter.name)
def annotate_callable(
    callable: Callable[..., T],
    interpreter: StringInterpreters,
    prefix: List[str],
    name: Optional[str] = None,
) -> AnnotatedCallable[T]:
    needed = [
        annotate_parameter(p, interpreter, prefix) for p in inspect_parameters(callable)
    ]
    return AnnotatedCallable(
        callable, name if name is not None else callable.__name__, needed
    )
################################################################################
# Make clifun.py usable as a script to call functions in any module
################################################################################
def import_module_by_path(path: pathlib.Path) -> types.ModuleType:
    spec = importlib.util.spec_from_file_location(path.name, str(path))
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
if __name__ == "__main__":
    print(sys.argv)
    if len(sys.argv) < 3:
        print("Usage: clifun.py path_to_module function_name ...")
        sys.exit(1)
    target = pathlib.Path(sys.argv[1]).resolve()
    function_name = sys.argv[2]
    arguments = sys.argv[2:]
    module = import_module_by_path(target)
    function = getattr(module, function_name)
    print(call(function, arguments))
| 29.283516 | 120 | 0.590513 | 3,014 | 0.226208 | 309 | 0.023191 | 290 | 0.021765 | 0 | 0 | 2,878 | 0.216001 |
0c3c8f51e10f9073a8d53d99be68ca016464578d | 2,758 | py | Python | pages/views.py | joshua-hashimoto/eigo-of-the-day-django | 68ec7fe4257c67689de596cf34e991a3750b7f36 | ["MIT"] | null | null | null | pages/views.py | joshua-hashimoto/eigo-of-the-day-django | 68ec7fe4257c67689de596cf34e991a3750b7f36 | ["MIT"] | 8 | 2021-04-08T19:45:15.000Z | 2022-03-12T00:49:25.000Z | pages/views.py | joshua-hashimoto/eigo-of-the-day-django | 68ec7fe4257c67689de596cf34e991a3750b7f36 | ["MIT"] | null | null | null |
import os
import json
import uuid
from django.conf import settings
from django.http import HttpResponse
from django.utils.translation import ugettext_lazy as _
from django.views.generic import View
import cloudinary
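# `LazyEncoder` is used below but never imported or defined in this file; a
# minimal definition is assumed here (modelled on martor's documentation
# example) so the error responses can serialize lazy translation strings.
from django.core.serializers.json import DjangoJSONEncoder
from django.utils.functional import Promise
class LazyEncoder(DjangoJSONEncoder):
    def default(self, obj):
        if isinstance(obj, Promise):
            # Force lazy translation strings to plain str before JSON encoding.
            return str(obj)
        return super().default(obj)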
class MarkdownImageUploader(View):
"""
custom image uploader for martor.
"""
def post(self, request, *args, **kwargs):
"""
called when images are uploaded to martor's markdown field.
validation is from martor's documentation.
it will upload images to cloudinary.
Note:
when there is '?' in the to be foldername the image upload will not work.
"""
if not request.is_ajax():
return HttpResponse(_('Invalid request!'))
if 'markdown-image-upload' not in request.FILES:
return HttpResponse(_('Invalid request!'))
image = request.FILES['markdown-image-upload']
image_types = [
'image/png', 'image/jpg',
'image/jpeg', 'image/pjpeg', 'image/gif'
]
if image.content_type not in image_types:
# return error when the image type
# is not an expected type
data = json.dumps({
'status': 405,
'error': _('Bad image format.')
}, cls=LazyEncoder)
return HttpResponse(
data, content_type='application/json', status=405)
if image.size > settings.MAX_IMAGE_UPLOAD_SIZE:
# return error when the image size
# is over the setted MAX_IMAGE_UPLOAD_SIZE
to_MB = settings.MAX_IMAGE_UPLOAD_SIZE / (1024 * 1024)
data = json.dumps({
'status': 405,
'error': _('Maximum image file is %(size) MB.') % {'size': to_MB}
}, cls=LazyEncoder)
return HttpResponse(
data, content_type='application/json', status=405)
# when the image is valid
# create new name for image
img_name = f'{uuid.uuid4().hex[:10]}-{image.name.replace(" ", "-")}'
# assign new name to the image that is being uploaded
image.name = img_name
# create folder path
img_folder = os.path.join(
settings.MEDIA_URL, 'memo')
# save image to cloudinary
cloudinary_img = cloudinary.uploader.upload(
image, folder=img_folder, overwrite=True)
# get the saved image url from cloudinary response
cloudinary_img_url = cloudinary_img['url']
# name json data to return to markdown
data = json.dumps({
'status': 200,
'link': cloudinary_img_url,
'name': image.name
})
return HttpResponse(data, content_type='application/json')
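# Wiring sketch (illustrative only; the URL path and urls module shown here are
# assumptions, not part of this file):
#
#     from django.urls import path
#     from pages.views import MarkdownImageUploader
#
#     urlpatterns = [
#         path('martor/uploader/', MarkdownImageUploader.as_view(),
#              name='markdown_uploader'),
#     ]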
| 34.049383 | 85 | 0.591008 | 2,537 | 0.919869 | 0 | 0 | 0 | 0 | 0 | 0 | 1,075 | 0.389775 |
0c3e673531e09903ae71e40dc82ffb45887a73df | 1,776 | py | Python | shc/log/in_memory.py | fabaff/smarthomeconnect | 611cd0f372d03b5fc5798a2a9a5f962d1da72799 | ["Apache-2.0"] | 5 | 2021-07-02T21:48:45.000Z | 2021-12-12T21:55:42.000Z | shc/log/in_memory.py | fabaff/smarthomeconnect | 611cd0f372d03b5fc5798a2a9a5f962d1da72799 | ["Apache-2.0"] | 49 | 2020-09-18T20:05:55.000Z | 2022-03-05T19:51:33.000Z | shc/log/in_memory.py | fabaff/smarthomeconnect | 611cd0f372d03b5fc5798a2a9a5f962d1da72799 | ["Apache-2.0"] | 1 | 2021-12-10T14:50:43.000Z | 2021-12-10T14:50:43.000Z |
import datetime
from typing import Optional, Type, Generic, List, Tuple
from ..base import T
from .generic import PersistenceVariable
class InMemoryPersistenceVariable(PersistenceVariable, Generic[T]):
    def __init__(self, type_: Type[T], keep: datetime.timedelta):
        super().__init__(type_, log=True)
        self.data: List[Tuple[datetime.datetime, T]] = []
        self.keep = keep
    async def _write_to_log(self, value: T):
        self.clean_up()
        self.data.append((datetime.datetime.now(datetime.timezone.utc), value))
    def clean_up(self) -> None:
        begin = datetime.datetime.now(datetime.timezone.utc) - self.keep
        keep_from: Optional[int] = None
        for i, (ts, _v) in enumerate(self.data):
            if ts > begin:
                keep_from = i
                break
        self.data = self.data[keep_from:]
    async def _read_from_log(self) -> Optional[T]:
        if not self.data:
            return None
        return self.data[-1][1]
    async def retrieve_log(self, start_time: datetime.datetime, end_time: datetime.datetime,
                           include_previous: bool = False) -> List[Tuple[datetime.datetime, T]]:
        iterator = iter(enumerate(self.data))
        try:
            start_index = next(i for i, (ts, _v) in iterator if ts >= start_time)
        except StopIteration:
            if include_previous and self.data:
                return self.data[-1:]
            else:
                return []
        if include_previous and start_index > 0:
            start_index -= 1
        try:
            end_index = next(i for i, (ts, _v) in iterator if ts > end_time)
        except StopIteration:
            return self.data[start_index:]
        return self.data[start_index:end_index]
| 36.244898 | 96 | 0.606419 | 1,638 | 0.922297 | 0 | 0 | 0 | 0 | 1,046 | 0.588964 | 0 | 0 |
0c3ec0f29f7bce414073cc341dd9839fbf5fca06 | 1,393 | py | Python | guts/api/contrib/type_actions.py | smallwormer/stable-liberty-guts | e635b710cdd210f70e9d50c3b85fffdeb53e8f01 | ["Apache-2.0"] | null | null | null | guts/api/contrib/type_actions.py | smallwormer/stable-liberty-guts | e635b710cdd210f70e9d50c3b85fffdeb53e8f01 | ["Apache-2.0"] | null | null | null | guts/api/contrib/type_actions.py | smallwormer/stable-liberty-guts | e635b710cdd210f70e9d50c3b85fffdeb53e8f01 | ["Apache-2.0"] | 1 | 2022-03-03T05:41:31.000Z | 2022-03-03T05:41:31.000Z |
# Copyright (c) 2015 Aptira Pty Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from guts.api import extensions
from guts.api.openstack import wsgi
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('types', '')
class TypeActionsController(wsgi.Controller):
    def __init__(self):
        super(TypeActionsController, self).__init__()
class Type_actions(extensions.ExtensionDescriptor):
"""Enables source hypervisor type actions."""
name = "TypeActions"
alias = "os-type-actions"
namespace = ""
updated = ""
def get_controller_extensions(self):
controller = TypeActionsController()
extension = extensions.ControllerExtension(self, 'types', controller)
return [extension]
| 29.638298 | 78 | 0.724336 | 508 | 0.364681 | 0 | 0 | 0 | 0 | 0 | 0 | 715 | 0.513281 |
0c4122d4f0b749136bdf171cdb6e696eecf404bd | 8,579 | py | Python | models/StyleTransfer/AdaIN.py | mtroym/pytorch-train | 3b303b6c7e364a58cb88d7142da942a30cc2b255 | ["Apache-2.0"] | 2 | 2019-12-21T14:40:11.000Z | 2020-05-26T09:26:52.000Z | models/StyleTransfer/AdaIN.py | mtroym/pytorch-train | 3b303b6c7e364a58cb88d7142da942a30cc2b255 | ["Apache-2.0"] | null | null | null | models/StyleTransfer/AdaIN.py | mtroym/pytorch-train | 3b303b6c7e364a58cb88d7142da942a30cc2b255 | ["Apache-2.0"] | 1 | 2020-10-16T12:03:19.000Z | 2020-10-16T12:03:19.000Z |
"""
Author: Yiming Mao - mtroym@github
Description: Transplant from "https://github.com/xunhuang1995/AdaIN-style/blob/master/train.lua"
"""
import functools
import os
from collections import OrderedDict
import torch
import torch.nn as nn
from torchvision.models import vgg19
from datasets.utils import denorm
from models.blocks import AdaptiveInstanceNorm2d
from models.blocks.vgg import rename_sequential
from models.helpers import init_weights
class _Encoder(nn.Module):
    def __init__(self, pretrained=True, init_type="normal", endlayer='relu4_1', feature_hook=None):
        super(_Encoder, self).__init__()
        self.init_type = init_type
        self.pretrained = pretrained
        self.feature_hook = ["relu1_1", "relu2_1", "relu3_1", "relu4_1"] if feature_hook is None else feature_hook
        self.core = nn.Sequential()
        backbone = vgg19(pretrained=pretrained, progress=True)
        feature_extractor = rename_sequential(backbone.features)
        for name, layer in feature_extractor.named_children():
            self.core.add_module(name, layer)
            if name == endlayer:
                break
        idx = -1
        while not hasattr(self.core[idx], "out_channels"):
            idx -= 1
        self.out_channels = self.core[idx].out_channels
    def init_param(self) -> None:
        if self.pretrained:
            return
        init_weights(self.model, self.init_type)
    def frozen(self):
        for param in self.core.parameters():
            param.requires_grad = False
    def forward(self, inputs, feature_hook=None):
        if feature_hook is None:
            feature_hook = self.feature_hook
        results = OrderedDict()
        for name, layer in self.core.named_children():
            inputs = layer(inputs)
            if name in feature_hook:
                results[name] = inputs
        return results
class _Decoder(nn.Module):
    def __init__(self, enc: nn.Module, activation="relu", remove_idx=-1):
        super(_Decoder, self).__init__()
        core_list = []
        nonlinear_act = nn.ReLU
        if activation == "lrelu":
            nonlinear_act = functools.partial(nn.LeakyReLU, negative_slope=0.2)
        for name, layer in enc.core.named_children():
            if 'conv' in name:
                in_channels, out_channels = layer.in_channels, layer.out_channels
                core_list.append((activation + name.replace("conv", ""),
                                  nonlinear_act(inplace=True)))
                # core_list.append(("in{}".format(name.replace("conv", "")),
                #                   nn.InstanceNorm2d(num_features=in_channels)))
                core_list.append(("conv{}".format(name.replace("conv", "")),
                                  nn.Conv2d(out_channels, in_channels, kernel_size=3, stride=1)))
                core_list.append(("pad{}".format(name.replace("conv", "")),
                                  nn.ReflectionPad2d(padding=(1, 1, 1, 1))))
            if 'pool' in name:
                core_list.append(("up{}".format(name.replace("pool", "")),
                                  nn.UpsamplingNearest2d(scale_factor=2)))
        self.core = rename_sequential(nn.Sequential(OrderedDict(reversed(core_list))))
        # print(self)
    def forward(self, inputs) -> torch.Tensor:
        return self.core(inputs)
class AdaIN:
    def __init__(self, opt):
        self.name = "AdaIN-Style model"
        self.opt = opt
        self.in_channel = opt.channel
        self.init_type = self.opt.init_type
        self.gpu_ids = opt.gpu_ids if opt.gpu_ids else []
        self.device = torch.device("cuda:0" if (torch.cuda.is_available() and len(self.gpu_ids) > 0) else "cpu")
        self.dtype = torch.cuda.FloatTensor if self.device != torch.device("cpu") else torch.FloatTensor
        self.save_dir = opt.expr_dir
        self.encoder = _Encoder()
        if self.opt.freeze_enc:
            self.encoder.frozen()
        self.decoder = _Decoder(self.encoder)
        self.adain = AdaptiveInstanceNorm2d(num_features=self.encoder.out_channels)
        self.optimizer = torch.optim.Adam(self.decoder.parameters(), lr=self.opt.lr, betas=(self.opt.beta1, 0.999))
        if self.opt.resume_path is not None:
            pass
        # place_holders
        self.inputs = None
        self.loss = None
        self.metrics = None
        self.current_minibatch = self.opt.batchSize
        if self.opt.resume_path is not None:
            self.load_model(self.opt.resume_path)
        self.cuda()
    def cuda(self):
        if torch.cuda.is_available():
            self.encoder.cuda(self.device)
            self.decoder.cuda(self.device)
            self.adain.cuda(self.device)
    def train_decoder(self, content, style, alpha=1.0):
        self.optimizer.zero_grad()
        # find all the features with frozen VGG
        style_features = self.encoder(style)
        content_latent = self.encoder(content, feature_hook=["relu4_1"])
        # Find adain
        trans_content = self.adain(content=content_latent["relu4_1"],
                                   style=style_features["relu4_1"])
        interpolate_latent = (1.0 - alpha) * content_latent["relu4_1"] + \
            alpha * trans_content
        transferred_image = self.decoder(interpolate_latent)
        transferred_features = self.encoder(transferred_image)
        c_loss = self.loss["content_loss"](transferred_features["relu4_1"], interpolate_latent)
        s_loss = self.loss["style_loss"](transferred_features, style_features)
        smooth_reg = self.loss["smooth_reg"](transferred_image)
        loss = c_loss.mean() + s_loss.mean() + smooth_reg.mean()
        loss.backward()
        self.optimizer.step()
        return c_loss, s_loss, smooth_reg, transferred_image
    def train_batch(self, inputs: dict, loss: dict, metrics: dict, niter: int = 0, epoch: int = 0) -> dict:
        self.inputs = inputs
        self.loss = loss if self.loss is None else self.loss
        self.metrics = metrics if self.metrics is None else self.metrics
        self.current_minibatch = inputs["Source"].shape[0]
        c_loss, s_loss, smooth_reg, transferred_image = self.train_decoder(inputs["Source"].to(self.device),
                                                                           inputs["Style"].to(self.device))
        store_val = {"vis": {"Target": denorm(transferred_image, self.device, to_board=True),
                             "Source": denorm(inputs["Source"], self.device, to_board=True),
                             "Style": denorm(inputs["Style"], self.device, to_board=True)},
                     "loss": {"loss_content": c_loss,
                              "loss_style": s_loss,
                              "smooth_reg": smooth_reg,
                              }
                     }
        if epoch % 30 == 5:
            self.save_model(epoch, store=store_val)
        return store_val
    def predict_batch(self, inputs: dict, loss=None, metrics=None, niter=None, epoch=None):
        self.current_minibatch = self.opt.batchSize
        return {
            "vis": {"Target": None},
            "loss": {}
        }
    def save_model(self, epoch, store):
        store_dict = {
            "epoch": epoch,
            "model_state_dict": {
                "decoder_model_state_dict": self.decoder.state_dict(),
            },
            "optimizer_state_dict": {
                "decoder_optimizer_state_dict": self.optimizer.state_dict(),
            }
        }
        store_dict.update(store)
        torch.save(store_dict, os.path.join(self.opt.expr_dir, "epoch_{}.pth".format(epoch)))
        torch.save(store_dict, os.path.join(self.opt.expr_dir, "latest.pth".format(epoch)))
    def load_model(self, store_path, no_opt=False):
        store_dict = torch.load(store_path)
        self.decoder.load_state_dict(store_dict["model_state_dict"]["decoder_model_state_dict"])
        if no_opt:
            return
        self.optimizer.load_state_dict(store_dict["optimizer_state_dict"]["decoder_optimizer_state_dict"])
if __name__ == '__main__':
    bs = 10
    w, h = 128, 128
    image = torch.rand((bs, 3, w, h))
    # g = _Generator_ResizeConv()
    e = _Encoder()
    d = _Decoder(e)
    adain = AdaptiveInstanceNorm2d(e.out_channels)
    te = adain(e(image)["relu4_1"], e(image)["relu4_1"])
    print(d)
    print(d(te).shape)
    # print(e(image).shape)
    # print(d(e(image)).shape)
    # print(.out_channels)
    # fak = g(z)
    # print(fak.shape)
    # print(d(fak).shape)
| 40.852381 | 115 | 0.602518 | 7,642 | 0.89078 | 0 | 0 | 0 | 0 | 0 | 0 | 1,152 | 0.134281 |
0c418b56746d824c2d98f37af03cc0b209cd7415 | 1,099 | py | Python | airflow/migrations/versions/52d714495f0_job_id_indices.py | rubeshdcube/incubator-airflow | 5419fbb78a2ea2388456c356d2f899ea1991b2de | ["Apache-2.0"] | 6 | 2016-04-20T20:40:43.000Z | 2022-02-20T10:32:00.000Z | airflow/migrations/versions/52d714495f0_job_id_indices.py | curest0x1021/incubator-airflow | e6d3160a061dbaa6042d524095dcd1cbc15e0bcd | ["Apache-2.0"] | 13 | 2018-11-30T18:18:32.000Z | 2021-02-19T17:04:12.000Z | airflow/migrations/versions/52d714495f0_job_id_indices.py | curest0x1021/incubator-airflow | e6d3160a061dbaa6042d524095dcd1cbc15e0bcd | ["Apache-2.0"] | 9 | 2017-08-24T15:47:44.000Z | 2022-02-14T03:30:49.000Z |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""job_id indices
Revision ID: 52d714495f0
Revises: 338e90f54d61
Create Date: 2015-10-20 03:17:01.962542
"""
# revision identifiers, used by Alembic.
revision = '52d714495f0'
down_revision = '338e90f54d61'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    op.create_index('idx_job_state_heartbeat', 'job', ['state', 'latest_heartbeat'], unique=False)
def downgrade():
    op.drop_index('idx_job_state_heartbeat', table_name='job')
| 27.475 | 98 | 0.755232 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 815 | 0.741583 |
0c42822d78adfa0e7f0264f5d356cb0270939941 | 7,836 | py | Python | deep_semantic_similarity_keras.py | viksit/Deep-Semantic-Similarity-Model | 1dc94346801e711125fb573284a1984ce17fb90e | ["MIT"] | 3 | 2016-05-26T00:04:38.000Z | 2019-10-22T09:52:39.000Z | deep_semantic_similarity_keras.py | viksit/Deep-Semantic-Similarity-Model | 1dc94346801e711125fb573284a1984ce17fb90e | ["MIT"] | null | null | null | deep_semantic_similarity_keras.py | viksit/Deep-Semantic-Similarity-Model | 1dc94346801e711125fb573284a1984ce17fb90e | ["MIT"] | 1 | 2019-10-22T09:59:04.000Z | 2019-10-22T09:59:04.000Z |
# Michael A. Alcorn (malcorn@redhat.com)
# An implementation of the Deep Semantic Similarity Model (DSSM) found in [1].
# [1] Shen, Y., He, X., Gao, J., Deng, L., and Mesnil, G. 2014. A latent semantic model
# with convolutional-pooling structure for information retrieval. In CIKM, pp. 101-110.
# http://research.microsoft.com/pubs/226585/cikm2014_cdssm_final.pdf
# [2] http://research.microsoft.com/en-us/projects/dssm/
# [3] http://research.microsoft.com/pubs/238873/wsdm2015.v3.pdf
import numpy as np
from keras import backend
from keras.layers import Input, merge
from keras.layers.core import Dense, Lambda, Reshape
from keras.layers.convolutional import Convolution1D
from keras.models import Model
def R(vects):
"""
Calculates the cosine similarity of two vectors.
:param vects: a list of two vectors.
:return: the cosine similarity of two vectors.
"""
(x, y) = vects
return backend.dot(x, backend.transpose(y)) / (x.norm(2) * y.norm(2)) # See equation (4)
LETTER_GRAM_SIZE = 3 # See section 3.2.
WINDOW_SIZE = 3 # See section 3.2.
TOTAL_LETTER_GRAMS = int(3 * 1e4) # Determined from data. See section 3.2.
WORD_DEPTH = WINDOW_SIZE * TOTAL_LETTER_GRAMS # See equation (1).
K = 300 # Dimensionality of the max-pooling layer. See section 3.4.
L = 128 # Dimensionality of latent semantic space. See section 3.5.
J = 4 # Number of random unclicked documents serving as negative examples for a query. See section 4.
FILTER_LENGTH = 1 # We only consider one time step for convolutions.
# Input tensors holding the query, positive (clicked) document, and negative (unclicked) documents.
# The first dimension is None because the queries and documents can vary in length.
query = Input(shape = (None, WORD_DEPTH))
pos_doc = Input(shape = (None, WORD_DEPTH))
neg_docs = [Input(shape = (None, WORD_DEPTH)) for j in range(J)]
# Query model. The paper uses separate neural nets for queries and documents (see section 5.2).
# In this step, we transform each word vector with WORD_DEPTH dimensions into its
# convolved representation with K dimensions. K is the number of kernels/filters
# being used in the operation. Essentially, the operation is taking the dot product
# of a single weight matrix (W_c) with each of the word vectors (l_t) from the
# query matrix (l_Q), adding a bias vector (b_c), and then applying the tanh function.
# That is, h_Q = tanh(W_c • l_Q + b_c). With that being said, that's not actually
# how the operation is being calculated here. To tie the weights of the weight
# matrix (W_c) together, we have to use a one-dimensional convolutional layer.
# Further, we have to transpose our query matrix (l_Q) so that time is the first
# dimension rather than the second (as described in the paper). That is, l_Q[0, :]
# represents our first word vector rather than l_Q[:, 0]. We can think of the weight
# matrix (W_c) as being similarly transposed such that each kernel is a column
# of W_c. Therefore, h_Q = tanh(l_Q • W_c + b_c) with l_Q, W_c, and b_c being
# the transposes of the matrices described in the paper.
query_conv = Convolution1D(K, FILTER_LENGTH, border_mode = "same", input_shape = (None, WORD_DEPTH), activation = "tanh")(query) # See equation (2).
# Next, we apply a max-pooling layer to the convolved query matrix. Keras provides
# its own max-pooling layers, but they cannot handle variable length input (as
# far as I can tell). As a result, I define my own max-pooling layer here. In the
# paper, the operation selects the maximum value for each row of h_Q, but, because
# we're using the transpose, we're selecting the maximum value for each column.
query_max = Lambda(lambda x: x.max(axis = 1), output_shape = (K,))(query_conv) # See section 3.4.
# In this step, we generate the semantic vector represenation of the query. This
# is a standard neural network dense layer, i.e., y = tanh(W_s • v + b_s).
query_sem = Dense(L, activation = "tanh", input_dim = K)(query_max) # See section 3.5.
# The document equivalent of the above query model.
doc_conv = Convolution1D(K, FILTER_LENGTH, border_mode = "same", input_shape = (None, WORD_DEPTH), activation = "tanh")
doc_max = Lambda(lambda x: x.max(axis = 1), output_shape = (K,))
doc_sem = Dense(L, activation = "tanh", input_dim = K)
pos_doc_conv = doc_conv(pos_doc)
neg_doc_convs = [doc_conv(neg_doc) for neg_doc in neg_docs]
pos_doc_max = doc_max(pos_doc_conv)
neg_doc_maxes = [doc_max(neg_doc_conv) for neg_doc_conv in neg_doc_convs]
pos_doc_sem = doc_sem(pos_doc_max)
neg_doc_sems = [doc_sem(neg_doc_max) for neg_doc_max in neg_doc_maxes]
# This layer calculates the cosine similarity between the semantic representations of
# a query and a document.
R_layer = Lambda(R, output_shape = (1,)) # See equation (4).
R_Q_D_p = R_layer([query_sem, pos_doc_sem]) # See equation (4).
R_Q_D_ns = [R_layer([query_sem, neg_doc_sem]) for neg_doc_sem in neg_doc_sems] # See equation (4).
concat_Rs = merge([R_Q_D_p] + R_Q_D_ns, mode = "concat")
concat_Rs = Reshape((J + 1, 1))(concat_Rs)
# In this step, we multiply each R(Q, D) value by gamma. In the paper, gamma is
# described as a smoothing factor for the softmax function, and it's set empirically
# on a held-out data set. We're going to learn gamma's value by pretending it's
# a single, 1 x 1 kernel.
with_gamma = Convolution1D(1, 1, border_mode = "same", input_shape = (J + 1, 1), activation = "linear")(concat_Rs) # See equation (5).
# Next, we exponentiate each of the gamma x R(Q, D) values.
exponentiated = Lambda(lambda x: backend.exp(x), output_shape = (J + 1,))(with_gamma) # See equation (5).
exponentiated = Reshape((J + 1,))(exponentiated)
# Finally, we use the softmax function to calculate the P(D+|Q).
prob = Lambda(lambda x: x[0][0] / backend.sum(x[0]), output_shape = (1,))(exponentiated) # See equation (5).
# We now have everything we need to define our model.
model = Model(input = [query, pos_doc] + neg_docs, output = prob)
model.compile(optimizer = "adadelta", loss = "binary_crossentropy")
# Build a random data set.
sample_size = 10
l_Qs = []
pos_l_Ds = []
for i in range(sample_size):
query_len = np.random.randint(1, 10)
l_Q = np.random.rand(1, query_len, WORD_DEPTH)
l_Qs.append(l_Q)
doc_len = np.random.randint(50, 500)
l_D = np.random.rand(1, doc_len, WORD_DEPTH)
pos_l_Ds.append(l_D)
neg_l_Ds = []
for i in range(sample_size):
possibilities = list(range(sample_size))
possibilities.remove(i)
negatives = np.random.choice(possibilities, J)
neg_l_Ds.append([pos_l_Ds[negative] for negative in negatives])
# Because we're using the "binary_crossentropy" loss function, we can pretend that
# we're dealing with a binary classification problem and that every sample is a
# member of the "1" class.
y = np.ones(1)
for i in range(sample_size):
history = model.fit([l_Qs[i], pos_l_Ds[i]] + neg_l_Ds[i], y, nb_epoch = 1, verbose = 0)
# Here, I walk through an example of how to define a function for calculating output
# from the computational graph. Let's define a function that calculates R(Q, D+)
# for a given query and clicked document. The function depends on two inputs, query
# and pos_doc. That is, if you start at the point in the graph where R(Q, D+) is
# calculated and then backtrack as far as possible, you'll end up at two different
# starting points, query and pos_doc. As a result, we supply those inputs in a list
# to the function. This particular function only calculates a single output, but
# multiple outputs are possible (see the next example).
get_R_Q_D_p = backend.function([query, pos_doc], R_Q_D_p)
get_R_Q_D_p([l_Qs[0], pos_l_Ds[0]])
# A slightly more complex function. Notice that both neg_docs and the output are
# lists.
get_R_Q_D_ns = backend.function([query] + neg_docs, R_Q_D_ns)
get_R_Q_D_ns([l_Qs[0]] + neg_l_Ds[0])
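# As a further sketch in the same spirit (an illustration, not part of the paper's
# recipe), we can pull several tensors out of the graph at once: the function below
# returns both R(Q, D+) and the final P(D+|Q) for a query together with its clicked
# and unclicked documents.
get_R_and_prob = backend.function([query, pos_doc] + neg_docs, [R_Q_D_p, prob])
get_R_and_prob([l_Qs[0], pos_l_Ds[0]] + neg_l_Ds[0])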
| 49.910828
| 148
| 0.732006
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,759
| 0.60686
|
0c4483174d1c4ff711dd1bd4cb802a150131d7f7
| 469
|
py
|
Python
|
posthog/migrations/0087_fix_annotation_created_at.py
|
avoajaugochukwu/posthog
|
7e7fd42b0542ebc4734aedb926df11d462e3dd4f
|
[
"MIT"
] | 7,409
|
2020-02-09T23:18:10.000Z
|
2022-03-31T22:36:25.000Z
|
posthog/migrations/0087_fix_annotation_created_at.py
|
avoajaugochukwu/posthog
|
7e7fd42b0542ebc4734aedb926df11d462e3dd4f
|
[
"MIT"
] | 5,709
|
2020-02-09T23:26:13.000Z
|
2022-03-31T20:20:01.000Z
|
posthog/migrations/0087_fix_annotation_created_at.py
|
avoajaugochukwu/posthog
|
7e7fd42b0542ebc4734aedb926df11d462e3dd4f
|
[
"MIT"
] | 647
|
2020-02-13T17:50:55.000Z
|
2022-03-31T11:24:19.000Z
|
# Generated by Django 3.0.7 on 2020-10-14 07:46
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("posthog", "0086_team_session_recording_opt_in"),
]
operations = [
migrations.AlterField(
model_name="annotation",
name="created_at",
field=models.DateTimeField(default=django.utils.timezone.now, null=True),
),
]
| 23.45
| 85
| 0.648188
| 347
| 0.739872
| 0
| 0
| 0
| 0
| 0
| 0
| 116
| 0.247335
|
0c476cbc9139db2d5b5477a2919a3f47a83b94b5
| 4,723
|
py
|
Python
|
tumorevo/tumorfig/main.py
|
pedrofale/tumorevo
|
cf43f3854f6815c822cf4df71be82fc6dbae065b
|
[
"MIT"
] | 2
|
2022-02-08T12:54:58.000Z
|
2022-03-04T12:21:06.000Z
|
tumorevo/tumorfig/main.py
|
pedrofale/tumorevo
|
cf43f3854f6815c822cf4df71be82fc6dbae065b
|
[
"MIT"
] | null | null | null |
tumorevo/tumorfig/main.py
|
pedrofale/tumorevo
|
cf43f3854f6815c822cf4df71be82fc6dbae065b
|
[
"MIT"
] | null | null | null |
"""
Create a cartoon of a tumor given the frequencies of different genotypes.
"""
from .util import *
import pandas as pd
import matplotlib.pyplot as plt
import click
import os
from pathlib import Path
from pymuller import muller
@click.command(help="Plot the evolution of a tumor.")
@click.argument(
"genotype-counts",
type=click.Path(exists=True, dir_okay=False),
)
@click.argument(
"genotype-parents",
type=click.Path(exists=True, dir_okay=False),
)
@click.option("-c", "--cells", default=100, help="Number of cells in slice plot.")
@click.option(
"-r",
"--average-radius",
default=10,
help="Average radius of circles in slice plot.",
)
@click.option("--grid-file", default="", help="Path to grid file.")
@click.option("--colormap", default="gnuplot", help="Colormap for genotypes.")
@click.option("--dpi", default=100, help="DPI for figures.")
@click.option("--plot", is_flag=True, help="Plot all the figures.")
@click.option("--do-muller", is_flag=True, help="Make a Muller plot.")
@click.option("--do-slice", is_flag=True, help="Make a slice plot.")
@click.option("--do-tree", is_flag=True, help="Make a clone tree plot.")
@click.option(
"--normalize", is_flag=True, help="Normalize the abundances in the Muller plot."
)
@click.option("--labels", is_flag=True, help="Annotate the clone tree plot.")
@click.option(
"--remove", is_flag=True, help="Remove empty clones in the clone tree plot."
)
@click.option(
"-o", "--output-path", default="./", help="Directory to write figures into."
)
def main(
genotype_counts,
genotype_parents,
cells,
average_radius,
grid_file,
colormap,
dpi,
plot,
do_muller,
do_slice,
do_tree,
normalize,
labels,
remove,
output_path,
):
genotype_counts = pd.read_csv(genotype_counts, index_col=0)
genotype_parents = pd.read_csv(genotype_parents, index_col=0, dtype=str)
if grid_file != "":
grid = pd.read_csv(grid_file, index_col=0, dtype=str)
pop_df, anc_df, color_by = prepare_plots(genotype_counts, genotype_parents)
cmap, genotypes = get_colormap(pop_df, anc_df, color_by, colormap)
if plot:
fig, ax_list = plt.subplots(ncols=3, sharex=False, dpi=dpi, figsize=(8, 2))
muller(
pop_df,
anc_df,
color_by,
ax=ax_list[0],
colorbar=False,
colormap=colormap,
normalize=normalize,
background_strain=False,
)
plt.axis("off")
if grid_file == "":
plot_deme(
cells,
genotype_counts.iloc[-1],
pop_df,
anc_df,
color_by,
average_radius=average_radius,
colormap=colormap,
ax=ax_list[1],
)
else:
plot_grid(grid, cmap, genotypes, ax=ax_list[1])
plot_tree(
genotype_parents,
pop_df,
anc_df,
color_by,
genotype_counts=genotype_counts.iloc[-1],
filter_clones=remove,
labels=labels,
colormap=colormap,
ax=ax_list[2],
)
plt.show()
else:
Path(output_path).mkdir(parents=True, exist_ok=True)
if do_muller:
ax = muller(
pop_df,
anc_df,
color_by,
colorbar=False,
colormap=colormap,
normalize=normalize,
)
plt.axis("off")
plt.savefig(
os.path.join(output_path, "muller.pdf"), dpi=dpi, bbox_inches="tight"
)
if do_slice:
if grid_file == "":
ax = plot_deme(
cells,
genotype_counts.iloc[-1],
pop_df,
anc_df,
color_by,
average_radius=average_radius,
colormap=colormap,
)
else:
ax = plot_grid(grid, cmap)
plt.savefig(
os.path.join(output_path, "slice.pdf"), dpi=dpi, bbox_inches="tight"
)
if do_tree:
ax = plot_tree(
genotype_parents,
pop_df,
anc_df,
color_by,
genotype_counts=genotype_counts.iloc[-1],
filter_clones=remove,
labels=labels,
colormap=colormap,
)
plt.savefig(
os.path.join(output_path, "tree.pdf"), dpi=dpi, bbox_inches="tight"
)
if __name__ == "__main__":
main()
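# Example invocation (hypothetical file names; the module path is assumed from the
# package layout and the click decorators above):
#   python -m tumorfig.main genotype_counts.csv genotype_parents.csv \
#       --do-muller --do-tree --output-path figures/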
| 28.624242
| 85
| 0.547957
| 0
| 0
| 0
| 0
| 4,447
| 0.941563
| 0
| 0
| 788
| 0.166843
|
0c486c53d8fb3bd729143cb46efffc314acc492f
| 23
|
py
|
Python
|
Aulas/aula20/src/bb_circular.py
|
alexNeto/data-struct
|
f54917247a4e75ffe15783b0a025185d2215309a
|
[
"BSD-2-Clause"
] | 5
|
2017-08-25T19:24:47.000Z
|
2020-01-19T15:52:02.000Z
|
Aulas/aula20/src/bb_circular.py
|
alexNeto/data-struct
|
f54917247a4e75ffe15783b0a025185d2215309a
|
[
"BSD-2-Clause"
] | null | null | null |
Aulas/aula20/src/bb_circular.py
|
alexNeto/data-struct
|
f54917247a4e75ffe15783b0a025185d2215309a
|
[
"BSD-2-Clause"
] | 5
|
2017-09-25T14:49:43.000Z
|
2019-11-27T00:05:56.000Z
|
class Circular():
    pass
| 11.5
| 18
| 0.565217
| 17
| 0.73913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0c48b673acc0ea7efa42fafb3fba6d032e5deab7
| 196
|
py
|
Python
|
src/brouwers/online_users/urls.py
|
modelbrouwers/modelbrouwers
|
e0ba4819bf726d6144c0a648fdd4731cdc098a52
|
[
"MIT"
] | 6
|
2015-03-03T13:23:07.000Z
|
2021-12-19T18:12:41.000Z
|
src/brouwers/online_users/urls.py
|
modelbrouwers/modelbrouwers
|
e0ba4819bf726d6144c0a648fdd4731cdc098a52
|
[
"MIT"
] | 95
|
2015-02-07T00:55:39.000Z
|
2022-02-08T20:22:05.000Z
|
src/brouwers/online_users/urls.py
|
modelbrouwers/modelbrouwers
|
e0ba4819bf726d6144c0a648fdd4731cdc098a52
|
[
"MIT"
] | 2
|
2016-03-22T16:53:26.000Z
|
2019-02-09T22:46:04.000Z
|
from django.conf.urls import url
from .views import get_online_users, set_online
app_name = 'online_users'
urlpatterns = [
url(r'^so/$', set_online),
url(r'^ous/$', get_online_users),
]
| 19.6
| 47
| 0.704082
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 31
| 0.158163
|
0c49d08e42802a84e6d6315644d21f43e88ce921
| 5,881
|
py
|
Python
|
dftb+/plot_spline.py
|
hsulab/DailyScripts
|
26b03cfb721fd66f39c86df50d2ec5866e651d6e
|
[
"MIT"
] | 2
|
2020-06-08T21:39:44.000Z
|
2020-10-18T15:12:47.000Z
|
dftb+/plot_spline.py
|
hsulab/DailyScripts
|
26b03cfb721fd66f39c86df50d2ec5866e651d6e
|
[
"MIT"
] | null | null | null |
dftb+/plot_spline.py
|
hsulab/DailyScripts
|
26b03cfb721fd66f39c86df50d2ec5866e651d6e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import argparse
import numpy as np
import matplotlib as mpl
mpl.use('Agg') #silent mode
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes, InsetPosition, zoomed_inset_axes, mark_inset
MAXLINE = 10000
class SplineRepulsion():
def __init__(self, npoints, cutoff, begrep, splrep, endrep):
#
self.npoints, self.cutoff = npoints, cutoff
self.begrep = begrep
self.splrep = splrep
self.endrep = endrep
#
bounds = np.zeros(npoints+2)
bounds[0], bounds[npoints+1] = 0., cutoff
bounds[1:npoints] = splrep[:,0]
bounds[npoints] = endrep[0]
self.bounds = bounds
return
def calc_rep(self,r):
""""""
for i in range(self.npoints+1):
b1, b2 = self.bounds[i], self.bounds[i+1]
if b1 <= r < b2:
if i == 0:
rep = self.calc_begrep(r,*self.begrep)
elif 0 < i < self.npoints:
cur_spl = self.splrep[i-1,:]
coefs = np.zeros(5)
coefs[0] = cur_spl[0]
coefs[1:] = cur_spl[2:]
rep = self.calc_splrep(r,*coefs)
elif i == self.npoints:
cur_spl = self.endrep
coefs = np.zeros(7)
coefs[0] = cur_spl[0]
coefs[1:] = cur_spl[2:]
rep = self.calc_endrep(r,*coefs)
break
else:
raise ValueError('Distance %12.8f Not in Bound...' %r)
return rep
@staticmethod
def calc_begrep(r,a1,a2,a3):
rep = np.exp(-a1*r+a2) + a3
return rep
@staticmethod
def calc_splrep(r,r0,c0,c1,c2,c3):
rep = c0 + c1*(r-r0) + c2*(r-r0)**2 + c3*(r-r0)**3
return rep
@staticmethod
def calc_endrep(r,r0,c0,c1,c2,c3,c4,c5):
rep = c0 + c1*(r-r0) + c2*(r-r0)**2 + c3*(r-r0)**3 \
+ c4*(r-r0)**4 + c5*(r-r0)**5
return rep
def read_spline(skf):
# read spline data
fopen = open(skf, 'r')
for i in range(MAXLINE):
line = fopen.readline()
if line:
if line.startswith('Spline'):
# points and cutoff
line = fopen.readline()
data = line.strip().split()
npoints, cutoff = int(data[0]), float(data[1])
# exp rep
# exp(-a1*r+a2)+a3
line = fopen.readline()
data = line.strip().split()
begrep = np.array(data, dtype=float)
# spline
# c0+c1(r-r0)+c2(r-r0)**2+c3(r-r0)**3
splrep = []
for j in range(npoints-1):
line = fopen.readline()
data = line.strip().split()
splrep.append(data)
splrep = np.array(splrep, dtype=float)
# end
line = fopen.readline()
data = line.strip().split()
endrep = np.array(data, dtype=float)
else:
# end of the file
break
fopen.close()
# init spline
sprp = SplineRepulsion(npoints,cutoff,begrep,splrep,endrep)
return sprp
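# A small worked sketch of the class above, using made-up coefficients rather than a
# real .skf file (hypothetical helper; values chosen only to exercise the exponential
# head, the single cubic segment and the fifth-order tail).
def _demo_spline_repulsion():
    begrep = np.array([2.0, 1.0, -0.1])                              # exp(-a1*r + a2) + a3
    splrep = np.array([[1.0, 2.0, 0.5, -0.2, 0.05, -0.01]])          # r0, r1, c0..c3
    endrep = np.array([2.0, 3.0, 0.1, -0.05, 0.01, 0.0, 0.0, 0.0])   # r0, rcut, c0..c5
    sprp = SplineRepulsion(2, 3.0, begrep, splrep, endrep)
    for r in (0.5, 1.5, 2.5):
        print('V_rep(%.2f Bohr) = %.6f Hartree' % (r, sprp.calc_rep(r)))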
def plot_spline(skf='Pt-Pt.skf', skf2=None, rmin=1.0, pic='spl.png'):
"""Plot the Spline Repulsive Potential..."""
# read spline, turn into spline object
SP_rep1 = read_spline(skf)
# generate data
rs = np.linspace(0.,SP_rep1.cutoff-0.01,1000)
reps = []
for r in rs:
reps.append(SP_rep1.calc_rep(r))
skf_name = os.path.basename(skf).split('.')[0]
rs = np.array(rs)
reps = np.array(reps)
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(12,8))
ax.set_title(r'$%s$ Spline Repulsive Potential' %skf_name, \
fontsize=24, fontweight='bold')
ax.set_xlabel(r'$r$ / Bohr', fontsize=20)
ax.set_ylabel(r'$V_{rep}(r)$ / Hartree', fontsize=20)
skf1_curve, = ax.plot(rs, reps, \
color='g', linestyle='-', linewidth=2., \
label='Skf-1')
# inset figure
ax2 = plt.axes([0,0,1,1])
ip = InsetPosition(ax, [0.4,0.2,0.5,0.5])
ax2.set_axes_locator(ip)
mark_inset(ax, ax2, loc1=1, loc2=3, fc="none", ec='0.5')
#ax2 = zoomed_inset_axes(ax, 1, loc=4)
r_min, r_max = rmin, SP_rep1.cutoff
indices = np.where((rs>r_min) & (rs<r_max))
ax2.plot(rs[indices], reps[indices], color='g', linestyle='-', linewidth=2.)
    # skf2 for comparison
if skf2:
SP_rep2 = read_spline(skf2)
# generate data
rs = np.linspace(0.,SP_rep2.cutoff-0.01,1000)
reps = []
for r in rs:
reps.append(SP_rep2.calc_rep(r))
rs = np.array(rs)
reps = np.array(reps)
skf2_curve, = ax.plot(rs, reps, \
color='orange', linestyle='--', linewidth=2., \
label='Skf-2')
ax2.plot(rs[indices], reps[indices], color='orange', linestyle='--', linewidth=2.)
plt.legend(handles=[skf1_curve,skf2_curve])
else:
plt.legend(handles=[skf1_curve,])
plt.savefig(pic)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--skf', required=True,\
help='Slater-Koster File')
parser.add_argument('-f2', '--skf2', default=None, \
        help='Second Slater-Koster File for Comparison')
parser.add_argument('-p', '--pic', \
default='spl.png', help='Spline Repulsive Potential Figure')
parser.add_argument('-rmin', '--radius_min', type=float,\
default=1.0, help='Minimum Radius for Zoom')
args = parser.parse_args()
plot_spline(args.skf, args.skf2, args.radius_min, args.pic)
#plot_spline()
| 29.70202
| 106
| 0.530012
| 1,800
| 0.30607
| 0
| 0
| 418
| 0.071076
| 0
| 0
| 818
| 0.139092
|
0c4b5ba22b3ba7761012b4918404bffd6258a269
| 370
|
py
|
Python
|
network_monitor/__init__.py
|
brennanhfredericks/network-monitor-client
|
618d222bb015662c3958f0100a965f3c71b29d32
|
[
"MIT"
] | null | null | null |
network_monitor/__init__.py
|
brennanhfredericks/network-monitor-client
|
618d222bb015662c3958f0100a965f3c71b29d32
|
[
"MIT"
] | null | null | null |
network_monitor/__init__.py
|
brennanhfredericks/network-monitor-client
|
618d222bb015662c3958f0100a965f3c71b29d32
|
[
"MIT"
] | null | null | null |
import argparse
import netifaces
import sys
import signal
import os
import asyncio
from asyncio import CancelledError, Task
from typing import Optional, List, Any
from .services import (
Service_Manager,
Packet_Parser,
Packet_Submitter,
Packet_Filter,
)
from .configurations import generate_configuration_template, DevConfig, load_config_from_file
| 16.818182
| 93
| 0.802703
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0c4c75e50a5aeb0f4d0c50388de64676ac264483
| 1,516
|
py
|
Python
|
investing_com/cs_pattern_list.py
|
filipecn/maldives
|
f20f17d817fc3dcad7f9674753744716d1d4c821
|
[
"MIT"
] | 1
|
2021-09-17T18:04:33.000Z
|
2021-09-17T18:04:33.000Z
|
investing_com/cs_pattern_list.py
|
filipecn/maldives
|
f20f17d817fc3dcad7f9674753744716d1d4c821
|
[
"MIT"
] | null | null | null |
investing_com/cs_pattern_list.py
|
filipecn/maldives
|
f20f17d817fc3dcad7f9674753744716d1d4c821
|
[
"MIT"
] | 3
|
2021-09-17T18:04:43.000Z
|
2022-03-18T20:04:07.000Z
|
#!/usr/bin/py
import pandas as pd
import os
# Holds investing.com candlestick patterns
class CSPatternList:
def __init__(self, path):
self.data = None
with os.scandir(path) as entries:
for e in entries:
if e.is_file() and os.path.splitext(e.path)[1] == '.csv':
if self.data is None:
self.data = pd.read_csv(e.path)
else:
                        self.data = self.data.append(pd.read_csv(e.path))
# imgui settings
self.selected_row = 0
def draw_filter_menu(self, imgui):
imgui.separator()
def draw(self, imgui):
imgui.begin("CS Patterns")
imgui.columns(len(self.data.columns) + 1, "asodf")
imgui.separator()
imgui.text("")
imgui.next_column()
for c in self.data.columns:
imgui.text(c)
imgui.next_column()
imgui.separator()
# fill with data
for i in range(10):
label = str(i)
clicked, _ = imgui.selectable(
label=label,
selected=self.selected_row == i,
flags=imgui.SELECTABLE_SPAN_ALL_COLUMNS,
)
if clicked:
self.selected_row = i
hovered = imgui.is_item_hovered()
row = self.data.loc[i]
imgui.next_column()
for c in self.data.columns:
imgui.text(row[c])
imgui.next_column()
imgui.end()
| 29.72549
| 73
| 0.513852
| 1,425
| 0.939974
| 0
| 0
| 0
| 0
| 0
| 0
| 115
| 0.075858
|
0c4cdf64475499e51798185a532224a138493103
| 1,113
|
py
|
Python
|
simpleTest04Client_.py
|
LaplaceKorea/APIClient
|
e772482c3d9cbedee98f46a3529dca5acc254f3c
|
[
"MIT"
] | null | null | null |
simpleTest04Client_.py
|
LaplaceKorea/APIClient
|
e772482c3d9cbedee98f46a3529dca5acc254f3c
|
[
"MIT"
] | null | null | null |
simpleTest04Client_.py
|
LaplaceKorea/APIClient
|
e772482c3d9cbedee98f46a3529dca5acc254f3c
|
[
"MIT"
] | null | null | null |
from LaplaceWSAPIClient import *
from MarkowitzSerde import *
from TargetSerde import *
from Operators import *
from TargetOperators import *
from RLStructure import *
from ClientConfig import client_config
query = RLQuery("default", datetime(2021,1,1), datetime(2021,1,21), {
"BankAccount": 100000.0,
"MMM":1.0,
"AA":1.0,
"AXP":1.0,
"BA":1.0,
"BAC":1.0,
"C":1.0,
"CAT":1.0,
"CVX":1.0,
"DD":1.0,
"DIS":1.0,
"GE":1.0,
"GM":1.0,
"HD":1.0,
"HPQ":1.0,
"IBM":1.0,
"JNJ":1.0,
"JPM":1.0,
"KO":1.0,
"MCD":1.0,
"MRK":1.0,
"PFE":1.0,
"PG":1.0,
"T":1.0,
"UTX":1.0,
"VZ":1.0,
"WMT":1.0,
"XOM":1.0
}, UserTokenSerde(client_config["user"],client_config["token"]))
performQueryRLQuery(client_config["wss"], query, lambda x: print("yahoo: ", x.Steps[0][0], x.Steps[0][1], x.Steps[0][203]))
| 27.146341
| 123
| 0.442947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 171
| 0.153639
|
0c4fdea50a153837205a14c5c61c7d560b9d7a43
| 14,406
|
py
|
Python
|
vdisk.py
|
cookpan001/vdisk
|
1414e5c20eba3722ce99818fe48ddf0217fb25ca
|
[
"BSD-3-Clause"
] | 1
|
2016-01-11T06:46:11.000Z
|
2016-01-11T06:46:11.000Z
|
vdisk.py
|
cookpan001/vdisk
|
1414e5c20eba3722ce99818fe48ddf0217fb25ca
|
[
"BSD-3-Clause"
] | null | null | null |
vdisk.py
|
cookpan001/vdisk
|
1414e5c20eba3722ce99818fe48ddf0217fb25ca
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
# author: cookpan001
import sys
import logging
import time
import mimetypes
import urllib
import urllib2
"""
oauth2 client
"""
class OAuth2(object):
ACCESS_TOKEN_URL = "https://auth.sina.com.cn/oauth2/access_token"
AUTHORIZE_URL = "https://auth.sina.com.cn/oauth2/authorize"
def __init__(self,app_key, app_secret, call_back_url):
self.version = 1.0
self.app_key = app_key
self.app_secret = app_secret
self.call_back_url = call_back_url
#display = default|mobile|popup
def authorize(self,response_type = "code",display = "default",state = ""):
data = {"client_id":self.app_key,
"redirect_uri":self.call_back_url,
"response_type":response_type,
"display":display}
if len(state) > 0:
data["state"] = state
return OAuth2.AUTHORIZE_URL + "?" + urllib.urlencode(data)
#grant_type = authorization_code|refresh_token
def access_token(self,grant_type = "authorization_code",code = "",refresh_token = ""):
data = {"client_id":self.app_key,
"client_secret":self.app_secret,
"grant_type":grant_type}
if grant_type == "authorization_code":
data["code"] = code
data["redirect_uri"] = self.call_back_url
elif grant_type == "refresh_token":
data["refresh_token"] = refresh_token
try:
request = urllib2.Request(OAuth2.ACCESS_TOKEN_URL)
response = urllib2.urlopen(request,urllib.urlencode(data))
return response.read()
except urllib2.HTTPError,e:
return e.read()
except urllib2.URLError,e:
return e.read()
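# A hypothetical sketch of the authorization-code flow using the class above. The app
# key, secret, callback URL and the code captured from the redirect are placeholders;
# the user must first visit the authorize URL and approve access.
def _demo_oauth2_flow(app_key, app_secret, call_back_url, code):
    auth = OAuth2(app_key, app_secret, call_back_url)
    print(auth.authorize())                    # step 1: direct the user here
    token_json = auth.access_token(code=code)  # step 2: exchange the code for a token
    return token_json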
"""
All the responses will be a Response object
"""
class Response(object):
BLOCK_SIZE = 8192
def __init__(self,response):
self.response = response
"""
return a HTTPMessage object
"""
def headers(self):
if hasattr(self.response,"info"):
return self.response.info()
        elif hasattr(self.response,"msg"):
            return self.response.msg
"""
    Get the content of the response, optimized for large responses.
    data() returns a generator. Developers can use this method like this:
for content in Response.data():
print content
"""
def data(self):
while True:
block = self.response.read(Response.BLOCK_SIZE)
if block:
yield block
else:
return
def read(self):
return self.response.read()
def __str__(self):
return self.response.read()
"""
The vdisk(weipan) client.
"""
class Client(object):
log = logging.getLogger('api_client')
API_URL = 'https://api.weipan.cn/2/'
WEIBO_URL = 'https://api.weipan.cn/weibo/'
UPLOAD_HOST = 'upload-vdisk.sina.com.cn'
CONTENT_SAFE_URL = 'https://'+UPLOAD_HOST+'/2/'
version = 1.0
def __init__(self,root = "basic"):
self.timeout = 10
self.python_version_is_bigger_than_2_4 = float(sys.version[:3]) > 2.4
self.root = root
def setRoot(self,root):
self.root = root
def get(self, host, api, queries={}):
try:
if isinstance(api, unicode):
api = api.encode('utf-8')
else:
api = str(api)
url = host.strip('/') + '/' + urllib.quote(api.strip('/'))
queries = self.encode_queries(queries)
request = urllib2.Request('%s?%s' % (url, queries))
# set timeout.
if self.python_version_is_bigger_than_2_4:
response = urllib2.urlopen(request, timeout=self.timeout)
else:
# http://stackoverflow.com/questions/2084782/timeout-for-urllib2-urlopen-in-pre-python-2-6-versions
import socket
socket.setdefaulttimeout(self.timeout)
response = urllib2.urlopen(request)
return Response(response)
except urllib2.HTTPError,e:
return e.read()
except urllib2.URLError,e:
return e.read()
def post(self, host, api, data=[], files=[]):
try:
if isinstance(api, unicode):
api = api.encode('utf-8')
else:
api = str(api)
url = host.strip('/') + '/' + api.strip('/')
if isinstance(data, dict):
data = data.items()
content_type, body = self.encode_multipart_formdata(data, files)
request = urllib2.Request(url, data=body)
request.add_header('Content-Type', content_type)
request.add_header('Content-Length', str(len(body)))
if self.python_version_is_bigger_than_2_4:
response = urllib2.urlopen(request, timeout=self.timeout)
else:
import socket
socket.setdefaulttimeout(self.timeout)
response = urllib2.urlopen(request)
return Response(response)
except urllib2.HTTPError,e:
return e.read()
except urllib2.URLError,e:
return e.read()
# used by non GET or POST method. such as PUT
def request(self, method,host, api, data, headers = {}, use_safe = True):
import httplib
if isinstance(api, unicode):
api = api.encode('utf-8')
else:
api = str(api)
if isinstance(data, dict):
data = self.encode_queries(data)
try:
if use_safe:
conn = httplib.HTTPSConnection(host)
else:
conn = httplib.HTTPConnection(host)
conn.request(method,api,data,headers)
return Response(conn.getresponse())
except httplib.HTTPException,e:
print e
return e.read()
def get_content_type(self, filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
def encode_multipart_formdata(self, fields, files):
"""
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for data to be uploaded as files
Return (content_type, body) ready for httplib.HTTP instance
"""
BOUNDARY = '----------%s' % hex(int(time.time() * 1000))
CRLF = '\r\n'
L = []
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % str(key))
L.append('')
if isinstance(value, unicode):
L.append(value.encode('utf-8'))
else:
L.append(value)
for (key, filename, value) in files:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (str(key), str(filename)))
L.append('Content-Type: %s' % str(self.get_content_type(filename)))
L.append('Content-Length: %d' % len(value))
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
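    # Illustrative shapes (hypothetical values): fields=[("access_token", token)] and
    # files=[("file", "report.txt", data)] yield a
    # ('multipart/form-data; boundary=...', body) pair that post() above sends with the
    # matching Content-Type and Content-Length headers.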
def encode_queries(self, queries={}, **kwargs):
queries.update(kwargs)
args = []
for k, v in queries.iteritems():
if isinstance(v, unicode):
qv = v.encode('utf-8')
else:
qv = str(v)
args.append('%s=%s' % (k, urllib.quote(qv)))
return '&'.join(args)
def account_info(self,access_token):
data = self.get(Client.API_URL,
'account/info',
{"access_token":access_token})
return data
def metadata(self,access_token,path):
data = self.get(Client.API_URL,
'metadata/' + self.root + '/' + path,
{"access_token":access_token})
return data
def delta(self,access_token,cursor = ''):
param = {"access_token":access_token}
if len(cursor) > 0:
param['cursor'] = cursor
data = self.get(Client.API_URL,
'delta/' + self.root,
param)
return data
def files(self,access_token,path,rev = ''):
param = {"access_token":access_token}
if len(rev) > 0:
param['rev'] = rev
data = self.get(Client.API_URL,
'files/' + self.root + "/" + path,
param)
return data
def revisions(self,access_token,path):
data = self.get(Client.API_URL,
'revisions/' + self.root + "/" + path,
{"access_token":access_token})
return data
#files = {"filename":filename,"content":"file content"}
def files_post(self,access_token,path,files,overwrite = "true",sha1 = "",size = "", parent_rev = ""):
param = {
"access_token":access_token,
"overwrite":overwrite
}
if len(sha1) > 0:
param["sha1"] = sha1
if len(size) > 0:
param["size"] = size
if len(parent_rev) > 0:
param["parent_rev"] = parent_rev
queries = self.encode_queries(param)
data = self.post(Client.CONTENT_SAFE_URL,
'files/'+self.root+"/"+path+"?"+queries,
[],
[("file",files["filename"],files["content"])])
return data
"""
    content should be a file object or raw file content, such as: open("./filename","rb"); "rb" is preferred.
"""
def files_put(self,access_token,path,content,overwrite = "true",sha1 = "",size = "", parent_rev = ""):
param = {
"access_token":access_token,
"overwrite":overwrite
}
if len(sha1) > 0:
param["sha1"] = sha1
if len(size) > 0:
param["size"] = size
if len(parent_rev) > 0:
param["parent_rev"] = parent_rev
data = self.request(
method="PUT",
host=Client.UPLOAD_HOST,
api='/2/files_put/'+self.root+"/"+path+"?"+self.encode_queries(param),
data=content)
return data
    # Public share
def shares(self,access_token,path,cancel = "false"):
data = self.post(Client.API_URL,
'shares/'+self.root+"/"+path,
{"access_token":access_token,
"cancel":cancel
})
return data
def restore(self,access_token,path,rev = ""):
param = {"access_token":access_token,
"path":path
}
if len(rev) > 0:
param['rev'] = rev
        data = self.post(Client.API_URL,
                'restore/'+self.root+"/"+path,
                param)
return data
def search(self,access_token,path,query,file_limit = 1000,include_deleted = "false"):
data = self.get(Client.API_URL,
'search/'+self.root+"/"+path,
{"access_token":access_token,
"path":path,
"query":query,
"file_limit":file_limit,
"include_deleted":include_deleted
})
return data
def copy_ref(self,access_token,path):
data = self.post(Client.API_URL,
'copy_ref/'+self.root+"/"+path,
{"access_token":access_token,
"path":path})
return data
def media(self,access_token,path):
data = self.get(Client.API_URL,
'media/'+self.root+"/"+path,
{"access_token":access_token,
"path":path})
return data
#s:60x60,m:100x100,l:640x480,xl:1027x768
def thumbnails(self,access_token,path,size):
data = self.get(Client.API_URL,
'thumbnails/'+self.root+"/"+path,
{"access_token":access_token,
"path":path,
"size":size})
return data
def fileops_copy(self,access_token,to_path,from_path = "",from_copy_ref = ""):
param = {"access_token":access_token,
"root":self.root,
"to_path":to_path
}
if len(from_path) > 0:
param['from_path'] = from_path
if len(from_copy_ref) > 0:
param['from_copy_ref'] = from_copy_ref
data = self.post(Client.API_URL,
'fileops/copy',
param)
return data
def fileops_delete(self,access_token,path):
data = self.post(Client.API_URL,
'fileops/delete',
{"access_token":access_token,
"root":self.root,
"path":path
})
return data
def fileops_move(self,access_token,from_path = "",to_path = ""):
param = {"access_token":access_token,
"root":self.root
}
if len(from_path) > 0:
param['from_path'] = from_path
if len(to_path) > 0:
param['to_path'] = to_path
data = self.post(Client.API_URL,
'fileops/move',
param)
return data
def fileops_create_folder(self,access_token,path):
data = self.post(Client.API_URL,
'fileops/create_folder',
{"access_token":access_token,
"root":self.root,
"path":path
})
return data
def shareops_media(self,access_token,from_copy_ref):
data = self.get(Client.API_URL,
'shareops/media',
{"access_token":access_token,
"from_copy_ref":from_copy_ref})
return data
| 36.470886
| 115
| 0.521102
| 14,158
| 0.982239
| 191
| 0.013251
| 0
| 0
| 0
| 0
| 2,783
| 0.193076
|
0c50ef47cd53ea48685602b6b3d98c7fea184c96
| 263
|
py
|
Python
|
setup.py
|
thevoxium/netspeed
|
9e16a49d64da90a173ef9eaf491d4245c1023105
|
[
"MIT"
] | null | null | null |
setup.py
|
thevoxium/netspeed
|
9e16a49d64da90a173ef9eaf491d4245c1023105
|
[
"MIT"
] | null | null | null |
setup.py
|
thevoxium/netspeed
|
9e16a49d64da90a173ef9eaf491d4245c1023105
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(
name='netspeed',
version='0.1',
py_modules=['netspeed'],
install_requires=[
'Click',
'pyspeedtest'
],
entry_points='''
[console_scripts]
netspeed=netspeed:cli
''',
)
| 16.4375
| 29
| 0.558935
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 112
| 0.425856
|
0c51490cf6a9e00d3f171f44d583a875d050c2af
| 244
|
py
|
Python
|
store/admin.py
|
salemzii/ChopFast
|
95ea88387ecfdb56bd643970b69425b1a1c6f388
|
[
"MIT"
] | null | null | null |
store/admin.py
|
salemzii/ChopFast
|
95ea88387ecfdb56bd643970b69425b1a1c6f388
|
[
"MIT"
] | null | null | null |
store/admin.py
|
salemzii/ChopFast
|
95ea88387ecfdb56bd643970b69425b1a1c6f388
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import (Dish, Payments, Order, Delivery, OrderItem)
admin.site.register(Dish)
admin.site.register(Payments)
admin.site.register(Order)
admin.site.register(Delivery)
admin.site.register(OrderItem)
| 24.4
| 64
| 0.807377
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0c517d2c976cb6c4a933b0a237cbe0bcc83aaacb
| 31,109
|
py
|
Python
|
hpedockerplugin/request_context.py
|
renovate-bot/python-hpedockerplugin
|
b7fa6b3193fa6dd42574585b4c621ff6a16babc9
|
[
"Apache-2.0"
] | 49
|
2016-06-14T22:25:40.000Z
|
2021-04-05T05:00:59.000Z
|
hpedockerplugin/request_context.py
|
imran-ansari/python-hpedockerplugin
|
e2726f48ac793dc894100e3772c40ce89bfe9bb8
|
[
"Apache-2.0"
] | 550
|
2016-07-25T12:01:12.000Z
|
2021-11-15T17:52:40.000Z
|
hpedockerplugin/request_context.py
|
imran-ansari/python-hpedockerplugin
|
e2726f48ac793dc894100e3772c40ce89bfe9bb8
|
[
"Apache-2.0"
] | 96
|
2016-06-01T22:07:03.000Z
|
2021-06-22T09:05:05.000Z
|
import abc
import json
import re
from collections import OrderedDict
from oslo_log import log as logging
import hpedockerplugin.exception as exception
from hpedockerplugin.hpe import share
LOG = logging.getLogger(__name__)
class RequestContextBuilderFactory(object):
def __init__(self, all_configs):
self._all_configs = all_configs
# if 'block' in all_configs:
# block_configs = all_configs['block']
# backend_configs = block_configs[1]
# self._vol_req_ctxt_creator = VolumeRequestContextBuilder(
# backend_configs)
# else:
# self._vol_req_ctxt_creator = NullRequestContextBuilder(
# "ERROR: Volume driver not enabled. Please provide hpe.conf "
# "file to enable it")
if 'file' in all_configs:
file_configs = all_configs['file']
f_backend_configs = file_configs[1]
self._file_req_ctxt_builder = FileRequestContextBuilder(
f_backend_configs)
else:
self._file_req_ctxt_builder = NullRequestContextBuilder(
"ERROR: File driver not enabled. Please provide hpe_file.conf "
"file to enable it")
def get_request_context_builder(self):
return self._file_req_ctxt_builder
class NullRequestContextBuilder(object):
def __init__(self, msg):
self._msg = msg
def build_request_context(self, contents, def_backend_name):
raise exception.InvalidInput(self._msg)
class RequestContextBuilder(object):
def __init__(self, backend_configs):
self._backend_configs = backend_configs
def build_request_context(self, contents, def_backend_name):
LOG.info("build_request_context: Entering...")
self._validate_name(contents['Name'])
req_ctxt_map = self._get_build_req_ctxt_map()
if 'Opts' in contents and contents['Opts']:
# self._validate_mutually_exclusive_ops(contents)
self._validate_dependent_opts(contents)
for op_name, req_ctxt_creator in req_ctxt_map.items():
op_name = op_name.split(',')
found = not (set(op_name) - set(contents['Opts'].keys()))
if found:
return req_ctxt_creator(contents, def_backend_name)
return self._default_req_ctxt_creator(contents)
@staticmethod
def _validate_name(vol_name):
is_valid_name = re.match("^[A-Za-z0-9]+[A-Za-z0-9_-]+$", vol_name)
if not is_valid_name:
msg = 'Invalid volume name: %s is passed.' % vol_name
raise exception.InvalidInput(reason=msg)
@staticmethod
def _get_int_option(options, option_name, default_val):
opt = options.get(option_name)
if opt and opt != '':
try:
opt = int(opt)
except ValueError as ex:
msg = "ERROR: Invalid value '%s' specified for '%s' option. " \
"Please specify an integer value." % (opt, option_name)
LOG.error(msg)
raise exception.InvalidInput(msg)
else:
opt = default_val
return opt
# This method does the following:
# 1. Option specified
# - Some value:
# -- return if valid value else exception
# - Blank value:
# -- Return default if provided
# ELSE
# -- Throw exception if value_unset_exception is set
# 2. Option NOT specified
# - Return default value
@staticmethod
def _get_str_option(options, option_name, default_val, valid_values=None,
value_unset_exception=False):
opt = options.get(option_name)
if opt:
if opt != '':
opt = str(opt)
if valid_values and opt.lower() not in valid_values:
msg = "ERROR: Invalid value '%s' specified for '%s'" \
"option. Valid values are: %s" %\
(opt, option_name, valid_values)
LOG.error(msg)
raise exception.InvalidInput(msg)
return opt
if default_val:
return default_val
if value_unset_exception:
return json.dumps({
                'Err': "Value not set for option: %s" % option_name
})
return default_val
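    # For illustration (hypothetical options dict and CPG names):
    #   _get_str_option({'cpg': 'FC_r1'}, 'cpg', None)     -> 'FC_r1'
    #   _get_str_option({'cpg': ''},      'cpg', 'SSD_r5') -> 'SSD_r5'  (blank falls back to default)
    #   _get_str_option({},               'cpg', None)     -> None      (option absent, no default)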
def _validate_dependent_opts(self, contents):
pass
# To be implemented by derived class
@abc.abstractmethod
def _get_build_req_ctxt_map(self):
pass
def _default_req_ctxt_creator(self, contents):
pass
@staticmethod
def _validate_mutually_exclusive_ops(contents):
mutually_exclusive_ops = ['virtualCopyOf', 'cloneOf', 'importVol',
'replicationGroup']
if 'Opts' in contents and contents['Opts']:
received_opts = contents.get('Opts').keys()
diff = set(mutually_exclusive_ops) - set(received_opts)
if len(diff) < len(mutually_exclusive_ops) - 1:
mutually_exclusive_ops.sort()
msg = "Operations %s are mutually exclusive and cannot be " \
"specified together. Please check help for usage." % \
mutually_exclusive_ops
raise exception.InvalidInput(reason=msg)
@staticmethod
def _check_valid_fsMode_string(value):
valid_type = ['A', 'D', 'U', 'L']
valid_flag = ['f', 'd', 'p', 'i', 'S', 'F', 'g']
valid_perm1 = ['r', 'w', 'a', 'x', 'd', 'D', 't', 'T']
valid_perm2 = ['n', 'N', 'c', 'C', 'o', 'y']
valid_perm = valid_perm1 + valid_perm2
type_flag_perm = value.split(':')
if len(type_flag_perm) != 3:
            msg = "Incorrect value passed, please check the correct "\
                  "format and values to be passed in help"
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
vtype = type_flag_perm[0]
if vtype not in valid_type:
            msg = "Incorrect value passed for the type of the mode, please check "\
                  "the correct format and values to be passed."
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
passed_vflag_len = len(list(type_flag_perm[1]))
vflag = list(set(list(type_flag_perm[1])))
if len(vflag) < passed_vflag_len:
msg = "Duplicate characters for given flag are passed. "\
"Please correct the passed flag characters for fsMode."
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if set(vflag) - set(valid_flag):
msg = "Invalid flag passed for the fsMode. Please "\
"pass the correct flag characters"
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
passed_vperm_len = len(list(type_flag_perm[2]))
vperm = list(set(list(type_flag_perm[2])))
if len(vperm) < passed_vperm_len:
msg = "Duplicate characters for given permission are passed. "\
"Please correct the passed permissions for fsMode."
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if set(vperm) - set(valid_perm):
msg = "Invalid characters for the permissions of fsMode are "\
"passed. Please remove the invalid characters."
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
return True
def _check_is_valid_acl_string(self, fsMode):
fsMode_list = fsMode.split(',')
if len(fsMode_list) != 3:
msg = "Passed acl string is not valid. "\
"Pass correct acl string."
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
for value in fsMode_list:
self._check_valid_fsMode_string(value)
return True
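    # For illustration, an ACL-style fsMode such as "A:fd:rwax,A:g:rwax,A:fdS:rwaxnNcCoy"
    # passes this validator (three "type:flags:perms" entries), while an octal string
    # such as "0755" is handled by _is_valid_octal_num below.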
@staticmethod
def _is_valid_octal_num(fsMode):
return re.match('^0[0-7]{3}$', fsMode)
def _validate_fsMode(self, fsMode):
is_valid_fs_mode = True
if ':' in fsMode:
is_valid_fs_mode = self._check_is_valid_acl_string(fsMode)
else:
is_valid_fs_mode = self._is_valid_octal_num(fsMode)
if not is_valid_fs_mode:
msg = "Invalid value passed for the fsMode."
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
@staticmethod
def _validate_fsOwner(fsOwner):
fsOwner_list = fsOwner.split(':')
if len(fsOwner_list) != 2:
msg = "Invalid value specified for fsOwner Option."
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
try:
for val in fsOwner_list:
int(val)
except ValueError as ex:
            msg = "Please provide correct fsOwner information. You have "\
                  "passed non-integer values."
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
@staticmethod
def _validate_opts(operation, contents, valid_opts, mandatory_opts=None):
LOG.info("Validating options for operation '%s'" % operation)
if 'Opts' in contents and contents['Opts']:
received_opts = contents.get('Opts').keys()
if mandatory_opts:
diff = set(mandatory_opts) - set(received_opts)
if diff:
# Print options in sorted manner
mandatory_opts.sort()
msg = "One or more mandatory options %s are missing " \
"for operation %s" % (mandatory_opts, operation)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
diff = set(received_opts) - set(valid_opts)
if diff:
diff = list(diff)
diff.sort()
msg = "Invalid option(s) %s specified for operation %s. " \
"Please check help for usage." % \
(diff, operation)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
class FileRequestContextBuilder(RequestContextBuilder):
def __init__(self, backend_configs):
super(FileRequestContextBuilder, self).__init__(backend_configs)
def _get_build_req_ctxt_map(self):
build_req_ctxt_map = OrderedDict()
# If share-dir is specified, file-store MUST be specified
build_req_ctxt_map['filePersona,help'] = self._create_help_req_ctxt
build_req_ctxt_map['filePersona'] = \
self._create_share_req_ctxt
# build_req_ctxt_map['persona,cpg'] = \
# self._create_share_req_ctxt
# build_req_ctxt_map['persona,cpg,size'] = \
# self._create_share_req_ctxt
# build_req_ctxt_map['persona,cpg,size,fpg_name'] = \
# self._create_share_req_ctxt
# build_req_ctxt_map['virtualCopyOf,shareName'] = \
# self._create_snap_req_ctxt
# build_req_ctxt_map['updateShare'] = \
# self._create_update_req_ctxt
return build_req_ctxt_map
def _create_share_req_params(self, name, options, def_backend_name):
LOG.info("_create_share_req_params: Entering...")
# import pdb
# pdb.set_trace()
backend = self._get_str_option(options, 'backend', def_backend_name)
if backend == 'DEFAULT_BLOCK':
msg = 'Backend DEFAULT_BLOCK is reserved for Block ' \
'operations. Cannot specify it for File operations'
LOG.error(msg)
raise exception.InvalidInput(msg)
config = self._backend_configs.get(backend)
if not config:
raise exception.InvalidInput(
'ERROR: Backend %s is not configured for File Persona'
% backend
)
cpg = self._get_str_option(
options, 'cpg',
config.hpe3par_cpg[0] if config.hpe3par_cpg else None)
if not cpg:
raise exception.InvalidInput(
"ERROR: CPG is not configured in hpe.conf. Please specify"
"name of an existing CPG in hpe.conf and restart plugin")
fpg = self._get_str_option(options, 'fpg', None)
fsMode = self._get_str_option(options, 'fsMode', None)
fsOwner = self._get_str_option(options, 'fsOwner', None)
if fsMode:
self._validate_fsMode(fsMode)
if fsOwner:
self._validate_fsOwner(fsOwner)
if fsMode:
if fsOwner is None:
                raise exception.InvalidInput(
                    " ERROR: If mode bits or directory permissions"
                    " need to be changed, then providing fsOwner"
                    " is mandatory")
size_gib = self._get_int_option(options, 'size', 1024)
# Default share size or quota in MiB which is 1TiB
size = size_gib * 1024
fpg_size_gib = int(config.hpe3par_default_fpg_size) * 1024
if size_gib > fpg_size_gib:
raise exception.InvalidInput(
"ERROR: Share size cannot be greater than the FPG size. "
"Either specify hpe3par_default_fpg_size >= %s GiB or "
"specify option '-o size' < %s GiB"
% (size_gib, fpg_size_gib))
# TODO: This check would be required when VFS needs to be created.
# NOT HERE
# if not ip_subnet and not config.hpe3par_ip_pool:
# raise exception.InvalidInput(
# "ERROR: Unable to create share as neither 'ipSubnet' "
# "option specified not IP address pool hpe3par_ip_pool "
# "configured in configuration file specified")
readonly_str = self._get_str_option(options, 'readonly', 'false')
readonly = str.lower(readonly_str)
if readonly == 'true':
readonly = True
elif readonly == 'false':
readonly = False
else:
raise exception.InvalidInput(
'ERROR: Invalid value "%s" supplied for "readonly" option. '
'Valid values are case insensitive ["true", "false"]'
% readonly_str)
nfs_options = self._get_str_option(options, 'nfsOptions', None)
comment = self._get_str_option(options, 'comment', None)
share_details = share.create_metadata(backend, cpg, fpg, name, size,
readonly=readonly,
nfs_options=nfs_options,
comment=comment, fsMode=fsMode,
fsOwner=fsOwner)
LOG.info("_create_share_req_params: %s" % share_details)
return share_details
def _create_share_req_ctxt(self, contents, def_backend_name):
LOG.info("_create_share_req_ctxt: Entering...")
valid_opts = ('backend', 'filePersona', 'cpg', 'fpg',
'size', 'mountConflictDelay', 'fsMode', 'fsOwner')
mandatory_opts = ('filePersona',)
self._validate_opts("create share", contents, valid_opts,
mandatory_opts)
share_args = self._create_share_req_params(contents['Name'],
contents['Opts'],
def_backend_name)
ctxt = {'orchestrator': 'file',
'operation': 'create_share',
'kwargs': share_args}
LOG.info("_create_share_req_ctxt: Exiting: %s" % ctxt)
return ctxt
def _create_help_req_ctxt(self, contents, def_backend_name):
LOG.info("_create_help_req_ctxt: Entering...")
valid_opts = ('filePersona', 'help', 'mountConflictDelay')
self._validate_opts("create help content for share", contents,
valid_opts, mandatory_opts=None)
options = contents['Opts']
if options:
value = self._get_str_option(options, 'help', None)
if not value:
return {
'orchestrator': 'file',
'operation': 'create_share_help',
'kwargs': {}
}
if value == 'backends':
return {
'orchestrator': 'file',
'operation': 'get_backends_status',
'kwargs': {}
}
else:
raise exception.InvalidInput(
"ERROR: Invalid value %s for option 'help' specified."
% value)
LOG.info("_create_help_req_ctxt: Exiting...")
def _create_snap_req_ctxt(self, contents):
pass
def _create_update_req_ctxt(self, contents):
pass
# TODO: This is work in progress - can be taken up later if agreed upon
# class VolumeRequestContextBuilder(RequestContextBuilder):
# def __init__(self, backend_configs):
# super(VolumeRequestContextBuilder, self).__init__(backend_configs)
#
# def _get_build_req_ctxt_map(self):
# build_req_ctxt_map = OrderedDict()
# build_req_ctxt_map['virtualCopyOf,scheduleName'] = \
# self._create_snap_schedule_req_ctxt,
# build_req_ctxt_map['virtualCopyOf,scheduleFrequency'] = \
# self._create_snap_schedule_req_ctxt
# build_req_ctxt_map['virtualCopyOf,snaphotPrefix'] = \
# self._create_snap_schedule_req_ctxt
# build_req_ctxt_map['virtualCopyOf'] = \
# self._create_snap_req_ctxt
# build_req_ctxt_map['cloneOf'] = \
# self._create_clone_req_ctxt
# build_req_ctxt_map['importVol'] = \
# self._create_import_vol_req_ctxt
# build_req_ctxt_map['replicationGroup'] = \
# self._create_rcg_req_ctxt
# build_req_ctxt_map['help'] = self._create_help_req_ctxt
# return build_req_ctxt_map
#
# def _default_req_ctxt_creator(self, contents):
# return self._create_vol_create_req_ctxt(contents)
#
# @staticmethod
# def _validate_mutually_exclusive_ops(contents):
# mutually_exclusive_ops = ['virtualCopyOf', 'cloneOf', 'importVol',
# 'replicationGroup']
# if 'Opts' in contents and contents['Opts']:
# received_opts = contents.get('Opts').keys()
# diff = set(mutually_exclusive_ops) - set(received_opts)
# if len(diff) < len(mutually_exclusive_ops) - 1:
# mutually_exclusive_ops.sort()
# msg = "Operations %s are mutually exclusive and cannot be " \
# "specified together. Please check help for usage." % \
# mutually_exclusive_ops
# raise exception.InvalidInput(reason=msg)
#
# @staticmethod
# def _validate_opts(operation, contents, valid_opts, mandatory_opts=None):
# if 'Opts' in contents and contents['Opts']:
# received_opts = contents.get('Opts').keys()
#
# if mandatory_opts:
# diff = set(mandatory_opts) - set(received_opts)
# if diff:
# # Print options in sorted manner
# mandatory_opts.sort()
# msg = "One or more mandatory options %s are missing " \
# "for operation %s" % (mandatory_opts, operation)
# raise exception.InvalidInput(reason=msg)
#
# diff = set(received_opts) - set(valid_opts)
# if diff:
# diff = list(diff)
# diff.sort()
# msg = "Invalid option(s) %s specified for operation %s. " \
# "Please check help for usage." % \
# (diff, operation)
# raise exception.InvalidInput(reason=msg)
#
# def _create_vol_create_req_ctxt(self, contents):
# valid_opts = ['compression', 'size', 'provisioning',
# 'flash-cache', 'qos-name', 'fsOwner',
# 'fsMode', 'mountConflictDelay', 'cpg',
# 'snapcpg', 'backend']
# self._validate_opts("create volume", contents, valid_opts)
# return {'operation': 'create_volume',
# '_vol_orchestrator': 'volume'}
#
# def _create_clone_req_ctxt(self, contents):
# valid_opts = ['cloneOf', 'size', 'cpg', 'snapcpg',
# 'mountConflictDelay']
# self._validate_opts("clone volume", contents, valid_opts)
# return {'operation': 'clone_volume',
# 'orchestrator': 'volume'}
#
# def _create_snap_req_ctxt(self, contents):
# valid_opts = ['virtualCopyOf', 'retentionHours', 'expirationHours',
# 'mountConflictDelay', 'size']
# self._validate_opts("create snapshot", contents, valid_opts)
# return {'operation': 'create_snapshot',
# '_vol_orchestrator': 'volume'}
#
# def _create_snap_schedule_req_ctxt(self, contents):
# valid_opts = ['virtualCopyOf', 'scheduleFrequency', 'scheduleName',
# 'snapshotPrefix', 'expHrs', 'retHrs',
# 'mountConflictDelay', 'size']
# mandatory_opts = ['scheduleName', 'snapshotPrefix',
# 'scheduleFrequency']
# self._validate_opts("create snapshot schedule", contents,
# valid_opts, mandatory_opts)
# return {'operation': 'create_snapshot_schedule',
# 'orchestrator': 'volume'}
#
# def _create_import_vol_req_ctxt(self, contents):
# valid_opts = ['importVol', 'backend', 'mountConflictDelay']
# self._validate_opts("import volume", contents, valid_opts)
#
# # Replication enabled backend cannot be used for volume import
# backend = contents['Opts'].get('backend', 'DEFAULT')
# if backend == '':
# backend = 'DEFAULT'
#
# try:
# config = self._backend_configs[backend]
# except KeyError:
# backend_names = list(self._backend_configs.keys())
# backend_names.sort()
# msg = "ERROR: Backend '%s' doesn't exist. Available " \
# "backends are %s. Please use " \
# "a valid backend name and retry." % \
# (backend, backend_names)
# raise exception.InvalidInput(reason=msg)
#
# if config.replication_device:
# msg = "ERROR: Import volume not allowed with replication " \
# "enabled backend '%s'" % backend
# raise exception.InvalidInput(reason=msg)
#
# volname = contents['Name']
# existing_ref = str(contents['Opts']['importVol'])
# manage_opts = contents['Opts']
# return {'orchestrator': 'volume',
# 'operation': 'import_volume',
# 'args': (volname,
# existing_ref,
# backend,
# manage_opts)}
#
# def _create_rcg_req_ctxt(self, contents):
# valid_opts = ['replicationGroup', 'size', 'provisioning',
# 'backend', 'mountConflictDelay', 'compression']
# self._validate_opts('create replicated volume', contents, valid_opts)
#
# # It is possible that the user configured replication in hpe.conf
# # but didn't specify any options. In that case too, this operation
# # must fail asking for "replicationGroup" parameter
# # Hence this validation must be done whether "Opts" is there or not
# options = contents['Opts']
# backend = self._get_str_option(options, 'backend', 'DEFAULT')
# create_vol_args = self._get_create_volume_args(options)
# rcg_name = create_vol_args['replicationGroup']
# try:
# self._validate_rcg_params(rcg_name, backend)
# except exception.InvalidInput as ex:
# return json.dumps({u"Err": ex.msg})
#
# return {'operation': 'create_volume',
# 'orchestrator': 'volume',
# 'args': create_vol_args}
#
# def _get_fs_owner(self, options):
# val = self._get_str_option(options, 'fsOwner', None)
# if val:
# fs_owner = val.split(':')
# if len(fs_owner) != 2:
# msg = "Invalid value '%s' specified for fsOwner. Please " \
# "specify a correct value." % val
# raise exception.InvalidInput(msg)
# return fs_owner
# return None
#
# def _get_fs_mode(self, options):
# fs_mode_str = self._get_str_option(options, 'fsMode', None)
# if fs_mode_str:
# try:
# int(fs_mode_str)
# except ValueError as ex:
# msg = "Invalid value '%s' specified for fsMode. Please " \
# "specify an integer value." % fs_mode_str
# raise exception.InvalidInput(msg)
#
# if fs_mode_str[0] != '0':
# msg = "Invalid value '%s' specified for fsMode. Please " \
# "specify an octal value." % fs_mode_str
# raise exception.InvalidInput(msg)
#
# for mode in fs_mode_str:
# if int(mode) > 7:
# msg = "Invalid value '%s' specified for fsMode. Please"\
# " specify an octal value." % fs_mode_str
# raise exception.InvalidInput(msg)
# return fs_mode_str
#
# def _get_create_volume_args(self, options):
# ret_args = dict()
# ret_args['size'] = self._get_int_option(
# options, 'size', volume.DEFAULT_SIZE)
# ret_args['provisioning'] = self._get_str_option(
# options, 'provisioning', volume.DEFAULT_PROV,
# ['full', 'thin', 'dedup'])
# ret_args['flash-cache'] = self._get_str_option(
# options, 'flash-cache', volume.DEFAULT_FLASH_CACHE,
# ['true', 'false'])
# ret_args['qos-name'] = self._get_str_option(
# options, 'qos-name', volume.DEFAULT_QOS)
# ret_args['compression'] = self._get_str_option(
# options, 'compression', volume.DEFAULT_COMPRESSION_VAL,
# ['true', 'false'])
# ret_args['fsOwner'] = self._get_fs_owner(options)
# ret_args['fsMode'] = self._get_fs_mode(options)
# ret_args['mountConflictDelay'] = self._get_int_option(
# options, 'mountConflictDelay',
# volume.DEFAULT_MOUNT_CONFLICT_DELAY)
# ret_args['cpg'] = self._get_str_option(options, 'cpg', None)
# ret_args['snapcpg'] = self._get_str_option(options, 'snapcpg', None)
# ret_args['replicationGroup'] = self._get_str_option(
# options, 'replicationGroup', None)
#
# return ret_args
#
# def _validate_rcg_params(self, rcg_name, backend_name):
# LOG.info("Validating RCG: %s, backend name: %s..." % (rcg_name,
# backend_name))
# hpepluginconfig = self._backend_configs[backend_name]
# replication_device = hpepluginconfig.replication_device
#
# LOG.info("Replication device: %s" % six.text_type(
# replication_device))
#
# if rcg_name and not replication_device:
# msg = "Request to create replicated volume cannot be fulfilled"\
# "without defining 'replication_device' entry defined in"\
# "hpe.conf for the backend '%s'. Please add it and execute"\
# "the request again." % backend_name
# raise exception.InvalidInput(reason=msg)
#
# if replication_device and not rcg_name:
# backend_names = list(self._backend_configs.keys())
# backend_names.sort()
#
# msg = "'%s' is a replication enabled backend. " \
# "Request to create replicated volume cannot be fulfilled "\
# "without specifying 'replicationGroup' option in the "\
# "request. Please either specify 'replicationGroup' or use"\
# "a normal backend and execute the request again. List of"\
# "backends defined in hpe.conf: %s" % (backend_name,
# backend_names)
# raise exception.InvalidInput(reason=msg)
#
# if rcg_name and replication_device:
#
# def _check_valid_replication_mode(mode):
# valid_modes = ['synchronous', 'asynchronous', 'streaming']
# if mode.lower() not in valid_modes:
# msg = "Unknown replication mode '%s' specified. Valid "\
# "values are 'synchronous | asynchronous | " \
# "streaming'" % mode
# raise exception.InvalidInput(reason=msg)
#
# rep_mode = replication_device['replication_mode'].lower()
# _check_valid_replication_mode(rep_mode)
# if replication_device.get('quorum_witness_ip'):
# if rep_mode.lower() != 'synchronous':
# msg = "For Peer Persistence, replication mode must be "\
# "synchronous"
# raise exception.InvalidInput(reason=msg)
#
# sync_period = replication_device.get('sync_period')
# if sync_period and rep_mode == 'synchronous':
# msg = "'sync_period' can be defined only for 'asynchronous'"\
# " and 'streaming' replicate modes"
# raise exception.InvalidInput(reason=msg)
#
# if (rep_mode == 'asynchronous' or rep_mode == 'streaming')\
# and sync_period:
# try:
# sync_period = int(sync_period)
# except ValueError as ex:
# msg = "Non-integer value '%s' not allowed for " \
# "'sync_period'. %s" % (
# replication_device.sync_period, ex)
# raise exception.InvalidInput(reason=msg)
# else:
# SYNC_PERIOD_LOW = 300
# SYNC_PERIOD_HIGH = 31622400
# if sync_period < SYNC_PERIOD_LOW or \
# sync_period > SYNC_PERIOD_HIGH:
# msg = "'sync_period' must be between 300 and " \
# "31622400 seconds."
# raise exception.InvalidInput(reason=msg)
#
# @staticmethod
# def _validate_name(vol_name):
# is_valid_name = re.match("^[A-Za-z0-9]+[A-Za-z0-9_-]+$", vol_name)
# if not is_valid_name:
# msg = 'Invalid volume name: %s is passed.' % vol_name
# raise exception.InvalidInput(reason=msg)
| 43.387727
| 79
| 0.567746
| 16,687
| 0.536404
| 0
| 0
| 6,461
| 0.207689
| 0
| 0
| 18,657
| 0.59973
|
0c51d4fd680a6be2f21491d3d55f99e1a13769ea
| 32,369
|
py
|
Python
|
scripts/train_image_.py
|
shafieelab/SPyDERMAN
|
1b3fe1d0fcb33dcaed85fb110c88575ffa6fb7b6
|
[
"MIT"
] | 1
|
2021-01-26T18:07:56.000Z
|
2021-01-26T18:07:56.000Z
|
scripts/train_image_.py
|
Deeksha-K/SPyDERMAN
|
8cb4a3efc2b8706133f81e7bf878439110402434
|
[
"MIT"
] | null | null | null |
scripts/train_image_.py
|
Deeksha-K/SPyDERMAN
|
8cb4a3efc2b8706133f81e7bf878439110402434
|
[
"MIT"
] | 3
|
2021-01-26T18:07:39.000Z
|
2021-04-07T22:07:01.000Z
|
import argparse
import csv
import os
import os.path as osp
import statistics
import tqdm
import time
from datetime import datetime
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import helper_utils.network as network
import helper_utils.loss as loss
import helper_utils.pre_process as prep
from sklearn.metrics import confusion_matrix
from torch.utils.data import DataLoader
import helper_utils.lr_schedule as lr_schedule
from helper_utils.data_list_m import ImageList
from helper_utils.logger import Logger
from helper_utils.sampler import ImbalancedDatasetSampler
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience."""
def __init__(self, patience=2000, verbose=False, model_path="../results/"):
"""
Args:
patience (int): How long to wait after last time validation loss improved.
                            Default: 2000
verbose (bool): If True, prints a message for each validation loss improvement.
Default: False
"""
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
self.val_loss_min = np.Inf
self.model_path = model_path
def __call__(self, val_loss, model):
score = -val_loss
if self.best_score is None:
self.best_score = score
self.save_checkpoint(val_loss, model)
elif score < self.best_score:
self.counter += 1
# print(f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
else:
self.best_score = score
self.save_checkpoint(val_loss, model)
self.counter = 0
def save_checkpoint(self, val_loss, model):
'''Saves model when validation loss decrease.'''
if self.verbose:
if self.val_loss_min != val_loss:
                print(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
# torch.save(model, self.model_path)
self.val_loss_min = val_loss
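# Hedged usage sketch (not from the original script; `validate`, `model`, and
# `max_steps` are hypothetical placeholders shown only to illustrate the API):
#   stopper = EarlyStopping(patience=2000, verbose=True, model_path="../results/")
#   for step in range(max_steps):
#       val_loss = validate(model)
#       stopper(val_loss, model)
#       if stopper.early_stop:
#           break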
def image_classification_test(loader, model, test_10crop=False, num_iterations=0, test='test', class_num=2):
start_test = True
with torch.no_grad():
if test_10crop:
iter_test = [iter(loader[test][i]) for i in range(1)]
for i in range(len(loader[test][0])):
                data = [next(iter_test[j]) for j in range(1)]
inputs = [data[j][0] for j in range(1)]
labels = data[0][1]
for j in range(1):
inputs[j] = inputs[j].cuda()
labels = labels
outputs = []
for j in range(1):
_, predict_out = model(inputs[j])
outputs.append(nn.Softmax(dim=1)(predict_out))
outputs = sum(outputs)
if start_test:
all_output = outputs.float().cpu()
all_label = labels.float()
start_test = False
else:
all_output = torch.cat((all_output, outputs.float().cpu()), 0)
all_label = torch.cat((all_label, labels.float()), 0)
else:
iter_test = iter(loader[test])
for i in range(len(loader[test])):
                data = next(iter_test)
inputs = data[0]
labels = data[1]
inputs = inputs.cuda()
labels = labels.cuda()
_, raw_outputs = model(inputs)
outputs = nn.Softmax(dim=1)(raw_outputs)
if start_test:
all_output = outputs.float().cpu()
all_label = labels.float()
start_test = False
else:
all_output = torch.cat((all_output, outputs.float().cpu()), 0)
all_label = torch.cat((all_label, labels.float()), 0)
# print(len(all_output))
# print(len(all_label))
all_output_numpy = all_output.numpy()
# all_label_numpy= all_label.cpu().numpy()
_, predict = torch.max(all_output, 1)
all_values_CSV = []
predict_numpy = predict.numpy()
with open(config["logs_path"] + '/_' + str(num_iterations) + '_confidence_values_.csv', mode='w') as file:
csv_writer = csv.writer(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(['Image_Name', 'Prediction', 'class_0_conf', 'class_1_conf'])
for value in range(len(all_output_numpy)):
csv_writer.writerow(
[all_label[value], predict_numpy[value], all_output_numpy[value][0], all_output_numpy[value][1]])
accuracy = torch.sum(torch.squeeze(predict).float() == all_label).item() / float(all_label.size()[0])
cm = confusion_matrix(all_label, torch.squeeze(predict).float())
print(cm)
print(accuracy)
# with open(config["output_path"] + '/_' + str(num_iterations) + 'accuracy.csv', mode='w') as file:
# csv_writer = csv.writer(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
#
#
# csv_writer.writerow(['Accuracy',str(num_iterations),accuracy])
# with open(config["output_path"] + '/_' + str(num_iterations) + '_CM.csv', 'a') as f:
# writer = csv.writer(f)
# writer.writerows(cm)
#
# f.close()
# exit()
return accuracy, cm
# exit()
# return no
def validation_loss(loader, model, test_10crop=False, data_name='valid_source', num_iterations=0, class_num=2):
start_test = True
with torch.no_grad():
if test_10crop:
iter_test = [iter(loader['test'][i]) for i in range(10)]
for i in range(len(loader['test'][0])):
                data = [next(iter_test[j]) for j in range(10)]
inputs = [data[j][0] for j in range(10)]
labels = data[0][1]
for j in range(10):
inputs[j] = inputs[j].cuda()
labels = labels
outputs = []
for j in range(10):
_, predict_out = model(inputs[j])
outputs.append(nn.Softmax(dim=1)(predict_out))
outputs = sum(outputs)
if start_test:
all_output = outputs.float().cpu()
all_label = labels.float()
start_test = False
else:
all_output = torch.cat((all_output, outputs.float().cpu()), 0)
all_label = torch.cat((all_label, labels.float()), 0)
else:
iter_test = iter(loader[data_name])
for i in range(len(loader[data_name])):
                data = next(iter_test)
inputs = data[0]
labels = data[1]
inputs = inputs.cuda()
# labels = labels.cuda()
_, raw_outputs = model(inputs)
outputs = nn.Softmax(dim=1)(raw_outputs)
if start_test:
all_output = outputs.cpu()
all_label = labels
start_test = False
else:
all_output = torch.cat((all_output, outputs.cpu()), 0)
all_label = torch.cat((all_label, labels), 0)
# _, predict = torch.max(all_output, 1)
val_loss = nn.CrossEntropyLoss()(all_output, all_label)
val_loss = val_loss.numpy().item()
all_output = all_output.float()
_, predict = torch.max(all_output, 1)
all_label = all_label.float()
val_accuracy = torch.sum(torch.squeeze(predict).float() == all_label).item() / float(all_label.size()[0])
all_output_numpy = all_output.numpy()
predict_numpy = predict.numpy()
if class_num == 2:
with open(config["logs_path"] + '/_' + str(num_iterations) + '_confidence_values_.csv', mode='w') as file:
csv_writer = csv.writer(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(['Image_Name', 'Prediction', 'class_0_conf', 'class_1_conf'])
for value in range(len(all_output_numpy)):
csv_writer.writerow(
[all_label[value], predict_numpy[value], all_output_numpy[value][0], all_output_numpy[value][1]])
accuracy = torch.sum(torch.squeeze(predict).float() == all_label).item() / float(all_label.size()[0])
cm = confusion_matrix(all_label, torch.squeeze(predict).float())
print(cm)
print(accuracy)
return val_accuracy, val_loss, accuracy, cm
# return val_accuracy,val_loss
def train(config):
now = dt_string.replace(" ", "_").replace(":", "_").replace(".", "_")
logger = Logger(config["logs_path"] + "tensorboard/" + now)
model_path = osp.join(config["output_path"], "best_model.pth.tar")
early_stopping = EarlyStopping(patience=2000, verbose=True, model_path=model_path)
# temp_acc = 0.0
# temp_loss = 10000
## set pre-process
prep_dict = {}
prep_config = config["prep"]
prep_dict["source"] = prep.image_train(**config["prep"]['params'])
prep_dict["target"] = prep.image_train(**config["prep"]['params'])
if prep_config["test_10crop"]:
prep_dict["test"] = prep.image_test_10crop(**config["prep"]['params'])
else:
prep_dict["test"] = prep.image_test(**config["prep"]['params'])
prep_dict["valid_source"] = prep.image_test(**config["prep"]['params'])
## prepare data
dsets = {}
dset_loaders = {}
data_config = config["data"]
train_bs = data_config["source"]["batch_size"]
test_bs = data_config["test"]["batch_size"]
valid_bs = data_config["test"]["batch_size"]
dsets["source"] = ImageList(open(data_config["source"]["list_path"]).readlines(), \
transform=prep_dict["source"], data="source")
dset_loaders["source"] = DataLoader(dsets["source"], batch_size=train_bs, \
shuffle=False, num_workers=4, drop_last=True) #sampler=ImbalancedDatasetSampler(dsets["source"])
dsets["target"] = ImageList(open(data_config["target"]["list_path"]).readlines(), \
transform=prep_dict["target"], data="target")
dset_loaders["target"] = DataLoader(dsets["target"], batch_size=train_bs, \
shuffle=True, num_workers=4, drop_last=True)
dsets["valid_source"] = ImageList(open(data_config["valid_source"]["list_path"]).readlines(), \
transform=prep_dict["valid_source"], data="source")
dset_loaders["valid_source"] = DataLoader(dsets["valid_source"], batch_size=test_bs, \
shuffle=False, num_workers=4)
if prep_config["test_10crop"]:
for i in range(10):
dsets["test"] = [ImageList(open(data_config["test"]["list_path"]).readlines(), \
transform=prep_dict["test"][i]) for i in range(10)]
dset_loaders["test"] = [DataLoader(dset, batch_size=test_bs, \
shuffle=False, num_workers=4) for dset in dsets['test']]
else:
dsets["test"] = ImageList(open(data_config["test"]["list_path"]).readlines(), \
transform=prep_dict["test"], data="target")
dset_loaders["test"] = DataLoader(dsets["test"], batch_size=test_bs, \
shuffle=False, num_workers=4)
class_num = config["network"]["params"]["class_num"]
## set base network
net_config = config["network"]
base_network = net_config["name"](**net_config["params"])
base_network = base_network.cuda()
## add additional network for some methods
if config["loss"]["random"]:
random_layer = network.RandomLayer([base_network.output_num(), class_num], config["loss"]["random_dim"])
ad_net = network.AdversarialNetwork(config["loss"]["random_dim"], 1024)
else:
random_layer = None
ad_net = network.AdversarialNetwork(base_network.output_num() * class_num, 1024)
if config["loss"]["random"]:
random_layer.cuda()
ad_net = ad_net.cuda()
parameter_list = base_network.get_parameters() + ad_net.get_parameters()
## set optimizer
optimizer_config = config["optimizer"]
optimizer = optimizer_config["type"](parameter_list, \
**(optimizer_config["optim_params"]))
param_lr = []
for param_group in optimizer.param_groups:
param_lr.append(param_group["lr"])
schedule_param = optimizer_config["lr_param"]
lr_scheduler = lr_schedule.schedule_dict[optimizer_config["lr_type"]]
gpus = config['gpu'].split(',')
if len(gpus) > 1:
ad_net = nn.DataParallel(ad_net, device_ids=[int(i) for i in gpus])
base_network = nn.DataParallel(base_network, device_ids=[int(i) for i in gpus])
## train
len_train_source = len(dset_loaders["source"])
len_train_target = len(dset_loaders["target"])
len_train_valid_source = len(dset_loaders["valid_source"])
transfer_loss_value = classifier_loss_value = total_loss_value = 0.0
best_acc = 0.0
best_loss_valid = 10000 # total
best_loss_transfer = 10000 # total
best_total_loss_numpy_transfer = best_transfer_loss_numpy_transfer = best_classifier_loss_numpy_transfer = 100000
best_loss_total = 10000 # total
best_total_loss_numpy_total = best_transfer_loss_numpy_total = best_classifier_loss_numpy_total = 100000
best_loss3 = 10000 # transfer
best_total_loss_numpy_acc = best_transfer_loss_numpy_acc = best_classifier_loss_numpy_acc = 100000
val_accuracy = 0.0
val_loss = 10.0
for i in range(config["num_iterations"]):
if i % config["test_interval"] == 0:
itr_log = "num_iterations " + str(i)
config["out_file"].write(itr_log + "\n")
config["out_file"].flush()
print(itr_log)
train_time_end = time.time()
test_time_start = time.time()
base_network.train(False)
# image_classification_test(dset_loaders, \
# base_network,
# test_10crop=prep_config[
# "test_10crop"],
# num_iterations=i)
# val_accuracy, val_loss= validation_loss(dset_loaders, \
# base_network,
#
# )
val_accuracy, val_loss, best_acc_new, best_cm = validation_loss(dset_loaders, \
base_network,
num_iterations=i,
data_name='valid_source')
# val_accuracy_target, val_loss_target, best_acc_new_target, best_cm_target = validation_loss(dset_loaders, \
# base_network,
#
# num_iterations=i,
# data_name='test')
temp_model = nn.Sequential(base_network)
if val_loss < best_loss_valid:
best_loss_valid = val_loss
best_acc = val_accuracy
best_model = copy.deepcopy(temp_model)
torch.save(best_model, osp.join(config["model_path"], "model_" + str(i) + ".pth.tar"))
best_itr = i
# torch.save(temp_model, osp.join(config["model_path"], \
# "iter_{:05d}_model.pth.tar".format(i)))
# if i % config["snapshot_interval"] == 0:
# torch.save(nn.Sequential(base_network), osp.join(config["model_path"], \
# "iter_{:05d}_model.pth.tar".format(i)))
loss_params = config["loss"]
## train one iter
base_network.train(True)
ad_net.train(True)
optimizer = lr_scheduler(optimizer, i, **schedule_param)
optimizer.zero_grad()
if i % len_train_source == 0:
iter_source = iter(dset_loaders["source"])
if i % len_train_target == 0:
iter_target = iter(dset_loaders["target"])
        inputs_source, labels_source = next(iter_source)
        inputs_target, labels_target = next(iter_target)
inputs_source, inputs_target, labels_source = inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda()
features_source, outputs_source = base_network(inputs_source)
features_target, outputs_target = base_network(inputs_target)
features = torch.cat((features_source, features_target), dim=0)
outputs = torch.cat((outputs_source, outputs_target), dim=0)
softmax_out = nn.Softmax(dim=1)(outputs)
if config['method'] == 'CDAN+E':
entropy = loss.Entropy(softmax_out)
transfer_loss = loss.CDAN([features, softmax_out], ad_net, entropy, network.calc_coeff(i), random_layer)
elif config['method'] == 'CDAN':
transfer_loss = loss.CDAN([features, softmax_out], ad_net, None, None, random_layer)
elif config['method'] == 'DANN':
transfer_loss = loss.DANN(features, ad_net)
else:
raise ValueError('Method cannot be recognized.')
classifier_loss = nn.CrossEntropyLoss()(outputs_source, labels_source)
total_loss = loss_params["trade_off"] * transfer_loss + classifier_loss
total_loss.backward()
optimizer.step()
transfer_loss_numpy = transfer_loss.clone().cpu().detach().numpy()
classifier_loss_numpy = classifier_loss.clone().cpu().detach().numpy()
total_loss_numpy = total_loss.clone().cpu().detach().numpy()
entropy_numpy = torch.sum(entropy).clone().cpu().detach().numpy()
info = {'total_loss': total_loss_numpy.item(),
'classifier_loss': classifier_loss_numpy.item(), 'transfer_loss': transfer_loss_numpy.item(),
'entropy': entropy_numpy.item(),
'valid_source_loss': val_loss, 'valid_source_acc': val_accuracy,
# 'target_valid_loss': val_loss_target, 'target_valid_acc': val_accuracy_target,
}
for tag, value in info.items():
logger.scalar_summary(tag, value, i)
with open(config["logs_path"] + '/loss_values_.csv', mode='a') as file:
csv_writer = csv.writer(file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(
[i, total_loss_numpy, transfer_loss_numpy, classifier_loss_numpy, entropy_numpy, val_loss, val_accuracy,
])
early_stopping(val_loss, nn.Sequential(base_network))
# print(i)
if early_stopping.early_stop:
print("Early stopping")
break
# temp_model_total = nn.Sequential(base_network)
# temp_loss_total = transfer_loss_numpy
# if temp_loss_total < best_loss_total:
# best_loss_total = temp_loss_total
# best_model_total = temp_model_total
# best_itr_total = i
#
# best_classifier_loss_numpy_total = classifier_loss_numpy
# best_total_loss_numpy_total = total_loss_numpy
# best_transfer_loss_numpy_total = transfer_loss_numpy
#
# temp_model_transfer = nn.Sequential(base_network)
# temp_loss_transfer = transfer_loss_numpy
# if temp_loss_transfer < best_loss_transfer:
# best_loss_transfer = temp_loss_transfer
# best_model_transfer = temp_model_transfer
# best_itr_transfer = i
#
# best_classifier_loss_numpy_transfer = classifier_loss_numpy
# best_total_loss_numpy_transfer = total_loss_numpy
# best_transfer_loss_numpy_transfer = transfer_loss_numpy
# torch.save(best_model_transfer, osp.join(config["model_path"], "best_model_transfer.pth.tar"))
# torch.save(best_model_total, osp.join(config["model_path"], "best_model_total.pth.tar"))
#
def post_training(model, best_itr, best_classifier_loss, best_total_loss, best_transfer_loss, metric_name):
model.train(False)
#
if is_training:
torch.save(best_model, osp.join(config["model_path"], "best_model_acc.pth.tar"))
torch.save(best_model, osp.join(config["model_path"], "best_model" + str(best_itr) + ".pth.tar"))
model.train(False)
model = torch.load(osp.join(config["model_path"], "best_model" + str(best_itr) + ".pth.tar"))
else:
model = torch.load(model_path_for_testing)
best_itr = config["best_itr"]
source_val_accuracy, source_val_loss, best_acc_new, best_cm = validation_loss(dset_loaders, \
model,
num_iterations=best_itr,
data_name='valid_source')
# val_accuracy_target, val_loss_target, best_acc_new_target, best_cm_target = validation_loss(dset_loaders, \
# model,
#
# num_iterations=best_itr,
# data_name='test')
#print(val_accuracy_target, val_loss_target, best_cm_target)
config_array = ["trail-" + str(trial_number), metric_name,
source_val_accuracy, source_val_loss, best_cm,
#val_accuracy_target, val_loss_target, best_cm_target,
best_classifier_loss, best_transfer_loss,
best_total_loss
,
best_itr] + training_parameters
config["trial_parameters_log"].writerow(config_array)
if not is_training:
# print("error")
best_model = torch.load(model_path_for_testing)
best_itr = config["best_itr"]
post_training(best_model, best_itr, best_classifier_loss_numpy_acc, best_total_loss_numpy_acc, best_transfer_loss_numpy_acc, "Source Val_loss")
# post_training(best_model_total,best_itr_total,best_classifier_loss_numpy_total,best_total_loss_numpy_total,best_transfer_loss_numpy_total,"Total")
# post_training(best_model_transfer,best_itr_transfer,best_classifier_loss_numpy_transfer,best_total_loss_numpy_transfer,best_transfer_loss_numpy_transfer,"Transfer")
return best_acc
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--gpu_id', type=str, nargs='?', default='0', help="device id to run")
parser.add_argument('--dset', type=str, default='COVID19', help="The dataset or source dataset used")
parser.add_argument('--trail', type=str, default='mb', help="The dataset or source dataset used")
parser.add_argument('--lr', type=float, default=0.005)
args = parser.parse_args()
seed = 0
now = datetime.now()
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
dt_string = dt_string.replace("/", "_").replace(" ", "_").replace(":", "_").replace(".", "_")
dataset = args.dset
valid_or_test = "" # "valid or "test" or "" if whole dataset
test_on_source_ed3 = False
is_training = True
if valid_or_test == "test":
is_training = False
testing = "testing"
# testing = "train"
if testing == "testing":
is_training = False
print(dataset)
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
log_output_dir_root = '../logs/' + dataset + '/'
results_output_dir_root = '../experimental results/' + dataset + '/'
models_output_dir_root = '../models/' + dataset + '/'
trial_number = args.trail + "_" + dataset + "_" + testing + "_" + dt_string
if dataset == 'HCV':
source_path = "../Data/HCV/txt_80_20_tile/HCV_train_80.txt"
valid_source_path = "../Data/HCV/txt_80_20_tile/HCV_val_20.txt"
target_path = "../Data/HCV/HCV_target_tile.txt"
no_of_classes = 2
elif dataset == 'HIV':
source_path = "../Data/HIV/txt_80_20_tile/HIV_train_80.txt"
valid_source_path = "../Data/HIV/txt_80_20_tile/HIV_val_20.txt"
target_path = "../Data/HIV/HIV_target_tile.txt"
no_of_classes = 2
elif dataset == 'ZIKA':
source_path = "../Data/ZIKA/txt_90_10_tile/ZIKA_train_90.txt"
valid_source_path = "../Data/ZIKA/txt_90_10_tile/ZIKA_val_10.txt"
target_path = "../Data/ZIKA/ZIKA_target_tile.txt"
no_of_classes = 2
elif dataset == 'HBV':
source_path = "../Data/HBV/txt_80_20_tile/HBV_train_80.txt"
valid_source_path = "../Data/HBV/txt_80_20_tile/HBV_val_20.txt"
target_path = "../Data/HBV/HBV_target_tile.txt"
no_of_classes = 2
elif dataset == 'COVID19':
source_path = "../Data/COVID19/txt_80_20_tile/COVID19_train_80.txt"
valid_source_path = "../Data/COVID19/txt_80_20_tile/COVID19_val_20.txt"
target_path = "../Data/COVID19/COVID19_target_tile.txt"
no_of_classes = 2
elif dataset == 'CAS12':
source_path = "../Data/CAS12/txt_80_20_tile/CAS12_train_80.txt"
valid_source_path = "../Data/CAS12/txt_80_20_tile/CAS12_val_20.txt"
target_path = "../Data/CAS12/CAS12_target_tile.txt"
no_of_classes = 2
else:
no_of_classes = None
net = 'Xception'
# net = 'ResNet50'
dset = dataset
lr_ = args.lr
gamma = 0.001
power = 0.75
# power = 0.9
momentum = 0.9
weight_decay = 0.0005
nesterov = True
optimizer = optim.Adam
config = {}
config['method'] = 'CDAN+E'
config["gpu"] = '0'
config["num_iterations"] = 10000
config["test_interval"] = 50
config["snapshot_interval"] = 5000
batch_size = 8
batch_size_test = 128
use_bottleneck = False
bottleneck_dim = 256
adv_lay_random = False
random_dim = 1024
new_cls = True
if not is_training:
valid_source_path = "../Data/Test/mb1/mb_test.txt"
target_path = "../Data/Test/mb1/mb_test.txt"
model_path_for_testing = "../Final Models/CDAN + GAN/COVID19/model_1600.pth.tar"
config["num_iterations"] = 0
best_itr = "testing"
print("Testing:")
config["best_itr"] = "testing"
print("num_iterations", config["num_iterations"])
header_list = ["trail no ", 'metric name',
'source_val_accuracy', 'source_val_loss', 'best_cm',
# 'val_accuracy_target', 'val_loss_target', 'best_cm_target',
"best_classifier_loss", "best_transfer_loss", "best_total_loss"
,
"best_itr"] + \
["lr", "gamma", "power", "momentum", "weight_decay", "nesterov", "optimizer",
"batch_size", "batch_size_test", "use_bottleneck", "bottleneck_dim", "adv_lay_random",
"random_dim",
"no_of_classes", "new_cls", "dset", "net", "source_path", "target_path", "output_path",
"model_path"
, "logs_path", "gpu", "test_interval", "seed"]
log_output_path = log_output_dir_root + net + '/' + 'trial-' + trial_number + '/'
trial_results_path = net + '/trial-' + trial_number + '/'
config["output_path"] = results_output_dir_root + trial_results_path
config["model_path"] = models_output_dir_root + trial_results_path
config["logs_path"] = log_output_path
if not os.path.exists(config["logs_path"]):
os.makedirs(config["logs_path"])
if is_training:
if not os.path.exists(config["model_path"]):
os.makedirs(config["model_path"])
# if not os.path.exists(config["output_path"]):
# os.makedirs(config["output_path"])
if not os.path.isfile(osp.join(log_output_dir_root, "log.csv")):
with open(osp.join(log_output_dir_root, "log.csv"), mode='w') as param_log_file:
param_log_writer = csv.writer(param_log_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
param_log_writer.writerow(header_list)
config["out_file"] = open(osp.join(config["logs_path"], "log.txt"), "w")
config["trial_parameters_log"] = csv.writer(open(osp.join(log_output_dir_root, "log.csv"), "a"))
config["prep"] = {"test_10crop": False, 'params': {"resize_size": 224, "crop_size": 224, 'alexnet': False}}
config["loss"] = {"trade_off": 1.0}
if "Xception" in net:
config["network"] = \
{"name": network.XceptionFc,
"params":
{"use_bottleneck": use_bottleneck,
"bottleneck_dim": bottleneck_dim,
"new_cls": new_cls}}
elif "ResNet50" in net:
config["network"] = {"name": network.ResNetFc,
"params":
{"resnet_name": net,
"use_bottleneck": use_bottleneck,
"bottleneck_dim": bottleneck_dim,
"new_cls": new_cls}}
config["loss"]["random"] = adv_lay_random
config["loss"]["random_dim"] = random_dim
if optimizer == optim.SGD:
config["optimizer"] = {"type": optim.SGD, "optim_params": {'lr': lr_, "momentum": momentum,
"weight_decay": weight_decay, "nesterov": nesterov},
"lr_type": "inv",
"lr_param": {"lr": lr_, "gamma": gamma, "power": power}}
elif optimizer == optim.Adam:
config["optimizer"] = {"type": optim.Adam, "optim_params": {'lr': lr_,
"weight_decay": weight_decay},
"lr_type": "inv",
"lr_param": {"lr": lr_, "gamma": gamma, "power": power}}
config["dataset"] = dset
config["data"] = {"source": {"list_path": source_path, "batch_size": batch_size},
"target": {"list_path": target_path, "batch_size": batch_size},
"test": {"list_path": target_path, "batch_size": batch_size_test},
"valid_source": {"list_path": valid_source_path, "batch_size": batch_size}}
config["optimizer"]["lr_param"]["lr"] = lr_
config["network"]["params"]["class_num"] = no_of_classes
config["out_file"].write(str(config))
config["out_file"].flush()
training_parameters = [lr_, gamma, power, momentum, weight_decay, nesterov, optimizer,
batch_size, batch_size_test, use_bottleneck, bottleneck_dim, adv_lay_random, random_dim,
no_of_classes, new_cls, dset, net, source_path, target_path, config["output_path"],
config["model_path"]
, config["logs_path"], config["gpu"], config["test_interval"], str(seed)]
print("source_path", source_path)
print("target_path", target_path)
print("lr_", lr_)
print('GPU', os.environ["CUDA_VISIBLE_DEVICES"], config["gpu"])
train(config)
| 42.646904
| 170
| 0.575489
| 1,651
| 0.051006
| 0
| 0
| 0
| 0
| 0
| 0
| 9,515
| 0.293954
|
0c51eb0b9b67869087426ffee62488bbc0029d3f
| 1,230
|
py
|
Python
|
src/freshchat/client/configuration.py
|
twyla-ai/python-freshchat
|
5bb0ea730f82b63292688be61315b6b880896e1f
|
[
"MIT"
] | 4
|
2019-10-15T11:03:28.000Z
|
2021-08-19T01:14:12.000Z
|
src/freshchat/client/configuration.py
|
twyla-ai/python-freshchat
|
5bb0ea730f82b63292688be61315b6b880896e1f
|
[
"MIT"
] | 137
|
2019-10-18T04:36:21.000Z
|
2022-03-21T04:11:18.000Z
|
src/freshchat/client/configuration.py
|
twyla-ai/python-freshchat
|
5bb0ea730f82b63292688be61315b6b880896e1f
|
[
"MIT"
] | 1
|
2021-08-19T01:14:14.000Z
|
2021-08-19T01:14:14.000Z
|
import os
from dataclasses import dataclass, field
from typing import AnyStr, Dict, Optional
from urllib.parse import urljoin
@dataclass
class FreshChatConfiguration:
"""
    Base configuration for the Freshchat API client.
"""
app_id: str
token: str = field(repr=False)
default_channel_id: Optional[str] = field(default=None)
default_initial_message: Optional[str] = field(default=None)
url: Optional[str] = field(
default_factory=lambda: os.environ.get(
"FRESHCHAT_API_URL", "https://api.freshchat.com/v2/"
)
)
@property
def authorization_header(self) -> Dict[AnyStr, AnyStr]:
"""
        Return the authorization header, adding the Bearer prefix when it is missing.
"""
return {
"Authorization": f"Bearer {self.token}"
if "Bearer" not in self.token
else self.token
}
def get_url(self, endpoint: str) -> str:
"""
        Build the full API URL for the given endpoint.
        :param endpoint: endpoint path to append to the base URL
        :return: the resulting URL as a string
"""
return urljoin(self.url, endpoint.lstrip("/"))
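# Hedged usage sketch (values are illustrative, not taken from the original module):
#   config = FreshChatConfiguration(app_id="my-app", token="secret-token")
#   config.authorization_header  # {"Authorization": "Bearer secret-token"}
#   config.get_url("users/123")  # "https://api.freshchat.com/v2/users/123"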
| 28.604651
| 80
| 0.64065
| 1,090
| 0.886179
| 0
| 0
| 1,101
| 0.895122
| 0
| 0
| 472
| 0.38374
|
0c521cb77fbca7152db05ece3eddd9a49ae59322
| 20,120
|
py
|
Python
|
get_headers.py
|
rupendrab/py_unstr_parse
|
3cece3fb7ca969734bf5e60fe5846a7148ce8be4
|
[
"MIT"
] | null | null | null |
get_headers.py
|
rupendrab/py_unstr_parse
|
3cece3fb7ca969734bf5e60fe5846a7148ce8be4
|
[
"MIT"
] | null | null | null |
get_headers.py
|
rupendrab/py_unstr_parse
|
3cece3fb7ca969734bf5e60fe5846a7148ce8be4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.5
import sys
import re
import os
import csv
import numpy as np
from operator import itemgetter
from time import time
from multiprocessing import Pool
from extract_toc import parseargs
from predict_using_toc_mapper import Mapper, get_topic, read_model_file
from find_topics import toc_entries, get_summary_map, read_topics
import dateparser
import check_309
import parse_name
from get_ratings import Ratings, Ratings2
import delimiterwriter
from predict_using_subtopic_mapper import SubtopicPredictor
from analyze_topic import SubtopicReader
from headerutil import *
# NEWLINE_WITHIN_COLUMN = '\r\n'
# CSV_LINE_TERMINATOR = '\r\n'
# CSV_FIELD_DELIMITER = ','
# FD_REPLACED = None
p_region = re.compile('(^|.*\s+)region(\s*[:\']\s*|\s+)(.*)?\s*$', re.IGNORECASE)
p_region_with_other = re.compile('(.*)?\s{5,}(certificate\s+num[bh]e[ir]|certificate|charter\s+num[bh]er|charter|field\s+offic\s*e|url)\s*:?\s*(.*)?\s*$', re.IGNORECASE)
p_blank = re.compile('^\s*$')
p_from_first_uppercase_char = re.compile('^.*?([A-Z].*)$', re.MULTILINE)
p_cert_direct = re.compile('(^|^.*\s+)(certificate\s+number)(\s*:\s*|\s+)(\w+).*$', re.IGNORECASE)
p_region_direct = re.compile('(^|^.*\s+)(region)(\s*:\s*|\s+)(\w+).*$', re.IGNORECASE)
p_patterns_str = {
'bank_name' : [
'bank\s+name',
'institution\s+name',
'name'
],
'bank_location': [
'location'
],
'examiner_in_charge': [
'examiner[\s\-]*in[\s\-]*charge'
],
'exam_start_date': [
'examination[\s\-]*start[\s\-]*date'
],
'exam_date': [
'examination[\s\-]*date'
],
'exam_as_of_date': [
'examination[\s\-]*as[\s\-]*of[\s\-]*date'
]
}
all_matched = {}
for k,patterns in p_patterns_str.items():
all_matched[k] = []
p_patterns = {}
for k,patterns in p_patterns_str.items():
p_patterns[k] = [re.compile('(^|.*\s+)' + p + '(\s*[:\'][\'\s\.]*|[\'\s\.]+)' + '(.*)?\s*$', re.IGNORECASE) for p in patterns]
def get_pattern(line, pat):
ret = []
for i, p in enumerate(p_patterns[pat]):
quality = 0
m = p.match(line)
if (m):
st = m.group(1)
sep = m.group(2)
val = m.group(3)
vals = re.split('\s{5,}', val)
val = vals[0]
if (not st.strip()):
quality += 1 ## Higher quality for line starting pattern
else:
if (len(st) - len(st.rstrip()) < 2): ## Just one space
# quality -= 1
quality -= 0 ## Ignore this one for now
            if (sep.strip() == ':'):
                quality += 1 ## Higher quality in presence of :
if (len(vals) == 1):
quality += 1
ret.append((p_patterns_str[pat][i], val, quality))
return ret
def match_pattern(line, pat):
global all_matched
all_matched[pat] += get_pattern(line, pat)
def match_all_patterns(line):
for pat in p_patterns.keys():
match_pattern(line, pat)
"""
def best_match(pat):
# print('In best match', pat)
all_m = all_matched.get(pat)
if (all_m):
l = sorted(all_matched.get(pat), key=lambda x: (-1 * p_patterns_str[pat].index(x[0]), x[2]), reverse=True)
# print('Best match sorted list', l)
if (l):
if (l[0][2] > 0): ## Quality more than zero
return l[0]
"""
def best_match(pat, validationfn = None):
# print('In best match', pat)
all_m = all_matched.get(pat)
if (all_m):
l = sorted(all_matched.get(pat), key=lambda x: (-1 * p_patterns_str[pat].index(x[0]), x[2]), reverse=True)
# print('Best match sorted list', l)
if (l):
if (validationfn):
for item in l:
if (item[2] >= 0 and validationfn(item[1])): ## Quality more than zero
return item
else:
if (l[0][2] >= 0): ## Quality more than zero
return l[0]
"""
def best_match_text(pat):
bm_tuple = best_match(pat)
if (bm_tuple and len(bm_tuple) == 3):
return bm_tuple[1]
else:
return ""
"""
def best_match_text(pat, validationfn = None):
bm_tuple = best_match(pat, validationfn)
if (bm_tuple and len(bm_tuple) == 3):
return bm_tuple[1]
else:
return ""
def format_eic(eic_name):
if (not eic_name):
return eic_name
words = re.split('\s+', eic_name)
new_eic = []
for i, word in enumerate(words):
if word.endswith(';'):
new_eic += [word[:-1]]
break
if word[0].islower():
break
new_eic += [word]
return ' '.join(new_eic)
def format_date(dt):
if (not dt):
return dt
parts = re.split('\s{3,}', dt)
if (len(parts) >= 1):
return parts[0]
else:
return ""
def get_cert_from_line(line):
if not line:
return "";
m = p_cert_direct.match(line)
if m:
return m.group(4)
else:
return ""
def readfile(filename):
for line in open(filename, 'r', encoding='latin1'):
yield(line[:-1])
def remove_punct(str):
return re.sub(r'[^\w\s]','',str).strip()
def format_cert_number(cert):
return remove_punct(cert)
def format_region(region):
if not region:
return region
split_by_extra_spaces = re.split('\s{5,}', remove_punct(region))
return split_by_extra_spaces[0]
def format_bank_str(str):
if not str:
return str
m = p_from_first_uppercase_char.match(str.strip())
if (m):
return singlespace(m.group(1))
else:
return singlespace(str)
def singlespace(sent):
return ' '.join(re.split('\s+', sent))
def separate_cert(str):
cert = ""
newstr = str
m_str_with_other = p_region_with_other.match(str)
if (m_str_with_other):
newstr = m_str_with_other.group(1)
if m_str_with_other.group(2).lower() == "charter" and m_str_with_other.group(3).lower() == "bank":
return str, cert
if (m_str_with_other.group(2).strip().lower().startswith('certificate')):
cert = m_str_with_other.group(3)
return newstr, cert
def prev_nonblank_line(lines, lineno):
# print('In prev_nonblank_line', lineno, len(lines))
while lineno > 0:
lineno -= 1
line = lines[lineno]
if (p_blank.match(line)):
continue
else:
# print("Non Blank Line = ", line)
return lineno, line
return -1, ""
def init_all_matched():
for k,patterns in p_patterns_str.items():
all_matched[k] = []
def get_header_for_file(filename):
# global all_matched
init_all_matched()
region = ""
cert = ""
bank_name = ""
bank_location = ""
lines = []
lineno = 0
ff = chr(12)
prev_region_match_quality = 0
for line in readfile(filename):
# if line and ord(line[0]) == 12: ## Line starts with control-L
# line = line[1:]
if line: ## Delete all form feed characters
line = line.replace(ff, "")
# line = re.sub('\s+', ' ', line) ## Compress multile spaces to a single space character
lines += [line]
match_all_patterns(line)
m_region = p_region.match(line)
if (m_region):
if (m_region.group(1).strip() == ""):
if (m_region.group(2).strip() == ":"):
region_match_quality = 3
else:
region_match_quality = 2
else:
region_match_quality = 1
if (region_match_quality >= prev_region_match_quality):
prev_region_match_quality = region_match_quality
region = m_region.group(3)
# print("Region = ", region)
region, certx = separate_cert(region)
if (not cert):
cert = certx
# print("Evaluating previous lines:", line)
if (m_region.group(1).strip() == ""):
location_line, bank_location = prev_nonblank_line(lines, lineno)
bank_location, cert2 = separate_cert(bank_location)
if (not cert):
cert = cert2
bank_line, bank_name = prev_nonblank_line(lines, location_line)
# print("Bank Name = ", bank_name)
bank_name, cert2 = separate_cert(bank_name)
if (not cert):
cert = cert2
# print("Bank Name = ", bank_name)
if (not cert):
cert = get_cert_from_line(line)
lineno += 1
# print(all_matched)
if (not bank_name):
bank_name = best_match_text('bank_name')
if (not bank_location):
bank_location = best_match_text('bank_location')
examiner_in_charge = format_eic(best_match_text('examiner_in_charge'))
eic_first_name, eic_middle_name, eic_last_name, eic_suffix = parse_name.parse_name(examiner_in_charge)
exam_start_date = format_date(best_match_text('exam_start_date', dateparser.get_date))
if (not exam_start_date):
exam_start_date = format_date(best_match_text('exam_date', dateparser.get_date))
exam_start_year, exam_start_month, exam_start_day, exam_start_date_formatted = dateparser.get_year_month_day(exam_start_date)
exam_as_of_date = format_date(best_match_text('exam_as_of_date', dateparser.get_date))
exam_as_of_year, exam_as_of_month, exam_as_of_day, exam_as_of_date_formatted = dateparser.get_year_month_day(exam_as_of_date)
return (lines,
format_region(region).title().replace(' ', '_'),
format_cert_number(cert),
format_bank_str(bank_name).replace(' ', '_'),
format_bank_str(bank_location),
eic_first_name,
eic_middle_name,
eic_last_name,
exam_start_date_formatted,
exam_start_year, exam_start_month,
exam_as_of_date_formatted,
exam_as_of_year, exam_as_of_month)
def multiply_array(arr, factor):
if (factor > 1):
subscript = True
else:
subscript = False
if subscript:
return [newval + '_' + str(i+1) for subarr in [[val] * factor for val in arr] for i,newval in enumerate(subarr)]
else:
return [newval for subarr in [[val] * factor for val in arr] for i,newval in enumerate(subarr)]
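# Hedged illustration of the column-expansion helper (example values are made up):
#   multiply_array(['Assets', 'Loans'], 2)  # -> ['Assets_1', 'Assets_2', 'Loans_1', 'Loans_2']
#   multiply_array(['Assets', 'Loans'], 1)  # -> ['Assets', 'Loans']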
def format_headercol(header):
header_new = re.sub('[\-\/]', ' ', header)
header_new = re.sub('\'', '', header_new)
header_new = re.sub(' +', '_', header_new)
return header_new
def replace_list(orig, start, end, new):
orig[start:end] = new
def find_in_list(lst, pattern):
p = re.compile("^" + pattern + "_?[0-9]*$", re.IGNORECASE)
inds = [i for i,val in enumerate(lst) if p.match(val)]
if (inds and len(inds) > 0):
return (inds[0], inds[-1] + 1)
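# Hedged illustration (made-up header names): find_in_list returns the start and
# end indices of columns whose name matches `pattern` with an optional numeric suffix.
#   find_in_list(['region', 'IT_Assessment_1', 'IT_Assessment_2', 'Confidential'], 'IT_Assessment')
#   # -> (1, 3)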
def process_single_file(inputTuple):
(serialno, filename, topics, mapper, summary_map, nosplit, topic_split_times, ratings, smodelfile, stopic) = inputTuple
file_time = time()
# print("Processing file %s at %f" % (filename, (file_time - start_time)))
filedata = get_header_for_file(filename)
file_time = time()
# print("Processing file %s at %f" % (filename, (file_time - start_time)))
rowdata = [os.path.basename(filename)]
lines = filedata[0]
## Only write to exception file if part 309 is not present
# if not check_309.has_part_309_sents(lines):
# exf.write(os.path.abspath(filename) + '\r\n')
# continue
# print(summary_map)
no_topics = summary_map.get(os.path.basename(filename))
# print("No Topics", no_topics)
guess_allowed = False
if (not no_topics or no_topics == 0):
guess_allowed = True
topic_list = read_topics(filename, mapper, guess_allowed)
# print(topic_list)
if nosplit:
topic_data = ["" for i in range(len(topics))]
else:
topic_data = ["" for i in range(len(topics) * topic_split_times)]
# print("Topic Data length", len(topic_data))
no_topics_in_doc = len(topic_list)
if smodelfile:
stcol_topic_start, stcol_topic_end = find_in_list(topics, stopic)
if (not nosplit):
## Readjust for split columns
stcol_topic_start = stcol_topic_start * topic_split_times
stcol_topic_end = stcol_topic_start + topic_split_times
if smodelfile:
subtopicReader = SubtopicReader(stopic, mapper, summary_map)
subtopicPredictor = SubtopicPredictor(smodelfile)
stopic_start_line, stopic_end_line = None,None
for i, topic_line in enumerate(topic_list):
start_line = topic_line[0]
if (i < no_topics_in_doc -1):
end_line = topic_list[i+1][0]
if nosplit:
topic_text = NEWLINE_WITHIN_COLUMN.join(lines[start_line:end_line])
else:
topic_texts_split = break_into_pieces(lines[start_line:end_line], NEWLINE_WITHIN_COLUMN)
else:
end_line = None
if nosplit:
topic_text = NEWLINE_WITHIN_COLUMN.join(lines[start_line:])
else:
topic_texts_split = break_into_pieces(lines[start_line:], NEWLINE_WITHIN_COLUMN)
topic_name = topic_line[2]
topic_index = topics.index(topic_name)
if nosplit:
topic_data[topic_index] = topic_text
# topic_data[topic_index] = topic_text[:32000]
else:
if (len(topic_texts_split) > topic_split_times):
print('Problem in file %s for topic %s, number of splits needed is %d' % (os.path.basename(filename), topic_name, len(topic_texts_split)))
for topic_subindex in range(len(topic_texts_split)):
# print('Setting:', topic_index, topic_index * topic_split_times + topic_subindex)
topic_data[topic_index * topic_split_times + topic_subindex] = topic_texts_split[topic_subindex]
## Handle Subtopics
if smodelfile and topic_name == stopic:
stopic_start_line, stopic_end_line = start_line, end_line
# topic_data[topic_index] = topic_text[:300]
# if (len(topic_text) > 32000):
# print(rowdata[0], topic_name, topic_index, len(topic_text))
# if (len(topic_text) > 32000):
# print(topic_text)
# print(topic_name, topic_index)
# print('======================================================')
# print(topic_text)
if smodelfile:
        ## If Subtopic lines exist
if stopic_start_line:
subtopics_dict = subtopicReader.mapped_subtopics_from_lines(lines, stopic_start_line, stopic_end_line, subtopicPredictor)
subtopics_arr = subtopicReader.subtopic_array(subtopics_dict)
topic_data[stcol_topic_start:stcol_topic_end] = subtopics_arr
else:
topic_data[stcol_topic_start:stcol_topic_end] = subtopicReader.empty_subtopics(subtopicPredictor)
rowdata.insert(0, serialno)
rowdata += filedata[1:]
ratings.process_file(filename)
ratings.map_ratings()
rowdata += ratings.get_column_data()
rowdata += topic_data
return rowdata
def get_headers_for_files(files, topics, mapper, summary_map, outfile, exfile, nosplit, topic_split_times, ratings, smodels = None, stopics = None):
if (not outfile):
if (len(CSV_FIELD_DELIMITER) == 1):
writer = csv.writer(sys.stdout, delimiter = CSV_FIELD_DELIMITER, lineterminator = CSV_LINE_TERMINATOR)
else:
writer = delimiterwriter.writer(sys.stdout, CSV_FIELD_DELIMITER, CSV_LINE_TERMINATOR, FD_REPLACED)
else:
outf = open(outfile, 'w')
if (len(CSV_FIELD_DELIMITER) == 1):
writer = csv.writer(outf, delimiter = CSV_FIELD_DELIMITER, lineterminator = CSV_LINE_TERMINATOR)
else:
writer = delimiterwriter.writer(outf, CSV_FIELD_DELIMITER, CSV_LINE_TERMINATOR, FD_REPLACED)
exf = open(exfile, 'w')
headerline = [
'serial_no',
'file_name',
'region',
'certificate_number',
'bank_name',
'bank_location',
'examiner_in_charge_first_name',
'examiner_in_charge_middle_name',
'examiner_in_charge_last_name',
'exam_start_date',
'exam_start_year',
'exam_start_month',
'exam_as_of_date',
'exam_as_of_year',
'exam_as_of_month'
]
headerline += ratings.get_column_headers()
topic_start_index = len(headerline)
# topic_split_times = 4
topics.append('Confidential')
if (nosplit):
headerline += topics
else:
headerline += multiply_array(topics, topic_split_times)
# headerline_no_spaces = [headercol.replace(' ', '_') for headercol in headerline]
smodelfile=None
stopic=None
if (smodels and len(smodels) > 0):
smodelfile= smodels[0]
if (stopics and len(stopics) > 0):
stopic= stopics[0]
# smodelfile = "model_sub_svc.pkl"
# stopic = "IT Assessment"
# print("smodelfile = %s, stopic = %s" % (smodelfile, stopic))
if smodelfile:
subtopicReader = SubtopicReader(stopic, mapper, summary_map)
subtopicPredictor = SubtopicPredictor(smodelfile)
subtopic_columns = subtopicReader.get_column_names(subtopicPredictor)
stcol_start, stcol_end = find_in_list(headerline, stopic)
headerline[stcol_start:stcol_end] = subtopic_columns
headerline_no_spaces = [format_headercol(headercol) for headercol in headerline]
writer.writerow(headerline_no_spaces)
serial=0
start_time = time()
pool = Pool(processes=4)
inputList = []
for filename in files:
### Placeholder for now
serial += 1
# rowdata = process_single_file(serial, filename, topics, mapper, summary_map, nosplit, topic_split_times, ratings, smodelfile, stopic, subtopicReader, subtopicPredictor)
inputList.append((serial, filename, topics, mapper, summary_map, nosplit, topic_split_times, ratings, smodelfile, stopic))
# print(inputList)
for rowdata in pool.imap(process_single_file, inputList, chunksize=20):
writer.writerow(rowdata)
# for rowdata in pool.map(process_single_file, inputList):
# writer.writerow(rowdata)
if (outfile):
outf.close()
if (exf):
exf.close()
def break_into_pieces(lines, newlinechar, chunksize=32000):
fields = []
field = ""
fieldlen = 0
newlinelen = len(newlinechar)
for i, line in enumerate(lines):
if (fieldlen + len(line) + newlinelen) > chunksize:
fields.append(field)
field = ""
fieldlen = 0
if (field):
field += newlinechar
field += line
fieldlen = fieldlen + len(line) + newlinelen
if field:
fields.append(field)
return fields
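# Hedged illustration (sizes are arbitrary): lines are packed into chunks so that
# no single CSV field exceeds roughly `chunksize` characters.
#   pieces = break_into_pieces(['a' * 20000, 'b' * 20000], '\r\n', chunksize=32000)
#   len(pieces)  # -> 2, because the second line would overflow the first chunk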
def main(args):
global NEWLINE_WITHIN_COLUMN
argsmap = parseargs(args)
files = argsmap.get('files')
if (not files):
sys.exit(0)
summaryfile = argsmap.get("summary")
if (not summaryfile or len(summaryfile) == 0):
print('Summary file must be specified...')
sys.exit(1)
summary_map = get_summary_map(summaryfile[0])
# print(summary_map)
modelfile = argsmap.get("model")
if (not modelfile):
print('Model must be specified...')
sys.exit(1)
modelfile = modelfile[0]
(origmap, sorted_y, vectorizer, le, grid_search) = read_model_file(modelfile)
topics = toc_entries(origmap)
mapper = Mapper(origmap, sorted_y, vectorizer, le, grid_search)
nosplit = argsmap.get('nosplit')
if nosplit == []:
nosplit = True
else:
nosplit = False
if not nosplit:
topic_split_times = argsmap.get('split')
if (not topic_split_times):
topic_split_times = 4
else:
topic_split_times = int(topic_split_times[0])
else:
topic_split_times = 0
NL = argsmap.get('NL') ## Set newline character for multiline columns
if (NL):
NL = NL[0]
if (NL):
NEWLINE_WITHIN_COLUMN = NL
outfile = argsmap.get("out")
if (outfile):
outfile = outfile[0]
exfile = argsmap.get("err")
if exfile:
exfile = exfile[0]
if not exfile:
print("Exception file name must be entered using the --err option...")
sys.exit(1)
ratings_mapper_file = argsmap.get("rmap")
if ratings_mapper_file:
ratings_mapper_file = ratings_mapper_file[0]
if not ratings_mapper_file:
print("Ratings Mapper File file name must be entered using the --rmap option...")
sys.exit(1)
ratings = Ratings(ratings_mapper_file)
global CSV_FIELD_DELIMITER
field_delim = argsmap.get('fd')
if field_delim:
field_delim = field_delim[0]
if field_delim:
CSV_FIELD_DELIMITER = field_delim
global FD_REPLACED
fd_replaced = argsmap.get('fdr')
if fd_replaced:
fd_replaced = fd_replaced[0]
if fd_replaced:
FD_REPLACED = fd_replaced
smodels = argsmap.get("smodels")
stopics = argsmap.get("stopics")
get_headers_for_files(files, topics, mapper, summary_map, outfile, exfile, nosplit, topic_split_times, ratings, smodels, stopics)
if __name__ == '__main__':
args = sys.argv[1:]
main(args)
| 31.885895
| 174
| 0.655964
| 0
| 0
| 98
| 0.004871
| 0
| 0
| 0
| 0
| 4,431
| 0.220229
|
0c52238d0be9f0af598966fd7664c6c79e85f8cb
| 6,214
|
py
|
Python
|
dalle_pytorch/dalle_pytorch.py
|
tensorfork/DALLE-pytorch
|
0e8f5d9a7fe054c587ed91d9c9616c7a883f393b
|
[
"MIT"
] | 1
|
2021-06-22T08:26:20.000Z
|
2021-06-22T08:26:20.000Z
|
dalle_pytorch/dalle_pytorch.py
|
tensorfork/DALLE-pytorch
|
0e8f5d9a7fe054c587ed91d9c9616c7a883f393b
|
[
"MIT"
] | null | null | null |
dalle_pytorch/dalle_pytorch.py
|
tensorfork/DALLE-pytorch
|
0e8f5d9a7fe054c587ed91d9c9616c7a883f393b
|
[
"MIT"
] | null | null | null |
from math import log2
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
from x_transformers import Encoder, Decoder
# helpers
def exists(val):
return val is not None
def masked_mean(t, mask, dim = 1):
t = t.masked_fill(~mask[:, :, None], 0.)
return t.sum(dim = 1) / mask.sum(dim = 1)[..., None]
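# Hedged illustration (shapes are arbitrary): masked_mean averages embeddings only
# over unmasked sequence positions.
#   t = torch.ones(2, 4, 8)
#   mask = torch.tensor([[True, True, False, False], [True, True, True, True]])
#   masked_mean(t, mask).shape  # -> torch.Size([2, 8])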
# classes
class DiscreteVAE(nn.Module):
def __init__(
self,
num_tokens,
dim = 512,
hidden_dim = 64
):
super().__init__()
hdim = hidden_dim
self.encoder = nn.Sequential(
nn.Conv2d(3, hdim, 4, stride = 2, padding = 1),
nn.ReLU(),
nn.Conv2d(hdim, hdim, 4, stride = 2, padding = 1),
nn.ReLU(),
nn.Conv2d(hdim, hdim, 4, stride = 2, padding = 1),
nn.ReLU(),
nn.Conv2d(hdim, num_tokens, 1)
)
self.decoder = nn.Sequential(
nn.ConvTranspose2d(dim, hdim, 4, stride = 2, padding = 1),
nn.ReLU(),
nn.ConvTranspose2d(hdim, hdim, 4, stride = 2, padding = 1),
nn.ReLU(),
nn.ConvTranspose2d(hdim, hdim, 4, stride = 2, padding = 1),
nn.ReLU(),
nn.Conv2d(hdim, 3, 1)
)
self.num_tokens = num_tokens
self.codebook = nn.Embedding(num_tokens, dim)
def forward(
self,
img,
return_recon_loss = False,
return_logits = False
):
logits = self.encoder(img)
if return_logits:
return logits # return logits for getting hard image indices for DALL-E training
soft_one_hot = F.gumbel_softmax(logits, tau = 1.)
sampled = einsum('b n h w, n d -> b d h w', soft_one_hot, self.codebook.weight)
out = self.decoder(sampled)
if not return_recon_loss:
return out
loss = F.mse_loss(img, out)
return loss
# main classes
class CLIP(nn.Module):
def __init__(
self,
*,
dim = 512,
num_text_tokens = 10000,
num_visual_tokens = 512,
text_enc_depth = 6,
visual_enc_depth = 6,
text_seq_len = 256,
visual_seq_len = 1024,
text_heads = 8,
visual_heads = 8
):
super().__init__()
self.scale = dim ** -0.5
self.text_emb = nn.Embedding(num_text_tokens, dim)
self.visual_emb = nn.Embedding(num_visual_tokens, dim)
self.text_pos_emb = nn.Embedding(text_seq_len, dim)
self.visual_pos_emb = nn.Embedding(visual_seq_len, dim)
self.text_transformer = Encoder(dim = dim, depth = text_enc_depth, heads = text_heads)
self.visual_transformer = Encoder(dim = dim, depth = visual_enc_depth, heads = visual_heads)
def forward(
self,
text,
image,
text_mask = None,
return_loss = False
):
b, device = text.shape[0], text.device
text_emb = self.text_emb(text)
text_emb += self.text_pos_emb(torch.arange(text.shape[1], device = device))
image_emb = self.visual_emb(image)
image_emb += self.visual_pos_emb(torch.arange(image.shape[1], device = device))
enc_text = self.text_transformer(text_emb, mask = text_mask)
enc_image = self.visual_transformer(image_emb)
if exists(text_mask):
text_latents = masked_mean(enc_text, text_mask, dim = 1)
else:
text_latents = enc_text.mean(dim = 1)
image_latents = enc_image.mean(dim = 1)
sim = einsum('i d, j d -> i j', text_latents, image_latents) * self.scale
if not return_loss:
return sim
labels = torch.arange(b, device = device)
loss = F.cross_entropy(sim, labels)
return loss
class DALLE(nn.Module):
def __init__(
self,
*,
dim,
num_text_tokens = 10000,
num_image_tokens = 512,
text_seq_len = 256,
image_seq_len = 1024,
depth = 6, # should be 64
heads = 8,
vae = None
):
super().__init__()
self.text_emb = nn.Embedding(num_text_tokens, dim)
self.image_emb = nn.Embedding(num_image_tokens, dim)
self.text_pos_emb = nn.Embedding(text_seq_len, dim)
self.image_pos_emb = nn.Embedding(image_seq_len, dim)
self.num_text_tokens = num_text_tokens # for offsetting logits index and calculating cross entropy loss
self.image_seq_len = image_seq_len
self.total_tokens = num_text_tokens + num_image_tokens + 1 # extra for EOS
self.vae = vae
        if exists(vae):
            self.image_emb = vae.codebook
self.transformer = Decoder(dim = dim, depth = depth, heads = heads)
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, self.total_tokens),
)
def forward(
self,
text,
image,
mask = None,
return_loss = False
):
device = text.device
is_raw_image = len(image.shape) == 4
text_emb = self.text_emb(text)
text_emb += self.text_pos_emb(torch.arange(text.shape[1], device = device))
if is_raw_image:
assert exists(self.vae), 'VAE must be passed into constructor if you are to train directly on raw images'
image_logits = self.vae(image, return_logits = True)
codebook_indices = image_logits.argmax(dim = 1).flatten(1)
image = codebook_indices
image_emb = self.image_emb(image)
image_emb += self.image_pos_emb(torch.arange(image.shape[1], device = device))
tokens = torch.cat((text_emb, image_emb), dim = 1)
if exists(mask):
mask = F.pad(mask, (0, self.image_seq_len), value = True)
out = self.transformer(tokens, mask = mask)
out = self.to_logits(out)
if not return_loss:
return out
offsetted_image = image + self.num_text_tokens
labels = torch.cat((text, offsetted_image), dim = 1)
labels = F.pad(labels, (0, 1), value = (self.total_tokens - 1)) # last token predicts EOS
loss = F.cross_entropy(out.transpose(1, 2), labels[:, 1:])
return loss
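# Hedged usage sketch (hyperparameters and shapes are illustrative, not prescribed
# by the module): with the default image_seq_len of 1024, a 256x256 image passes
# through the three stride-2 convolutions of DiscreteVAE to yield 32x32 = 1024 tokens.
#   vae = DiscreteVAE(num_tokens=512, dim=512)
#   dalle = DALLE(dim=512, vae=vae)
#   text = torch.randint(0, 10000, (1, 256))
#   images = torch.randn(1, 3, 256, 256)
#   loss = dalle(text, images, return_loss=True)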
| 30.019324
| 117
| 0.587383
| 5,816
| 0.935951
| 0
| 0
| 0
| 0
| 0
| 0
| 338
| 0.054393
|
0c52795432861cbcf4e3ec45d893ec1acc331585
| 7,668
|
py
|
Python
|
aiida_phonopy/parsers/phonopy.py
|
giovannipizzi/aiida-phonopy
|
26e419c34415c68f815fa81ce2ac644aa387ae72
|
[
"MIT"
] | null | null | null |
aiida_phonopy/parsers/phonopy.py
|
giovannipizzi/aiida-phonopy
|
26e419c34415c68f815fa81ce2ac644aa387ae72
|
[
"MIT"
] | null | null | null |
aiida_phonopy/parsers/phonopy.py
|
giovannipizzi/aiida-phonopy
|
26e419c34415c68f815fa81ce2ac644aa387ae72
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from aiida.orm.data.folder import FolderData
from aiida.parsers.parser import Parser
from aiida.common.datastructures import calc_states
from aiida.parsers.exceptions import OutputParsingError
from aiida.common.exceptions import UniquenessError
import numpy
from aiida.orm.data.array import ArrayData
from aiida.orm.data.array.bands import BandsData
from aiida.orm.data.array.kpoints import KpointsData
from aiida.orm.data.parameter import ParameterData
from aiida.orm.data.structure import StructureData
import json
from aiida_phonopy.calculations.phonopy import PhonopyCalculation
__copyright__ = u"Copyright (c), 2014-2015, École Polytechnique Fédérale de Lausanne (EPFL), Switzerland, Laboratory of Theory and Simulation of Materials (THEOS). All rights reserved."
__license__ = "Non-Commercial, End-User Software License Agreement, see LICENSE.txt file"
__version__ = "0.4.1"
class PhonopyParser(Parser):
"""
This class is the implementation of the Parser class for a Phonopy calculator.
"""
_out_band_name = 'phonon_frequencies'
_out_dos_name = 'phonon_dos'
_out_thermal_name = 'thermal_properties'
def __init__(self,calc):
"""
Initialize the instance of PhonopyParser
"""
# check for valid input
if not isinstance(calc,PhonopyCalculation):
raise OutputParsingError("Input calculation must be a PhonopyCalculation")
self._calc = calc
def parse_with_retrieved(self, retrieved):
"""
        Parse the retrieved folder and store the results.
        The phonon frequencies, density of states and thermal properties found
        in the results file are stored in the database as output nodes.
"""
from aiida.common.exceptions import InvalidOperation
# suppose at the start that the job is successful
successful = True
# check that calculation is in the right state
# state = self._calc.get_state()
# if state != calc_states.PARSING:
# raise InvalidOperation("Calculation not in {} state"
# .format(calc_states.PARSING) )
# select the folder object
# Check that the retrieved folder is there
try:
out_folder = retrieved[self._calc._get_linkname_retrieved()]
except KeyError:
self.logger.error("No retrieved folder found")
return False, ()
# check what is inside the folder
list_of_files = out_folder.get_folder_list()
# at least the stdout should exist
        if self._calc._OUTPUT_FILE_NAME not in list_of_files or self._calc._RESULT_FILE_NAME not in list_of_files:
            successful = False
            self.logger.error("Output/results not found")
            return successful, ()
# load the results dictionary
json_outfile = out_folder.get_abs_path( self._calc._RESULT_FILE_NAME )
with open(json_outfile,'r') as f:
json_params = json.load(f)
# look at warnings
warnings = []
with open(out_folder.get_abs_path( self._calc._SCHED_ERROR_FILE )) as f:
errors = f.read()
if errors:
warnings = [errors]
# I implicitly assume that all data inside the json are arrays
# it should be very often the case for the phonon properties
# ====================== prepare the output nodes ======================
# save the outputs
new_nodes_list= []
# save dos
try:
frequencies_dos = json_params['frequencies_dos']
total_dos = json_params['total_dos']
array_dos = ArrayData()
array_dos.set_array('frequency', frequencies_dos)
array_dos.set_array('phonon_dos', total_dos)
new_nodes_list.append( (self._out_dos_name, array_dos) )
except KeyError: # keys not found in json
pass
# save thermodynamic quantities
try:
temperature = json_params['temperature']
free_energy = json_params['free_energy']
entropy = json_params['entropy']
cv = json_params['cv']
array_thermal = ArrayData()
array_thermal.set_array('temperature', temperature)
array_thermal.set_array('free_energy', free_energy)
array_thermal.set_array('entropy', entropy)
array_thermal.set_array('specific_heat', cv)
# TODO: in which units am I storing stuff???
new_nodes_list.append( (self._out_thermal_name, array_thermal) )
except KeyError: # keys not found in json
pass
# save frequencies
array_freq = BandsData()
try:
structure = self._calc.inp.structure
except AttributeError:
structure = self._calc.inp.force_constants.structure
inp_kpoints = self._calc.inp.qpoints
try:
inp_kpoints.get_kpoints()
array_freq.set_kpointsdata(inp_kpoints)
except AttributeError: # it had a mesh of kpoints in input
try:
cell = inp_kpoints.cell
except AttributeError:
cell = structure.cell
try:
pbc = inp_kpoints.pbc
except AttributeError:
pbc = structure.pbc
try:
the_kpoints = json_params['q_points']
except KeyError:
the_kpoints = inp_kpoints.get_kpoints()
try:
the_weights = json_params['weights']
except KeyError:
the_weights = None
array_freq.cell = cell
array_freq.pbc = pbc
array_freq.set_kpoints(the_kpoints, weights=the_weights)
array_freq.labels = inp_kpoints.labels
try:
frequencies = json_params['frequencies']
except KeyError:
warnings.append('Unable to read phonon frequencies')
new_nodes_list.append((self.get_linkname_outparams(), ParameterData(dict={'warnings': warnings})))
return False, new_nodes_list
labels = 'frequencies'
bands = frequencies
try:
group_velocities = json_params['group_velocities']
vx = [ _[0] for _ in group_velocities ]
vy = [ _[1] for _ in group_velocities ]
vz = [ _[2] for _ in group_velocities ]
bands = [frequencies]
labels = ['frequencies']
bands.append(vx)
bands.append(vy)
bands.append(vz)
labels += ['vx','vy','vz']
except KeyError:
pass
array_freq.set_bands(frequencies, units='THz', occupations=None, labels='frequencies')
#TODO: verify the units
try:
eigenvectors = json_params['eigenvectors']
array_freq.set_array('eigenvectors', eigenvectors)
except KeyError:
pass
new_nodes_list.append( (self._out_band_name, array_freq) )
#except KeyError as e: # keys not found in json
# raise e
# add the dictionary with warnings
new_nodes_list.append( (self.get_linkname_outparams(), ParameterData(dict={'warnings': warnings})))
return successful, new_nodes_list
| 36.865385
| 185
| 0.594027
| 6,749
| 0.879807
| 0
| 0
| 0
| 0
| 0
| 0
| 2,151
| 0.280407
|
0c52883ec5869dd4ebaf9438c8845a04d78492ff
| 1,128
|
py
|
Python
|
bagua/bagua_define.py
|
jphgxq/bagua
|
3444f79b8fe9c9d2975a8994a1a613ebd14c3d33
|
[
"MIT"
] | 1
|
2021-07-12T03:33:38.000Z
|
2021-07-12T03:33:38.000Z
|
bagua/bagua_define.py
|
jphgxq/bagua
|
3444f79b8fe9c9d2975a8994a1a613ebd14c3d33
|
[
"MIT"
] | null | null | null |
bagua/bagua_define.py
|
jphgxq/bagua
|
3444f79b8fe9c9d2975a8994a1a613ebd14c3d33
|
[
"MIT"
] | null | null | null |
import enum
from typing import List
import sys
if sys.version_info >= (3, 9):
from typing import TypedDict # pytype: disable=not-supported-yet
else:
from typing_extensions import TypedDict # pytype: disable=not-supported-yet
from pydantic import BaseModel
class TensorDtype(str, enum.Enum):
F32 = "f32"
F16 = "f16"
U8 = "u8"
class TensorDeclaration(TypedDict):
name: str
num_elements: int
dtype: TensorDtype
def get_tensor_declaration_bytes(td: TensorDeclaration) -> int:
dtype_unit_size = {
TensorDtype.F32.value: 4,
TensorDtype.F16.value: 2,
TensorDtype.U8.value: 1,
}
return td["num_elements"] * dtype_unit_size[td["dtype"]]
class BaguaHyperparameter(BaseModel):
"""
    Structured collection of all Bagua hyperparameters.
"""
buckets: List[List[TensorDeclaration]] = []
is_hierarchical_reduce: bool = False
def update(self, param_dict: dict):
tmp = self.dict()
tmp.update(param_dict)
for key, value in param_dict.items():
if key in tmp:
self.__dict__[key] = value
return self
| 22.56
| 80
| 0.656915
| 593
| 0.525709
| 0
| 0
| 0
| 0
| 0
| 0
| 157
| 0.139184
|
0c53db086eb0eb7f6a00e60d3b14eacbfe7ba92e
| 97
|
py
|
Python
|
instaphotos/apps.py
|
LekamCharity/insta-IG
|
0302440df3b2029297af54eb9c56090f82232973
|
[
"MIT"
] | null | null | null |
instaphotos/apps.py
|
LekamCharity/insta-IG
|
0302440df3b2029297af54eb9c56090f82232973
|
[
"MIT"
] | null | null | null |
instaphotos/apps.py
|
LekamCharity/insta-IG
|
0302440df3b2029297af54eb9c56090f82232973
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class InstaphotosConfig(AppConfig):
name = 'instaphotos'
| 16.166667
| 35
| 0.773196
| 60
| 0.618557
| 0
| 0
| 0
| 0
| 0
| 0
| 13
| 0.134021
|
0c5539475c0da1f3dfc53cbf5dc335c43077d9cf
| 2,835
|
py
|
Python
|
services/backend/expiring_links/tests/test_expiring_link_generator_serializer.py
|
patpio/drf_images_api
|
ef689bac10ce8b9d2f03d6b647fa4bbd70b02f1c
|
[
"Beerware"
] | 1
|
2022-02-27T16:34:46.000Z
|
2022-02-27T16:34:46.000Z
|
services/backend/expiring_links/tests/test_expiring_link_generator_serializer.py
|
patpio/drf_images_api
|
ef689bac10ce8b9d2f03d6b647fa4bbd70b02f1c
|
[
"Beerware"
] | null | null | null |
services/backend/expiring_links/tests/test_expiring_link_generator_serializer.py
|
patpio/drf_images_api
|
ef689bac10ce8b9d2f03d6b647fa4bbd70b02f1c
|
[
"Beerware"
] | null | null | null |
import pytest
from expiring_links.serializers import ExpiringLinkGeneratorSerializer
@pytest.mark.serializers
def test_fields(db, create_test_expiring_link_serializer_data):
assert list(create_test_expiring_link_serializer_data.keys()) == ['image_id', 'expiration_time']
@pytest.mark.serializers
def test_valid_serializer(db, create_test_expiring_link_serializer_data, create_test_image, create_test_user,
remove_test_data):
serializer = ExpiringLinkGeneratorSerializer(data=create_test_expiring_link_serializer_data,
context={'user': create_test_user})
assert serializer.is_valid()
@pytest.mark.serializers
def test_user_without_permission(db, create_test_expiring_link_serializer_data, create_test_image, create_test_user,
remove_test_data):
create_test_user.tier.expired_link_flag = False
serializer = ExpiringLinkGeneratorSerializer(data=create_test_expiring_link_serializer_data,
context={'user': create_test_user})
assert not serializer.is_valid()
assert set(serializer.errors) == {'non_field_errors'}
@pytest.mark.serializers
def test_wrong_image_id(db, create_test_expiring_link_serializer_data, create_test_image, create_test_user,
remove_test_data):
create_test_expiring_link_serializer_data['image_id'] = create_test_image.pk + 1
serializer = ExpiringLinkGeneratorSerializer(data=create_test_expiring_link_serializer_data,
context={'user': create_test_user})
assert not serializer.is_valid()
assert set(serializer.errors) == {'image_id'}
@pytest.mark.serializers
def test_too_short_expiration_time(db, create_test_expiring_link_serializer_data, create_test_image, create_test_user,
remove_test_data):
create_test_expiring_link_serializer_data['expiration_time'] = 200
serializer = ExpiringLinkGeneratorSerializer(data=create_test_expiring_link_serializer_data,
context={'user': create_test_user})
assert not serializer.is_valid()
assert set(serializer.errors) == {'expiration_time'}
@pytest.mark.serializers
def test_too_long_expiration_time(db, create_test_expiring_link_serializer_data, create_test_image, create_test_user,
remove_test_data):
create_test_expiring_link_serializer_data['expiration_time'] = 40000
serializer = ExpiringLinkGeneratorSerializer(data=create_test_expiring_link_serializer_data,
context={'user': create_test_user})
assert not serializer.is_valid()
assert set(serializer.errors) == {'expiration_time'}
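# Editor's sketch (not part of the original test module): the two
# boundary-value tests above could be collapsed with parametrize; the values
# 200 and 40000 are taken from those tests, while the accepted range itself is
# an assumption about the serializer's validation.
@pytest.mark.serializers
@pytest.mark.parametrize('expiration_time', [200, 40000])
def test_out_of_range_expiration_time(db, create_test_expiring_link_serializer_data, create_test_image,
                                      create_test_user, remove_test_data, expiration_time):
    create_test_expiring_link_serializer_data['expiration_time'] = expiration_time
    serializer = ExpiringLinkGeneratorSerializer(data=create_test_expiring_link_serializer_data,
                                                 context={'user': create_test_user})
    assert not serializer.is_valid()
    assert set(serializer.errors) == {'expiration_time'}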
| 42.954545
| 118
| 0.71358
| 0
| 0
| 0
| 0
| 2,731
| 0.963316
| 0
| 0
| 163
| 0.057496
|
0c553d8f4165e63fa177620f1fa3f79bb1b9cb45
| 91,609
|
py
|
Python
|
com/vmware/nsx/trust_management_client.py
|
adammillerio/vsphere-automation-sdk-python
|
c07e1be98615201139b26c28db3aa584c4254b66
|
[
"MIT"
] | null | null | null |
com/vmware/nsx/trust_management_client.py
|
adammillerio/vsphere-automation-sdk-python
|
c07e1be98615201139b26c28db3aa584c4254b66
|
[
"MIT"
] | null | null | null |
com/vmware/nsx/trust_management_client.py
|
adammillerio/vsphere-automation-sdk-python
|
c07e1be98615201139b26c28db3aa584c4254b66
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#---------------------------------------------------------------------------
# Copyright 2020 VMware, Inc. All rights reserved.
# AUTO GENERATED FILE -- DO NOT MODIFY!
#
# vAPI stub file for package com.vmware.nsx.trust_management.
#---------------------------------------------------------------------------
"""
"""
__author__ = 'VMware, Inc.'
__docformat__ = 'restructuredtext en'
import sys
from vmware.vapi.bindings import type
from vmware.vapi.bindings.converter import TypeConverter
from vmware.vapi.bindings.enum import Enum
from vmware.vapi.bindings.error import VapiError
from vmware.vapi.bindings.struct import VapiStruct
from vmware.vapi.bindings.stub import (
ApiInterfaceStub, StubFactoryBase, VapiInterface)
from vmware.vapi.bindings.common import raise_core_exception
from vmware.vapi.data.validator import (UnionValidator, HasFieldsOfValidator)
from vmware.vapi.exception import CoreException
from vmware.vapi.lib.constants import TaskType
from vmware.vapi.lib.rest import OperationRestMetadata
class Certificates(VapiInterface):
"""
"""
LIST_TYPE_CERTIFICATE = "cluster_api_certificate"
"""
Possible value for ``type`` of method :func:`Certificates.list`.
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx.trust_management.certificates'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _CertificatesStub)
self._VAPI_OPERATION_IDS = {}
def delete(self,
cert_id,
):
"""
Removes the specified certificate. The private key associated with the
certificate is also deleted.
:type cert_id: :class:`str`
:param cert_id: ID of certificate to delete (required)
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('delete',
{
'cert_id': cert_id,
})
def get(self,
cert_id,
details=None,
):
"""
Returns information for the specified certificate ID, including the
certificate's UUID; resource_type (for example,
certificate_self_signed, certificate_ca, or certificate_signed);
pem_encoded data; and history of the certificate (who created or
modified it and when). For additional information, include the
?details=true modifier at the end of the request URI.
:type cert_id: :class:`str`
:param cert_id: ID of certificate to read (required)
:type details: :class:`bool` or ``None``
:param details: whether to expand the pem data and show all its details (optional,
default to false)
:rtype: :class:`com.vmware.nsx.model_client.Certificate`
:return: com.vmware.nsx.model.Certificate
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get',
{
'cert_id': cert_id,
'details': details,
})
def importcertificate(self,
trust_object_data,
):
"""
Adds a new private-public certificate or a chain of certificates (CAs)
and, optionally, a private key that can be applied to one of the
user-facing components (appliance management or edge). The certificate
and the key should be stored in PEM format. If no private key is
provided, the certificate is used as a client certificate in the trust
store.
:type trust_object_data: :class:`com.vmware.nsx.model_client.TrustObjectData`
:param trust_object_data: (required)
:rtype: :class:`com.vmware.nsx.model_client.CertificateList`
:return: com.vmware.nsx.model.CertificateList
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('importcertificate',
{
'trust_object_data': trust_object_data,
})
def list(self,
cursor=None,
details=None,
included_fields=None,
page_size=None,
sort_ascending=None,
sort_by=None,
type=None,
):
"""
Returns all certificate information viewable by the user, including
each certificate's UUID; resource_type (for example,
certificate_self_signed, certificate_ca, or certificate_signed);
pem_encoded data; and history of the certificate (who created or
modified it and when). For additional information, include the
?details=true modifier at the end of the request URI.
:type cursor: :class:`str` or ``None``
:param cursor: Opaque cursor to be used for getting next page of records (supplied
by current result page) (optional)
:type details: :class:`bool` or ``None``
:param details: whether to expand the pem data and show all its details (optional,
default to false)
:type included_fields: :class:`str` or ``None``
:param included_fields: Comma separated list of fields that should be included in query
result (optional)
:type page_size: :class:`long` or ``None``
:param page_size: Maximum number of results to return in this page (server may return
fewer) (optional, default to 1000)
:type sort_ascending: :class:`bool` or ``None``
:param sort_ascending: (optional)
:type sort_by: :class:`str` or ``None``
:param sort_by: Field by which records are sorted (optional)
:type type: :class:`str` or ``None``
:param type: Type of certificate to return (optional)
:rtype: :class:`com.vmware.nsx.model_client.CertificateList`
:return: com.vmware.nsx.model.CertificateList
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('list',
{
'cursor': cursor,
'details': details,
'included_fields': included_fields,
'page_size': page_size,
'sort_ascending': sort_ascending,
'sort_by': sort_by,
'type': type,
})
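# Illustrative sketch (editor's addition, not part of the generated bindings):
# listing the cluster API certificates through the service above. Assumes
# `stub_config` is an existing vmware.vapi.bindings.stub.StubConfiguration
# already wired to an NSX manager; building that configuration is outside
# this module.
def _example_list_cluster_certificates(stub_config):
    certificates_svc = Certificates(stub_config)
    return certificates_svc.list(type=Certificates.LIST_TYPE_CERTIFICATE, details=True)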
class CrlDistributionPoints(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx.trust_management.crl_distribution_points'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _CrlDistributionPointsStub)
self._VAPI_OPERATION_IDS = {}
def create(self,
crl_distribution_point,
):
"""
Create an entity that will represent a Crl Distribution Point
:type crl_distribution_point: :class:`com.vmware.nsx.model_client.CrlDistributionPoint`
:param crl_distribution_point: (required)
:rtype: :class:`com.vmware.nsx.model_client.CrlDistributionPoint`
:return: com.vmware.nsx.model.CrlDistributionPoint
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('create',
{
'crl_distribution_point': crl_distribution_point,
})
def delete(self,
crl_distribution_point_id,
):
"""
Delete a CrlDistributionPoint. It does not delete the actual CRL.
:type crl_distribution_point_id: :class:`str`
:param crl_distribution_point_id: Unique id of the CrlDistributionPoint to delete (required)
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('delete',
{
'crl_distribution_point_id': crl_distribution_point_id,
})
def get(self,
crl_distribution_point_id,
):
"""
:type crl_distribution_point_id: :class:`str`
:param crl_distribution_point_id: (required)
:rtype: :class:`com.vmware.nsx.model_client.CrlDistributionPoint`
:return: com.vmware.nsx.model.CrlDistributionPoint
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get',
{
'crl_distribution_point_id': crl_distribution_point_id,
})
def list(self,
cursor=None,
included_fields=None,
page_size=None,
sort_ascending=None,
sort_by=None,
):
"""
Return the list of CrlDistributionPoints
:type cursor: :class:`str` or ``None``
:param cursor: Opaque cursor to be used for getting next page of records (supplied
by current result page) (optional)
:type included_fields: :class:`str` or ``None``
:param included_fields: Comma separated list of fields that should be included in query
result (optional)
:type page_size: :class:`long` or ``None``
:param page_size: Maximum number of results to return in this page (server may return
fewer) (optional, default to 1000)
:type sort_ascending: :class:`bool` or ``None``
:param sort_ascending: (optional)
:type sort_by: :class:`str` or ``None``
:param sort_by: Field by which records are sorted (optional)
:rtype: :class:`com.vmware.nsx.model_client.CrlDistributionPointList`
:return: com.vmware.nsx.model.CrlDistributionPointList
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('list',
{
'cursor': cursor,
'included_fields': included_fields,
'page_size': page_size,
'sort_ascending': sort_ascending,
'sort_by': sort_by,
})
def update(self,
crl_distribution_point_id,
crl_distribution_point,
):
"""
:type crl_distribution_point_id: :class:`str`
:param crl_distribution_point_id: (required)
:type crl_distribution_point: :class:`com.vmware.nsx.model_client.CrlDistributionPoint`
:param crl_distribution_point: (required)
:rtype: :class:`com.vmware.nsx.model_client.CrlDistributionPoint`
:return: com.vmware.nsx.model.CrlDistributionPoint
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('update',
{
'crl_distribution_point_id': crl_distribution_point_id,
'crl_distribution_point': crl_distribution_point,
})
class Crls(VapiInterface):
"""
"""
LIST_TYPE_CERTIFICATE = "cluster_api_certificate"
"""
Possible value for ``type`` of method :func:`Crls.list`.
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx.trust_management.crls'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _CrlsStub)
self._VAPI_OPERATION_IDS = {}
def delete(self,
crl_id,
):
"""
Deletes an existing CRL.
:type crl_id: :class:`str`
:param crl_id: ID of CRL to delete (required)
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('delete',
{
'crl_id': crl_id,
})
def get(self,
crl_id,
details=None,
):
"""
Returns information about the specified CRL. For additional
information, include the ?details=true modifier at the end of the
request URI.
:type crl_id: :class:`str`
:param crl_id: ID of CRL to read (required)
:type details: :class:`bool` or ``None``
:param details: whether to expand the pem data and show all its details (optional,
default to false)
:rtype: :class:`com.vmware.nsx.model_client.Crl`
:return: com.vmware.nsx.model.Crl
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get',
{
'crl_id': crl_id,
'details': details,
})
def importcrl(self,
crl_object_data,
):
"""
Adds a new certificate revocation list (CRL). The CRL is used to verify
the client certificate status against the revocation lists published by
the CA. For this reason, the administrator needs to add the CRL to the
certificate repository as well.
:type crl_object_data: :class:`com.vmware.nsx.model_client.CrlObjectData`
:param crl_object_data: (required)
:rtype: :class:`com.vmware.nsx.model_client.CrlList`
:return: com.vmware.nsx.model.CrlList
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('importcrl',
{
'crl_object_data': crl_object_data,
})
def list(self,
cursor=None,
details=None,
included_fields=None,
page_size=None,
sort_ascending=None,
sort_by=None,
type=None,
):
"""
Returns information about all CRLs. For additional information, include
the ?details=true modifier at the end of the request URI.
:type cursor: :class:`str` or ``None``
:param cursor: Opaque cursor to be used for getting next page of records (supplied
by current result page) (optional)
:type details: :class:`bool` or ``None``
:param details: whether to expand the pem data and show all its details (optional,
default to false)
:type included_fields: :class:`str` or ``None``
:param included_fields: Comma separated list of fields that should be included in query
result (optional)
:type page_size: :class:`long` or ``None``
:param page_size: Maximum number of results to return in this page (server may return
fewer) (optional, default to 1000)
:type sort_ascending: :class:`bool` or ``None``
:param sort_ascending: (optional)
:type sort_by: :class:`str` or ``None``
:param sort_by: Field by which records are sorted (optional)
:type type: :class:`str` or ``None``
:param type: Type of certificate to return (optional)
:rtype: :class:`com.vmware.nsx.model_client.CrlList`
:return: com.vmware.nsx.model.CrlList
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('list',
{
'cursor': cursor,
'details': details,
'included_fields': included_fields,
'page_size': page_size,
'sort_ascending': sort_ascending,
'sort_by': sort_by,
'type': type,
})
def update(self,
crl_id,
crl,
):
"""
Updates an existing CRL.
:type crl_id: :class:`str`
:param crl_id: ID of CRL to update (required)
:type crl: :class:`com.vmware.nsx.model_client.Crl`
:param crl: (required)
:rtype: :class:`com.vmware.nsx.model_client.Crl`
:return: com.vmware.nsx.model.Crl
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('update',
{
'crl_id': crl_id,
'crl': crl,
})
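# Illustrative sketch (editor's addition, not part of the generated bindings):
# importing a certificate revocation list so client certificates can be
# checked against it. Assumes `stub_config` is a StubConfiguration for an NSX
# manager and `crl_object_data` is a populated
# com.vmware.nsx.model_client.CrlObjectData holding the PEM-encoded CRL.
def _example_import_crl(stub_config, crl_object_data):
    crls_svc = Crls(stub_config)
    return crls_svc.importcrl(crl_object_data)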
class Csrs(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx.trust_management.csrs'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _CsrsStub)
self._VAPI_OPERATION_IDS = {}
def create(self,
csr,
):
"""
Creates a new certificate signing request (CSR). A CSR is encoded
text that contains information about your organization (organization
name, country, and so on) and your Web server's public key, which is a
public certificate that is generated on the server and can be used to
forward this request to a certificate authority (CA). A private key is
also usually created at the same time as the CSR.
:type csr: :class:`com.vmware.nsx.model_client.Csr`
:param csr: (required)
:rtype: :class:`com.vmware.nsx.model_client.Csr`
:return: com.vmware.nsx.model.Csr
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('create',
{
'csr': csr,
})
def delete(self,
csr_id,
):
"""
Removes a specified CSR. If a CSR is not used for verification, you can
delete it. Note that the CSR import and upload POST actions
automatically delete the associated CSR.
:type csr_id: :class:`str`
:param csr_id: ID of CSR to delete (required)
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('delete',
{
'csr_id': csr_id,
})
def get(self,
csr_id,
):
"""
Returns information about the specified CSR.
:type csr_id: :class:`str`
:param csr_id: ID of CSR to read (required)
:rtype: :class:`com.vmware.nsx.model_client.Csr`
:return: com.vmware.nsx.model.Csr
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get',
{
'csr_id': csr_id,
})
def importcsr(self,
csr_id,
trust_object_data,
):
"""
Imports a certificate authority (CA)-signed certificate for a CSR. This
action links the certificate to the private key created by the CSR. The
pem_encoded string in the request body is the signed certificate
provided by your CA in response to the CSR that you provide to them.
The import POST action automatically deletes the associated CSR.
:type csr_id: :class:`str`
:param csr_id: CSR this certificate is associated with (required)
:type trust_object_data: :class:`com.vmware.nsx.model_client.TrustObjectData`
:param trust_object_data: (required)
:rtype: :class:`com.vmware.nsx.model_client.CertificateList`
:return: com.vmware.nsx.model.CertificateList
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('importcsr',
{
'csr_id': csr_id,
'trust_object_data': trust_object_data,
})
def list(self,
cursor=None,
included_fields=None,
page_size=None,
sort_ascending=None,
sort_by=None,
):
"""
Returns information about all of the CSRs that have been created.
:type cursor: :class:`str` or ``None``
:param cursor: Opaque cursor to be used for getting next page of records (supplied
by current result page) (optional)
:type included_fields: :class:`str` or ``None``
:param included_fields: Comma separated list of fields that should be included in query
result (optional)
:type page_size: :class:`long` or ``None``
:param page_size: Maximum number of results to return in this page (server may return
fewer) (optional, default to 1000)
:type sort_ascending: :class:`bool` or ``None``
:param sort_ascending: (optional)
:type sort_by: :class:`str` or ``None``
:param sort_by: Field by which records are sorted (optional)
:rtype: :class:`com.vmware.nsx.model_client.CsrList`
:return: com.vmware.nsx.model.CsrList
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('list',
{
'cursor': cursor,
'included_fields': included_fields,
'page_size': page_size,
'sort_ascending': sort_ascending,
'sort_by': sort_by,
})
def selfsign(self,
csr_id,
days_valid,
):
"""
Self-signs the previously generated CSR. This action is similar to the
import certificate action, but instead of using a public certificate
signed by a CA, the self_sign POST action uses a certificate that is
signed with NSX's own private key.
:type csr_id: :class:`str`
:param csr_id: CSR this certificate is associated with (required)
:type days_valid: :class:`long`
:param days_valid: Number of days the certificate will be valid, default 10 years
(required)
:rtype: :class:`com.vmware.nsx.model_client.Certificate`
:return: com.vmware.nsx.model.Certificate
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('selfsign',
{
'csr_id': csr_id,
'days_valid': days_valid,
})
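# Illustrative sketch (editor's addition, not part of the generated bindings):
# generating a CSR and self-signing it for roughly ten years. Assumes
# `stub_config` is a StubConfiguration for an NSX manager, `csr_spec` is a
# populated com.vmware.nsx.model_client.Csr, and that the created CSR exposes
# its NSX identifier as `id` (an assumption about the model class).
def _example_create_and_self_sign_csr(stub_config, csr_spec, days_valid=3650):
    csrs_svc = Csrs(stub_config)
    created_csr = csrs_svc.create(csr_spec)
    return csrs_svc.selfsign(csr_id=created_csr.id, days_valid=days_valid)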
class PrincipalIdentities(VapiInterface):
"""
"""
_VAPI_SERVICE_ID = 'com.vmware.nsx.trust_management.principal_identities'
"""
Identifier of the service in canonical form.
"""
def __init__(self, config):
"""
:type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
:param config: Configuration to be used for creating the stub.
"""
VapiInterface.__init__(self, config, _PrincipalIdentitiesStub)
self._VAPI_OPERATION_IDS = {}
def create(self,
principal_identity,
):
"""
Associates a principal's name with a certificate that is used to
authenticate. Deprecated, use POST
/trust-management/principal-identities/with-certificate instead.
:type principal_identity: :class:`com.vmware.nsx.model_client.PrincipalIdentity`
:param principal_identity: (required)
:rtype: :class:`com.vmware.nsx.model_client.PrincipalIdentity`
:return: com.vmware.nsx.model.PrincipalIdentity
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('create',
{
'principal_identity': principal_identity,
})
def delete(self,
principal_identity_id,
):
"""
Delete a principal identity. It does not delete the certificate.
:type principal_identity_id: :class:`str`
:param principal_identity_id: Unique id of the principal identity to delete (required)
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('delete',
{
'principal_identity_id': principal_identity_id,
})
def get(self,
principal_identity_id,
):
"""
Get a stored principal identity
:type principal_identity_id: :class:`str`
:param principal_identity_id: ID of Principal Identity to get (required)
:rtype: :class:`com.vmware.nsx.model_client.PrincipalIdentity`
:return: com.vmware.nsx.model.PrincipalIdentity
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('get',
{
'principal_identity_id': principal_identity_id,
})
def list(self):
"""
Returns the list of principals registered with a certificate.
:rtype: :class:`com.vmware.nsx.model_client.PrincipalIdentityList`
:return: com.vmware.nsx.model.PrincipalIdentityList
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('list', None)
def updatecertificate(self,
update_principal_identity_certificate_request,
):
"""
Update a principal identity's certificate
:type update_principal_identity_certificate_request: :class:`com.vmware.nsx.model_client.UpdatePrincipalIdentityCertificateRequest`
:param update_principal_identity_certificate_request: (required)
:rtype: :class:`com.vmware.nsx.model_client.PrincipalIdentity`
:return: com.vmware.nsx.model.PrincipalIdentity
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('updatecertificate',
{
'update_principal_identity_certificate_request': update_principal_identity_certificate_request,
})
class _CertificatesStub(ApiInterfaceStub):
def __init__(self, config):
# properties for delete operation
delete_input_type = type.StructType('operation-input', {
'cert_id': type.StringType(),
})
delete_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
delete_input_value_validator_list = [
]
delete_output_validator_list = [
]
delete_rest_metadata = OperationRestMetadata(
http_method='DELETE',
url_template='/api/v1/trust-management/certificates/{cert-id}',
path_variables={
'cert_id': 'cert-id',
},
query_parameters={
},
content_type='application/json'
)
# properties for get operation
get_input_type = type.StructType('operation-input', {
'cert_id': type.StringType(),
'details': type.OptionalType(type.BooleanType()),
})
get_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/trust-management/certificates/{cert-id}',
path_variables={
'cert_id': 'cert-id',
},
query_parameters={
'details': 'details',
},
content_type='application/json'
)
# properties for importcertificate operation
importcertificate_input_type = type.StructType('operation-input', {
'trust_object_data': type.ReferenceType('com.vmware.nsx.model_client', 'TrustObjectData'),
})
importcertificate_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
importcertificate_input_value_validator_list = [
]
importcertificate_output_validator_list = [
]
importcertificate_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/api/v1/trust-management/certificates?action=import',
request_body_parameter='trust_object_data',
path_variables={
},
query_parameters={
},
content_type='application/json'
)
# properties for list operation
list_input_type = type.StructType('operation-input', {
'cursor': type.OptionalType(type.StringType()),
'details': type.OptionalType(type.BooleanType()),
'included_fields': type.OptionalType(type.StringType()),
'page_size': type.OptionalType(type.IntegerType()),
'sort_ascending': type.OptionalType(type.BooleanType()),
'sort_by': type.OptionalType(type.StringType()),
'type': type.OptionalType(type.StringType()),
})
list_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
list_input_value_validator_list = [
]
list_output_validator_list = [
]
list_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/trust-management/certificates',
path_variables={
},
query_parameters={
'cursor': 'cursor',
'details': 'details',
'included_fields': 'included_fields',
'page_size': 'page_size',
'sort_ascending': 'sort_ascending',
'sort_by': 'sort_by',
'type': 'type',
},
content_type='application/json'
)
operations = {
'delete': {
'input_type': delete_input_type,
'output_type': type.VoidType(),
'errors': delete_error_dict,
'input_value_validator_list': delete_input_value_validator_list,
'output_validator_list': delete_output_validator_list,
'task_type': TaskType.NONE,
},
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'Certificate'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'importcertificate': {
'input_type': importcertificate_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'CertificateList'),
'errors': importcertificate_error_dict,
'input_value_validator_list': importcertificate_input_value_validator_list,
'output_validator_list': importcertificate_output_validator_list,
'task_type': TaskType.NONE,
},
'list': {
'input_type': list_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'CertificateList'),
'errors': list_error_dict,
'input_value_validator_list': list_input_value_validator_list,
'output_validator_list': list_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'delete': delete_rest_metadata,
'get': get_rest_metadata,
'importcertificate': importcertificate_rest_metadata,
'list': list_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx.trust_management.certificates',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class _CrlDistributionPointsStub(ApiInterfaceStub):
def __init__(self, config):
# properties for create operation
create_input_type = type.StructType('operation-input', {
'crl_distribution_point': type.ReferenceType('com.vmware.nsx.model_client', 'CrlDistributionPoint'),
})
create_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
create_input_value_validator_list = [
]
create_output_validator_list = [
]
create_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/api/v1/trust-management/crl-distribution-points',
request_body_parameter='crl_distribution_point',
path_variables={
},
query_parameters={
},
content_type='application/json'
)
# properties for delete operation
delete_input_type = type.StructType('operation-input', {
'crl_distribution_point_id': type.StringType(),
})
delete_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
delete_input_value_validator_list = [
]
delete_output_validator_list = [
]
delete_rest_metadata = OperationRestMetadata(
http_method='DELETE',
url_template='/api/v1/trust-management/crl-distribution-points/{crl-distribution-point-id}',
path_variables={
'crl_distribution_point_id': 'crl-distribution-point-id',
},
query_parameters={
},
content_type='application/json'
)
# properties for get operation
get_input_type = type.StructType('operation-input', {
'crl_distribution_point_id': type.StringType(),
})
get_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/trust-management/crl-distribution-points/{crl-distribution-point-id}',
path_variables={
'crl_distribution_point_id': 'crl-distribution-point-id',
},
query_parameters={
},
content_type='application/json'
)
# properties for list operation
list_input_type = type.StructType('operation-input', {
'cursor': type.OptionalType(type.StringType()),
'included_fields': type.OptionalType(type.StringType()),
'page_size': type.OptionalType(type.IntegerType()),
'sort_ascending': type.OptionalType(type.BooleanType()),
'sort_by': type.OptionalType(type.StringType()),
})
list_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
list_input_value_validator_list = [
]
list_output_validator_list = [
]
list_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/trust-management/crl-distribution-points',
path_variables={
},
query_parameters={
'cursor': 'cursor',
'included_fields': 'included_fields',
'page_size': 'page_size',
'sort_ascending': 'sort_ascending',
'sort_by': 'sort_by',
},
content_type='application/json'
)
# properties for update operation
update_input_type = type.StructType('operation-input', {
'crl_distribution_point_id': type.StringType(),
'crl_distribution_point': type.ReferenceType('com.vmware.nsx.model_client', 'CrlDistributionPoint'),
})
update_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
update_input_value_validator_list = [
]
update_output_validator_list = [
]
update_rest_metadata = OperationRestMetadata(
http_method='PUT',
url_template='/api/v1/trust-management/crl-distribution-points/{crl-distribution-point-id}',
request_body_parameter='crl_distribution_point',
path_variables={
'crl_distribution_point_id': 'crl-distribution-point-id',
},
query_parameters={
},
content_type='application/json'
)
operations = {
'create': {
'input_type': create_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'CrlDistributionPoint'),
'errors': create_error_dict,
'input_value_validator_list': create_input_value_validator_list,
'output_validator_list': create_output_validator_list,
'task_type': TaskType.NONE,
},
'delete': {
'input_type': delete_input_type,
'output_type': type.VoidType(),
'errors': delete_error_dict,
'input_value_validator_list': delete_input_value_validator_list,
'output_validator_list': delete_output_validator_list,
'task_type': TaskType.NONE,
},
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'CrlDistributionPoint'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'list': {
'input_type': list_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'CrlDistributionPointList'),
'errors': list_error_dict,
'input_value_validator_list': list_input_value_validator_list,
'output_validator_list': list_output_validator_list,
'task_type': TaskType.NONE,
},
'update': {
'input_type': update_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'CrlDistributionPoint'),
'errors': update_error_dict,
'input_value_validator_list': update_input_value_validator_list,
'output_validator_list': update_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'create': create_rest_metadata,
'delete': delete_rest_metadata,
'get': get_rest_metadata,
'list': list_rest_metadata,
'update': update_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx.trust_management.crl_distribution_points',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class _CrlsStub(ApiInterfaceStub):
def __init__(self, config):
# properties for delete operation
delete_input_type = type.StructType('operation-input', {
'crl_id': type.StringType(),
})
delete_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
delete_input_value_validator_list = [
]
delete_output_validator_list = [
]
delete_rest_metadata = OperationRestMetadata(
http_method='DELETE',
url_template='/api/v1/trust-management/crls/{crl-id}',
path_variables={
'crl_id': 'crl-id',
},
query_parameters={
},
content_type='application/json'
)
# properties for get operation
get_input_type = type.StructType('operation-input', {
'crl_id': type.StringType(),
'details': type.OptionalType(type.BooleanType()),
})
get_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/trust-management/crls/{crl-id}',
path_variables={
'crl_id': 'crl-id',
},
query_parameters={
'details': 'details',
},
content_type='application/json'
)
# properties for importcrl operation
importcrl_input_type = type.StructType('operation-input', {
'crl_object_data': type.ReferenceType('com.vmware.nsx.model_client', 'CrlObjectData'),
})
importcrl_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
importcrl_input_value_validator_list = [
]
importcrl_output_validator_list = [
]
importcrl_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/api/v1/trust-management/crls?action=import',
request_body_parameter='crl_object_data',
path_variables={
},
query_parameters={
},
content_type='application/json'
)
# properties for list operation
list_input_type = type.StructType('operation-input', {
'cursor': type.OptionalType(type.StringType()),
'details': type.OptionalType(type.BooleanType()),
'included_fields': type.OptionalType(type.StringType()),
'page_size': type.OptionalType(type.IntegerType()),
'sort_ascending': type.OptionalType(type.BooleanType()),
'sort_by': type.OptionalType(type.StringType()),
'type': type.OptionalType(type.StringType()),
})
list_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
list_input_value_validator_list = [
]
list_output_validator_list = [
]
list_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/trust-management/crls',
path_variables={
},
query_parameters={
'cursor': 'cursor',
'details': 'details',
'included_fields': 'included_fields',
'page_size': 'page_size',
'sort_ascending': 'sort_ascending',
'sort_by': 'sort_by',
'type': 'type',
},
content_type='application/json'
)
# properties for update operation
update_input_type = type.StructType('operation-input', {
'crl_id': type.StringType(),
'crl': type.ReferenceType('com.vmware.nsx.model_client', 'Crl'),
})
update_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
update_input_value_validator_list = [
]
update_output_validator_list = [
]
update_rest_metadata = OperationRestMetadata(
http_method='PUT',
url_template='/api/v1/trust-management/crls/{crl-id}',
request_body_parameter='crl',
path_variables={
'crl_id': 'crl-id',
},
query_parameters={
},
content_type='application/json'
)
operations = {
'delete': {
'input_type': delete_input_type,
'output_type': type.VoidType(),
'errors': delete_error_dict,
'input_value_validator_list': delete_input_value_validator_list,
'output_validator_list': delete_output_validator_list,
'task_type': TaskType.NONE,
},
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'Crl'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'importcrl': {
'input_type': importcrl_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'CrlList'),
'errors': importcrl_error_dict,
'input_value_validator_list': importcrl_input_value_validator_list,
'output_validator_list': importcrl_output_validator_list,
'task_type': TaskType.NONE,
},
'list': {
'input_type': list_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'CrlList'),
'errors': list_error_dict,
'input_value_validator_list': list_input_value_validator_list,
'output_validator_list': list_output_validator_list,
'task_type': TaskType.NONE,
},
'update': {
'input_type': update_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'Crl'),
'errors': update_error_dict,
'input_value_validator_list': update_input_value_validator_list,
'output_validator_list': update_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'delete': delete_rest_metadata,
'get': get_rest_metadata,
'importcrl': importcrl_rest_metadata,
'list': list_rest_metadata,
'update': update_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx.trust_management.crls',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
class _CsrsStub(ApiInterfaceStub):
def __init__(self, config):
# properties for create operation
create_input_type = type.StructType('operation-input', {
'csr': type.ReferenceType('com.vmware.nsx.model_client', 'Csr'),
})
create_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
create_input_value_validator_list = [
]
create_output_validator_list = [
]
create_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/api/v1/trust-management/csrs',
request_body_parameter='csr',
path_variables={
},
query_parameters={
},
content_type='application/json'
)
# properties for delete operation
delete_input_type = type.StructType('operation-input', {
'csr_id': type.StringType(),
})
delete_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
delete_input_value_validator_list = [
]
delete_output_validator_list = [
]
delete_rest_metadata = OperationRestMetadata(
http_method='DELETE',
url_template='/api/v1/trust-management/csrs/{csr-id}',
path_variables={
'csr_id': 'csr-id',
},
query_parameters={
},
content_type='application/json'
)
# properties for get operation
get_input_type = type.StructType('operation-input', {
'csr_id': type.StringType(),
})
get_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/trust-management/csrs/{csr-id}',
path_variables={
'csr_id': 'csr-id',
},
query_parameters={
},
content_type='application/json'
)
# properties for importcsr operation
importcsr_input_type = type.StructType('operation-input', {
'csr_id': type.StringType(),
'trust_object_data': type.ReferenceType('com.vmware.nsx.model_client', 'TrustObjectData'),
})
importcsr_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
importcsr_input_value_validator_list = [
]
importcsr_output_validator_list = [
]
importcsr_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/api/v1/trust-management/csrs/{csr-id}?action=import',
request_body_parameter='trust_object_data',
path_variables={
'csr_id': 'csr-id',
},
query_parameters={
},
content_type='application/json'
)
# properties for list operation
list_input_type = type.StructType('operation-input', {
'cursor': type.OptionalType(type.StringType()),
'included_fields': type.OptionalType(type.StringType()),
'page_size': type.OptionalType(type.IntegerType()),
'sort_ascending': type.OptionalType(type.BooleanType()),
'sort_by': type.OptionalType(type.StringType()),
})
list_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
list_input_value_validator_list = [
]
list_output_validator_list = [
]
list_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/trust-management/csrs',
path_variables={
},
query_parameters={
'cursor': 'cursor',
'included_fields': 'included_fields',
'page_size': 'page_size',
'sort_ascending': 'sort_ascending',
'sort_by': 'sort_by',
},
content_type='application/json'
)
# properties for selfsign operation
selfsign_input_type = type.StructType('operation-input', {
'csr_id': type.StringType(),
'days_valid': type.IntegerType(),
})
selfsign_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
selfsign_input_value_validator_list = [
]
selfsign_output_validator_list = [
]
selfsign_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/api/v1/trust-management/csrs/{csr-id}?action=self_sign',
path_variables={
'csr_id': 'csr-id',
},
query_parameters={
'days_valid': 'days_valid',
},
content_type='application/json'
)
operations = {
'create': {
'input_type': create_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'Csr'),
'errors': create_error_dict,
'input_value_validator_list': create_input_value_validator_list,
'output_validator_list': create_output_validator_list,
'task_type': TaskType.NONE,
},
'delete': {
'input_type': delete_input_type,
'output_type': type.VoidType(),
'errors': delete_error_dict,
'input_value_validator_list': delete_input_value_validator_list,
'output_validator_list': delete_output_validator_list,
'task_type': TaskType.NONE,
},
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'Csr'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'importcsr': {
'input_type': importcsr_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'CertificateList'),
'errors': importcsr_error_dict,
'input_value_validator_list': importcsr_input_value_validator_list,
'output_validator_list': importcsr_output_validator_list,
'task_type': TaskType.NONE,
},
'list': {
'input_type': list_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'CsrList'),
'errors': list_error_dict,
'input_value_validator_list': list_input_value_validator_list,
'output_validator_list': list_output_validator_list,
'task_type': TaskType.NONE,
},
'selfsign': {
'input_type': selfsign_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'Certificate'),
'errors': selfsign_error_dict,
'input_value_validator_list': selfsign_input_value_validator_list,
'output_validator_list': selfsign_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'create': create_rest_metadata,
'delete': delete_rest_metadata,
'get': get_rest_metadata,
'importcsr': importcsr_rest_metadata,
'list': list_rest_metadata,
'selfsign': selfsign_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx.trust_management.csrs',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
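# Stub for the principal-identities endpoint: create, delete, get, list and updatecertificate operations.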
class _PrincipalIdentitiesStub(ApiInterfaceStub):
def __init__(self, config):
# properties for create operation
create_input_type = type.StructType('operation-input', {
'principal_identity': type.ReferenceType('com.vmware.nsx.model_client', 'PrincipalIdentity'),
})
create_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
create_input_value_validator_list = [
]
create_output_validator_list = [
]
create_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/api/v1/trust-management/principal-identities',
request_body_parameter='principal_identity',
path_variables={
},
query_parameters={
},
content_type='application/json'
)
# properties for delete operation
delete_input_type = type.StructType('operation-input', {
'principal_identity_id': type.StringType(),
})
delete_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
delete_input_value_validator_list = [
]
delete_output_validator_list = [
]
delete_rest_metadata = OperationRestMetadata(
http_method='DELETE',
url_template='/api/v1/trust-management/principal-identities/{principal-identity-id}',
path_variables={
'principal_identity_id': 'principal-identity-id',
},
query_parameters={
},
content_type='application/json'
)
# properties for get operation
get_input_type = type.StructType('operation-input', {
'principal_identity_id': type.StringType(),
})
get_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
get_input_value_validator_list = [
]
get_output_validator_list = [
]
get_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/trust-management/principal-identities/{principal-identity-id}',
path_variables={
'principal_identity_id': 'principal-identity-id',
},
query_parameters={
},
content_type='application/json'
)
# properties for list operation
list_input_type = type.StructType('operation-input', {})
list_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
list_input_value_validator_list = [
]
list_output_validator_list = [
]
list_rest_metadata = OperationRestMetadata(
http_method='GET',
url_template='/api/v1/trust-management/principal-identities',
path_variables={
},
query_parameters={
},
content_type='application/json'
)
# properties for updatecertificate operation
updatecertificate_input_type = type.StructType('operation-input', {
'update_principal_identity_certificate_request': type.ReferenceType('com.vmware.nsx.model_client', 'UpdatePrincipalIdentityCertificateRequest'),
})
updatecertificate_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
updatecertificate_input_value_validator_list = [
]
updatecertificate_output_validator_list = [
]
updatecertificate_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/api/v1/trust-management/principal-identities?action=update_certificate',
request_body_parameter='update_principal_identity_certificate_request',
path_variables={
},
query_parameters={
},
content_type='application/json'
)
operations = {
'create': {
'input_type': create_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'PrincipalIdentity'),
'errors': create_error_dict,
'input_value_validator_list': create_input_value_validator_list,
'output_validator_list': create_output_validator_list,
'task_type': TaskType.NONE,
},
'delete': {
'input_type': delete_input_type,
'output_type': type.VoidType(),
'errors': delete_error_dict,
'input_value_validator_list': delete_input_value_validator_list,
'output_validator_list': delete_output_validator_list,
'task_type': TaskType.NONE,
},
'get': {
'input_type': get_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'PrincipalIdentity'),
'errors': get_error_dict,
'input_value_validator_list': get_input_value_validator_list,
'output_validator_list': get_output_validator_list,
'task_type': TaskType.NONE,
},
'list': {
'input_type': list_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'PrincipalIdentityList'),
'errors': list_error_dict,
'input_value_validator_list': list_input_value_validator_list,
'output_validator_list': list_output_validator_list,
'task_type': TaskType.NONE,
},
'updatecertificate': {
'input_type': updatecertificate_input_type,
'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'PrincipalIdentity'),
'errors': updatecertificate_error_dict,
'input_value_validator_list': updatecertificate_input_value_validator_list,
'output_validator_list': updatecertificate_output_validator_list,
'task_type': TaskType.NONE,
},
}
rest_metadata = {
'create': create_rest_metadata,
'delete': delete_rest_metadata,
'get': get_rest_metadata,
'list': list_rest_metadata,
'updatecertificate': updatecertificate_rest_metadata,
}
ApiInterfaceStub.__init__(
self, iface_name='com.vmware.nsx.trust_management.principal_identities',
config=config, operations=operations, rest_metadata=rest_metadata,
is_vapi_rest=False)
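# Factory exposing the trust-management service stubs plus the nested client stub factories.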
class StubFactory(StubFactoryBase):
_attrs = {
'Certificates': Certificates,
'CrlDistributionPoints': CrlDistributionPoints,
'Crls': Crls,
'Csrs': Csrs,
'PrincipalIdentities': PrincipalIdentities,
'crl_distribution_points': 'com.vmware.nsx.trust_management.crl_distribution_points_client.StubFactory',
'principal_identities': 'com.vmware.nsx.trust_management.principal_identities_client.StubFactory',
}
| 44.40572
| 156
| 0.596492
| 90,542
| 0.988353
| 0
| 0
| 0
| 0
| 0
| 0
| 51,538
| 0.562587
|
0c5549700625606ae1bd959bf730c22c941eb303
| 4,255
|
py
|
Python
|
bottleneck/tests/list_input_test.py
|
stroxler/bottleneck
|
6e91bcb8a21170588ee9a3f2c425a4e307ae05de
|
[
"BSD-2-Clause"
] | 2
|
2015-05-26T09:06:32.000Z
|
2015-05-26T09:06:46.000Z
|
bottleneck/tests/list_input_test.py
|
stroxler/bottleneck
|
6e91bcb8a21170588ee9a3f2c425a4e307ae05de
|
[
"BSD-2-Clause"
] | null | null | null |
bottleneck/tests/list_input_test.py
|
stroxler/bottleneck
|
6e91bcb8a21170588ee9a3f2c425a4e307ae05de
|
[
"BSD-2-Clause"
] | null | null | null |
"Test list input."
# For support of python 2.5
from __future__ import with_statement
import numpy as np
from numpy.testing import assert_equal, assert_array_almost_equal
import bottleneck as bn
# ---------------------------------------------------------------------------
# Check that functions can handle list input
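# lists() yields small 1-D to 4-D test arrays converted to nested Python lists; the 4-D case exercises the unaccelerated code path.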
def lists():
"Iterator that yields lists to use for unit testing."
ss = {}
ss[1] = {'size': 4, 'shapes': [(4,)]}
ss[2] = {'size': 6, 'shapes': [(1, 6), (2, 3)]}
ss[3] = {'size': 6, 'shapes': [(1, 2, 3)]}
ss[4] = {'size': 24, 'shapes': [(1, 2, 3, 4)]} # Unaccelerated
for ndim in ss:
size = ss[ndim]['size']
shapes = ss[ndim]['shapes']
a = np.arange(size)
for shape in shapes:
a = a.reshape(shape)
yield a.tolist()
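# Shared helper: run the fast and slow implementations on every list from lists() and compare the results.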
def unit_maker(func, func0, args=tuple()):
"Test that bn.xxx gives the same output as bn.slow.xxx for list input."
msg = '\nfunc %s | input %s | shape %s\n'
msg += '\nInput array:\n%s\n'
for i, arr in enumerate(lists()):
argsi = tuple([list(arr)] + list(args))
actual = func(*argsi)
desired = func0(*argsi)
tup = (func.__name__, 'a'+str(i), str(np.array(arr).shape), arr)
err_msg = msg % tup
assert_array_almost_equal(actual, desired, err_msg=err_msg)
def test_nansum():
"Test nansum."
yield unit_maker, bn.nansum, bn.slow.nansum
def test_nanmax():
"Test nanmax."
yield unit_maker, bn.nanmax, bn.slow.nanmax
def test_nanargmin():
"Test nanargmin."
yield unit_maker, bn.nanargmin, bn.slow.nanargmin
def test_nanargmax():
"Test nanargmax."
yield unit_maker, bn.nanargmax, bn.slow.nanargmax
def test_nanmin():
"Test nanmin."
yield unit_maker, bn.nanmin, bn.slow.nanmin
def test_nanmean():
"Test nanmean."
yield unit_maker, bn.nanmean, bn.slow.nanmean
def test_nanstd():
"Test nanstd."
yield unit_maker, bn.nanstd, bn.slow.nanstd
def test_nanvar():
"Test nanvar."
yield unit_maker, bn.nanvar, bn.slow.nanvar
def test_median():
"Test median."
yield unit_maker, bn.median, bn.slow.median
def test_nanmedian():
"Test nanmedian."
yield unit_maker, bn.nanmedian, bn.slow.nanmedian
def test_rankdata():
"Test rankdata."
yield unit_maker, bn.rankdata, bn.slow.rankdata
def test_nanrankdata():
"Test nanrankdata."
yield unit_maker, bn.nanrankdata, bn.slow.nanrankdata
def test_partsort():
"Test partsort."
yield unit_maker, bn.partsort, bn.slow.partsort, (2,)
def test_argpartsort():
"Test argpartsort."
yield unit_maker, bn.argpartsort, bn.slow.argpartsort, (2,)
def test_ss():
"Test ss."
yield unit_maker, bn.ss, bn.slow.ss
def test_nn():
"Test nn."
a = [[1, 2], [3, 4]]
a0 = [1, 2]
assert_equal(bn.nn(a, a0), bn.slow.nn(a, a0))
def test_anynan():
"Test anynan."
yield unit_maker, bn.anynan, bn.slow.anynan
def test_allnan():
"Test allnan."
yield unit_maker, bn.allnan, bn.slow.allnan
def test_move_sum():
"Test move_sum."
yield unit_maker, bn.move_sum, bn.slow.move_sum, (2,)
def test_move_nansum():
"Test move_nansum."
yield unit_maker, bn.move_nansum, bn.slow.move_nansum, (2,)
def test_move_mean():
"Test move_mean."
yield unit_maker, bn.move_mean, bn.slow.move_mean, (2,)
def test_move_median():
"Test move_median."
yield unit_maker, bn.move_median, bn.slow.move_median, (2,)
def test_move_nanmean():
"Test move_nanmean."
yield unit_maker, bn.move_nanmean, bn.slow.move_nanmean, (2,)
def test_move_std():
"Test move_std."
yield unit_maker, bn.move_std, bn.slow.move_std, (2,)
def test_move_nanstd():
"Test move_nanstd."
yield unit_maker, bn.move_nanstd, bn.slow.move_nanstd, (2,)
def test_move_min():
"Test move_min."
yield unit_maker, bn.move_min, bn.slow.move_min, (2,)
def test_move_max():
"Test move_max."
yield unit_maker, bn.move_max, bn.slow.move_max, (2,)
def test_move_nanmin():
"Test move_nanmin."
yield unit_maker, bn.move_nanmin, bn.slow.move_nanmin, (2,)
def test_move_nanmax():
"Test move_nanmax."
yield unit_maker, bn.move_nanmax, bn.slow.move_nanmax, (2,)
| 22.632979
| 77
| 0.636193
| 0
| 0
| 3,207
| 0.753702
| 0
| 0
| 0
| 0
| 899
| 0.211281
|
0c587de94c3ee270415110f012b7d77cb256c5a4
| 1,475
|
py
|
Python
|
hanzo/warcindex.py
|
ukwa/warctools
|
f74061382d6bc37b6eec889a3aec26c5748d90d3
|
[
"MIT"
] | 1
|
2020-09-03T00:51:50.000Z
|
2020-09-03T00:51:50.000Z
|
hanzo/warcindex.py
|
martinsbalodis/warc-tools
|
d9d5e708e00bd0f6d9d0c2d95cbc9332f51b05e4
|
[
"MIT"
] | null | null | null |
hanzo/warcindex.py
|
martinsbalodis/warc-tools
|
d9d5e708e00bd0f6d9d0c2d95cbc9332f51b05e4
|
[
"MIT"
] | 1
|
2021-04-12T01:45:14.000Z
|
2021-04-12T01:45:14.000Z
|
#!/usr/bin/env python
"""warcindex - dump warc index"""
import os
import sys
import os.path
from optparse import OptionParser
from .warctools import WarcRecord, expand_files
parser = OptionParser(usage="%prog [options] warc warc warc")
parser.add_option("-l", "--limit", dest="limit")
parser.add_option("-O", "--output-format", dest="output_format", help="output format (ignored)")
parser.add_option("-o", "--output", dest="output_format", help="output file (ignored)")
parser.add_option("-L", "--log-level", dest="log_level")
parser.set_defaults(output=None, limit=None, log_level="info")
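# Print one index line per WARC record: filename, offset, type, URL, record id, content type and content length.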
def main(argv):
(options, input_files) = parser.parse_args(args=argv[1:])
out = sys.stdout
if len(input_files) < 1:
parser.error("no imput warc file(s)")
print '#WARC filename offset warc-type warc-subject-uri warc-record-id content-type content-length'
for name in expand_files(input_files):
fh = WarcRecord.open_archive(name, gzip="auto")
for (offset, record, errors) in fh.read_records(limit=None):
if record:
print name, offset, record.type, record.url, record.id, record.content_type, record.content_length
elif errors:
pass
# ignore
else:
pass
# no errors at tail
fh.close()
return 0
def run():
sys.exit(main(sys.argv))
if __name__ == '__main__':
run()
| 23.412698
| 114
| 0.633898
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 412
| 0.279322
|
0c599f149ff2c8a006a46a9e33e3ef181a3cc037
| 1,469
|
py
|
Python
|
tsdata/migrations/0001_initial.py
|
OpenDataPolicingNC/Traffic-Stops
|
74e0d16ad2ac32addca6f04d34c2ddf36d023990
|
[
"MIT"
] | 25
|
2015-09-12T23:10:52.000Z
|
2021-03-24T08:39:46.000Z
|
tsdata/migrations/0001_initial.py
|
OpenDataPolicingNC/Traffic-Stops
|
74e0d16ad2ac32addca6f04d34c2ddf36d023990
|
[
"MIT"
] | 159
|
2015-07-01T03:57:23.000Z
|
2021-04-17T21:09:19.000Z
|
tsdata/migrations/0001_initial.py
|
copelco/NC-Traffic-Stops
|
74e0d16ad2ac32addca6f04d34c2ddf36d023990
|
[
"MIT"
] | 8
|
2015-10-02T16:56:40.000Z
|
2020-10-18T01:16:29.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
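# Initial schema: Dataset describes a source file to import, Import records a single import run.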
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Dataset',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('state', models.CharField(choices=[('nc', 'North Carolina'), ('md', 'Maryland')], max_length=2)),
('name', models.CharField(unique=True, max_length=255)),
('date_added', models.DateTimeField(auto_now_add=True)),
('date_received', models.DateField()),
('url', models.URLField(unique=True, verbose_name='URL')),
('destination', models.CharField(blank=True, max_length=1024, help_text='Absolute path to destination directory (helpful for testing)')),
],
),
migrations.CreateModel(
name='Import',
fields=[
('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
('date_started', models.DateTimeField(auto_now_add=True)),
('date_finished', models.DateTimeField(null=True)),
('successful', models.BooleanField(default=False)),
('dataset', models.ForeignKey(to='tsdata.Dataset')),
],
),
]
| 40.805556
| 153
| 0.582709
| 1,360
| 0.9258
| 0
| 0
| 0
| 0
| 0
| 0
| 281
| 0.191287
|
0c5b11a856de6baa5333d1f6f60e74187acb3fcd
| 1,836
|
py
|
Python
|
api/tests/opentrons/protocol_engine/execution/test_run_control_handler.py
|
mrod0101/opentrons
|
6450edb0421f1c2484c292f8583602d8f6fd13b8
|
[
"Apache-2.0"
] | 235
|
2017-10-27T20:37:27.000Z
|
2022-03-30T14:09:49.000Z
|
api/tests/opentrons/protocol_engine/execution/test_run_control_handler.py
|
koji/opentrons
|
0f339f45de238183b2c433e67f839363d5177582
|
[
"Apache-2.0"
] | 8,425
|
2017-10-26T15:25:43.000Z
|
2022-03-31T23:54:26.000Z
|
api/tests/opentrons/protocol_engine/execution/test_run_control_handler.py
|
mrod0101/opentrons
|
6450edb0421f1c2484c292f8583602d8f6fd13b8
|
[
"Apache-2.0"
] | 130
|
2017-11-09T21:02:37.000Z
|
2022-03-15T18:01:24.000Z
|
"""Run control side-effect handler."""
import pytest
from decoy import Decoy
from opentrons.protocol_engine.state import StateStore
from opentrons.protocol_engine.actions import ActionDispatcher, PauseAction
from opentrons.protocol_engine.execution.run_control import RunControlHandler
from opentrons.protocol_engine.state import EngineConfigs
@pytest.fixture
def state_store(decoy: Decoy) -> StateStore:
"""Get a mocked out StateStore."""
return decoy.mock(cls=StateStore)
@pytest.fixture
def action_dispatcher(decoy: Decoy) -> ActionDispatcher:
"""Get a mocked out ActionDispatcher."""
return decoy.mock(cls=ActionDispatcher)
@pytest.fixture
def subject(
state_store: StateStore,
action_dispatcher: ActionDispatcher,
) -> RunControlHandler:
"""Create a RunControlHandler with its dependencies mocked out."""
return RunControlHandler(
state_store=state_store,
action_dispatcher=action_dispatcher,
)
async def test_pause(
decoy: Decoy,
state_store: StateStore,
action_dispatcher: ActionDispatcher,
subject: RunControlHandler,
) -> None:
"""It should be able to execute a pause."""
decoy.when(state_store.get_configs()).then_return(EngineConfigs(ignore_pause=False))
await subject.pause()
decoy.verify(
action_dispatcher.dispatch(PauseAction()),
await state_store.wait_for(condition=state_store.commands.get_is_running),
)
async def test_pause_analysis(
decoy: Decoy,
state_store: StateStore,
action_dispatcher: ActionDispatcher,
subject: RunControlHandler,
) -> None:
"""It should no op during a protocol analysis."""
decoy.when(state_store.get_configs()).then_return(EngineConfigs(ignore_pause=True))
await subject.pause()
decoy.verify(action_dispatcher.dispatch(PauseAction()), times=0)
| 30.6
| 88
| 0.751634
| 0
| 0
| 0
| 0
| 605
| 0.329521
| 871
| 0.474401
| 270
| 0.147059
|
0c5b7ae73a2b618a79092df65cc9600f76dbf5e0
| 510
|
py
|
Python
|
Datasets/Generator/Healthcare/mergedrug.py
|
undraaa/m2bench
|
b661b61ca04470ed1c9c50531ce760a2cd5000d9
|
[
"RSA-MD"
] | null | null | null |
Datasets/Generator/Healthcare/mergedrug.py
|
undraaa/m2bench
|
b661b61ca04470ed1c9c50531ce760a2cd5000d9
|
[
"RSA-MD"
] | null | null | null |
Datasets/Generator/Healthcare/mergedrug.py
|
undraaa/m2bench
|
b661b61ca04470ed1c9c50531ce760a2cd5000d9
|
[
"RSA-MD"
] | 1
|
2021-11-29T10:31:36.000Z
|
2021-11-29T10:31:36.000Z
|
import json
import glob
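# Read every *.json file in drug_dirpath and write the combined list to merged_drug.json.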
def merge_drug(drug_dirpath):
#start_time = time.time()
print("\n----- MERGING json data into merged_drug.json -----")
result = []
for f in glob.glob(drug_dirpath+'/*.json'):
with open(f,"rb") as infile:
result.append(json.load(infile))
with open("merged_drug.json","w") as outfile:
json.dump(result,outfile)
print("----- merged_drug.json DONE -----")
#print("----- MERGE DRUG %s seconds -----" % (time.time() - start_time))
| 26.842105
| 76
| 0.592157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 221
| 0.433333
|
0c5c225bea97b848df7068538bc1df5271634638
| 10,326
|
py
|
Python
|
tests/test_rundramatiq_command.py
|
BradleyKirton/django_dramatiq
|
93a4a9ae39aee643cc4a987b18030ad8d1fc8480
|
[
"Apache-2.0"
] | null | null | null |
tests/test_rundramatiq_command.py
|
BradleyKirton/django_dramatiq
|
93a4a9ae39aee643cc4a987b18030ad8d1fc8480
|
[
"Apache-2.0"
] | null | null | null |
tests/test_rundramatiq_command.py
|
BradleyKirton/django_dramatiq
|
93a4a9ae39aee643cc4a987b18030ad8d1fc8480
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
from io import StringIO
from unittest.mock import patch
from django.core.management import call_command
from django_dramatiq.management.commands import rundramatiq
def test_rundramatiq_command_autodiscovers_modules():
assert rundramatiq.Command().discover_tasks_modules() == [
"django_dramatiq.setup",
"django_dramatiq.tasks",
"tests.testapp1.tasks",
"tests.testapp2.tasks",
"tests.testapp3.tasks.other_tasks",
"tests.testapp3.tasks.tasks",
"tests.testapp3.tasks.utils",
"tests.testapp3.tasks.utils.not_a_task",
]
@patch("os.execvp")
def test_rundramatiq_can_run_dramatiq(execvp_mock):
# Given an output buffer
buff = StringIO()
# When I call the rundramatiq command
call_command("rundramatiq", stdout=buff)
# Then stdout should contain a message about discovered task modules
assert "Discovered tasks module: 'tests.testapp1.tasks'" in buff.getvalue()
assert "Discovered tasks module: 'tests.testapp2.tasks'" in buff.getvalue()
assert "Discovered tasks module: 'tests.testapp3.tasks.tasks'" in buff.getvalue()
assert "Discovered tasks module: 'tests.testapp3.tasks.other_tasks'" in buff.getvalue()
# And execvp should be called with the appropriate arguments
cores = str(rundramatiq.CPU_COUNT)
expected_exec_name = "dramatiq"
expected_exec_path = os.path.join(
os.path.dirname(sys.executable),
expected_exec_name,
)
execvp_mock.assert_called_once_with(expected_exec_path, [
expected_exec_name, "--path", ".", "--processes", cores, "--threads", cores,
"django_dramatiq.setup",
"django_dramatiq.tasks",
"tests.testapp1.tasks",
"tests.testapp2.tasks",
"tests.testapp3.tasks.other_tasks",
"tests.testapp3.tasks.tasks",
"tests.testapp3.tasks.utils",
"tests.testapp3.tasks.utils.not_a_task",
])
@patch("os.execvp")
def test_rundramatiq_can_run_dramatiq_reload(execvp_mock):
# Given an output buffer
buff = StringIO()
# When I call the rundramatiq command with --reload
call_command("rundramatiq", "--reload", stdout=buff)
# Then execvp should be called with the appropriate arguments
cores = str(rundramatiq.CPU_COUNT)
expected_exec_name = "dramatiq"
expected_exec_path = os.path.join(
os.path.dirname(sys.executable),
expected_exec_name,
)
execvp_mock.assert_called_once_with(expected_exec_path, [
expected_exec_name, "--path", ".", "--processes", cores, "--threads", cores,
"--watch", ".",
"django_dramatiq.setup",
"django_dramatiq.tasks",
"tests.testapp1.tasks",
"tests.testapp2.tasks",
"tests.testapp3.tasks.other_tasks",
"tests.testapp3.tasks.tasks",
"tests.testapp3.tasks.utils",
"tests.testapp3.tasks.utils.not_a_task",
])
@patch("os.execvp")
def test_rundramatiq_can_run_dramatiq_with_polling(execvp_mock):
# Given an output buffer
buff = StringIO()
# When I call the rundramatiq command with --reload-use-polling
call_command("rundramatiq", "--reload", "--reload-use-polling", stdout=buff)
# Then execvp should be called with the appropriate arguments
cores = str(rundramatiq.CPU_COUNT)
expected_exec_name = "dramatiq"
expected_exec_path = os.path.join(
os.path.dirname(sys.executable),
expected_exec_name,
)
execvp_mock.assert_called_once_with(expected_exec_path, [
expected_exec_name, "--path", ".", "--processes", cores, "--threads", cores,
"--watch", ".",
"--watch-use-polling",
"django_dramatiq.setup",
"django_dramatiq.tasks",
"tests.testapp1.tasks",
"tests.testapp2.tasks",
"tests.testapp3.tasks.other_tasks",
"tests.testapp3.tasks.tasks",
"tests.testapp3.tasks.utils",
"tests.testapp3.tasks.utils.not_a_task",
])
@patch("os.execvp")
def test_rundramatiq_can_run_dramatiq_with_only_some_queues(execvp_mock):
# Given an output buffer
buff = StringIO()
# When I call the rundramatiq command with --queues
call_command("rundramatiq", "--queues", "A B C", stdout=buff)
# Then execvp should be called with the appropriate arguments
cores = str(rundramatiq.CPU_COUNT)
expected_exec_name = "dramatiq"
expected_exec_path = os.path.join(
os.path.dirname(sys.executable),
expected_exec_name,
)
execvp_mock.assert_called_once_with(expected_exec_path, [
expected_exec_name, "--path", ".", "--processes", cores, "--threads", cores,
"django_dramatiq.setup",
"django_dramatiq.tasks",
"tests.testapp1.tasks",
"tests.testapp2.tasks",
"tests.testapp3.tasks.other_tasks",
"tests.testapp3.tasks.tasks",
"tests.testapp3.tasks.utils",
"tests.testapp3.tasks.utils.not_a_task",
"--queues", "A B C"
])
@patch("os.execvp")
def test_rundramatiq_can_run_dramatiq_with_specified_pid_file(execvp_mock):
# Given an output buffer
buff = StringIO()
# When I call the rundramatiq command with --pid-file
call_command("rundramatiq", "--pid-file", "drama.pid", stdout=buff)
# Then execvp should be called with the appropriate arguments
cores = str(rundramatiq.CPU_COUNT)
expected_exec_name = "dramatiq"
expected_exec_path = os.path.join(
os.path.dirname(sys.executable),
expected_exec_name,
)
execvp_mock.assert_called_once_with(expected_exec_path, [
expected_exec_name, "--path", ".", "--processes", cores, "--threads", cores,
"django_dramatiq.setup",
"django_dramatiq.tasks",
"tests.testapp1.tasks",
"tests.testapp2.tasks",
"tests.testapp3.tasks.other_tasks",
"tests.testapp3.tasks.tasks",
"tests.testapp3.tasks.utils",
"tests.testapp3.tasks.utils.not_a_task",
"--pid-file", "drama.pid"
])
@patch("os.execvp")
def test_rundramatiq_can_run_dramatiq_with_specified_log_file(execvp_mock):
# Given an output buffer
buff = StringIO()
# When I call the rundramatiq command with --log-file
call_command("rundramatiq", "--log-file", "drama.log", stdout=buff)
# Then execvp should be called with the appropriate arguments
cores = str(rundramatiq.CPU_COUNT)
expected_exec_name = "dramatiq"
expected_exec_path = os.path.join(
os.path.dirname(sys.executable),
expected_exec_name,
)
execvp_mock.assert_called_once_with(expected_exec_path, [
expected_exec_name, "--path", ".", "--processes", cores, "--threads", cores,
"django_dramatiq.setup",
"django_dramatiq.tasks",
"tests.testapp1.tasks",
"tests.testapp2.tasks",
"tests.testapp3.tasks.other_tasks",
"tests.testapp3.tasks.tasks",
"tests.testapp3.tasks.utils",
"tests.testapp3.tasks.utils.not_a_task",
"--log-file", "drama.log"
])
@patch("os.execvp")
def test_rundramatiq_can_ignore_modules(execvp_mock, settings):
# Given an output buffer
buff = StringIO()
# And 'tests.testapp2.tasks' in DRAMATIQ_IGNORED_MODULES
# And 'tests.testapp3.tasks.other_tasks' in DRAMATIQ_IGNORED_MODULES
settings.DRAMATIQ_IGNORED_MODULES = (
"tests.testapp2.tasks",
"tests.testapp3.tasks.other_tasks",
"tests.testapp3.tasks.utils.*",
)
# When I call the rundramatiq command
call_command("rundramatiq", stdout=buff)
# Then stdout should contain a message about ignored task modules
assert "Discovered tasks module: 'tests.testapp1.tasks'" in buff.getvalue()
assert "Discovered tasks module: 'tests.testapp3.tasks.tasks'" in buff.getvalue()
assert "Discovered tasks module: 'tests.testapp3.tasks.utils'" in buff.getvalue()
assert "Ignored tasks module: 'tests.testapp2.tasks'" in buff.getvalue()
assert "Ignored tasks module: 'tests.testapp3.tasks.other_tasks'" in buff.getvalue()
assert "Ignored tasks module: 'tests.testapp3.tasks.utils.not_a_task'" in buff.getvalue()
# And execvp should be called with the appropriate arguments
cores = str(rundramatiq.CPU_COUNT)
expected_exec_name = "dramatiq"
expected_exec_path = os.path.join(
os.path.dirname(sys.executable),
expected_exec_name,
)
execvp_mock.assert_called_once_with(expected_exec_path, [
expected_exec_name, "--path", ".", "--processes", cores, "--threads", cores,
"django_dramatiq.setup",
"django_dramatiq.tasks",
"tests.testapp1.tasks",
"tests.testapp3.tasks.tasks",
"tests.testapp3.tasks.utils",
])
@patch("os.execvp")
def test_rundramatiq_can_fork(execvp_mock, settings):
# Given an output buffer
buff = StringIO()
# When I call the rundramatiq command with --fork-function
call_command("rundramatiq", "--fork-function", "a", "--fork-function", "b", stdout=buff)
# Then execvp should be called with the appropriate arguments
cores = str(rundramatiq.CPU_COUNT)
expected_exec_name = "dramatiq"
expected_exec_path = os.path.join(
os.path.dirname(sys.executable),
expected_exec_name,
)
execvp_mock.assert_called_once_with(expected_exec_path, [
expected_exec_name, "--path", ".", "--processes", cores, "--threads", cores,
"--fork-function", "a",
"--fork-function", "b",
"django_dramatiq.setup",
"django_dramatiq.tasks",
"tests.testapp1.tasks",
"tests.testapp2.tasks",
"tests.testapp3.tasks.other_tasks",
"tests.testapp3.tasks.tasks",
"tests.testapp3.tasks.utils",
"tests.testapp3.tasks.utils.not_a_task",
])
def test_rundramatiq_command_autodiscovers_additional_modules(settings):
settings.DRAMATIQ_AUTODISCOVER_MODULES = ("services", )
assert rundramatiq.Command().discover_tasks_modules() == [
"django_dramatiq.setup",
"django_dramatiq.tasks",
"tests.testapp1.tasks",
"tests.testapp2.tasks",
"tests.testapp3.tasks.other_tasks",
"tests.testapp3.tasks.tasks",
"tests.testapp3.tasks.utils",
"tests.testapp3.tasks.utils.not_a_task",
"tests.testapp4.services",
]
| 35.122449
| 93
| 0.673833
| 0
| 0
| 0
| 0
| 9,155
| 0.886597
| 0
| 0
| 4,946
| 0.478985
|
0c5c924b0477b69417c6a0474627207f48573e2f
| 3,620
|
py
|
Python
|
WordRPG/data/states/new_game.py
|
ChristopherLBruce/WordRPG
|
e545cf313afc430e8191a7c813db9ee9759a6fd4
|
[
"Apache-2.0"
] | 2
|
2018-12-15T15:06:35.000Z
|
2022-02-09T00:19:28.000Z
|
WordRPG/data/states/new_game.py
|
ChristopherLBruce/WordRPG
|
e545cf313afc430e8191a7c813db9ee9759a6fd4
|
[
"Apache-2.0"
] | null | null | null |
WordRPG/data/states/new_game.py
|
ChristopherLBruce/WordRPG
|
e545cf313afc430e8191a7c813db9ee9759a6fd4
|
[
"Apache-2.0"
] | null | null | null |
""" 'new_game' state. Includes character creation. """
from ...engine.gui.screen import const, Screen
from ...engine.state_machine import State
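# New_Game draws the character-selection menu; the Character_* classes below are placeholder screens.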
class New_Game(State):
def __init__(self):
""" Initiailize class and super class """
super(New_Game, self).__init__()
self.screen = self._init_screen()
def _init_screen(self):
""" Create the main game screen """
screen = Screen()
# creates standard double line frame around whole screen
screen.add_frame(size=(80, 30), offset=(0, 0),
fgcolor='BLUE', bgcolor='BLACK')
# add menu
menu = {
'text_format' : {'fgcolor':'CYAN','bgcolor':'BLACK','style':'NORMAL'},
'hotkey_format' : {'fgcolor':'YELLOW','bgcolor':'BLACK','style':'BRIGHT'},
'encap' : '()',
'sep' : ' - ',
'options' : [
('BOB THE WARRIOR', '1'),
('TIM THE MAGE', '2'),
# ('race', 'r'),
('SUE THE ARCHER', '3'),
('PEG THE ASSASSIN', '4'),
('start', 's'),
]
}
screen.add_menu(menu, offset=('center',12))
screen.add_footer()
return screen
def update_screen(self):
""" Draws the screen """
self.screen.draw()
def on_event(self, event, prev_state):
""" Handles events that are delegated to this State. """
self.update_screen()
while True:
key = self.get_key_press()
# if key == 'n':
# return 'character_name'
# if key == 'c':
# return 'character_class'
# if key == 'g':
# return 'character_gender'
if key == 's':
return 'game'
if key == 'esc':
return 'main_menu'
return self
class Character_Name(State):
def __init__(self):
""" Initiailize class and super class """
super(Character_Name, self).__init__()
def update_screen(self):
""" Draws the screen """
gui.main.clear()
print('PLACEHOLDER SCREEN FOR ENTERING CHARACTER NAME')
print('PRESS ANY KEY TO RETURN TO NEW CHARACTER SCREEN.')
def on_event(self, event, prev_state):
""" Handles events that are delegated to this State. """
self.update_screen()
self.wait_for_keypress()
return 'new_game'
class Character_Gender(State):
def __init__(self):
""" Initiailize class and super class """
super(Character_Gender, self).__init__()
def update_screen(self):
""" Draws the screen """
gui.main.clear()
print('PLACEHOLDER SCREEN FOR SELECTING CHARACTER GENDER')
print('PRESS ANY KEY TO RETURN TO NEW CHARACTER SCREEN.')
def on_event(self, event, prev_state):
""" Handles events that are delegated to this State. """
self.update_screen()
self.wait_for_keypress()
return 'new_game'
class Character_Class(State):
def __init__(self):
""" Initiailize class and super class """
super(Character_Class, self).__init__()
def update_screen(self):
""" Draws the screen """
gui.main.clear()
print('PLACEHOLDER SCREEN FOR SELECTING CHARACTER CLASS')
print('PRESS ANY KEY TO RETURN TO NEW CHARACTER SCREEN.')
def on_event(self, event, prev_state):
""" Handles events that are delegated to this State. """
self.update_screen()
self.wait_for_keypress()
return 'new_game'
| 27.424242
| 86
| 0.551381
| 3,458
| 0.955249
| 0
| 0
| 0
| 0
| 0
| 0
| 1,408
| 0.38895
|
0c5da302d0cfb597c70f8c34fe51028d86ae2e18
| 2,106
|
py
|
Python
|
guestbook.py
|
Tycx2ry/FKRTimeline
|
11e784f4a3800336abf19c42c15a06c86af970bd
|
[
"Apache-2.0"
] | null | null | null |
guestbook.py
|
Tycx2ry/FKRTimeline
|
11e784f4a3800336abf19c42c15a06c86af970bd
|
[
"Apache-2.0"
] | null | null | null |
guestbook.py
|
Tycx2ry/FKRTimeline
|
11e784f4a3800336abf19c42c15a06c86af970bd
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'jiangge'
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from flask import Flask, request, render_template, redirect
application = Flask(__name__)
application.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///guestbook.db'
application.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(application)
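# A guestbook entry: author name, comment text, optional URL and creation timestamp.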
class posts(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
comment = db.Column(db.Text)
url = db.Column(db.Text)
create_at = db.Column(db.DateTime)
def __init__(self, name, comment, url, create_at):
self.name = name
self.comment = comment
self.url = url
self.create_at = create_at
def save_data(name, comment, url, create_at):
"""
save data from form submitted
"""
db.session.add(posts(name, comment, url, create_at))
db.session.commit()
def load_data(page):
"""
load saved data
"""
record_list = posts.query.paginate(page, per_page=5, error_out=True)
return record_list
@application.route('/', methods=['GET', 'POST'])
@application.route('/index/', methods=['GET', 'POST'])
@application.route('/index/<int:page>', methods=['GET', 'POST'])
def index(page = 1):
"""Top page
Use template to show the page
"""
record_list = load_data(page)
return render_template('index.html', record_list=record_list.items)
@application.route('/post', methods=['POST'])
def post():
"""Comment's target url
"""
name = request.form.get('name')
comment = request.form.get('comment')
url = request.form.get('url')
create_at = datetime.now()
save_data(name, comment, url, create_at)
return redirect('/')
if __name__ == '__main__':
if True:
db.drop_all()
db.create_all()
# Seed the guestbook with a placeholder first entry (name, comment, url, timestamp).
db.session.add(posts('admin', 'Welcome to the guestbook!', '', datetime.now()))
db.session.commit()
application.run('0.0.0.0', port=80, debug=True)
| 27.350649
| 73
| 0.632479
| 393
| 0.18661
| 0
| 0
| 681
| 0.323362
| 0
| 0
| 451
| 0.21415
|
0c5db28673060acc0246927ee800263dd3a7f124
| 707
|
py
|
Python
|
dash_test_runner/testapp/migrations/0001_initial.py
|
Ilhasoft/dash
|
d9b900cc08d9238304a226d837a4c90dec6b46fc
|
[
"BSD-3-Clause"
] | null | null | null |
dash_test_runner/testapp/migrations/0001_initial.py
|
Ilhasoft/dash
|
d9b900cc08d9238304a226d837a4c90dec6b46fc
|
[
"BSD-3-Clause"
] | null | null | null |
dash_test_runner/testapp/migrations/0001_initial.py
|
Ilhasoft/dash
|
d9b900cc08d9238304a226d837a4c90dec6b46fc
|
[
"BSD-3-Clause"
] | 1
|
2018-04-12T20:18:34.000Z
|
2018-04-12T20:18:34.000Z
|
from django.db import migrations, models
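# Initial migration for the test app: a minimal Contact model linked to an Org.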
class Migration(migrations.Migration):
dependencies = [
('orgs', '0015_auto_20160209_0926'),
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('uuid', models.CharField(unique=True, max_length=36)),
('name', models.CharField(max_length=128, verbose_name='Name')),
('is_active', models.BooleanField(default=True)),
('org', models.ForeignKey(to='orgs.Org', on_delete=models.PROTECT)),
],
),
]
| 32.136364
| 114
| 0.575672
| 663
| 0.937765
| 0
| 0
| 0
| 0
| 0
| 0
| 92
| 0.130127
|
0c5e4893a61a507b2525a971a14202b85e75581a
| 6,596
|
py
|
Python
|
tests/test_integration.py
|
Radico/business-rules
|
7dd0551e8b33234fcea0abaf04f9982eb6f3426f
|
[
"MIT"
] | null | null | null |
tests/test_integration.py
|
Radico/business-rules
|
7dd0551e8b33234fcea0abaf04f9982eb6f3426f
|
[
"MIT"
] | null | null | null |
tests/test_integration.py
|
Radico/business-rules
|
7dd0551e8b33234fcea0abaf04f9982eb6f3426f
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from business_rules.actions import rule_action, BaseActions
from business_rules.engine import check_condition, run_all
from business_rules.fields import FIELD_TEXT, FIELD_NUMERIC, FIELD_SELECT
from business_rules.variables import BaseVariables, string_rule_variable, numeric_rule_variable, boolean_rule_variable
from . import TestCase
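# Test doubles: variable and action classes decorated the way the rules engine expects.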
class SomeVariables(BaseVariables):
@string_rule_variable()
def foo(self):
return "foo"
@numeric_rule_variable(label="Diez")
def ten(self):
return 10
@boolean_rule_variable()
def true_bool(self):
return True
@numeric_rule_variable(params=[{'field_type': FIELD_NUMERIC, 'name': 'x', 'label': 'X'}])
def x_plus_one(self, x):
return x + 1
@boolean_rule_variable()
def rule_received(self, **kwargs):
rule = kwargs.get('rule')
assert rule is not None
return rule is not None
@string_rule_variable(label="StringLabel", options=['one', 'two', 'three'])
def string_variable_with_options(self):
return "foo"
@string_rule_variable(public=False)
def private_string_variable(self):
return 'foo'
class SomeActions(BaseActions):
@rule_action(params={"foo": FIELD_NUMERIC})
def some_action(self, foo): pass
@rule_action(label="woohoo", params={"bar": FIELD_TEXT})
def some_other_action(self, bar): pass
@rule_action(params=[
{
'field_type': FIELD_SELECT,
'name': 'baz',
'label': 'Baz',
'options': [
{'label': 'Chose Me', 'name': 'chose_me'},
{'label': 'Or Me', 'name': 'or_me'}
]
}])
def some_select_action(self, baz): pass
@rule_action()
def action_with_no_params(self): pass
class IntegrationTests(TestCase):
""" Integration test, using the library like a user would.
"""
def test_true_boolean_variable(self):
condition = {
'name': 'true_bool',
'operator': 'is_true',
'value': ''
}
rule = {
'conditions': condition
}
condition_result = check_condition(condition, SomeVariables(), rule)
self.assertTrue(condition_result.result)
def test_false_boolean_variable(self):
condition = {
'name': 'true_bool',
'operator': 'is_false',
'value': ''
}
rule = {
'conditions': condition
}
condition_result = check_condition(condition, SomeVariables(), rule)
self.assertFalse(condition_result.result)
def test_check_true_condition_happy_path(self):
condition = {'name': 'foo',
'operator': 'contains',
'value': 'o'}
rule = {
'conditions': condition
}
condition_result = check_condition(condition, SomeVariables(), rule)
self.assertTrue(condition_result.result)
def test_check_false_condition_happy_path(self):
condition = {'name': 'foo',
'operator': 'contains',
'value': 'm'}
rule = {
'conditions': condition
}
condition_result = check_condition(condition, SomeVariables(), rule)
self.assertFalse(condition_result.result)
def test_numeric_variable_with_params(self):
condition = {
'name': 'x_plus_one',
'operator': 'equal_to',
'value': 10,
'params': {'x': 9}
}
rule = {
'conditions': condition
}
condition_result = check_condition(condition, SomeVariables(), rule)
self.assertTrue(condition_result.result)
def test_check_incorrect_method_name(self):
condition = {
'name': 'food',
'operator': 'equal_to',
'value': 'm'
}
rule = {
'conditions': condition
}
err_string = 'Variable food is not defined in class SomeVariables'
with self.assertRaisesRegexp(AssertionError, err_string):
check_condition(condition, SomeVariables(), rule)
def test_check_incorrect_operator_name(self):
condition = {
'name': 'foo',
'operator': 'equal_tooooze',
'value': 'foo'
}
rule = {
'conditions': condition
}
with self.assertRaises(AssertionError):
check_condition(condition, SomeVariables(), rule)
def test_check_missing_params(self):
condition = {
'name': 'x_plus_one',
'operator': 'equal_to',
'value': 10,
'params': {}
}
rule = {
'conditions': condition
}
err_string = 'Missing parameters x for variable x_plus_one'
with self.assertRaisesRegexp(AssertionError, err_string):
check_condition(condition, SomeVariables(), rule)
def test_check_invalid_params(self):
condition = {
'name': 'x_plus_one',
'operator': 'equal_to',
'value': 10,
'params': {'x': 9, 'y': 9}
}
rule = {
'conditions': condition
}
err_string = 'Invalid parameters y for variable x_plus_one'
with self.assertRaisesRegexp(AssertionError, err_string):
check_condition(condition, SomeVariables(), rule)
def test_variable_received_rules(self):
condition = {
'name': 'rule_received',
'operator': 'is_true',
'value': 'true',
}
rule = {
'conditions': condition
}
condition_result = check_condition(condition, SomeVariables(), rule)
self.assertTrue(condition_result)
def test_string_variable_with_options_with_wrong_value(self):
condition = {
'name': 'string_variable_with_options',
'operator': 'equal_to',
'value': 'foo',
}
rule = {
'conditions': condition
}
condition_result = check_condition(condition, SomeVariables(), rule)
self.assertTrue(condition_result)
def test_run_with_no_conditions(self):
actions = [
{
'name': 'action_with_no_params'
}
]
rule = {
'actions': actions
}
result = run_all(rule_list=[rule], defined_variables=SomeVariables(), defined_actions=SomeActions())
self.assertTrue(result)
| 27.256198
| 118
| 0.575045
| 6,213
| 0.941935
| 0
| 0
| 1,307
| 0.19815
| 0
| 0
| 1,143
| 0.173287
|
0c5f5d9ac8242efc8ccf5bafaa6e567b8ee2cc86
| 5,808
|
py
|
Python
|
cog/cli/user_argparser.py
|
Demonware/cog
|
b206066ebfd5faae000b1a1708988db8ca592b94
|
[
"BSD-3-Clause"
] | 2
|
2016-06-02T02:15:56.000Z
|
2016-08-16T08:37:27.000Z
|
cog/cli/user_argparser.py
|
Demonware/cog
|
b206066ebfd5faae000b1a1708988db8ca592b94
|
[
"BSD-3-Clause"
] | null | null | null |
cog/cli/user_argparser.py
|
Demonware/cog
|
b206066ebfd5faae000b1a1708988db8ca592b94
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import argparse
arg_no = len(sys.argv)
tool_parser = argparse.ArgumentParser(add_help=False)
tool_subparsers = tool_parser.add_subparsers(help='commands', dest='command')
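# One sub-command per account operation: rename, add, show, edit, retire, type and remove.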
# The rename command.
rename_parser = tool_subparsers.add_parser('rename', help='rename an existing user account.')
rename_parser.add_argument(
'name', action='store', metavar='<name>', help='account name'
)
rename_parser.add_argument(
'--new-name', '-n', action='store', dest='newName', metavar='<new account name>'
)
# The add command.
add_parser = tool_subparsers.add_parser('add', help='add new user account to the directory.')
add_parser.add_argument(
'--type', '-t', action='store', default='generic', dest='account_type', metavar='<type of account>'
)
add_parser.add_argument(
'name', action='store', help='account name', metavar='<name>'
)
group1_parser = add_parser.add_argument_group('account specific')
group1_parser.add_argument(
'--password', '-P', action='store', dest='userPassword', metavar='<account\'s owner password>'
)
group1_parser.add_argument(
'--home', action='store', dest='homeDirectory', metavar='<path to the home directory>'
)
group1_parser.add_argument(
'--shell', action='store', dest='loginShell', metavar='<path to the shell interpreter>'
)
group1_parser = add_parser.add_argument_group('personal information')
group1_parser.add_argument(
'--phone-no', action='append', dest='telephoneNumber', metavar='<phone number>'
)
group1_parser.add_argument(
'--last-name', action='store', dest='sn', metavar='<account owner\'s last name>'
)
group1_parser.add_argument(
'--first-name', action='store', dest='givenName', metavar='<account owner\'s first name>'
)
group1_parser.add_argument(
'--organization', '-o', action='store', dest='o', metavar='<organization>'
)
group1_parser.add_argument(
'--email', action='append', dest='mail', metavar='<email>'
)
group1_parser.add_argument(
'--full-name', action='store', dest='cn', metavar='<account owner\'s full name>'
)
group1_parser = add_parser.add_argument_group('uid and group management')
group1_parser.add_argument(
'--uid', action='store', dest='uid', metavar='<user\'s uid>'
)
group1_parser.add_argument(
'--add-group', action='append', dest='group', metavar='<secondary group>'
)
group1_parser.add_argument(
'--uid-number', action='store', dest='uidNumber', metavar='<user id number>'
)
group1_parser.add_argument(
'--gid', action='store', dest='gidNumber', metavar='<primary group id>'
)
# The show command.
show_parser = tool_subparsers.add_parser('show', help='show account data')
show_parser.add_argument(
'name', action='append', nargs='*', help='account name'
)
show_parser.add_argument(
'--verbose', '-v', action='store_true', dest='verbose', help='be verbose about it'
)
# The edit command.
edit_parser = tool_subparsers.add_parser('edit', help='edit existing user data in the directory')
edit_parser.add_argument(
'--type', '-t', action='store', dest='account_type', metavar='<change account type>'
)
edit_parser.add_argument(
'name', action='store', help='account name'
)
group1_parser = edit_parser.add_argument_group('account specific')
group1_parser.add_argument(
'--reset-password', '-r', dest='resetPassword', action='store_true', help='<reset user\'s password>'
)
group1_parser.add_argument(
'--home', action='store', dest='homeDirectory', metavar='<new home directory path>'
)
group1_parser.add_argument(
'--shell', action='store', dest='loginShell', metavar='<new shell interpreter path>'
)
group1_parser = edit_parser.add_argument_group('personal information')
group1_parser.add_argument(
'--first-name', action='store', dest='givenName', metavar='<new first name>'
)
group1_parser.add_argument(
'--del-email', action='append', dest='delMail', metavar='<remove email address>'
)
group1_parser.add_argument(
'--last-name', action='store', dest='sn', metavar='<new last name>'
)
group1_parser.add_argument(
'--add-email', action='append', dest='addMail', metavar='<add new email address>'
)
group1_parser.add_argument(
'--del-phone-no', action='append', dest='delTelephoneNumber', metavar='<phone number to remove>'
)
group1_parser.add_argument(
'--organization', '-o', action='store', dest='o', metavar='<organization>'
)
group1_parser.add_argument(
'--add-phone-no', action='append', dest='addTelephoneNumber', metavar='<phone number to add>'
)
group1_parser.add_argument(
'--full-name', action='store', dest='cn', metavar='<new full name>'
)
group1_parser = edit_parser.add_argument_group('uid and group management')
group1_parser.add_argument(
'--del-group', action='append', dest='delgroup', metavar='<remove user from the group>'
)
group1_parser.add_argument(
'--group-id', action='store', dest='gidNumber', metavar='<change primary group ID>'
)
group1_parser.add_argument(
'--add-group', action='append', dest='addgroup', metavar='<add user to the group>'
)
group1_parser.add_argument(
'--uid-number', action='store', dest='uidNumber', metavar='<change user ID number>'
)
group1_parser.add_argument(
'--uid', action='store', dest='uid', metavar='<user\'s uid>'
)
# The retire command.
retire_parser = tool_subparsers.add_parser('retire', help='retire an existing account and remove all its privileges.')
retire_parser.add_argument(
'name', action='store', metavar='<name>', help='account name'
)
# The type command.
type_parser = tool_subparsers.add_parser('type', help='manage user types')
type_parser.add_argument(
'--list', '-l', action='store_true', dest='list_types', help='list user types'
)
# The remove command.
remove_parser = tool_subparsers.add_parser('remove', help='remove an existing account.')
remove_parser.add_argument(
'name', action='store', metavar='<name>', help='account name'
)
| 35.2
| 118
| 0.722968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,622
| 0.451446
|
0c605a349671fad2588ca9a0e3c2afed9c2453f5
| 6,235
|
py
|
Python
|
custom_components/wisersmart/climate.py
|
tomtomfx/wiserSmartForHA
|
9878840b073250302e583bd2f6040a825de97803
|
[
"MIT"
] | 1
|
2020-10-06T19:49:59.000Z
|
2020-10-06T19:49:59.000Z
|
custom_components/wisersmart/climate.py
|
tomtomfx/wiserSmartForHA
|
9878840b073250302e583bd2f6040a825de97803
|
[
"MIT"
] | 1
|
2020-10-06T20:18:32.000Z
|
2020-10-24T19:50:53.000Z
|
custom_components/wisersmart/climate.py
|
tomtomfx/wiserSmartForHA
|
9878840b073250302e583bd2f6040a825de97803
|
[
"MIT"
] | 1
|
2021-04-12T16:37:40.000Z
|
2021-04-12T16:37:40.000Z
|
"""
Climate Platform Device for Wiser Smart
https://github.com/tomtomfx/wiserSmartForHA
thomas.fayoux@gmail.com
"""
import asyncio
import logging
import voluptuous as vol
from functools import partial
from ruamel.yaml import YAML as yaml
from homeassistant.components.climate import ClimateEntity
from homeassistant.core import callback
from homeassistant.components.climate.const import (
SUPPORT_TARGET_TEMPERATURE,
ATTR_CURRENT_TEMPERATURE,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from .const import (
_LOGGER,
DOMAIN,
MANUFACTURER,
ROOM,
WISER_SMART_SERVICES,
)
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Wiser climate device"""
data = hass.data[DOMAIN]
wiser_rooms = [
WiserSmartRoom(hass, data, room) for room in data.wiserSmart.getWiserRoomsThermostat()
]
async_add_entities(wiser_rooms, True)
""" Definition of WiserSmartRoom """
class WiserSmartRoom(ClimateEntity):
def __init__(self, hass, data, room_id):
"""Initialize the sensor."""
self.data = data
self.hass = hass
self.current_temp = None
self.target_temp = None
self.room_id = room_id
self._force_update = False
self._hvac_modes_list = [HVAC_MODE_HEAT, HVAC_MODE_OFF]
_LOGGER.info(
"WiserSmart Room: Initialisation for {}".format(self.room_id)
)
async def async_update(self):
_LOGGER.debug("WiserSmartRoom: Update requested for {}".format(self.name))
if self._force_update:
await self.data.async_update(no_throttle=True)
self._force_update = False
room = self.data.wiserSmart.getWiserRoomInfo(self.room_id)
self.current_temp = room.get("currentValue")
self.target_temp = room.get("targetValue")
if self.target_temp is None:
self.target_temp = -1
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def should_poll(self):
return False
@property
def state(self):
room = self.data.wiserSmart.getWiserRoomInfo(self.room_id)
self.current_temp = room.get("currentValue")
self.target_temp = room.get("targetValue")
if self.target_temp is None:
self.target_temp = -1
if self.current_temp < self.target_temp:
state = HVAC_MODE_HEAT
else:
state = HVAC_MODE_OFF
return state
@property
def name(self):
return "WiserSmart - Thermostat - " + self.room_id
@property
def temperature_unit(self):
return TEMP_CELSIUS
@property
def min_temp(self):
return self.data.minimum_temp
@property
def max_temp(self):
return self.data.maximum_temp
@property
def current_temperature(self):
temp = self.data.wiserSmart.getWiserRoomInfo(self.room_id).get("currentValue")
return temp
@property
def icon(self):
# Change icon to show if radiator is heating, not heating or set to off.
room = self.data.wiserSmart.getWiserRoomInfo(self.room_id)
self.current_temp = room.get("currentValue")
self.target_temp = room.get("targetValue")
if self.target_temp is None:
self.target_temp = -1
if self.current_temp < self.target_temp:
return "mdi:radiator"
else:
return "mdi:radiator-off"
@property
def unique_id(self):
return "WiserSmartRoom - {}".format(self.room_id)
@property
def device_info(self):
"""Return device specific attributes."""
return {
"name": self.name,
"identifiers": {(DOMAIN, self.unique_id)},
"manufacturer": MANUFACTURER,
"model": "Wiser Smart Room",
}
@property
def hvac_mode(self):
state = self.state()
return state
@property
def hvac_modes(self):
"""Return the list of available operation modes."""
return self._hvac_modes_list
@property
def target_temperature(self):
return self.data.wiserSmart.getWiserRoomInfo(self.room_id).get("targetValue")
@property
def state_attributes(self):
"""Return state attributes."""
# Generic attributes
attrs = super().state_attributes
        # If the room has valve actuators (VACT), return per-valve info
i = 1
valves = self.data.wiserSmart.getWiserRoomInfo(self.room_id).get("valve")
if (valves == None):
return attrs
for valve in valves:
attrs["valvePosition_" + str(i)] = valve.get("valvePosition")
attrs["calibrationStatus_" + str(i)] = valve.get("calibrationStatus")
attrs["internalTemp_" + str(i)] = valve.get("internalTemp")
i = i + 1
return attrs
async def async_set_temperature(self, **kwargs):
"""Set new target temperatures."""
target_temperature = kwargs.get(ATTR_TEMPERATURE)
if target_temperature is None:
_LOGGER.debug(
"No target temperature set for {}".format(self.name)
)
return False
_LOGGER.debug(
"Setting temperature for {} to {}".format(self.name, target_temperature)
)
await self.hass.async_add_executor_job(
partial(self.data.wiserSmart.setWiserRoomTemp, self.room_id, target_temperature)
)
self._force_update = True
await self.async_update_ha_state(True)
return True
async def async_added_to_hass(self):
"""Subscribe for update from the Controller"""
async def async_update_state():
"""Update sensor state."""
await self.async_update_ha_state(True)
async_dispatcher_connect(self.hass, "WiserSmartUpdateMessage", async_update_state)
| 28.865741
| 94
| 0.645549
| 5,001
| 0.802085
| 0
| 0
| 2,907
| 0.466239
| 1,800
| 0.288693
| 1,133
| 0.181716
|
0c60917a4d7a8f1d00442aa352ab85caf9e37f11
| 4,765
|
py
|
Python
|
src/dataset/utils/process_df.py
|
Fkaneko/kaggle_g2net_gravitational_wave_detection-
|
8bb32cc675e6b56171da8a3754fffeda41e934bb
|
[
"Apache-2.0"
] | null | null | null |
src/dataset/utils/process_df.py
|
Fkaneko/kaggle_g2net_gravitational_wave_detection-
|
8bb32cc675e6b56171da8a3754fffeda41e934bb
|
[
"Apache-2.0"
] | null | null | null |
src/dataset/utils/process_df.py
|
Fkaneko/kaggle_g2net_gravitational_wave_detection-
|
8bb32cc675e6b56171da8a3754fffeda41e934bb
|
[
"Apache-2.0"
] | null | null | null |
import os
from functools import partial
from multiprocessing import Pool
from typing import Any, Callable, Dict, List, Optional
import numpy as np
import pandas as pd
from tqdm import tqdm
from src.dataset.utils.waveform_preprocessings import preprocess_strain
def id_2_path(
image_id: str,
is_train: bool = True,
data_dir: str = "../input/g2net-gravitational-wave-detection",
) -> str:
"""
    modified from https://www.kaggle.com/ihelon/g2net-eda-and-modeling
"""
folder = "train" if is_train else "test"
return "{}/{}/{}/{}/{}/{}.npy".format(
data_dir, folder, image_id[0], image_id[1], image_id[2], image_id
)
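# Worked example (the id "00000e74ad" is hypothetical; default data_dir assumed):
#   id_2_path("00000e74ad") ->
#   "../input/g2net-gravitational-wave-detection/train/0/0/0/00000e74ad.npy"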
def path_2_id(path: str) -> str:
return os.path.basename(path).replace(".npy", "")
def add_dir(df: pd.DataFrame) -> pd.DataFrame:
df["top_dir"] = df["id"].apply(lambda x: x[0])
df["bottom_dir"] = df["id"].apply(lambda x: x[:3])
return df
def add_data_path(
df: pd.DataFrame,
is_train: bool = False,
data_dir: str = "../input/g2net-gravitational-wave-detection",
) -> pd.DataFrame:
df = add_dir(df=df)
df["path"] = df["id"].apply(
lambda x: id_2_path(image_id=x, is_train=is_train, data_dir=data_dir)
)
return df
def get_agg_feats(
path: str,
interp_psd: Optional[Callable] = None,
psds: Optional[np.ndarray] = None,
window: str = "tukey",
fs: int = 2048,
fband: List[int] = [10, 912],
psd_cache_path_suffix: Optional[str] = None,
T: float = 2.0,
) -> Dict[str, Any]:
sample_data = np.load(path)
data_id = path_2_id(path)
if interp_psd is None:
for i, strain in enumerate(sample_data):
_, strain_bp = preprocess_strain(
strain=strain,
interp_psd=interp_psd,
psd=psds[i],
window=window,
fs=fs,
fband=fband,
)
sample_data[i] = strain_bp
mean = sample_data.mean(axis=-1)
std = sample_data.std(axis=-1)
minim = sample_data.min(axis=-1)
maxim = sample_data.max(axis=-1)
ene = (sample_data ** 2).sum(axis=-1)
agg_dict = {
"id": data_id,
"mean_site0": mean[0],
"mean_site1": mean[1],
"mean_site2": mean[2],
"std_site0": std[0],
"std_site1": std[1],
"std_site2": std[2],
"min_site0": minim[0],
"min_site1": minim[1],
"min_site2": minim[2],
"max_site0": maxim[0],
"max_site1": maxim[1],
"max_site2": maxim[2],
"ene_site0": ene[0],
"ene_site1": ene[1],
"ene_site2": ene[2],
}
if psd_cache_path_suffix is not None:
cache_path = path.replace(".npy", psd_cache_path_suffix)
if os.path.exists(cache_path):
psd = np.load(cache_path)
psd_ranges = [10, 35, 350, 500, 912]
psd_hz_begin = 0
for psd_hz_end in psd_ranges:
psd_mean = psd[:, int(psd_hz_begin * T) : int(psd_hz_end * T)].mean(
axis=-1
)
for site_id, psd_mean_for_site in enumerate(psd_mean):
agg_dict[
f"psd_{psd_hz_begin}-{psd_hz_end}hz_site{site_id}"
] = psd_mean_for_site
psd_hz_begin = psd_hz_end
for site_id, psd_mean_for_site in enumerate(psd.mean(axis=-1)):
agg_dict[f"psd_all-hz_site{site_id}"] = psd_mean_for_site
return agg_dict
def get_site_metrics(
df: pd.DataFrame,
interp_psd: Optional[Callable] = None,
psds: Optional[np.ndarray] = None,
window: str = "tukey",
fs: int = 2048,
fband: List[int] = [10, 912],
psd_cache_path_suffix: Optional[str] = None,
num_workers: int = 8,
):
"""
Compute for each id the metrics for each site.
df: the complete df
    modified from
https://www.kaggle.com/andradaolteanu/g2net-searching-the-sky-pytorch-effnet-w-meta
"""
func_ = partial(
get_agg_feats,
interp_psd=interp_psd,
psds=psds,
window=window,
fs=fs,
fband=fband,
psd_cache_path_suffix=psd_cache_path_suffix,
)
if num_workers > 1:
with Pool(processes=num_workers) as pool:
agg_dicts = list(
tqdm(
pool.imap(func_, df["path"].tolist()),
total=len(df),
)
)
else:
agg_dicts = []
for ID, path in tqdm(zip(df["id"].values, df["path"].values)):
            # First extract the chronological info
agg_dict = func_(path=path)
agg_dicts.append(agg_dict)
agg_df = pd.DataFrame(agg_dicts)
df = pd.merge(df, agg_df, on="id")
return df
| 28.532934
| 87
| 0.570619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 769
| 0.161385
|
0c60c978bb3233d48fef80aac1fbd85b7650f54f
| 637
|
py
|
Python
|
sa/migrations/0051_managedobject_set_profile.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 84
|
2017-10-22T11:01:39.000Z
|
2022-02-27T03:43:48.000Z
|
sa/migrations/0051_managedobject_set_profile.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 22
|
2017-12-11T07:21:56.000Z
|
2021-09-23T02:53:50.000Z
|
sa/migrations/0051_managedobject_set_profile.py
|
prorevizor/noc
|
37e44b8afc64318b10699c06a1138eee9e7d6a4e
|
[
"BSD-3-Clause"
] | 23
|
2017-12-06T06:59:52.000Z
|
2022-02-24T00:02:25.000Z
|
# ----------------------------------------------------------------------
# managedobject set profile
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from noc.core.migration.base import BaseMigration
class Migration(BaseMigration):
def migrate(self):
r = self.db.execute("SELECT id FROM sa_managedobjectprofile WHERE name='default'")
p_id = r[0][0]
self.db.execute("UPDATE sa_managedobject SET object_profile_id = %s", [p_id])
| 37.470588
| 90
| 0.470958
| 254
| 0.398744
| 0
| 0
| 0
| 0
| 0
| 0
| 435
| 0.682889
|
0c6180591c4611e118c4ac0d8c026f5d2d7c99fa
| 305
|
py
|
Python
|
oct2py/compat.py
|
sdvillal/oct2py
|
f7aa89b909cbb5959ddedf3ab3e743898eac3d45
|
[
"MIT"
] | 8
|
2015-10-16T23:28:16.000Z
|
2020-06-19T18:49:18.000Z
|
oct2py/compat.py
|
sdvillal/oct2py
|
f7aa89b909cbb5959ddedf3ab3e743898eac3d45
|
[
"MIT"
] | 8
|
2015-06-25T20:57:56.000Z
|
2020-04-03T22:33:16.000Z
|
oct2py/compat.py
|
sdvillal/oct2py
|
f7aa89b909cbb5959ddedf3ab3e743898eac3d45
|
[
"MIT"
] | 6
|
2015-04-21T12:23:44.000Z
|
2021-10-01T00:08:47.000Z
|
# -*- coding: utf-8 -*-
import sys
PY2 = sys.version[0] == '2'
PY3 = sys.version[0] == '3'
if PY2:
unicode = unicode
long = long
from StringIO import StringIO
import Queue as queue
else: # pragma : no cover
unicode = str
long = int
from io import StringIO
import queue
| 16.944444
| 33
| 0.606557
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 48
| 0.157377
|
0c61d0a37539223a22a77c96706aa91e5bab6637
| 1,563
|
py
|
Python
|
lambda_functions/compute/campaign/aws.py
|
pierrealixt/MapCampaigner
|
7845bda4b0f6ccb7d18905a8c77d91ba6a4f78ad
|
[
"BSD-3-Clause"
] | null | null | null |
lambda_functions/compute/campaign/aws.py
|
pierrealixt/MapCampaigner
|
7845bda4b0f6ccb7d18905a8c77d91ba6a4f78ad
|
[
"BSD-3-Clause"
] | 1
|
2018-07-24T13:57:03.000Z
|
2018-07-24T13:57:03.000Z
|
lambda_functions/compute/campaign/aws.py
|
pierrealixt/MapCampaigner
|
7845bda4b0f6ccb7d18905a8c77d91ba6a4f78ad
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import json
import boto3
class S3Data(object):
"""
Class for AWS S3
"""
def __init__(self):
"""
Initialize the s3 client.
"""
self.s3 = boto3.client('s3')
self.bucket = os.environ['S3_BUCKET']
def list(self, prefix):
"""
List S3 objects in bucket starting with prefix.
        There aren't files or folders in an S3 bucket, only objects.
A key is the name of an object. The key is used to retrieve an object.
examples of keys:
- campaign/
- campaign/uuid.json
- campaign/uuid.geojson
- surveys/
- surveys/buildings
- kartoza.jpg
:param prefix: keys has to start with prefix.
:type prefix: string
:returns: list of keys starting with prefix in the bucket.
:rtype: list
"""
prefix = '{}/'.format(prefix)
objects = []
try:
for obj in self.s3.list_objects(
Bucket=self.bucket,
Prefix=prefix)['Contents']:
if obj['Key'] != prefix:
key = obj['Key'].replace(prefix, '')
objects.append(key.split('/')[0])
return list(set(objects))
except KeyError:
return []
def delete(self, key):
"""
Delete a key in the S3 bucket.
:param key: pathname + filename
:type key: string
:returns:
"""
self.s3.delete_object(
Bucket=self.bucket,
Key=key)
| 24.809524
| 78
| 0.515035
| 1,526
| 0.976328
| 0
| 0
| 0
| 0
| 0
| 0
| 835
| 0.534229
|
0c6200ff0e4e0bec1acf3bffde906f26e624c332
| 5,980
|
py
|
Python
|
infra/utils/launch_ec2.py
|
philipmac/nephele2
|
50acba6b7bb00da6209c75e26c8c040ffacbaa1e
|
[
"CC0-1.0"
] | 1
|
2021-02-26T23:00:10.000Z
|
2021-02-26T23:00:10.000Z
|
infra/utils/launch_ec2.py
|
philipmac/nephele2
|
50acba6b7bb00da6209c75e26c8c040ffacbaa1e
|
[
"CC0-1.0"
] | 1
|
2020-11-16T01:55:06.000Z
|
2020-11-16T01:55:06.000Z
|
infra/utils/launch_ec2.py
|
philipmac/nephele2
|
50acba6b7bb00da6209c75e26c8c040ffacbaa1e
|
[
"CC0-1.0"
] | 2
|
2021-08-12T13:59:49.000Z
|
2022-01-19T17:16:26.000Z
|
#!/usr/bin/env python3
import os
import boto3
import botocore.exceptions
import argparse
import yaml
from nephele2 import NepheleError
mand_vars = ['AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY']
perm_error = """\n\nIt seems you have not set up your AWS correctly.
Should you be running this with Awssume? Or have profile with appropriate role?
Exiting now.\n"""
def main(args):
"""Launch ec2 instance"""
if args.profile is None:
ec2_resource = boto3.Session(region_name='us-east-1').resource('ec2')
else:
ec2_resource = boto3.Session(region_name='us-east-1', profile_name=args.profile).resource('ec2')
test_sanity(ec2_resource, args)
envs = load_stack_vars(args.yaml_env.name)
start_EC2(ec2_resource, args.ami_id, args.instance_type,
args.key_path, args.label, envs, args.dry_run)
def load_stack_vars(fname):
try:
with open(fname) as f:
data_map = yaml.safe_load(f)
return data_map
except FileNotFoundError as fnf:
print(fnf)
print('Unable to find yaml file, exiting.')
exit(1)
except:
raise
def gen_mnt_str(efs_ip):
mnt_opts = 'nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport' # from AWS
return 'mount -t nfs -o {opts} {trgt}:/ {mnt}/'.format(opts=mnt_opts,
trgt=efs_ip,
mnt='/mnt/EFS')
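# Example of the generated command (10.0.0.5 is a hypothetical EFS IP):
#   mount -t nfs -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport 10.0.0.5:/ /mnt/EFS/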
def read_key(key_path):
try:
with open(key_path, 'r') as f:
key = f.read()
return key
except:
raise
def test_sanity(ec2_resource, args):
"""Test if env vars are set, key exists, and can access ec2"""
if args.profile is None:
for var in mand_vars:
if os.environ.get(var) is None:
                print(var + ' must be set as an environment variable. \nExiting.')
exit(1)
if not os.path.exists(args.key_path):
print('Unable to see your key: {}, exiting now :-('.format(args.key_path))
exit(1)
try:
ec2_resource.instances.all().__iter__().__next__()
except botocore.exceptions.ClientError as expn:
print(expn)
print(perm_error)
exit(1)
def create_EC2(ec2_resource, ami_id, i_type, envs, u_data='', dry_run=True):
"""create ec2 instance. by default DryRun is T, and only checks perms."""
inst = ec2_resource.create_instances(
DryRun=dry_run,
SecurityGroupIds=[envs['INTERNAL_SECURITY_GROUP'],
envs['ecs_cluster_security_group_id']],
IamInstanceProfile={'Arn': envs['N2_WORKER_INSTANCE_PROFILE']},
InstanceType=i_type,
ImageId=ami_id,
MinCount=1,
MaxCount=1,
InstanceInitiatedShutdownBehavior='terminate',
SubnetId=envs['VPC_SUBNET'],
UserData=u_data
)
return inst
def start_EC2(ec2_resource, ami_id, i_type, key_path, label, envs, dry_run):
"""check if have perms to create instance.
https://boto3.amazonaws.com/v1/documentation/api/latest/guide/ec2-example-managing-instances.html#start-and-stop-instances
if so, the create instance and tag with label.
"""
try:
create_EC2(ec2_resource, ami_id, i_type, envs)
except botocore.exceptions.ClientError as e:
if 'DryRunOperation' not in str(e):
print(e.response['Error']['Message'])
print(perm_error)
exit(1)
elif dry_run:
print(e.response['Error']['Message'])
exit(0)
else:
pass
mnt_str = gen_mnt_str(envs['EFS_IP'])
key_str = read_key(key_path)
auth_key_str = 'printf "{}" >> /home/admin/.ssh/authorized_keys;'.format(
key_str)
u_data = '#!/bin/bash\n{mnt_str}\n{auth_key_str}\n'.format(mnt_str=mnt_str,
auth_key_str=auth_key_str)
print('Creating EC2...')
try:
instances = create_EC2(ec2_resource, ami_id, i_type, envs, u_data, False)
except botocore.exceptions.ClientError as bce:
print(bce)
print('\nUnable to launch EC2. \nExiting.')
exit(1)
    if len(instances) != 1:
msg = 'Instances launched: %s' % str(instances)
raise NepheleError.UnableToStartEC2Exception(msg=msg)
instance = instances[0]
instance.wait_until_running()
instance.create_tags(Tags=[{'Key': 'Name', 'Value': label}])
print(str(instance) + ' has been created.')
print('To connect type:\nssh {ip_addr}'.format(
ip_addr=instance.instance_id))
print('To terminate instance type:')
print('awssume aws ec2 terminate-instances --instance-ids ' + instance.instance_id)
if __name__ == "__main__":
usage = 'Eg:\nsource ~/code/neph2-envs/dev/environment_vars\n'\
'awssume launch_ec2.py -e ../../neph2-envs/dev/dev_outputs.yaml -a ami-0ae1b7201f4a236f9 -t m5.4xlarge -k ~/.ssh/id_rsa.pub --label instance_name_tag\n\n'\
'Alternately, pass profile which has correct role/permissions:\n'\
'launch_ec2.py -e dev_outputs.yaml -a ami-003eed27e5bf2ef91 -t t2.micro -k ~/.ssh/id_rsa.pub -l name_tag --profile aws_profile_name'
parser = argparse.ArgumentParser(
description='CLI Interface to N2.', usage=usage)
req = parser.add_argument_group('required args')
req.add_argument("-e", "--yaml_env",
type=argparse.FileType('r'), required=True)
req.add_argument("-t", "--instance_type", type=str, required=True)
req.add_argument("-a", "--ami_id", type=str, required=True)
req.add_argument("-k", "--key_path", type=str, required=True)
req.add_argument("-l", "--label", type=str, required=True)
parser.add_argument("-p", "--profile", type=str)
parser.add_argument("-d", "--dry_run", action='store_true')
args = parser.parse_args()
main(args)
| 39.084967
| 167
| 0.623746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,986
| 0.332107
|
0c6419af7c4ea362b8097a85b3a1cb0ca9746ce0
| 9,196
|
py
|
Python
|
tests/test_wvlns.py
|
seignovert/pyvims
|
a70b5b9b8bc5c37fa43b7db4d15407f312a31849
|
[
"BSD-3-Clause"
] | 4
|
2019-09-16T15:50:22.000Z
|
2021-04-08T15:32:48.000Z
|
tests/test_wvlns.py
|
seignovert/pyvims
|
a70b5b9b8bc5c37fa43b7db4d15407f312a31849
|
[
"BSD-3-Clause"
] | 3
|
2018-05-04T09:28:24.000Z
|
2018-12-03T09:00:31.000Z
|
tests/test_wvlns.py
|
seignovert/pyvims
|
a70b5b9b8bc5c37fa43b7db4d15407f312a31849
|
[
"BSD-3-Clause"
] | 1
|
2020-10-12T15:14:17.000Z
|
2020-10-12T15:14:17.000Z
|
"""Test VIMS wavelength module."""
from pathlib import Path
import numpy as np
from numpy.testing import assert_array_almost_equal as assert_array
from pyvims import QUB
from pyvims.vars import ROOT_DATA
from pyvims.wvlns import (BAD_IR_PIXELS, CHANNELS, FWHM, SHIFT,
VIMS_IR, VIMS_VIS, WLNS, YEARS,
bad_ir_pixels, ir_multiplexer, ir_hot_pixels,
is_hot_pixel, median_spectrum, moving_median,
sample_line_axes)
from pytest import approx, raises
DATA = Path(__file__).parent / 'data'
def test_vims_csv():
"""Test CSV global variables."""
assert len(CHANNELS) == len(WLNS) == len(FWHM) == 352
assert CHANNELS[0] == 1
assert CHANNELS[-1] == 352
assert WLNS[0] == .350540
assert WLNS[-1] == 5.1225
assert FWHM[0] == .007368
assert FWHM[-1] == .016
assert len(YEARS) == len(SHIFT) == 58
assert YEARS[0] == 1999.6
assert YEARS[-1] == 2017.8
assert SHIFT[0] == -25.8
assert SHIFT[-1] == 9.8
def test_vims_ir():
"""Test VIMS IR wavelengths."""
# Standard wavelengths
wvlns = VIMS_IR()
assert len(wvlns) == 256
assert wvlns[0] == .884210
assert wvlns[-1] == 5.122500
# Full-width at half maximum value
fwhms = VIMS_IR(fwhm=True)
assert len(fwhms) == 256
assert fwhms[0] == .012878
assert fwhms[-1] == .016
# Wavenumber (cm-1)
wvnb = VIMS_IR(sigma=True)
assert len(wvnb) == 256
assert wvnb[0] == approx(11309.53, abs=1e-2)
assert wvnb[-1] == approx(1952.17, abs=1e-2)
# Single band
assert VIMS_IR(band=97) == .884210
assert VIMS_IR(band=97, fwhm=True) == .012878
assert VIMS_IR(band=97, sigma=True) == approx(11309.53, abs=1e-2)
assert VIMS_IR(band=97, fwhm=True, sigma=True) == approx(164.72, abs=1e-2)
# Selected bands array
assert_array(VIMS_IR(band=[97, 352]), [.884210, 5.122500])
assert_array(VIMS_IR(band=[97, 352], fwhm=True), [.012878, .016])
# Time offset
assert VIMS_IR(band=97, year=2002) == approx(.884210, abs=1e-6)
assert VIMS_IR(band=97, year=2005) == approx(.884210, abs=1e-6)
assert VIMS_IR(band=97, year=2001.5) == approx(.885410, abs=1e-6) # +.0012
assert VIMS_IR(band=97, year=2011) == approx(.890210, abs=1e-6) # +.006
# Time offset on all IR bands
wvlns_2011 = VIMS_IR(year=2011)
assert len(wvlns_2011) == 256
assert wvlns_2011[0] == approx(.890210, abs=1e-6)
assert wvlns_2011[-1] == approx(5.128500, abs=1e-6)
# No change in FWHM with time
assert VIMS_IR(band=97, year=2001.5, fwhm=True) == .012878
# Outside IR band range
assert np.isnan(VIMS_IR(band=0))
assert np.isnan(VIMS_IR(band=96, fwhm=True))
assert np.isnan(VIMS_IR(band=353, sigma=True))
def test_vims_vis():
"""Test VIMS VIS wavelengths."""
# Standard wavelengths
wvlns = VIMS_VIS()
assert len(wvlns) == 96
assert wvlns[0] == .350540
assert wvlns[-1] == 1.045980
# Full-width at half maximum value
fwhms = VIMS_VIS(fwhm=True)
assert len(fwhms) == 96
assert fwhms[0] == .007368
assert fwhms[-1] == .012480
# Wavenumber (cm-1)
wvnb = VIMS_VIS(sigma=True)
assert len(wvnb) == 96
assert wvnb[0] == approx(28527.41, abs=1e-2)
assert wvnb[-1] == approx(9560.41, abs=1e-2)
# Single band
assert VIMS_VIS(band=96) == 1.045980
assert VIMS_VIS(band=96, fwhm=True) == .012480
assert VIMS_VIS(band=96, sigma=True) == approx(9560.41, abs=1e-2)
assert VIMS_VIS(band=96, fwhm=True, sigma=True) == approx(114.07, abs=1e-2)
# Selected bands array
assert_array(VIMS_VIS(band=[1, 96]), [.350540, 1.045980])
assert_array(VIMS_VIS(band=[1, 96], fwhm=True), [.007368, .012480])
# Time offset
with raises(ValueError):
_ = VIMS_VIS(band=97, year=2002)
with raises(ValueError):
_ = VIMS_VIS(year=2011)
# Outside IR band range
assert np.isnan(VIMS_VIS(band=0))
assert np.isnan(VIMS_VIS(band=97, fwhm=True))
assert np.isnan(VIMS_VIS(band=353, sigma=True))
def test_bad_ir_pixels():
"""Test bad IR pixels list."""
csv = np.loadtxt(ROOT_DATA / 'wvlns_std.csv',
delimiter=',', usecols=(0, 1, 2, 3),
dtype=str, skiprows=98)
# Extract bad pixels
wvlns = np.transpose([
(int(channel), float(wvln) - .5 * float(fwhm), float(fwhm))
for channel, wvln, fwhm, comment in csv
if comment
])
# Group bad pixels
news = [True] + list((wvlns[0, 1:] - wvlns[0, :-1]) > 1.5)
bads = []
for i, new in enumerate(news):
if new:
bads.append(list(wvlns[1:, i]))
else:
bads[-1][1] += wvlns[2, i]
assert_array(BAD_IR_PIXELS, bads)
coll = bad_ir_pixels()
assert len(coll.get_paths()) == len(bads)
def test_moving_median():
"""Test moving median filter."""
a = [1, 2, 3, 4, 5]
assert_array(moving_median(a, width=1), a)
assert_array(moving_median(a, width=3),
[1.5, 2, 3, 4, 4.5])
assert_array(moving_median(a, width=5),
[2, 2.5, 3, 3.5, 4])
assert_array(moving_median(a, width=2),
[1.5, 2.5, 3.5, 4.5, 5])
assert_array(moving_median(a, width=4),
[2, 2.5, 3.5, 4, 4.5])
def test_is_hot_pixel():
"""Test hot pixel detector."""
# Create random signal
signal = np.random.default_rng().integers(20, size=100)
# Add hot pixels
signal[10::20] = 50
signal[10::30] = 150
hot_pix = is_hot_pixel(signal)
assert len(hot_pix) == 100
assert 3 <= sum(hot_pix) < 6
assert all(hot_pix[10::30])
hot_pix = is_hot_pixel(signal, tol=1.5, frac=90)
assert len(hot_pix) == 100
assert 6 <= sum(hot_pix) < 12
assert all(hot_pix[10::20])
def test_sample_line_axes():
"""Test locatation sample and line axes."""
# 2D case
assert sample_line_axes((64, 352)) == (0, )
assert sample_line_axes((256, 32)) == (1, )
# 3D case
assert sample_line_axes((32, 64, 352)) == (0, 1)
assert sample_line_axes((32, 352, 64)) == (0, 2)
assert sample_line_axes((352, 32, 64)) == (1, 2)
# 1D case
with raises(TypeError):
_ = sample_line_axes((352))
# No band axis
with raises(ValueError):
_ = sample_line_axes((64, 64))
def test_median_spectrum():
"""Test the median spectrum extraction."""
# 2D cases
spectra = [CHANNELS, CHANNELS]
spectrum = median_spectrum(spectra) # (2, 352)
assert spectrum.shape == (352,)
assert spectrum[0] == 1
assert spectrum[-1] == 352
spectrum = median_spectrum(np.transpose(spectra)) # (352, 2)
assert spectrum.shape == (352,)
assert spectrum[0] == 1
assert spectrum[-1] == 352
# 3D cases
spectra = [[CHANNELS, CHANNELS]]
spectrum = median_spectrum(spectra) # (1, 2, 352)
assert spectrum.shape == (352,)
assert spectrum[0] == 1
assert spectrum[-1] == 352
spectrum = median_spectrum(np.moveaxis(spectra, 1, 2)) # (1, 352, 2)
assert spectrum.shape == (352,)
assert spectrum[0] == 1
assert spectrum[-1] == 352
spectrum = median_spectrum(np.moveaxis(spectra, 2, 0)) # (352, 1, 2)
assert spectrum.shape == (352,)
assert spectrum[0] == 1
assert spectrum[-1] == 352
def test_ir_multiplexer():
"""Test spectrum split in each IR multiplexer."""
# Full spectrum
spec_1, spec_2 = ir_multiplexer(CHANNELS)
assert len(spec_1) == 128
assert len(spec_2) == 128
assert spec_1[0] == 97
assert spec_1[-1] == 351
assert spec_2[0] == 98
assert spec_2[-1] == 352
# IR spectrum only
spec_1, spec_2 = ir_multiplexer(CHANNELS[96:])
assert len(spec_1) == 128
assert len(spec_2) == 128
assert spec_1[0] == 97
assert spec_1[-1] == 351
assert spec_2[0] == 98
assert spec_2[-1] == 352
# 2D spectra
spectra = [CHANNELS, CHANNELS]
spec_1, spec_2 = ir_multiplexer(spectra)
assert len(spec_1) == 128
assert len(spec_2) == 128
assert spec_1[0] == 97
assert spec_1[-1] == 351
assert spec_2[0] == 98
assert spec_2[-1] == 352
# 3D spectra
spectra = [[CHANNELS, CHANNELS]]
spec_1, spec_2 = ir_multiplexer(spectra)
assert len(spec_1) == 128
assert len(spec_2) == 128
assert spec_1[0] == 97
assert spec_1[-1] == 351
assert spec_2[0] == 98
assert spec_2[-1] == 352
# VIS spectrum only
with raises(ValueError):
_ = ir_multiplexer(CHANNELS[:96])
# Dimension too high
with raises(ValueError):
_ = ir_multiplexer([[[CHANNELS]]])
def test_ir_hot_pixels():
"""Test IR hot pixel detector from spectra."""
qub = QUB('1787314297_1', root=DATA)
# 1D spectrum
hot_pixels = ir_hot_pixels(qub['BACKGROUND'][0])
assert len(hot_pixels) == 10
assert_array(hot_pixels,
[105, 119, 124, 168, 239, 240, 275, 306, 317, 331])
# 2D spectra
hot_pixels = ir_hot_pixels(qub['BACKGROUND'])
assert len(hot_pixels) == 10
assert_array(hot_pixels,
[105, 119, 124, 168, 239, 240, 275, 306, 317, 331])
| 27.450746
| 79
| 0.605154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,145
| 0.124511
|
0c661084ef2dc9a119cb718b8362035d15b03067
| 909
|
py
|
Python
|
Outliers/loss/losses.py
|
MakotoTAKAMATSU013/Outliers
|
80043027d64b8f07355a05b281925f00bbf1a442
|
[
"MIT"
] | null | null | null |
Outliers/loss/losses.py
|
MakotoTAKAMATSU013/Outliers
|
80043027d64b8f07355a05b281925f00bbf1a442
|
[
"MIT"
] | null | null | null |
Outliers/loss/losses.py
|
MakotoTAKAMATSU013/Outliers
|
80043027d64b8f07355a05b281925f00bbf1a442
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
class LabelSmoothingCrossEntropy(nn.Module):
def __init__(self, smoothing = 0.1):
super(LabelSmoothingCrossEntropy, self).__init__()
        assert smoothing < 1.0  # must admit the default of 0.1; smoothing is a fraction below 1
self.smoothing = smoothing
self.confidence = 1. - smoothing
def forward(self, x, target):
logprobs = F.log_softmax(x, dim=-1)
        nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
        nll_loss = nll_loss.squeeze(1)
        smooth_loss = -logprobs.mean(dim=-1)
        loss = self.confidence * nll_loss + self.smoothing * smooth_loss
return loss.mean()
class SoftTargetCrossEntropy(nn.Module):
def __init__(self):
super(SoftTargetCrossEntropy, self).__init__()
def forward(self, x, target):
loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1)
return loss.mean()
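# Minimal usage sketch (illustrative only, not part of the original file):
#   logits = torch.randn(4, 10)            # batch of 4, 10 classes
#   targets = torch.randint(0, 10, (4,))   # hard labels for LabelSmoothingCrossEntropy
#   loss = LabelSmoothingCrossEntropy(smoothing=0.1)(logits, targets)
#   soft = F.one_hot(targets, 10).float()  # soft labels for SoftTargetCrossEntropy
#   loss2 = SoftTargetCrossEntropy()(logits, soft)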
| 34.961538
| 73
| 0.660066
| 839
| 0.922992
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0c663401e4bd928831a371cae4be0b6a743a91c8
| 5,783
|
py
|
Python
|
esiosdata/importdemdata.py
|
azogue/esiosdata
|
680c7918955bc6ceee5bded92b3a4485f5ea8151
|
[
"MIT"
] | 20
|
2017-06-04T20:34:16.000Z
|
2021-10-31T22:55:22.000Z
|
esiosdata/importdemdata.py
|
azogue/esiosdata
|
680c7918955bc6ceee5bded92b3a4485f5ea8151
|
[
"MIT"
] | null | null | null |
esiosdata/importdemdata.py
|
azogue/esiosdata
|
680c7918955bc6ceee5bded92b3a4485f5ea8151
|
[
"MIT"
] | 4
|
2020-01-28T19:02:24.000Z
|
2022-03-08T15:59:11.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 27 18:16:24 2015
@author: Eugenio Panadero
Written in response to the announced change:
SHUTDOWN OF THE CLASSIC PUBLIC E·SIOS WEBSITE
The classic public e·sios website (http://www.esios.ree.es) will be disconnected on 29 March 2016.
We will continue to provide service on the System Operator's new website:
https://www.esios.ree.es.
Please update your bookmarks to point to the new website.
IMPORTANT!!!
On the same date (29/03/2016), the "Solicitar y Descargar" (request and download) service,
used to download data from the classic public e·sios website, will also stop working.
Please read about data downloads at
https://www.esios.ree.es/es/pagina/api
and update your download processes.
"""
import json
import pandas as pd
import re
from dataweb.requestweb import get_data_en_intervalo
from esiosdata.esios_config import DATE_FMT, TZ, SERVER, HEADERS, D_TIPOS_REQ_DEM, KEYS_DATA_DEM
from esiosdata.prettyprinting import print_redb, print_err
__author__ = 'Eugenio Panadero'
__copyright__ = "Copyright 2015, AzogueLabs"
__credits__ = ["Eugenio Panadero"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Eugenio Panadero"
RG_FUNC_CONTENT = re.compile(r'(?P<func>.*)\((?P<json>.*)\);')
def dem_url_dia(dt_day='2015-06-22'):
"""Obtiene las urls de descarga de los datos de demanda energética de un día concreto."""
def _url_tipo_dato(str_dia, k):
url = SERVER + '/archives/{}/download_json?locale=es'.format(D_TIPOS_REQ_DEM[k])
if type(str_dia) is str:
return url + '&date=' + str_dia
else:
return url + '&date=' + str_dia.date().isoformat()
urls = [_url_tipo_dato(dt_day, k) for k in D_TIPOS_REQ_DEM.keys()]
return urls
def _extract_func_json_data(data_raw):
try:
busca = RG_FUNC_CONTENT.match(data_raw).groupdict()
ind, data = busca['func'], None
data = json.loads(busca['json'])
if len(data.keys()) == 1:
return ind, data[list(data.keys())[0]]
else:
return ind, data
except AttributeError:
# print('ERROR REG_EXP [{}] --> RAW: {}'.format(e, data_raw))
return None, None
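# Illustrative sketch of the JSONP-style parsing above (the function name and payload below
# are hypothetical, not actual e·sios responses):
#   _extract_func_json_data('IND_Demanda({"valores": [1, 2]});') -> ('IND_Demanda', [1, 2])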
def _import_daily_max_min(data):
# IND_MaxMinRenovEol / IND_MaxMin
df = pd.DataFrame(data, index=[0])
cols_ts = df.columns.str.startswith('ts')
is_max_min_renov = any(cols_ts)
if is_max_min_renov:
df.index = pd.DatetimeIndex([pd.Timestamp(df['tsMaxRenov'][0]).date()], freq='D')
else:
df = pd.DataFrame(df.set_index(pd.DatetimeIndex([pd.Timestamp(df['date'][0]).date()], freq='D')
).drop('date', axis=1))
cols_ts = df.columns.str.contains('timeStamp', regex=False)
for c, is_ts in zip(df.columns, cols_ts):
if is_ts:
df[c] = df[c].apply(pd.Timestamp)
else:
df[c] = df[c].astype(float)
return df
def _import_json_ts_data(data):
df = pd.DataFrame(data)
try:
return pd.DataFrame(df.set_index(pd.DatetimeIndex(df['ts'].apply(lambda x: pd.Timestamp(x, tz=TZ)),
freq='10T', tz=TZ), verify_integrity=True
).drop('ts', axis=1)).sort_index().applymap(float)
except ValueError: # ES DST
df['ts'] = pd.DatetimeIndex(start=pd.Timestamp(df['ts'].iloc[0]), periods=len(df), freq='10T', tz=TZ)
# , ambiguous="infer")
return df.set_index('ts', verify_integrity=True).sort_index().applymap(float)
def dem_procesa_datos_dia(key_day, response):
"""Procesa los datos descargados en JSON."""
dfs_import, df_import, dfs_maxmin, hay_errores = [], None, [], 0
for r in response:
tipo_datos, data = _extract_func_json_data(r)
if tipo_datos is not None:
if ('IND_MaxMin' in tipo_datos) and data:
df_import = _import_daily_max_min(data)
dfs_maxmin.append(df_import)
elif data:
df_import = _import_json_ts_data(data)
dfs_import.append(df_import)
if tipo_datos is None or df_import is None:
hay_errores += 1
if hay_errores == 4:
        # Nothing available, exit early without retrying:
print_redb('** No hay datos para el día {}!'.format(key_day))
return None, -2
else: # if hay_errores < 3:
        # TODO build partial data!! (max-min with NaN's, etc.)
data_import = {}
if dfs_import:
data_import[KEYS_DATA_DEM[0]] = dfs_import[0].join(dfs_import[1])
if len(dfs_maxmin) == 2:
data_import[KEYS_DATA_DEM[1]] = dfs_maxmin[0].join(dfs_maxmin[1])
elif dfs_maxmin:
data_import[KEYS_DATA_DEM[1]] = dfs_maxmin[0]
if not data_import:
print_err('DÍA: {} -> # ERRORES: {}'.format(key_day, hay_errores))
return None, -2
return data_import, 0
def dem_data_dia(str_dia='2015-10-10', str_dia_fin=None):
"""Obtiene datos de demanda energética en un día concreto o un intervalo, accediendo directamente a la web."""
params = {'date_fmt': DATE_FMT, 'usar_multithread': False, 'num_retries': 1, "timeout": 10,
'func_procesa_data_dia': dem_procesa_datos_dia, 'func_url_data_dia': dem_url_dia,
'data_extra_request': {'json_req': False, 'headers': HEADERS}}
if str_dia_fin is not None:
params['usar_multithread'] = True
data, hay_errores, str_import = get_data_en_intervalo(str_dia, str_dia_fin, **params)
else:
data, hay_errores, str_import = get_data_en_intervalo(str_dia, str_dia, **params)
if not hay_errores:
return data
else:
print_err(str_import)
return None
| 39.609589
| 114
| 0.639633
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,782
| 0.306871
|
a73d0e2b381469762428cb4845c16d12f86b59d9
| 4,744
|
py
|
Python
|
brainfrick.py
|
rium9/brainfrick
|
37f8e3417cde5828e3ed2c2099fc952259f12844
|
[
"MIT"
] | null | null | null |
brainfrick.py
|
rium9/brainfrick
|
37f8e3417cde5828e3ed2c2099fc952259f12844
|
[
"MIT"
] | null | null | null |
brainfrick.py
|
rium9/brainfrick
|
37f8e3417cde5828e3ed2c2099fc952259f12844
|
[
"MIT"
] | null | null | null |
class BrainfuckException(Exception):
pass
class BLexer:
""" Static class encapsulating functionality for lexing Brainfuck programs. """
symbols = [
'>', '<', '+', '-',
'.', ',', '[', ']'
]
@staticmethod
def lex(code):
""" Return a generator for tokens in some Brainfuck code. """
# The syntax of Brainfuck is so simple that nothing is really gained from converting
# symbols to some sort of Token object. Just ignore everything that isn't in Brainfuck's
# syntax.
return (char for char in code if char in BLexer.symbols)
class BrainfuckMachine:
""" Class encapsulating the core operations of a brainfuck machine. Namely,
- Move pointer left,
- Move pointer right,
- Increment cell under pointer,
- Decrement cell under pointer,
- Output value of cell under pointer,
- Input value to cell under pointer.
"""
def __init__(self, cells=256, in_func=input, out_func=chr):
self.cells = [0] * cells
self.ptr = 0
self.in_func = in_func
self.out_func = out_func
self.looppos = []
self.out_buffer = []
def right(self):
if self.ptr < len(self.cells)-1:
self.ptr += 1
def left(self):
if self.ptr > 0:
self.ptr -= 1
def incr(self):
self.cells[self.ptr] += 1
def decr(self):
self.cells[self.ptr] -= 1
def value(self):
""" Return the value of the cell under the pointer. """
return self.cells[self.ptr]
def outf(self):
return self.out_func(self.cells[self.ptr])
def inf(self):
self.cells[self.ptr] = self.in_func()
class BInterpreter:
""" Class encapsulating interpretation functionality for brainfuck code. """
def __init__(self, machine=None):
if machine:
self.machine = machine
else:
self.machine = BrainfuckMachine()
def interpret_code(self, code):
""" Interpret each character in a list or string of brainfuck tokens. """
# Iterate through every character in the code. Use indexing so that we can
# jump as necessary for square bracket loops. To identify matching brackets for forward
# jumps, move forward a position at a time, keeping track of the nesting level (starts at 1). When a
# open bracket ([) is encountered, increment the nesting level, and when a close bracket
# (]) is found, decrement it. When nesting level reaches 0, the matching bracket has been found.
# For finding the correct bracket for a backwards jump, do the same thing but with
# backwards iteration and swap when you increment and decrement.
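        # Worked example (illustrative): for the code "[-]" with the current cell at 0,
        # the '[' branch scans forward with nest going 1 -> 1 -> 0, stopping at the matching
        # ']', so the loop body '-' is skipped entirely. If the cell were non-zero, execution
        # would fall through, decrement the cell, and the ']' branch would jump back.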
pos = 0
while pos < len(code):
if code[pos] == '[':
if self.machine.value() == 0:
nest = 1
while nest != 0:
pos += 1
if code[pos] == '[':
nest += 1
elif code[pos] == ']':
nest -= 1
pos += 1
else:
pos += 1
elif code[pos] == ']':
if self.machine.value() != 0:
nest = 1
while nest != 0:
pos -= 1
if code[pos] == ']':
nest += 1
elif code[pos] == '[':
nest -= 1
pos += 1
else:
pos += 1
else:
self.interpret_one(code[pos])
pos += 1
def interpret_one(self, char):
""" Perform the appropriate operation for a single brainfuck character. """
if char == '>':
self.machine.right()
elif char == '<':
self.machine.left()
elif char == '+':
self.machine.incr()
elif char == '-':
self.machine.decr()
elif char == '.':
# TODO output checks
print(self.machine.outf(), end='')
elif char == ',':
# TODO input checks
self.machine.inf()
if __name__ == '__main__':
bfm = BrainfuckMachine(cells=8, out_func=chr)
bi = BInterpreter(bfm)
f = open('helloworld', 'r').read()
code = list(BLexer.lex(f))
bi.interpret_code(code)
| 32.493151
| 110
| 0.490304
| 4,501
| 0.948777
| 0
| 0
| 381
| 0.080312
| 0
| 0
| 1,674
| 0.352867
|
a73f4577fe0a30a2fdd1d7b44615b63fb0d34f1e
| 3,476
|
bzl
|
Python
|
infra_macros/fbcode_macros/build_defs/build_info.bzl
|
martarozek/buckit
|
343cc5a5964c1d43902b6a77868652adaefa0caa
|
[
"BSD-3-Clause"
] | null | null | null |
infra_macros/fbcode_macros/build_defs/build_info.bzl
|
martarozek/buckit
|
343cc5a5964c1d43902b6a77868652adaefa0caa
|
[
"BSD-3-Clause"
] | null | null | null |
infra_macros/fbcode_macros/build_defs/build_info.bzl
|
martarozek/buckit
|
343cc5a5964c1d43902b6a77868652adaefa0caa
|
[
"BSD-3-Clause"
] | null | null | null |
load("@fbcode_macros//build_defs:config.bzl", "config")
load("@fbcode_macros//build_defs/config:read_configs.bzl", "read_int")
load("@fbcode_macros//build_defs:core_tools.bzl", "core_tools")
def _create_build_info(
build_mode,
buck_package,
name,
rule_type,
platform,
epochtime=0,
host="",
package_name="",
package_version="",
package_release="",
path="",
revision="",
revision_epochtime=0,
time="",
time_iso8601="",
upstream_revision="",
upstream_revision_epochtime=0,
user="",
):
return struct(
build_mode=build_mode,
rule="fbcode:" + buck_package + ":" + name,
platform=platform,
rule_type=rule_type,
epochtime=epochtime,
host=host,
package_name=package_name,
package_version=package_version,
package_release=package_release,
path=path,
revision=revision,
revision_epochtime=revision_epochtime,
time=time,
time_iso8601=time_iso8601,
upstream_revision=upstream_revision,
upstream_revision_epochtime=upstream_revision_epochtime,
user=user,
)
def _get_build_info(package_name, name, rule_type, platform):
"""
Gets a build_info struct from various configurations (or default values)
This struct has values passed in by the packaging system in order to
stamp things like the build epoch, platform, etc into the final binary.
This returns stable values by default so that non-release builds do not
affect rulekeys.
Args:
package_name: The name of the package that contains the build rule
that needs build info. No leading slashes
name: The name of the rule that needs build info
rule_type: The type of rule that is being built. This should be the
macro name, not the underlying rule type. (e.g. cpp_binary,
not cxx_binary)
platform: The platform that is being built for
"""
build_mode = config.get_build_mode()
if core_tools.is_core_tool(package_name,name):
return _create_build_info(
build_mode,
package_name,
name,
rule_type,
platform,
)
else:
return _create_build_info(
build_mode,
package_name,
name,
rule_type,
platform,
epochtime=read_int("build_info", "epochtime", 0),
host=native.read_config("build_info", "host", ""),
package_name=native.read_config("build_info", "package_name", ""),
package_version=native.read_config("build_info", "package_version", ""),
package_release=native.read_config("build_info", "package_release", ""),
path=native.read_config("build_info", "path", ""),
revision=native.read_config("build_info", "revision", ""),
revision_epochtime=read_int("build_info", "revision_epochtime", 0),
time=native.read_config("build_info", "time", ""),
time_iso8601=native.read_config("build_info", "time_iso8601", ""),
upstream_revision=native.read_config("build_info", "upstream_revision", ""),
upstream_revision_epochtime=read_int("build_info", "upstream_revision_epochtime", 0),
user=native.read_config("build_info", "user", ""),
)
build_info = struct(
get_build_info = _get_build_info,
)
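# Illustrative usage sketch (hypothetical values, not part of the original file):
#   info = build_info.get_build_info("folly/io", "async", "cpp_library", "platform007")
#   # info.rule == "fbcode:folly/io:async"; info.build_mode comes from config.get_build_mode()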
| 35.469388
| 97
| 0.635788
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,335
| 0.384062
|
a7401ff3c28629b2dc0848d7b3f999f8226d524f
| 1,885
|
py
|
Python
|
src/scan.py
|
Unitato/github-public-alert
|
29dbcf72dd8c18c45385c29f25174c28c3428560
|
[
"MIT"
] | null | null | null |
src/scan.py
|
Unitato/github-public-alert
|
29dbcf72dd8c18c45385c29f25174c28c3428560
|
[
"MIT"
] | null | null | null |
src/scan.py
|
Unitato/github-public-alert
|
29dbcf72dd8c18c45385c29f25174c28c3428560
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
from github import Github
from libraries.notify import Notify
import json
print("")
print("Scanning Github repos")
GITHUB_API_KEY = os.environ.get('GITHUB_API_KEY')
WHITELIST = json.loads(os.environ.get('GITHUB_WHITELIST').lower())
GITHUB_SCAN = json.loads(os.environ.get('GITHUB_SCAN'))
SENDGRID_API_KEY = os.environ.get('SENDGRID_API_KEY')
SENDGRID_FROM = os.environ.get('SENDGRID_FROM')
SENDGRID_SUBJECT = os.environ.get('SENDGRID_SUBJECT')
SENDGRID_TEMPLATE = os.environ.get('SENDGRID_TEMPLATE')
SENDGRID_NOTIFY = json.loads(os.environ.get('SENDGRID_NOTIFY'))
results = []
print(" Target: {}".format(GITHUB_SCAN))
print(" Github:{}".format(len(GITHUB_API_KEY[:-4])*"#"+GITHUB_API_KEY[-4:]))
print(" Whitelist: {}".format(WHITELIST))
print("")
def load_template(_file):
try:
with open(_file) as f:
# print(f.readlines())
return f.readlines()
except IOError:
print("Template file not accessible")
# or using an access token
g = Github(GITHUB_API_KEY)
for ITEM in GITHUB_SCAN:
print("Checking {}".format(ITEM))
for repo in g.get_user(ITEM).get_repos():
if repo.name.lower() in WHITELIST:
print(" [-] {}".format(repo.name))
# commits = repo.get_commits()
# for com in commits:
# print(com)
else:
print(" [+] {}".format(repo.name))
results.append("{}/{}".format(ITEM,repo.name))
if results:
print("FOUND NEW REPOs!!! SENDING EMAIL!!!")
#exit()
notify = Notify(SENDGRID_API_KEY)
notify.add_from(SENDGRID_FROM)
notify.add_mailto(SENDGRID_NOTIFY)
notify.add_subject(SENDGRID_SUBJECT)
notify.add_content_html(load_template(SENDGRID_TEMPLATE))
notify.update_content_html("<!--RESULTS-->", results)
notify.send_mail()
else:
print("Nothing found, going to sleep")
| 30.901639
| 76
| 0.671088
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 501
| 0.265782
|
a7403e0780a57d1602d030f1189826ad5b0324b5
| 3,634
|
py
|
Python
|
models.py
|
YavorPaunov/await
|
0ea7ad1d0d48b66686e35702d39695268451b688
|
[
"MIT"
] | null | null | null |
models.py
|
YavorPaunov/await
|
0ea7ad1d0d48b66686e35702d39695268451b688
|
[
"MIT"
] | null | null | null |
models.py
|
YavorPaunov/await
|
0ea7ad1d0d48b66686e35702d39695268451b688
|
[
"MIT"
] | null | null | null |
from flask import jsonify
from flask_sqlalchemy import SQLAlchemy
from util import hex_to_rgb, rgb_to_hex
from time2words import relative_time_to_text
from datetime import datetime
from dateutil.tz import tzutc
import pytz
db = SQLAlchemy()
def created_on_default():
return datetime.utcnow()
class Counter(db.Model):
__tablename__ = 'counters'
id = db.Column(db.Integer, primary_key=True)
created_on = db.Column(db.DateTime, default=created_on_default)
updated_on = db.Column(
db.DateTime, default=created_on_default, onupdate=created_on_default)
time = db.Column(db.DateTime)
text_after = db.Column(db.String())
text_before = db.Column(db.String())
theme = db.Column(
db.Enum('simple', 'trip', name='themes'), default='simple')
url = db.Column(db.String, unique=True)
secret = db.Column(db.String)
# Foreign keys
trip_theme = db.relationship(
'TripTheme', backref='counter', lazy='joined', uselist=False)
def __repr__(self):
return '<Counter (id: {0}, time:{1})>'.format(self.id, self.time)
def time_left_in_text(self):
time_in_seconds = int((self.time.replace(tzinfo=pytz.utc)
- datetime.utcnow().replace(tzinfo=pytz.utc)).total_seconds())
return relative_time_to_text(seconds=abs(time_in_seconds))
def has_passed(self):
return self.time.replace(tzinfo=pytz.utc) < datetime.utcnow().replace(tzinfo=pytz.utc)
def full_text(self):
full_text_list = []
if self.has_passed():
full_text_list = [self.time_left_in_text(), "ago"]
else:
if len(self.text_before) > 0:
full_text_list.append(self.text_before)
full_text_list.append(self.time_left_in_text())
if len(self.text_after) > 0:
full_text_list.append(self.text_after)
full_text = " ".join(full_text_list)
full_text = full_text[0].upper() + full_text[1:]
return full_text
def to_dict(self, just_created=False):
data = {
'id': self.id,
'created_on': self.created_on,
'updated_on': self.updated_on,
'time': self.time,
'text_after': self.text_after,
'text_before': self.text_before,
'full_text': self.full_text(),
'url': self.url,
'theme': self.theme
}
if self.theme == 'trip':
data['city_origin'] = self.trip_theme.origin
data['city_destination'] = self.trip_theme.destination
if just_created:
data['secret'] = self.secret
return data
def to_json(self):
return jsonify(self.to_dict())
class TripTheme(db.Model):
__tablename__ = 'trip_themes'
id = db.Column(db.Integer, primary_key=True)
created_on = db.Column(db.DateTime, default=created_on_default)
updated_on = db.Column(
db.DateTime, default=created_on_default, onupdate=created_on_default)
origin = db.Column(db.String(255), nullable=False)
destination = db.Column(db.String(255), nullable=False)
# Relationships
counter_id = db.Column(
db.Integer, db.ForeignKey('counters.id'), nullable=False)
def to_dict(self):
data = {
'origin': self.origin,
'destination': self.destination
}
return data
def get_or_create(session, model, **kwargs):
instance = session.query(model).filter_by(**kwargs).first()
if instance:
return instance
else:
instance = model(**kwargs)
session.add(instance)
session.commit()
return instance
| 30.79661
| 94
| 0.632911
| 3,077
| 0.846725
| 0
| 0
| 0
| 0
| 0
| 0
| 310
| 0.085305
|
a74179e3bf17c46fcdccafdc139bb260a2c60cb7
| 732
|
py
|
Python
|
setup.py
|
ManuelMeraz/ReinforcementLearning
|
5d42a88776428308d35c8031c01bf5afdf080079
|
[
"MIT"
] | 1
|
2020-04-19T15:29:47.000Z
|
2020-04-19T15:29:47.000Z
|
setup.py
|
ManuelMeraz/ReinforcementLearning
|
5d42a88776428308d35c8031c01bf5afdf080079
|
[
"MIT"
] | null | null | null |
setup.py
|
ManuelMeraz/ReinforcementLearning
|
5d42a88776428308d35c8031c01bf5afdf080079
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import setuptools
DIR = os.path.dirname(__file__)
REQUIREMENTS = os.path.join(DIR, "requirements.txt")
with open(REQUIREMENTS) as f:
reqs = f.read().strip().split("\n")
setuptools.setup(
name="rl",
version="0.0.1",
description="Reinforcement Learning: An Introduction",
url="github.com/manuelmeraz/ReinforcementLearning",
author="Manuel Meraz-Rodriguez",
license="MIT",
packages=setuptools.find_packages(),
install_requires=reqs,
entry_points={
"console_scripts": [
"tictactoe = rl.book.chapter_1.tictactoe.main:main",
"bandits = rl.book.chapter_2.main:main",
"rlgrid = rl.rlgrid.main:main",
]
},
)
| 25.241379
| 64
| 0.648907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 308
| 0.420765
|
a743058f6e943a66d50447c9ef87971c35895cc0
| 169
|
py
|
Python
|
taxcalc/tbi/__init__.py
|
ClarePan/Tax-Calculator
|
d2d6cb4b551f34017db7166d91d982b5c4670816
|
[
"CC0-1.0"
] | 1
|
2021-02-23T21:03:43.000Z
|
2021-02-23T21:03:43.000Z
|
taxcalc/tbi/__init__.py
|
ClarePan/Tax-Calculator
|
d2d6cb4b551f34017db7166d91d982b5c4670816
|
[
"CC0-1.0"
] | null | null | null |
taxcalc/tbi/__init__.py
|
ClarePan/Tax-Calculator
|
d2d6cb4b551f34017db7166d91d982b5c4670816
|
[
"CC0-1.0"
] | null | null | null |
from taxcalc.tbi.tbi import (run_nth_year_taxcalc_model,
run_nth_year_gdp_elast_model,
reform_warnings_errors)
| 42.25
| 58
| 0.585799
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a7433e8c895ee751d0a668a187a9eb4c45927efe
| 6,223
|
py
|
Python
|
mooc_access_number.py
|
mengshouer/mooc_access_number
|
8de596ce34006f1f8c5d0404f5e40546fb438b2a
|
[
"MIT"
] | 6
|
2020-05-12T14:36:17.000Z
|
2021-12-03T01:56:58.000Z
|
mooc_access_number.py
|
mengshouer/mooc_tools
|
8de596ce34006f1f8c5d0404f5e40546fb438b2a
|
[
"MIT"
] | 2
|
2020-05-11T06:21:13.000Z
|
2020-05-23T12:34:18.000Z
|
mooc_access_number.py
|
mengshouer/mooc_tools
|
8de596ce34006f1f8c5d0404f5e40546fb438b2a
|
[
"MIT"
] | 1
|
2020-05-11T04:19:15.000Z
|
2020-05-11T04:19:15.000Z
|
import requests,time,json,re,base64
requests.packages.urllib3.disable_warnings()
from io import BytesIO
from PIL import Image,ImageDraw,ImageChops
from lxml import etree
from urllib.parse import urlparse, parse_qs
username = "" #登录账号
password = "" #登录密码
s = requests.Session()
s.headers.update({'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.122 Safari/537.36'})
def login():
global uid,username,password
if(username == "" or password == ""):
username = input("登录账号:")
password = input("登录密码:")
    # Old endpoint, no longer works
#url="http://i.chaoxing.com/vlogin?passWord="+str(password)+"&userName="+str(username)
url = f'https://passport2-api.chaoxing.com/v11/loginregister?uname='+str(username)+'&code='+str(password)
res= s.get(url)
if("验证通过" in str(res.text)):
print('Login success!')
for key, value in res.cookies.items():
if key=="_uid":
uid=value
return s
else:
print(username,password)
print('账号密码有误,请重试。')
username = ""
password = ""
login()
'''
def captchalogin(username,password):
if(username == "" or password == ""):
username = input("登录账号:")
password = input("登录密码:")
    # The following two keys enable automatic captcha recognition; leave them empty to enter captchas manually.
    # Obtain them by creating an OCR application in Baidu Cloud's AI text recognition; accuracy is not guaranteed, but worth a try.
    APIKey = ""
    SecretKey = ""
    # The free tier has a daily API limit.
if(APIKey != "" or SecretKey != ""):
getkeyurl = f'https://aip.baidubce.com/oauth/2.0/token'
data = {
"grant_type" : "client_credentials",
"client_id" : APIKey,
"client_secret" : SecretKey
}
getkey = requests.post(getkeyurl,data).text
access_token = json.loads(getkey)["access_token"]
numcode = ""
while 1:
t = int(round(time.time()*1000))
codeurl = f'http://passport2.chaoxing.com/num/code?'+ str(t)
img_numcode = s.get(codeurl).content
img = base64.b64encode(img_numcode)
orcurl = f'https://aip.baidubce.com/rest/2.0/ocr/v1/accurate_basic?access_token='+access_token
data = {"image":img}
headers = {'content-type': 'application/x-www-form-urlencoded'}
captcha = requests.post(orcurl,data=data,headers=headers).text
numcodelen = json.loads(captcha)["words_result_num"]
if numcodelen == 0:
print("验证码识别错误,重新获取验证码识别")
time.sleep(1)
else:
numcode = json.loads(captcha)["words_result"][0]["words"]
numcode = re.sub("\D","",numcode)
if len(numcode) < 4:
print("验证码识别错误,重新获取验证码识别")
time.sleep(1)
else:
print("识别成功")
break
else:
t = int(round(time.time()*1000))
url = f'http://passport2.chaoxing.com/num/code?'+ str(t)
web = s.get(url,verify=False)
img = Image.open(BytesIO(web.content))
img.show()
numcode = input('验证码:')
url = 'http://passport2.chaoxing.com/login?refer=http://i.mooc.chaoxing.com'
data = {'refer_0x001': 'http%3A%2F%2Fi.mooc.chaoxing.com',
'pid':'-1',
'pidName':'',
'fid':'1467', #院校id 1467:a系统
'fidName':'',
'allowJoin':'0',
'isCheckNumCode':'1',
'f':'0',
'productid':'',
'uname':username,
'password':password,
'numcode':numcode,
'verCode':''
}
web = s.post(url,data=data,verify=False)
time.sleep(2)
if('账号管理' in str(web.text)):
print('Login success!')
return s
else:
print('账号密码或验证码有误,请重试。')
username = ""
password = ""
captchalogin(username,password)
'''
def getuserdata():
web = s.get('http://mooc1-1.chaoxing.com/visit/courses')
h1 = etree.HTML(web.text)
name = h1.xpath('//h3[@class = "clearfix"]/a/text()')
print("-----------课程名称-----------")
print(name)
print("------------------------------")
global count
try:
count
except NameError:
count_exist = False
else:
count_exist = True
if(count_exist):
pass
else:
if(len(name) == 1):
count = 0
else:
#count = 0
count = int(input("请用数字选择要访问的课程(从0开始):"))
geturl = h1.xpath('//div[@class = "Mcon1img httpsClass"]/a/@href')
i = 0
courseurl = []
for temp in range(0,len(geturl)):
if("course" in geturl[i]):
courseurl.append(geturl[i])
i += 1
url = 'https://mooc1-1.chaoxing.com' + courseurl[count]
url_query = urlparse(url).query
userdata = dict([(k, v[0]) for k, v in parse_qs(url_query).items()])
global cpi, enc, courseId, classId, encode
cpi = userdata["cpi"]
#enc = userdata["enc"]
courseId = userdata["courseid"]
classId = userdata["clazzid"]
web = s.get(url)
h2 = etree.HTML(web.text)
encodeurl = h2.xpath('//script[@type = "text/javascript"]/@src')
i=0
for temp in range(0,len(encodeurl)):
if("encode" in encodeurl[i]):
break
i += 1
url_query = urlparse(encodeurl[i]).query
userdata = dict([(k, v[0]) for k, v in parse_qs(url_query).items()])
encode = userdata["encode"]
def main():
getuserdata()
url = 'https://fystat-ans.chaoxing.com/log/setlog?personid='+cpi+'&courseId='+courseId+'&classId='+classId+'&encode=' +encode
i = 0
while 1:
web = s.get(url,verify=False)
time.sleep(5)
i+=1
print(i)
if(i == 500):
break
main()
if __name__ == "__main__":
print("登录成功后等待访问数慢慢增加,显示的数字并不代表访问数,只是用于计数")
try:
#captchalogin(username,password)
login()
main()
except:
print("登录信息尝试重新登录")
#captchalogin(username,password)
login()
main()
| 32.752632
| 151
| 0.527398
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,263
| 0.635226
|
a743c86ba9ec1ed2c5e5910bec35a0fda5523988
| 11,174
|
py
|
Python
|
tests/test_json_api.py
|
Padraic-O-Mhuiris/fava
|
797ae1ee1f7378c8e7347d2970fc52c4be366b01
|
[
"MIT"
] | null | null | null |
tests/test_json_api.py
|
Padraic-O-Mhuiris/fava
|
797ae1ee1f7378c8e7347d2970fc52c4be366b01
|
[
"MIT"
] | null | null | null |
tests/test_json_api.py
|
Padraic-O-Mhuiris/fava
|
797ae1ee1f7378c8e7347d2970fc52c4be366b01
|
[
"MIT"
] | null | null | null |
# pylint: disable=missing-docstring
from __future__ import annotations
import hashlib
from io import BytesIO
from pathlib import Path
from typing import Any
import pytest
from beancount.core.compare import hash_entry
from flask import url_for
from flask.testing import FlaskClient
from fava.context import g
from fava.core import FavaLedger
from fava.core.charts import PRETTY_ENCODER
from fava.core.misc import align
from fava.json_api import validate_func_arguments
from fava.json_api import ValidationError
dumps = PRETTY_ENCODER.encode
def test_validate_get_args() -> None:
def func(test: str):
assert test and isinstance(test, str)
validator = validate_func_arguments(func)
with pytest.raises(ValidationError):
validator({"notest": "value"})
assert validator({"test": "value"}) == ["value"]
def assert_api_error(response, msg: str | None = None) -> None:
"""Asserts that the response errored and contains the message."""
assert response.status_code == 200
assert not response.json["success"], response.json
if msg:
assert msg == response.json["error"]
def assert_api_success(response, data: Any | None = None) -> None:
"""Asserts that the request was successful and contains the data."""
assert response.status_code == 200
assert response.json["success"], response.json
if data:
assert data == response.json["data"]
def test_api_changed(app, test_client: FlaskClient) -> None:
with app.test_request_context("/long-example/"):
app.preprocess_request()
url = url_for("json_api.get_changed")
response = test_client.get(url)
assert_api_success(response, False)
def test_api_add_document(
app, test_client: FlaskClient, tmp_path, monkeypatch
) -> None:
with app.test_request_context("/long-example/"):
app.preprocess_request()
monkeypatch.setitem(g.ledger.options, "documents", [str(tmp_path)])
request_data = {
"folder": str(tmp_path),
"account": "Expenses:Food:Restaurant",
"file": (BytesIO(b"asdfasdf"), "2015-12-12 test"),
}
url = url_for("json_api.put_add_document")
response = test_client.put(url)
assert_api_error(response, "No file uploaded.")
filename = (
tmp_path / "Expenses" / "Food" / "Restaurant" / "2015-12-12 test"
)
response = test_client.put(url, data=request_data)
assert_api_success(response, f"Uploaded to {filename}")
assert Path(filename).is_file()
request_data["file"] = (BytesIO(b"asdfasdf"), "2015-12-12 test")
response = test_client.put(url, data=request_data)
assert_api_error(response, f"{filename} already exists.")
def test_api_errors(app, test_client: FlaskClient) -> None:
with app.test_request_context("/long-example/"):
app.preprocess_request()
url = url_for("json_api.get_errors")
response = test_client.get(url)
assert_api_success(response, 0)
def test_api_context(
app, test_client: FlaskClient, snapshot, example_ledger: FavaLedger
) -> None:
with app.test_request_context("/long-example/"):
app.preprocess_request()
url = url_for("json_api.get_context")
response = test_client.get(url)
assert_api_error(
response, "Invalid API request: Parameter `entry_hash` is missing."
)
url = url_for(
"json_api.get_context",
entry_hash=hash_entry(
next(
entry
for entry in example_ledger.all_entries
if entry.meta["lineno"] == 3732
)
),
)
response = test_client.get(url)
assert_api_success(response)
snapshot(response.json)
url = url_for(
"json_api.get_context",
entry_hash=hash_entry(example_ledger.entries[10]),
)
response = test_client.get(url)
assert_api_success(response)
assert not response.json.get("balances_before")
snapshot(response.json)
def test_api_payee_accounts(app, test_client: FlaskClient) -> None:
with app.test_request_context("/long-example/"):
app.preprocess_request()
url = url_for("json_api.get_payee_accounts", payee="test")
response = test_client.get(url)
assert_api_success(response, [])
def test_api_move(app, test_client: FlaskClient) -> None:
with app.test_request_context("/long-example/"):
app.preprocess_request()
url = url_for("json_api.get_move")
response = test_client.get(url)
assert_api_error(
response, "Invalid API request: Parameter `account` is missing."
)
def test_api_source_put(app, test_client: FlaskClient) -> None:
with app.test_request_context("/long-example/"):
app.preprocess_request()
url = url_for("json_api.put_source")
path = g.ledger.beancount_file_path
# test bad request
response = test_client.put(url)
assert_api_error(response, "Invalid JSON request.")
with open(path, encoding="utf-8") as file_handle:
payload = file_handle.read()
with open(path, mode="rb") as bfile_handle:
sha256sum = hashlib.sha256(bfile_handle.read()).hexdigest()
# change source
response = test_client.put(
url,
data=dumps(
{
"source": "asdf" + payload,
"sha256sum": sha256sum,
"file_path": path,
}
),
content_type="application/json",
)
with open(path, mode="rb") as bfile_handle:
sha256sum = hashlib.sha256(bfile_handle.read()).hexdigest()
assert_api_success(response, sha256sum)
# check if the file has been written
with open(path, encoding="utf-8") as file_handle:
assert file_handle.read() == "asdf" + payload
# write original source file
result = test_client.put(
url,
data=dumps(
{"source": payload, "sha256sum": sha256sum, "file_path": path}
),
content_type="application/json",
)
assert result.status_code == 200
with open(path, encoding="utf-8") as file_handle:
assert file_handle.read() == payload
def test_api_format_source(app, test_client: FlaskClient) -> None:
with app.test_request_context("/long-example/"):
app.preprocess_request()
url = url_for("json_api.put_format_source")
path = g.ledger.beancount_file_path
with open(path, encoding="utf-8") as file_handle:
payload = file_handle.read()
response = test_client.put(
url,
data=dumps({"source": payload}),
content_type="application/json",
)
assert_api_success(response, align(payload, 61))
def test_api_format_source_options(
app, test_client: FlaskClient, monkeypatch
) -> None:
with app.test_request_context("/long-example/"):
app.preprocess_request()
path = g.ledger.beancount_file_path
with open(path, encoding="utf-8") as file_handle:
payload = file_handle.read()
url = url_for("json_api.put_format_source")
monkeypatch.setattr(g.ledger.fava_options, "currency_column", 90)
response = test_client.put(
url,
data=dumps({"source": payload}),
content_type="application/json",
)
assert_api_success(response, align(payload, 90))
def test_api_add_entries(app, test_client: FlaskClient, tmp_path, monkeypatch):
with app.test_request_context("/long-example/"):
app.preprocess_request()
test_file = tmp_path / "test_file"
test_file.open("a")
monkeypatch.setattr(g.ledger, "beancount_file_path", str(test_file))
data = {
"entries": [
{
"type": "Transaction",
"date": "2017-12-12",
"flag": "*",
"payee": "Test3",
"narration": "",
"meta": {},
"postings": [
{
"account": "Assets:US:ETrade:Cash",
"amount": "100 USD",
},
{"account": "Assets:US:ETrade:GLD"},
],
},
{
"type": "Transaction",
"date": "2017-01-12",
"flag": "*",
"payee": "Test1",
"narration": "",
"meta": {},
"postings": [
{
"account": "Assets:US:ETrade:Cash",
"amount": "100 USD",
},
{"account": "Assets:US:ETrade:GLD"},
],
},
{
"type": "Transaction",
"date": "2017-02-12",
"flag": "*",
"payee": "Test",
"narration": "Test",
"meta": {},
"postings": [
{
"account": "Assets:US:ETrade:Cash",
"amount": "100 USD",
},
{"account": "Assets:US:ETrade:GLD"},
],
},
]
}
url = url_for("json_api.put_add_entries")
response = test_client.put(
url, data=dumps(data), content_type="application/json"
)
assert_api_success(response, "Stored 3 entries.")
assert (
test_file.read_text("utf-8")
== """
2017-01-12 * "Test1" ""
Assets:US:ETrade:Cash 100 USD
Assets:US:ETrade:GLD
2017-02-12 * "Test" "Test"
Assets:US:ETrade:Cash 100 USD
Assets:US:ETrade:GLD
2017-12-12 * "Test3" ""
Assets:US:ETrade:Cash 100 USD
Assets:US:ETrade:GLD
"""
)
@pytest.mark.parametrize(
"query_string,result_str",
[
("balances from year = 2014", "5086.65 USD"),
("nononono", "ERROR: Syntax error near"),
("select sum(day)", "43558"),
],
)
def test_api_query_result(
query_string, result_str, app, test_client: FlaskClient
) -> None:
with app.test_request_context("/long-example/"):
app.preprocess_request()
url = url_for("json_api.get_query_result", query_string=query_string)
response = test_client.get(url)
assert response.status_code == 200
assert result_str in response.get_data(True)
def test_api_query_result_filters(app, test_client: FlaskClient) -> None:
with app.test_request_context("/long-example/"):
app.preprocess_request()
url = url_for(
"json_api.get_query_result",
query_string="select sum(day)",
time="2021",
)
response = test_client.get(url)
assert response.status_code == 200
assert "6882" in response.get_data(True)
| 31.564972
| 79
| 0.578038
| 0
| 0
| 0
| 0
| 600
| 0.053696
| 0
| 0
| 2,586
| 0.23143
|
a747752e784483f13e0672fa7ef44261d743dd9f
| 403
|
py
|
Python
|
babybuddy/migrations/0017_promocode_max_usage_per_account.py
|
amcquistan/babyasst
|
310a7948f06b71ae0d62593a3b5932abfd4eb444
|
[
"BSD-2-Clause"
] | null | null | null |
babybuddy/migrations/0017_promocode_max_usage_per_account.py
|
amcquistan/babyasst
|
310a7948f06b71ae0d62593a3b5932abfd4eb444
|
[
"BSD-2-Clause"
] | null | null | null |
babybuddy/migrations/0017_promocode_max_usage_per_account.py
|
amcquistan/babyasst
|
310a7948f06b71ae0d62593a3b5932abfd4eb444
|
[
"BSD-2-Clause"
] | null | null | null |
# Generated by Django 2.2.6 on 2019-11-27 20:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('babybuddy', '0016_auto_20191127_1424'),
]
operations = [
migrations.AddField(
model_name='promocode',
name='max_usage_per_account',
field=models.IntegerField(default=1),
),
]
| 21.210526
| 49
| 0.615385
| 310
| 0.769231
| 0
| 0
| 0
| 0
| 0
| 0
| 117
| 0.290323
|
a74a33620df33a15eb19e53e0f2d731815811072
| 6,218
|
py
|
Python
|
tests/test_upload.py
|
LuminosoInsight/luminoso-api-client-python
|
bae7db9b02123718ded5a8345a860bd12680b367
|
[
"MIT"
] | 5
|
2016-09-14T02:02:30.000Z
|
2021-06-20T21:11:19.000Z
|
tests/test_upload.py
|
LuminosoInsight/luminoso-api-client-python
|
bae7db9b02123718ded5a8345a860bd12680b367
|
[
"MIT"
] | 29
|
2015-01-13T15:07:38.000Z
|
2021-06-14T21:03:06.000Z
|
tests/test_upload.py
|
LuminosoInsight/luminoso-api-client-python
|
bae7db9b02123718ded5a8345a860bd12680b367
|
[
"MIT"
] | 3
|
2016-03-07T13:04:34.000Z
|
2017-08-07T21:15:53.000Z
|
from luminoso_api.v5_client import LuminosoClient
from luminoso_api.v5_upload import create_project_with_docs, BATCH_SIZE
from unittest.mock import patch
import pytest
BASE_URL = 'http://mock-api.localhost/api/v5/'
DOCS_TO_UPLOAD = [
{'title': 'Document 1', 'text': 'Bonjour', 'extra': 'field'},
{'title': 'Document 2', 'text': 'Au revoir'},
]
DOCS_UPLOADED = [
{'title': 'Document 1', 'text': 'Bonjour', 'metadata': []},
{'title': 'Document 2', 'text': 'Au revoir', 'metadata': []},
]
REPETITIVE_DOC = {'title': 'Yadda', 'text': 'yadda yadda', 'metadata': []}
def _build_info_response(ndocs, language, done):
"""
Construct the expected response when we get the project's info after
requesting a build.
"""
response = {
'json': {
'project_id': 'projid',
'document_count': ndocs,
'language': language,
'last_build_info': {
'number': 1,
'start_time': 0.,
'stop_time': None,
},
}
}
if done:
response['json']['last_build_info']['success'] = True
response['json']['last_build_info']['stop_time'] = 1.
return response
def test_project_creation(requests_mock):
"""
Test creating a project by mocking what happens when it is successful.
"""
# First, configure what the mock responses should be:
# The initial response from creating the project
requests_mock.post(
BASE_URL + 'projects/',
json={
'project_id': 'projid',
'document_count': 0,
'language': 'fr',
'last_build_info': None,
},
)
# Empty responses from further build steps
requests_mock.post(BASE_URL + 'projects/projid/upload/', json={})
requests_mock.post(BASE_URL + 'projects/projid/build/', json={})
# Build status response, which isn't done yet the first time it's checked,
# and is done the second time
requests_mock.get(
BASE_URL + 'projects/projid/',
[
_build_info_response(2, 'fr', done=False),
_build_info_response(2, 'fr', done=True),
],
)
# Now run the main uploader function and get the result
client = LuminosoClient.connect(BASE_URL, token='fake')
with patch('time.sleep', return_value=None):
response = create_project_with_docs(
client,
DOCS_TO_UPLOAD,
language='fr',
name='Projet test',
progress=False,
)
# Test that the right sequence of requests happened
history = requests_mock.request_history
assert history[0].method == 'POST'
assert history[0].url == BASE_URL + 'projects/'
params = history[0].json()
assert params['name'] == 'Projet test'
assert params['language'] == 'fr'
assert history[1].method == 'POST'
assert history[1].url == BASE_URL + 'projects/projid/upload/'
params = history[1].json()
assert params['docs'] == DOCS_UPLOADED
assert history[2].method == 'POST'
assert history[2].url == BASE_URL + 'projects/projid/build/'
assert history[2].json() == {}
assert history[3].method == 'GET'
assert history[3].url == BASE_URL + 'projects/projid/'
assert history[4].method == 'GET'
assert history[4].url == BASE_URL + 'projects/projid/'
assert len(history) == 5
assert response['last_build_info']['success']
def test_missing_text(requests_mock):
"""
Test a project that fails to be created, on the client side, because a bad
document is supplied.
"""
# The initial response from creating the project
requests_mock.post(
BASE_URL + 'projects/',
json={
'project_id': 'projid',
'document_count': 0,
'language': 'en',
'last_build_info': None,
},
)
with pytest.raises(ValueError):
client = LuminosoClient.connect(BASE_URL, token='fake')
create_project_with_docs(
client,
[{'bad': 'document'}],
language='en',
name='Bad project test',
progress=False,
)
def test_pagination(requests_mock):
"""
Test that we can create a project whose documents would be broken into
multiple pages, and when we iterate over its documents, we correctly
request all the pages.
"""
# The initial response from creating the project
requests_mock.post(
BASE_URL + 'projects/',
json={
'project_id': 'projid',
'document_count': 0,
'language': 'fr',
'last_build_info': None,
},
)
# Empty responses from further build steps
requests_mock.post(BASE_URL + 'projects/projid/upload/', json={})
requests_mock.post(BASE_URL + 'projects/projid/build/', json={})
ndocs = BATCH_SIZE + 2
# Build status response, which isn't done yet the first or second time
# it's checked, and is done the third time
requests_mock.get(
BASE_URL + 'projects/projid/',
[
_build_info_response(ndocs, 'fr', done=False),
_build_info_response(ndocs, 'fr', done=False),
_build_info_response(ndocs, 'fr', done=True),
],
)
# Now run the main uploader function and get the result
client = LuminosoClient.connect(BASE_URL, token='fake')
with patch('time.sleep', return_value=None):
create_project_with_docs(
client,
[REPETITIVE_DOC] * (BATCH_SIZE + 2),
language='fr',
name='Projet test',
progress=False,
)
# Test that the right sequence of requests happened, this time just as
# a list of URLs
history = requests_mock.request_history
reqs = [(req.method, req.url) for req in history]
assert reqs == [
('POST', BASE_URL + 'projects/'),
('POST', BASE_URL + 'projects/projid/upload/'),
('POST', BASE_URL + 'projects/projid/upload/'),
('POST', BASE_URL + 'projects/projid/build/'),
('GET', BASE_URL + 'projects/projid/'),
('GET', BASE_URL + 'projects/projid/'),
('GET', BASE_URL + 'projects/projid/'),
]
| 31.72449
| 78
| 0.595529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,525
| 0.406079
|
a74ad7dc8ca825fa0b64d0132540f37da6f4e67a
| 1,259
|
py
|
Python
|
src/oca_github_bot/webhooks/on_command.py
|
eLBati/oca-github-bot
|
4fa974f8ec123c9ccfd7bcad22e4baa939c985ac
|
[
"MIT"
] | null | null | null |
src/oca_github_bot/webhooks/on_command.py
|
eLBati/oca-github-bot
|
4fa974f8ec123c9ccfd7bcad22e4baa939c985ac
|
[
"MIT"
] | null | null | null |
src/oca_github_bot/webhooks/on_command.py
|
eLBati/oca-github-bot
|
4fa974f8ec123c9ccfd7bcad22e4baa939c985ac
|
[
"MIT"
] | null | null | null |
# Copyright (c) initOS GmbH 2019
# Distributed under the MIT License (http://opensource.org/licenses/MIT).
from ..commands import CommandError, parse_commands
from ..config import OCABOT_EXTRA_DOCUMENTATION, OCABOT_USAGE
from ..router import router
from ..tasks.add_pr_comment import add_pr_comment
@router.register("issue_comment", action="created")
async def on_command(event, gh, *args, **kwargs):
"""On issue comment creation, parse any bot commands and dispatch them."""
if not event.data["issue"].get("pull_request"):
# ignore issue comments
return
org, repo = event.data["repository"]["full_name"].split("/")
pr = event.data["issue"]["number"]
username = event.data["comment"]["user"]["login"]
text = event.data["comment"]["body"]
try:
for command in parse_commands(text):
command.delay(org, repo, pr, username)
except CommandError as e:
# Add a comment on the current PR, if
# the command were misunderstood by the bot
add_pr_comment.delay(
org,
repo,
pr,
f"Hi @{username}. Your command failed:\n\n"
f"``{e}``.\n\n"
f"{OCABOT_USAGE}\n\n"
f"{OCABOT_EXTRA_DOCUMENTATION}",
)
| 34.972222
| 73
| 0.629071
| 0
| 0
| 0
| 0
| 956
| 0.759333
| 904
| 0.71803
| 505
| 0.401112
|
a74cb2eb35421327d8faf002d2a0cd393a5579ab
| 1,151
|
py
|
Python
|
splitListToParts.py
|
pflun/learningAlgorithms
|
3101e989488dfc8a56f1bf256a1c03a837fe7d97
|
[
"MIT"
] | null | null | null |
splitListToParts.py
|
pflun/learningAlgorithms
|
3101e989488dfc8a56f1bf256a1c03a837fe7d97
|
[
"MIT"
] | null | null | null |
splitListToParts.py
|
pflun/learningAlgorithms
|
3101e989488dfc8a56f1bf256a1c03a837fe7d97
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def splitListToParts(self, root, k):
res = []
size = 0
traverse = root
while traverse:
size += 1
traverse = traverse.next
# Build a queue, e.g. [4, 3, 3], giving how many nodes to take in each round
d, r = divmod(size, k)
queue = []
for _ in range(k):
queue.append(d)
for i in range(r):
queue[i] += 1
# Take this round's step count from the queue, collect nodes into tmp while walking, then append tmp to res
for q in queue:
tmp = []
for _ in range(q):
tmp.append(root)
root = root.next
res.append(tmp)
return res
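# --- Illustrative sketch (not part of the original solution) ---
# A standalone check of the queue-building step used above: divmod spreads
# `size` nodes over k parts, each part gets d nodes and the first r parts get
# one extra node.
def _part_sizes(size, k):
    d, r = divmod(size, k)
    sizes = [d] * k
    for i in range(r):
        sizes[i] += 1
    return sizes

assert _part_sizes(10, 3) == [4, 3, 3]
assert _part_sizes(5, 3) == [2, 2, 1]
assert _part_sizes(2, 5) == [1, 1, 0, 0, 0]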
head = ListNode(1)
p1 = ListNode(2)
p2 = ListNode(3)
p3 = ListNode(4)
p4 = ListNode(5)
p5 = ListNode(6)
p6 = ListNode(7)
p7 = ListNode(8)
p8 = ListNode(9)
p9 = ListNode(10)
head.next = p1
p1.next = p2
p2.next = p3
p3.next = p4
p4.next = p5
p5.next = p6
p6.next = p7
p7.next = p8
p8.next = p9
test = Solution()
print test.splitListToParts(head, 3)
| 20.553571
| 47
| 0.536056
| 815
| 0.664222
| 0
| 0
| 0
| 0
| 0
| 0
| 205
| 0.167074
|
a74cdb915e99b5e47e7fb18dd30921d17381a256
| 7,744
|
py
|
Python
|
pmlearn/mixture/tests/test_dirichlet_process.py
|
john-veillette/pymc-learn
|
267b0084438616b869866194bc167c332c3e3547
|
[
"BSD-3-Clause"
] | 187
|
2018-10-16T02:33:51.000Z
|
2022-03-27T14:06:36.000Z
|
pmlearn/mixture/tests/test_dirichlet_process.py
|
john-veillette/pymc-learn
|
267b0084438616b869866194bc167c332c3e3547
|
[
"BSD-3-Clause"
] | 20
|
2018-10-31T15:13:29.000Z
|
2022-01-20T18:54:00.000Z
|
pmlearn/mixture/tests/test_dirichlet_process.py
|
john-veillette/pymc-learn
|
267b0084438616b869866194bc167c332c3e3547
|
[
"BSD-3-Clause"
] | 20
|
2018-10-19T21:32:06.000Z
|
2022-02-07T06:04:55.000Z
|
import unittest
import shutil
import tempfile
import numpy as np
# import pandas as pd
# import pymc3 as pm
# from pymc3 import summary
# from sklearn.mixture import BayesianGaussianMixture as skBayesianGaussianMixture
from sklearn.model_selection import train_test_split
from pmlearn.exceptions import NotFittedError
from pmlearn.mixture import DirichletProcessMixture
class DirichletProcessMixtureTestCase(unittest.TestCase):
def setUp(self):
self.num_truncate = 3
self.num_components = 3
self.num_pred = 1
self.num_training_samples = 100
self.pi = np.array([0.35, 0.4, 0.25])
self.means = np.array([0, 5, 10])
self.sigmas = np.array([0.5, 0.5, 1.0])
self.components = np.random.randint(0,
self.num_components,
self.num_training_samples)
X = np.random.normal(loc=self.means[self.components],
scale=self.sigmas[self.components])
X.shape = (self.num_training_samples, 1)
self.X_train, self.X_test = train_test_split(X, test_size=0.3)
self.test_DPMM = DirichletProcessMixture()
self.test_nuts_DPMM = DirichletProcessMixture()
self.test_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.test_dir)
# class DirichletProcessMixtureFitTestCase(DirichletProcessMixtureTestCase):
# def test_advi_fit_returns_correct_model(self):
# # This print statement ensures PyMC3 output won't overwrite the test name
# print('')
# self.test_DPMM.fit(self.X_train)
#
# self.assertEqual(self.num_pred, self.test_DPMM.num_pred)
# self.assertEqual(self.num_components, self.test_DPMM.num_components)
# self.assertEqual(self.num_truncate, self.test_DPMM.num_truncate)
#
# self.assertAlmostEqual(self.pi[0],
# self.test_DPMM.summary['mean']['pi__0'],
# 0)
# self.assertAlmostEqual(self.pi[1],
# self.test_DPMM.summary['mean']['pi__1'],
# 0)
# self.assertAlmostEqual(self.pi[2],
# self.test_DPMM.summary['mean']['pi__2'],
# 0)
#
# self.assertAlmostEqual(
# self.means[0],
# self.test_DPMM.summary['mean']['cluster_center_0__0'],
# 0)
# self.assertAlmostEqual(
# self.means[1],
# self.test_DPMM.summary['mean']['cluster_center_1__0'],
# 0)
# self.assertAlmostEqual(
# self.means[2],
# self.test_DPMM.summary['mean']['cluster_center_2__0'],
# 0)
#
# self.assertAlmostEqual(
# self.sigmas[0],
# self.test_DPMM.summary['mean']['cluster_variance_0__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[1],
# self.test_DPMM.summary['mean']['cluster_variance_1__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[2],
# self.test_DPMM.summary['mean']['cluster_variance_2__0'],
# 0)
#
# def test_nuts_fit_returns_correct_model(self):
# # This print statement ensures PyMC3 output won't overwrite the test name
# print('')
# self.test_nuts_DPMM.fit(self.X_train,
# inference_type='nuts',
# inference_args={'draws': 1000,
# 'chains': 2})
#
# self.assertEqual(self.num_pred, self.test_nuts_DPMM.num_pred)
# self.assertEqual(self.num_components, self.test_nuts_DPMM.num_components)
# self.assertEqual(self.num_components, self.test_nuts_DPMM.num_truncate)
#
# self.assertAlmostEqual(self.pi[0],
# self.test_nuts_DPMM.summary['mean']['pi__0'],
# 0)
# self.assertAlmostEqual(self.pi[1],
# self.test_nuts_DPMM.summary['mean']['pi__1'],
# 0)
# self.assertAlmostEqual(self.pi[2],
# self.test_nuts_DPMM.summary['mean']['pi__2'],
# 0)
#
# self.assertAlmostEqual(
# self.means[0],
# self.test_nuts_DPMM.summary['mean']['cluster_center_0__0'],
# 0)
# self.assertAlmostEqual(
# self.means[1],
# self.test_nuts_DPMM.summary['mean']['cluster_center_1__0'],
# 0)
# self.assertAlmostEqual(
# self.means[2],
# self.test_nuts_DPMM.summary['mean']['cluster_center_2__0'],
# 0)
#
# self.assertAlmostEqual(
# self.sigmas[0],
# self.test_nuts_DPMM.summary['mean']['cluster_variance_0__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[1],
# self.test_nuts_DPMM.summary['mean']['cluster_variance_1__0'],
# 0)
# self.assertAlmostEqual(
# self.sigmas[2],
# self.test_nuts_DPMM.summary['mean']['cluster_variance_2__0'],
# 0)
#
#
class DirichletProcessMixturePredictTestCase(DirichletProcessMixtureTestCase):
# def test_predict_returns_predictions(self):
# print('')
# self.test_DPMM.fit(self.X_train, self.y_train)
# preds = self.test_DPMM.predict(self.X_test)
# self.assertEqual(self.y_test.shape, preds.shape)
# def test_predict_returns_mean_predictions_and_std(self):
# print('')
# self.test_DPMM.fit(self.X_train, self.y_train)
# preds, stds = self.test_DPMM.predict(self.X_test, return_std=True)
# self.assertEqual(self.y_test.shape, preds.shape)
# self.assertEqual(self.y_test.shape, stds.shape)
def test_predict_raises_error_if_not_fit(self):
print('')
with self.assertRaises(NotFittedError) as no_fit_error:
test_DPMM = DirichletProcessMixture()
test_DPMM.predict(self.X_train)
expected = 'Run fit on the model before predict.'
self.assertEqual(str(no_fit_error.exception), expected)
# class DirichletProcessMixtureScoreTestCase(DirichletProcessMixtureTestCase):
# def test_score_matches_sklearn_performance(self):
# print('')
# skDPMM = skBayesianGaussianMixture(n_components=3)
# skDPMM.fit(self.X_train)
# skDPMM_score = skDPMM.score(self.X_test)
#
# self.test_DPMM.fit(self.X_train)
# test_DPMM_score = self.test_DPMM.score(self.X_test)
#
# self.assertAlmostEqual(skDPMM_score, test_DPMM_score, 0)
#
#
# class DirichletProcessMixtureSaveAndLoadTestCase(DirichletProcessMixtureTestCase):
# def test_save_and_load_work_correctly(self):
# print('')
# self.test_DPMM.fit(self.X_train)
# score1 = self.test_DPMM.score(self.X_test)
# self.test_DPMM.save(self.test_dir)
#
# DPMM2 = DirichletProcessMixture()
# DPMM2.load(self.test_dir)
#
# self.assertEqual(self.test_DPMM.inference_type, DPMM2.inference_type)
# self.assertEqual(self.test_DPMM.num_pred, DPMM2.num_pred)
# self.assertEqual(self.test_DPMM.num_training_samples,
# DPMM2.num_training_samples)
# self.assertEqual(self.test_DPMM.num_truncate, DPMM2.num_truncate)
#
# pd.testing.assert_frame_equal(summary(self.test_DPMM.trace),
# summary(DPMM2.trace))
#
# score2 = DPMM2.score(self.X_test)
# self.assertAlmostEqual(score1, score2, 0)
| 38.914573
| 84
| 0.594008
| 1,995
| 0.257619
| 0
| 0
| 0
| 0
| 0
| 0
| 5,949
| 0.768208
|
a74d82ac6813ed8153326a2d69c62b3256148e18
| 1,096
|
py
|
Python
|
algorithms/utils.py
|
billvb/oblio-game
|
c1c95b9d7bffe4e2841a978e4338cf72c38174ac
|
[
"MIT"
] | 2
|
2016-03-20T03:03:18.000Z
|
2021-02-15T22:23:44.000Z
|
algorithms/utils.py
|
billvb/oblio-game
|
c1c95b9d7bffe4e2841a978e4338cf72c38174ac
|
[
"MIT"
] | null | null | null |
algorithms/utils.py
|
billvb/oblio-game
|
c1c95b9d7bffe4e2841a978e4338cf72c38174ac
|
[
"MIT"
] | null | null | null |
import random
TUPLE_SIZE = 4
DIGIT_BASE = 10
MAX_GUESS = DIGIT_BASE ** TUPLE_SIZE
def yield_all():
for i in xrange(DIGIT_BASE ** TUPLE_SIZE):
tup = tuple([int(x) for x in '%04d' % i])
assert len(tup) == TUPLE_SIZE
for l in tup:
if tup.count(l) != 1:
break
else:
yield OblioTuple(tup)
def weighted_dice_roll(weight_map, exclusions):
# Actually, this does an UNWEIGHTED dice roll. Never got around to doing weighted.
# Don't think it would matter much anyway.
new_map = {k: v for k, v in weight_map.iteritems() if k not in exclusions}
return new_map.keys()[random.randint(0, len(new_map) - 1)]
class OblioTuple(tuple):
@staticmethod
def get_random():
pile = range(0, DIGIT_BASE)
secret = []
for i in xrange(0, TUPLE_SIZE):
r = random.randint(0, len(pile) - 1)
secret.append(pile[r])
del pile[r]
# Assert that the tuple contains 4 distinct digits
assert len(list(set(secret))) == 4
return OblioTuple(secret)
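# --- Illustrative sketch (not part of the original module) ---
# yield_all() enumerates every 4-digit tuple with distinct digits, so it yields
# exactly 10 * 9 * 8 * 7 = 5040 candidates, and any secret drawn by
# OblioTuple.get_random() is among them. Runs under Python 2, matching the
# xrange/iteritems usage above.
if __name__ == '__main__':
    candidates = [tuple(c) for c in yield_all()]
    assert len(candidates) == 5040
    secret = OblioTuple.get_random()
    assert tuple(secret) in candidates
    print 'secret:', secret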
| 28.842105
| 86
| 0.603102
| 406
| 0.370438
| 277
| 0.252737
| 377
| 0.343978
| 0
| 0
| 180
| 0.164234
|
a74d8736deea9179712853219ede84e9608d42dd
| 1,276
|
py
|
Python
|
utils/utils.py
|
cheng052/H3DNet
|
872dabb37d8c2ca3581cf4e242014e6464debe57
|
[
"MIT"
] | 212
|
2020-06-11T01:03:36.000Z
|
2022-03-17T17:29:21.000Z
|
utils/utils.py
|
cheng052/H3DNet
|
872dabb37d8c2ca3581cf4e242014e6464debe57
|
[
"MIT"
] | 25
|
2020-06-15T13:35:13.000Z
|
2022-03-10T05:44:05.000Z
|
utils/utils.py
|
cheng052/H3DNet
|
872dabb37d8c2ca3581cf4e242014e6464debe57
|
[
"MIT"
] | 24
|
2020-06-11T01:17:24.000Z
|
2022-03-30T13:34:45.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
def conv3x3x3(in_planes, out_planes, stride):
# 3x3x3 convolution with padding
return nn.Conv3d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=1)
def upconv3x3x3(in_planes, out_planes, stride):
return nn.ConvTranspose3d(
in_planes,
out_planes,
kernel_size=3,
stride=1,
padding=1,
output_padding=1)
def conv_block_3d(in_dim, out_dim, activation):
return nn.Sequential(
nn.Conv3d(in_dim, out_dim, kernel_size=3, stride=1, padding=1),
nn.BatchNorm3d(out_dim),
activation,)
def conv_trans_block_3d(in_dim, out_dim, activation, stride=2):
return nn.Sequential(
nn.ConvTranspose3d(in_dim, out_dim, kernel_size=3, stride=stride, padding=1, output_padding=1),
nn.BatchNorm3d(out_dim),
activation,)
def max_pooling_3d():
return nn.MaxPool3d(kernel_size=2, stride=2, padding=0)
def conv_block_2_3d(in_dim, out_dim, activation, stride=1):
return nn.Sequential(
conv_block_3d(in_dim, out_dim, activation),
nn.Conv3d(out_dim, out_dim, kernel_size=3, stride=stride, padding=1),
nn.BatchNorm3d(out_dim),)
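# --- Illustrative usage sketch (not part of the original module) ---
# Quick shape check of the helpers above: an encoder block plus max-pooling
# halves the spatial size, and the stride-2 transposed-conv block brings it
# back. The tensor sizes below are made up for the demonstration.
if __name__ == "__main__":
    x = torch.randn(1, 4, 8, 8, 8)                                # (batch, channels, D, H, W)
    down = conv_block_2_3d(4, 8, nn.ReLU())(x)                    # -> (1, 8, 8, 8, 8)
    pooled = max_pooling_3d()(down)                               # -> (1, 8, 4, 4, 4)
    up = conv_trans_block_3d(8, 4, nn.ReLU(), stride=2)(pooled)   # -> (1, 4, 8, 8, 8)
    assert up.shape == (1, 4, 8, 8, 8)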
| 27.73913
| 103
| 0.670063
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 32
| 0.025078
|
a74f41b8c63e9716f46430fe18d6b543d0682cb3
| 8,258
|
py
|
Python
|
device/app.py
|
panjanek/IotCenter
|
e139617d14617c10a18c35515e2d3aaae797bcac
|
[
"MIT"
] | 2
|
2016-12-12T15:16:16.000Z
|
2018-10-30T02:35:36.000Z
|
device/app.py
|
panjanek/IotCenter
|
e139617d14617c10a18c35515e2d3aaae797bcac
|
[
"MIT"
] | null | null | null |
device/app.py
|
panjanek/IotCenter
|
e139617d14617c10a18c35515e2d3aaae797bcac
|
[
"MIT"
] | null | null | null |
import logging
import threading
import json
import base64
import os
from subprocess import Popen
import glob
import time
import urllib2
import re
import string
import datetime
class DeviceHandler:
logger = logging.getLogger()
def __init__(self, config):
self.service = None
self.tunnel = None
self.video = None
self.config = config
self.first = True
self.counter = 1
self.uploadfile = '/tmp/upload.txt'
def start(self):
self.logger.info("starting device handler")
def getMessagePayload(self):
self.logger.debug("Preparing client->device message payload")
gputemp = os.popen("vcgencmd measure_temp").readline().replace("temp=","").replace("'C","")
cputemp = os.popen("cat /sys/class/thermal/thermal_zone0/temp").readline()
payloadDict = {"values":{}}
payloadDict["mid"] = self.counter
self.counter += 1
payloadDict["values"]["status"] = 1
payloadDict["values"]["gpu_temp"] = float(gputemp)
payloadDict["values"]["cpu_temp"] = float(cputemp) / 1000
log = self.getLogToUpload()
if log is not None:
payloadDict["log"] = log
payload = json.dumps(payloadDict)
return payload
def getLogToUpload(self):
log = None
if self.first:
self.first = False
with open(self.uploadfile, "a") as upfile:
upfile.write("First message, communication started\n")
uploadfiletmp = self.uploadfile + ".tmp"
if os.path.exists(self.uploadfile) and os.path.getsize(self.uploadfile) > 0:
with open(self.uploadfile, 'r+') as upfile:
content = upfile.read()
upfile.truncate(0)
self.logger.info("found log data to upload: {0}, moving to {1}".format(content, uploadfiletmp))
with open(uploadfiletmp, "a") as tmpfile:
tmpfile.write(content)
if os.path.exists(uploadfiletmp) and os.path.getsize(uploadfiletmp) > 0:
with open(uploadfiletmp, 'r') as tmpfile:
toupload = tmpfile.read()
log = toupload
return log
def handleServerCall(self, payload):
self.logger.info("Handling server callback with payload {0}".format(payload))
payloadDict = json.loads(payload)
if "ack" in payloadDict:
mid = payloadDict["ack"]
self.logger.info("received ack for mid {0}".format(mid))
uploadfiletmp = self.uploadfile + ".tmp"
if mid == self.counter - 1 and os.path.exists(uploadfiletmp) and os.path.getsize(uploadfiletmp) > 0:
self.logger.info("Removing file {0}".format(uploadfiletmp))
os.remove(uploadfiletmp)
if "command" in payloadDict:
command = payloadDict["command"]
self.logger.info("Received command: {0}".format(command))
if command == "blink":
self.logger.info("Blinking status LED")
os.system("echo none | sudo tee /sys/class/leds/led0/trigger")
os.system("echo 1 | sudo tee /sys/class/leds/led0/brightness")
time.sleep(0.5)
os.system("echo 0 | sudo tee /sys/class/leds/led0/brightness")
time.sleep(0.5)
os.system("echo 1 | sudo tee /sys/class/leds/led0/brightness")
time.sleep(0.5)
os.system("echo 0 | sudo tee /sys/class/leds/led0/brightness")
time.sleep(0.5)
os.system("echo 1 | sudo tee /sys/class/leds/led0/brightness")
time.sleep(0.5)
os.system("echo 0 | sudo tee /sys/class/leds/led0/brightness")
time.sleep(0.5)
os.system("echo 1 | sudo tee /sys/class/leds/led0/brightness")
elif command == "reboot":
self.logger.info("REBOOT!!!")
os.system("sudo reboot")
elif command == "photo":
quality = payloadDict.get("quality", "sd")
self.logger.info("Taking {0} photo".format(quality))
photoFile = "/tmp/snapshot_{0}.jpg".format(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
if quality == "hd":
os.system("raspistill -hf -t 1000 -o {0}".format(photoFile))
else:
os.system("raspistill -hf -t 1000 -w 640 -h 480 -o {0}".format(photoFile))
with open(photoFile, mode='rb') as file:
photoData = file.read()
base64data = base64.b64encode(photoData)
self.service.sendMessage(json.dumps({'image':base64data, 'type':'jpg'}))
elif command == "relay":
state = payloadDict.get("state", 1)
self.logger.info("Changing relay state to: {0}".format(state))
os.system("curl {0}/?relay={1}".format(relay1_addr, state))
elif command == "light":
state = payloadDict.get("state", 1)
self.logger.info("Changing light state to: {0}".format(state))
if state == 0:
led_rgb(0,0,0)
else:
led_rgb(1,1,0)
elif command == "tunnel":
if self.tunnel:
self.logger.warning("Tunnel already active - ignoring command")
else:
remotePort = payloadDict.get("remotePort", 18888)
localPort = payloadDict.get("localPort", 22)
addr = payloadDict["addr"]
self.startTunnel(remotePort, localPort, addr)
elif command == "video":
if self.tunnel:
self.logger.warning("Tunnel already active - ignoring command")
else:
port = payloadDict.get("port", 8081)
addr = payloadDict["addr"]
self.startVideo(port, addr)
elif command == "tunnel-close":
if self.tunnel:
self.logger.info("terminating tunnel process")
self.tunnel.kill()
self.tunnel = None
else:
self.logger.warning("no tunnel process active, ignoring command")
if self.video:
self.logger.info("terminating video process")
self.video.kill()
self.video = None
else:
self.logger.info("Command '{0}' unknown".format(command))
def startTunnel(self, remotePort, localPort, addr):
sshPrivateKeyFile = self.config.get('client', 'sshPrivateKeyFile')
self.logger.info("Opening SSH tunneling session for remotePort={0}, localPort={1}, addr={2} using privateKey={3}".format(remotePort, localPort, addr, sshPrivateKeyFile))
cmd = "/usr/bin/ssh -o BatchMode=yes -o StrictHostKeyChecking=no -i {0} -N -R {1}:localhost:{2} {3}".format(sshPrivateKeyFile, remotePort, localPort, addr)
self.logger.info("Starting process: {0}".format(cmd))
self.tunnel = Popen(cmd.split())
self.logger.info("SSH tunneling process started")
def startVideo(self, port, addr):
sshPrivateKeyFile = self.config.get('client', 'sshPrivateKeyFile')
self.logger.info("Starting video streaming session")
self.logger.info("loading driver bcm2835-v4l2")
os.system("sudo modprobe bcm2835-v4l2")
time.sleep(0.5)
cmdVideo = "sudo motion"
self.logger.info("Starting processes: {0}".format(cmdVideo))
self.video = Popen(cmdVideo.split())
cmdTunnel = "sudo /usr/bin/ssh -o BatchMode=yes -o StrictHostKeyChecking=no -i {0} -N -R {1}:localhost:8081 {2}".format(sshPrivateKeyFile, port, addr)
self.logger.info("Starting processes: {0}".format(cmdTunnel))
self.tunnel = Popen(cmdTunnel.split())
self.logger.info("SSH video tunneling session started")
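# --- Illustrative sketch (not part of the original handler) ---
# handleServerCall() above expects a JSON payload from the server. Judging from
# the branches it checks, server messages look roughly like the samples below;
# the concrete values (message id, ports, "user@host") are made up.
if __name__ == "__main__":
    import json
    sample_messages = [
        {"ack": 42},                                  # acknowledge message id 42
        {"command": "blink"},                         # blink the status LED
        {"command": "photo", "quality": "hd"},        # take an HD snapshot
        {"command": "relay", "state": 0},             # switch the relay off
        {"command": "tunnel", "remotePort": 18888,
         "localPort": 22, "addr": "user@host"},       # open a reverse SSH tunnel
        {"command": "tunnel-close"},                  # tear the tunnel down
    ]
    payloads = [json.dumps(message) for message in sample_messages]
    assert len(payloads) == len(sample_messages)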
| 47.45977
| 177
| 0.552434
| 8,073
| 0.977597
| 0
| 0
| 0
| 0
| 0
| 0
| 2,109
| 0.255389
|
a74fb2c9000b17ff11193cacddad30429c023b4c
| 7,882
|
py
|
Python
|
deepsource/utils.py
|
vafaei-ar/deepsource
|
cbb06f5a2105506b63539ae5bfe73a3e62d4055f
|
[
"BSD-3-Clause"
] | null | null | null |
deepsource/utils.py
|
vafaei-ar/deepsource
|
cbb06f5a2105506b63539ae5bfe73a3e62d4055f
|
[
"BSD-3-Clause"
] | 1
|
2020-12-15T10:03:50.000Z
|
2020-12-16T10:39:00.000Z
|
deepsource/utils.py
|
vafaei-ar/deepsource
|
cbb06f5a2105506b63539ae5bfe73a3e62d4055f
|
[
"BSD-3-Clause"
] | 2
|
2019-09-02T10:24:22.000Z
|
2021-03-30T01:29:03.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from skimage import draw
from skimage import measure
from astropy.io import fits
from astropy import units as u
from astropy import wcs, coordinates
from scipy.ndimage.filters import gaussian_filter
def standard(X):
"""
standard : This function rescales the data to the range 0 to 1.
Arguments:
X (numpy array) : input data.
--------
Returns:
standard data.
"""
xmin = X.min()
X = X-xmin
xmax = X.max()
X = X/xmax
return X
def fetch_data(image_file,model_file,do_standard=True,ignore_error=False):
"""
fetch_data : This function reads image and model.
Arguments:
image_file (string) : path to image file.
model_file (string) : path to model file.
do_standard (logical) (default=True) : if true, minimum/maximum value of image will be set to 0/1.
--------
Returns:
image, x coordinates, y coordinates
"""
with fits.open(image_file) as hdulist:
data = hdulist[0].data
header = hdulist[0].header
lx = header['NAXIS1']
ly = header['NAXIS2']
coord_sys = wcs.WCS(header)
model_file = model_file
sources = np.loadtxt(model_file, dtype={'names': ('name', 'ra', 'dec', 'I'),
'formats': ('S10', 'f4', 'f4', 'f4')})
ra, dec = sources['ra'],sources['dec']
num_sources = len(ra)
radec_coords = coordinates.SkyCoord(ra, dec, unit='deg', frame='fk5')
coords_ar = np.vstack([radec_coords.ra*u.deg, radec_coords.dec*u.deg,
np.zeros(num_sources), np.zeros(num_sources)]).T
xy_coords = coord_sys.wcs_world2pix(coords_ar, 0)
x_coords, y_coords = xy_coords[:,0], xy_coords[:,1]
filt = (0<=x_coords) & (x_coords<lx) & (0<=y_coords) & (y_coords<ly)
if ignore_error:
x_coords, y_coords = x_coords[filt], y_coords[filt]
else:
assert np.sum(filt)==num_sources,'There are some sources out of images! The problem might be in coordinate conversion system or simulation!'
if do_standard==True:
data = standard(data)
return np.moveaxis(data, 0, -1), x_coords, y_coords
def fetch_data_3ch(image_file,model_file,do_standard=True):
"""
fetch_data_3ch : This function reads the images for the three robust weightings together with the model.
Arguments:
image_file (string) : path to robust 0 image file.
model_file (string) : path to model file.
do_standard (logical) (default=True) : if true, minimum/maximum value of image will be set to 0/1.
--------
Returns:
image, x coordinates, y coordinates
"""
data0, x_coords, y_coords = fetch_data(image_file,model_file,do_standard=do_standard)
# lx,ly = data0[0,:,:,0].shape
try:
data1, x_coords, y_coords = fetch_data(image_file.replace('robust-0','robust-1'),model_file,do_standard=do_standard)
except:
assert 0,'Robust 1 does not exist.'
try:
data2, x_coords, y_coords = fetch_data(image_file.replace('robust-0','robust-2'),model_file,do_standard=do_standard)
except:
assert 0,'Robust 2 does not exist.'
return np.concatenate((data0,data1,data2), axis=-1), x_coords, y_coords
def cat2map(lx,ly,x_coords,y_coords):
"""
cat2map : This function converts a catalog to a 0/1 map whose pixels represent background/point sources.
Arguments:
lx (int): number of pixels of the image in first dimension.
ly (int): number of pixels of the image in second dimension.
x_coords (numpy array): list of the first dimension of point source positions.
y_coords (numpy array): list of the second dimension of point source positions.
--------
Returns:
catalog image as nupmy array.
"""
cat = np.zeros((lx,ly))
for i,j in zip(x_coords.astype(int), y_coords.astype(int)):
cat[j, i] = 1
return cat
def magnifier(y,radius=15,value=1):
"""
magnifier : This function expands every pixel with value one into a filled circle of a given value.
Arguments:
y : input 2D map.
radius (int) (default=15) : radius of magnification.
value (float) (default=True) : the value you want to use in magnified pixels.
--------
Returns:
image with magnified objects as numpy array.
"""
mag = np.zeros(y.shape)
for i,j in np.argwhere(y==1):
rr, cc = draw.circle(i, j, radius=radius, shape=mag.shape)
mag[rr, cc] = value
return mag
def circle(y,radius=15):
"""
circle : This function draws a circle around every pixel with value one.
Arguments:
y (numpy array): input 2D map.
radius (int) (default=15): circle radius.
--------
Returns:
image with circles around objects.
"""
mag = np.zeros(y.shape)
for i,j in np.argwhere(y==1):
rr, cc = draw.circle_perimeter(i, j, radius=radius, shape=mag.shape)
mag[rr, cc] = 1
return mag
def horn_kernel(y,radius=10,step_height=1):
"""
horn_kernel : Horn shape kernel.
Arguments:
y (numpy array): input 2D map.
radius (int) (default=10): effective radius of kernel.
--------
Returns:
kerneled image.
"""
mag = np.zeros(y.shape)
for r in range(1,radius):
for i,j in np.argwhere(y==1):
rr, cc = draw.circle(i, j, radius=r, shape=mag.shape)
mag[rr, cc] += 1.*step_height/radius
return mag
def gaussian_kernel(y,sigma=7):
"""
gaussian_kernel: Gaussian filter.
Arguments:
y (numpy array): input 2D map.
sigma (float) (default=7): effective length of Gaussian smoothing.
--------
Returns:
kerneled image.
"""
return gaussian_filter(y, sigma)
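# --- Illustrative sketch (not part of the original module) ---
# Tiny end-to-end demo of the catalogue helpers above: place two fake sources on
# a 64x64 grid, blow them up with magnifier(), and smooth the result with
# gaussian_kernel(). Coordinates are made up; it assumes the same (older)
# scikit-image version this module targets, where draw.circle exists.
if __name__ == '__main__':
    demo_x = np.array([10.0, 40.0])
    demo_y = np.array([20.0, 50.0])
    cat = cat2map(64, 64, demo_x, demo_y)        # 0/1 map with two hot pixels
    blobs = magnifier(cat, radius=5)             # discs of radius 5 around them
    smooth = gaussian_kernel(blobs, sigma=3)     # smoothed target map
    print(int(cat.sum()), int(blobs.sum()), round(float(smooth.sum()), 2))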
def ch_mkdir(directory):
"""
ch_mkdir : This function creates a directory if it does not exist.
Arguments:
directory (string): Path to the directory.
--------
Returns:
null.
"""
if not os.path.exists(directory):
os.makedirs(directory)
def the_print(text,style='bold',tc='gray',bgc='red'):
"""
Prints text with the given style, text colour and background colour.
"""
colors = ['black','red','green','yellow','blue','purple','skyblue','gray']
if style == 'bold':
style = 1
elif style == 'underlined':
style = 4
else:
style = 0
fg = 30+colors.index(tc)
bg = 40+colors.index(bgc)
form = ';'.join([str(style), str(fg), str(bg)])
print('\x1b[%sm %s \x1b[0m' % (form, text))
#def ps_extract(xp):
# xp = xp-xp.min()
# xp = xp/xp.max()
# nb = []
# for trsh in np.linspace(0,0.2,200):
# blobs = measure.label(xp>trsh)
# nn = np.unique(blobs).shape[0]
# nb.append(nn)
# nb = np.array(nb)
# nb = np.diff(nb)
# trshs = np.linspace(0,0.2,200)[:-1]
# thrsl = trshs[~((-5<nb) & (nb<5))]
# if thrsl.shape[0]==0:
# trsh = 0.1
# else:
# trsh = thrsl[-1]
#2: 15, 20
#3: 30,10
#4: 50, 10
# nnp = 0
# for tr in np.linspace(1,0,1000):
# blobs = measure.label(xp>tr)
# nn = np.unique(blobs).shape[0]
# if nn-nnp>50:
# break
# nnp = nn
# trsh = tr
# blobs = measure.label(xp>trsh)
# xl = []
# yl = []
# pl = []
# for v in np.unique(blobs)[1:]:
# filt = blobs==v
# pnt = np.round(np.mean(np.argwhere(filt),axis=0)).astype(int)
# if filt.sum()>10:
# xl.append(pnt[1])
# yl.append(pnt[0])
# pl.append(np.mean(xp[blobs==v]))
# return np.array([xl,yl]).T,np.array(pl)
| 29.520599
| 148
| 0.586399
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,345
| 0.551256
|
a74fd79fe36c35a1329c69bf98a54c22cc8f9a55
| 12,349
|
py
|
Python
|
ftc/lib/net/network.py
|
efulet/ann_text_classification
|
fba05a1789a19aa6d607ee36069dda419bb98e28
|
[
"MIT"
] | null | null | null |
ftc/lib/net/network.py
|
efulet/ann_text_classification
|
fba05a1789a19aa6d607ee36069dda419bb98e28
|
[
"MIT"
] | null | null | null |
ftc/lib/net/network.py
|
efulet/ann_text_classification
|
fba05a1789a19aa6d607ee36069dda419bb98e28
|
[
"MIT"
] | null | null | null |
"""
@created_at 2015-01-18
@author Exequiel Fuentes Lettura <efulet@gmail.com>
"""
from pybrain.datasets import ClassificationDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure.modules import SoftmaxLayer
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.utilities import percentError
from pybrain.tools.validation import Validator
# Only needed for data generation and graphical output
import pylab as pl
import numpy as np
# Only needed for saving and loading trained network
import pickle
import os
from lib.util import SystemUtils
from network_exception import NetworkException
class Network:
"""Feed-forward neural-network classifier built on top of PyBrain."""
# Define the split proportion into 75% training and 25% test data sets
SPLIT_PROPORTION = 0.25
# Define 5 hidden units
HIDDEN_NEURONS = 5
# Define the momentum, which is the ratio by which the gradient of the last
# timestep is used
MOMENTUM = 0.1
# Weightdecay corresponds to the weightdecay rate, where 0 is no weight decay at all.
WEIGHTDECAY = 0.01
# Define epochs
EPOCHS = 100
def __init__(self, input, classes, options, logger=None):
"""
:param input: Dataset
:param classes: Class values
:param options: Option values
:param logger: logger object [optional]
"""
if input == None or len(input) == 0:
raise NetworkException("Empty dataset")
self._input = input
if classes == None or len(classes) == 0:
raise NetworkException("Empty class vector")
self._classes = classes
self._options = options
if self._options.hidden_neurons:
self._hidden_neurons = self._options.hidden_neurons
else:
self._hidden_neurons = self.HIDDEN_NEURONS
if self._options.momentum:
self._momentum = self._options.momentum
else:
self._momentum = self.MOMENTUM
if self._options.weightdecay:
self._weightdecay = self._options.weightdecay
else:
self._weightdecay = self.WEIGHTDECAY
if self._options.epochs:
self._epochs = self._options.epochs
else:
self._epochs = self.EPOCHS
if self._options.verbose:
self._verbose = True
else:
self._verbose = False
self._logger = logger or SystemUtils().configure_log()
self._dataset = None
self._X_train = None
self._X_test = None
self._feed_forward_network = None
self._X_train_results = []
self._X_test_results = []
def fit(self):
"""
Fit network using PyBrain library
"""
# Create the dataset
# http://pybrain.org/docs/api/datasets/classificationdataset.html
self._dataset = ClassificationDataSet(len(self._input[0][0]), 1, \
nb_classes=len(self._classes), \
class_labels=self._classes)
# Add samples
# http://pybrain.org/docs/tutorial/fnn.html
for sample in self._input:
self._dataset.addSample(sample[0], [sample[1]])
# Print statistics
#print self._dataset.calculateStatistics()
# Randomly split the dataset into 75% training and 25% test data sets.
# Of course, we could also have created two different datasets to begin with.
self._X_test, self._X_train = self._dataset.splitWithProportion(self.SPLIT_PROPORTION)
# For neural network classification, it is highly advisable to encode
# classes with one output neuron per class. Note that this operation
# duplicates the original targets and stores them in an (integer) field
# named 'class'.
self._X_train._convertToOneOfMany()
self._X_test._convertToOneOfMany()
if self._verbose:
# Test our dataset by printing a little information about it.
self._logger.info("Number of training patterns: %4d" % len(self._X_train))
self._logger.info("Input dimensions: %4d" % self._X_train.indim)
self._logger.info("Output dimensions: %4d" % self._X_train.outdim)
#print "First sample (input, target, class):"
#print self._X_train['input'][0], self._X_train['target'][0], self._X_train['class'][0]
# Now build a feed-forward network with 5 hidden units. We use the shortcut
# buildNetwork() for this. The input and output layer size must match
# the dataset's input and target dimension. You could add additional
# hidden layers by inserting more numbers giving the desired layer sizes.
# The output layer uses a softmax function because we are doing classification.
# There are more options to explore here, e.g. try changing the hidden
# layer transfer function to linear instead of (the default) sigmoid.
self._feed_forward_network = buildNetwork(self._X_train.indim, \
self._hidden_neurons, \
self._X_train.outdim, \
outclass=SoftmaxLayer)
# Set up a trainer that basically takes the network and training dataset
# as input. We are using a BackpropTrainer for this.
trainer = BackpropTrainer(self._feed_forward_network, dataset=self._X_train, \
momentum=self._momentum, verbose=self._verbose, \
weightdecay=self._weightdecay)
# Start the training iterations
epoch_results = []
train_error_results = []
test_error_results = []
for i in xrange(self._epochs):
# Train the network for some epochs. Usually you would set something
# like 5 here, but for visualization purposes we do this one epoch
# at a time.
trainer.trainEpochs(1)
# http://pybrain.org/docs/api/supervised/trainers.html
X_train_result = percentError(trainer.testOnClassData(), self._X_train['class'])
X_test_result = percentError(trainer.testOnClassData(dataset=self._X_test), self._X_test['class'])
# Store the results
epoch_results.append(trainer.totalepochs)
train_error_results.append(X_train_result)
test_error_results.append(X_test_result)
if (trainer.totalepochs == 1 or trainer.totalepochs % 10 == 0 or \
trainer.totalepochs == self._epochs) and self._verbose:
self._logger.info("Epoch: %4d" % trainer.totalepochs +
" Train error: %5.2f%%" % X_train_result +
" Test error: %5.2f%%" % X_test_result)
# Now, plot the train and test data
pl.figure(1)
pl.ioff() # interactive graphics off
pl.clf() # clear the plot
pl.hold(True) # overplot on
pl.plot(epoch_results, train_error_results, 'b',
epoch_results, test_error_results, 'r')
pl.xlabel('Epoch number')
pl.ylabel('Error')
pl.legend(['Training result', 'Test result'])
pl.title('Training/Test results')
pl.ion() # interactive graphics on
pl.draw() # update the plot
if self._verbose:
# Print network coefs
#self._logger.info(self._feed_forward_network['in'].outputbuffer[self._feed_forward_network['in'].offset])
#self._logger.info(self._feed_forward_network['hidden0'].outputbuffer[self._feed_forward_network['hidden0'].offset])
#self._logger.info(self._feed_forward_network['out'].outputbuffer[self._feed_forward_network['out'].offset])
# Finally, keep showing the plot.
pl.ioff()
# Store the results
self._X_train_results = (epoch_results, train_error_results)
self._X_test_results = (epoch_results, test_error_results)
def predict(self, validation_dataset):
"""
Generate predictions
:param validation_dataset: Validation dataset
"""
y_pred = []
for i in xrange(len(validation_dataset)):
output = self._feed_forward_network.activate(validation_dataset[i][0])
class_index = max(xrange(len(output)), key=output.__getitem__)
y_pred.append(class_index)
return y_pred
def classification_performance(self, output, target):
"""
Returns the hit rate of the outputs compared to the targets.
http://pybrain.org/docs/api/tools.html#pybrain.tools.validation.Validator.classificationPerformance
"""
return Validator.classificationPerformance(np.array(output), np.array(target))
def explained_sum_squares(self, output, target):
"""
Returns the explained sum of squares (ESS).
http://pybrain.org/docs/api/tools.html#pybrain.tools.validation.Validator.ESS
"""
return Validator.ESS(np.array(output), np.array(target))
def mean_squared_error(self, output, target):
"""
Returns the mean squared error. The multidimensional arrays will get
flattened in order to compare them.
http://pybrain.org/docs/api/tools.html#pybrain.tools.validation.Validator.MSE
"""
return Validator.MSE(np.array(output), np.array(target))
def show_plot(self):
pl.show()
def show_error(self):
"""
Show training and test process versus epochs
"""
pl.figure(1)
pl.plot(self._X_train_results[0], self._X_train_results[1], 'b',
self._X_test_results[0], self._X_test_results[1], 'r')
pl.xlabel('Epoch number')
pl.ylabel('Error')
pl.legend(['Training result', 'Test result'])
pl.title('Training/Test results')
pl.draw()
def show_layer(self):
"""
Show network layers in text format
"""
for mod in self._feed_forward_network.modules:
print "Module:", mod.name
if mod.paramdim > 0:
print "--parameters:", mod.params
for conn in self._feed_forward_network.connections[mod]:
print "-connection to", conn.outmod.name
if conn.paramdim > 0:
print "- parameters", conn.params
if hasattr(self._feed_forward_network, "recurrentConns"):
print "Recurrent connections"
for conn in self._feed_forward_network.recurrentConns:
print "-", conn.inmod.name, " to", conn.outmod.name
if conn.paramdim > 0:
print "- parameters", conn.params
def save(self, file_path):
"""
Save network
"""
try:
file_net = None
file_net = open(file_path, 'w')
pickle.dump(self._feed_forward_network, file_net)
except Exception, err:
raise NetworkException(str(err))
finally:
if file_net != None:
file_net.close()
def load(self, file_path):
"""
Load network from file
"""
try:
file_net = None
if os.path.isfile(file_path) == False:
raise NetworkException("No such file: " + file_path)
file_net = open(file_path,'r')
self._feed_forward_network = pickle.load(file_net)
except Exception, err:
raise NetworkException(str(err))
finally:
if file_net != None:
file_net.close()
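# --- Illustrative sketch (not part of the original class) ---
# fit() above wires together the usual PyBrain pipeline:
# ClassificationDataSet -> buildNetwork -> BackpropTrainer. A stripped-down,
# standalone version of that pipeline on a toy two-class problem; the toy data
# and the 10-epoch budget are made up for illustration (Python 2, like the rest
# of this module).
if __name__ == '__main__':
    ds = ClassificationDataSet(2, 1, nb_classes=2)
    for a, b, label in [(0., 0., 0), (0., 1., 1), (1., 0., 1), (1., 1., 0)]:
        ds.addSample([a, b], [label])
    ds._convertToOneOfMany()
    net = buildNetwork(ds.indim, 5, ds.outdim, outclass=SoftmaxLayer)
    trainer = BackpropTrainer(net, dataset=ds, momentum=0.1, weightdecay=0.01)
    for _ in xrange(10):
        trainer.trainEpochs(1)
    print "train error: %5.2f%%" % percentError(trainer.testOnClassData(), ds['class'])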
| 38.711599
| 128
| 0.581667
| 11,709
| 0.948174
| 0
| 0
| 0
| 0
| 0
| 0
| 4,274
| 0.346101
|
a75006d06757a5f27ac00ff68ada7211ab1bbdc4
| 342
|
py
|
Python
|
python2/probe_yd.py
|
Nzen/run_ydl
|
90d7075ba8ec5771b5edcbe2ad52211d95546f83
|
[
"WTFPL"
] | null | null | null |
python2/probe_yd.py
|
Nzen/run_ydl
|
90d7075ba8ec5771b5edcbe2ad52211d95546f83
|
[
"WTFPL"
] | null | null | null |
python2/probe_yd.py
|
Nzen/run_ydl
|
90d7075ba8ec5771b5edcbe2ad52211d95546f83
|
[
"WTFPL"
] | null | null | null |
from sys import argv
from subprocess import call
try :
link = argv[ 1 ]
except IndexError:
link = raw_input( " - which url interests you? " )
try:
ydl_answ = call( "youtube-dl -F "+ link, shell = True )
if ydl_answ != 0:
print "-- failed "+ link + " code "+ str(ydl_answ)
except OSError as ose :
print "Execution failed:", ose
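# --- Illustrative sketch (not part of the original script) ---
# Building the command as a shell string means characters such as '&' in the URL
# are interpreted by the shell. A list-based variant avoids that; shown as a
# helper the script does not call (same youtube-dl assumption as above):
def probe_formats(url):
    # no shell involved, so the URL reaches youtube-dl verbatim
    return call(["youtube-dl", "-F", url])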
| 21.375
| 56
| 0.663743
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 85
| 0.248538
|
a7538f1279770f7607c3e20bb1757708788234b0
| 9,689
|
py
|
Python
|
src/cogs/welcome.py
|
Cr4zi/SynatxBot
|
eeb59555c1cfa81e05c924b84c601c0b240e5ee3
|
[
"MIT"
] | 4
|
2021-08-12T08:11:21.000Z
|
2021-08-12T08:15:22.000Z
|
src/cogs/welcome.py
|
Cr4zi/SynatxBot
|
eeb59555c1cfa81e05c924b84c601c0b240e5ee3
|
[
"MIT"
] | null | null | null |
src/cogs/welcome.py
|
Cr4zi/SynatxBot
|
eeb59555c1cfa81e05c924b84c601c0b240e5ee3
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands
from discord import Embed
from discord.utils import get
import datetime
import psycopg2
from bot import DB_NAME, DB_PASS, DB_HOST, DB_USER, logger, private_message
class Welcome(commands.Cog):
def __init__(self, bot):
self.bot = bot
logger.info("Welcome Cog loaded")
@commands.group(invoke_without_command=True, description="Welcome group command")
@commands.check(private_message)
async def welcome(self, ctx):
await ctx.send("Welcome commands: channel, set_message, message_type and auto_role")
@welcome.command(name='channel', aliases=['setChannel'], description="Setting the welcome channel where all the welcome messages will be sent.")
@commands.has_permissions(manage_guild=True)
@commands.check(private_message)
async def channel(self, ctx):
async def ask_channel():
def check(m):
return m.author.id == ctx.author.id and m.channel.id == ctx.channel.id
try:
msg = await self.bot.wait_for("message", check=check)
return await commands.TextChannelConverter().convert(ctx, msg.content)
except commands.errors.ChannelNotFound as e:
await ctx.send(f"Invalid channel `{e.argument}`. Please enter a channel name again.")
return await ask_channel()
await ctx.send("Please enter the channel name where all the welcome messages will be sent.")
channel = await ask_channel()
main = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PASS, host=DB_HOST)
cur = main.cursor()
cur.execute(f"SELECT channel_id FROM welcome WHERE guild_id = '{ctx.guild.id}'")
result = cur.fetchone()
if result is None:
sql = ("INSERT INTO welcome(guild_id, channel_id) VALUES(%s,%s)")
val = (str(ctx.guild.id), channel.id)
else:
sql = ("UPDATE welcome SET channel_id = %s WHERE guild_id = %s")
val = (channel.id, str(ctx.guild.id))
cur.execute(sql, val)
main.commit()
cur.close()
main.close()
await ctx.send(f"Set welcome channel to: {channel.mention}")
@welcome.command(name='set_message', aliases=['set_msg', 'setMessage', 'text', 'message'], description="Setting the welcome message.")
@commands.has_permissions(manage_guild=True)
@commands.check(private_message)
async def set_message(self, ctx):
async def ask_msg():
def check(m):
return m.author.id == ctx.author.id and m.channel.id == ctx.channel.id
message = await self.bot.wait_for("message", check=check)
return message.content
await ctx.send("Please enter the message that will show up for every welcome message.")
message = await ask_msg()
main = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PASS, host=DB_HOST)
cur = main.cursor()
cur.execute(f"SELECT msg FROM welcome WHERE guild_id = '{ctx.guild.id}'")
result = cur.fetchone()
if result is None:
sql = ("INSERT INTO welcome(guild_id, msg) VALUES(%s,%s)")
val = (str(ctx.guild.id), message)
else:
sql = ("UPDATE welcome SET msg = %s WHERE guild_id = %s")
val = (message, str(ctx.guild.id))
cur.execute(sql, val)
main.commit()
cur.close()
main.close()
await ctx.send(f"Message has been set: `{message}`")
@welcome.command(name='message_type', aliases=['msg_type', 'MessageType', 'type'], description="Setting the welcome message type: embed or text.")
@commands.has_permissions(manage_guild=True)
@commands.check(private_message)
async def message_type(self, ctx):
async def ask_type():
def check(m):
return m.author.id == ctx.author.id and m.channel.id == ctx.channel.id
msg_type = await self.bot.wait_for("message", check=check)
if msg_type.content.lower() != "embed" and msg_type.content.lower() != "text":
await ctx.send("Invalid type! There are only 2 message types: embed and text. Please enter a message type again.")
return await ask_type()
return msg_type.content.lower()
await ctx.send("Please enter the message type that will show up for every welcome message. (text | embed)")
msg_type = await ask_type()
main = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PASS, host=DB_HOST)
cur = main.cursor()
cur.execute(f"SELECT msg_type FROM welcome WHERE guild_id = '{ctx.guild.id}'")
result = cur.fetchone()
if result is None:
sql = ("INSERT INTO welcome(guild_id, msg_type) VALUES(%s,%s)")
val = (str(ctx.guild.id), msg_type)
else:
sql = ("UPDATE welcome SET msg_type = %s WHERE guild_id = %s")
val = (msg_type, str(ctx.guild.id))
cur.execute(sql, val)
main.commit()
cur.close()
main.close()
await ctx.send(f"Welcome message type has been set: `{msg_type}`")
@welcome.command(name='auto_role', aliases=['auto_roles', 'autoRoles'], description="Setting the auto roles that will be given to new users.")
@commands.has_permissions(manage_guild=True)
@commands.check(private_message)
async def auto_role(self, ctx, *, roles_id=None):
main = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PASS, host=DB_HOST)
cur = main.cursor()
cur.execute(f"SELECT roles_id FROM welcome WHERE guild_id = '{ctx.guild.id}'")
result = cur.fetchone()
if result is None:
cur.execute(f"INSERT INTO welcome(roles_id, guild_id) VALUES(%s,%s)", (f"{roles_id}", f"{ctx.guild.id}"))
else:
cur.execute("UPDATE welcome SET roles_id = %s WHERE guild_id = %s", (f"{roles_id}", f"{ctx.guild.id}"))
main.commit()
cur.close()
main.close()
await ctx.send(f"Welcome roles id's set to: {roles_id}")
@commands.Cog.listener()
async def on_member_join(self, member):
main = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PASS, host=DB_HOST)
cur = main.cursor()
cur.execute(f"SELECT channel_id FROM welcome WHERE guild_id = '{member.guild.id}'")
result = cur.fetchone()
if result is None:
return
else:
cur.execute(f"SELECT roles_id FROM welcome WHERE guild_id = '{member.guild.id}'")
result3 = cur.fetchone()
try:
for role_id in result3[0].split(" "):
role = get(member.guild.roles, id=int(role_id))
await member.add_roles(role)
            except (AttributeError, ValueError):
                # no roles configured for this guild, or a stored role id is invalid
                pass
members_count = member.guild.member_count
mention = member.mention
user = member.name
guild = member.guild.name
cur.execute(f"SELECT msg_type FROM welcome WHERE guild_id = '{member.guild.id}'")
result1 = cur.fetchone()
cur.execute(f"SELECT msg FROM welcome WHERE guild_id = '{member.guild.id}'")
result2 = cur.fetchone()
if result1 is None:
if result2 is None:
embed = Embed(
description=f"Welcome to my server! your are the {member.guild.member_count} member in the server")
else:
embed = Embed(
description=f"{result2[0]}".format(
members_count=members_count, mention=mention, user=user, guild=guild))
embed.set_thumbnail(url=f"{member.avatar_url}")
embed.set_author(name=f"{member.name}", icon_url=f"{member.avatar_url}")
embed.set_footer(text=f"{member.guild}", icon_url=f"{member.guild.icon_url}")
embed.timestamp = datetime.datetime.utcnow()
channel = self.bot.get_channel(id=int(result[0]))
await channel.send(embed=embed)
else:
if result1[0].lower() == "embed":
if result2 is None:
embed = Embed(
description=f"Welcome to my server! your are the {member.guild.member_count} member in the server")
else:
embed = Embed(
description=f"{result2[0]}".format(
members_count=members_count, mention=mention, user=user, guild=guild))
embed.set_thumbnail(url=f"{member.avatar_url}")
embed.set_author(name=f"{member.name}", icon_url=f"{member.avatar_url}")
embed.set_footer(text=f"{member.guild}", icon_url=f"{member.guild.icon_url}")
embed.timestamp = datetime.datetime.utcnow()
channel = self.bot.get_channel(id=int(result[0]))
await channel.send(embed=embed)
else:
channel = self.bot.get_channel(id=int(result[0]))
await channel.send(f"{result2[0]}".format(members_count=members_count, mention=mention, user=user, guild=guild))
cur.close()
main.close()
def setup(bot):
bot.add_cog(Welcome(bot))
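# Illustrative sketch (not part of the cog above): stored welcome messages are
# rendered with str.format() in on_member_join, so they may use the placeholders
# members_count, mention, user and guild. The example template below is
# hypothetical; only the placeholder names come from the code.
EXAMPLE_WELCOME_TEMPLATE = "Hey {mention}, welcome to {guild}! You are member #{members_count}."
# EXAMPLE_WELCOME_TEMPLATE.format(members_count=42, mention="@newcomer", user="newcomer", guild="My Server")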
| 46.581731
| 146
| 0.58489
| 9,412
| 0.971411
| 0
| 0
| 9,239
| 0.953556
| 8,157
| 0.841883
| 2,520
| 0.260089
|
a7551310f1a028ec26dd2191bdc424bc482a29c5
| 468
|
py
|
Python
|
etcd_restore_rebuild_util/edit_yaml_for_rebuild.py
|
Cray-HPE/utils
|
dd6e13b46500e1c2f6ad887a8c1604044465d1d8
|
[
"MIT"
] | null | null | null |
etcd_restore_rebuild_util/edit_yaml_for_rebuild.py
|
Cray-HPE/utils
|
dd6e13b46500e1c2f6ad887a8c1604044465d1d8
|
[
"MIT"
] | null | null | null |
etcd_restore_rebuild_util/edit_yaml_for_rebuild.py
|
Cray-HPE/utils
|
dd6e13b46500e1c2f6ad887a8c1604044465d1d8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import sys
import yaml
file_name = sys.argv[1]
file_name = '/root/etcd/' + file_name + '.yaml'
with open(file_name) as f:
    y = yaml.safe_load(f)
del y['metadata']['creationTimestamp']
del y['metadata']['generation']
del y['metadata']['resourceVersion']
del y['metadata']['uid']
del y['status']
with open(file_name, 'w') as outputFile:
    yaml.dump(y, outputFile, default_flow_style=False, sort_keys=False)
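# Usage sketch (the resource name below is hypothetical): the script takes the
# manifest name as its only argument, reads /root/etcd/<name>.yaml, strips the
# cluster-specific metadata deleted above, and rewrites the file in place.
#
#   python3 edit_yaml_for_rebuild.py cray-example-etcd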
| 22.285714
| 70
| 0.67094
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 146
| 0.311966
|
a75582560560cf86bc8bb8744feee3c442ea60e2
| 1,514
|
py
|
Python
|
src/Segmentation/segmentation.py
|
odigous-labs/video-summarization
|
c125bf9fa1016d76680d5e9389e4bdb0f83bc4fb
|
[
"MIT"
] | 1
|
2019-03-05T06:00:38.000Z
|
2019-03-05T06:00:38.000Z
|
src/Segmentation/segmentation.py
|
odigous-labs/video-summarization
|
c125bf9fa1016d76680d5e9389e4bdb0f83bc4fb
|
[
"MIT"
] | 2
|
2019-03-02T05:12:59.000Z
|
2019-09-26T17:03:56.000Z
|
src/Segmentation/segmentation.py
|
odigous-labs/video-summarization
|
c125bf9fa1016d76680d5e9389e4bdb0f83bc4fb
|
[
"MIT"
] | null | null | null |
import os
import cv2
from Segmentation import CombinedHist, get_histograms, HistQueue
import matplotlib.pyplot as plt
import numpy as np
listofFiles = os.listdir('generated_frames')
# change the size of queue accordingly
queue_of_hists = HistQueue.HistQueue(25)
x = []
y_r = []
y_g = []
y_b = []
def compare(current_hist, frame_no):
avg_histr = queue_of_hists.getAverageHist()
red_result = cv2.compareHist(current_hist.getRedHistr(), avg_histr.getRedHistr(), 0)
green_result = cv2.compareHist(current_hist.getGreenHistr(), avg_histr.getGreenHistr(), 0)
blue_result = cv2.compareHist(current_hist.getBlueHistr(), avg_histr.getBlueHistr(), 0)
    x.append(frame_no)  # use the frame number passed in rather than the loop global
y_r.append(red_result)
y_g.append(green_result)
y_b.append(blue_result)
# print(red_result)
for i in range(0, 4000):
blue_histr, green_histr, red_histr = get_histograms.get_histograms('generated_frames/frame' + str(i) + ".jpg")
hist_of_image = CombinedHist.CombinedHist(blue_histr, green_histr, red_histr)
compare(hist_of_image, i)
queue_of_hists.insert_histr(hist_of_image)
print("frame" + str(i) + ".jpg")
fig = plt.figure(figsize=(18, 5))
y = np.add(np.add(y_r, y_g), y_b) / 3
value = np.percentile(y, 5)
median = np.median(y)
minimum = np.amin(y)
y_sorted = np.sort(y)
getting_index = y_sorted[8]
print("quartile" + str(value))
print("median" + str(median))
plt.plot(x, y, color='k')
plt.axhline(y=value, color='r', linestyle='-')
plt.xticks(np.arange(min(x), max(x) + 1, 100.0))
plt.show()
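# Minimal sketch of the core comparison used above, assuming two extracted
# frames exist on disk (the file names are placeholders). cv2.compareHist with
# method 0 (correlation) returns values close to 1.0 for visually similar frames.
img_a = cv2.imread('generated_frames/frame0.jpg')
img_b = cv2.imread('generated_frames/frame1.jpg')
hist_a = cv2.calcHist([img_a], [2], None, [256], [0, 256])  # red channel (BGR order)
hist_b = cv2.calcHist([img_b], [2], None, [256], [0, 256])
print("red-channel correlation:", cv2.compareHist(hist_a, hist_b, 0))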
| 29.115385
| 114
| 0.718626
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 145
| 0.095773
|
a755b8f4c107bcf90ce08cbfeeeaa2d842ac3f66
| 12,369
|
py
|
Python
|
stickerbot.py
|
gumblex/stickerindexbot
|
8e8edaabac54d2747e4b620464670a60a65efcb5
|
[
"MIT"
] | 1
|
2017-01-20T18:11:46.000Z
|
2017-01-20T18:11:46.000Z
|
stickerbot.py
|
gumblex/stickerindexbot
|
8e8edaabac54d2747e4b620464670a60a65efcb5
|
[
"MIT"
] | null | null | null |
stickerbot.py
|
gumblex/stickerindexbot
|
8e8edaabac54d2747e4b620464670a60a65efcb5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Telegram Sticker Index Bot
'''
import re
import sys
import time
import json
import queue
import sqlite3
import logging
import requests
import functools
import threading
import collections
import concurrent.futures
import zhconv
logging.basicConfig(stream=sys.stderr, format='%(asctime)s [%(name)s:%(levelname)s] %(message)s', level=logging.DEBUG if sys.argv[-1] == '-v' else logging.INFO)
logger_botapi = logging.getLogger('botapi')
executor = concurrent.futures.ThreadPoolExecutor(5)
HSession = requests.Session()
_re_one_emoji = (
'[🇦-🇿]|'
'(?:(?:[🌀-🏺]|[🐀-🙏]|[🚀-\U0001f6ff]|[\U0001f900-\U0001f9ff])[🏻-🏿]?\u200d)*'
'(?:[🌀-🏺]|[🐀-🙏]|[🚀-\U0001f6ff]|[\U0001f900-\U0001f9ff])[🏻-🏿]?'
)
re_emoji = re.compile('(%s)' % _re_one_emoji)
re_qtag = re.compile(r"#?\w+", re.UNICODE)
re_tag = re.compile(r"#\w+", re.UNICODE)
re_tags = re.compile(r"#\w+(?:\s+#\w+)*", re.UNICODE)
class Sticker(collections.namedtuple('Sticker', 'file_id width height emoji file_size')):
@classmethod
def from_telegram(cls, sticker):
return cls(
sticker['file_id'],
sticker['width'],
sticker['height'],
sticker.get('emoji'),
sticker.get('file_size')
)
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class SQLiteStateStore(collections.UserDict):
TABLE = 'bot_status'
def __init__(self, connection):
self.conn = connection
cur = self.conn.cursor()
data = {k: json.loads(v) for k,v in cur.execute(
'SELECT key, value FROM ' + self.TABLE)}
super().__init__(data)
def commit(self):
cur = self.conn.cursor()
for k, v in self.data.items():
cur.execute('REPLACE INTO %s (key, value) VALUES (?,?)' % self.TABLE,
(k, json.dumps(v)))
self.conn.commit()
def close(self):
self.commit()
def nt_from_dict(nt, d, default=None):
kwargs = dict.fromkeys(nt._fields, default)
kwargs.update(d)
return nt(**kwargs)
# Bot API
class BotAPIFailed(Exception):
pass
def async_func(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
def func_noerr(*args, **kwargs):
try:
func(*args, **kwargs)
except Exception:
logger_botapi.exception('Async function failed.')
executor.submit(func_noerr, *args, **kwargs)
return wrapped
def bot_api(method, **params):
for att in range(3):
try:
req = HSession.post(('https://api.telegram.org/bot%s/' %
CFG.apitoken) + method, data=params, timeout=45)
retjson = req.content
if not retjson:
continue
ret = json.loads(retjson.decode('utf-8'))
break
except Exception as ex:
if att < 1:
time.sleep((att + 1) * 2)
else:
raise ex
if not ret['ok']:
raise BotAPIFailed(repr(ret))
return ret['result']
@async_func
def sendmsg(text, chat_id, reply_to_message_id=None, **kwargs):
text = text.strip()
if not text:
logger_botapi.warning('Empty message ignored: %s, %s' % (chat_id, reply_to_message_id))
return
logger_botapi.debug('sendMessage(%s): %s' % (len(text), text[:20]))
if len(text) > 2000:
text = text[:1999] + '…'
reply_id = reply_to_message_id
if reply_to_message_id and reply_to_message_id < 0:
reply_id = None
return bot_api('sendMessage', chat_id=chat_id, text=text,
reply_to_message_id=reply_id, **kwargs)
@async_func
def answer(inline_query_id, results, **kwargs):
return bot_api('answerInlineQuery', inline_query_id=inline_query_id,
results=json.dumps(results), **kwargs)
def getupdates():
global CFG, STATE
while 1:
try:
updates = bot_api('getUpdates', offset=STATE.get('offset', 0), timeout=10)
except Exception:
logger_botapi.exception('Get updates failed.')
continue
if updates:
STATE['offset'] = updates[-1]["update_id"] + 1
for upd in updates:
MSG_Q.put(upd)
time.sleep(.2)
def parse_cmd(text: str):
t = text.strip().replace('\xa0', ' ').split(' ', 1)
if not t:
return (None, None)
cmd = t[0].rsplit('@', 1)
if len(cmd[0]) < 2 or cmd[0][0] != "/":
return (None, None)
if len(cmd) > 1 and 'username' in CFG and cmd[-1] != CFG.username:
return (None, None)
expr = t[1] if len(t) > 1 else ''
return (cmd[0][1:], expr.strip())
# DB stuff
def init_db(filename):
db = sqlite3.connect(filename)
db.row_factory = sqlite3.Row
cur = db.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS stickers ('
'file_id TEXT PRIMARY KEY,'
'width INTEGER,'
'height Integer,'
'emoji TEXT,'
'file_size INTEGER'
')')
# should have some special tags
cur.execute('CREATE TABLE IF NOT EXISTS tags ('
'sticker TEXT,'
'tag TEXT,'
'PRIMARY KEY (sticker, tag),'
'FOREIGN KEY (sticker) REFERENCES stickers(file_id)'
')')
cur.execute('CREATE TABLE IF NOT EXISTS tag_index ('
'tag TEXT PRIMARY KEY,'
'indexed TEXT,'
'FOREIGN KEY (tag) REFERENCES tags(tag)'
')')
cur.execute('CREATE TABLE IF NOT EXISTS bot_status ('
'key TEXT PRIMARY KEY,'
'value TEXT'
')')
cur.execute('CREATE INDEX IF NOT EXISTS idx_emoji ON stickers (emoji)')
cur.execute('CREATE INDEX IF NOT EXISTS idx_tags ON tags (sticker, tag)')
cur.execute('CREATE INDEX IF NOT EXISTS idx_tag_index ON tag_index (indexed)')
db.commit()
state = SQLiteStateStore(db)
return db, state
@functools.lru_cache(maxsize=64)
def normalize_tag(tag):
'''
Normalize tag to be indexable
`tag` must be \w+
'''
indexedtag = tag.lower().replace('_', '')
return zhconv.convert(indexedtag, 'zh-hans')
def add_sticker(sticker, tags=None):
cur = DB.cursor()
if isinstance(sticker, Sticker):
cur.execute('INSERT OR IGNORE INTO stickers VALUES (?,?,?,?,?)', sticker)
sticker_id = sticker.file_id
emoji = sticker.emoji
else:
sticker_id = sticker
emoji = '[?]'
if tags:
logging.info('Tags %r added for %s %s', tags, emoji, sticker_id)
for tag in tags:
indexed = normalize_tag(tag)
cur.execute('INSERT OR IGNORE INTO tags VALUES (?,?)', (sticker_id, tag))
cur.execute('INSERT OR IGNORE INTO tag_index VALUES (?,?)', (tag, indexed))
else:
logging.debug('Seen %s %s', emoji, sticker_id)
DB.commit()
def del_tag(sticker, tag):
if isinstance(sticker, Sticker):
sticker_id = sticker.file_id
else:
sticker_id = sticker
cur = DB.cursor()
cur.execute('DELETE FROM tags WHERE sticker=? AND tag=?', (sticker_id, tag))
DB.commit()
def vacuum_db():
cur = DB.cursor()
cur.execute('DELETE FROM tag_index WHERE tag NOT IN (SELECT DISTINCT tag from tags)')
DB.commit()
    DB.execute('VACUUM')  # sqlite3 connections have no .vacuum() method; run the statement directly
def get_sticker(text, num=50):
text = text.strip()
if not text:
return []
emojis = []
tags = []
where, vals = '', []
for frag in re_emoji.split(text):
if re_emoji.match(frag):
emojis.append(frag)
else:
tags.extend(x.lstrip('#') for x in frag.strip().split() if re_qtag.match(x))
join = ''
if emojis:
where = ' OR '.join('emoji = ?' for x in emojis)
vals = emojis.copy()
if tags:
if where:
where = '(%s) AND ' % where
where += ' AND '.join('t.indexed = ?' for x in tags)
join = (' LEFT JOIN tags ON tags.sticker = stickers.file_id'
' LEFT JOIN tag_index t ON t.tag = tags.tag')
vals.extend(normalize_tag(t) for t in tags)
if not where:
return []
sql = 'SELECT file_id, emoji FROM stickers%s WHERE %s LIMIT ?' % (join, where)
vals.append(num)
cur = DB.cursor()
ret = [Sticker(file_id, None, None, emoji, None) for file_id, emoji in
cur.execute(sql, vals)]
if not ret and tags:
where = ' OR '.join('t.indexed LIKE ?' for x in tags)
vals = ['%' + normalize_tag(t) + '%' for t in tags]
vals.append(num)
sql = 'SELECT file_id, emoji FROM stickers%s WHERE %s LIMIT ?' % (join, where)
ret = [Sticker(file_id, None, None, emoji, None) for file_id, emoji in
cur.execute(sql, vals)]
return ret
# Query handling
START = 'This is the Sticker Index Bot. Send /help, or directly use its inline mode.'
HELP = ('You can search for stickers by tags or emoji in its inline mode.\n'
'This bot will collect tags for stickers in groups or private chat, '
'after seeing stickers being replied to in the format "#tagone #tagtwo".'
)
def handle_api_update(d: dict):
logger_botapi.debug('Update: %r' % d)
try:
if 'inline_query' in d:
query = d['inline_query']
text = query['query'].strip()
if text:
stickers = get_sticker(text)
logging.info('Got %d stickers for %s', len(stickers), text)
r = answer(query['id'], inline_result(stickers))
logger_botapi.debug(r)
elif 'message' in d:
msg = d['message']
text = msg.get('text', '')
sticker = msg.get('sticker')
ret = None
if sticker:
on_sticker(sticker, msg['chat'], msg)
elif text:
cmd, expr = parse_cmd(text)
if not cmd:
ret = on_text(text, msg['chat'], msg['message_id'], msg)
elif msg['chat']['type'] == 'private':
msgchatid = str(msg['chat']['id'])
if msgchatid in STATE:
STATE[msgchatid] = None
if cmd == 'start':
ret = START
# elif cmd == 'help':
else:
ret = HELP
if ret:
sendmsg(ret, msg['chat']['id'], msg['message_id'])
except Exception:
logger_botapi.exception('Failed to process a message.')
def inline_result(stickers):
ret = []
for d in stickers:
ret.append({
'type': 'sticker',
'id': d.file_id,
'sticker_file_id': d.file_id
})
return ret
def on_text(text, chat, replyid, msg):
if not re_tags.match(text):
if chat['type'] == 'private':
return 'Please send me a sticker and its tag(s).'
if 'reply_to_message' in msg and 'sticker' in msg['reply_to_message']:
tags = []
if chat['type'] != 'private':
match = re_tags.match(text.strip())
if match:
tags = [x.lstrip('#') for x in match.group(0).split()]
else:
tags = [x.lstrip('#') for x in text.strip().split() if re_qtag.match(x)]
add_sticker(Sticker.from_telegram(msg['reply_to_message']['sticker']), tags)
if chat['type'] == 'private':
return 'Tags added.'
elif chat['type'] == 'private':
sticker = STATE.get(str(chat['id']))
if sticker:
tags = [x.lstrip('#') for x in text.strip().split() if re_qtag.match(x)]
add_sticker(sticker, tags)
STATE[str(chat['id'])] = None
return 'Tags added.'
def on_sticker(sticker, chat, msg):
sticker_obj = Sticker.from_telegram(sticker)
add_sticker(sticker_obj)
if chat['type'] == 'private':
STATE[str(chat['id'])] = sticker_obj.file_id
def load_config():
return AttrDict(json.load(open('config.json', encoding='utf-8')))
if __name__ == '__main__':
CFG = load_config()
MSG_Q = queue.Queue()
DB, STATE = init_db(CFG.database)
try:
apithr = threading.Thread(target=getupdates)
apithr.daemon = True
apithr.start()
logging.info('Satellite launched')
while 1:
handle_api_update(MSG_Q.get())
finally:
STATE.close()
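# Configuration sketch: load_config() above reads a config.json next to the
# script. The keys below are the ones this file actually uses (apitoken and
# database are required, username is optional); the values are placeholders.
#
#   {
#     "apitoken": "123456:ABC-DEF",
#     "database": "stickers.db",
#     "username": "stickerindexbot"
#   }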
| 32.379581
| 160
| 0.570216
| 1,132
| 0.091151
| 0
| 0
| 1,552
| 0.12497
| 0
| 0
| 3,050
| 0.245591
|
a755c3e60d6f4943e03a99183eadd47ca1d97d29
| 4,571
|
py
|
Python
|
tests.py
|
AndreLouisCaron/requests-wsgi-adapter
|
5506c4785824673147449daabb5c4e06192e5078
|
[
"BSD-3-Clause"
] | null | null | null |
tests.py
|
AndreLouisCaron/requests-wsgi-adapter
|
5506c4785824673147449daabb5c4e06192e5078
|
[
"BSD-3-Clause"
] | null | null | null |
tests.py
|
AndreLouisCaron/requests-wsgi-adapter
|
5506c4785824673147449daabb5c4e06192e5078
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import unittest
import requests
from urllib3._collections import HTTPHeaderDict
from wsgiadapter import WSGIAdapter
class WSGITestHandler(object):
def __init__(self, extra_headers=None):
self.extra_headers = extra_headers or tuple()
def __call__(self, environ, start_response):
headers = HTTPHeaderDict({'Content-Type': 'application/json'})
for key, value in self.extra_headers:
headers.add(key, value)
start_response('200 OK', headers, exc_info=None)
return [bytes(json.dumps({
'result': '__works__',
'body': environ['wsgi.input'].read().decode('utf-8'),
'content_type': environ['CONTENT_TYPE'],
'content_length': environ['CONTENT_LENGTH'],
'path_info': environ['PATH_INFO'],
'request_method': environ['REQUEST_METHOD'],
'server_name': environ['SERVER_NAME'],
'server_port': environ['SERVER_PORT'],
}).encode('utf-8'))]
class WSGIAdapterTest(unittest.TestCase):
def setUp(self):
self.session = requests.session()
self.session.mount('http://localhost', WSGIAdapter(app=WSGITestHandler()))
self.session.mount('https://localhost', WSGIAdapter(app=WSGITestHandler()))
def test_basic_response(self):
response = self.session.get('http://localhost/index', headers={'Content-Type': 'application/json'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.headers['Content-Type'], 'application/json')
self.assertEqual(response.json()['result'], '__works__')
self.assertEqual(response.json()['content_type'], 'application/json')
self.assertEqual(response.json()['path_info'], '/index')
self.assertEqual(response.json()['request_method'], 'GET')
self.assertEqual(response.json()['server_name'], 'localhost')
def test_request_with_body(self):
response = self.session.post('http://localhost/index', data='__test__')
self.assertEqual(response.json()['body'], '__test__')
self.assertEqual(response.json()['content_length'], len('__test__'))
def test_request_with_https(self):
response = self.session.get('https://localhost/index')
self.assertEqual(response.json()['server_port'], '443')
def test_request_with_json(self):
response = self.session.post('http://localhost/index', json={})
self.assertEqual(response.json()['body'], '{}')
self.assertEqual(response.json()['content_length'], len('{}'))
class WSGIAdapterCookieTest(unittest.TestCase):
def setUp(self):
app = WSGITestHandler(
extra_headers=[
("Set-Cookie", "c1=v1; Path=/"),
("Set-Cookie", "c2=v2; Path=/")])
self.session = requests.session()
self.session.mount('http://localhost', WSGIAdapter(app=app))
self.session.mount('https://localhost', WSGIAdapter(app=app))
def test_request_with_cookies(self):
response = self.session.get("http://localhost/cookies")
self.assertEqual(response.cookies['c1'], 'v1')
self.assertEqual(self.session.cookies['c1'], 'v1')
def test_multiple_cookies():
app = WSGITestHandler(
extra_headers=[
("Set-Cookie", "flimble=floop; Path=/"),
("Set-Cookie", "flamble=flaap; Path=/")])
session = requests.session()
session.mount('http://localhost', WSGIAdapter(app=app))
session.get(
"http://localhost/cookies/set?flimble=floop&flamble=flaap")
assert session.cookies['flimble'] == "floop"
assert session.cookies['flamble'] == "flaap"
def test_delete_cookies():
session = requests.session()
set_app = WSGITestHandler(
extra_headers=[
("Set-Cookie", "flimble=floop; Path=/"),
("Set-Cookie", "flamble=flaap; Path=/")])
delete_app = WSGITestHandler(
extra_headers=[(
"Set-Cookie",
"flimble=; Expires=Thu, 01-Jan-1970 00:00:00 GMT; Max-Age=0; Path=/")])
session.mount(
'http://localhost/cookies/set', WSGIAdapter(app=set_app))
session.mount(
'http://localhost/cookies/delete', WSGIAdapter(app=delete_app))
session.get(
"http://localhost/cookies/set?flimble=floop&flamble=flaap")
assert session.cookies['flimble'] == "floop"
assert session.cookies['flamble'] == "flaap"
session.get(
"http://localhost/cookies/delete?flimble")
assert 'flimble' not in session.cookies
assert session.cookies['flamble'] == "flaap"
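def test_minimal_wsgi_app_sketch():
    # Minimal usage sketch mirroring the pattern exercised above (hello_app is a
    # hypothetical WSGI callable): mounting WSGIAdapter on a URL prefix makes the
    # session serve matching requests in-process instead of over the network.
    def hello_app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return [b'hello']
    demo_session = requests.session()
    demo_session.mount('http://testserver', WSGIAdapter(app=hello_app))
    assert demo_session.get('http://testserver/').text == 'hello'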
| 39.068376
| 107
| 0.640123
| 3,051
| 0.667469
| 0
| 0
| 0
| 0
| 0
| 0
| 1,350
| 0.29534
|
a75611852715e07033587bffa7d94fdf7b98243d
| 548
|
py
|
Python
|
setup.py
|
ducandu/aiopening
|
214d8d6dfc928ab4f8db634018092dc43eaf0e3c
|
[
"MIT"
] | null | null | null |
setup.py
|
ducandu/aiopening
|
214d8d6dfc928ab4f8db634018092dc43eaf0e3c
|
[
"MIT"
] | null | null | null |
setup.py
|
ducandu/aiopening
|
214d8d6dfc928ab4f8db634018092dc43eaf0e3c
|
[
"MIT"
] | null | null | null |
"""
-------------------------------------------------------------------------
shine -
setup
!!TODO: add file description here!!
created: 2017/06/04 in PyCharm
(c) 2017 Sven - ducandu GmbH
-------------------------------------------------------------------------
"""
from setuptools import setup
setup(name='aiopening', version='1.0', description='AI (but even opener)', url='http://github.com/sven1977/aiopening', author='Sven Mika',
author_email='sven.mika@ducandu.com', license='MIT', packages=['aiopening'], zip_safe=False)
| 34.25
| 138
| 0.501825
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 405
| 0.739051
|
a756ca330f0702ca67f549b4365c53dd8dc05dbc
| 1,932
|
py
|
Python
|
podcast_dl/podcasts.py
|
RMPR/simple-podcast-dl
|
bb4419d3beb1a893bfac5aa6546ba25522531b00
|
[
"MIT"
] | null | null | null |
podcast_dl/podcasts.py
|
RMPR/simple-podcast-dl
|
bb4419d3beb1a893bfac5aa6546ba25522531b00
|
[
"MIT"
] | null | null | null |
podcast_dl/podcasts.py
|
RMPR/simple-podcast-dl
|
bb4419d3beb1a893bfac5aa6546ba25522531b00
|
[
"MIT"
] | null | null | null |
"""
List of podcasts and their filename parser types.
"""
from .rss_parsers import BaseItem, TalkPythonItem, ChangelogItem, IndieHackersItem
import attr
@attr.s(slots=True, frozen=True)
class Podcast:
name = attr.ib(type=str)
title = attr.ib(type=str)
url = attr.ib(type=str)
rss = attr.ib(type=str)
rss_parser = attr.ib(type=BaseItem)
PODCASTS = [
Podcast(
name="talkpython",
title="Talk Python To Me",
url="https://talkpython.fm",
rss="https://talkpython.fm/episodes/rss",
rss_parser=TalkPythonItem,
),
Podcast(
name="pythonbytes",
title="Python Bytes",
url="https://pythonbytes.fm/",
rss="https://pythonbytes.fm/episodes/rss",
rss_parser=TalkPythonItem,
),
Podcast(
name="changelog",
title="The Changelog",
url="https://changelog.com/podcast",
rss="https://changelog.com/podcast/feed",
rss_parser=ChangelogItem,
),
Podcast(
name="podcastinit",
title="Podcast.__init__",
url="https://www.podcastinit.com/",
rss="https://www.podcastinit.com/feed/mp3/",
rss_parser=BaseItem,
),
Podcast(
name="indiehackers",
title="Indie Hackers",
url="https://www.indiehackers.com/podcast",
rss="http://feeds.backtracks.fm/feeds/indiehackers/indiehackers/feed.xml",
rss_parser=IndieHackersItem,
),
Podcast(
name="realpython",
title="Real Python",
url="https://realpython.com/podcasts/rpp/",
rss="https://realpython.com/podcasts/rpp/feed",
rss_parser=BaseItem,
),
Podcast(
name="kubernetespodcast",
title="Kubernetes Podcast",
url="https://kubernetespodcast.com/",
rss="https://kubernetespodcast.com/feeds/audio.xml",
rss_parser=BaseItem,
),
]
PODCAST_MAP = {p.name: p for p in PODCASTS}
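# Usage sketch: PODCAST_MAP allows lookup by the short name defined above, e.g.
# to resolve a feed URL and its parser for a podcast selected on the command line.
#
#   PODCAST_MAP["talkpython"].rss         -> "https://talkpython.fm/episodes/rss"
#   PODCAST_MAP["talkpython"].rss_parser  -> TalkPythonItem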
| 27.6
| 82
| 0.608696
| 169
| 0.087474
| 0
| 0
| 202
| 0.104555
| 0
| 0
| 788
| 0.407867
|
a75700da032ade0f2e5909a09f4ffc60c4abd193
| 20,543
|
py
|
Python
|
07_spitzer_aor_extraction.py
|
rsiverd/ultracool
|
cbeb2e0e4aee0acc9f8ed2bde7ecdf8be5fa85a1
|
[
"BSD-2-Clause"
] | null | null | null |
07_spitzer_aor_extraction.py
|
rsiverd/ultracool
|
cbeb2e0e4aee0acc9f8ed2bde7ecdf8be5fa85a1
|
[
"BSD-2-Clause"
] | null | null | null |
07_spitzer_aor_extraction.py
|
rsiverd/ultracool
|
cbeb2e0e4aee0acc9f8ed2bde7ecdf8be5fa85a1
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# vim: set fileencoding=utf-8 ts=4 sts=4 sw=4 et tw=80 :
#
# Extract and save extended object catalogs from the specified data and
# uncertainty images. This version of the script jointly analyzes all
# images from a specific AOR/channel to enable more sophisticated
# analysis.
#
# Rob Siverd
# Created: 2021-02-02
# Last modified: 2021-08-24
#--------------------------------------------------------------------------
#**************************************************************************
#--------------------------------------------------------------------------
## Logging setup:
import logging
#logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
#logger.setLevel(logging.DEBUG)
logger.setLevel(logging.INFO)
## Current version:
__version__ = "0.3.5"
## Python version-agnostic module reloading:
try:
reload # Python 2.7
except NameError:
try:
from importlib import reload # Python 3.4+
except ImportError:
from imp import reload # Python 3.0 - 3.3
## Modules:
import argparse
import shutil
#import resource
#import signal
import glob
#import gc
import os
import sys
import time
import numpy as np
#from numpy.lib.recfunctions import append_fields
#import datetime as dt
#from dateutil import parser as dtp
#from functools import partial
#from collections import OrderedDict
#from collections.abc import Iterable
#import multiprocessing as mp
#np.set_printoptions(suppress=True, linewidth=160)
_have_np_vers = float('.'.join(np.__version__.split('.')[:2]))
##--------------------------------------------------------------------------##
## Disable buffering on stdout/stderr:
class Unbuffered(object):
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
##--------------------------------------------------------------------------##
## Spitzer pipeline filesystem helpers:
try:
import spitz_fs_helpers
reload(spitz_fs_helpers)
except ImportError:
logger.error("failed to import spitz_fs_helpers module!")
sys.exit(1)
sfh = spitz_fs_helpers
## Spitzer pipeline cross-correlation:
try:
import spitz_xcorr_stacking
reload(spitz_xcorr_stacking)
except ImportError:
logger.error("failed to import spitz_xcor_stacking module!")
sys.exit(1)
sxc = spitz_xcorr_stacking.SpitzerXCorr()
## Catalog pruning helpers:
try:
import catalog_tools
reload(catalog_tools)
except ImportError:
logger.error("failed to import catalog_tools module!")
sys.exit(1)
xcp = catalog_tools.XCorrPruner()
## Spitzer star detection routine:
try:
import spitz_extract
reload(spitz_extract)
spf = spitz_extract.SpitzFind()
except ImportError:
logger.error("spitz_extract module not found!")
sys.exit(1)
## Hybrid stack+individual position calculator:
try:
import spitz_stack_astrom
reload(spitz_stack_astrom)
ha = spitz_stack_astrom.HybridAstrom()
except ImportError:
logger.error("failed to import spitz_stack_astrom module!")
sys.exit(1)
## HORIZONS ephemeris tools:
try:
import jpl_eph_helpers
reload(jpl_eph_helpers)
except ImportError:
logger.error("failed to import jpl_eph_helpers module!")
sys.exit(1)
eee = jpl_eph_helpers.EphTool()
##--------------------------------------------------------------------------##
## Fast FITS I/O:
try:
import fitsio
except ImportError:
logger.error("fitsio module not found! Install and retry.")
sys.stderr.write("\nError: fitsio module not found!\n")
sys.exit(1)
## Save FITS image with clobber (fitsio):
def qsave(iname, idata, header=None, **kwargs):
this_func = sys._getframe().f_code.co_name
parent_func = sys._getframe(1).f_code.co_name
sys.stderr.write("Writing to '%s' ... " % iname)
fitsio.write(iname, idata, clobber=True, header=header, **kwargs)
sys.stderr.write("done.\n")
##--------------------------------------------------------------------------##
##------------------ Parse Command Line ----------------##
##--------------------------------------------------------------------------##
## Dividers:
halfdiv = '-' * 40
fulldiv = '-' * 80
## Parse arguments and run script:
class MyParser(argparse.ArgumentParser):
def error(self, message):
sys.stderr.write('error: %s\n' % message)
self.print_help()
sys.exit(2)
## Enable raw text AND display of defaults:
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
pass
## Parse the command line:
if __name__ == '__main__':
# ------------------------------------------------------------------
prog_name = os.path.basename(__file__)
descr_txt = """
Extract catalogs from the listed Spitzer data/uncertainty images.
Version: %s
""" % __version__
parser = MyParser(prog=prog_name, description=descr_txt)
#formatter_class=argparse.RawTextHelpFormatter)
# ------------------------------------------------------------------
parser.set_defaults(imtype=None) #'cbcd') #'clean')
#parser.set_defaults(sigthresh=3.0)
parser.set_defaults(sigthresh=2.0)
parser.set_defaults(skip_existing=True)
parser.set_defaults(save_registered=True)
#parser.set_defaults(save_reg_subdir=None)
# ------------------------------------------------------------------
#parser.add_argument('firstpos', help='first positional argument')
#parser.add_argument('-w', '--whatever', required=False, default=5.0,
# help='some option with default [def: %(default)s]', type=float)
# ------------------------------------------------------------------
# ------------------------------------------------------------------
iogroup = parser.add_argument_group('File I/O')
iogroup.add_argument('--overwrite', required=False, dest='skip_existing',
action='store_false', help='overwrite existing catalogs')
#iogroup.add_argument('-E', '--ephem_data', default=None, required=True,
# help='CSV file with SST ephemeris data', type=str)
iogroup.add_argument('-I', '--input_folder', default=None, required=True,
help='where to find input images', type=str)
iogroup.add_argument('-O', '--output_folder', default=None, required=False,
help='where to save extended catalog outputs', type=str)
iogroup.add_argument('-W', '--walk', default=False, action='store_true',
help='recursively walk subfolders to find CBCD images')
imtype = iogroup.add_mutually_exclusive_group()
#imtype.add_argument('--cbcd', required=False, action='store_const',
# dest='imtype', const='cbcd', help='use cbcd images')
imtype.add_argument('--hcfix', required=False, action='store_const',
dest='imtype', const='hcfix', help='use hcfix images')
imtype.add_argument('--clean', required=False, action='store_const',
dest='imtype', const='clean', help='use clean images')
imtype.add_argument('--nudge', required=False, action='store_const',
dest='imtype', const='nudge', help='use nudge images')
#iogroup.add_argument('-R', '--ref_image', default=None, required=True,
# help='KELT image with WCS')
# ------------------------------------------------------------------
# ------------------------------------------------------------------
# Miscellany:
miscgroup = parser.add_argument_group('Miscellany')
miscgroup.add_argument('--debug', dest='debug', default=False,
help='Enable extra debugging messages', action='store_true')
miscgroup.add_argument('-q', '--quiet', action='count', default=0,
help='less progress/status reporting')
miscgroup.add_argument('-v', '--verbose', action='count', default=0,
help='more progress/status reporting')
# ------------------------------------------------------------------
context = parser.parse_args()
context.vlevel = 99 if context.debug else (context.verbose-context.quiet)
context.prog_name = prog_name
# Unless otherwise specified, output goes into input folder:
if not context.output_folder:
context.output_folder = context.input_folder
# Ensure an image type is selected:
if not context.imtype:
sys.stderr.write("\nNo image type selected!\n\n")
sys.exit(1)
## Use imtype-specific folder for registered file output:
#if not context.save_reg_subdir:
# context.save_reg_subdir = 'aligned_%s' % context.imtype
##--------------------------------------------------------------------------##
##------------------ Make Input Image List ----------------##
##--------------------------------------------------------------------------##
tstart = time.time()
sys.stderr.write("Listing %s frames ... " % context.imtype)
#im_wildpath = 'SPITZ*%s.fits' % context.imtype
#im_wildcard = os.path.join(context.input_folder, 'SPIT*'
#_img_types = ['cbcd', 'clean', 'cbunc']
#_type_suff = dict([(x, x+'.fits') for x in _im_types])
#img_list = {}
#for imsuff in suffixes:
# wpath = '%s/SPITZ*%s.fits' % (context.input_folder, imsuff)
# img_list[imsuff] = sorted(glob.glob(os.path.join(context.
#img_files = sorted(glob.glob(os.path.join(context.input_folder, im_wildpath)))
if context.walk:
img_files = sfh.get_files_walk(context.input_folder, flavor=context.imtype)
else:
img_files = sfh.get_files_single(context.input_folder, flavor=context.imtype)
sys.stderr.write("done.\n")
## Abort in case of no input:
if not img_files:
sys.stderr.write("No input (%s) files found in folder:\n" % context.imtype)
sys.stderr.write("--> %s\n\n" % context.input_folder)
sys.exit(1)
n_images = len(img_files)
## List of uncertainty frames (warn if any missing):
#unc_files = [x.replace(context.imtype, 'cbunc') for x in img_files]
#sys.stderr.write("Checking error-images ... ")
#have_unc = [os.path.isfile(x) for x in unc_files]
#if not all(have_unc):
# sys.stderr.write("WARNING: some uncertainty frames missing!\n")
#else:
# sys.stderr.write("done.\n")
##--------------------------------------------------------------------------##
##------------------ Load SST Ephemeris Data ----------------##
##--------------------------------------------------------------------------##
### Ephemeris data file must exist:
#if not context.ephem_data:
# logger.error("context.ephem_data not set?!?!")
# sys.exit(1)
#if not os.path.isfile(context.ephem_data):
# logger.error("Ephemeris file not found: %s" % context.ephem_data)
# sys.exit(1)
#
### Load ephemeris data:
#eee.load(context.ephem_data)
##--------------------------------------------------------------------------##
##------------------ Unique AOR/Channel Combos ----------------##
##--------------------------------------------------------------------------##
unique_tags = sorted(list(set([sfh.get_irac_aor_tag(x) for x in img_files])))
images_by_tag = {x:[] for x in unique_tags}
for ii in img_files:
images_by_tag[sfh.get_irac_aor_tag(ii)].append(ii)
##--------------------------------------------------------------------------##
##------------------ Diagnostic Region Files ----------------##
##--------------------------------------------------------------------------##
def regify_excat_pix(data, rpath, win=False, rr=2.0):
colnames = ('wx', 'wy') if win else ('x', 'y')
xpix, ypix = [data[x] for x in colnames]
with open(rpath, 'w') as rfile:
for xx,yy in zip(xpix, ypix):
rfile.write("image; circle(%8.3f, %8.3f, %8.3f)\n" % (xx, yy, rr))
return
##--------------------------------------------------------------------------##
##------------------ ExtendedCatalog Ephem Format ----------------##
##--------------------------------------------------------------------------##
#def reformat_ephem(edata):
##--------------------------------------------------------------------------##
##------------------ Stack/Image Comparison ----------------##
##--------------------------------------------------------------------------##
#def xcheck(idata, sdata):
# nstack = len(sdata)
# nimage = len(idata)
# sys.stderr.write("nstack: %d\n" % nstack)
# sys.stderr.write("nimage: %d\n" % nimage)
# return
##--------------------------------------------------------------------------##
##------------------ Process All Images ----------------##
##--------------------------------------------------------------------------##
ntodo = 0
nproc = 0
ntotal = len(img_files)
min_sobj = 10 # bark if fewer than this many found in stack
skip_stuff = False
#context.save_registered = False
#context.skip_existing = False
## Reduce bright pixel threshold:
#sxc.set_bp_thresh(10.0)
#sxc.set_bp_thresh(5.0)
sxc.set_bp_thresh(10.0)
#sxc.set_vlevel(10)
sxc.set_roi_rfrac(0.90)
sxc.set_roi_rfrac(2.00)
#sys.exit(0)
#for aor_tag,tag_files in images_by_tag.items():
for aor_tag in unique_tags:
sys.stderr.write("\n\nProcessing images from %s ...\n" % aor_tag)
tag_files = images_by_tag[aor_tag]
n_tagged = len(tag_files)
if n_tagged < 2:
sys.stderr.write("WARNING: only %d images with tag %s\n"
% (n_tagged, aor_tag))
sys.stderr.write("This case is not currently handled ...\n")
sys.exit(1)
# File/folder paths:
aor_dir = os.path.dirname(tag_files[0])
stack_ibase = '%s_%s_stack.fits' % (aor_tag, context.imtype)
stack_cbase = '%s_%s_stack.fcat' % (aor_tag, context.imtype)
medze_ibase = '%s_%s_medze.fits' % (aor_tag, context.imtype)
stack_ipath = os.path.join(aor_dir, stack_ibase)
stack_cpath = os.path.join(aor_dir, stack_cbase)
medze_ipath = os.path.join(aor_dir, medze_ibase)
#sys.stderr.write("stack_ibase: %s\n" % stack_ibase)
#sys.stderr.write("As of this point ...\n")
#sys.stderr.write("sxc._roi_rfrac: %.5f\n" % sxc._roi_rfrac)
sys.stderr.write("Cross-correlating and stacking ... ")
result = sxc.shift_and_stack(tag_files)
sys.stderr.write("done.\n")
sxc.save_istack(stack_ipath)
#sys.exit(0)
#istack = sxc.get_stacked()
#qsave(stack_ipath, istack)
# Dump registered data to disk:
if context.save_registered:
save_reg_subdir = 'aligned_%s_%s' % (aor_tag, context.imtype)
sys.stderr.write("Saving registered frames for inspection ...\n")
#reg_dir = os.path.join(aor_dir, context.save_reg_subdir)
reg_dir = os.path.join(aor_dir, save_reg_subdir)
if os.path.isdir(reg_dir):
shutil.rmtree(reg_dir)
os.mkdir(reg_dir)
sxc.dump_registered_images(reg_dir)
sxc.dump_bright_pixel_masks(reg_dir)
sys.stderr.write("\n")
# Extract stars from stacked image:
spf.use_images(ipath=stack_ipath)
stack_cat = spf.find_stars(context.sigthresh)
sdata = stack_cat.get_catalog()
nsobj = len(sdata)
sys.stderr.write(" \nFound %d sources in stacked image.\n\n" % nsobj)
if (nsobj < min_sobj):
sys.stderr.write("Fewer than %d objects found in stack ... \n" % min_sobj)
sys.stderr.write("Found %d objects.\n\n" % nsobj)
sys.stderr.write("--> %s\n\n" % stack_ipath)
sys.exit(1)
stack_cat.save_as_fits(stack_cpath, overwrite=True)
# region file for diagnostics:
stack_rfile = stack_ipath + '.reg'
regify_excat_pix(sdata, stack_rfile, win=True)
# Make/save 'medianize' stack for comparison:
sxc.make_mstack()
sxc.save_mstack(medze_ipath)
# Set up pruning system:
xshifts, yshifts = sxc.get_stackcat_offsets()
xcp.set_master_catalog(sdata)
xcp.set_image_offsets(xshifts, yshifts)
# Set up hybrid astrometry system:
ha.set_stack_excat(stack_cat) # catalog of detections
ha.set_xcorr_metadata(sxc) # pixel offsets by image
## Stop here for now ...
#if skip_stuff:
# continue
# process individual files with cross-correlation help:
for ii,img_ipath in enumerate(tag_files, 1):
sys.stderr.write("%s\n" % fulldiv)
unc_ipath = img_ipath.replace(context.imtype, 'cbunc')
if not os.path.isfile(unc_ipath):
sys.stderr.write("WARNING: file not found:\n--> %s\n" % unc_ipath)
continue
img_ibase = os.path.basename(img_ipath)
#cat_ibase = img_ibase.replace(context.imtype, 'fcat')
cat_fbase = img_ibase + '.fcat'
cat_pbase = img_ibase + '.pcat'
cat_mbase = img_ibase + '.mcat'
### FIXME ###
### context.output_folder is not appropriate for walk mode ...
save_dir = context.output_folder # NOT FOR WALK MODE
save_dir = os.path.dirname(img_ipath)
cat_fpath = os.path.join(save_dir, cat_fbase)
cat_ppath = os.path.join(save_dir, cat_pbase)
cat_mpath = os.path.join(save_dir, cat_mbase)
### FIXME ###
sys.stderr.write("Catalog %s ... " % cat_fpath)
if context.skip_existing:
if os.path.isfile(cat_mpath):
sys.stderr.write("exists! Skipping ... \n")
continue
nproc += 1
sys.stderr.write("not found ... creating ...\n")
spf.use_images(ipath=img_ipath, upath=unc_ipath)
result = spf.find_stars(context.sigthresh)
## FIXME: this just grabs the ephemeris from the header content
## of the first ExtendedCatalog produced. This should be obtained
## separately to make things easier to follow (and to eliminate
## the need to pre-modify the image headers ...)
eph_data = eee.eph_from_header(result.get_header())
result.set_ephem(eph_data)
result.save_as_fits(cat_fpath, overwrite=True)
nfound = len(result.get_catalog())
frame_rfile = img_ipath + '.reg'
regify_excat_pix(result.get_catalog(), frame_rfile, win=True)
# prune sources not detected in stacked frame:
pruned = xcp.prune_spurious(result.get_catalog(), img_ipath)
npruned = len(pruned)
sys.stderr.write("nfound: %d, npruned: %d\n" % (nfound, npruned))
if (len(pruned) < 5):
sys.stderr.write("BARKBARKBARK\n")
sys.exit(1)
result.set_catalog(pruned)
result.save_as_fits(cat_ppath, overwrite=True)
# build and save hybrid catalog:
mcat = ha.make_hybrid_excat(result)
mcat.set_ephem(eph_data)
mcat.save_as_fits(cat_mpath, overwrite=True)
mxcat_rfile = img_ipath + '.mcat.reg'
#regify_excat_pix(mcat.get_catalog(), mxcat_rfile, win=True)
# stop early if requested:
if (ntodo > 0) and (nproc >= ntodo):
break
#break
#sys.exit(0)
if (ntodo > 0) and (nproc >= ntodo):
break
tstop = time.time()
ttook = tstop - tstart
sys.stderr.write("Extraction completed in %.3f seconds.\n" % ttook)
#import astropy.io.fits as pf
#
#imra = np.array([hh['CRVAL1'] for hh in sxc._im_hdrs])
#imde = np.array([hh['CRVAL2'] for hh in sxc._im_hdrs])
#
##sys.stderr.write("\n\n\n")
##sys.stderr.write("sxc.shift_and_stack(tag_files)\n")
##result = sxc.shift_and_stack(tag_files)
#sys.exit(0)
#
#layers = sxc.pad_and_shift(sxc._im_data, sxc._x_shifts, sxc._y_shifts)
#tstack = sxc.dumb_stack(layers)
#pf.writeto('tstack.fits', tstack, overwrite=True)
#
#tdir = 'zzz'
#if not os.path.isdir(tdir):
# os.mkdir(tdir)
#
##tag_bases = [os.path.basename(x) for x in tag_files]
##for ibase,idata in zip(tag_bases, layers):
## tsave = os.path.join(tdir, 'r' + ibase)
## sys.stderr.write("Saving %s ... \n" % tsave)
## pf.writeto(tsave, idata, overwrite=True)
#
#sys.stderr.write("\n\n\n")
#sys.stderr.write("visual inspection with:\n")
#sys.stderr.write("flztfs %s\n" % ' '.join(tag_files))
##--------------------------------------------------------------------------##
######################################################################
# CHANGELOG (07_spitzer_aor_extraction.py):
#---------------------------------------------------------------------
#
# 2021-02-02:
# -- Increased __version__ to 0.1.0.
# -- First created 07_spitzer_aor_extraction.py.
#
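# Invocation sketch (the folder paths are hypothetical; the flags correspond to
# the argparse options defined above -- exactly one image type is required and
# -W walks the input folder recursively):
#
#   python 07_spitzer_aor_extraction.py --clean -W \
#       -I /data/spitzer/aors -O /data/spitzer/catalogs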
| 37.148282
| 82
| 0.577861
| 540
| 0.026286
| 0
| 0
| 0
| 0
| 0
| 0
| 11,171
| 0.543786
|
a7574a31d3793e68486c1afc1807fc0afcd14ce5
| 6,594
|
py
|
Python
|
project/apps/CI-producer/app/producers_test.py
|
Monxun/PortainerPractice
|
a3be077efe5c5eb2aa27b6a2fcf626989bdbbbe4
|
[
"MIT"
] | null | null | null |
project/apps/CI-producer/app/producers_test.py
|
Monxun/PortainerPractice
|
a3be077efe5c5eb2aa27b6a2fcf626989bdbbbe4
|
[
"MIT"
] | 1
|
2022-03-02T22:54:36.000Z
|
2022-03-02T22:54:36.000Z
|
project/apps/CI-producer/app/producers_test.py
|
Monxun/PortainerPractice
|
a3be077efe5c5eb2aa27b6a2fcf626989bdbbbe4
|
[
"MIT"
] | null | null | null |
from os import strerror
import os
import pytest
import datetime
import sqlalchemy
from sqlalchemy import inspect
from sqlalchemy import select
from sqlalchemy.orm import session
from sqlalchemy.sql.expression import func
#################################################
# DATABASE CONNECTOR
user = 'user'
password = 'root'
host = 'localhost'
port = 3306
name = 'alinedb'
engine = sqlalchemy.create_engine(
f'mysql+pymysql://{user}:{password}@{host}:{port}/{name}',
echo=True
)
inspector = inspect(engine)
for table_name in inspector.get_table_names():
print(table_name)
Session = session.sessionmaker()
Session.configure(bind=engine)
my_session = Session()
#################################################
# TEST
'''
Module to test producers
'''
from models import (
Applicant,
Bank,
Merchant,
Application,
Branch,
Member,
Account,
User,
OneTimePasscode,
Transaction,
UserRegistrationToken
)
def test_create_applicants() -> None:
test_object = my_session.query(Applicant).first()
assert isinstance(test_object.id, int)
assert isinstance(test_object.address, str)
assert isinstance(test_object.city, str)
assert isinstance(test_object.created_at, datetime.datetime)
assert isinstance(test_object.date_of_birth, datetime.date)
assert isinstance(test_object.drivers_license, str)
assert isinstance(test_object.email, str)
assert isinstance(test_object.first_name, str)
assert isinstance(test_object.gender, str)
assert isinstance(test_object.income, int)
assert isinstance(test_object.last_modified_at_at, datetime.datetime)
assert isinstance(test_object.last_name, str)
assert isinstance(test_object.mailing_address, str)
assert isinstance(test_object.mailing_city, str)
assert isinstance(test_object.mailing_state, str)
assert isinstance(test_object.mailing_zipcode, str)
assert isinstance(test_object.middle_name, str)
assert isinstance(test_object.phone, str)
assert isinstance(test_object.social_security, str)
assert isinstance(test_object.state, str)
assert isinstance(test_object.zipcode, str)
def test_create_banks() -> None:
test_object = my_session.query(Bank).first()
assert isinstance(test_object.id, int)
assert isinstance(test_object.address, str)
assert isinstance(test_object.city, str)
assert isinstance(test_object.routing_number, str)
assert isinstance(test_object.state, str)
assert isinstance(test_object.zipcode, str)
def test_create_merchants() -> None:
test_object = my_session.query(Merchant).first()
assert isinstance(test_object.code, str)
assert isinstance(test_object.address, str)
assert isinstance(test_object.city, str)
assert isinstance(test_object.description, str)
assert isinstance(test_object.name, str)
assert isinstance(test_object.registered_at, datetime.datetime)
assert isinstance(test_object.state, str)
assert isinstance(test_object.zipcode, str)
def test_create_applications() -> None:
test_object = my_session.query(Application).first()
assert isinstance(test_object.id, int)
assert isinstance(test_object.application_status, str)
assert isinstance(test_object.application_type, str)
assert isinstance(test_object.primary_applicant_id, int)
def test_create_branches() -> None:
test_object = my_session.query(Branch).first()
assert isinstance(test_object.id, int)
assert isinstance(test_object.address, str)
assert isinstance(test_object.city, str)
assert isinstance(test_object.name, str)
assert isinstance(test_object.phone, str)
assert isinstance(test_object.state, str)
assert isinstance(test_object.zipcode, str)
assert isinstance(test_object.bank_id, int)
def test_create_members() -> None:
test_object = my_session.query(Member).first()
assert isinstance(test_object.id, int)
assert isinstance(test_object.membership_id, str)
assert isinstance(test_object.applicant_id, int)
assert isinstance(test_object.branch_id, int)
def test_create_accounts() -> None:
test_object = my_session.query(Account).first()
assert isinstance(test_object.account_type, str)
assert isinstance(test_object.id, int)
assert isinstance(test_object.account_number, str)
assert isinstance(test_object.balance, int)
assert isinstance(test_object.status, str)
assert isinstance(test_object.available_balance, int)
assert isinstance(test_object.apy, float)
assert isinstance(test_object.primary_account_holder_id, int)
def test_create_users() -> None:
test_object = my_session.query(User).first()
assert isinstance(test_object.role, str)
assert isinstance(test_object.id, int)
assert isinstance(test_object.enabled, int)
assert isinstance(test_object.password, str)
assert isinstance(test_object.username, str)
assert isinstance(test_object.email, str)
assert isinstance(test_object.first_name, str)
assert isinstance(test_object.last_name, str)
assert isinstance(test_object.phone, str)
assert isinstance(test_object.member_id, int)
def test_create_one_time_passcodes() -> None:
test_object = my_session.query(OneTimePasscode).first()
assert isinstance(test_object.id, int)
assert isinstance(test_object.checked, int)
assert isinstance(test_object.otp, str)
assert isinstance(test_object.user_id, int)
def test_create_transactions() -> None:
test_object = my_session.query(Transaction).first()
assert isinstance(test_object.id, int)
assert isinstance(test_object.amount, int)
assert isinstance(test_object.date, datetime.datetime)
assert isinstance(test_object.description, str)
assert isinstance(test_object.initial_balance, int)
assert isinstance(test_object.last_modified, datetime.date)
assert isinstance(test_object.method, str)
assert isinstance(test_object.posted_balance, int)
assert isinstance(test_object.state, str)
assert isinstance(test_object.status, str)
assert isinstance(test_object.type, str)
assert isinstance(test_object.account_id, int)
assert isinstance(test_object.merchant_code, str)
def test_create_user_registration_tokens() -> None:
test_object = my_session.query(UserRegistrationToken).first()
assert isinstance(test_object.token, str)
assert isinstance(test_object.created, datetime.datetime)
assert isinstance(test_object.expiration_delay, int)
assert isinstance(test_object.user_id, int)
| 30.957746
| 74
| 0.746588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 245
| 0.037155
|
a7574f04a38567a940cb678fc874747f83a2b6d9
| 223
|
py
|
Python
|
quran/domain/edition.py
|
octabytes/quran
|
974d351cf5e6a12a28a5ac9f29c8d2753ae6dd86
|
[
"Apache-2.0"
] | null | null | null |
quran/domain/edition.py
|
octabytes/quran
|
974d351cf5e6a12a28a5ac9f29c8d2753ae6dd86
|
[
"Apache-2.0"
] | null | null | null |
quran/domain/edition.py
|
octabytes/quran
|
974d351cf5e6a12a28a5ac9f29c8d2753ae6dd86
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass
from quran.domain.entity import Entity
@dataclass
class Edition(Entity):
id: str
language: str
name: str
translator: str
type: str
format: str
direction: str
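# Construction sketch (field values are placeholders, and this assumes the
# Entity base class adds no required fields of its own): Edition is a plain
# dataclass, so instances can be built by keyword, e.g.
#
#   Edition(id="en.sahih", language="en", name="Sahih International",
#           translator="Sahih International", type="translation",
#           format="text", direction="ltr")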
| 14.866667
| 38
| 0.690583
| 135
| 0.605381
| 0
| 0
| 146
| 0.654709
| 0
| 0
| 0
| 0
|
a75778c132db31042c63da3f963565d091dded6a
| 1,231
|
py
|
Python
|
dataflow/core/visualization.py
|
alphamatic/amp
|
5018137097159415c10eaa659a2e0de8c4e403d4
|
[
"BSD-3-Clause"
] | 5
|
2021-08-10T23:16:44.000Z
|
2022-03-17T17:27:00.000Z
|
dataflow/core/visualization.py
|
alphamatic/amp
|
5018137097159415c10eaa659a2e0de8c4e403d4
|
[
"BSD-3-Clause"
] | 330
|
2021-06-10T17:28:22.000Z
|
2022-03-31T00:55:48.000Z
|
dataflow/core/visualization.py
|
alphamatic/amp
|
5018137097159415c10eaa659a2e0de8c4e403d4
|
[
"BSD-3-Clause"
] | 6
|
2021-06-10T17:20:32.000Z
|
2022-03-28T08:08:03.000Z
|
"""
Helper functions to visualize a graph in a notebook or save the plot to file.
Import as:
import dataflow.core.visualization as dtfcorvisu
"""
import IPython
import networkx as networ
import pygraphviz
import dataflow.core.dag as dtfcordag
import helpers.hdbg as hdbg
import helpers.hio as hio
def draw(dag: dtfcordag.DAG) -> IPython.core.display.Image:
"""
Render DAG in a notebook.
"""
agraph = _extract_agraph_from_dag(dag)
image = IPython.display.Image(agraph.draw(format="png", prog="dot"))
return image
def draw_to_file(dag: dtfcordag.DAG, file_name: str = "graph.png") -> str:
"""
Save DAG rendering to a file.
"""
agraph = _extract_agraph_from_dag(dag)
# Save to file.
hio.create_enclosing_dir(file_name)
agraph.draw(file_name, prog="dot")
return file_name
def _extract_agraph_from_dag(dag: dtfcordag.DAG) -> pygraphviz.agraph.AGraph:
"""
Extract a pygraphviz `agraph` from a DAG.
"""
# Extract networkx DAG.
hdbg.dassert_isinstance(dag, dtfcordag.DAG)
graph = dag.dag
hdbg.dassert_isinstance(graph, networ.Graph)
# Convert the DAG into a pygraphviz graph.
agraph = networ.nx_agraph.to_agraph(graph)
return agraph
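# Usage sketch (assumes a `dag` built elsewhere with dataflow.core.dag.DAG and
# populated with nodes):
#
#   import dataflow.core.visualization as dtfcorvisu
#   dtfcorvisu.draw(dag)                     # render inline in a notebook
#   dtfcorvisu.draw_to_file(dag, "dag.png")  # or write the rendering to disk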
| 25.122449
| 77
| 0.707555
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 396
| 0.32169
|
a758f541fb2e3c2ec9bc820cd471a439cd2c4443
| 7,714
|
py
|
Python
|
scripts/pixel_error.py
|
ling-k/STOVE
|
fcf36139f41dee5ef892e90dedf1d2208da6fd3c
|
[
"MIT"
] | 31
|
2019-10-14T01:48:44.000Z
|
2022-01-20T19:19:14.000Z
|
scripts/pixel_error.py
|
ling-k/STOVE
|
fcf36139f41dee5ef892e90dedf1d2208da6fd3c
|
[
"MIT"
] | 3
|
2020-05-08T11:01:25.000Z
|
2021-05-24T07:50:10.000Z
|
scripts/pixel_error.py
|
ling-k/STOVE
|
fcf36139f41dee5ef892e90dedf1d2208da6fd3c
|
[
"MIT"
] | 9
|
2020-01-13T11:25:16.000Z
|
2021-05-10T06:04:08.000Z
|
"""Calculate pixel errors for a single run or all runs in an experiment dir."""
import torch
import itertools
import numpy as np
import imageio
import argparse
import os
import glob
from model.main import main as restore_model
from model.utils.utils import bw_transform
os.environ["CUDA_VISIBLE_DEVICES"] = '-1'
def run_fmt(x, with_under=False):
"""Format array x of ints to become valid run folder names."""
return 'run{:03d}'.format(x) if not with_under else 'run_{:03d}'.format(x)
def get_pixel_error(restore, linear=False, path='', real_mpe=False, checkpoint='checkpoint'):
"""Restore a model and calculate error from reconstructions."""
# do not write any new runs
extras = {
'nolog': True,
'checkpoint_path': os.path.join(restore, checkpoint)}
self = restore_model(restore=restore, extras=extras)
# ignore supairvised runs for now
if self.c.supairvised is True:
return None
# make sure all runs access the same data!
print(self.c.testdata)
step = self.c.frame_step
visible = self.c.num_visible
batch_size = self.c.batch_size
skip = self.c.skip
# make sure this is the same
print(step, visible, batch_size, skip)
long_rollout_length = self.c.num_frames // step - visible
lrl = long_rollout_length
total_images = self.test_dataset.total_img
total_labels = self.test_dataset.total_data
# apply step and batch size once
total_images = total_images[:batch_size, ::step]
total_labels = total_labels[:batch_size, ::step]
# true data to compare against
true_images = total_images[:, skip:(visible+long_rollout_length)]
true_images = torch.tensor(true_images).to(self.c.device).type(self.c.dtype)
# First obtain reconstruction of input.
stove_input = total_images[:, :visible]
stove_input = torch.tensor(stove_input).to(self.c.device).type(self.c.dtype)
_, prop_dict2, _ = self.stove(stove_input, self.c.plot_every)
z_recon = prop_dict2['z']
# Use last state to do rollout
if not linear:
z_pred, _ = self.stove.rollout(z_recon[:, -1], long_rollout_length)
else:
# propagate last speed
v = z_recon[:, -1, :, 4:6].unsqueeze(1)
v = v.repeat(1, long_rollout_length, 1, 1)
t = torch.arange(1, long_rollout_length+1)
t = t.repeat(v.shape[0], *v.shape[2:], 1).permute(0, 3, 1, 2).double()
dx = v * t
new_x = z_recon[:, -1, :, 2:4].unsqueeze(1)
new_x = new_x.repeat(1, long_rollout_length, 1, 1) + dx
z_pred = torch.cat(
[z_recon[:, -1, :, :2].unsqueeze(1).repeat(1, lrl, 1, 1),
new_x,
v,
z_recon[:, -1, :, 6:].unsqueeze(1).repeat(1, lrl, 1, 1)],
-1
)
z_seq = torch.cat([z_recon, z_pred], 1)
# sigmoid positions to make errors comparable
if linear:
print('clamp positions to 0.9')
frame_lim = 0.8 if self.c.coord_lim == 10 else 0.9
z_seq = torch.cat([
z_seq[..., :2],
torch.clamp(z_seq[..., 2:4], -frame_lim, frame_lim),
z_seq[..., 6:]], -1)
# Simple Reconstruction of Sequences
# stove_input = total_images[:10]
# stove_input = torch.tensor(stove_input).to(self.c.device).type(self.c.dtype)
# elbo, prop_dict2, _ = self.stove(stove_input, self.c.plot_every)
# z_recon = prop_dict2['z']
# if self.c.debug_bw:
# img = stove_input.sum(2)
# img = torch.clamp(img, 0, 1)
# img = torch.unsqueeze(img, 2)
# model_images = self.stove.reconstruct_from_z(
# z_recon, img[:, skip:], max_activation=False, single_image=False)
# use mpe to get reconstructed images
if real_mpe:
if self.c.debug_bw:
img = stove_input[:, skip].sum(1)
img = torch.clamp(img, 0, 1)
img = torch.unsqueeze(img, 1)
model_images = self.stove.reconstruct_from_z(
z_seq, img, max_activation=False, single_image=True)
else:
model_images = self.stove.reconstruct_from_z(z_seq)
if self.c.debug_bw:
true_images = bw_transform(true_images)
model_images = torch.clamp(model_images, 0, 1)
mse = torch.mean(((true_images - model_images)**2), dim=(0, 2, 3, 4))
plot_sample = model_images[:10, :, 0].detach().cpu().numpy()
plot_sample = (255 * plot_sample.reshape(-1, self.c.height, self.c.width))
plot_sample = plot_sample.astype(np.uint8)
filename = 'linear_' if linear else ''
filename += 'pixel_error_sample.gif'
filepath = os.path.join(path, filename)
print('Saving gif to ', filepath)
imageio.mimsave(
filepath, plot_sample, fps=24)
# also log state differences
# bug_potential... for some reason self.c.coord_lim is 30 but max
# true_states is 10 for gravity
true_states = total_labels[:, skip:(visible+long_rollout_length)]
print(true_states.max(), ' is coord max.')
true_states = torch.tensor(true_states).to(self.c.device).type(self.c.dtype)
permutations = list(itertools.permutations(range(0, self.c.num_obj)))
errors = []
for perm in permutations:
error = ((true_states[:, :5, :, :2]-z_seq[:, :5, perm, 2:4])**2).sum(-1)
error = torch.sqrt(error).mean((1, 2))
errors += [error]
errors = torch.stack(errors, 1)
_, idx = errors.min(1)
selector = list(zip(range(idx.shape[0]), idx.cpu().tolist()))
pos_matched = [z_seq[i, :, permutations[j]] for i, j in selector]
pos_matched = torch.stack(pos_matched, 0)
mse_states = torch.sqrt(((
true_states[..., :2] - pos_matched[..., 2:4])**2).sum(-1)).mean((0, 2))
return mse, mse_states
def main(script_args):
"""Parse arguments, find runs, execute pixel_error."""
parser = argparse.ArgumentParser()
parser.add_argument(
"-p", "--path", type=str,
help="Set folder from which to create pixel errors for." +
"Must contain runs of model.")
parser.add_argument(
'--linear', action='store_true',
help='create linear errors')
parser.add_argument(
'--no-save', dest='no_save', action='store_true')
parser.add_argument(
'--real-mpe', dest='real_mpe', action='store_true')
parser.add_argument(
'--checkpoint', type=str, default='checkpoint')
args = parser.parse_args(script_args)
filename = 'pixel_errors.csv'
if args.linear:
filename = 'linear_' + filename
if 'run' not in args.path[-10:]:
restores = glob.glob(args.path+'run*')
restores = sorted(restores)
else:
restores = [args.path]
print(restores)
if len(restores) == 0:
raise ValueError('No runs found in path {}.'.format(args.path))
# debug
# mse, mse_states = get_pixel_error(
# restores[0], args.linear, args.path, args.real_mpe, args.checkpoint)
# return 0
for restore in restores:
try:
mse, mse_states = get_pixel_error(
restore, args.linear, args.path, args.real_mpe, args.checkpoint)
except Exception as e:
print(e)
print('Not possible for run {}.'.format(restore))
continue
mse = mse.cpu().detach().numpy()
if args.no_save:
continue
save_dir = os.path.join(args.path, 'test')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
with open(os.path.join(save_dir, filename), 'a') as f:
f.write(','.join(['{:.6f}'.format(i) for i in mse])+'\n')
with open(os.path.join(save_dir, 'states_'+filename), 'a') as f:
f.write(','.join(['{:.6f}'.format(i) for i in mse_states])+'\n')
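As shown, the script exposes everything through main(); a minimal entry-point sketch follows (the file and folder names in the comment are hypothetical and stand in for a directory containing run* experiment folders):
# Minimal sketch of an entry point for this evaluation script.
# Example invocation: python pixel_error.py -p experiments/gravity/ --linear
# ('pixel_error.py' and 'experiments/gravity/' are illustrative names only).
if __name__ == '__main__':
    import sys
    main(sys.argv[1:])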
| 34.4375 | 93 | 0.621208 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,907 | 0.247213 |
a75a94acdbd36e7b6da2d3d837a50b906558f9b8 | 770 | py | Python | users/admin.py | JVacca12/FIRST | e3906209cae1198e1fbda4d00bc0a906e8294a69 | ["MIT"] | null | null | null | users/admin.py | JVacca12/FIRST | e3906209cae1198e1fbda4d00bc0a906e8294a69 | ["MIT"] | null | null | null | users/admin.py | JVacca12/FIRST | e3906209cae1198e1fbda4d00bc0a906e8294a69 | ["MIT"] | null | null | null |
"""User admin classes."""
# Django
from django.contrib import admin
# Models
from users.models import User
@admin.register(User)
class UserAdmin(admin.ModelAdmin):
    """User admin."""
    # Columns shown in the admin list view; first_name and last_name are built-in User fields.
    list_display = ('pk', 'username', 'email', 'first_name', 'last_name')
    # Columns that link to the change form.
    list_display_links = ('pk', 'username', 'email', 'first_name', 'last_name')
search_fields = (
'email',
'username',
'first_name',
'last_name',
)
list_filter = (
'is_active',
'is_staff',
'date_joined',
'modified',
)
readonly_fields = ('date_joined', 'modified',)
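For reference, the decorator is shorthand for an explicit call; a minimal sketch of the equivalent, decorator-free form (illustrative only, use one form or the other, not both):
# Equivalent registration without the decorator (would replace @admin.register(User)):
from django.contrib import admin
from users.models import User
admin.site.register(User, UserAdmin)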
| 23.333333 | 154 | 0.633766 | 576 | 0.747082 | 0 | 0 | 598 | 0.775616 | 0 | 0 | 376 | 0.487678 |
a75aa0bd60c43a11405b09d22589cf2d9c586cc5 | 3,469 | py | Python | mud/migrations/0001_initial.py | lambda-mud-cs18/backend | 060c5c1a317d8b6557e778cd539e75f24eff05dd | ["MIT"] | 1 | 2022-01-12T17:44:26.000Z | 2022-01-12T17:44:26.000Z | mud/migrations/0001_initial.py | lambda-mud-cs18/backend | 060c5c1a317d8b6557e778cd539e75f24eff05dd | ["MIT"] | 8 | 2020-02-12T01:12:46.000Z | 2022-02-10T10:17:28.000Z | mud/migrations/0001_initial.py | lambda-mud-cs18/backend | 060c5c1a317d8b6557e778cd539e75f24eff05dd | ["MIT"] | 2 | 2022-01-12T17:44:29.000Z | 2022-01-12T17:44:29.000Z |
# Generated by Django 2.2.3 on 2019-07-31 17:10
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=200)),
('description', models.CharField(max_length=200)),
('weight', models.IntegerField()),
('itemtype', models.CharField(max_length=200)),
('level', models.IntegerField()),
('exp', models.IntegerField()),
('attributes', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='Map',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
],
),
migrations.CreateModel(
name='Player',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('playername', models.CharField(max_length=200)),
('name', models.CharField(max_length=200)),
('password', models.CharField(max_length=200)),
('team_id', models.IntegerField()),
('current_room', models.IntegerField()),
('cooldown', models.FloatField()),
('encumbrance', models.IntegerField()),
('strength', models.IntegerField()),
('speed', models.IntegerField()),
('gold', models.IntegerField()),
('inventory', models.CharField(max_length=1000)),
('status', models.CharField(max_length=1000)),
('errors', models.CharField(max_length=1000)),
('messages', models.CharField(max_length=1000)),
('token', models.CharField(max_length=1000)),
],
),
migrations.CreateModel(
name='PlayerInventory',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('player_id', models.IntegerField()),
('item_id', models.IntegerField()),
('quantity', models.IntegerField()),
],
),
migrations.CreateModel(
name='Room',
fields=[
('map_id', models.IntegerField()),
('room_id', models.IntegerField(primary_key=True, serialize=False)),
('title', models.CharField(max_length=200)),
('description', models.CharField(max_length=200)),
('coordinates', models.CharField(max_length=200)),
('elevation', models.IntegerField()),
('terrain', models.CharField(max_length=200)),
('north', models.CharField(max_length=200)),
('south', models.CharField(max_length=200)),
('east', models.CharField(max_length=200)),
('west', models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=200)),
('map_id', models.IntegerField()),
],
),
]
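Migrations like this one are normally applied with manage.py; a minimal sketch of doing the same programmatically (assumes DJANGO_SETTINGS_MODULE already points at the project's settings module):
# Illustrative: apply the 'mud' app's migrations from Python instead of the CLI.
import django
from django.core.management import call_command

django.setup()
call_command('migrate', 'mud')  # runs 0001_initial and any later migrations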
| 39.420455 | 84 | 0.51254 | 3,376 | 0.973191 | 0 | 0 | 0 | 0 | 0 | 0 | 457 | 0.131738 |
a75c1979034dbafe33e7945478e87745ce9ce8e5 | 918 | py | Python | scripts/quest/q22504s.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | ["MIT"] | 54 | 2019-04-16T23:24:48.000Z | 2021-12-18T11:41:50.000Z | scripts/quest/q22504s.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | ["MIT"] | 3 | 2019-05-19T15:19:41.000Z | 2020-04-27T16:29:16.000Z | scripts/quest/q22504s.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | ["MIT"] | 49 | 2020-11-25T23:29:16.000Z | 2022-03-26T16:20:24.000Z |
sm.setSpeakerID(1013000)
sm.sendNext("Ugh. This isn't going to work. I need something else. No plants. No meat. What, you have no idea? But you're the master, and you're older than me, too. You must know what'd be good for me!")
sm.setPlayerAsSpeaker()
sm.sendSay("#bBut I don't. It's not like age has anything to do with this...")
sm.setSpeakerID(1013000)
if sm.sendAskAccept("Since you're older, you must be more experienced in the world, too. Makes sense that you'd know more than me. Oh, fine. I'll ask someone who's even older than you, master!"):
if not sm.hasQuest(parentID):
sm.startQuest(parentID)
sm.setPlayerAsSpeaker()
sm.sendSayOkay("#b#b(You already asked Dad once, but you don't have any better ideas. Time to ask him again!)")
else:
sm.sendNext("No use trying to find an answer to this on my own. I'd better look for #bsomeone older and wiser than master#k!")
sm.dispose()
| 61.2 | 203 | 0.721133 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 637 | 0.6939 |
a75cf13072fe0194f0d08765f3c331975a5d8df7 | 424 | py | Python | user/migrations/0002_user_photo.py | martinlehoux/erp-reloaded | db7dea603095dec558f4b0ad9a0d2dbd20f8703c | ["MIT"] | null | null | null | user/migrations/0002_user_photo.py | martinlehoux/erp-reloaded | db7dea603095dec558f4b0ad9a0d2dbd20f8703c | ["MIT"] | 5 | 2021-04-08T18:54:04.000Z | 2021-06-10T18:37:26.000Z | user/migrations/0002_user_photo.py | martinlehoux/erp-reloaded | db7dea603095dec558f4b0ad9a0d2dbd20f8703c | ["MIT"] | null | null | null |
# Generated by Django 3.0.3 on 2020-03-01 00:58
from django.db import migrations, models
import user.models
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='photo',
field=models.ImageField(null=True, upload_to=user.models.UploadTo('photo')),
),
]
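The upload_to callable above is imported from user.models, which is not shown here; a plausible minimal sketch of such a helper (an assumption about its shape, not the project's actual code) might look like this:
# Hypothetical UploadTo helper; @deconstructible lets migrations serialize the callable.
from django.utils.deconstruct import deconstructible

@deconstructible
class UploadTo:
    def __init__(self, prefix):
        self.prefix = prefix
    def __call__(self, instance, filename):
        # e.g. 'photo/<pk>/<original filename>'
        return '{}/{}/{}'.format(self.prefix, instance.pk, filename)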
| 21.2 | 88 | 0.606132 | 312 | 0.735849 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.205189 |
a75f0071595f1cf5e30f78a377181f6b55570f76 | 61 | py | Python | core/models/__init__.py | Bhaskers-Blu-Org1/bLVNet-TAM | feadcd3a1a25723dc28bed867580032181e824a3 | ["Apache-2.0"] | 62 | 2019-10-22T14:52:30.000Z | 2021-07-27T12:07:38.000Z | core/models/__init__.py | Bhaskers-Blu-Org1/bLVNet-TAM | feadcd3a1a25723dc28bed867580032181e824a3 | ["Apache-2.0"] | 6 | 2019-12-16T06:03:42.000Z | 2020-08-31T07:59:04.000Z | core/models/__init__.py | IBM/bLVNet-TAM | feadcd3a1a25723dc28bed867580032181e824a3 | ["Apache-2.0"] | 16 | 2019-11-02T06:49:19.000Z | 2021-12-30T14:51:48.000Z |
from .blvnet_tam import bLVNet_TAM
__all__ = ['bLVNet_TAM']
| 15.25 | 34 | 0.770492 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.196721 |
a760fe286388453e9bf13c54cc23324198919723 | 438 | py | Python | monodepth/geometry/utils.py | vguizilini/packnet-sfm | e462716837f24c11cb227ca99fe30bcf12b3cc56 | ["MIT"] | 1 | 2020-04-30T07:32:57.000Z | 2020-04-30T07:32:57.000Z | monodepth/geometry/utils.py | muzi2045/packnet-sfm | fec6d0b493b784cabe5e6bf9c65b996a83c63fe1 | ["MIT"] | null | null | null | monodepth/geometry/utils.py | muzi2045/packnet-sfm | fec6d0b493b784cabe5e6bf9c65b996a83c63fe1 | ["MIT"] | null | null | null |
# Copyright 2020 Toyota Research Institute. All rights reserved.
"""
Geometry utilities
"""
import numpy as np
def invert_pose_numpy(T):
"""
'Invert' 4x4 extrinsic matrix
Parameters
----------
T: 4x4 matrix (world to camera)
Returns
-------
4x4 matrix (camera to world)
"""
Tc = np.copy(T)
R, t = Tc[:3, :3], Tc[:3, 3]
Tc[:3, :3], Tc[:3, 3] = R.T, - np.matmul(R.T, t)
return Tc
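A quick self-contained check of the inversion (illustrative only): composing a pose with its inverse should give the identity.
# Sanity check for invert_pose_numpy: T @ inv(T) == identity.
if __name__ == "__main__":
    theta = np.deg2rad(30.0)
    T = np.eye(4)
    T[:3, :3] = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                          [np.sin(theta), np.cos(theta), 0.0],
                          [0.0, 0.0, 1.0]])
    T[:3, 3] = [1.0, 2.0, 3.0]
    assert np.allclose(T @ invert_pose_numpy(T), np.eye(4))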
| 16.222222 | 65 | 0.547945 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 261 | 0.59589 |
a7624496ee4975eb04a3c005275217a54323fb5d | 27,209 | py | Python | minesweeper.py | MrAttoAttoAtto/Cool-Programming-Project | 68214d089b612fdcca7fe76dce3464edec35ce2b | ["MIT"] | null | null | null | minesweeper.py | MrAttoAttoAtto/Cool-Programming-Project | 68214d089b612fdcca7fe76dce3464edec35ce2b | ["MIT"] | null | null | null | minesweeper.py | MrAttoAttoAtto/Cool-Programming-Project | 68214d089b612fdcca7fe76dce3464edec35ce2b | ["MIT"] | null | null | null |
#Minesweeper!
from tkinter import *
import random, time, math, threading, os.path, os
#Tkinter Class
class MinesweeperMain: #Initialising class
def __init__(self, xLength, yLength, percentOfBombs, caller=None, winChoice=True):
try: #kills the 'play again' host (if it exists)
caller.root.destroy()
except TclError:
pass
except AttributeError:
pass
self.gameStarted = False #makes the necessary variables
self.gameOver = False
self.vlc64bitInstalled = True
self.squaresRevealed = 0
try: #checks if the user has vlc
import vlc
except OSError:
self.vlc64bitInstalled = False
self.xLength = xLength #sets these variables to the object
self.yLength = yLength
self.percentOfBombs = percentOfBombs #sets the variable
self.numOfBombs = math.floor(self.percentOfBombs/100*self.xLength*self.yLength) #setting the number of bombs
self.bombsLeftToReveal = self.numOfBombs #sets a variable that will allow for enough labels to be created
if self.vlc64bitInstalled:
self.explosionSound = vlc.MediaPlayer(os.path.join('sounds', 'explosion-sound.mp3')) #loads the sounds
self.alertSound = vlc.MediaPlayer(os.path.join('sounds', 'alert-sound.mp3')) #alert sound
self.winChoice = winChoice
if self.winChoice: #chooses the sound to load
self.winSound = vlc.MediaPlayer(os.path.join('sounds', 'win-sound.mp3'))
else:
self.winSound = vlc.MediaPlayer(os.path.join('sounds', 'win-sound.wav'))
self.mapData = [] #creating the variable which holds the map data
self.revealedSquareIds = [] #list so that, when the loss occurs and all tiles are revealed, already revealed squares are not affected
self.bombLocationsReserved = [] #creates a list that will hold the locations where no more bombs can be placed
self.root = Tk()
self.root.title('Minesweeper') #sets up the tkinter window
self.listOfNumberImages = [] #sets up this list for holding the images of the numbers
for x in range(9):
self.listOfNumberImages.append(PhotoImage(file='numbers'+os.sep+str(x)+'.PNG')) #fills said list
self.transImage = PhotoImage(file=os.path.join('pictures', 'transparent.png'))
self.flagImage = PhotoImage(file=os.path.join('pictures', 'flag.png'))
self.bombImage = PhotoImage(file=os.path.join('pictures', 'mine2-11.png'))
self.explosionImage = PhotoImage(file=os.path.join('pictures', 'explosion.png')) #sets up the rest of the images
self.frame = Frame(self.root) #makes the frame widget
self.frame.pack()
self.bombLabelList = [] #list for storing the bomb pictures
for i in range(self.numOfBombs): #adds all the necessary bomb picture labels
self.bombLabelList.append(Label(self.frame, image=self.bombImage, width=62, height=51)) #adds the right amount of bomb pictures to the list
if self.xLength % 2 == 0:
timeXPos = int(self.xLength/2-1) #sets the positions so they are in the middle
bombCountXPos = timeXPos + 1
else:
timeXPos = int(self.xLength/2-1.5)
bombCountXPos = timeXPos + 2
self.timeSecs = 0 #sets these time variables
self.timeMins = 0
self.timeLabel = Label(self.frame, text='Time') #puts the time and bomb count onto the tkinter window
self.timeLabel.grid(row=0, column=timeXPos)
self.bombLabel = Label(self.frame, text='Bombs')
self.bombLabel.grid(row=0, column=bombCountXPos)
self.timeStrVar = StringVar()
self.timeStrVar.set('00:00')
self.timeClock = Label(self.frame, textvariable=self.timeStrVar)
self.timeClock.grid(row=1, column=timeXPos)
self.bombStrVar = StringVar()
self.bombStrVar.set(str(self.numOfBombs))
self.bombsLeftLabel = Label(self.frame, textvariable=self.bombStrVar)
self.bombsLeftLabel.grid(row=1, column=bombCountXPos)
self.buttonList = [] #lists to hold data for buttons/labels
self.buttonStringVarList = []
self.labelList = []
self.isFlaggedList = []
self.mapData = [] #creating the variable which holds the map data
for l in range(self.yLength): #fills the lists with their required starting data
self.buttonStringVarList.append([])
self.buttonList.append([])
self.labelList.append([])
self.isFlaggedList.append([])
self.mapData.append([])
for p in range(self.xLength):
self.buttonStringVarList[l].append(StringVar())
self.buttonList[l].append('')
self.labelList[l].append('')
self.isFlaggedList[l].append(False)
self.mapData[l].append('')
xPos = 0 #sets the working positions of the button creation
yPos = 0
for pos in range(0, self.xLength*self.yLength): #creates all of the buttons required
if xPos == self.xLength:
yPos += 1
xPos = 0
self.buttonList[yPos][xPos] = Button(self.frame, height=49, width=60, textvariable=self.buttonStringVarList[yPos][xPos], image=self.transImage)
self.buttonList[yPos][xPos].grid(row=yPos+2, column=xPos)
self.buttonList[yPos][xPos].bind('<Button-1>', lambda e, xPosLoc=xPos, yPosLoc=yPos: self.revealSquare(xPosLoc, yPosLoc)) #reveals the square if left-clicked
self.buttonList[yPos][xPos].bind('<Button-3>', lambda e, xPosLoc=xPos, yPosLoc=yPos: self.markSquare(xPosLoc, yPosLoc)) #marks the square if right-clicked
xPos += 1
self.timerThread = threading.Thread(target=self.timerCode, name="timer") #starts the timer
self.timerThread.start()
self.root.mainloop() #mainloop!
def timerCode(self):
while True:
if self.gameOver: #if the game is over, exit this loop of the timer
return
self.timeSecs = int(self.timeSecs) #turns them back into ints (just in case they were converted into strings to add 0s to the front of them)
self.timeMins = int(self.timeMins)
start = time.time()
self.timeSecs += 1 #increments the seconds
if self.timeSecs == 60: #if it is a minute...
self.timeSecs = 0 #change the seconds to 0 and add 1 to the mins
self.timeMins += 1
if self.timeSecs < 10: #if either is lower than 10, make sure it has a 0 in front of the number
self.timeSecs = '0'+str(self.timeSecs)
if self.timeMins < 10:
self.timeMins = '0'+str(self.timeMins)
try:
self.timeStrVar.set(str(self.timeMins)+':'+str(self.timeSecs)) #sets the visual time
except RuntimeError: #if the window has been forcefully ended
return
while time.time() < start+1: #waits for a sec
continue
def generateBoard(self, xPos, yPos): #generating the board
self.bombLocationsReserved.append(xPos+yPos*self.xLength) #reserving the 3x3 area around the button placed
self.bombLocationsReserved.append(xPos+yPos*self.xLength-1)
self.bombLocationsReserved.append(xPos+yPos*self.xLength+1)
self.bombLocationsReserved.append(xPos+yPos*self.xLength-self.xLength)
self.bombLocationsReserved.append(xPos+yPos*self.xLength+self.xLength)
self.bombLocationsReserved.append(xPos+yPos*self.xLength-self.xLength-1)
self.bombLocationsReserved.append(xPos+yPos*self.xLength-self.xLength+1)
self.bombLocationsReserved.append(xPos+yPos*self.xLength+self.xLength-1)
self.bombLocationsReserved.append(xPos+yPos*self.xLength+self.xLength+1)
bombsLeftToPlace = self.numOfBombs #sets a helpful temporary variable
while bombsLeftToPlace > 0:
yPlace = 0
bombPlacement = random.randint(0, self.xLength*self.yLength-1) #random square id
placementValue = bombPlacement #another helpful variable
while bombPlacement >= self.xLength: #figures out the x and y from that
bombPlacement = bombPlacement - self.xLength
yPlace += 1
xPlace = bombPlacement
            if placementValue not in self.bombLocationsReserved: #checks the place isn't reserved
self.mapData[yPlace][xPlace] = 'B' #updates the map
bombsLeftToPlace = bombsLeftToPlace - 1 #self-explanatory
self.bombLocationsReserved.append(placementValue) #reserves the place just taken
for squareXPos in range(0, self.xLength): #for EVERY square...
for squareYPos in range(0, self.yLength):
bombsSurrounding = 0 #sets this to 0
if self.mapData[squareYPos][squareXPos] == 'B': #if a bomb...
self.buttonStringVarList[squareYPos][squareXPos].set('B') #sets the strVar to B (debugging)
continue #goes back to the loop
if squareXPos > 0: #all of this next part finds how many bombs surround a square (and makes sure that it does not wrap around or throw an error)
if squareYPos > 0:
if self.mapData[squareYPos-1][squareXPos-1] == 'B':
bombsSurrounding += 1
if self.mapData[squareYPos][squareXPos-1] == 'B':
bombsSurrounding += 1
try:
if self.mapData[squareYPos+1][squareXPos-1] == 'B':
bombsSurrounding += 1
except IndexError:
pass
if squareYPos > 0:
if self.mapData[squareYPos-1][squareXPos] == 'B':
bombsSurrounding += 1
try:
if self.mapData[squareYPos+1][squareXPos] == 'B':
bombsSurrounding += 1
except IndexError:
pass
if squareYPos > 0:
try:
if self.mapData[squareYPos-1][squareXPos+1] == 'B':
bombsSurrounding += 1
except IndexError:
pass
try:
if self.mapData[squareYPos][squareXPos+1] == 'B':
bombsSurrounding += 1
except IndexError:
pass
try:
if self.mapData[squareYPos+1][squareXPos+1] == 'B':
bombsSurrounding += 1
except IndexError:
pass
self.mapData[squareYPos][squareXPos] = bombsSurrounding #updates the mapData with the value of the square
def revealSquare(self, xPos, yPos): #if a square is left-clicked...
        if not self.gameStarted: #if the board hasn't been generated yet...
self.generateBoard(xPos, yPos) #generate it having been clicked at xPos, yPos
self.gameStarted = True #the board has been generated
        if xPos+yPos*self.xLength in self.revealedSquareIds or (self.isFlaggedList[yPos][xPos] and not self.gameOver): #if the id has already been revealed or the square is flagged...
return #exit the function
self.squaresRevealed += 1 #increments the squares revealed
self.revealedSquareIds.append(xPos+yPos*self.xLength) #append the id to the revealed ids
self.buttonList[yPos][xPos].destroy() #destroy the button
if self.mapData[yPos][xPos] != 'B': #if it is NOT a bomb...
self.labelList[yPos][xPos] = Label(self.frame, width=62, height=51, image=self.listOfNumberImages[self.mapData[yPos][xPos]]) #create a label for it,
self.labelList[yPos][xPos].grid(row=yPos+2, column=xPos)
self.labelList[yPos][xPos].bind('<Button-2>', lambda e, xPos=xPos, yPos=yPos: self.chordSquare(xPos, yPos)) # and if middle-clicked, it will call chordSquare
if not self.gameOver: #if the game hasn't been failed...
self.root.update() #update the window (for nice looking 0 chain reactions)
time.sleep(0.02) #sleep a bit
if self.mapData[yPos][xPos] == 0 and not self.gameOver: #if it is a 0 and the game has not been lost...
if xPos > 0: #reveal all round it (nice recursiveness)
if yPos > 0:
try:
self.revealSquare(xPos-1, yPos-1)
except Exception:
pass
try:
self.revealSquare(xPos-1, yPos)
except Exception:
pass
try:
self.revealSquare(xPos-1, yPos+1)
except Exception:
pass
if yPos > 0:
try:
self.revealSquare(xPos, yPos-1)
except Exception:
pass
try:
self.revealSquare(xPos, yPos+1)
except Exception:
pass
if yPos > 0:
try:
self.revealSquare(xPos+1, yPos-1)
except Exception:
pass
try:
self.revealSquare(xPos+1, yPos)
except Exception:
pass
try:
self.revealSquare(xPos+1, yPos+1)
except Exception:
pass
if self.mapData[yPos][xPos] == 'B': #if it's a bomb...
self.bombLabelList[self.bombsLeftToReveal-1].grid(row=yPos+2, column=xPos) #put the pic in its place
self.bombsLeftToReveal = self.bombsLeftToReveal-1 #self-explanatory
if self.mapData[yPos][xPos] == 'B' and not self.gameOver: #if it is the bomb which made you lose...
self.gameOver = True #you failed
print('Working...')
self.explosionLabel = Label(self.frame, width=62, height=51, image=self.explosionImage) #it becomes an explosion image
self.explosionLabel.grid(row=yPos+2, column=xPos)# and is placed where it was
if self.vlc64bitInstalled: #if vlc is installed...
self.alertSound.play() #play alert
time.sleep(0.3)
self.explosionSound.play() #play the sound
self.root.update() #update to show the explosion
for xFail in range(self.xLength*self.yLength): #open all squares
yFail = 0
while xFail >= self.xLength:
xFail = xFail - self.xLength
yFail += 1
self.revealSquare(xFail, yFail)
self.root.update() #update after all this is done
print('Done!')
gameOver = GameOverBox(self, 'loss') #activate the game over dialog
if self.squaresRevealed == self.xLength*self.yLength-self.numOfBombs and not self.gameOver: #if you have revealed all of the non-bomb squares and not failed...
self.gameOver = True
print('Working...')
if self.vlc64bitInstalled: #if vlc is installed...
self.winSound.play() #play the win sound
bombLocIds = self.bombLocationsReserved[8:] #give the bomb ids
for bombId in bombLocIds: #iterate through them
yLocBomb = 0
while bombId >= self.xLength: #turn the ids into coordinates
bombId = bombId - self.xLength
yLocBomb += 1
xLocBomb = bombId
self.revealSquare(xLocBomb, yLocBomb) #reveal those coords
print('Done!')
gameOver = GameOverBox(self, 'win') #open the win dialog box
def markSquare(self, xPos, yPos): #flagging
if not self.isFlaggedList[yPos][xPos]: #if the square is NOT flagged...
self.buttonList[yPos][xPos].configure(image=self.flagImage, height=49, width=60) #flag it
self.bombStrVar.set(int(self.bombStrVar.get())-1) #increment the bombs left
self.isFlaggedList[yPos][xPos] = True
else:
self.buttonList[yPos][xPos].configure(image=self.transImage, height=49, width=60) #get rid of the flag
self.bombStrVar.set(int(self.bombStrVar.get())+1) #increment the bombs left
self.isFlaggedList[yPos][xPos] = False
def chordSquare(self, xPos, yPos): #chording
flagsSurrounding = 0
flagsNeeded = self.mapData[yPos][xPos]
if xPos > 0: #all of this next part finds how many flags surround a square (and makes sure that it does not wrap around or throw an error)
if yPos > 0:
try:
if self.isFlaggedList[yPos-1][xPos-1]:
flagsSurrounding += 1
except Exception:
pass
try:
if self.isFlaggedList[yPos][xPos-1]:
flagsSurrounding += 1
except Exception:
pass
try:
if self.isFlaggedList[yPos+1][xPos-1]:
flagsSurrounding += 1
except IndexError:
pass
except Exception:
pass
if yPos > 0:
try:
if self.isFlaggedList[yPos-1][xPos]:
flagsSurrounding += 1
except Exception:
pass
try:
if self.isFlaggedList[yPos+1][xPos]:
flagsSurrounding += 1
except IndexError:
pass
except Exception:
pass
if yPos > 0:
try:
if self.isFlaggedList[yPos-1][xPos+1]:
flagsSurrounding += 1
except IndexError:
pass
except Exception:
pass
try:
if self.isFlaggedList[yPos][xPos+1]:
flagsSurrounding += 1
except IndexError:
pass
except Exception:
pass
try:
if self.isFlaggedList[yPos+1][xPos+1]:
flagsSurrounding += 1
except IndexError:
pass
except Exception:
pass
if flagsSurrounding == flagsNeeded: #if there are enough, but not too many flags...
if xPos > 0: #reveal all around it
if yPos > 0:
try:
self.revealSquare(xPos-1, yPos-1)
except Exception:
pass
try:
self.revealSquare(xPos-1, yPos)
except Exception:
pass
try:
self.revealSquare(xPos-1, yPos+1)
except Exception:
pass
if yPos > 0:
try:
self.revealSquare(xPos, yPos-1)
except Exception:
pass
try:
self.revealSquare(xPos, yPos+1)
except Exception:
pass
if yPos > 0:
try:
self.revealSquare(xPos+1, yPos-1)
except Exception:
pass
try:
self.revealSquare(xPos+1, yPos)
except Exception:
pass
try:
self.revealSquare(xPos+1, yPos+1)
except Exception:
pass
class GameOverBox: #end of game dialog
def __init__(self, master, state):
if state == 'loss': #if you lost
self.title = 'Game Over' #set these variables
self.message = 'You Lost!'
self.color = 'red'
else: #if you won
self.title = 'Congratulations' #set these variables
self.message = 'You Won, Well Done! It took you '+master.timeStrVar.get()+'!'
self.color = 'green'
self.root = Tk()
self.root.title(self.title) #create the window
self.frame = Frame(self.root) #create the frame
self.frame.pack()
self.label = Label(self.frame, text=self.message, fg=self.color) #create the label
self.label.grid(row=0, column=1)
self.playAgainButton = Button(self.frame, text='Play Again', fg='green', command=lambda: self.restart(master)) #create the play again button
self.playAgainButton.grid(row=0, column=0)
self.exitButton = Button(self.frame, text='Exit and Close', fg='red', command=lambda: self.exit(master)) #create the exit button
self.exitButton.grid(row=0, column=2)
self.playOtherButton = Button(self.frame, text='Play another configuration', command=lambda: self.playOther(master)) #create the 'play another config' button
self.playOtherButton.grid(row=1, column=1)
self.root.mainloop() #Mainloop!
def restart(self, master): #the restart function
try:
master.root.destroy() #kill the MinesweeperMain window
except Exception:
pass
openMain(self, master=master) #re-call it
def exit(self, master): #exit func
try:
            master.root.destroy() #kill the MinesweeperMain window
except Exception:
pass
self.root.destroy() #kill the end of game dialog
def playOther(self, master):
global start
try:
master.root.destroy() #kill the MinesweeperMain window
except Exception:
pass
start = StartBox(self) #start the Start Box
class StartBox:
def __init__(self, caller=None):
try:
caller.root.destroy() #try killing the play again box (if it exists)
except Exception:
pass
self.choice = True #choice defaults to true
self.root = Tk() #creates the window
self.root.title('Start Minesweeper')
self.frame = Frame(self.root) #creates the frame
self.frame.pack()
self.xLabel = Label(self.frame, text='Enter the width of the minesweeper board')
self.xLabel.grid(row=0, column=0) #creates the xLabel
self.xLengthStrVar = StringVar()
self.xInput = Entry(self.frame, width=5, textvariable=self.xLengthStrVar)
self.xInput.grid(row=1, column=0) #creates the x entry box
self.yLabel = Label(self.frame, text='Enter the height of the minesweeper board')
self.yLabel.grid(row=3, column=0) #etc
self.yLengthStrVar = StringVar()
self.yInput = Entry(self.frame, width=5, textvariable=self.yLengthStrVar)
self.yInput.grid(row=4, column=0) #etc
self.bombPercentLabel = Label(self.frame, text='Enter the percentage of the squares you would like to be bombs')
self.bombPercentLabel.grid(row=6, column=0) #etc
self.bombPercentStrVar = StringVar()
self.bombPercentInput = Entry(self.frame, width=5, textvariable=self.bombPercentStrVar)
self.bombPercentInput.grid(row=7, column=0) #etc
self.winChoiceLabel = Label(self.frame, text='Select either the orchestral or vocal win event')
self.winChoiceLabel.grid(row=9, column=0) #creates the win choice label
self.vocalWinButton = Button(self.frame, text='Change to vocal', command=lambda: self.setWin(True))
self.orchestralWinButton = Button(self.frame, text='Change to orchestral', command=lambda: self.setWin(False))
self.orchestralWinButton.grid(row=10, column=0) #creates both win choice buttons and activates the orchestral one
self.winChoiceChoiceStrVar = StringVar()
self.winChoiceChoiceStrVar.set('The vocal win event is selected')
self.winChoiceChoiceLabel = Label(self.frame, textvariable=self.winChoiceChoiceStrVar)
self.winChoiceChoiceLabel.grid(row=10, column=1) #creates the StringVar which will tell you which choice you have selected
self.submitButton = Button(self.frame, text='Submit', fg='green', command=self.completeRequest)
self.submitButton.grid(row=12, column=0) #submit button
self.cancelButton = Button(self.frame, text='Cancel and Exit', fg='red', command=self.root.destroy)
self.cancelButton.grid(row=12, column=1) #exit button
self.root.mainloop() #Mainloop!
def setWin(self, choice):
self.choice = choice #sets the variable
if self.choice:
self.vocalWinButton.grid_forget() #updates which buttons you can press and the stringvar
self.orchestralWinButton.grid(row=10, column=0)
self.winChoiceChoiceStrVar.set('The vocal win event is selected')
else:
self.orchestralWinButton.grid_forget() #see above
self.vocalWinButton.grid(row=10, column=0)
self.winChoiceChoiceStrVar.set('The orchestral win event is selected')
def completeRequest(self): #completes the request
try:
self.xLen = int(self.xLengthStrVar.get()) #tries to make them ints/floats
self.yLen = int(self.yLengthStrVar.get())
self.bombPercent = float(self.bombPercentStrVar.get())
            if not (self.xLen*self.yLen)-(self.xLen*self.yLen*self.bombPercent/100) >= 9: #if 9 squares cannot be reserved for the first click, don't allow them to play
error = ErrorBox('The percentage of bombs is too high, the game will not generate')
return
openMain(self, self.xLen, self.yLen, self.bombPercent, self.choice) #opens the opener
except ValueError:
error = ErrorBox('One or more values you have entered is invalid (all have to be numbers but the percentage does not have to be an integer)') #these have to be numbers!
pass
class ErrorBox:
def __init__(self, error):
self.error = error #sets the error
self.root = Tk() #creates the window
self.root.title('Error')
self.frame = Frame(self.root) #creates the frame
self.frame.pack()
self.label = Label(self.frame, text=error, fg='red') #shows the error
self.label.grid(row=0, column=0)
self.button = Button(self.frame, text='Ok', command=self.root.destroy) #button to kill the error box
self.button.grid(row=1, column=0)
self.root.mainloop() #Mainloop!
def openMain(caller, xLength=None, yLength=None, percentOfBombs=None, winChoice=None, master=None): #restarts it outside of the class
global minesweeper
if master != None: #if it has been called from the play again box...
minesweeper = MinesweeperMain(master.xLength, master.yLength, master.percentOfBombs, caller, master.winChoice) #use the old configs
else: #else
minesweeper = MinesweeperMain(xLength, yLength, percentOfBombs, caller, winChoice) #use the new configs
if __name__ == '__main__':
start = StartBox()
minesweeper = None
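A board can also be created directly, bypassing the StartBox dialog; a minimal sketch (the 9x9 size and 15% bomb rate are arbitrary, and the pictures/, numbers/ and sounds/ asset folders must sit next to the script):
# Illustrative alternative entry point: open a 9x9 board with 15% bombs.
game = MinesweeperMain(9, 9, 15.0, winChoice=True)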
| 40.429421 | 183 | 0.592745 | 26,522 | 0.974751 | 0 | 0 | 0 | 0 | 0 | 0 | 6,153 | 0.226138 |
a7625f42a7dd6cbf1419217f4da8ae9f6f00c5f6 | 5,431 | py | Python | cannlytics/utils/scraper.py | mindthegrow/cannlytics | c266bc1169bef75214985901cd3165f415ad9ba7 | ["MIT"] | 7 | 2021-05-31T15:30:22.000Z | 2022-02-05T14:12:31.000Z | cannlytics/utils/scraper.py | mindthegrow/cannlytics | c266bc1169bef75214985901cd3165f415ad9ba7 | ["MIT"] | 17 | 2021-06-09T01:04:27.000Z | 2022-03-18T14:48:12.000Z | cannlytics/utils/scraper.py | mindthegrow/cannlytics | c266bc1169bef75214985901cd3165f415ad9ba7 | ["MIT"] | 5 | 2021-06-07T13:52:33.000Z | 2021-08-04T00:09:39.000Z |
# -*- coding: utf-8 -*-
"""
Scrape Website Data | Cannlytics
Copyright © 2021 Cannlytics
Author: Keegan Skeate <keegan@cannlytics.com>
Created: 1/10/2021
License GPLv3+: GNU GPL version 3 or later <https://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
Resources:
https://stackoverflow.com/questions/54416896/how-to-scrape-email-and-phone-numbers-from-a-list-of-websites
https://hackersandslackers.com/scraping-urls-with-beautifulsoup/
TODO:
Improve with requests-html - https://github.com/psf/requests-html
- Get #about
- Get absolute URLs
- Search for text (prices/analyses)
r.html.search('Python is a {} language')[0]
"""
import re
import requests
from bs4 import BeautifulSoup
def get_page_metadata(url):
"""Scrape target URL for metadata."""
headers = {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Methods": "GET",
"Access-Control-Allow-Headers": "Content-Type",
"Access-Control-Max-Age": "3600",
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0",
}
# Handle URLs without http beginning
if not url.startswith("http"):
url = "http://" + url
response = requests.get(url, headers=headers)
html = BeautifulSoup(response.content, "html.parser")
metadata = {
"description": get_description(html),
"image_url": get_image(html), # FIXME: Append URL if relative path.
"favicon": get_favicon(html, url),
"brand_color": get_theme_color(html),
}
return response, html, metadata
def get_description(html):
"""Scrape page description."""
description = None
if html.find("meta", property="description"):
description = html.find("meta", property="description").get("content")
elif html.find("meta", property="og:description"):
description = html.find("meta", property="og:description").get("content")
elif html.find("meta", property="twitter:description"):
description = html.find("meta", property="twitter:description").get("content")
elif html.find("p"):
description = html.find("p").contents
if isinstance(description, list):
try:
description = description[0]
except IndexError:
pass
return description
def get_image(html):
"""Scrape share image."""
image = None
if html.find("meta", property="image"):
image = html.find("meta", property="image").get("content")
elif html.find("meta", property="og:image"):
image = html.find("meta", property="og:image").get("content")
elif html.find("meta", property="twitter:image"):
image = html.find("meta", property="twitter:image").get("content")
elif html.find("img", src=True):
image = html.find_all("img")[0].get("src")
return image
def get_favicon(html, url):
"""Scrape favicon."""
if html.find("link", attrs={"rel": "icon"}):
favicon = html.find("link", attrs={"rel": "icon"}).get("href")
elif html.find("link", attrs={"rel": "shortcut icon"}):
favicon = html.find("link", attrs={"rel": "shortcut icon"}).get("href")
else:
favicon = f'{url.rstrip("/")}/favicon.ico'
return favicon
def get_theme_color(html):
"""Scrape brand color."""
if html.find("meta", property="theme-color"):
color = html.find("meta", property="theme-color").get("content")
return color
return None
def get_phone(html, response):
"""Scrape phone number."""
try:
phone = html.select("a[href*=callto]")[0].text
return phone
    except Exception:
pass
try:
phone = re.findall(
r"\(?\b[2-9][0-9]{2}\)?[-][2-9][0-9]{2}[-][0-9]{4}\b", response.text
)[0]
return phone
    except Exception:
pass
try:
phone = re.findall(
r"\(?\b[2-9][0-9]{2}\)?[-. ]?[2-9][0-9]{2}[-. ]?[0-9]{4}\b", response.text
)[-1]
return phone
    except Exception:
print("Phone number not found")
phone = ""
return phone
def get_email(html, response):
"""Get email."""
try:
email = re.findall(
r"([a-zA-Z0-9._-]+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9_-]+)", response.text
)[-1]
return email
    except Exception:
pass
try:
email = html.select("a[href*=mailto]")[-1].text
    except Exception:
print("Email not found")
email = ""
return email
def find_lab_address():
"""
TODO: Tries to find a lab's address from their website, then Google Maps.
"""
street, city, state, zipcode = None, None, None, None
return street, city, state, zipcode
def find_lab_linkedin():
"""
TODO: Tries to find a lab's LinkedIn URL. (Try to find LinkedIn on homepage?)
"""
return ""
def find_lab_url():
"""
TODO: Find a lab's website URL. (Google search for name?)
"""
return ""
def clean_string_columns(df):
"""Clean string columns in a dataframe."""
for column in df.columns:
try:
df[column] = df[column].str.title()
df[column] = df[column].str.replace("Llc", "LLC")
df[column] = df[column].str.replace("L.L.C.", "LLC")
df[column] = df[column].str.strip()
except AttributeError:
pass
return df
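A minimal usage sketch of the helpers above (the URL is a placeholder):
# Illustrative only; example.com stands in for a real site.
response, html, metadata = get_page_metadata("example.com")
print(metadata["description"], metadata["favicon"])
print(get_phone(html, response), get_email(html, response))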
| 30.511236 | 110 | 0.598048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,377 | 0.437592 |
a762725a417c914c2de8c1cfaad398234b972ef4 | 22,326 | py | Python | acsm/utils/bird_vis.py | eldar/acsm | 04069e8bb4c12185473dc10c3355e5367fa98968 | ["Apache-2.0"] | 52 | 2020-04-02T12:35:55.000Z | 2022-03-11T07:47:30.000Z | acsm/utils/bird_vis.py | eldar/acsm | 04069e8bb4c12185473dc10c3355e5367fa98968 | ["Apache-2.0"] | 8 | 2020-06-04T07:34:34.000Z | 2021-09-18T21:17:26.000Z | acsm/utils/bird_vis.py | eldar/acsm | 04069e8bb4c12185473dc10c3355e5367fa98968 | ["Apache-2.0"] | 6 | 2020-07-12T02:12:18.000Z | 2021-03-06T05:03:33.000Z |
"""
Code borrowed from
https://github.com/akanazawa/cmr/blob/master/utils/bird_vis.py
Visualization helpers specific to birds.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from torch.autograd import Variable
import numpy as np
import os.path as osp
import cv2
import pdb
from . import cub_parse
from ..nnutils.nmr import NeuralRenderer
from ..utils import transformations
from . import visutil
import pdb
class VisRenderer(object):
"""
Utility to render meshes using pytorch NMR
faces are F x 3 or 1 x F x 3 numpy
"""
def __init__(self, img_size, faces, t_size=3):
self.renderer = NeuralRenderer(img_size)
self.faces = Variable(
torch.IntTensor(faces).cuda(), requires_grad=False)
if self.faces.dim() == 2:
self.faces = torch.unsqueeze(self.faces, 0)
default_tex = np.ones((1, self.faces.shape[1], t_size, t_size, t_size,
3))
blue = np.array([156, 199, 234.]) / 255.
default_tex = default_tex * blue
# Could make each triangle different color
self.default_tex = Variable(
torch.FloatTensor(default_tex).cuda(), requires_grad=False)
# rot = transformations.quaternion_about_axis(np.pi/8, [1, 0, 0])
# This is median quaternion from sfm_pose
# rot = np.array([ 0.66553962, 0.31033762, -0.02249813, 0.01267084])
# This is the side view:
import cv2
R0 = cv2.Rodrigues(np.array([np.pi / 3, 0, 0]))[0]
R1 = cv2.Rodrigues(np.array([0, np.pi / 2, 0]))[0]
R = R1.dot(R0)
R = np.vstack((np.hstack((R, np.zeros((3, 1)))), np.array([0, 0, 0,
1])))
rot = transformations.quaternion_from_matrix(R, isprecise=True)
cam = np.hstack([0.75, 0, 0, rot])
self.default_cam = Variable(
torch.FloatTensor(cam).cuda(), requires_grad=False)
self.default_cam = torch.unsqueeze(self.default_cam, 0)
def __call__(self, verts, cams=None, texture=None, rend_mask=False):
"""
verts is |V| x 3 cuda torch Variable
cams is 7, cuda torch Variable
Returns N x N x 3 numpy
"""
if texture is None:
texture = self.default_tex
elif texture.dim() == 5:
# Here input it F x T x T x T x 3 (instead of F x T x T x 3)
# So add batch dim.
texture = torch.unsqueeze(texture, 0)
if cams is None:
cams = self.default_cam
elif cams.dim() == 1:
cams = torch.unsqueeze(cams, 0)
if verts.dim() == 2:
verts = torch.unsqueeze(verts, 0)
verts = asVariable(verts)
cams = asVariable(cams)
texture = asVariable(texture)
if rend_mask:
rend = self.renderer.forward(verts, self.faces, cams)
rend = rend.repeat(3, 1, 1)
rend = rend.unsqueeze(0)
else:
rend = self.renderer.forward(verts, self.faces, cams, texture)
rend = rend.data.cpu().numpy()[0].transpose((1, 2, 0))
rend = np.clip(rend, 0, 1) * 255.0
return rend.astype(np.uint8)
def rotated(self, vert, deg, axis=[0, 1, 0], cam=None, texture=None):
"""
vert is N x 3, torch FloatTensor (or Variable)
"""
import cv2
new_rot = cv2.Rodrigues(np.deg2rad(deg) * np.array(axis))[0]
new_rot = convert_as(torch.FloatTensor(new_rot), vert)
center = vert.mean(0)
new_vert = torch.t(torch.matmul(new_rot,
torch.t(vert - center))) + center
# new_vert = torch.matmul(vert - center, new_rot) + center
return self.__call__(new_vert, cams=cam, texture=texture)
def diff_vp(self,
verts,
cam=None,
angle=90,
axis=[1, 0, 0],
texture=None,
kp_verts=None,
new_ext=None,
extra_elev=False):
if cam is None:
cam = self.default_cam[0]
if new_ext is None:
new_ext = [0.6, 0, 0]
# Cam is 7D: [s, tx, ty, rot]
import cv2
cam = asVariable(cam)
quat = cam[-4:].view(1, 1, -1)
R = transformations.quaternion_matrix(
quat.squeeze().data.cpu().numpy())[:3, :3]
rad_angle = np.deg2rad(angle)
rotate_by = cv2.Rodrigues(rad_angle * np.array(axis))[0]
# new_R = R.dot(rotate_by)
new_R = rotate_by.dot(R)
if extra_elev:
# Left multiply the camera by 30deg on X.
R_elev = cv2.Rodrigues(np.array([np.pi / 9, 0, 0]))[0]
new_R = R_elev.dot(new_R)
# Make homogeneous
new_R = np.vstack(
[np.hstack((new_R, np.zeros((3, 1)))),
np.array([0, 0, 0, 1])])
new_quat = transformations.quaternion_from_matrix(
new_R, isprecise=True)
new_quat = Variable(torch.Tensor(new_quat).cuda(), requires_grad=False)
# new_cam = torch.cat([cam[:-4], new_quat], 0)
new_ext = Variable(torch.Tensor(new_ext).cuda(), requires_grad=False)
new_cam = torch.cat([new_ext, new_quat], 0)
rend_img = self.__call__(verts, cams=new_cam, texture=texture)
if kp_verts is None:
return rend_img
else:
kps = self.renderer.project_points(
kp_verts.unsqueeze(0), new_cam.unsqueeze(0))
kps = kps[0].data.cpu().numpy()
return kp2im(kps, rend_img, radius=1)
def set_bgcolor(self, color):
self.renderer.set_bgcolor(color)
def set_light_dir(self, direction, int_dir=0.8, int_amb=0.8):
renderer = self.renderer.renderer.renderer
renderer.light_direction = direction
renderer.light_intensity_directional = int_dir
renderer.light_intensity_ambient = int_amb
def set_light_status(self, use_lights):
renderer = self.renderer.renderer.renderer
renderer.use_lights = use_lights
return
def contour_alphas(H, W, real_img=False):
alphas = np.zeros((H,W))
n_lines_H = 20
n_lines_W = 10
if real_img:
line_width_x = 10
line_width_y = 10
else:
line_width_x = 10
line_width_y = 10
    line_pos_x = (np.linspace(0, W-1, n_lines_W, endpoint=False) + 0.5*W/n_lines_W).astype(int)
    line_pos_y = (np.linspace(0, H-1, n_lines_H, endpoint=False) + 0.5*H/n_lines_H).astype(int)
for x in line_pos_x:
alphas[:, x-line_width_x: x+line_width_x+1] = 1
for y in line_pos_y:
alphas[y-line_width_y: y+line_width_y+1, :] = 1
return torch.Tensor(alphas).unsqueeze(0)
def sample_UV_contour(img, uv_map, uv_img, mask, real_img=False):
img = img.unsqueeze(0)
uv_map = uv_map.unsqueeze(0)
uv_img = uv_img.unsqueeze(0)
uv_sample = torch.nn.functional.grid_sample(uv_img, 2*uv_map-1).squeeze(0)
uv_sample = uv_sample*mask + (1-mask)
# alphas = contour_alphas(uv_img.shape[2], uv_img.shape[3], real_img).unsqueeze(0)
alphas = contour_alphas(uv_img.shape[2], uv_img.shape[3], real_img).unsqueeze(0)* 0 + 1
# pdb.set_trace()
alpha_sample = torch.nn.functional.grid_sample(alphas, 2*uv_map-1).squeeze(0)
# alpha_sample = alpha_sample*0.95 + 0.05
alpha_sample = (alpha_sample>0.0).float()*0.7
# alpha_sample = (alpha_sample > 0.9).float()
alpha_sample = alpha_sample*mask
if real_img:
# uv_rendering = (uv_sample*alpha_sample)*1.0 + img.squeeze(0)*(1-alpha_sample)*0.3 * (mask) + img.squeeze(0)*(1-alpha_sample)* (1-mask)*0.3
uv_rendering = (uv_sample*alpha_sample)*1.0 + img.squeeze(0)*(1-alpha_sample)
uv_rendering = (uv_sample*alpha_sample)*1.0 + img.squeeze(0)*(1-alpha_sample)*0.4 * (mask) + img.squeeze(0)*(1-alpha_sample)* (1-mask)
else:
uv_rendering = (uv_sample*alpha_sample) + (img.squeeze(0)*(1-alpha_sample))
return uv_rendering
def draw_points_on_image(img, points,):
img = img.copy()
for kpx, keypoint in enumerate(points):
color = (0,0,0)
img = cv2.circle(img,(keypoint[0],keypoint[1]), 5, (color[0]*255,color[1]*255,color[2]*255), -1)
return img
def draw_keypoint_on_image(img, keypoints, keypoints_vis, color_map=None):
img = img.copy()
for kpx, (keypoint, vis) in enumerate(zip(keypoints,keypoints_vis)):
if vis > 0:
color = (0,0,255)
if color_map is not None:
color = color_map[kpx]
img = cv2.circle(img,(keypoint[0],keypoint[1]), 10, (color[0]*255,color[1]*255,color[2]*255), -1)
return img
def write_on_image(img, text, location): ## location is x,y
img = img.copy()
color = (0,0,255)
img = cv2.putText(img,"{}".format(text), (location[0],location[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, thickness=2)
return img
def draw_keypoint_and_text_on_image(img, keypoints, keypoints_vis, color_map=None, text=None, text_col=None):
img = img.copy()
for kpx, (keypoint, vis) in enumerate(zip(keypoints,keypoints_vis)):
if vis > 0:
color = (0,0,255)
if color_map is not None:
color = color_map[kpx]
img = cv2.circle(img,(keypoint[0],keypoint[1]), 5, (color[0]*255,color[1]*255,color[2]*255), -1)
color = (0,0,255)
if text_col is not None:
color = text_col[kpx]
if text is not None:
img = cv2.putText(img,"{}".format(text[kpx]), (keypoint[0],keypoint[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, thickness=1)
return img
def save_obj_with_texture(name, results_path, texture_img, mean_shape):
visutil.mkdir(results_path)
verts = np.round(mean_shape['verts'],5)
uv_verts = np.round(mean_shape['uv_verts'],5)
faces = mean_shape['faces']
obj_file = osp.join(results_path, '{}.obj'.format(name))
with open(obj_file,'w') as f:
f.write('mtllib {}.mtl\n'.format(name))
## write vertices
for v in verts:
f.write("v {} {} {}\n".format(v[0], v[1], v[2]))
## write texture vertices
for v in uv_verts:
f.write("vt {} {}\n".format(v[0], 1 - v[1]))
f.write('usemtl bird\n')
f.write('s 1\n')
## write faces
faces = faces + 1
for fv in faces:
f.write('f {}/{}/ {}/{}/ {}/{}/ \n'.format(fv[0], fv[0], fv[1], fv[1], fv[2], fv[2]))
# for fv in faces:
# f.write('f {}// {}// {}// \n'.format(fv[0]+1, fv[1]+1, fv[2]+1))
## mtl file
mtl_file = osp.join(results_path, '{}.mtl'.format(name))
with open(mtl_file,'w') as f:
f.write('# Material Count: 1\n')
f.write('newmtl bird\n')
f.write('Ns 96.078431\n')
f.write('Ka 1.000000 1.000000 1.000000\n')
f.write('Kd 0.640000 0.640000 0.640000\n')
f.write('Ks 0.500000 0.500000 0.500000\n')
f.write('Ke 0.000000 0.000000 0.000000\n')
f.write('Ni 1.00000\n')
f.write('d 1.000000\n')
f.write('illum 2\n')
f.write('map_Kd {}.png\n'.format(name,))
## Dump the texture image
# texture_img[:,:,0], texture_img[:,:,2] = texture_img[:,:,2], texture_img[:,:,0]
# texture_img = texture_img[:,:,[2,1,0]]
# pdb.set_trace()
visutil.save_image(texture_img, osp.join(results_path,'{}.png'.format(name)))
return
def merge_textures(foreground, background,):
'''
3, H, W
Assume foreground to have 1 in the A channel and 0 for the background.
'''
texture = foreground * (foreground[3,None,...] > 0.5) + background * (foreground[3,None,...] <0.5)
return texture
def render_transfer_kps_imgs(keypoint_cmap, img1, img2, kps1, kps2, transfer_kps12, transfer_kps21, common_kps):
visuals = {}
common_vis = kps1[:,0]*0
common_vis[common_kps] = 1
img1_tfs = draw_keypoint_on_image(img1, kps1,
common_vis, keypoint_cmap)
img2_tfs = draw_keypoint_on_image(img2, kps2,
common_vis, keypoint_cmap)
img_tfs12 = draw_keypoint_on_image(img2, transfer_kps12,
common_vis, keypoint_cmap)
img_tfs21 = draw_keypoint_on_image(img1, transfer_kps21,
common_vis, keypoint_cmap)
visuals['tfs_a_img1'] = img1_tfs
visuals['tfs_d_img2'] = img2_tfs
visuals['tfs_b_1to2'] = img_tfs12
visuals['tfs_c_2to1'] = img_tfs21
return visuals
def create_monocolor_texture(uvimg_H, uvimg_W, color=None):
if color is None:
color = [156, 199, 234., 0]
default_tex = np.ones((uvimg_H, uvimg_W,4))
blue = np.array(color) / 255.
blue[3] = color[3]
default_tex = default_tex * blue.reshape(1,1,-1)
return default_tex.transpose(2,0,1)
def create_kp_heat_map_texture(uvimg_H, uvimg_W, u_cord=None, v_cord=None, color=None, transprancy=1):
default_tex = create_monocolor_texture(uvimg_H, uvimg_W)
if color is None:
color = (1,0,0)
box_size = 3
if v_cord is not None and u_cord is not None:
default_tex = default_tex*0
default_tex[0, v_cord-box_size:v_cord+box_size, u_cord-box_size:u_cord+box_size] = color[0]
default_tex[1, v_cord-box_size:v_cord+box_size, u_cord-box_size:u_cord+box_size] = color[1]
default_tex[2, v_cord-box_size:v_cord+box_size, u_cord-box_size:u_cord+box_size] = color[2]
default_tex[3, v_cord-box_size:v_cord+box_size, u_cord-box_size:u_cord+box_size] = transprancy
return default_tex
def upsample_img_mask_uv_map(img, mask, uv_map):
uv_map = torch.nn.functional.upsample(uv_map.permute(2,0,1).unsqueeze(0), scale_factor=4, mode='bilinear')
mask = torch.nn.functional.upsample(mask.unsqueeze(0), scale_factor=4, mode='nearest').squeeze(0)
img = torch.nn.functional.upsample(img.unsqueeze(0), scale_factor=4, mode='nearest').squeeze(0)
uv_map = uv_map.squeeze().permute(1,2,0)
return img, mask, uv_map
def create_texture_image_from_uv_map(uvimg_H, uvimg_W, uv_map, img, mask):
default_tex = np.ones((uvimg_H, uvimg_W,3))
blue = np.array([156, 199, 234.]) / 255.
default_tex = default_tex * blue.reshape(1,1,-1)
count_tex = np.zeros((uvimg_H, uvimg_W,1))
uv_map_inds = uv_map.copy()
uv_map_inds[:,:,0] = np.clip((uv_map[:,:,0] * uvimg_W).round(), 0, uvimg_W-1)
uv_map_inds[:,:,1] = np.clip((uv_map[:,:,1] * uvimg_H).round(), 0, uvimg_H-1)
uv_map_inds = uv_map_inds.astype(np.int32)
non_zero_inds = np.where(mask.squeeze())
for rx,cx in zip(*non_zero_inds):
u_ind = uv_map_inds[rx, cx, 0]
v_ind = uv_map_inds[rx, cx, 1]
if count_tex[v_ind, u_ind, 0] == 0:
default_tex[v_ind, u_ind,:] = img[:, rx,cx]
else:
default_tex[v_ind, u_ind,:] += img[:, rx,cx]
count_tex[v_ind, u_ind, 0] += 1
count_tex = count_tex + 1*(count_tex < 1)
default_tex = default_tex / count_tex
return default_tex.transpose(2,0,1)
def wrap_texture_and_render(renderer, vert, camera, uv_sampler, texture_image, tex_size, other_vps, lights=True ):
sampled_texture = torch.nn.functional.grid_sample(texture_image.unsqueeze(0), uv_sampler)
sampled_texture = sampled_texture.squeeze().permute(1,2,0)
sampled_texture = sampled_texture.view(sampled_texture.size(0), tex_size, tex_size, 3)
sampled_texture = sampled_texture.unsqueeze(3).repeat(1, 1, 1, tex_size, 1)
renderer.set_light_dir([0, 1, -1], 0.4)
img_pred = renderer(vert, camera, texture=sampled_texture)
if not lights:
renderer.set_light_status(lights)
if other_vps:
new_camera = camera.clone()
new_camera[3] = 1
new_camera[4:] *=0
vp1 = renderer.diff_vp(
vert, new_camera, angle=90, axis=[0, 1, 0], texture=sampled_texture, extra_elev=True)
vp2 = renderer.diff_vp(
vert, new_camera, angle=180, axis=[0, 1, 0], texture=sampled_texture, extra_elev=True)
vp3 = renderer.diff_vp(
vert, new_camera, angle=180, axis=[1, 0, 0], texture=sampled_texture)
return (img_pred, vp1, vp2, vp3), texture_image.cpu().numpy().transpose(1,2,0)
else:
return (img_pred), texture_image.cpu().numpy().transpose(1,2,0)
def render_model_with_uv_map(renderer, vert, uvimg_H, uvimg_W, camera, uv_sampler, tex_size=6, other_vps=False):
texture_image = cub_parse.get_sample_grid((uvimg_H,uvimg_W)) * 0.5 + 0.5
texture_image = torch.cat([texture_image[:,:,None, 0], texture_image[:,:,None, 0]*0, texture_image[:,:,None, 1]], dim=-1)
texture_image = texture_image.permute(2,0,1).cuda()
# pdb.set_trace()
render_stuff, texture_image = wrap_texture_and_render(renderer, vert, camera, uv_sampler, texture_image, tex_size, other_vps)
return render_stuff, texture_image
def render_model_default(renderer, vert, uvimg_H, uvimg_W, camera, uv_sampler, tex_size=6, other_vps=False, color=None):
# texture_image = cub_parse.get_sample_grid((uvimg_H,uvimg_W)) * 0.5 + 0.5
# texture_image = torch.cat([texture_image[:,:,None, 0], texture_image[:,:,None, 0]*0, texture_image[:,:,None, 1]], dim=-1)
# texture_image = texture_image.permute(2,0,1).cuda()
# pdb.set_trace()
texture_image = torch.from_numpy(create_monocolor_texture(uvimg_H, uvimg_W, color=color)).float().cuda()[0:3]
render_stuff, texture_image = wrap_texture_and_render(renderer, vert, camera, uv_sampler, texture_image, tex_size, other_vps)
return render_stuff, texture_image
def copy_texture_from_img(mask, img, xy_map):
img = (visutil.undo_resnet_preprocess(img.unsqueeze(0))*mask).squeeze()
img = img.permute(1,2,0)
xy_map_inds = xy_map.clone()
xy_map_inds[:,:,0] = (xy_map_inds[:,:,0] + 1)* (img.size(1)/2)
xy_map_inds[:,:,1] = (xy_map_inds[:,:,1] + 1) * (img.size(0)/2)
xy_map_inds = torch.clamp(xy_map_inds.long(), min=0, max=img.size(0) -1).long().view(-1,2)
new_img = img[xy_map_inds[:,1], xy_map_inds[:,0],:].view(img.shape)
# new_img = img
new_img = new_img.permute(2,0,1)
# new_img = new_img * mask
new_img = (new_img*mask).unsqueeze(0)
new_img = visutil.tensor2im(new_img)
return new_img
def render_model_with_texture(renderer, vert, uvimg_H, uvimg_W, uv_map, img, mask, camera, uv_sampler, tex_size=6, other_vps=False, undo_resnet=True):
uv_map = uv_map.data.cpu().numpy()
if undo_resnet:
img = img.unsqueeze(0)
img = visutil.undo_resnet_preprocess(img).squeeze()
img = img.data.cpu().numpy()
# camera = camera.data.cpu().numpy()
mask = mask.data.cpu().numpy()
texture_image = create_texture_image_from_uv_map(uvimg_H, uvimg_W, uv_map, img, mask)
texture_image = torch.from_numpy(texture_image).float().cuda()
render_stuff, texture_image = wrap_texture_and_render(renderer, vert, camera, uv_sampler, texture_image, tex_size, other_vps)
return render_stuff, texture_image
def asVariable(x):
if type(x) is not torch.autograd.Variable:
x = Variable(x, requires_grad=False)
return x
def convert_as(src, trg):
src = src.type_as(trg)
if src.is_cuda:
src = src.cuda(device=trg.get_device())
if type(trg) is torch.autograd.Variable:
src = Variable(src, requires_grad=False)
return src
def convert2np(x):
# import ipdb; ipdb.set_trace()
# if type(x) is torch.autograd.Variable:
# x = x.data
# Assumes x is gpu tensor..
if type(x) is not np.ndarray:
return x.cpu().numpy()
return x
def tensor2mask(image_tensor, imtype=np.uint8):
# Input is H x W
image_numpy = image_tensor.cpu().float().numpy()
image_numpy = np.expand_dims(image_numpy, 2) * 255.0
image_numpy = np.tile(image_numpy, (1, 1, 3))
return image_numpy.astype(imtype)
def kp2im(kp, img, radius=None):
"""
Input is numpy array or torch.cuda.Tensor
img can be H x W, H x W x C, or C x H x W
kp is |KP| x 2
"""
kp_norm = convert2np(kp)
img = convert2np(img)
if img.ndim == 2:
img = np.dstack((img, ) * 3)
# Make it H x W x C:
elif img.shape[0] == 1 or img.shape[0] == 3:
img = np.transpose(img, (1, 2, 0))
if img.shape[2] == 1: # Gray2RGB for H x W x 1
img = np.dstack((img, ) * 3)
# kp_norm is still in [-1, 1], converts it to image coord.
kp = (kp_norm[:, :2] + 1) * 0.5 * img.shape[0]
if kp_norm.shape[1] == 3:
vis = kp_norm[:, 2] > 0
kp[~vis] = 0
kp = np.hstack((kp, vis.reshape(-1, 1)))
else:
vis = np.ones((kp.shape[0], 1))
kp = np.hstack((kp, vis))
kp_img = draw_kp(kp, img, radius=radius)
return kp_img
def draw_kp(kp, img, radius=None):
"""
kp is 15 x 2 or 3 numpy.
img can be either RGB or Gray
Draws bird points.
"""
if radius is None:
radius = max(4, (np.mean(img.shape[:2]) * 0.01).astype(int))
num_kp = kp.shape[0]
# Generate colors
import pylab
cm = pylab.get_cmap('gist_rainbow')
colors = 255 * np.array([cm(1. * i / num_kp)[:3] for i in range(num_kp)])
white = np.ones(3) * 255
image = img.copy()
if isinstance(image.reshape(-1)[0], np.float32):
# Convert to 255 and np.uint8 for cv2..
image = (image * 255).astype(np.uint8)
kp = np.round(kp).astype(int)
for kpi, color in zip(kp, colors):
# This sometimes causes OverflowError,,
if kpi[2] == 0:
continue
cv2.circle(image, (kpi[0], kpi[1]), radius + 1, white, -1)
cv2.circle(image, (kpi[0], kpi[1]), radius, color, -1)
# import matplotlib.pyplot as plt
# plt.ion()
# plt.clf()
# plt.imshow(image)
# import ipdb; ipdb.set_trace()
return image
def tensor2im(image_tensor, imtype=np.uint8, scale_to_range_1=False):
image_numpy = image_tensor.cpu().float().numpy()
image_numpy = np.transpose(image_numpy, (1, 2, 0))
if scale_to_range_1:
image_numpy = image_numpy - np.min(image_numpy, axis=2, keepdims=True)
image_numpy = image_numpy / np.max(image_numpy)
else:
# Clip to [0, 1]
image_numpy = np.clip(image_numpy, 0, 1)
return (image_numpy * 255).astype(imtype)
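VisRenderer itself needs CUDA and the neural renderer backend, so a minimal sketch is limited to the CPU-only conversion helpers (shapes below are arbitrary):
# Illustrative use of the tensor conversion helpers.
import torch
img = tensor2im(torch.rand(3, 64, 64))        # C x H x W float in [0, 1] -> H x W x 3 uint8
msk = tensor2mask(torch.rand(64, 64) > 0.5)   # H x W -> H x W x 3 uint8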
| 37.585859 | 150 | 0.61874 | 5,690 | 0.25486 | 0 | 0 | 0 | 0 | 0 | 0 | 3,243 | 0.145257 |
a7641eec8122f15991dc897dc20ebeb0e83b0d20 | 10,764 | py | Python | gatenlp/corpora/files.py | joancf/python-gatenlp | 21441d72ded19e9348052e99ac5bc1fc6af7ab6e | ["Apache-2.0"] | 30 | 2020-04-18T12:28:15.000Z | 2022-02-18T21:31:18.000Z | gatenlp/corpora/files.py | joancf/python-gatenlp | 21441d72ded19e9348052e99ac5bc1fc6af7ab6e | ["Apache-2.0"] | 133 | 2019-10-16T07:41:59.000Z | 2022-03-31T07:27:07.000Z | gatenlp/corpora/files.py | joancf/python-gatenlp | 21441d72ded19e9348052e99ac5bc1fc6af7ab6e | ["Apache-2.0"] | 4 | 2021-01-20T08:12:19.000Z | 2021-10-21T13:29:44.000Z |
"""
Module that defines Corpus and DocumentSource/DocumentDestination classes which access documents
as lines or parts in a file.
"""
import json
from gatenlp.urlfileutils import yield_lines_from
from gatenlp.document import Document
from gatenlp.corpora.base import DocumentSource, DocumentDestination
from gatenlp.corpora.base import MultiProcessingAble
class BdocjsLinesFileSource(DocumentSource, MultiProcessingAble):
"""
A document source which reads one bdoc json serialization of a document from each line of the given file.
"""
def __init__(self, file):
"""
        Create a BdocjsLinesFileSource.
Args:
file: the file path (a string) or an open file handle.
"""
self.file = file
def __iter__(self):
with open(self.file, "rt", encoding="utf-8") as infp:
for line in infp:
yield Document.load_mem(line, fmt="json")
class BdocjsLinesFileDestination(DocumentDestination):
"""
    Writes one line of JSON per document to a single output file.
"""
def __init__(self, file):
"""
Args:
file: the file to write to. If it exists, it gets overwritten without warning.
Expected to be a string or an open file handle.
"""
if isinstance(file, str):
self.fh = open(file, "wt", encoding="utf-8")
else:
self.fh = file
self.n = 0
def __enter__(self):
return self
def __exit__(self, extype, value, traceback):
self.fh.close()
def append(self, doc):
"""
Append a document to the destination.
Args:
doc: the document, if None, no action is performed.
"""
if doc is None:
return
assert isinstance(doc, Document)
self.fh.write(doc.save_mem(fmt="json"))
self.fh.write("\n")
self.n += 1
def close(self):
self.fh.close()
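# Hedged usage sketch (not part of the original module): writing a few documents to a
# bdoc-json-lines file and reading them back. The file name is hypothetical.
def _example_bdocjs_lines_roundtrip(path="docs.bdocjs.jsonl"):
    with BdocjsLinesFileDestination(path) as dest:
        for text in ["first document", "second document"]:
            dest.append(Document(text))
    return [doc.text for doc in BdocjsLinesFileSource(path)]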
class JsonLinesFileSource(DocumentSource, MultiProcessingAble):
"""
A document source which reads one json serialization per line, creates a document from one field
in the json and optionally stores all or a selection of remaining fields as document feature "__data".
"""
def __init__(self, file, text_field="text", data_fields=None, data_feature="__data"):
"""
Create a JsonLinesFileSource.
Args:
file: the file path (a string) or an open file handle.
text_field: the field name where to get the document text from.
data_fields: if a list of names, store these fields in the "__data" feature. if True, store all fields.
data_feature: the name of the data feature, default is "__data"
"""
# feature_fields: NOT YET IMPLEMENTED -- a mapping from original json fields to document features
self.file = file
self.text_field = text_field
self.data_fields = data_fields
self.data_feature = data_feature
def __iter__(self):
with open(self.file, "rt", encoding="utf-8") as infp:
for line in infp:
data = json.loads(line)
# TODO: what if the field does not exist? should we use get(text_field, "") instead?
text = data[self.text_field]
doc = Document(text)
if self.data_fields:
if isinstance(self.data_fields, list):
tmp = {}
for fname in self.data_fields:
# TODO: what if the field does not exist?
tmp[fname] = data[fname]
else:
tmp = data
doc.features[self.data_feature] = tmp
yield doc
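# Hedged usage sketch (not part of the original module): reading a json-lines file where
# each line has a "text" field plus extra fields that get stored in the "__data" feature.
# The file name and field names are hypothetical.
def _example_jsonlines_source(path="corpus.jsonl"):
    src = JsonLinesFileSource(path, text_field="text", data_fields=["id", "label"])
    return [(doc.text, doc.features["__data"]) for doc in src]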
class JsonLinesFileDestination(DocumentDestination):
"""
    Writes one line of JSON per document to a single output file. This will write either the document json
    as nested data or the document text to the field designated for the document, and will write other json
    fields from the "__data" document feature.
"""
def __init__(self, file, document_field="text", document_bdocjs=False, data_fields=True, data_feature="__data"):
"""
Args:
file: the file to write to. If it exists, it gets overwritten without warning.
Expected to be a string or an open file handle.
document_field: the name of the json field that will contain the document either just the text or
the bdocjs representation if document_bdocjs is True.
document_bdocjs: if True store the bdocjs serialization into the document_field instead of just the text
data_fields: if a list, only store these fields in the json, if False, do not store any additional fields.
Default is True: store all fields as is.
data_feature: the name of the data feature, default is "__data"
"""
if isinstance(file, str):
self.fh = open(file, "wt", encoding="utf-8")
else:
self.fh = file
self.n = 0
self.document_field = document_field
self.document_bdocjs = document_bdocjs
self.data_fields = data_fields
self.data_feature = data_feature
def __enter__(self):
return self
def __exit__(self, _extype, _value, _traceback):
self.fh.close()
def append(self, doc):
"""
Append a document to the destination.
Args:
doc: the document, if None, no action is performed.
"""
if doc is None:
return
assert isinstance(doc, Document)
data = {}
if self.data_fields:
if isinstance(self.data_fields, list):
for fname in self.data_fields:
data[fname] = doc.features[self.data_feature][fname]
else:
data.update(doc.features[self.data_feature])
# assign the document field last so it overwrites anything that comes from the data feature!
if self.document_bdocjs:
data[self.document_field] = doc.save_mem(fmt="json")
else:
data[self.document_field] = doc.text
self.fh.write(json.dumps(data))
self.fh.write("\n")
self.n += 1
def close(self):
self.fh.close()
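# Hedged usage sketch (not part of the original module): writing documents back out as
# json lines, one field for the text plus the fields kept in the "__data" feature. The
# documents are assumed to carry a "__data" feature (for example, as produced by
# JsonLinesFileSource above); the file name is hypothetical.
def _example_jsonlines_destination(docs, path="out.jsonl"):
    with JsonLinesFileDestination(path, document_field="text") as dest:
        for doc in docs:
            dest.append(doc)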
class TsvFileSource(DocumentSource, MultiProcessingAble):
"""
A TsvFileSource is a DocumentSource which is a single TSV file with a fixed number of tab-separated
values per row. Each document in sequence is created from the text in one of the columns and
document features can be set from arbitrary columns as well.
"""
def __init__(self, source, hdr=True, text_col=None, feature_cols=None, data_cols=None, data_feature="__data"):
"""
Creates the TsvFileSource.
Args:
source: a file path or URL
hdr: if True (default), expects a header line with the column names, if a list, should be the list
of column names, if False/None, no header line is expected.
text_col: the column which contains the text for creating the document. Either the column number,
or the name of the column (only possible if there is a header line) or a function that should
take the list of fields and arbitrary kwargs and return the text. Also passes "cols" and "n"
                as keyword arguments.
feature_cols: if not None, must be either a dictionary mapping document feature names to the
column numbers or column names of where to get the feature value from;
or a function that should take the list of fields and arbitrary kwargs and return a dictionary
with the features. Also passes "cols" (dict mapping column names to column indices, or None) and
"n" (current line number) as keyword arguments.
data_cols: if not None, either an iterable of the names of columns to store in the special document
feature "__data" or if "True", stores all columns. At the moment this only works if the tsv file
has a header line. The values are stored as a list in the order of the names given or the original
order of the values in the TSV file.
data_feature: the name of the document feature where to store the data, default is "__data"
"""
assert text_col is not None
self.hdr = hdr
self.text_col = text_col
self.feature_cols = feature_cols
self.data_cols = data_cols
self.source = source
self.n = 0
self.hdr2col = {}
if data_cols and not hdr:
raise Exception("Header must be present if data_cols should be used")
self.data_feature = data_feature
def __iter__(self):
reader = yield_lines_from(self.source)
if self.hdr and self.n == 0:
self.n += 1
self.hdr = next(reader).rstrip("\n\r").split("\t")
if self.hdr:
self.hdr2col = {name: idx for idx, name in enumerate(self.hdr)}
for line in reader:
line = line.rstrip("\n\r")
fields = line.split("\t")
if isinstance(self.text_col, int):
text = fields[self.text_col]
elif callable(self.text_col):
text = self.text_col(fields, cols=self.hdr2col, n=self.n)
else:
text = fields[self.hdr2col[self.text_col]]
doc = Document(text)
if self.feature_cols:
if callable(self.feature_cols):
doc.features.update(
self.feature_cols(fields, cols=self.hdr2col, n=self.n)
)
else:
for fname, colid in self.feature_cols.items():
if isinstance(colid, int):
value = fields[colid]
else:
value = fields[self.hdr2col[colid]]
doc.features[fname] = value
if self.data_cols:
if isinstance(self.data_cols, list):
data = {}
for cname in self.data_cols:
if isinstance(cname, str):
data[cname] = fields[self.hdr2col[cname]]
else:
# assume it is the column index!
data[cname] = fields[cname]
else:
data = fields
doc.features[self.data_feature] = data
self.n += 1
yield doc
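# Hedged usage sketch (not part of the original module): reading a TSV file with a header
# line, taking the document text from the "text" column, one column as a document feature
# and two further columns kept in the "__data" feature. The file name and column names
# are hypothetical.
def _example_tsv_source(path="corpus.tsv"):
    src = TsvFileSource(path, hdr=True, text_col="text",
                        feature_cols={"label": "label"}, data_cols=["id", "split"])
    return [(doc.text, doc.features["label"], doc.features["__data"]) for doc in src]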
| 40.164179
| 118
| 0.590673
| 10,392
| 0.96544
| 2,813
| 0.261334
| 0
| 0
| 0
| 0
| 5,001
| 0.464604
|
a7646b2e354d22868d6a6f4cc986b8c2069e186b
| 709
|
py
|
Python
|
src/ch5-viewmodels/web/services/AccountPageService.py
|
saryeHaddadi/Python.Course.WebAppFastAPI
|
ddc1f1850473c227e715c8ecd2afd741e53c4680
|
[
"MIT"
] | null | null | null |
src/ch5-viewmodels/web/services/AccountPageService.py
|
saryeHaddadi/Python.Course.WebAppFastAPI
|
ddc1f1850473c227e715c8ecd2afd741e53c4680
|
[
"MIT"
] | null | null | null |
src/ch5-viewmodels/web/services/AccountPageService.py
|
saryeHaddadi/Python.Course.WebAppFastAPI
|
ddc1f1850473c227e715c8ecd2afd741e53c4680
|
[
"MIT"
] | null | null | null |
import fastapi
from starlette.requests import Request
from web.viewmodels.account.AccountViewModel import AccountViewModel
from web.viewmodels.account.LoginViewModel import LoginViewModel
from web.viewmodels.account.RegisterViewModel import RegisterViewModel
router = fastapi.APIRouter()
@router.get('/account')
def index(request: Request):
vm = AccountViewModel(request)
return vm.to_dict()
@router.get('/account/register')
def register(request: Request):
vm = RegisterViewModel(request)
return vm.to_dict()
@router.get('/account/login')
def login(request: Request):
vm = LoginViewModel(request)
return vm.to_dict()
@router.get('/account/logout')
def logout():
return {}
| 22.870968
| 70
| 0.760226
| 0
| 0
| 0
| 0
| 408
| 0.575458
| 0
| 0
| 62
| 0.087447
|
a7659e9cd38acecd1d387852d0d503d7207e98a9
| 29,031
|
py
|
Python
|
src/opserver/uveserver.py
|
madkiss/contrail-controller
|
17f622dfe99f8ab4163436399e80f95dd564814c
|
[
"Apache-2.0"
] | null | null | null |
src/opserver/uveserver.py
|
madkiss/contrail-controller
|
17f622dfe99f8ab4163436399e80f95dd564814c
|
[
"Apache-2.0"
] | null | null | null |
src/opserver/uveserver.py
|
madkiss/contrail-controller
|
17f622dfe99f8ab4163436399e80f95dd564814c
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# UVEServer
#
# Operational State Server for UVEs
#
import gevent
import json
import copy
import xmltodict
import redis
import datetime
import sys
from opserver_util import OpServerUtils
import re
from gevent.coros import BoundedSemaphore
from pysandesh.util import UTCTimestampUsec
from pysandesh.connection_info import ConnectionState
# ConnectionType and ConnectionStatus are used below but were not imported;
# assumed to come from the generated process_info definitions.
from pysandesh.gen_py.process_info.ttypes import ConnectionType, ConnectionStatus
from sandesh.viz.constants import UVE_MAP
class UVEServer(object):
def __init__(self, redis_uve_server, logger, redis_password=None):
self._local_redis_uve = redis_uve_server
self._redis_uve_list = []
self._logger = logger
self._sem = BoundedSemaphore(1)
self._redis = None
self._redis_password = redis_password
if self._local_redis_uve:
self._redis = redis.StrictRedis(self._local_redis_uve[0],
self._local_redis_uve[1],
password=self._redis_password,
db=1)
self._uve_reverse_map = {}
for h,m in UVE_MAP.iteritems():
self._uve_reverse_map[m] = h
#end __init__
def update_redis_uve_list(self, redis_uve_list):
self._redis_uve_list = redis_uve_list
# end update_redis_uve_list
def fill_redis_uve_info(self, redis_uve_info):
redis_uve_info.ip = self._local_redis_uve[0]
redis_uve_info.port = self._local_redis_uve[1]
try:
self._redis.ping()
except redis.exceptions.ConnectionError:
redis_uve_info.status = 'DisConnected'
else:
redis_uve_info.status = 'Connected'
#end fill_redis_uve_info
@staticmethod
def merge_previous(state, key, typ, attr, prevdict):
print "%s New val is %s" % (attr, prevdict)
nstate = copy.deepcopy(state)
if UVEServer._is_agg_item(prevdict):
count = int(state[key][typ][attr]['previous']['#text'])
count += int(prevdict['#text'])
nstate[key][typ][attr]['previous']['#text'] = str(count)
if UVEServer._is_agg_list(prevdict):
sname = ParallelAggregator.get_list_name(
state[key][typ][attr]['previous'])
count = len(prevdict['list'][sname]) + \
len(state[key][typ][attr]['previous']['list'][sname])
nstate[key][typ][attr]['previous']['list'][sname].extend(
prevdict['list'][sname])
nstate[key][typ][attr]['previous']['list']['@size'] = \
str(count)
tstate = {}
tstate[typ] = {}
tstate[typ][attr] = copy.deepcopy(
nstate[key][typ][attr]['previous'])
nstate[key][typ][attr]['previous'] =\
ParallelAggregator.consolidate_list(tstate, typ, attr)
print "%s Merged val is %s"\
% (attr, nstate[key][typ][attr]['previous'])
return nstate
def run(self):
lck = False
while True:
try:
k, value = self._redis.brpop("DELETED")
self._sem.acquire()
lck = True
self._logger.debug("%s del received for " % value)
# value is of the format:
# DEL:<key>:<src>:<node-type>:<module>:<instance-id>:<message-type>:<seqno>
info = value.rsplit(":", 6)
key = info[0].split(":", 1)[1]
typ = info[5]
existing = self._redis.hgetall("PREVIOUS:" + key + ":" + typ)
tstate = {}
tstate[key] = {}
tstate[key][typ] = {}
state = UVEServer.convert_previous(existing, tstate, key, typ)
for attr, hval in self._redis.hgetall(value).iteritems():
snhdict = xmltodict.parse(hval)
if UVEServer._is_agg_list(snhdict[attr]):
if snhdict[attr]['list']['@size'] == "0":
continue
if snhdict[attr]['list']['@size'] == "1":
sname = ParallelAggregator.get_list_name(
snhdict[attr])
if not isinstance(
snhdict[attr]['list'][sname], list):
snhdict[attr]['list'][sname] = \
[snhdict[attr]['list'][sname]]
if (attr not in state[key][typ]):
# There is no existing entry for the UVE
vstr = json.dumps(snhdict[attr])
else:
# There is an existing entry
# Merge the new entry with the existing one
state = UVEServer.merge_previous(
state, key, typ, attr, snhdict[attr])
vstr = json.dumps(state[key][typ][attr]['previous'])
# Store the merged result back in the database
self._redis.sadd("PUVES:" + typ, key)
self._redis.sadd("PTYPES:" + key, typ)
self._redis.hset("PREVIOUS:" + key + ":" + typ, attr, vstr)
self._redis.delete(value)
except redis.exceptions.ResponseError:
                # send redis connection down msg. Could be because of authentication
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'UVE', status = ConnectionStatus.DOWN,
message = 'UVE result : Connection Error',
server_addrs = ['%s:%d' % (self._local_redis_uve[0],
self._local_redis_uve[1])])
sys.exit()
except redis.exceptions.ConnectionError:
if lck:
self._sem.release()
lck = False
gevent.sleep(5)
else:
if lck:
self._sem.release()
lck = False
self._logger.debug("Deleted %s" % value)
self._logger.debug("UVE %s Type %s" % (key, typ))
@staticmethod
def _is_agg_item(attr):
if attr['@type'] in ['i8', 'i16', 'i32', 'i64', 'byte',
'u8', 'u16', 'u32', 'u64']:
if '@aggtype' in attr:
if attr['@aggtype'] == "counter":
return True
return False
@staticmethod
def _is_agg_list(attr):
if attr['@type'] in ['list']:
if '@aggtype' in attr:
if attr['@aggtype'] == "append":
return True
return False
@staticmethod
def convert_previous(existing, state, key, typ, afilter=None):
# Take the existing delete record, and load it into the state dict
for attr, hval in existing.iteritems():
hdict = json.loads(hval)
if afilter is not None and len(afilter):
if attr not in afilter:
continue
# When recording deleted attributes, only record those
# for which delete-time aggregation is needed
if UVEServer._is_agg_item(hdict):
if (typ not in state[key]):
state[key][typ] = {}
if (attr not in state[key][typ]):
state[key][typ][attr] = {}
state[key][typ][attr]["previous"] = hdict
# For lists that require delete-time aggregation, we need
            # to normalize lists of size 1, and ignore those of size 0
if UVEServer._is_agg_list(hdict):
if hdict['list']['@size'] != "0":
if (typ not in state[key]):
state[key][typ] = {}
if (attr not in state[key][typ]):
state[key][typ][attr] = {}
state[key][typ][attr]["previous"] = hdict
if hdict['list']['@size'] == "1":
sname = ParallelAggregator.get_list_name(hdict)
if not isinstance(hdict['list'][sname], list):
hdict['list'][sname] = [hdict['list'][sname]]
return state
def get_part(self, part):
uves = {}
for redis_uve in self._redis_uve_list:
gen_uves = {}
redish = redis.StrictRedis(host=redis_uve[0],
port=redis_uve[1], db=1)
for elems in redish.smembers("PART2KEY:" + str(part)):
info = elems.split(":", 5)
gen = info[0] + ":" + info[1] + ":" + info[2] + ":" + info[3]
key = info[5]
if not gen_uves.has_key(gen):
gen_uves[gen] = {}
gen_uves[gen][key] = 0
uves[redis_uve[0] + ":" + str(redis_uve[1])] = gen_uves
return uves
def get_uve(self, key, flat, filters=None, multi=False, is_alarm=False, base_url=None):
filters = filters or {}
sfilter = filters.get('sfilt')
mfilter = filters.get('mfilt')
tfilter = filters.get('cfilt')
ackfilter = filters.get('ackfilt')
state = {}
state[key] = {}
statdict = {}
for redis_uve in self._redis_uve_list:
redish = redis.StrictRedis(host=redis_uve[0],
port=redis_uve[1],
password=self._redis_password, db=1)
try:
qmap = {}
origins = redish.smembers("ALARM_ORIGINS:" + key)
if not is_alarm:
origins = origins.union(redish.smembers("ORIGINS:" + key))
for origs in origins:
info = origs.rsplit(":", 1)
sm = info[0].split(":", 1)
source = sm[0]
if sfilter is not None:
if sfilter != source:
continue
mdule = sm[1]
if mfilter is not None:
if mfilter != mdule:
continue
dsource = source + ":" + mdule
typ = info[1]
if tfilter is not None:
if typ not in tfilter:
continue
odict = redish.hgetall("VALUES:" + key + ":" + origs)
afilter_list = set()
if tfilter is not None:
afilter_list = tfilter[typ]
for attr, value in odict.iteritems():
if len(afilter_list):
if attr not in afilter_list:
continue
if typ not in state[key]:
state[key][typ] = {}
if value[0] == '<':
snhdict = xmltodict.parse(value)
if snhdict[attr]['@type'] == 'list':
sname = ParallelAggregator.get_list_name(
snhdict[attr])
if snhdict[attr]['list']['@size'] == '0':
continue
elif snhdict[attr]['list']['@size'] == '1':
if not isinstance(
snhdict[attr]['list'][sname], list):
snhdict[attr]['list'][sname] = [
snhdict[attr]['list'][sname]]
if typ == 'UVEAlarms' and attr == 'alarms' and \
ackfilter is not None:
alarms = []
for alarm in snhdict[attr]['list'][sname]:
ack_attr = alarm.get('ack')
if ack_attr:
ack = ack_attr['#text']
else:
ack = 'false'
if ack == ackfilter:
alarms.append(alarm)
if not len(alarms):
continue
snhdict[attr]['list'][sname] = alarms
snhdict[attr]['list']['@size'] = \
str(len(alarms))
else:
continue
# print "Attr %s Value %s" % (attr, snhdict)
if attr not in state[key][typ]:
state[key][typ][attr] = {}
if dsource in state[key][typ][attr]:
print "Found Dup %s:%s:%s:%s:%s = %s" % \
(key, typ, attr, source, mdule, state[
key][typ][attr][dsource])
state[key][typ][attr][dsource] = snhdict[attr]
if sfilter is None and mfilter is None:
for ptyp in redish.smembers("PTYPES:" + key):
afilter = None
if tfilter is not None:
if ptyp not in tfilter:
continue
afilter = tfilter[ptyp]
existing = redish.hgetall("PREVIOUS:" + key + ":" + ptyp)
nstate = UVEServer.convert_previous(
existing, state, key, ptyp, afilter)
state = copy.deepcopy(nstate)
pa = ParallelAggregator(state, self._uve_reverse_map)
rsp = pa.aggregate(key, flat, base_url)
except redis.exceptions.ConnectionError:
self._logger.error("Failed to connect to redis-uve: %s:%d" \
% (redis_uve[0], redis_uve[1]))
except Exception as e:
self._logger.error("Exception: %s" % e)
return {}
else:
self._logger.debug("Computed %s" % key)
for k, v in statdict.iteritems():
if k in rsp:
mp = dict(v.items() + rsp[k].items())
statdict[k] = mp
return dict(rsp.items() + statdict.items())
# end get_uve
def get_uve_regex(self, key):
regex = ''
if key[0] != '*':
regex += '^'
regex += key.replace('*', '.*?')
if key[-1] != '*':
regex += '$'
return re.compile(regex)
# end get_uve_regex
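    # Hedged illustration (added comment, not original code): for example,
    #   get_uve_regex("vn*")      -> re.compile("^vn.*?")     keys starting with "vn"
    #   get_uve_regex("*vn*")     -> re.compile(".*?vn.*?")   keys containing "vn"
    #   get_uve_regex("vrouter1") -> re.compile("^vrouter1$") exact key match only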
def multi_uve_get(self, table, flat, filters=None, is_alarm=False, base_url=None):
# get_uve_list cannot handle attribute names very efficiently,
# so we don't pass them here
uve_list = self.get_uve_list(table, filters, False, is_alarm)
for uve_name in uve_list:
uve_val = self.get_uve(
table + ':' + uve_name, flat, filters, True, is_alarm, base_url)
if uve_val == {}:
continue
else:
uve = {'name': uve_name, 'value': uve_val}
yield uve
# end multi_uve_get
def get_uve_list(self, table, filters=None, parse_afilter=False,
is_alarm=False):
filters = filters or {}
uve_list = set()
kfilter = filters.get('kfilt')
if kfilter is not None:
patterns = set()
for filt in kfilter:
patterns.add(self.get_uve_regex(filt))
for redis_uve in self._redis_uve_list:
redish = redis.StrictRedis(host=redis_uve[0],
port=redis_uve[1],
password=self._redis_password, db=1)
try:
# For UVE queries, we wanna read both UVE and Alarm table
entries = redish.smembers('ALARM_TABLE:' + table)
if not is_alarm:
entries = entries.union(redish.smembers('TABLE:' + table))
for entry in entries:
info = (entry.split(':', 1)[1]).rsplit(':', 5)
uve_key = info[0]
if kfilter is not None:
kfilter_match = False
for pattern in patterns:
if pattern.match(uve_key):
kfilter_match = True
break
if not kfilter_match:
continue
src = info[1]
sfilter = filters.get('sfilt')
if sfilter is not None:
if sfilter != src:
continue
module = info[2]+':'+info[3]+':'+info[4]
mfilter = filters.get('mfilt')
if mfilter is not None:
if mfilter != module:
continue
typ = info[5]
tfilter = filters.get('cfilt')
if tfilter is not None:
if typ not in tfilter:
continue
if parse_afilter:
if tfilter is not None and len(tfilter[typ]):
valkey = "VALUES:" + table + ":" + uve_key + \
":" + src + ":" + module + ":" + typ
for afilter in tfilter[typ]:
attrval = redish.hget(valkey, afilter)
if attrval is not None:
break
if attrval is None:
continue
uve_list.add(uve_key)
except redis.exceptions.ConnectionError:
self._logger.error('Failed to connect to redis-uve: %s:%d' \
% (redis_uve[0], redis_uve[1]))
except Exception as e:
self._logger.error('Exception: %s' % e)
return set()
return uve_list
# end get_uve_list
# end UVEServer
class ParallelAggregator:
    def __init__(self, state, rev_map=None):
        self._state = state
        # Avoid a shared mutable default argument; fall back to an empty mapping.
        self._rev_map = rev_map if rev_map is not None else {}
def _default_agg(self, oattr):
itemset = set()
result = []
for source in oattr.keys():
elem = oattr[source]
hdelem = json.dumps(elem)
if hdelem not in itemset:
itemset.add(hdelem)
result.append([elem, source])
else:
for items in result:
if elem in items:
items.append(source)
return result
def _is_sum(self, oattr):
akey = oattr.keys()[0]
if '@aggtype' not in oattr[akey]:
return False
if oattr[akey]['@aggtype'] in ["sum"]:
return True
if oattr[akey]['@type'] in ['i8', 'i16', 'i32', 'i64',
'byte', 'u8', 'u16', 'u32', 'u64']:
if oattr[akey]['@aggtype'] in ["counter"]:
return True
return False
def _is_union(self, oattr):
akey = oattr.keys()[0]
if not oattr[akey]['@type'] in ["list"]:
return False
if '@aggtype' not in oattr[akey]:
return False
if oattr[akey]['@aggtype'] in ["union"]:
return True
else:
return False
def _is_append(self, oattr):
akey = oattr.keys()[0]
if not oattr[akey]['@type'] in ["list"]:
return False
if '@aggtype' not in oattr[akey]:
return False
if oattr[akey]['@aggtype'] in ["append"]:
return True
else:
return False
@staticmethod
def get_list_name(attr):
sname = ""
for sattr in attr['list'].keys():
if sattr[0] not in ['@']:
sname = sattr
return sname
@staticmethod
def _get_list_key(elem):
skey = ""
for sattr in elem.keys():
if '@aggtype' in elem[sattr]:
if elem[sattr]['@aggtype'] in ["listkey"]:
skey = sattr
return skey
def _sum_agg(self, oattr):
akey = oattr.keys()[0]
result = copy.deepcopy(oattr[akey])
count = 0
for source in oattr.keys():
count += int(oattr[source]['#text'])
result['#text'] = str(count)
return result
def _union_agg(self, oattr):
akey = oattr.keys()[0]
result = copy.deepcopy(oattr[akey])
itemset = set()
sname = ParallelAggregator.get_list_name(oattr[akey])
result['list'][sname] = []
siz = 0
for source in oattr.keys():
if isinstance(oattr[source]['list'][sname], basestring):
oattr[source]['list'][sname] = [oattr[source]['list'][sname]]
for elem in oattr[source]['list'][sname]:
hdelem = json.dumps(elem)
if hdelem not in itemset:
itemset.add(hdelem)
result['list'][sname].append(elem)
siz += 1
result['list']['@size'] = str(siz)
return result
def _append_agg(self, oattr):
akey = oattr.keys()[0]
result = copy.deepcopy(oattr[akey])
sname = ParallelAggregator.get_list_name(oattr[akey])
result['list'][sname] = []
siz = 0
for source in oattr.keys():
if not isinstance(oattr[source]['list'][sname], list):
oattr[source]['list'][sname] = [oattr[source]['list'][sname]]
for elem in oattr[source]['list'][sname]:
result['list'][sname].append(elem)
siz += 1
result['list']['@size'] = str(siz)
return result
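    # Hedged illustration (added comment, not original code): given per-source values
    # such as
    #   {'src-a': {'@type': 'u64', '@aggtype': 'counter', '#text': '5'},
    #    'src-b': {'@type': 'u64', '@aggtype': 'counter', '#text': '7'}}
    # _sum_agg returns a single attribute dict whose '#text' is '12', while _union_agg
    # and _append_agg merge the per-source 'list' contents (union drops duplicates,
    # append keeps them) and update the list's '@size' accordingly.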
@staticmethod
def _list_agg_attrs(item):
for ctrs in item.keys():
if '@aggtype'in item[ctrs]:
if item[ctrs]['@aggtype'] in ["listkey"]:
continue
if item[ctrs]['@type'] in ['i8', 'i16', 'i32', 'i64',
'byte', 'u8', 'u16', 'u32', 'u64']:
yield ctrs
@staticmethod
def consolidate_list(result, typ, objattr):
applist = ParallelAggregator.get_list_name(
result[typ][objattr])
appkey = ParallelAggregator._get_list_key(
result[typ][objattr]['list'][applist][0])
# There is no listkey ; no consolidation is possible
if len(appkey) == 0:
return result
# If the list's underlying struct has a listkey present,
# we need to further aggregate entries that have the
# same listkey
mod_result = copy.deepcopy(result[typ][objattr])
mod_result['list'][applist] = []
res_size = 0
mod_result['list']['@size'] = int(res_size)
# Add up stats
for items in result[typ][objattr]['list'][applist]:
matched = False
for res_items in mod_result['list'][applist]:
if items[appkey]['#text'] in [res_items[appkey]['#text']]:
for ctrs in ParallelAggregator._list_agg_attrs(items):
res_items[ctrs]['#text'] += int(items[ctrs]['#text'])
matched = True
if not matched:
newitem = copy.deepcopy(items)
for ctrs in ParallelAggregator._list_agg_attrs(items):
newitem[ctrs]['#text'] = int(items[ctrs]['#text'])
mod_result['list'][applist].append(newitem)
res_size += 1
# Convert results back into strings
for res_items in mod_result['list'][applist]:
for ctrs in ParallelAggregator._list_agg_attrs(res_items):
res_items[ctrs]['#text'] = str(res_items[ctrs]['#text'])
mod_result['list']['@size'] = str(res_size)
return mod_result
def aggregate(self, key, flat, base_url = None):
'''
This function does parallel aggregation of this UVE's state.
It aggregates across all sources and return the global state of the UVE
'''
result = {}
try:
for typ in self._state[key].keys():
result[typ] = {}
for objattr in self._state[key][typ].keys():
if self._is_sum(self._state[key][typ][objattr]):
sum_res = self._sum_agg(self._state[key][typ][objattr])
if flat:
result[typ][objattr] = \
OpServerUtils.uve_attr_flatten(sum_res)
else:
result[typ][objattr] = sum_res
elif self._is_union(self._state[key][typ][objattr]):
union_res = self._union_agg(
self._state[key][typ][objattr])
conv_res = None
if union_res.has_key('@ulink') and base_url and \
union_res['list']['@type'] == 'string':
uterms = union_res['@ulink'].split(":",1)
# This is the linked UVE's table name
m_table = uterms[0]
if self._rev_map.has_key(m_table):
h_table = self._rev_map[m_table]
conv_res = []
sname = ParallelAggregator.get_list_name(union_res)
for el in union_res['list'][sname]:
lobj = {}
lobj['name'] = el
lobj['href'] = base_url + '/analytics/uves/' + \
h_table + '/' + el
if len(uterms) == 2:
lobj['href'] = lobj['href'] + '?cfilt=' + uterms[1]
else:
lobj['href'] = lobj['href'] + '?flat'
conv_res.append(lobj)
if flat:
if not conv_res:
result[typ][objattr] = \
OpServerUtils.uve_attr_flatten(union_res)
else:
result[typ][objattr] = conv_res
else:
result[typ][objattr] = union_res
elif self._is_append(self._state[key][typ][objattr]):
result[typ][objattr] = self._append_agg(
self._state[key][typ][objattr])
append_res = ParallelAggregator.consolidate_list(
result, typ, objattr)
if flat:
result[typ][objattr] =\
OpServerUtils.uve_attr_flatten(append_res)
else:
result[typ][objattr] = append_res
else:
default_res = self._default_agg(
self._state[key][typ][objattr])
if flat:
if (len(default_res) == 1):
result[typ][objattr] =\
OpServerUtils.uve_attr_flatten(
default_res[0][0])
else:
nres = []
for idx in range(len(default_res)):
nres.append(default_res[idx])
nres[idx][0] =\
OpServerUtils.uve_attr_flatten(
default_res[idx][0])
result[typ][objattr] = nres
else:
result[typ][objattr] = default_res
except KeyError:
pass
return result
if __name__ == '__main__':
    # Stale demo brought in line with the current constructor signature. It assumes a
    # redis-uve instance is reachable on localhost:6379 and uses a plain stdlib logger.
    import logging
    uveserver = UVEServer(('127.0.0.1', 6379), logging.getLogger('uveserver'))
    uveserver.update_redis_uve_list([('127.0.0.1', 6379)])
    gevent.spawn(uveserver.run)
    uve_state = uveserver.get_uve("abc-corp:vn02", False)
    print json.dumps(uve_state, indent=4, sort_keys=True)
| 41.711207
| 91
| 0.450484
| 28,320
| 0.975509
| 923
| 0.031794
| 5,890
| 0.202887
| 0
| 0
| 3,300
| 0.113672
|
a765ce6d1c1eea007b73c094feaef3cfb92302b9
| 6,559
|
py
|
Python
|
tests/datasets/test_tonas.py
|
lucaspbastos/mirdata
|
e591c5411c41591e8606812df869dca1ad52ee0f
|
[
"BSD-3-Clause"
] | 224
|
2019-05-08T14:46:05.000Z
|
2022-03-31T12:14:39.000Z
|
tests/datasets/test_tonas.py
|
oriolcolomefont/mirdata
|
e591c5411c41591e8606812df869dca1ad52ee0f
|
[
"BSD-3-Clause"
] | 492
|
2019-04-08T16:59:33.000Z
|
2022-01-19T13:50:56.000Z
|
tests/datasets/test_tonas.py
|
oriolcolomefont/mirdata
|
e591c5411c41591e8606812df869dca1ad52ee0f
|
[
"BSD-3-Clause"
] | 46
|
2019-04-11T15:12:18.000Z
|
2022-01-19T17:33:50.000Z
|
import numpy as np
from tests.test_utils import run_track_tests
from mirdata import annotations
from mirdata.datasets import tonas
TEST_DATA_HOME = "tests/resources/mir_datasets/tonas"
def test_track():
default_trackid = "01-D_AMairena"
dataset = tonas.Dataset(TEST_DATA_HOME)
track = dataset.track(default_trackid)
expected_attributes = {
"singer": "En el barrio de Triana",
"style": "Debla",
"title": "Antonio Mairena",
"tuning_frequency": 451.0654725341684,
"f0_path": "tests/resources/mir_datasets/tonas/Deblas/01-D_AMairena.f0.Corrected",
"notes_path": "tests/resources/mir_datasets/tonas/Deblas/01-D_AMairena.notes.Corrected",
"audio_path": "tests/resources/mir_datasets/tonas/Deblas/01-D_AMairena.wav",
"track_id": "01-D_AMairena",
}
expected_property_types = {
"f0": annotations.F0Data,
"f0_automatic": annotations.F0Data,
"f0_corrected": annotations.F0Data,
"notes": annotations.NoteData,
"audio": tuple,
"singer": str,
"style": str,
"title": str,
"tuning_frequency": float,
}
run_track_tests(track, expected_attributes, expected_property_types)
def test_to_jams():
default_trackid = "01-D_AMairena"
dataset = tonas.Dataset(TEST_DATA_HOME)
track = dataset.track(default_trackid)
jam = track.to_jams()
    # Validate tonas jam schema
assert jam.validate()
# Validate melody
f0 = jam.search(namespace="pitch_contour")[0]["data"]
assert [note.time for note in f0] == [0.197, 0.209, 0.221, 0.232]
assert [note.duration for note in f0] == [0.0, 0.0, 0.0, 0.0]
assert [note.value for note in f0] == [
{"index": 0, "frequency": 0.0, "voiced": False},
{"index": 0, "frequency": 379.299, "voiced": True},
{"index": 0, "frequency": 379.299, "voiced": True},
{"index": 0, "frequency": 379.299, "voiced": True},
]
print([note.confidence for note in f0])
assert [note.confidence for note in f0] == [3.09e-06, 2.86e-06, 7.15e-06, 1.545e-05]
    # Validate note transcription
notes = jam.search(namespace="note_hz")[0]["data"]
assert [note.time for note in notes] == [
0.216667,
0.65,
2.183333,
2.566667,
]
assert [note.duration for note in notes] == [
0.433333,
1.016667,
0.3833329999999999,
0.3333330000000001,
]
assert [note.value for note in notes] == [
388.8382625732775,
411.9597888711769,
388.8382625732775,
411.9597888711769,
]
assert [note.confidence for note in notes] == [None, None, None, None]
def test_load_melody():
default_trackid = "01-D_AMairena"
dataset = tonas.Dataset(TEST_DATA_HOME)
track = dataset.track(default_trackid)
f0_path = track.f0_path
f0_data_corrected = tonas.load_f0(f0_path, True)
f0_data_automatic = tonas.load_f0(f0_path, False)
# check types
assert type(f0_data_corrected) == annotations.F0Data
assert type(f0_data_corrected.times) is np.ndarray
assert type(f0_data_corrected.frequencies) is np.ndarray
assert type(f0_data_corrected.voicing) is np.ndarray
assert type(f0_data_corrected._confidence) is np.ndarray
assert type(f0_data_automatic) == annotations.F0Data
assert type(f0_data_automatic.times) is np.ndarray
assert type(f0_data_automatic.frequencies) is np.ndarray
assert type(f0_data_corrected.voicing) is np.ndarray
assert type(f0_data_automatic._confidence) is np.ndarray
# check values
assert np.array_equal(
f0_data_corrected.times,
np.array([0.197, 0.209, 0.221, 0.232]),
)
assert np.array_equal(
f0_data_corrected.frequencies, np.array([0.000, 379.299, 379.299, 379.299])
)
assert np.array_equal(
f0_data_corrected.voicing,
np.array([0.0, 1.0, 1.0, 1.0]),
)
assert np.array_equal(
f0_data_corrected._confidence,
np.array([3.090e-06, 0.00000286, 0.00000715, 0.00001545]),
)
# check values
assert np.array_equal(
f0_data_automatic.times,
np.array([0.197, 0.209, 0.221, 0.232]),
)
assert np.array_equal(
f0_data_automatic.frequencies,
np.array(
[
0.000,
0.000,
143.918,
143.918,
]
),
)
assert np.array_equal(
f0_data_automatic.voicing,
np.array([0.0, 0.0, 1.0, 1.0]),
)
assert np.array_equal(
f0_data_automatic._confidence,
np.array([3.090e-06, 2.860e-06, 0.00000715, 0.00001545]),
)
def test_load_notes():
default_trackid = "01-D_AMairena"
dataset = tonas.Dataset(TEST_DATA_HOME)
track = dataset.track(default_trackid)
notes_path = track.notes_path
notes_data = tonas.load_notes(notes_path)
tuning_frequency = tonas._load_tuning_frequency(notes_path)
# check types
assert type(notes_data) == annotations.NoteData
assert type(notes_data.intervals) is np.ndarray
assert type(notes_data.pitches) is np.ndarray
assert type(notes_data.confidence) is np.ndarray
assert type(tuning_frequency) is float
# check tuning frequency
assert tuning_frequency == 451.0654725341684
# check values
assert np.array_equal(
notes_data.intervals[:, 0], np.array([0.216667, 0.65, 2.183333, 2.566667])
)
assert np.array_equal(
notes_data.intervals[:, 1], np.array([0.65, 1.666667, 2.566666, 2.9])
)
assert np.array_equal(
notes_data.pitches,
np.array(
[388.8382625732775, 411.9597888711769, 388.8382625732775, 411.9597888711769]
),
)
assert np.array_equal(
notes_data.confidence,
np.array(
[
0.018007,
0.010794,
0.00698,
0.03265,
]
),
)
def test_load_audio():
default_trackid = "01-D_AMairena"
dataset = tonas.Dataset(TEST_DATA_HOME)
track = dataset.track(default_trackid)
audio_path = track.audio_path
audio, sr = tonas.load_audio(audio_path)
assert sr == 44100
assert type(audio) is np.ndarray
def test_metadata():
default_trackid = "01-D_AMairena"
dataset = tonas.Dataset(TEST_DATA_HOME)
metadata = dataset._metadata
assert metadata[default_trackid] == {
"title": "En el barrio de Triana",
"style": "Debla",
"singer": "Antonio Mairena",
}
| 30.649533
| 96
| 0.633938
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 939
| 0.143162
|
a765ee4d5ce159cb94158867be1e207d0bdc988c
| 1,064
|
py
|
Python
|
pycreds.py
|
Ennovar/aws-creds-test
|
fcc5c10c8cfb79bb0ea0fd52f2e2f137efd8a9ce
|
[
"Apache-2.0"
] | 7
|
2017-06-13T15:55:23.000Z
|
2019-05-23T18:52:00.000Z
|
pycreds.py
|
Ennovar/aws-creds-test
|
fcc5c10c8cfb79bb0ea0fd52f2e2f137efd8a9ce
|
[
"Apache-2.0"
] | 2
|
2019-02-16T12:56:33.000Z
|
2020-07-02T19:32:58.000Z
|
pycreds.py
|
Ennovar/aws-creds-test
|
fcc5c10c8cfb79bb0ea0fd52f2e2f137efd8a9ce
|
[
"Apache-2.0"
] | 8
|
2017-05-17T22:46:07.000Z
|
2022-03-11T14:27:56.000Z
|
import os
import hashlib
import getpass
import hmac
import botocore.session
import botocore.exceptions
def _hash(value):
return hmac.new(os.environ['TEST_KEY'], value,
digestmod=hashlib.sha256).hexdigest()
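# Hedged illustration (added comment, not part of the original script): with TEST_KEY set
# in the environment, _hash('AKIA...') returns the hex digest of HMAC-SHA256 keyed with
# TEST_KEY, so credentials can be compared without printing them. Note that on Python 3
# both the key and the message passed to hmac.new must be bytes, so they would need to be
# encoded first.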
def main():
access_key = getpass.getpass("Access Key: ").strip()
secret_access_key = getpass.getpass("Secret Access Key: ").strip()
print("AKID hash: %s" % _hash(access_key))
print("AKID length: %s" % len(access_key))
print("\nSAK hash: %s" % _hash(secret_access_key))
print("SAK length: %s" % len(secret_access_key))
session = botocore.session.get_session()
sts = session.create_client('sts', aws_access_key_id=access_key,
aws_secret_access_key=secret_access_key)
try:
response = sts.get_caller_identity()
print("Successfuly made an AWS request with the "
"provided credentials.\n")
except botocore.exceptions.ClientError as e:
print("Error making AWS request: %s\n" % e)
if __name__ == '__main__':
main()
| 30.4
| 72
| 0.656015
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 230
| 0.216165
|